__author__ = 'Todd.Hay'
# -------------------------------------------------------------------------------
# Name: SpecialActions.py
# Purpose:
#
# Author: Todd.Hay
# Email: [email protected]
#
# Created: Jan 11, 2016
# License: MIT
#-------------------------------------------------------------------------------
from PyQt5.QtCore import pyqtProperty, pyqtSignal, pyqtSlot, QObject, QVariant, QThread
from PyQt5.QtQml import QJSValue
from py.common.FramListModel import FramListModel
import logging
from py.trawl.TrawlBackdeckDB_model import Specimen, TypesLu, SpeciesSamplingPlanLu, \
BarcodesLu, Settings, LengthWeightRelationshipLu, Hauls, PrincipalInvestigatorLu, PiActionCodesLu, TaxonomyLu
from peewee import *
from playhouse.shortcuts import model_to_dict, dict_to_model
from py.common.SoundPlayer import SoundPlayer
from py.common.LabelPrinter import LabelPrinter
from datetime import datetime
from copy import deepcopy
import re
class SpecialActionsModel(FramListModel):
def __init__(self):
super().__init__()
self.add_role_name(name="parentSpecimenId") # SPECIMEN.SPECIMEN_ID for the parent specimen record
self.add_role_name(name="parentSpecimenNumber") # What's shown to the user / 1st column of tvSamples table
self.add_role_name(name="specimenId") # SPECIMEN.SPECIMEN_ID for the child specimen record
self.add_role_name(name="specialActionId")
self.add_role_name(name="piId")
self.add_role_name(name="principalInvestigator")
self.add_role_name(name="specialAction")
self.add_role_name(name="value")
self.add_role_name(name="widgetType")
class PiProjectModel(FramListModel):
def __init__(self):
super().__init__()
self.add_role_name(name="piId")
self.add_role_name(name="principalInvestigator")
self.add_role_name(name="planId")
self.add_role_name(name="planName")
class SpecialActions(QObject):
"""
Class for the SpecialActionsScreen
"""
modelChanged = pyqtSignal()
modelInitialized = pyqtSignal()
specimenTypeChanged = pyqtSignal()
parentSpecimenCountChanged = pyqtSignal()
rowIndexChanged = pyqtSignal()
rowWidgetTypeChanged = pyqtSignal()
piProjectModelChanged = pyqtSignal()
printerStatusReceived = pyqtSignal(str, bool, str, arguments=["comport", "success", "message"])
def __init__(self, app=None, db=None):
super().__init__()
self._logger = logging.getLogger(__name__)
self._app = app
self._db = db
# Set up the models
self._model = SpecialActionsModel()
self._pi_project_model = PiProjectModel()
self._sound_player = SoundPlayer()
self._label_printer = LabelPrinter(app=self._app, db=self._db)
self._label_printer.tagIdChanged.connect(self._updated_printer_tag_received)
self._label_printer.printerStatusReceived.connect(self._printer_status_received)
self._standardSurveySpecimen = None
self._parent_specimen_count = 0
self._row_index = -1
self._row_widget_type = None
def _printer_status_received(self, comport, success, message):
"""
Method to catch the message coming back from the printer
:param comport:
:param success:
:param message:
:return:
"""
self.printerStatusReceived.emit(comport, success, message)
# logging.info('message received: ' + str(message))
def _updated_printer_tag_received(self, tag_id):
"""
        Method used to catch the newly updated printer tag, use it to derive the tvSamples rowIndex in question,
        and then do an upsert on that row to save the item["value"] to the database
        :param tag_id: str - the tag ID that was just printed
:return:
"""
# logging.info('new tag_id: ' + str(tag_id))
# Update the model
previous_tag_id = None
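        # Printed tag IDs are versioned with a trailing letter: a tag ending in "B"
        # follows the same tag ending in "A", and a tag ending in "A" follows the bare
        # (letterless) tag, so the previous tag is found by stepping the last character back.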
if not tag_id[-1:].isdigit():
if tag_id[-1:] == "A":
previous_tag_id = tag_id[:-1]
else:
char = chr(ord(tag_id[-1:]) - 1)
previous_tag_id = str(tag_id[:-1]) + char
# logging.info('previous_tag_id: ' + str(previous_tag_id))
index = self._model.get_item_index(rolename="value", value=previous_tag_id)
if index != -1:
self._model.setProperty(index=index, property="value", value=tag_id)
self.upsert_specimen(row_index=index)
# logging.info('index found and upserted complete, index: ' + str(index))
@pyqtSlot(str, int, str, str)
def printLabel(self, comport, pi_id, action, specimen_number):
"""
Method called from QML to print a label. This passes a request to the self._label_printer object
:return:
"""
self._label_printer.print_job(comport=comport, pi_id=pi_id, action=action, specimen_number=specimen_number)
@pyqtProperty(QVariant, notify=modelChanged)
def model(self):
"""
return the SpecimensModel
:return:
"""
return self._model
@pyqtProperty(QVariant, notify=piProjectModelChanged)
def piProjectModel(self):
return self._pi_project_model
@pyqtProperty(str)
def rowWidgetType(self):
"""
Method to return the widgetType of the currently selected tvSamples row. This is used to keep track
of what type of automatic measurement the row can take in from scales, barcode reader, etc.
:return:
"""
return self._row_widget_type
@rowWidgetType.setter
def rowWidgetType(self, value):
"""
Method to set the self._row_widget_type
:param value: str - enumerated values include: id, measurement, coral, salmon, sex - same as the
states in special actions
:return:
"""
# if value not in ["id", "measurement", "coral", "salmon", "sex", "sponge", "maturityLevel", "yesno"]:
# return
self._row_widget_type = value
self.rowWidgetTypeChanged.emit()
@pyqtProperty(int)
def rowIndex(self):
"""
Method to return the currently selected row of the tvSamples TableView
:return:
"""
return self._row_index
@rowIndex.setter
def rowIndex(self, value):
"""
Method to set the self._row_index to keep track of the currently selected row in tvSamples
This is needed when taking in a measurement from the barcode scanner or an automatic
length / weight measurement
:param value:
:return:
"""
if not isinstance(value, int):
return
self._row_index = value
self.rowIndexChanged.emit()
@pyqtProperty(QVariant, notify=parentSpecimenCountChanged)
def parentSpecimenCount(self):
return self._parent_specimen_count
@parentSpecimenCount.setter
def parentSpecimenCount(self, value):
if not isinstance(value, int):
return
self._parent_specimen_count = value
self.parentSpecimenCountChanged.emit()
@pyqtProperty(QVariant, notify=specimenTypeChanged)
def standardSurveySpecimen(self):
return self._standardSurveySpecimen
@standardSurveySpecimen.setter
def standardSurveySpecimen(self, value):
        if not isinstance(value, bool) and value is not None:
return
self._standardSurveySpecimen = value
self.specimenTypeChanged.emit()
@pyqtSlot(int, result=str)
def get_tag_id(self, row_index):
"""
Method to get a new tag ID
:return:
"""
# mapping = {"ovaryNumber": {"type": "000", "action": "Ovary", "id": "ovarySpecimenId"},
# "stomachNumber": {"type": "001", "action": "Stomach", "id": "stomachSpecimenId"},
# "tissueNumber": {"type": "002", "action": "Tissue", "id": "tissueSpecimenId"},
# "finclipNumber": {"type": "003", "action": "Finclip", "id": "finclipSpecimenId"}}
# if action not in mapping:
# return
if not isinstance(row_index, int) or row_index == -1:
return
item = self._model.get(row_index)
pi_id = item["piId"]
action_type_id = item["specialActionId"]
value = item["value"]
specimen_id = item["specimenId"]
try:
# Item 1 - Year / Item 2 - Vessel
for setting in Settings.select():
if setting.parameter == "Survey Year":
year = setting.value
try:
now_year = datetime.now().strftime("%Y")
if year != now_year:
year = now_year
except Exception as ex2:
year = datetime.now().strftime("%Y")
logging.info(f"unable to update the year: {ex2}")
logging.info(f"year = {year}")
elif setting.parameter == "Vessel ID":
vessel_id = setting.value
# Item 3 - Haul ID
haul_number = str(self._app.state_machine.haul["haul_number"])
if len(haul_number) > 3:
haul_number = haul_number[-3:]
# Item 4 - Specimen Type Code
try:
pi_action_code_id = \
PiActionCodesLu.select(PiActionCodesLu) \
.join(PrincipalInvestigatorLu,
on=(PiActionCodesLu.principal_investigator == PrincipalInvestigatorLu.principal_investigator)) \
.join(TypesLu, on=(PiActionCodesLu.action_type == TypesLu.type_id).alias('types')) \
.where(PrincipalInvestigatorLu.principal_investigator == pi_id,
TypesLu.type_id == action_type_id).get().pi_action_code
except DoesNotExist as ex:
pi_action_code_id = 999 # Give it a bogus entry
# logging.info('pi action code: ' + str(pi_action_code_id))
specimen_type_id = str(pi_action_code_id).zfill(3)
# Item 5 - Specimen Number
# Query for specimen number - get the latest one for the given specimen type (i.e. ovary, stomach, tissue, finclip)
spec_num_length = 20
# if pi_action_code_id != 999:
# specimens = (Specimen.select(Specimen, TypesLu)
# .join(SpeciesSamplingPlanLu,
# on=(SpeciesSamplingPlanLu.species_sampling_plan==Specimen.species_sampling_plan).alias('plan'))
# .join(PrincipalInvestigatorLu,
# on=(SpeciesSamplingPlanLu.principal_investigator==PrincipalInvestigatorLu.principal_investigator).alias('pi'))
# .join(TypesLu, on=(Specimen.action_type == TypesLu.type_id).alias('types'))
# .where(TypesLu.type_id == action_type_id,
# PrincipalInvestigatorLu.principal_investigator==pi_id,
# fn.length(Specimen.alpha_value) == spec_num_length).order_by(Specimen.alpha_value.desc()))
# specimens = (Specimen.select(fn.substr(Specimen.alpha_value, 18, 3).alias('specimen_number'))
# .join(SpeciesSamplingPlanLu,
# on=(SpeciesSamplingPlanLu.species_sampling_plan == Specimen.species_sampling_plan).alias('plan'))
# .join(PrincipalInvestigatorLu,
# on=(SpeciesSamplingPlanLu.principal_investigator == PrincipalInvestigatorLu.principal_investigator).alias('pi'))
# .join(TypesLu, on=(Specimen.action_type == TypesLu.type_id).alias('types'))
# .where(TypesLu.type_id == action_type_id,
# PrincipalInvestigatorLu.principal_investigator == pi_id,
# ((fn.length(Specimen.alpha_value) == spec_num_length) |
# (fn.length(Specimen.alpha_value) == spec_num_length + 1)),
# (fn.substr(Specimen.alpha_value, 1, 4) == year),
# (fn.substr(Specimen.alpha_value, 6, 3) == vessel_id)).order_by(
# fn.substr(Specimen.alpha_value, 18, 3).desc()))
specimens = (Specimen.select(fn.substr(Specimen.alpha_value, 18, 3).alias('specimen_number'))
.join(TypesLu, on=(Specimen.action_type == TypesLu.type_id).alias('types'))
.where(TypesLu.type_id == action_type_id,
((fn.length(Specimen.alpha_value) == spec_num_length) |
(fn.length(Specimen.alpha_value) == spec_num_length + 1)),
(fn.substr(Specimen.alpha_value, 1, 4) == year),
(fn.substr(Specimen.alpha_value, 6, 3) == vessel_id)).order_by(
fn.substr(Specimen.alpha_value, 18, 3).desc()))
# else:
#
# where_clause = (((fn.length(Specimen.alpha_value) == spec_num_length) |
# (fn.length(Specimen.alpha_value) == spec_num_length + 1)) &
# (Specimen.alpha_value.contains("-999-")))
# specimens = (Specimen.select(Specimen)
# .join(SpeciesSamplingPlanLu,
# on=(SpeciesSamplingPlanLu.species_sampling_plan == Specimen.species_sampling_plan).alias('plan'))
# .join(PrincipalInvestigatorLu,
# on=(
# SpeciesSamplingPlanLu.principal_investigator == PrincipalInvestigatorLu.principal_investigator).alias(
# 'pi'))
# .where(where_clause).order_by(Specimen.alpha_value.desc()))
# Get the newest specimen. Note that one may not exist as it hasn't been created yet
try:
last_specimen_num = specimens.get().specimen_number
# last_specimen_num = specimens.get().alpha_value
except DoesNotExist as dne:
last_specimen_num = None
logging.info('last specimen num: ' + str(last_specimen_num))
"""
Use Cases
1. No existing SPECIMEN record exists for this specimen_type - insert a new one by one-upping the
last number for this specimen_type
2. An existing SPECIMEN exists for this specimen_type - so a number should already be added, don't
override then, correct? We should only give the next number up ever after having queried the
specimen table for the last number for this specimen_type - which is what we have in
last_specimen_num
"""
# logging.info('value: ' + str(value))
if specimen_id is None or specimen_id == "" or \
value is None or value == "" or len(value) < spec_num_length or value == "Error":
# No specimen record exists for this specimen_type, so we're creating a new specimen_value
# So one up the highest number and put an "a" at the end of it
if last_specimen_num:
specimen_num = str(int(re.sub(r'[^\d.]+', '', last_specimen_num)[-3:]) + 1).zfill(3)
else:
specimen_num = "001"
else:
# Specimen record exists, then nothing to do here. Clicking the print button will up the last
# alpha character
return item["value"]
sep = "-"
tag_id = year + sep + vessel_id + sep + haul_number + sep + specimen_type_id + \
sep + specimen_num
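            # The assembled tag is year-vessel-haul-type-number, e.g. (illustrative
            # values only) "2016-001-123-000-001", which is 20 characters and matches
            # spec_num_length above.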
# One final confirmation that this tag_id does not already exist in the database
dup_count = Specimen.select().where(Specimen.alpha_value.contains(tag_id)).count()
if dup_count > 0:
logging.error("duplicate tag found: {0}, count: {1}".format(tag_id, dup_count))
return ""
except Exception as ex:
logging.info('get_tag_id error: ' + str(ex))
tag_id = "Error"
# logging.info('tag_id: ' + str(tag_id))
return tag_id
def _get_widget_type(self, display_name):
"""
Method to return the type of the widget with a given display name. This drives which UI widgets
are displayed on the right side of SpecialActionsScreen.qml
:param display_name: str - text of the specialAction role that is displayed in the tvSamples TableView
:return:
"""
if not isinstance(display_name, str):
return
display_name = display_name.lower()
widget_type = "id"
if "sex" in display_name:
widget_type = "sex"
elif "id" in display_name:
widget_type = "id"
elif "length" in display_name or \
"width" in display_name or \
"weight" in display_name:
widget_type = "measurement"
taxon_id = self._app.state_machine.species["taxonomy_id"]
if self._app.process_catch.checkSpeciesType("salmon", taxonId=taxon_id):
widget_type = "salmon"
elif self._app.process_catch.checkSpeciesType("coral", taxonId=taxon_id):
widget_type = "coral"
elif self._app.process_catch.checkSpeciesType("sponge", taxonId=taxon_id):
widget_type = "sponge"
return widget_type
def _get_value_type(self, value):
"""
Method to convert the value to an appropriate type given the property
:param value:
:return:
"""
try:
value = float(value)
return "numeric"
except ValueError as ex:
try:
value = int(value)
return "numeric"
except ValueError as ex_int:
pass
return "alpha"
@pyqtSlot()
def initialize_pi_project_list(self):
"""
Method to initialize the tvProjects list in the dlgSpecimen when Add Specimen is clicked
:return:
"""
self._pi_project_model.clear()
taxon_id = self._app.state_machine.species["taxonomy_id"]
plans = SpeciesSamplingPlanLu.select(SpeciesSamplingPlanLu, PrincipalInvestigatorLu) \
.join(PrincipalInvestigatorLu,
on=(SpeciesSamplingPlanLu.principal_investigator==PrincipalInvestigatorLu.principal_investigator).alias('pi')) \
.where((SpeciesSamplingPlanLu.parent_species_sampling_plan.is_null(True)) & \
(
(
(SpeciesSamplingPlanLu.plan_name == "FRAM Standard Survey") &
(
(
((SpeciesSamplingPlanLu.display_name == "Salmon") |
(SpeciesSamplingPlanLu.display_name == "Coral") |
(SpeciesSamplingPlanLu.display_name == "Sponge")) &
(SpeciesSamplingPlanLu.taxonomy == taxon_id)
) |
(SpeciesSamplingPlanLu.display_name == "Whole Specimen ID"))
) |
((SpeciesSamplingPlanLu.taxonomy == taxon_id) &
(SpeciesSamplingPlanLu.plan_name != "FRAM Standard Survey")
)
)
) \
.order_by(PrincipalInvestigatorLu.last_name)
for plan in plans:
is_coral = self._app.process_catch.checkSpeciesType("coral", taxon_id)
if is_coral:
if plan.display_name == "Whole Specimen ID":
continue
is_sponge = self._app.process_catch.checkSpeciesType("sponge", taxon_id)
if is_sponge:
if plan.display_name == "Whole Specimen ID":
continue
is_salmon = self._app.process_catch.checkSpeciesType("salmon", taxon_id)
if is_salmon:
pass
plan_name = plan.plan_name
if plan_name == "FRAM Standard Survey":
plan_name = plan.display_name
item = {"piId": plan.pi.principal_investigator,
"principalInvestigator": plan.pi.last_name,
"planId": plan.species_sampling_plan,
"planName": plan_name}
self._pi_project_model.appendItem(item)
self.modelInitialized.emit()
def _create_list_template(self):
"""
Method used to create the tvSpecimens list items that are applicable to this given species/taxonomy id
This is used when initializing the list from either the Process Catch or the Fish Sampling Screen
:return:
"""
templates = []
# Create a blank templates (i.e. no values) tableview items from existing protocols, but data is not populated
taxon_id = self._app.state_machine.species["taxonomy_id"]
plans = self._app.protocol_viewer.get_special_actions(taxon_id=taxon_id)
parent_plans = [x for x in plans if x["parentPlan"] is None]
for parent_plan in parent_plans:
# self.parentSpecimenCount += 1
# Have a protocol at the very top species sampling plan record, add it's actions to the list
if parent_plan["topProtocol"] is not None:
for action in parent_plan["actions"]:
is_coral = self._app.process_catch.checkSpeciesType("coral", taxon_id)
is_sponge = self._app.process_catch.checkSpeciesType("sponge", taxon_id)
if is_coral:
if action["displayName"] == "Whole Specimen ID":
# Don't include the Whole Specimen ID as an option for corals, as that is already included
# self.parentSpecimenCount -= 1
continue
specialAction = "Coral " + action["displayName"]
elif is_sponge:
if action["displayName"] == "Whole Specimen ID":
continue
specialAction = "Sponge " + action["displayName"]
else:
specialAction = action["displayName"]
item = {"parentSpecimenNumber": None, # self.parentSpecimenCount,
"parentSpecimenId": None,
"specimenId": None,
"specialActionId": action["actionTypeId"],
"principalInvestigator": parent_plan["pi"],
"piId": parent_plan["piId"],
"specialAction": specialAction,
"widgetType": action["widgetType"],
"planId": parent_plan["plan"],
"value": None}
templates.append(item)
# self._model.appendItem(item)
# Get all of the children species sampling plans and add their actions
child_plans = [x for x in plans if x["parentPlan"] == parent_plan["plan"]]
for child_plan in child_plans:
for action in child_plan["actions"]:
is_coral = self._app.process_catch.checkSpeciesType("coral", taxon_id)
is_sponge = self._app.process_catch.checkSpeciesType("sponge", taxon_id)
if is_coral:
if action["displayName"] == "Whole Specimen ID":
continue
specialAction = "Coral " + action["displayName"]
elif is_sponge:
if action["displayName"] == "Whole Specimen ID":
continue
specialAction = "Sponge " + action["displayName"]
else:
specialAction = action["displayName"]
item = {"parentSpecimenNumber": None, # self.parentSpecimenCount,
"parentSpecimenId": None,
"specimenId": None,
"specialActionId": action["actionTypeId"],
"principalInvestigator": parent_plan["pi"],
"piId": parent_plan["piId"],
"specialAction": specialAction,
"widgetType": action["widgetType"],
"planId": parent_plan["plan"],
"value": None}
templates.append(item)
# self._model.appendItem(item)
return templates
@pyqtSlot()
def initialize_fish_sampling_list(self):
"""
Method to initialize the tvSamples list when the screen is called from the FishSamplingScreen.qml screen. Query
the database to retrieve existing specimens that have already been collected for this taxonomy_id
for this given haul > could be at the ProcessCatch or the FishSampling level. Will need to treat them
differently if they come from ProcessCatchScreen v. FishSamplingScreen
:return:
"""
self._model.clear()
templates = self._create_list_template()
try:
where_clause = (Specimen.specimen == self._app.state_machine.specimen["parentSpecimenId"])
parent = (Specimen.select(Specimen, SpeciesSamplingPlanLu, PrincipalInvestigatorLu, TypesLu)
.join(SpeciesSamplingPlanLu,
on=(Specimen.species_sampling_plan == SpeciesSamplingPlanLu.species_sampling_plan).alias(
"plan"))
.join(PrincipalInvestigatorLu,
on=(
SpeciesSamplingPlanLu.principal_investigator == PrincipalInvestigatorLu.principal_investigator).alias(
"pi"))
.join(TypesLu, JOIN.LEFT_OUTER, on=(Specimen.action_type == TypesLu.type_id).alias("types"))
.where(where_clause)).get()
self.parentSpecimenCount = self._app.state_machine.specimen["specimenNumber"]
for template in templates:
# Add the current template to the model
current_item = deepcopy(template)
current_item["parentSpecimenNumber"] = self.parentSpecimenCount
current_item["parentSpecimenId"] = self._app.state_machine.specimen["parentSpecimenId"]
self._model.appendItem(current_item)
index = self._model.count - 1
# Get the existing child from the database
try:
child = Specimen.select(Specimen, TypesLu) \
.join(TypesLu, on=(Specimen.action_type == TypesLu.type_id).alias("types")) \
.where((Specimen.parent_specimen == parent.specimen) &
(Specimen.species_sampling_plan == current_item["planId"]) &
(Specimen.action_type == current_item["specialActionId"])).get()
# Update the value + specimenId, assuming that a record exists in the database (i.e. child exists)
value = None
# TODO - Todd Hay - might want to change the below logic when taking IDs
# For instance, what if we have both a printed ID label, which has alpha
                    # characters, and then someone tries to take a barcode. This would
                    # continue to show the printed alpha tag ID and not show the barcode
if child.alpha_value is not None:
value = child.alpha_value
elif child.numeric_value is not None:
value = child.numeric_value
if index is not None:
self._model.setProperty(index=index, property="specimenId", value=child.specimen)
self._model.setProperty(index=index, property="value", value=value)
except DoesNotExist as ex:
# Could not find a child record in the database, skip updating the model specimenId + value fields
pass
except Exception as ex:
logging.info('other exception: ' + str(ex))
pass
except DoesNotExist as ex:
logging.error("record does not exist: " + str(ex))
except Exception as ex:
logging.error("General exception: " + str(ex))
@pyqtSlot()
def initialize_process_catch_list(self):
"""
Method to initialize the tvSamples list when the screen is called from the ProcessCatchScreen.qml.
This will list out all of the special actions for a given specimen
:return:
"""
self._model.clear()
self.parentSpecimenCount = 0
templates = self._create_list_template()
"""
Query the database to retrieve existing specimens that have already been collected for this taxonomy_id
for this given haul > could be at the ProcessCatch or the FishSampling level. Will need to treat them differently
if they come from ProcessCatchScreen v. FishSamplingScreen as follows:
- ProcessCatchScreen.qml -
- FishSamplingScreen.qml -
"""
# Get All of the Parents first
try:
if self._app.state_machine.previousScreen == "processcatch":
where_clause = ((Specimen.catch == self._app.state_machine.species["catch_id"]) &
(Specimen.parent_specimen.is_null(True)) &
((SpeciesSamplingPlanLu.plan_name != "FRAM Standard Survey") |
((SpeciesSamplingPlanLu.plan_name == "FRAM Standard Survey") &
((SpeciesSamplingPlanLu.display_name == "Whole Specimen ID") |
(SpeciesSamplingPlanLu.display_name == "Coral") |
(SpeciesSamplingPlanLu.display_name == "Salmon") |
(SpeciesSamplingPlanLu.display_name == "Sponge")
))
))
elif self._app.state_machine.previousScreen == "fishsampling":
where_clause = ((Specimen.parent_specimen == self._app.state_machine.specimen["parentSpecimenId"]) &
((SpeciesSamplingPlanLu.plan_name != "FRAM Standard Survey") |
((SpeciesSamplingPlanLu.plan_name == "FRAM Standard Survey") &
(SpeciesSamplingPlanLu.display_name == "Whole Specimen ID"))
)
)
parents = (Specimen.select(Specimen, SpeciesSamplingPlanLu, PrincipalInvestigatorLu, TypesLu)
.join(SpeciesSamplingPlanLu,
on=(Specimen.species_sampling_plan == SpeciesSamplingPlanLu.species_sampling_plan).alias("plan"))
.join(PrincipalInvestigatorLu,
on=(SpeciesSamplingPlanLu.principal_investigator == PrincipalInvestigatorLu.principal_investigator).alias("pi"))
.join(TypesLu, JOIN.LEFT_OUTER, on=(Specimen.action_type == TypesLu.type_id).alias("types"))
.where(where_clause))
current_parent_specimen_id = -1
for parent in parents:
# Get all of the special actions that match this PI + Plan
template = [x for x in templates if x["piId"] == parent.plan.pi.principal_investigator and
x["planId"] == parent.plan.species_sampling_plan]
# logging.info('template: ' + str(template))
if current_parent_specimen_id != parent.specimen:
if self._app.state_machine.previousScreen == "processcatch":
self.parentSpecimenCount += 1
elif self._app.state_machine.previousScreen == "fishsampling":
self.parentSpecimenCount = self._app.state_machine.specimen["specimenNumber"]
# Add each of the items in the current template to the model. Later we add in the actual values
for item in template:
current_item = deepcopy(item)
current_item["parentSpecimenNumber"] = self.parentSpecimenCount
if self._app.state_machine.previousScreen == "processcatch":
current_item["parentSpecimenId"] = parent.specimen
elif self._app.state_machine.previousScreen == "fishsampling":
current_item["parentSpecimenId"] = self._app.state_machine.specimen["parentSpecimenId"]
self._model.appendItem(current_item)
# Iterate through all of the specimen children
children = Specimen.select(Specimen, TypesLu) \
.join(TypesLu, on=(Specimen.action_type == TypesLu.type_id).alias("types")) \
.where(Specimen.parent_specimen == parent.specimen)
for child in children:
if child.types.subtype is not None and child.types.subtype != "":
specialAction = child.types.subtype + " " + child.types.type
else:
specialAction = child.types.type
# Coral - need to prepend specialAction with Coral as appropriate, otherwise
# extra model rows are added. This is a bad hack. It deals with the fact that for our actions, we have
# nothing specific to corals, yet we display the term Coral in the Special Actions table, where in actions
# we do have the 3 specific Salmon actions...I don't like this difference at all.
# 05/11/2018 - added in the same issue for Sponge as for coral, as the survey team members want to start
# treating sponges similarly
# taxon_id = parent.plan.taxonomy_id
taxon_id = parent.plan.taxonomy.taxonomy
if self._app.process_catch.checkSpeciesType("coral", taxonId=taxon_id):
specialAction = "Coral " + specialAction
elif self._app.process_catch.checkSpeciesType("sponge", taxonId=taxon_id):
specialAction = "Sponge " + specialAction
# Get the proper value, i.e. alpha or numeric value
value = None
# TODO - Todd Hay - might want to change the below logic when taking IDs
# For instance, what if we have both a printed ID label, which has alpha
                    # characters, and then someone tries to take a barcode. This would
                    # continue to show the printed alpha tag ID and not show the barcode
if child.alpha_value is not None:
value = child.alpha_value
elif child.numeric_value is not None:
value = child.numeric_value
"""
Need to update 2 values in the item. First need to find the exact item.
- specimenId
- value
"""
index = [i for i, x in enumerate(self._model.items) if
x["piId"] == parent.plan.pi.principal_investigator and
x["planId"] == parent.plan.species_sampling_plan and
x["specialActionId"] == child.types.type_id and
x["parentSpecimenNumber"] == self.parentSpecimenCount]
if index is not None:
index = index[0]
self._model.setProperty(index=index, property="specimenId", value=child.specimen)
self._model.setProperty(index=index, property="value", value=value)
current_parent_specimen_id = parent.specimen
except DoesNotExist as ex:
logging.error("record does not exist: " + str(ex))
except Exception as ex:
logging.error("General exception: " + str(ex))
# logging.info('model count: ' + str(self._model.count))
# logging.info('model items: ' + str(self._model.items))
# Add in the new template items at the bottom of the list
if self._app.state_machine.previousScreen == "processcatch" or \
(self._app.state_machine.previousScreen == "fishsampling" and self._model.count == 0):
current_pi_id = -1
current_plan_id = -1
for template in templates:
if template["piId"] != current_pi_id or template["planId"] != current_plan_id:
if self._app.state_machine.previousScreen == "processcatch":
self.parentSpecimenCount += 1
elif self._app.state_machine.previousScreen == "fishsampling":
self.parentSpecimenCount = self._app.state_machine.specimen["specimenNumber"]
template["parentSpecimenId"] = self._app.state_machine.specimen["parentSpecimenId"]
# self.parentSpecimenCount += 1
current_pi_id = template["piId"]
current_plan_id = template["planId"]
template["parentSpecimenNumber"] = self.parentSpecimenCount
self._model.appendItem(template)
self.modelInitialized.emit()
@pyqtSlot(int, int, int)
def add_model_item(self, pi_id, plan_id, count):
"""
:return:
"""
items = [x for x in self._model.items if x["piId"] == pi_id and x["planId"] == plan_id]
for i in range(count):
self.parentSpecimenCount += 1
parent_specimen_number = -1
for item in items:
new_item = deepcopy(item)
if parent_specimen_number == -1:
parent_specimen_number = new_item["parentSpecimenNumber"]
if new_item["parentSpecimenNumber"] == parent_specimen_number:
new_item["parentSpecimenNumber"] = self.parentSpecimenCount
new_item["specimenId"] = None
new_item["parentSpecimenId"] = None
new_item["value"] = None
self.model.appendItem(new_item)
else:
break
@pyqtSlot(int, result=bool)
def if_exist_otolith_id(self, otolith_id):
specimen = Specimen.select().where(Specimen.numeric_value == otolith_id)
if specimen.count() > 0:
return True
else:
return False
@pyqtSlot(int)
def upsert_specimen(self, row_index):
"""
Method to perform an insert or replace of a given specimen, if it exists
        :param row_index: int - index of the row being updated in tvSamples
:param specimen_id:
:return:
"""
if not isinstance(row_index, int) or row_index == -1:
return
logging.info("upserting row: {0}".format(row_index))
try:
if isinstance(row_index, QVariant) or isinstance(row_index, QJSValue):
row_index = row_index.toVariant()
item = self._model.get(row_index)
value = item["value"]
value_type = self._get_value_type(value=value)
special_action = item["specialAction"]
specimen_id = item["specimenId"]
logging.info('specimen_id: ' + str(specimen_id) +
', row_index: ' + str(row_index) +
', item: ' + str(item))
if specimen_id is None:
# Inserting a new record
# logging.info('inserting a record')
# Check if a parent record exists in a neighbor specimen, i.e. a specimen with the same parentSpecimenNumber
parentSpecimenId = -1
parentSpecimenNumber = item["parentSpecimenNumber"]
sibling_specimens = [x for x in self._model.items if x["parentSpecimenNumber"] == parentSpecimenNumber]
for sibling in sibling_specimens:
if sibling["parentSpecimenId"] is not None and sibling["parentSpecimenId"] != "":
parentSpecimenId = sibling["parentSpecimenId"]
break
species_sampling_plan = item["planId"]
if parentSpecimenId == -1:
logging.info('no parent found, inserting one...')
try:
q = Specimen.insert(catch = self._app.state_machine.species["catch_id"],
species_sampling_plan = species_sampling_plan)
q.execute()
parentSpecimenId = Specimen.select().order_by(Specimen.specimen.desc()).get().specimen
except DoesNotExist as ex:
logging.error('error inserting the parent: ' + str(ex))
# Use INSERT OR REPLACE statement, peewee upsert statement
if value_type == "numeric":
q = Specimen.insert(parent_specimen=parentSpecimenId,
catch=self._app.state_machine.species["catch_id"],
species_sampling_plan = species_sampling_plan,
action_type=item["specialActionId"],
numeric_value=item["value"])
elif value_type == "alpha":
q = Specimen.insert(parent_specimen = parentSpecimenId,
catch = self._app.state_machine.species["catch_id"],
species_sampling_plan = species_sampling_plan,
action_type = item["specialActionId"],
alpha_value = item["value"])
q.execute()
new_specimen_id = Specimen.select().order_by(Specimen.specimen.desc()).get().specimen
# Update the model with the new parentSpecimenId and specimenId as appropriate from the database
self._model.setProperty(index=row_index, property="parentSpecimenId", value=parentSpecimenId)
self._model.setProperty(index=row_index, property="specimenId", value=new_specimen_id)
new_item = self._model.get(row_index)
logging.info('inserted a record, new model item: ' + str(new_item))
else:
# Doing an update to an existing specimen record
if value_type == "numeric":
q = Specimen.update(numeric_value=value, alpha_value=None).where(Specimen.specimen == specimen_id)
elif value_type == "alpha":
q = Specimen.update(alpha_value=value, numeric_value=None).where(Specimen.specimen == specimen_id)
q.execute()
# TODO Todd Hay - Move all of the sounds to the SerialPortManager.py > data_received method
# as we should play a sound once a serial port feed is received
# Play the appropriate sound
if item["specialAction"].lower() in ["is sex length sample", "is age weight sample"]:
return
if "coral specimen id" in item["specialAction"].lower():
self._sound_player.play_sound(sound_name="takeBarcode")
elif "sponge specimen id" in item["specialAction"].lower():
self._sound_player.play_sound(sound_name="takeBarcode")
elif "otolith age id" in item["specialAction"].lower():
self._sound_player.play_sound(sound_name="takeBarcode")
elif "tissue id" in item["specialAction"].lower() and \
"sudmant" in item["principalInvestigator"].lower():
self._sound_player.play_sound(sound_name="takeSudmantBarcode")
elif "length" in item["specialAction"].lower():
self._sound_player.play_sound(sound_name="takeLength")
elif "width" in item["specialAction"].lower():
self._sound_player.play_sound(sound_name="takeWidth")
elif "weight" in item["specialAction"].lower():
self._sound_player.play_sound(sound_name="takeWeight")
except Exception as ex:
logging.error("Error updating the special project information: {0}".format(ex))
@pyqtSlot(int)
def delete_specimen(self, specimen_id):
"""
        Method to delete a given specimen record, if it exists
:param specimen_id:
:return:
"""
if not isinstance(specimen_id, int):
return
# This should be an individual instance of a specimen, i.e. not a whole fish
try:
logging.info('deleting a record, specimen_id: {0}'.format(specimen_id))
# Delete from the database
specimen = Specimen.select().where(Specimen.specimen == specimen_id).get()
parent_specimen_id = specimen.parent_specimen.specimen
specimen.delete_instance(recursive=True, delete_nullable=True)
# Remove the specimenId from the model
index = self._model.get_item_index(rolename="specimenId", value=specimen_id)
if index != -1:
self._model.setProperty(index=index, property="specimenId", value=None)
parent_specimen_number = self._model.get(index)["parentSpecimenNumber"]
# Delete the parent specimen if no children exist anymore
count = Specimen.select().where(Specimen.parent_specimen == parent_specimen_id).count()
if count == 0:
specimen = Specimen.select().where(Specimen.specimen == parent_specimen_id).get()
specimen.delete_instance(recursive=True, delete_nullable=True)
# Empty the parentSpecimenNumber from the model
if index != -1:
sibling_specimens = [x for x in self._model.items if
x["parentSpecimenNumber"] == parent_specimen_number]
for sibling in sibling_specimens:
sibling["parentSpecimenId"] = None
except DoesNotExist as ex:
            logging.info('Error deleting specimen: ' + str(ex))
import jax.numpy as jnp
import xarray as xr
from jax_cfd.base.grids import Grid
from jax_cfd.spectral import utils as spectral_utils
from fourierflow.utils import downsample_vorticity_hat, grid_correlation
def test_convert_vorticity_to_velocity_and_back():
path = './data/kolmogorov/re_1000/initial_conditions/test.nc'
ds = xr.open_dataset(path, engine='h5netcdf')
grid = Grid(shape=(2048, 2048),
domain=((0, 2 * jnp.pi), (0, 2 * jnp.pi)))
velocity_solve = spectral_utils.vorticity_to_velocity(grid)
vorticity_1 = ds.isel(sample=0).vorticity
vorticity_hat = jnp.fft.rfftn(vorticity_1.values, axes=(0, 1))
vxhat, vyhat = velocity_solve(vorticity_hat)
vx = jnp.fft.irfftn(vxhat, axes=(0, 1))
vy = jnp.fft.irfftn(vyhat, axes=(0, 1))
x, y = grid.axes()
dx = x[1] - x[0]
dy = y[1] - y[0]
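    # Recover vorticity as the curl of the velocity field, w = dv/dx - du/dy, using
    # forward differences; jnp.roll wraps around, matching the periodic domain.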
dv_dx = (jnp.roll(vy, shift=-1, axis=0) - vy) / dx
du_dy = (jnp.roll(vx, shift=-1, axis=1) - vx) / dy
vorticity_2 = dv_dx - du_dy
vorticity_2 = xr.DataArray(vorticity_2,
coords={
'x': grid.axes()[0],
'y': grid.axes()[1],
},
dims=('x', 'y'))
rho = grid_correlation(vorticity_1, vorticity_2)
assert rho > 0.9999
def test_repeated_downsampling():
path = './data/kolmogorov/re_1000/initial_conditions/test.nc'
ds = xr.open_dataset(path, engine='h5netcdf')
vorticity_2048 = ds.isel(sample=0).vorticity
vorticity_2048_hat = jnp.fft.rfftn(vorticity_2048.values, axes=(0, 1))
domain = ((0, 2 * jnp.pi), (0, 2 * jnp.pi))
grid_2048 = Grid(shape=(2048, 2048), domain=domain)
grid_1024 = Grid(shape=(1024, 1024), domain=domain)
grid_512 = Grid(shape=(512, 512), domain=domain)
grid_256 = Grid(shape=(256, 256), domain=domain)
grid_128 = Grid(shape=(128, 128), domain=domain)
grid_64 = Grid(shape=(64, 64), domain=domain)
grid_32 = Grid(shape=(32, 32), domain=domain)
velocity_solve_2048 = spectral_utils.vorticity_to_velocity(grid_2048)
# We suffer up to 8% correlation loss when doing repeated downsampling!
grids = [grid_1024, grid_512, grid_256, grid_128, grid_64, grid_32]
ref_rhos = [0.9999999, 0.999, 0.998, 0.99, 0.97, .927]
# Keep halving 2048x2048 grid until we get a 32x32 grid.
grid_prev = grid_2048
vorticity_hat = vorticity_2048_hat
for ref_rho, grid in zip(ref_rhos, grids):
velocity_solve = spectral_utils.vorticity_to_velocity(grid_prev)
vorticity = downsample_vorticity_hat(
vorticity_hat, velocity_solve, grid_prev, grid, True)['vorticity']
# Directly downsample from 2048x2048 grid.
vorticity_direct = downsample_vorticity_hat(
vorticity_2048_hat, velocity_solve_2048, grid_2048, grid, True)['vorticity']
rho = grid_correlation(vorticity_direct, vorticity)
assert rho > ref_rho
# Prepare inputs for next iteration
vorticity_hat = jnp.fft.rfftn(vorticity.values, axes=(0, 1))
grid_prev = grid
|
from __future__ import absolute_import
import numpy as np
import scipy.ndimage as scind
from scipy.linalg import toeplitz
from .cpmorphology import fixup_scipy_ndimage_result as fix
from six.moves import range
def minimum(input, labels, index):
return fix(scind.minimum(input, labels, index))
def maximum(input, labels, index):
return fix(scind.maximum(input, labels, index))
def normalized_per_object(image, labels):
"""Normalize the intensities of each object to the [0, 1] range."""
nobjects = labels.max()
objects = np.arange(nobjects + 1)
lmin, lmax = scind.extrema(image, labels, objects)[:2]
# Divisor is the object's max - min, or 1 if they are the same.
divisor = np.ones((nobjects + 1,))
divisor[lmax > lmin] = (lmax - lmin)[lmax > lmin]
return (image - lmin[labels]) / divisor[labels]
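# Illustrative example for normalized_per_object(): an object whose intensities
# span [0.2, 0.6] is mapped linearly onto [0.0, 1.0]; a constant-intensity object
# maps to all zeros, with the divisor left at 1 to avoid division by zero.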
def quantize(image, nlevels):
"""Quantize an image into integers 0, 1, ..., nlevels - 1.
image -- a numpy array of type float, range [0, 1]
nlevels -- an integer
"""
tmp = np.array(image // (1.0 / nlevels), dtype="i1")
return tmp.clip(0, nlevels - 1)
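# Illustrative example for quantize() with nlevels=8: an intensity of 0.5 falls in
# bin int(0.5 // 0.125) == 4, while an intensity of exactly 1.0 maps to 8 and is
# clipped back to nlevels - 1 == 7.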
def cooccurrence(quantized_image, labels, scale_i=3, scale_j=0):
"""Calculates co-occurrence matrices for all the objects in the image.
Return an array P of shape (nobjects, nlevels, nlevels) such that
    P[o, :, :] is the co-occurrence matrix for object o.
quantized_image -- a numpy array of integer type
labels -- a numpy array of integer type
    scale_i, scale_j -- integer row and column offsets between co-occurring pixels
For each object O, the cooccurrence matrix is defined as follows.
Given a row number I in the matrix, let A be the set of pixels in
O with gray level I, excluding pixels in the rightmost S
columns of the image. Let B be the set of pixels in O that are S
    pixels to the right of a pixel in A. Row I of the co-occurrence
matrix is the gray-level histogram of the pixels in B.
"""
labels = labels.astype(int)
nlevels = quantized_image.max() + 1
nobjects = labels.max()
if scale_i < 0:
scale_i = -scale_i
scale_j = -scale_j
if scale_i == 0 and scale_j > 0:
image_a = quantized_image[:, :-scale_j]
image_b = quantized_image[:, scale_j:]
labels_ab = labels_a = labels[:, :-scale_j]
labels_b = labels[:, scale_j:]
elif scale_i > 0 and scale_j == 0:
image_a = quantized_image[:-scale_i, :]
image_b = quantized_image[scale_i:, :]
labels_ab = labels_a = labels[:-scale_i, :]
labels_b = labels[scale_i:, :]
elif scale_i > 0 and scale_j > 0:
image_a = quantized_image[:-scale_i, :-scale_j]
image_b = quantized_image[scale_i:, scale_j:]
labels_ab = labels_a = labels[:-scale_i, :-scale_j]
labels_b = labels[scale_i:, scale_j:]
else:
# scale_j should be negative
image_a = quantized_image[:-scale_i, -scale_j:]
image_b = quantized_image[scale_i:, :scale_j]
labels_ab = labels_a = labels[:-scale_i, -scale_j:]
labels_b = labels[scale_i:, :scale_j]
equilabel = (labels_a == labels_b) & (labels_a > 0)
if np.any(equilabel):
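        # Encode each co-occurring pair as a single integer,
        # (object - 1) * nlevels**2 + level_a * nlevels + level_b,
        # so that a single np.bincount call tallies the counts for every
        # object and grey-level pair at once.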
Q = (
nlevels * nlevels * (labels_ab[equilabel] - 1)
+ nlevels * image_a[equilabel]
+ image_b[equilabel]
)
R = np.bincount(Q)
if R.size != nobjects * nlevels * nlevels:
S = np.zeros(nobjects * nlevels * nlevels - R.size)
R = np.hstack((R, S))
P = R.reshape(nobjects, nlevels, nlevels)
pixel_count = fix(
scind.sum(equilabel, labels_ab, np.arange(nobjects, dtype=np.int32) + 1)
)
pixel_count = np.tile(
pixel_count[:, np.newaxis, np.newaxis], (1, nlevels, nlevels)
)
return (P.astype(float) / pixel_count.astype(float), nlevels)
else:
return np.zeros((nobjects, nlevels, nlevels)), nlevels
class Haralick(object):
"""
Calculate the Haralick texture features.
    Currently, the implementation uses nlevels = 8 different grey
levels.
The original reference is: Haralick et al. (1973), Textural
    Features for Image Classification, _IEEE Transactions on Systems,
    Man, and Cybernetics_, SMC-3(6):610-621. BEWARE: There are lots of
erroneous formulas for the Haralick features in the
literature. There is also an error in the original paper.
"""
def __init__(self, image, labels, scale_i, scale_j, nlevels=8, mask=None):
"""
image -- 2-D numpy array of 32-bit floating-point numbers.
labels -- 2-D numpy array of integers.
        scale_i, scale_j -- integers.
nlevels -- an integer
"""
if mask is not None:
labels = labels.copy()
labels[~mask] = 0
normalized = normalized_per_object(image, labels)
quantized = quantize(normalized, nlevels)
self.P, nlevels = cooccurrence(quantized, labels, scale_i, scale_j)
self.nobjects = labels.max()
px = self.P.sum(2) # nobjects x nlevels
py = self.P.sum(1) # nobjects x nlevels
#
# Normalize px and py to deal with roundoff errors in sums
#
px = px / np.sum(px, 1)[:, np.newaxis]
py = py / np.sum(py, 1)[:, np.newaxis]
self.nlevels = nlevels
self.levels = np.arange(nlevels)
self.rlevels = np.tile(self.levels, (self.nobjects, 1))
self.levels2 = np.arange(2 * nlevels - 1)
self.rlevels2 = np.tile(self.levels2, (self.nobjects, 1))
self.mux = ((self.rlevels + 1) * px).sum(1)
mux = np.tile(self.mux, (nlevels, 1)).transpose()
self.muy = ((self.rlevels + 1) * py).sum(1)
muy = np.tile(self.muy, (nlevels, 1)).transpose()
self.sigmax = np.sqrt(((self.rlevels + 1 - mux) ** 2 * px).sum(1))
self.sigmay = np.sqrt(((self.rlevels + 1 - muy) ** 2 * py).sum(1))
eps = np.finfo(float).eps
self.hx = -(px * np.log(px + eps)).sum(1)
self.hy = -(py * np.log(py + eps)).sum(1)
pxpy = np.array(
[
np.dot(px[i, :, np.newaxis], py[i, np.newaxis])
for i in range(self.nobjects)
]
).reshape(self.P.shape)
self.hxy1 = -(self.P * np.log(pxpy + eps)).sum(2).sum(1)
self.hxy2 = -(pxpy * np.log(pxpy + eps)).sum(2).sum(1)
self.eps = eps
self.p_xplusy = np.zeros((self.nobjects, 2 * nlevels - 1))
self.p_xminusy = np.zeros((self.nobjects, nlevels))
for x in self.levels:
for y in self.levels:
self.p_xplusy[:, x + y] += self.P[:, x, y]
self.p_xminusy[:, np.abs(x - y)] += self.P[:, x, y]
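        # p_xplusy[:, k] is the probability that two co-occurring grey levels sum to k,
        # and p_xminusy[:, k] the probability that they differ by k, for each object.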
# The remaining methods are for computing all the Haralick
    # features. Each method returns a vector of length nobjects.
def H1(self):
"Angular second moment."
return (self.P ** 2).sum(2).sum(1)
def H2(self):
"Contrast."
return (self.rlevels ** 2 * self.p_xminusy).sum(1)
def H3(self):
"Correlation."
multiplied = np.dot(self.levels[:, np.newaxis] + 1, self.levels[np.newaxis] + 1)
repeated = np.tile(multiplied[np.newaxis], (self.nobjects, 1, 1))
summed = (repeated * self.P).sum(2).sum(1)
h3 = (summed - self.mux * self.muy) / (self.sigmax * self.sigmay)
h3[np.isinf(h3)] = 0
return h3
def H4(self):
"Sum of squares: variation."
return self.sigmax ** 2
def H5(self):
"Inverse difference moment."
t = 1 + toeplitz(self.levels) ** 2
repeated = np.tile(t[np.newaxis], (self.nobjects, 1, 1))
return (1.0 / repeated * self.P).sum(2).sum(1)
def H6(self):
"Sum average."
if not hasattr(self, "_H6"):
self._H6 = ((self.rlevels2 + 2) * self.p_xplusy).sum(1)
return self._H6
def H7(self):
"Sum variance (error in Haralick's original paper here)."
h6 = np.tile(self.H6(), (self.rlevels2.shape[1], 1)).transpose()
return (((self.rlevels2 + 2) - h6) ** 2 * self.p_xplusy).sum(1)
def H8(self):
"Sum entropy."
return -(self.p_xplusy * np.log(self.p_xplusy + self.eps)).sum(1)
def H9(self):
"Entropy."
if not hasattr(self, "_H9"):
self._H9 = -(self.P * np.log(self.P + self.eps)).sum(2).sum(1)
return self._H9
def H10(self):
"Difference variance."
c = (self.rlevels * self.p_xminusy).sum(1)
c1 = np.tile(c, (self.nlevels, 1)).transpose()
e = self.rlevels - c1
return (self.p_xminusy * e ** 2).sum(1)
def H11(self):
"Difference entropy."
return -(self.p_xminusy * np.log(self.p_xminusy + self.eps)).sum(1)
def H12(self):
"Information measure of correlation 1."
maxima = np.vstack((self.hx, self.hy)).max(0)
return (self.H9() - self.hxy1) / maxima
def H13(self):
"Information measure of correlation 2."
# An imaginary result has been encountered once in the Matlab
# version. The reason is unclear.
return np.sqrt(1 - np.exp(-2 * (self.hxy2 - self.H9())))
# There is a H14, max correlation coefficient, but we don't currently
# use it.
def all(self):
return [
self.H1(),
self.H2(),
self.H3(),
self.H4(),
self.H5(),
self.H6(),
self.H7(),
self.H8(),
self.H9(),
self.H10(),
self.H11(),
self.H12(),
self.H13(),
]
|
#
# @lc app=leetcode id=1446 lang=python3
#
# [1446] Consecutive Characters
#
# @lc code=start
class Solution:
def maxPower(self, s: str) -> int:
maxLen = curLen = 0; c = ''
for ch in s:
if ch == c:
curLen += 1
else:
c = ch; curLen = 1
if curLen > maxLen:
maxLen = curLen
return maxLen
# @lc code=end
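# Example: maxPower("abbcccddddeeeeedcba") == 5 (the run "eeeee").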
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
from __future__ import annotations
import codecs
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import List, Optional
class Reporter(object):
"""This class is used as a metaphoric agent being a reporter"""
def __init__(self, print_sentences: bool = True, prefix: str = "") -> None:
self.articles: List[List[str]] = []
self.last_article: Optional[List[str]] = None
self.last_sentence: Optional[str] = None
self.print_sentences: bool = print_sentences
self.prefix: str = prefix
def new_article(self) -> None:
if self.last_article is None or len(self.last_article) > 0:
self.last_article = []
self.last_sentence = None
self.articles.append(self.last_article)
if self.print_sentences and len(self.last_article) > 0:
print("\n")
def add_sentence(self, sentence: str, print_this_sentence: bool = True) -> None:
cur_sentence: str = self.prefix + sentence
self.last_sentence = cur_sentence
self.last_article.append(cur_sentence)
if self.print_sentences and print_this_sentence:
print(cur_sentence)
def get_last_sentence(self) -> Optional[str]:
return self.last_sentence
def get_articles_as_string(self) -> str:
result: str = ""
for article in self.articles:
for sentence in article:
result += sentence + "\n"
result += "\n"
return result
def write_file(self, file_path) -> None:
with codecs.open(file_path, 'w', encoding='utf-8') as f:
f.write(self.get_articles_as_string())
def is_empty(self) -> bool:
return self.last_sentence is None
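# Minimal usage sketch (illustrative only; the output path is hypothetical):
#
#     reporter = Reporter(print_sentences=False, prefix="[report] ")
#     reporter.new_article()
#     reporter.add_sentence("Processing started.")
#     reporter.add_sentence("Processing finished.")
#     reporter.write_file("report.txt")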
|
import os
import sys
sys.path.insert(0,'..')
from citylearn import CityLearn
from pathlib import Path
import numpy as np
import torch
import matplotlib.pyplot as plt
from agents.sac import SAC
import gym
from stable_baselines3 import A2C
from stable_baselines3 import TD3
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3 import DQN
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common import results_plotter
from stable_baselines3.common.results_plotter import load_results, ts2xy, plot_results
##################################################################################################################
# Load environment
climate_zone = 5
data_path = Path("data/Climate_Zone_"+str(climate_zone))
sim_period = (0, 8760) #1 year
building_ids = ["Building_"+str(i) for i in [1]]
params = {'data_path':data_path,
'building_attributes':'building_attributes.json',
'weather_file':'weather_data.csv',
'solar_profile':'solar_generation_1kW.csv',
'carbon_intensity':'carbon_intensity.csv',
'building_ids':building_ids,
'buildings_states_actions':'buildings_state_action_space.json',
'simulation_period': sim_period,
'cost_function': ['ramping','1-load_factor','average_daily_peak','peak_demand','net_electricity_consumption','carbon_emissions'],
          'central_agent': True, # choose depending on whether we want a single agent or MARL
'save_memory': False }
# Contain the lower and upper bounds of the states and actions, to be provided to the agent to normalize the variables between 0 and 1.
env = CityLearn(**params)
#############################################################################################################
# The noise objects for TD3
n_actions = env.action_space.shape[-1]
action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))
model = TD3("MlpPolicy", env, action_noise=action_noise, verbose=1)
model.learn(total_timesteps=8760*2, log_interval=10)
model.save("td3_pendulum")
env = model.get_env()
del model # remove to demonstrate saving and loading
model = TD3.load("td3_pendulum")
obs = env.reset()
dones=False
list_rewards=[]
while not dones:
action, _states = model.predict(obs)
obs, rewards, dones, info = env.step(action)
list_rewards.append(rewards)
# plt.plot(np.arange(8760),list_rewards)
# plt.show()
mean_100ep_reward = round(np.mean(list_rewards[-100:]), 1)
print("Mean reward 100 derniers j:", mean_100ep_reward, "Num episodes:", len(list_rewards))
# # print(list_rewards[0:5])
print(env.cost())
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.transact import transact
def test_transact():
"""Test module transact.py by downloading
transact.csv and testing shape of
extracted data has 261 rows and 3 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = transact(test_path)
try:
assert x_train.shape == (261, 3)
except:
shutil.rmtree(test_path)
    raise
|
# -*- coding:utf-8 -*-
import json
try:
JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError:
JSONDecodeError = ValueError
class LoadCaseError(BaseException):
"""
    Raised when an error occurs while assembling a test case
"""
pass
class RunCaseError(BaseException):
"""
    Raised when an error occurs while running a test case
"""
pass
class NoSuchElementError(BaseException):
"""
    Element path error
"""
|
# -*- coding: utf-8 -*-
#######
# actinia-core - an open source REST API for scalable, distributed, high
# performance processing of geographical data that uses GRASS GIS for
# computational tasks. For details, see https://actinia.mundialis.de/
#
# Copyright (c) 2016-2018 Sören Gebbert and mundialis GmbH & Co. KG
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
#######
"""
Module management
* List all modules
* Describe single module
"""
from flask import jsonify, make_response
from flask_restful_swagger_2 import swagger
from actinia_core.resources.resource_base import ResourceBase
from actinia_gdi.apidocs import gmodules
from actinia_gdi.core.gmodulesGrass import createModuleList, createGrassModule
from actinia_gdi.model.gmodules import ModuleList
from actinia_gdi.model.responseModels import SimpleStatusCodeResponseModel
__license__ = "GPLv3"
__author__ = "Anika Bettge, Carmen Tawalika"
__copyright__ = "Copyright 2019, mundialis"
__maintainer__ = "Anika Bettge, Carmen Tawalika"
class ListModules(ResourceBase):
"""List all GRASS modules
"""
@swagger.doc(gmodules.listModules_get_docs)
def get(self):
"""Get a list of all GRASS GIS modules.
"""
module_list = createModuleList(self)
return make_response(jsonify(ModuleList(
status="success",
processes=module_list)), 200)
class DescribeModule(ResourceBase):
""" Definition for endpoint @app.route('grassmodules/<grassmodule>') to
    describe one module
Contains HTTP GET endpoint
Contains swagger documentation
"""
@swagger.doc(gmodules.describeGrassModule_get_docs)
def get(self, grassmodule):
"""Describe a GRASS GIS module.
"""
try:
grass_module = createGrassModule(self, grassmodule)
return make_response(jsonify(grass_module), 200)
except Exception:
res = (jsonify(SimpleStatusCodeResponseModel(
status=404,
message='Error looking for module "' + grassmodule + '".'
)))
return make_response(res, 404)
|
for i in range(1, 10, 2):
print(i)
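# The loop above prints 1, 3, 5, 7, 9 (range starts at 1, stops before 10, steps by 2).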
a = ['Mary', 'had', 'a', 'little', 'lamb']
for i in range(len(a)):
print(i, a[i])
for s in a:
    print(s)
from neo4japp.models.common import NEO4JBase
class GraphNode(NEO4JBase):
def __init__(self, id, label, domain_labels, data, sub_labels, display_name, url):
self.id = id
self.label = label
self.domain_labels = domain_labels
self.data = data
self.sub_labels = sub_labels
self.display_name = display_name
self.entity_url = url
class GraphRelationship(NEO4JBase):
def __init__(self, id, label, data, to, _from, to_label, from_label):
self.id = id
self.label = label
self.data = data
self.to = to
self._from = _from
self.to_label = to_label
self.from_label = from_label
@classmethod
def from_dict(cls, d):
copy = d.copy()
copy['_from'] = copy['from']
del copy['from']
return super().from_dict(copy)
def to_dict(self):
copy = super().to_dict().copy()
copy['from'] = copy['From']
del copy['From']
return copy
|
from typing import (
Any,
Awaitable,
Callable,
Sequence,
)
import trio
async def wait_first(callables: Sequence[Callable[[], Awaitable[Any]]]) -> None:
"""
Run any number of tasks but cancel out any outstanding tasks as soon as the first one finishes.
"""
async with trio.open_nursery() as nursery:
for task in callables:
            # Bind the current callable via a default argument; otherwise every
            # spawned coroutine would close over the loop variable and could end
            # up running only the last callable.
            async def _run_then_cancel(task: Callable[[], Awaitable[Any]] = task) -> None:
                await task()
                nursery.cancel_scope.cancel()
nursery.start_soon(_run_then_cancel)
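# Minimal usage sketch (illustrative; fetch_from_primary and fetch_from_mirror are
# hypothetical zero-argument coroutine functions):
#
#     async def main() -> None:
#         await wait_first([fetch_from_primary, fetch_from_mirror])
#
#     trio.run(main)
#
# Whichever callable finishes first cancels the nursery scope, so the slower task
# is cancelled rather than awaited to completion.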
|
"""binary_switch for Homematic(IP) Local."""
from __future__ import annotations
import logging
from typing import Any, Union
from hahomematic.const import HmPlatform
from hahomematic.devices.switch import CeSwitch
from hahomematic.platforms.switch import HmSwitch
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .control_unit import ControlUnit
from .generic_entity import HaHomematicGenericEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Homematic(IP) Local switch platform."""
control_unit: ControlUnit = hass.data[DOMAIN][config_entry.entry_id]
@callback
def async_add_switch(args: Any) -> None:
"""Add switch from HAHM."""
entities: list[HaHomematicGenericEntity] = []
for hm_entity in args[0]:
entities.append(HaHomematicSwitch(control_unit, hm_entity))
if entities:
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass,
control_unit.async_signal_new_hm_entity(
config_entry.entry_id, HmPlatform.SWITCH
),
async_add_switch,
)
)
async_add_switch(
[control_unit.async_get_new_hm_entities_by_platform(HmPlatform.SWITCH)]
)
class HaHomematicSwitch(
HaHomematicGenericEntity[Union[CeSwitch, HmSwitch]], SwitchEntity
):
"""Representation of the HomematicIP switch entity."""
@property
def is_on(self) -> bool:
"""Return true if switch is on."""
return self._hm_entity.value is True
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the switch on."""
await self._hm_entity.turn_on()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the switch off."""
await self._hm_entity.turn_off()
|
"""
Data type conversion of different files
"""
import csv
import json
import yaml
import codecs
from itertools import islice
try:
from openpyxl import load_workbook
except ModuleNotFoundError:
    raise ModuleNotFoundError("Please install the openpyxl library: https://pypi.org/project/openpyxl/")
def _check_data(list_data: list) -> list:
"""
Checking test data format.
:param list_data:
:return:
"""
if isinstance(list_data, list) is False:
raise TypeError("The data format is not `list`.")
if len(list_data) == 0:
raise ValueError("The data format cannot be `[]`.")
if isinstance(list_data[0], dict):
test_data = []
for data in list_data:
line = []
for d in data.values():
line.append(d)
test_data.append(line)
return test_data
else:
return list_data
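# Illustration (not part of the original module): a list of dicts is flattened
# to rows of values, while a list of lists is returned unchanged, e.g.
#   _check_data([{"user": "a", "pwd": "1"}, {"user": "b", "pwd": "2"}])
#   -> [["a", "1"], ["b", "2"]]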
def csv_to_list(file=None, line=1):
"""
Convert CSV file data to list
:param file: Path to file
:param line: Start line of read data
:return: list data
@data("data.csv", line=1)
def test_login(self, username, password):
print(username)
print(password)
"""
if file is None:
raise FileExistsError("Please specify the CSV file to convert.")
table_data = []
csv_data = csv.reader(codecs.open(file, 'r', 'utf_8_sig'))
    for row in islice(csv_data, line - 1, None):
        table_data.append(row)
return table_data
def excel_to_list(file=None, sheet="Sheet1", line=1):
"""
Convert Excel file data to list
:param file: Path to file
:param sheet: excel sheet, default name is Sheet1
:param line: Start line of read data
:return: list data
@data("data.xlsx", sheet="Sheet1", line=1)
def test_login(self, username, password):
print(username)
print(password)
"""
if file is None:
raise FileExistsError("Please specify the Excel file to convert.")
excel_table = load_workbook(file)
sheet = excel_table[sheet]
table_data = []
    for row in sheet.iter_rows(line, sheet.max_row):
        line_data = []
        for field in row:
            line_data.append(field.value)
        table_data.append(line_data)
return table_data
def json_to_list(file, key=None):
"""
Convert JSON file data to list
:param file: Path to file
:param key: Specifies the key for the dictionary
:return: list data
@data("data.json", key="login")
def test_login(self, username, password):
print(username)
print(password)
"""
if file is None:
raise FileExistsError("Please specify the JSON file to convert.")
if key is None:
with open(file, "r", encoding="utf-8") as f:
data = json.load(f)
list_data = _check_data(data)
else:
with open(file, "r", encoding="utf-8") as f:
try:
data = json.load(f)[key]
list_data = _check_data(data)
except KeyError:
raise ValueError("Check the test data, no '{}'".format(key))
return list_data
def yaml_to_list(file, key=None):
"""
    Convert YAML file data to list
:param file: Path to file
:param key: Specifies the key for the dictionary
:return: list data
@data("data.yaml", key="login")
def test_login(self, username, password):
print(username)
print(password)
"""
if file is None:
raise FileExistsError("Please specify the YAML file to convert.")
if key is None:
with open(file, "r", encoding="utf-8") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
list_data = _check_data(data)
else:
with open(file, "r", encoding="utf-8") as f:
try:
data = yaml.load(f, Loader=yaml.FullLoader)[key]
list_data = _check_data(data)
except KeyError:
raise ValueError("Check the test data, no '{}'".format(key))
return list_data
|
class PersistentObject(object):
def __init__(self, **kwargs):
fields = super().__getattribute__('fields')
d = {}
for k in fields:
if not k in kwargs:
continue
d[k] = kwargs[k]
super().__setattr__('values', d)
def __getattr__(self, name):
if name not in super().__getattribute__('values'):
raise AttributeError()
return self.__getattribute__('values')[name]
def __setattr__(self, name, value):
if name not in self.__getattribute__('values'):
raise AttributeError()
self.__getattribute__('values')[name] = value
def to_dict(self):
return self.__getattribute__('values').copy()
def make_persistent_object(name, fields):
def ctor(self, **kwargs):
PersistentObject.__init__(self, **kwargs)
return type(
name,
(PersistentObject, ),
{
'fields': tuple(fields),
'__init__': ctor
}
)
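# A minimal usage sketch (assumed, not part of the original file): build a
# class with two declared fields and exercise the attribute guards.
if __name__ == "__main__":
    User = make_persistent_object("User", ["name", "age"])
    u = User(name="Ada", age=36)
    print(u.name)        # -> Ada
    u.age = 37           # allowed: 'age' was supplied at construction time
    print(u.to_dict())   # -> {'name': 'Ada', 'age': 37}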
|
# coding=utf-8
import tensorflow as tf
from base.base_model import BaseModel
class TemplateModel(BaseModel):
"""
Concrete model template.
"""
def __init__(self, config, data):
super(TemplateModel, self).__init__(config, data)
self.build_model()
self.init_saver()
def build_model(self):
"""
Here you build the tf.Graph of any model you want and also define the loss.
"""
pass
|
import bisect
from typing import List
class Solution:
    def jobScheduling(self, startTime: List[int], endTime: List[int], profit: List[int]) -> int:
        # Sort jobs by start time; dp[k] is the best profit achievable using jobs k..N-1.
        jobs = sorted(zip(startTime, endTime, profit))
        sortedStartTime, N = [data[0] for data in jobs], len(jobs)
        dp = [0] * (N + 1)
        for k in range(N - 1, -1, -1):
            # First job whose start time is not earlier than job k's end time.
            index = bisect.bisect_left(sortedStartTime, jobs[k][1])
            # Either take job k and continue from that job, or skip job k.
            dp[k] = max(jobs[k][2] + dp[index], dp[k + 1])
return dp[0] |
from bitmovin.resources.models import AbstractModel
class SmoothContentProtection(AbstractModel):
def __init__(self, encoding_id, muxing_id, drm_id, id_=None, custom_data=None):
super().__init__(id_=id_, custom_data=custom_data)
self.encodingId = encoding_id
self.muxingId = muxing_id
self.drmId = drm_id
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object.get('id')
custom_data = json_object.get('customData')
encoding_id = json_object.get('encodingId')
muxing_id = json_object.get('muxingId')
drm_id = json_object.get('drmId')
content_protection = SmoothContentProtection(id_=id_,
custom_data=custom_data,
encoding_id=encoding_id,
muxing_id=muxing_id,
drm_id=drm_id)
return content_protection
|
# -*- coding: UTF-8 -*-
"""
Created on ???
@author: Cleber Camilo
"""
import os
from mpbuild import MultiProcBuilder
class CompilerBuilder:
def __init__(self, path, file, multiproc):
self.path = path
self.file = file
self.openFile = None
self.multiproc = multiproc
self.builder = None
def compile(self, paramExtr = ""):
os.chdir(self.path)
if self.multiproc:
self.builder = MultiProcBuilder(self.file, paramExtr)
self.builder.start()
else:
cmd = "make -f " + self.file + " " + paramExtr
self.openFile = os.popen(cmd,'r')
def readMsg(self):
if self.multiproc:
return self.builder.readMsg()
elif not self.openFile.closed:
return self.openFile.readline()
else: return ""
def close(self):
if self.multiproc:
self.builder.group.stop_all()
return self.builder.getResult()
while True:
try:
return self.openFile.close()
except:
pass
|
#!/usr/bin/env python3
from typing import List, Tuple
def find_numbers_with_sum(numbers: List[int], sum: int) -> Tuple[int, int]:
"""Find two numbers in the ordered list with the specified sum.
Arguments:
numbers -- the ordered list of numbers to search through
sum -- the sum that two numbers should add up to
This is a relatively simple algorithm to find two numbers (if they exist)
with a specific sum. Provided the list is in order, we can start by adding
the two numbers at the end of the list and comparing it to the desired sum.
If both numbers add up to more than the desired sum, then we can get a smaller sum
by choosing the second-largest number instead of the largest one.
Similarly, if both numbers add up to less than the desired sum, then we can get a
larger sum by choosing the second-smallest number instead of the smallest one. That
is repeated until two numbers with the specified sum are found or until the list is
exhausted.
By "closing in" on the range of numbers, we don't end up trying every possible pair
of numbers in the list, with a complexity of O(n^2). This algorithm runs with a
complexity of O(n) instead*. (* = it's been a while since I've done Big-O
notation, feel free to correct me with a pull request)
"""
start_num = 0
end_num = len(numbers) - 1
for i in range(end_num):
if (numbers[start_num] + numbers[end_num]) > sum:
end_num -= 1
elif (numbers[start_num] + numbers[end_num]) < sum:
start_num += 1
elif (numbers[start_num] + numbers[end_num]) == sum:
return (numbers[start_num], numbers[end_num])
raise RuntimeError("No two numbers sum to {}".format(sum))
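# Worked trace (illustrative, not part of the original file) of the two-pointer
# search above, for numbers=[1, 3, 5, 8, 11] and sum=13:
#   1 + 11 = 12 < 13  -> advance start_num
#   3 + 11 = 14 > 13  -> retreat end_num
#   3 + 8  = 11 < 13  -> advance start_num
#   5 + 8  = 13       -> return (5, 8)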
def find_three_numbers_with_sum(numbers: List[int], sum: int) -> Tuple[int, int, int]:
"""Find three numbers in the ordered list with the specified sum.
Arguments:
numbers -- the ordered list of numbers to search through
sum -- the sum that three numbers should add up to
This is a relatively simple algorithm to find three numbers (if they exist)
with a specific sum. Provided the list is in order, we split the list at the
    last number, and use the algorithm from find_numbers_with_sum() to find two
numbers that, when added to the last, add up to the desired sum (the two numbers
would just sum to [sum - last_number]).
    Similar to the algorithm from find_numbers_with_sum(), the algorithm works by
"closing in" on the range of possible numbers. If no numbers add up to the desired
sum, split the list at the second-to-last number, run the algorithm from
    find_numbers_with_sum() on the first slice again. That is repeated until the three
numbers with the specified sum are found or until the list is exhausted.
By "closing in" on the range of numbers, we don't end up trying every possible pair
of numbers in the list, with a complexity of O(n^3). This algorithm runs with a
complexity of O(n^2) instead*. (* = it's been a while since I've done Big-O
notation, feel free to correct me with a pull request)
"""
end_num = len(numbers) - 1
for i in range(len(numbers)):
inner_start_num = 0
inner_end_num = end_num - 1
for i in range(inner_end_num):
if (numbers[inner_start_num] + numbers[inner_end_num] + numbers[end_num]) > sum:
inner_end_num -= 1
elif (numbers[inner_start_num] + numbers[inner_end_num] + numbers[end_num]) < sum:
inner_start_num += 1
elif (numbers[inner_start_num] + numbers[inner_end_num] + numbers[end_num]) == sum:
return (numbers[inner_start_num], numbers[inner_end_num], numbers[end_num])
end_num -= 1
raise RuntimeError("No three numbers sum to {}".format(sum))
if __name__ == '__main__':
with open('input', 'r') as input_file:
numbers = sorted(int(num) for num in input_file.read().splitlines())
# Part one - Find two numbers that sum to 2020
numberone, numbertwo = find_numbers_with_sum(numbers, 2020)
print(f"Answer to part one: {numberone * numbertwo}")
# Part two - Find three numbers that sum to 2020
numberone, numbertwo, numberthree = find_three_numbers_with_sum(numbers, 2020)
print(f"Answer to part two: {numberone * numbertwo * numberthree}")
|
import argparse
from pathlib import Path
import json
from typing import Iterable
import tempfile
import random
from allennlp.models import Model
from sklearn.model_selection import train_test_split
import target_extraction
from target_extraction.data_types import TargetTextCollection
from target_extraction.dataset_parsers import semeval_2014, wang_2017_election_twitter_test, wang_2017_election_twitter_train
from target_extraction.tokenizers import spacy_tokenizer, ark_twokenize
from target_extraction.allen import AllenNLPModel
def parse_path(path_string: str) -> Path:
path_string = Path(path_string).resolve()
return path_string
def text_to_json(text_fp: Path) -> Iterable[dict]:
with text_fp.open('r') as text_file:
for line in text_file:
line = line.strip()
if line:
tokens = line.split()
yield {'text': line, 'tokens': tokens}
def predict_on_file(input_fp: Path, output_fp: Path, model: Model, batch_size: int) -> None:
first = True
output_fp.parent.mkdir(parents=True, exist_ok=True)
with output_fp.open('w+') as output_data_file:
for prediction in model.predict_sequences(text_to_json(input_fp),
batch_size=batch_size):
prediction_str = json.dumps(prediction)
if first:
first = False
else:
prediction_str = f'\n{prediction_str}'
output_data_file.write(prediction_str)
if __name__ == '__main__':
    cuda_help = 'Whether a model loaded from a pre-trained checkpoint should '\
                'be loaded on to the GPU or not'
parser = argparse.ArgumentParser()
parser.add_argument("--train_fp", type=parse_path,
help='File path to the train data')
parser.add_argument("--test_fp", type=parse_path,
help='File path to the test data')
parser.add_argument("--number_to_predict_on", type=int,
help='Sub sample the data until this number of samples are left')
parser.add_argument("--batch_size", type=int, default=64,
help='Batch size. Higher this is the more memory you need')
parser.add_argument('--cuda', action="store_true", help=cuda_help)
parser.add_argument('dataset_name', type=str,
choices=['semeval_2014', 'election_twitter'],
help='dataset that is to be trained and predicted')
parser.add_argument('model_config', type=parse_path,
help='File Path to the Model configuration file')
parser.add_argument('model_save_dir', type=parse_path,
help='Directory to save the trained model')
parser.add_argument('data_fp', type=parse_path,
help='File Path to the data to predict on')
parser.add_argument('output_data_fp', type=parse_path,
help='File Path to the output predictions')
args = parser.parse_args()
dataset_name = args.dataset_name
model_name = f'{dataset_name} model'
model = AllenNLPModel(model_name, args.model_config, 'target-tagger',
args.model_save_dir)
if dataset_name == 'semeval_2014':
if not args.train_fp or not args.test_fp:
raise ValueError('If training and predicting for the SemEval '
'datasets the training and test file paths must '
'be given')
# As we are performing target extraction we use the conflict polarity
# targets like prior work
train_data = semeval_2014(args.train_fp, conflict=True)
test_data = semeval_2014(args.test_fp, conflict=True)
else:
temp_election_directory = Path('.', 'data', 'twitter_election_dataset')
train_data = wang_2017_election_twitter_train(temp_election_directory)
test_data = wang_2017_election_twitter_test(temp_election_directory)
if not args.model_save_dir.is_dir():
# Use the same size validation as the test data
test_size = len(test_data)
# Create the train and validation splits
train_data = list(train_data.values())
train_data, val_data = train_test_split(train_data, test_size=test_size)
train_data = TargetTextCollection(train_data)
val_data = TargetTextCollection(val_data)
# Tokenize the data
datasets = [train_data, val_data, test_data]
tokenizer = spacy_tokenizer()
sizes = []
target_sizes = []
for dataset in datasets:
dataset.tokenize(tokenizer)
returned_errors = dataset.sequence_labels(return_errors=True)
if returned_errors:
for error in returned_errors:
error_id = error['text_id']
del dataset[error_id]
returned_errors = dataset.sequence_labels(return_errors=True)
if returned_errors:
raise ValueError('Sequence label errors are still persisting')
sizes.append(len(dataset))
dataset: TargetTextCollection
target_sizes.append(dataset.number_targets())
print(f'Lengths Train: {sizes[0]}, Validation: {sizes[1]}, Test: {sizes[2]}')
print(f'Number of targets, Train: {target_sizes[0]}, Validation: '
f'{target_sizes[1]}, Test: {target_sizes[2]}')
print('Fitting model')
model.fit(train_data, val_data, test_data)
print('Finished fitting model\nNow Evaluating model:')
else:
test_data.tokenize(spacy_tokenizer())
device = -1
if args.cuda:
device = 0
model.load(cuda_device=device)
print('Finished loading model\nNow Evaluating model:')
for data in test_data.values():
data['tokens'] = data['tokenized_text']
test_iter = iter(test_data.values())
for test_pred in model.predict_sequences(test_data.values(), batch_size=args.batch_size):
relevant_test = next(test_iter)
relevant_test['predicted_sequence_labels'] = test_pred['sequence_labels']
test_scores = test_data.exact_match_score('predicted_sequence_labels')
print(f'Test F1 scores: {test_scores[2]}')
first = True
data_fp = args.data_fp
from time import time
t = time()
if args.number_to_predict_on:
data_count = 0
with data_fp.open('r') as data_file:
for line in data_file:
data_count += 1
if data_count <= args.number_to_predict_on:
raise ValueError(f'Number of lines in the data file {data_count} '
'to predict on is less than or equal to the number'
f' of lines to sub-sample {args.number_to_predict_on}')
lines_numbers_to_subsample = random.sample(range(data_count),
k=args.number_to_predict_on)
lines_numbers_to_subsample = set(lines_numbers_to_subsample)
with tempfile.TemporaryDirectory() as temp_dir:
temp_fp = Path(temp_dir, 'temp_input_file.txt')
with temp_fp.open('w+') as temp_file:
with data_fp.open('r') as data_file:
for index, line in enumerate(data_file):
if index in lines_numbers_to_subsample:
temp_file.write(line)
print(f'subsampled data {args.number_to_predict_on} lines')
predict_on_file(temp_fp, args.output_data_fp, model, args.batch_size)
else:
predict_on_file(data_fp, args.output_data_fp, model, args.batch_size)
print(f'Done took {time() - t}')
|
from mqtt_sn_gateway import messages
class TestPuback:
def test_parse(self):
data = b'\x07\r\x00\x01q\xf6\x00'
msg = messages.Puback.from_bytes(data)
assert msg.length == len(data)
assert msg.msg_type == messages.MessageType.PUBACK
assert msg.return_code == messages.ReturnCode.ACCEPTED
assert msg.msg_id == b"q\xf6"
assert msg.topic_id == 1
def test_to_bytes(self):
msg = messages.Puback(
topic_id=1, msg_id=b"q\xf6", return_code=messages.ReturnCode.ACCEPTED
)
assert msg.to_bytes() == b'\x07\r\x00\x01q\xf6\x00'
|
'''
Copyright 2022 Leonardo Cabral
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
## NullControl - Does nothing
## Define TeCoLab variable class
class Controller:
def __init__(self, T = 1):
self.T = T # Sampling period [x100 ms]
self.counter = 0
self.lastControlAction = (0, 0, 0)
# When programming a new controller, change only the following method
def controlAction(self, setPoints, temperatures):
# Control algorithm input variables
sp1_abs = setPoints[0]
sp1_rel = setPoints[1]
sp2_abs = setPoints[2]
sp2_rel = setPoints[3]
tem_Amb = temperatures[0]
tem_He1 = temperatures[1]
tem_He2 = temperatures[2]
# Control algorithm output variables
H1PWM = 0
H2PWM = 0
CoolerPWM = 0
# Control algorithm computation
# No computation for this controller
# Control algorithm return
return H1PWM, H2PWM, CoolerPWM
# When programming a new controller, do not change the following methods
def _control(self, setPoints, temperatures):
self.counter += 1
if self.counter >= 0:
self._resetCounter()
self.lastControlAction = self.controlAction(setPoints, temperatures)
return self.lastControlAction, +1 # +1 represents new control action
else:
return self.lastControlAction, 0 # 0 represents no new control action
def _resetCounter(self):
        self.counter = -self.T
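# A minimal subclass sketch (assumed, not part of the original file): a
# proportional controller that overrides only controlAction(), as the comments
# above prescribe. The gain and the 0-100 PWM clamp are illustrative choices.
class ProportionalController(Controller):
    def controlAction(self, setPoints, temperatures):
        kp = 10.0                                # assumed proportional gain
        err1 = setPoints[0] - temperatures[1]    # heater 1 absolute set point error
        err2 = setPoints[2] - temperatures[2]    # heater 2 absolute set point error
        H1PWM = max(0, min(100, kp * err1))      # clamp to an assumed 0-100 PWM range
        H2PWM = max(0, min(100, kp * err2))
        CoolerPWM = 0
        return H1PWM, H2PWM, CoolerPWM
|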
import argparse
import logging
import importlib
import os
import sys
from . import (
collect_pread_gfa, collect_contig_gfa,
gen_gfa_v1,
gen_gfa_v2,
gen_bandage_csv,
)
LOG = logging.getLogger()
"""
# Collect all info needed to format the GFA-1 and GFA-2 representations of
# the assembly graphs.
time python3 -m falcon_kit.mains.collect_pread_gfa >| asm.gfa.json
time python3 -m falcon_kit.mains.collect_pread_gfa --add-string-graph >| sg.gfa.json
time python3 -m falcon_kit.mains.collect_contig_gfa >| contig.gfa.json
# Output the assembly pread graph.
time python3 -m falcon_kit.mains.gen_gfa_v1 asm.gfa.json >| asm.gfa
time python3 -m falcon_kit.mains.gen_gfa_v2 asm.gfa.json >| asm.gfa2
time python3 -m falcon_kit.mains.gen_bandage_csv asm.gfa.json >| asm.csv
# Output the string graph.
time python3 -m falcon_kit.mains.gen_gfa_v1 sg.gfa.json >| sg.gfa
time python3 -m falcon_kit.mains.gen_gfa_v2 sg.gfa.json >| sg.gfa2
time python3 -m falcon_kit.mains.gen_bandage_csv sg.gfa.json >| sg.csv
# Output the contig graph with associated contigs attached to each primary contig.
time python3 -m falcon_kit.mains.gen_gfa_v2 contig.gfa.json >| contig.gfa2
"""
def run_line(line):
parts = line.split()
while parts:
top = parts.pop(0)
# if top == "time": We could time it here! TODO, maybe.
if not top.startswith('python'):
LOG.info("Skipping '{}' in '{}'".format(top, line))
else:
break
else:
raise Exception("No 'python' executable name found.")
if not parts or not parts[0] == "-m":
raise Exception("Expected line of form '... python3 -m ...'")
parts.pop(0)
if not parts or 'falcon_kit.mains.' not in parts[0]:
raise Exception("Expected line of form '... python3 -m falcon_kit.mains.(foo) ...'")
module = parts.pop(0)
mod = importlib.import_module(module)
LOG.info("module={}".format(module))
LOG.debug("dir(module)={}".format(dir(mod)))
stdout = None
if len(parts) >= 2 and parts[-2].startswith('>'):
redirector = parts[-2]
out_fn = parts[-1]
parts = parts[:-2]
if redirector == ">>":
LOG.info(" Appending ({}) to '{}'".format(redirector, out_fn))
stdout = open(out_fn, 'a')
else:
LOG.info(" Writing ({}) to '{}'".format(redirector, out_fn))
stdout = open(out_fn, 'w')
sys.stdout = stdout
#argv = [sys.executable] + parts
argv = ['python3'] + parts
LOG.info(" For '{}', ARGV={!r}".format(module, argv))
mod.main(argv)
if stdout:
stdout.close() # in case we need to flush
def run(script_fn):
with open(script_fn) as ifs:
for line in ifs:
line = line.strip()
if not line or line.startswith("#"):
continue
argv = sys.argv
stdin = sys.stdin
stdout = sys.stdout
try:
run_line(line)
except Exception:
msg = "Line was:\n'{}'".format(line)
LOG.fatal(msg)
#LOG.exception(msg)
#msg = "Running next lines anway ..."
#LOG.warning(msg)
raise # We could continue, but we will quit for simplicity.
finally:
sys.argv = argv
sys.stdin = stdin
sys.stdout = stdout
class HelpF(argparse.RawTextHelpFormatter, argparse.ArgumentDefaultsHelpFormatter):
pass
def parse_args(argv):
epilog = """Example input:
python3 -m falcon_kit.mains.foo > stdout.txt
python3 -m falcon_kit.mains.bar --option arg1 arg2
...
Note: We are not trying to handle every possible use-case. This is meant to save start-up time.
"""
parser = argparse.ArgumentParser(description="Run several python programs, without re-invoking python.",
epilog=epilog,
formatter_class=HelpF)
parser.add_argument('script_fn', #type=str,
help='File containing lines of python module executions.')
args = parser.parse_args(argv[1:])
return args
def main(argv=sys.argv):
args = parse_args(argv)
logging.basicConfig(level=logging.INFO)
run(**vars(args))
if __name__ == '__main__': # pragma: no cover
main()
|
"""
:codeauthor: Rupesh Tare <[email protected]>
"""
import salt.modules.mod_random as mod_random
import salt.utils.pycrypto
from salt.exceptions import SaltInvocationError
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import patch
from tests.support.unit import TestCase, skipIf
def _test_hashlib():
try:
import hashlib
except ImportError:
return False
if not hasattr(hashlib, "algorithms_guaranteed"):
return False
else:
return True
SUPPORTED_HASHLIB = _test_hashlib()
@skipIf(not SUPPORTED_HASHLIB, "Hashlib does not contain needed functionality")
class ModrandomTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.mod_random
"""
def setup_loader_modules(self):
return {mod_random: {}}
def test_hash(self):
"""
        Test that hash() encodes a value with the specified algorithm.
"""
self.assertEqual(mod_random.hash("value")[0:4], "ec2c")
self.assertRaises(SaltInvocationError, mod_random.hash, "value", "algorithm")
def test_str_encode(self):
"""
        Test str_encode() with valid and invalid input values.
"""
self.assertRaises(SaltInvocationError, mod_random.str_encode, "None", "abc")
self.assertRaises(SaltInvocationError, mod_random.str_encode, None)
# We're using the base64 module which does not include the trailing new line
self.assertEqual(mod_random.str_encode("A"), "QQ==")
def test_get_str(self):
"""
        Test that get_str() returns a random string of the specified length.
"""
self.assertEqual(mod_random.get_str(length=1, chars="A"), "A")
self.assertEqual(len(mod_random.get_str(length=64)), 64)
ret = mod_random.get_str(
length=1,
lowercase=False,
uppercase=False,
printable=False,
whitespace=False,
punctuation=False,
)
self.assertNotRegex(ret, r"^[a-zA-Z]+$", "Found invalid characters")
self.assertRegex(ret, r"^[0-9]+$", "Not found required characters")
def test_shadow_hash(self):
"""
        Test that shadow_hash() generates a salted hash suitable for /etc/shadow.
"""
with patch.object(salt.utils.pycrypto, "gen_hash", return_value="A"):
self.assertEqual(mod_random.shadow_hash(), "A")
|
DB = 'test'
UNIQUE_ID_COLLECTION = 'unique_id'
DOC_COLLECTION = 'doc'
|
""" Unit tests for skycomponents
"""
import logging
import unittest
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from data_models.polarisation import PolarisationFrame
from processing_components.skycomponent.operations import create_skycomponent, find_separation_skycomponents, \
find_skycomponent_matches, find_nearest_skycomponent, find_nearest_skycomponent_index
from processing_components.simulation.testing_support import create_low_test_skycomponents_from_gleam
log = logging.getLogger(__name__)
class TestSkycomponent(unittest.TestCase):
def setUp(self):
from data_models.parameters import arl_path
self.dir = arl_path('test_results')
self.frequency = numpy.array([1e8])
self.channel_bandwidth = numpy.array([1e6])
self.phasecentre = SkyCoord(ra=+30.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')
self.components = create_low_test_skycomponents_from_gleam(flux_limit=2.0,
phasecentre=self.phasecentre,
frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'),
radius=0.1)
def test_time_setup(self):
pass
def test_copy(self):
fluxes = numpy.linspace(0, 1.0, 10)
sc = [create_skycomponent(direction=self.phasecentre, flux=numpy.array([[f]]), frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI')) for f in fluxes]
assert len(sc) == len(fluxes)
def test_find_skycomponent_separation(self):
separations = find_separation_skycomponents(self.components)
assert separations[0, 0] == 0.0
assert numpy.max(separations) > 0.0
def test_find_skycomponent_separation_binary(self):
test = self.components[:len(self.components) // 2]
separations = find_separation_skycomponents(test, self.components)
assert separations[0, 0] == 0.0
assert numpy.max(separations) > 0.0
def test_find_skycomponent_matches(self):
matches = find_skycomponent_matches(self.components[:len(self.components) // 2], self.components)
assert matches == [(0, 0, 0.0), (1, 1, 0.0), (2, 2, 0.0), (3, 3, 0.0), (4, 4, 0.0), (5, 5, 0.0), (6, 6, 0.0)]
matches = find_skycomponent_matches(self.components[len(self.components) // 2:], self.components)
assert matches == [(0, 7, 0.0), (1, 8, 0.0), (2, 9, 0.0), (3, 10, 0.0), (4, 11, 0.0), (5, 12, 0.0),
(6, 13, 0.0)]
matches = find_skycomponent_matches(self.components, self.components[:len(self.components) // 2])
assert matches == [(0, 0, 0.0), (1, 1, 0.0), (2, 2, 0.0), (3, 3, 0.0), (4, 4, 0.0), (5, 5, 0.0), (6, 6, 0.0)]
matches = find_skycomponent_matches(self.components, self.components[len(self.components) // 2:])
assert matches == [(7, 0, 0.0), (8, 1, 0.0), (9, 2, 0.0), (10, 3, 0.0), (11, 4, 0.0), (12, 5, 0.0),
(13, 6, 0.0)]
def test_find_nearest_component_index(self):
match = find_nearest_skycomponent_index(self.components[3].direction, self.components)
assert match == 3
def test_find_nearest_component(self):
match, sep = find_nearest_skycomponent(self.components[3].direction, self.components)
assert match.name == 'GLEAM J021305-474112'
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
# ! /usr/bin/env python
""" `medigan` is a modular Python library for automating synthetic dataset generation.
.. codeauthor:: Richard Osuala <[email protected]>
.. codeauthor:: Noussair Lazrak <[email protected]>
"""
# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler
# importing the generators module and class for the convenience of extending the "medigan.generators" namespace to
# "medigan", allowing 'from medigan import Generators'
from .generators import Generators
logging.getLogger(__name__).addHandler(NullHandler())
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
setup_requires=['setuptools>=30.3'],
packages=find_packages(where='src') + [
'ansible.modules.ansibilo',
'ansible.plugins.callback.ansibilo',
'ansible.plugins.filter.ansibilo',
],
package_dir={ # FIXME: wait for https://github.com/pypa/setuptools/issues/1136
'': 'src',
'ansible.modules.ansibilo': 'src/ansibilo/modules',
'ansible.plugins.callback.ansibilo': 'src/ansibilo/plugins/callbacks',
'ansible.plugins.filter.ansibilo': 'src/ansibilo/plugins/filters',
},
)
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 23:20:43 2020
@author: PRAVEEN KUMAR -1
"""
import numpy as np
from utility import activation_function
class OneHiddenLayerNN:
def __init__(self,n_x,n_h,n_y):
"""
        Initializes the sizes of the input layer, hidden layer units, and output layer.
ARGS:
- n_x: input size
- n_h: hidden layer units
- n_y: output size
"""
self.n_x = n_x
self.n_h = n_h
self.n_y = n_y
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the hidden layer is: n_h = " + str(n_h))
print("The size of the output layer is: n_y = " + str(n_y))
print("*"*50)
def layer_size(self,X):
"""
Returns shape of the layer
ARGS:
- X: Array/Matrix
RETURNS:
- size: tuple of the Shape of matrix
"""
size = X.shape
return size
def initilize_parameters(self):
"""
        Initializes weights and biases.
"""
np.random.seed(123)
W1 = np.random.randn(self.n_h,self.n_x) * 0.01
b1 = np.zeros((self.n_h,1))
W2 = np.random.randn(self.n_y,self.n_h) * 0.01
b2 = np.zeros((self.n_y,1))
parameters = {
"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
print("The size of the Weights 1st layer: W1 = " + str(W1.shape))
print("The size of the bais 1st layer: b1 = " + str(b1.shape))
print("The size of the weights 2nd layer: W2 = " + str(W2.shape))
print("The size of the bais 2nd layer: b2 = " + str(b2.shape))
print("*"*50)
return parameters
    def forward_propagation(self, X, parameters):
        """
        Runs one forward pass with the given parameters and caches the activations.
        """
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
activation1 = (activation_function.tanh(np.add(np.matmul(W1,X),b1)))
activation2 = (activation_function.sigmoid(np.matmul(W2,activation1)+b2))
cache = {
'A1' : activation1,
'A2' : activation2
}
print('size of X: ', X.shape)
print('size of activation1: ',activation1.shape)
        print('size of activation2: ',activation2.shape)
print("*"*50)
return cache
def compute_cost(self,Y,A2,parameters):
"""
"""
m = Y.shape[1]
print('m:',m)
        logprobs = (1 / m) * np.sum(np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y)))
        cost = -logprobs
cost = float(np.squeeze(cost))
print('cost: ',cost)
print("*"*50)
return cost
def backward_propagation(self,X,Y,cache,parameters):
"""
"""
W1 = parameters['W1']
W2 = parameters['W2']
m = X.shape[1]
A2 = cache['A2']
A1 = cache['A1']
dZ2 = A2 - Y
        dW2 = (1/m) * np.matmul(dZ2, A1.T)
db2 = (1/m) * np.sum(dZ2,axis=1, keepdims=True)
dZ1 = np.matmul(W2.T,dZ2) * (1 - np.power(A1, 2))
        dW1 = (1/m) * np.matmul(dZ1, X.T)
db1 = (1/m) * np.sum(dZ1,axis=1,keepdims=True)
grads = {"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2}
#print('grads: ', grads)
return grads
def update_parameters(self,parameters,grads,learning_rate=1.2):
"""
"""
W1 = parameters['W1']
W2 = parameters['W2']
b1 = parameters['b1']
b2 = parameters['b2']
dW1 = grads['dW1']
dW2 = grads['dW2']
db1 = grads['db1']
db2 = grads['db2']
W1 = W1 - (learning_rate * dW1)
W2 = W2 - (learning_rate * dW2)
b1 = b1 - (learning_rate * db1)
b2 = b2 - (learning_rate * db2)
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
#print('updated parameters: ',parameters)
return parameters
def nn_model(self,X, Y, n_h, num_iterations = 2400, print_cost=True):
"""
"""
n_x = self.layer_size(X)[0]
n_y = self.layer_size(Y)[0]
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the input layer is: n_y = " + str(n_y))
# initilize parameters
parameters = self.initilize_parameters()
        for i in range(num_iterations):
            # forward propagation
            cache = self.forward_propagation(X, parameters)
            A2 = cache['A2']
            # compute cost
            cost = self.compute_cost(Y, A2, parameters)
            # backward propagation
            grads = self.backward_propagation(X, Y, cache, parameters)
#optimize weights
parameters = self.update_parameters(parameters,grads, 0.15)
if print_cost and i % 1000 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
return parameters
if __name__=='__main__':
nn = OneHiddenLayerNN(n_x=5,n_h=4,n_y=4)
X = np.random.randn(5) * 0.01
X = X.reshape((X.shape[0],1))
Y = np.array([1,0,0,1])
Y = Y.reshape((Y.shape[0],1))
#cache = nn.forward_propagation(X)
#parameters = nn.initilize_parameters()
#print('parameters: ',parameters)
#cost = nn.compute_cost(Y,cache['A2'],parameters)
#grads = nn.backward_propagation(X,Y,cache,parameters)
#updated_params = nn.update_parameters(parameters,grads,0.15)
nn.nn_model(X,Y,5)
|
import sqlite3
conn = sqlite3.connect("ex_db.sqlite3")
# how can we make this more
def create_table(conn):
    curs = conn.cursor()
    # make the sql create statement and execute it
    create_table = """
    CREATE TABLE IF NOT EXISTS Students(
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name CHAR(20),
        fav_num INTEGER
    );
    """
    curs.execute(create_table)
    conn.commit()
def insert_data(conn):
    curs = conn.cursor()
    my_data = [
        ("George", 7),
        ("Stephen", 88)
    ]
    # for each row, use a parameterized insert so the values are escaped safely
    curs.executemany("INSERT INTO Students (name, fav_num) VALUES (?, ?);", my_data)
    conn.commit()
|
DAL_VIEW_PY = """# generated by appcreator
from django.db.models import Q
from dal import autocomplete
from . models import *
{% for x in data %}
class {{ x.model_name }}AC(autocomplete.Select2QuerySetView):
def get_queryset(self):
qs = {{ x.model_name }}.objects.all()
if self.q:
qs = qs.filter(
Q(legacy_id__icontains=self.q) |
Q({{ x.model_representation }}__icontains=self.q)
)
return qs
{% endfor %}
"""
DAL_URLS_PY = """# generated by appcreator
from django.conf.urls import url
from . import dal_views
app_name = '{{ app_name }}'
urlpatterns = [
{%- for x in data %}
url(
r'^{{ x.model_name|lower }}-autocomplete/$',
dal_views.{{ x.model_name}}AC.as_view(),
name='{{ x.model_name|lower }}-autocomplete'
),
{%- endfor %}
]
"""
APP_PY = """
from django.apps import AppConfig
class {{ app_name|title }}Config(AppConfig):
name = '{{ app_name }}'
"""
ADMIN_PY = """# generated by appcreator
from django.contrib import admin
from . models import (
{%- for x in data %}
{{ x.model_name }}{{ "," if not loop.last }}
{%- endfor %}
)
{%- for x in data %}
admin.site.register({{ x.model_name }})
{%- endfor %}
"""
URLS_PY = """# generated by appcreator
from django.conf.urls import url
from . import views
app_name = '{{ app_name }}'
urlpatterns = [
{%- for x in data %}
url(
r'^{{ x.model_name|lower }}/$',
views.{{ x.model_name}}ListView.as_view(),
name='{{ x.model_name|lower }}_browse'
),
url(
r'^{{ x.model_name|lower }}/detail/(?P<pk>[0-9]+)$',
views.{{ x.model_name}}DetailView.as_view(),
name='{{ x.model_name|lower }}_detail'
),
url(
r'^{{ x.model_name|lower }}/create/$',
views.{{ x.model_name}}Create.as_view(),
name='{{ x.model_name|lower }}_create'
),
url(
r'^{{ x.model_name|lower }}/edit/(?P<pk>[0-9]+)$',
views.{{ x.model_name}}Update.as_view(),
name='{{ x.model_name|lower }}_edit'
),
url(
r'^{{ x.model_name|lower }}/delete/(?P<pk>[0-9]+)$',
views.{{ x.model_name}}Delete.as_view(),
name='{{ x.model_name|lower }}_delete'),
{%- endfor %}
]
"""
FILTERS_PY = """# generated by appcreator
import django_filters
from django import forms
from dal import autocomplete
from vocabs.filters import generous_concept_filter
from vocabs.models import SkosConcept
from . models import (
{%- for x in data %}
{{ x.model_name }}{{ "," if not loop.last }}
{%- endfor %}
)
{% for x in data %}
class {{ x.model_name }}ListFilter(django_filters.FilterSet):
legacy_id = django_filters.CharFilter(
lookup_expr='icontains',
help_text={{ x.model_name}}._meta.get_field('legacy_id').help_text,
label={{ x.model_name}}._meta.get_field('legacy_id').verbose_name
)
{%- for y in x.model_fields %}
{%- if y.field_type == 'CharField' %}
{{y.field_name}} = django_filters.CharFilter(
lookup_expr='icontains',
help_text={{ x.model_name}}._meta.get_field('{{y.field_name}}').help_text,
label={{ x.model_name}}._meta.get_field('{{y.field_name}}').verbose_name
)
{%- endif %}
{%- if y.field_type == 'TextField' %}
{{y.field_name}} = django_filters.CharFilter(
lookup_expr='icontains',
help_text={{ x.model_name}}._meta.get_field('{{y.field_name}}').help_text,
label={{ x.model_name}}._meta.get_field('{{y.field_name}}').verbose_name
)
{%- endif %}
{%- if y.related_class == 'SkosConcept' %}
{{y.field_name}} = django_filters.ModelMultipleChoiceFilter(
queryset=SkosConcept.objects.filter(
collection__name="{{y.field_name}}"
),
help_text={{x.model_name}}._meta.get_field('{{y.field_name}}').help_text,
label={{x.model_name}}._meta.get_field('{{y.field_name}}').verbose_name,
method=generous_concept_filter,
widget=autocomplete.Select2Multiple(
url="/vocabs-ac/specific-concept-ac/{{y.field_name}}",
attrs={
'data-placeholder': 'Autocomplete ...',
'data-minimum-input-length': 2,
},
)
)
{%- elif y.field_type == 'ManyToManyField' %}
{{y.field_name}} = django_filters.ModelMultipleChoiceFilter(
queryset={{y.related_class}}.objects.all(),
help_text={{x.model_name}}._meta.get_field('{{y.field_name}}').help_text,
label={{x.model_name}}._meta.get_field('{{y.field_name}}').verbose_name,
widget=autocomplete.Select2Multiple(
url="{{ app_name }}-ac:{{y.related_class|lower}}-autocomplete",
)
)
{%- elif y.field_type == 'ForeignKey' %}
{{y.field_name}} = django_filters.ModelMultipleChoiceFilter(
queryset={{y.related_class}}.objects.all(),
help_text={{x.model_name}}._meta.get_field('{{y.field_name}}').help_text,
label={{x.model_name}}._meta.get_field('{{y.field_name}}').verbose_name,
widget=autocomplete.Select2Multiple(
url="{{ app_name }}-ac:{{y.related_class|lower}}-autocomplete",
)
)
{%- endif %}
{%- endfor %}
class Meta:
model = {{ x.model_name }}
fields = [
'id',
'legacy_id',
{% for y in x.model_fields %}
{%- if y.field_type == 'DateRangeField' or y.field_type == 'MultiPolygonField'%}
{%- else %}'{{ y.field_name }}',
{%- endif %}
{% endfor %}]
{% endfor %}
"""
FORMS_PY = """# generated by appcreator
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset, Div, MultiField, HTML
from crispy_forms.bootstrap import Accordion, AccordionGroup
from vocabs.models import SkosConcept
from . models import (
{%- for x in data %}
{{ x.model_name }}{{ "," if not loop.last }}
{%- endfor %}
)
{% for x in data %}
class {{ x.model_name }}FilterFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super({{ x.model_name }}FilterFormHelper, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.form_class = 'genericFilterForm'
self.form_method = 'GET'
self.helper.form_tag = False
self.add_input(Submit('Filter', 'Search'))
self.layout = Layout(
Fieldset(
'Basic search options',
'id',
css_id="basic_search_fields"
),
Accordion(
AccordionGroup(
'Advanced search',
{% for y in x.model_fields %}
{% if y.field_type == 'DateRangeField' or y.field_type == 'id' or y.field_type == 'MultiPolygonField' %}
{% else %}'{{ y.field_name }}',
{%- endif %}
{%- endfor %}
css_id="more"
),
AccordionGroup(
'admin',
'legacy_id',
css_id="admin_search"
),
)
)
class {{ x.model_name }}Form(forms.ModelForm):
{%- for y in x.model_fields %}
{%- if y.related_class == 'SkosConcept' and y.field_type == 'ForeignKey'%}
{{y.field_name}} = forms.ModelChoiceField(
required=False,
label="{{y.field_verbose_name}}",
queryset=SkosConcept.objects.filter(collection__name="{{y.field_name}}")
)
{%- elif y.related_class == 'SkosConcept' and y.field_type == 'ManyToManyField'%}
{{y.field_name}} = forms.ModelMultipleChoiceField(
required=False,
label="{{y.field_verbose_name}}",
queryset=SkosConcept.objects.filter(collection__name="{{y.field_name}}")
)
{%- endif -%}
{% endfor %}
class Meta:
model = {{ x.model_name }}
fields = "__all__"
def __init__(self, *args, **kwargs):
super({{ x.model_name }}Form, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-3'
self.helper.field_class = 'col-md-9'
self.helper.add_input(Submit('submit', 'save'),)
{% endfor %}
"""
TABLES_PY = """# generated by appcreator
import django_tables2 as tables
from django_tables2.utils import A
from browsing.browsing_utils import MergeColumn
from . models import (
{%- for x in data %}
{{ x.model_name }}{{ "," if not loop.last }}
{%- endfor %}
)
{% for x in data %}
class {{ x.model_name }}Table(tables.Table):
id = tables.LinkColumn(verbose_name='ID')
merge = MergeColumn(verbose_name='keep | remove', accessor='pk')
{%- for y in x.model_fields %}
{%- if y.field_type == 'ManyToManyField' %}
{{ y.field_name }} = tables.columns.ManyToManyColumn()
{%- endif %}
{%- endfor %}
class Meta:
model = {{ x.model_name }}
sequence = ('id',)
attrs = {"class": "table table-responsive table-hover"}
{% endfor %}
"""
VIEWS_PY = """# generated by appcreator
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.urls import reverse, reverse_lazy
from django.views.generic.detail import DetailView
from django.views.generic.edit import DeleteView
from . filters import *
from . forms import *
from . tables import *
from . models import (
{%- for x in data %}
{{ x.model_name }}{{ "," if not loop.last }}
{%- endfor %}
)
from browsing.browsing_utils import (
GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView
)
{% for x in data %}
class {{ x.model_name }}ListView(GenericListView):
model = {{ x.model_name }}
filter_class = {{ x.model_name }}ListFilter
formhelper_class = {{ x.model_name }}FilterFormHelper
table_class = {{ x.model_name }}Table
init_columns = [
'id', {%- if x.model_representation != 'nan' %} '{{ x.model_representation }}', {%- endif %}
]
enable_merge = True
class {{ x.model_name }}DetailView(BaseDetailView):
model = {{ x.model_name }}
template_name = 'browsing/generic_detail.html'
class {{ x.model_name }}Create(BaseCreateView):
model = {{ x.model_name }}
form_class = {{ x.model_name }}Form
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super({{ x.model_name }}Create, self).dispatch(*args, **kwargs)
class {{ x.model_name }}Update(BaseUpdateView):
model = {{ x.model_name }}
form_class = {{ x.model_name }}Form
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super({{ x.model_name }}Update, self).dispatch(*args, **kwargs)
class {{ x.model_name }}Delete(DeleteView):
model = {{ x.model_name }}
template_name = 'webpage/confirm_delete.html'
success_url = reverse_lazy('{{ app_name }}:{{ x.model_name|lower }}_browse')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super({{ x.model_name }}Delete, self).dispatch(*args, **kwargs)
{% endfor %}
"""
MODELS_PY = """# generated by appcreator
from django.contrib.gis.db import models
from django.urls import reverse
from browsing.browsing_utils import model_to_dict
def set_extra(self, **kwargs):
self.extra = kwargs
return self
models.Field.set_extra = set_extra
class IdProvider(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
legacy_id = models.CharField(
blank=True, null=True,
max_length=250,
)
class Meta:
abstract = True
{% for x in data %}
class {{ x.model_name }}(IdProvider):
{% if x.model_helptext %}### {{ x.model_helptext }} ###{% endif %}
legacy_id = models.CharField(
max_length=300, blank=True,
verbose_name="Legacy ID"
)
{%- for y in x.model_fields %}
{%- if y.field_type == 'DateRangeField' %}
{{ y.field_name }} = {{ y.field_type}}(
{%- else %}
{{ y.field_name }} = models.{{ y.field_type}}(
{%- endif %}
{%- if y.field_name == 'id' %}
primary_key=True,
{%- endif %}
{%- if y.field_type == 'DecimalField' %}
max_digits=19,
decimal_places=10,
{%- endif %}
{%- if y.field_type == 'BooleanField' %}
default=False,
{%- endif %}
{%- if y.field_type == 'CharField' %}
blank=True, null=True,
{%- if y.choices %}
choices={{ y.choices }},
{%- endif %}
max_length=250,
{%- elif y.field_type == 'TextField' %}
blank=True, null=True,
{%- elif y.field_type == 'ForeignKey' %}
{%- if y.related_class == 'SkosConcept' %}
{{ y.related_class }},
{%- else %}
"{{ y.related_class }}",
{%- endif %}
related_name='{{ y.related_name }}',
on_delete=models.SET_NULL,
{%- if y.field_name != 'id' %}
null=True,
blank=True,
{%- endif %}
{%- elif y.field_type == 'ManyToManyField' %}
{%- if y.related_class == 'SkosConcept' %}
{{ y.related_class }},
{%- else %}
"{{ y.related_class }}",
{%- endif %}
{%- if y.through %}
through='{{ y.through }}',
{%- endif %}
related_name='{{ y.related_name }}',
blank=True,
{%- else %}
{%- if y.field_name != 'id' %}
blank=True, null=True,
{%- endif %}
{%- endif %}
verbose_name="{{ y.field_verbose_name }}",
help_text="{{ y.field_helptext }}",
).set_extra(
is_public={{ y.field_public }},
{%- if y.value_from %}
data_lookup="{{ y.value_from }}",
{%- endif %}
{%- if y.arche_prop %}
arche_prop="{{ y.arche_prop }}",
{%- endif %}
{%- if y.arche_prop_str_template %}
arche_prop_str_template="{{ y.arche_prop_str_template }}",
{%- endif %}
)
{%- endfor %}
orig_data_csv = models.TextField(
blank=True,
null=True,
verbose_name="The original data"
).set_extra(
is_public=True
)
class Meta:
{% if x.model_order == 'nan' %}
ordering = [
'id',
]
{%- else %}
ordering = [
'{{ x.model_order }}',
]
{%- endif %}
verbose_name = "{{ x.model_verbose_name }}"
{% if x.model_representation == 'nan' %}
def __str__(self):
return "{}".format(self.id)
{%- else %}
def __str__(self):
if self.{{ x.model_representation }}:
return "{}".format(self.{{ x.model_representation }})
else:
return "{}".format(self.legacy_id)
{%- endif %}
def field_dict(self):
return model_to_dict(self)
@classmethod
def get_listview_url(self):
return reverse('{{ app_name }}:{{ x.model_name|lower }}_browse')
{% if x.source_table %}
@classmethod
def get_source_table(self):
return "{{ x.source_table }}"
{% else %}
@classmethod
def get_source_table(self):
return None
{% endif %}
{% if x.natural_primary_key %}
@classmethod
def get_natural_primary_key(self):
return "{{ x.natural_primary_key }}"
{% else %}
@classmethod
def get_natural_primary_key(self):
return None
{% endif %}
@classmethod
def get_createview_url(self):
return reverse('{{ app_name }}:{{ x.model_name|lower }}_create')
    def get_absolute_url(self):
        return reverse('{{ app_name }}:{{ x.model_name|lower }}_detail', kwargs={'pk': self.id})
def get_delete_url(self):
return reverse('{{ app_name }}:{{ x.model_name|lower }}_delete', kwargs={'pk': self.id})
def get_edit_url(self):
return reverse('{{ app_name }}:{{ x.model_name|lower }}_edit', kwargs={'pk': self.id})
def get_next(self):
next = self.__class__.objects.filter(id__gt=self.id)
if next:
return reverse(
'{{ app_name }}:{{ x.model_name|lower }}_detail',
kwargs={'pk': next.first().id}
)
return False
def get_prev(self):
prev = self.__class__.objects.filter(id__lt=self.id).order_by('-id')
if prev:
return reverse(
'{{ app_name }}:{{ x.model_name|lower }}_detail',
kwargs={'pk': prev.first().id}
)
return False
{% endfor %}
"""
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from unittest import SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import scipy.linalg
import scipy.special
import scipy.stats
from jax import api
from jax import core
from jax import grad
from jax import lax
from jax import numpy as jnp
from jax import random
from jax import test_util as jtu
from jax import vmap
from jax.interpreters import xla
import jax._src.random
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
float_dtypes = jtu.dtypes.all_floating
complex_dtypes = jtu.dtypes.complex
int_dtypes = jtu.dtypes.all_integer
uint_dtypes = jtu.dtypes.all_unsigned
class LaxRandomTest(jtu.JaxTestCase):
def _CheckCollisions(self, samples, nbits):
fail_prob = 0.01 # conservative bound on statistical fail prob by Chebyshev
nitems = len(samples)
nbins = 2 ** nbits
nexpected = nbins * (1 - ((nbins - 1) / nbins) ** nitems)
ncollisions = len(np.unique(samples))
sq_percent_deviation = ((ncollisions - nexpected) / nexpected) ** 2
self.assertLess(sq_percent_deviation, 1 / np.sqrt(nexpected * fail_prob))
def _CheckKolmogorovSmirnovCDF(self, samples, cdf):
fail_prob = 0.01 # conservative bound on statistical fail prob by Kolmo CDF
self.assertGreater(scipy.stats.kstest(samples, cdf).pvalue, fail_prob)
def _CheckChiSquared(self, samples, pmf):
alpha = 0.01 # significance level, threshold for p-value
values, actual_freq = np.unique(samples, return_counts=True)
expected_freq = pmf(values) * samples.size
# per scipy: "A typical rule is that all of the observed and expected
# frequencies should be at least 5."
valid = (actual_freq > 5) & (expected_freq > 5)
self.assertGreater(valid.sum(), 1,
msg='not enough valid frequencies for chi-squared test')
_, p_value = scipy.stats.chisquare(
actual_freq[valid], expected_freq[valid])
self.assertGreater(
p_value, alpha,
msg=f'Failed chi-squared test with p={p_value}.\n'
'Expected vs. actual frequencies:\n'
f'{expected_freq[valid]}\n{actual_freq[valid]}')
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in jtu.dtypes.floating))
def testNumpyAndXLAAgreeOnFloatEndianness(self, dtype):
bits_dtype = np.uint32 if jnp.finfo(dtype).bits == 32 else np.uint64
numpy_bits = np.array(1., dtype).view(bits_dtype)
xla_bits = api.jit(
lambda: lax.bitcast_convert_type(np.array(1., dtype), bits_dtype))()
self.assertEqual(numpy_bits, xla_bits)
def testThreefry2x32(self):
# We test the hash by comparing to known values provided in the test code of
# the original reference implementation of Threefry. For the values, see
# https://github.com/DEShawResearch/Random123-Boost/blob/65e3d874b67aa7b3e02d5ad8306462f52d2079c0/libs/random/test/test_threefry.cpp#L30-L32
def result_to_hex(result):
return tuple([hex(x.copy()).rstrip("L") for x in result])
expected = ("0x6b200159", "0x99ba4efe")
result = random.threefry_2x32(np.uint32([0, 0]), np.uint32([0, 0]))
self.assertEqual(expected, result_to_hex(result))
expected = ("0x1cb996fc", "0xbb002be7")
result = random.threefry_2x32(np.uint32([-1, -1]), np.uint32([-1, -1]))
self.assertEqual(expected, result_to_hex(result))
expected = ("0xc4923a9c", "0x483df7a0")
result = random.threefry_2x32(
np.uint32([0x13198a2e, 0x03707344]),
np.uint32([0x243f6a88, 0x85a308d3]))
self.assertEqual(expected, result_to_hex(result))
def testThreefry2x32Large(self):
n = 10000000
result = random.threefry_2x32(
(np.uint32(0x13198a2e), np.uint32(0x03707344)),
jnp.concatenate([
jnp.full((n,), 0x243f6a88, jnp.uint32),
jnp.full((n,), 0x85a308d3, jnp.uint32)
]))
np.testing.assert_equal(result[:n], np.full((n,), 0xc4923a9c, dtype=np.uint32))
np.testing.assert_equal(result[n:], np.full((n,), 0x483df7a0, dtype=np.uint32))
def testThreefry2x32Empty(self):
# Regression test for an op-by-op crash for empty arrays in CUDA mode.
with api.disable_jit():
result = random.threefry_2x32(
(np.uint32(0x13198a2e), np.uint32(0x03707344)),
jnp.ones((10, 0,), jnp.uint32))
np.testing.assert_equal(result, np.zeros((10, 0,), dtype=np.uint32))
def testRngRandomBitsViewProperty(self):
# TODO: add 64-bit if it ever supports this property.
# TODO: will this property hold across endian-ness?
N = 10
key = random.PRNGKey(1701)
nbits = [8, 16, 32]
rand_bits = [jax._src.random._random_bits(key, n, (N * 64 // n,))
for n in nbits]
rand_bits_32 = np.array([np.array(r).view(np.uint32) for r in rand_bits])
assert np.all(rand_bits_32 == rand_bits_32[0])
def testRngRandomBits(self):
# Test specific outputs to ensure consistent random values between JAX versions.
key = random.PRNGKey(1701)
bits8 = jax._src.random._random_bits(key, 8, (3,))
expected8 = np.array([216, 115, 43], dtype=np.uint8)
self.assertArraysEqual(bits8, expected8)
bits16 = jax._src.random._random_bits(key, 16, (3,))
expected16 = np.array([41682, 1300, 55017], dtype=np.uint16)
self.assertArraysEqual(bits16, expected16)
bits32 = jax._src.random._random_bits(key, 32, (3,))
expected32 = np.array([56197195, 4200222568, 961309823], dtype=np.uint32)
self.assertArraysEqual(bits32, expected32)
with jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype.*"):
bits64 = jax._src.random._random_bits(key, 64, (3,))
if FLAGS.jax_enable_x64:
expected64 = np.array([3982329540505020460, 16822122385914693683,
7882654074788531506], dtype=np.uint64)
else:
expected64 = np.array([676898860, 3164047411, 4010691890], dtype=np.uint32)
self.assertArraysEqual(bits64, expected64)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in float_dtypes))
def testRngUniform(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.uniform(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckCollisions(samples, jnp.finfo(dtype).nmant)
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.uniform().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in int_dtypes + uint_dtypes))
def testRngRandint(self, dtype):
lo = 5
hi = 10
key = random.PRNGKey(0)
rand = lambda key: random.randint(key, (10000,), lo, hi, dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self.assertTrue(np.all(lo <= samples))
self.assertTrue(np.all(samples < hi))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in float_dtypes))
def testNormal(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.normal(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.norm().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in complex_dtypes))
def testNormalComplex(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.normal(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(jnp.real(samples), scipy.stats.norm(scale=1/np.sqrt(2)).cdf)
self._CheckKolmogorovSmirnovCDF(jnp.imag(samples), scipy.stats.norm(scale=1/np.sqrt(2)).cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in float_dtypes))
def testTruncatedNormal(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.truncated_normal(key, -0.3, 0.3, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
min_val = np.min(uncompiled_samples)
max_val = np.max(uncompiled_samples)
self.assertTrue(min_val > -0.3)
self.assertTrue(max_val < 0.3)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.truncnorm(-0.3, 0.3).cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in jtu.dtypes.floating + jtu.dtypes.integer))
def testShuffle(self, dtype):
key = random.PRNGKey(0)
x = np.arange(100).astype(dtype)
rand = lambda key: random.shuffle(key, x)
crand = api.jit(rand)
with self.assertWarns(FutureWarning):
perm1 = rand(key)
with self.assertWarns(FutureWarning):
perm2 = crand(key)
self.assertAllClose(perm1, perm2)
self.assertFalse(np.all(perm1 == x)) # seems unlikely!
self.assertAllClose(np.sort(perm1), x, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_shape={}_replace={}_weighted={}_array_input={}".format(
np.dtype(dtype).name, shape, replace, weighted, array_input),
"dtype": dtype, "shape": shape, "replace": replace,
"weighted": weighted, "array_input": array_input}
for dtype in jtu.dtypes.floating + jtu.dtypes.integer
for shape in [(), (5,), (4, 5)]
for replace in [True, False]
for weighted in [True, False]
for array_input in [False, 'jnp', 'np']))
def testChoice(self, dtype, shape, replace, weighted, array_input):
N = 100
key = random.PRNGKey(0)
x = (N if not array_input else
jnp.arange(N, dtype=dtype) if array_input == 'jnp' else
np.arange(N, dtype=dtype))
p = None if not weighted else jnp.arange(N)
rand = lambda key: random.choice(key, x, shape, p=p, replace=replace)
crand = api.jit(rand)
sample1 = rand(key)
sample2 = crand(key)
self.assertEqual(shape, sample1.shape)
if array_input == 'jnp':
self.assertEqual(x.dtype, sample1.dtype)
if not replace:
assert len(np.unique(sample1)) == len(np.ravel(sample1))
self.assertAllClose(sample1, sample2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(jtu.format_shape_dtype_string(shape, dtype)),
"dtype": dtype, "shape": shape}
for dtype in jtu.dtypes.floating + jtu.dtypes.integer
for shape in [100, (10, 10), (10, 5, 2)]))
def testPermutationArray(self, dtype, shape):
key = random.PRNGKey(0)
x = jnp.arange(np.prod(shape)).reshape(shape).astype(dtype)
rand = lambda key: random.permutation(key, x)
crand = api.jit(rand)
perm1 = rand(key)
perm2 = crand(key)
self.assertAllClose(perm1, perm2)
self.assertFalse(np.all(perm1 == x)) # seems unlikely!
self.assertAllClose(np.sort(perm1.ravel()), x.ravel(), check_dtypes=False)
self.assertArraysAllClose(
x, jnp.arange(np.prod(shape)).reshape(shape).astype(dtype))
def testPermutationInteger(self):
key = random.PRNGKey(0)
x = 100
rand = lambda key: random.permutation(key, x)
crand = api.jit(rand)
perm1 = rand(key)
perm2 = crand(key)
self.assertAllClose(perm1, perm2)
self.assertEqual(perm1.dtype, perm2.dtype)
self.assertFalse(np.all(perm1 == np.arange(100))) # seems unlikely!
self.assertAllClose(np.sort(perm1), np.arange(100), check_dtypes=False)
def testPermutationErrors(self):
key = random.PRNGKey(0)
with self.assertRaises(TypeError):
random.permutation(key, 10.)
with self.assertRaises(core.ConcretizationTypeError):
api.jit(random.permutation)(key, 10)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_p={}_dtype={}".format(p, np.dtype(dtype).name),
"p": p, "dtype": dtype}
for p in [0.1, 0.5, 0.9]
for dtype in jtu.dtypes.floating))
def testBernoulli(self, p, dtype):
key = random.PRNGKey(0)
p = np.array(p, dtype=dtype)
rand = lambda key, p: random.bernoulli(key, p, (10000,))
crand = api.jit(rand)
uncompiled_samples = rand(key, p)
compiled_samples = crand(key, p)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckChiSquared(samples, scipy.stats.bernoulli(p).pmf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_p={}_{}_{}".format(p, np.dtype(dtype).name, sample_shape),
"p": p, "axis": axis, "dtype": dtype, 'sample_shape': sample_shape}
for (p, axis) in [
([.25] * 4, -1),
([.1, .2, .3, .4], -1),
([[.5, .5], [.1, .9]], 1),
([[.5, .1], [.5, .9]], 0),
]
for sample_shape in [(10000,), (5000, 2)]
for dtype in jtu.dtypes.floating))
def testCategorical(self, p, axis, dtype, sample_shape):
key = random.PRNGKey(0)
p = np.array(p, dtype=dtype)
logits = np.log(p) - 42 # test unnormalized
out_shape = tuple(np.delete(logits.shape, axis))
shape = sample_shape + out_shape
rand = partial(random.categorical, shape=shape, axis=axis)
crand = api.jit(rand)
uncompiled_samples = rand(key, logits)
compiled_samples = crand(key, logits)
if axis < 0:
axis += len(logits.shape)
for samples in [uncompiled_samples, compiled_samples]:
assert samples.shape == shape
samples = jnp.reshape(samples, (10000,) + out_shape)
if len(p.shape[:-1]) > 0:
ps = np.transpose(p, (1, 0)) if axis == 0 else p
for cat_samples, cat_p in zip(samples.transpose(), ps):
self._CheckChiSquared(cat_samples, pmf=lambda x: cat_p[x])
else:
self._CheckChiSquared(samples, pmf=lambda x: p[x])
def testBernoulliShape(self):
key = random.PRNGKey(0)
x = random.bernoulli(key, np.array([0.2, 0.3]), shape=(3, 2))
assert x.shape == (3, 2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}_b={}_dtype={}".format(a, b, np.dtype(dtype).name),
"a": a, "b": b, "dtype": dtype}
for a in [0.2, 5.]
for b in [0.2, 5.]
for dtype in [np.float64])) # NOTE: KS test fails with float32
def testBeta(self, a, b, dtype):
if not FLAGS.jax_enable_x64:
raise SkipTest("skip test except on X64")
key = random.PRNGKey(0)
rand = lambda key, a, b: random.beta(key, a, b, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, a, b)
compiled_samples = crand(key, a, b)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.beta(a, b).cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in float_dtypes))
def testCauchy(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.cauchy(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.cauchy().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_alpha={}_dtype={}".format(alpha, np.dtype(dtype).name),
"alpha": alpha, "dtype": dtype}
for alpha in [
np.array([0.2, 1., 5.]),
]
for dtype in jtu.dtypes.floating))
@jtu.skip_on_devices("tpu") # TODO(mattjj): slow compilation times
def testDirichlet(self, alpha, dtype):
key = random.PRNGKey(0)
rand = lambda key, alpha: random.dirichlet(key, alpha, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, alpha)
compiled_samples = crand(key, alpha)
for samples in [uncompiled_samples, compiled_samples]:
self.assertAllClose(samples.sum(-1), np.ones(10000, dtype=dtype))
alpha_sum = sum(alpha)
for i, a in enumerate(alpha):
self._CheckKolmogorovSmirnovCDF(samples[..., i], scipy.stats.beta(a, alpha_sum - a).cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in float_dtypes))
def testExponential(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.exponential(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.expon().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}_dtype={}".format(a, np.dtype(dtype).name),
"a": a, "dtype": dtype}
for a in [0.1, 1., 10.]
for dtype in jtu.dtypes.floating))
def testGamma(self, a, dtype):
key = random.PRNGKey(0)
rand = lambda key, a: random.gamma(key, a, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, a)
compiled_samples = crand(key, a)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.gamma(a).cdf)
def testGammaShape(self):
key = random.PRNGKey(0)
x = random.gamma(key, np.array([0.2, 0.3]), shape=(3, 2))
assert x.shape == (3, 2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}".format(alpha), "alpha": alpha}
for alpha in [1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]))
def testGammaGrad(self, alpha):
rng = random.PRNGKey(0)
alphas = np.full((100,), alpha)
z = random.gamma(rng, alphas)
actual_grad = api.grad(lambda x: random.gamma(rng, x).sum())(alphas)
eps = 0.01 * alpha / (1.0 + np.sqrt(alpha))
cdf_dot = (scipy.stats.gamma.cdf(z, alpha + eps)
- scipy.stats.gamma.cdf(z, alpha - eps)) / (2 * eps)
pdf = scipy.stats.gamma.pdf(z, alpha)
expected_grad = -cdf_dot / pdf
self.assertAllClose(actual_grad, expected_grad, check_dtypes=True,
rtol=2e-2 if jtu.device_under_test() == "tpu" else 7e-4)
def testGammaGradType(self):
# Regression test for https://github.com/google/jax/issues/2130
key = random.PRNGKey(0)
a = jnp.array(1., dtype=jnp.float32)
b = jnp.array(3., dtype=jnp.float32)
f = lambda x, y: random.gamma(key=key, a=x, dtype=jnp.float32) / y
# Should not crash with a type error.
api.vjp(f, a, b)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lam={}_dtype={}".format(lam, np.dtype(dtype).name),
"lam": lam, "dtype": np.dtype(dtype)}
for lam in [0.5, 3, 9, 11, 50, 500]
for dtype in [np.int16, np.int32, np.int64]))
def testPoisson(self, lam, dtype):
key = random.PRNGKey(0)
rand = lambda key, lam: random.poisson(key, lam, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, lam)
compiled_samples = crand(key, lam)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckChiSquared(samples, scipy.stats.poisson(lam).pmf)
# TODO(shoyer): determine error bounds for moments more rigorously (e.g.,
# based on the central limit theorem).
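      # A rough CLT-based sketch (added for illustration, not a derived bound):
      # the mean of n Poisson(lam) samples has standard error sqrt(lam / n),
      # i.e. a relative error of roughly 1 / sqrt(n * lam); the sample variance
      # fluctuates more (its standard error scales like
      # sqrt((lam + 2 * lam**2) / n)), which is why the variance check below
      # uses a looser rtol than the mean check.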
self.assertAllClose(samples.mean(), lam, rtol=0.01, check_dtypes=False)
self.assertAllClose(samples.var(), lam, rtol=0.03, check_dtypes=False)
def testPoissonBatched(self):
key = random.PRNGKey(0)
lam = jnp.concatenate([2 * jnp.ones(10000), 20 * jnp.ones(10000)])
samples = random.poisson(key, lam, shape=(20000,))
self._CheckChiSquared(samples[:10000], scipy.stats.poisson(2.0).pmf)
self._CheckChiSquared(samples[10000:], scipy.stats.poisson(20.0).pmf)
def testPoissonShape(self):
key = random.PRNGKey(0)
x = random.poisson(key, np.array([2.0, 20.0]), shape=(3, 2))
assert x.shape == (3, 2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in jtu.dtypes.floating))
def testGumbel(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.gumbel(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.gumbel_r().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in float_dtypes))
def testLaplace(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.laplace(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.laplace().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in float_dtypes))
def testLogistic(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.logistic(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.logistic().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_b={}_dtype={}".format(b, np.dtype(dtype).name),
"b": b, "dtype": dtype}
for b in [0.1, 1., 10.]
for dtype in jtu.dtypes.floating))
def testPareto(self, b, dtype):
key = random.PRNGKey(0)
rand = lambda key, b: random.pareto(key, b, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, b)
compiled_samples = crand(key, b)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.pareto(b).cdf)
def testParetoShape(self):
key = random.PRNGKey(0)
x = random.pareto(key, np.array([0.2, 0.3]), shape=(3, 2))
assert x.shape == (3, 2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_df={}_dtype={}".format(df, np.dtype(dtype).name),
"df": df, "dtype": dtype}
for df in [0.1, 1., 10.]
for dtype in jtu.dtypes.floating))
@jtu.skip_on_devices("cpu", "tpu") # TODO(phawkins): slow compilation times
def testT(self, df, dtype):
key = random.PRNGKey(0)
rand = lambda key, df: random.t(key, df, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, df)
compiled_samples = crand(key, df)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.t(df).cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dim={}_dtype={}".format(dim, np.dtype(dtype)),
"dim": dim, "dtype": dtype}
for dim in [1, 3, 5]
for dtype in float_dtypes))
def testMultivariateNormal(self, dim, dtype):
r = np.random.RandomState(dim)
mean = r.randn(dim)
cov_factor = r.randn(dim, dim)
cov = np.dot(cov_factor, cov_factor.T) + dim * np.eye(dim)
key = random.PRNGKey(0)
rand = partial(random.multivariate_normal, mean=mean, cov=cov,
shape=(10000,))
crand = api.jit(rand)
uncompiled_samples = np.asarray(rand(key), np.float64)
compiled_samples = np.asarray(crand(key), np.float64)
inv_scale = scipy.linalg.lapack.dtrtri(np.linalg.cholesky(cov), lower=True)[0]
for samples in [uncompiled_samples, compiled_samples]:
centered = samples - mean
whitened = np.einsum('nj,ij->ni', centered, inv_scale)
# This is a quick-and-dirty multivariate normality check that tests that a
# uniform mixture of the marginals along the covariance matrix's
# eigenvectors follows a standard normal distribution.
self._CheckKolmogorovSmirnovCDF(whitened.ravel(), scipy.stats.norm().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dim={}_mean_batch_size={}_cov_batch_size={}_shape={}"\
.format(dim, mean_batch_size, cov_batch_size, shape),
"dim": dim,
"mean_batch_size": mean_batch_size,
"cov_batch_size": cov_batch_size,
"shape": shape}
for dim in [1, 2, 4]
for mean_batch_size in [(), (3,), (2, 3)]
for cov_batch_size in [(), (3,), (2, 3)]
for shape in [(), (1,), (5,)]))
def testMultivariateNormalShapes(self, dim, mean_batch_size, cov_batch_size,
shape):
r = np.random.RandomState(0)
key = random.PRNGKey(0)
eff_batch_size = mean_batch_size \
if len(mean_batch_size) > len(cov_batch_size) else cov_batch_size
mean = r.randn(*(mean_batch_size + (dim,)))
cov_factor = r.randn(*(cov_batch_size + (dim, dim)))
cov = np.einsum('...ij,...kj->...ik', cov_factor, cov_factor)
cov += 1e-3 * np.eye(dim)
shape = shape + eff_batch_size
samples = random.multivariate_normal(key, mean, cov, shape=shape)
assert samples.shape == shape + (dim,)
def testMultivariateNormalCovariance(self):
# test code based on https://github.com/google/jax/issues/1869
N = 100000
cov = jnp.array([[ 0.19, 0.00, -0.13, 0.00],
[ 0.00, 0.29, 0.00, -0.23],
[ -0.13, 0.00, 0.39, 0.00],
[ 0.00, -0.23, 0.00, 0.49]])
mean = jnp.zeros(4)
out_np = np.random.RandomState(0).multivariate_normal(mean, cov, N)
key = random.PRNGKey(0)
out_jnp = random.multivariate_normal(key, mean=mean, cov=cov, shape=(N,))
var_np = out_np.var(axis=0)
var_jnp = out_jnp.var(axis=0)
self.assertAllClose(var_np, var_jnp, rtol=1e-2, atol=1e-2,
check_dtypes=False)
var_np = np.cov(out_np, rowvar=False)
var_jnp = np.cov(out_jnp, rowvar=False)
self.assertAllClose(var_np, var_jnp, rtol=1e-2, atol=1e-2,
check_dtypes=False)
def testIssue222(self):
x = random.randint(random.PRNGKey(10003), (), 0, 0)
assert x == 0
def testFoldIn(self):
key = random.PRNGKey(0)
keys = [random.fold_in(key, i) for i in range(10)]
assert np.unique(np.ravel(keys)).shape == (20,)
def testStaticShapeErrors(self):
if config.read("jax_disable_jit"):
raise SkipTest("test only relevant when jit enabled")
@api.jit
def feature_map(n, d, sigma=1.0, seed=123):
key = random.PRNGKey(seed)
W = random.normal(key, (d, n)) / sigma
w = random.normal(key, (d, )) / sigma
b = 2 * jnp.pi * random.uniform(key, (d, ))
phi = lambda x, t: jnp.sqrt(2.0 / d) * jnp.cos(jnp.matmul(W, x) + w*t + b)
return phi
self.assertRaisesRegex(TypeError, 'Shapes must be 1D.*',
lambda: feature_map(5, 3))
def testIssue756(self):
key = random.PRNGKey(0)
w = random.normal(key, ())
if FLAGS.jax_enable_x64:
self.assertEqual(np.result_type(w), np.float64)
else:
self.assertEqual(np.result_type(w), np.float32)
def testIssue1789(self):
def f(x):
return random.gamma(random.PRNGKey(0), x)
grad(lambda x: jnp.sum(vmap(f)(x)))(jnp.ones(2))
def testNoOpByOpUnderHash(self):
def fail(*args, **kwargs): assert False
apply_primitive, xla.apply_primitive = xla.apply_primitive, fail
try:
_ = random.threefry_2x32(np.zeros(2, np.uint32), np.arange(10, dtype=np.uint32))
finally:
xla.apply_primitive = apply_primitive
def testPRNGValues(self):
# Test to ensure consistent random values between JAX versions
k = random.PRNGKey(0)
if FLAGS.jax_enable_x64:
self.assertAllClose(
random.randint(k, (3, 3), 0, 8),
np.array([[7, 2, 6],
[2, 1, 0],
[6, 7, 7]], dtype='int64'))
else:
self.assertAllClose(
random.randint(k, (3, 3), 0, 8),
np.array([[2, 1, 3],
[6, 1, 5],
[6, 3, 4]], dtype='int32'))
self.assertAllClose(
random.split(k, 4),
np.array([[2285895361, 1501764800],
[1518642379, 4090693311],
[ 433833334, 4221794875],
[ 839183663, 3740430601]], dtype='uint32'))
self.assertAllClose(
random.fold_in(k, 4),
np.array([2285895361, 433833334], dtype='uint32'))
def testDtypeErrorMessage(self):
with self.assertRaisesRegex(ValueError, r"dtype argument to.*"):
random.normal(random.PRNGKey(0), (), dtype=jnp.int32)
def testRandomBroadcast(self):
"""Issue 4033"""
# test for broadcast issue in https://github.com/google/jax/issues/4033
key = random.PRNGKey(0)
shape = (10, 2)
x = random.uniform(key, shape, minval=jnp.zeros(2), maxval=jnp.ones(2))
assert x.shape == shape
x = random.randint(key, shape, jnp.array([0, 1]), jnp.array([1, 2]))
assert x.shape == shape
def testMaxwellSample(self):
num_samples = 10**5
rng = random.PRNGKey(0)
rand = lambda x: random.maxwell(x, (num_samples, ))
crand = api.jit(rand)
loc = scipy.stats.maxwell.mean()
std = scipy.stats.maxwell.std()
uncompiled_samples = rand(rng)
compiled_samples = crand(rng)
for samples in [uncompiled_samples, compiled_samples]:
# Check first and second moments.
self.assertEqual((num_samples,), samples.shape)
self.assertAllClose(np.mean(samples), loc, atol=0., rtol=0.1)
self.assertAllClose(np.std(samples), std, atol=0., rtol=0.1)
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.maxwell().cdf)
@parameterized.named_parameters(
('test1', 4.0, 1.0),
('test2', 2.0, 3.0))
def testWeibullSample(self, concentration, scale):
num_samples = 10**5
rng = random.PRNGKey(0)
rand = lambda x: random.weibull_min(x, scale, concentration, (num_samples,))
crand = api.jit(rand)
loc = scipy.stats.weibull_min.mean(c=concentration, scale=scale)
std = scipy.stats.weibull_min.std(c=concentration, scale=scale)
uncompiled_samples = rand(rng)
compiled_samples = crand(rng)
for samples in [uncompiled_samples, compiled_samples]:
# Check first and second moments.
self.assertEqual((num_samples,), samples.shape)
self.assertAllClose(np.mean(samples), loc, atol=0., rtol=0.1)
self.assertAllClose(np.std(samples), std, atol=0., rtol=0.1)
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.weibull_min(
c=concentration, scale=scale).cdf)
@parameterized.named_parameters(
('test1', 4.0, 1.0),
('test2', 2.0, 3.0))
def testDoublesidedMaxwellSample(self, loc, scale):
num_samples = 10**5
rng = random.PRNGKey(0)
rand = lambda key: random.double_sided_maxwell(
rng, loc, scale, (num_samples,))
crand = api.jit(rand)
mean = loc
std = np.sqrt(3.) * scale
uncompiled_samples = rand(rng)
compiled_samples = crand(rng)
# Compute the double-sided Maxwell CDF through the one-sided Maxwell CDF.
# This is done as follows:
# P(DSM <= x) = P(loc + scale * rademacher_sample * one_sided_sample <= x) =
# P(rademacher_sample * one_sided_sample <= (x - loc) / scale) =
# 1/2 P(one_sided_sample <= (x - loc) / scale)
#  + 1/2 P(-one_sided_sample <= (x - loc) / scale) =
# 1/2 P(one_sided_sample <= (x - loc) / scale)
#  + 1/2 P(one_sided_sample >= -(x - loc) / scale) =
# 1/2 CDF_one_maxwell((x - loc) / scale)
#  + 1/2 (1 - CDF_one_maxwell(-(x - loc) / scale))
def double_sided_maxwell_cdf(x, loc, scale):
pos = scipy.stats.maxwell().cdf((x - loc)/ scale)
neg = (1 - scipy.stats.maxwell().cdf((-x + loc)/ scale))
return (pos + neg) / 2
for samples in [uncompiled_samples, compiled_samples]:
# Check first and second moments.
self.assertEqual((num_samples,), samples.shape)
self.assertAllClose(np.mean(samples), mean, atol=0., rtol=0.1)
self.assertAllClose(np.std(samples), std, atol=0., rtol=0.1)
self._CheckKolmogorovSmirnovCDF(
samples, lambda x: double_sided_maxwell_cdf(x, loc, scale))
def testRadamacher(self):
rng = random.PRNGKey(0)
num_samples = 10**5
rand = lambda x: random.rademacher(x, (num_samples,))
crand = api.jit(rand)
uncompiled_samples = rand(rng)
compiled_samples = crand(rng)
for samples in [uncompiled_samples, compiled_samples]:
unique_values, counts = np.unique(samples, return_counts=True)
assert len(unique_values) == 2
assert len(counts) == 2
self.assertAllClose(
counts[0]/ num_samples, 0.5, rtol=1e-02, atol=1e-02)
self.assertAllClose(
counts[1]/ num_samples, 0.5, rtol=1e-02, atol=1e-02)
def testChoiceShapeIsNotSequenceError(self):
key = random.PRNGKey(0)
with self.assertRaises(TypeError):
random.choice(key, 5, 2, replace=False)
with self.assertRaises(TypeError):
random.choice(key, 5, 2, replace=True)
def test_eval_shape_big_random_array(self):
def f(x):
return random.normal(random.PRNGKey(x), (int(1e12),))
with core.skipping_checks(): # check_jaxpr will materialize array
api.eval_shape(f, 0) # doesn't error
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "seed={seed}_type={type}_jit={jit}".format(**dct), **dct} for dct in [
{"seed": 0, "type": int, "jit": True, "key": [0, 0]},
{"seed": 0, "type": int, "jit": False, "key": [0, 0]},
{"seed": 1, "type": np.int32, "jit": True, "key": [0, 1]},
{"seed": 1, "type": np.int32, "jit": False, "key": [0, 1]},
{"seed": 2, "type": np.uint32, "jit": True, "key": [0, 2]},
{"seed": 2, "type": np.uint32, "jit": False, "key": [0, 2]},
{"seed": 3, "type": np.int64, "jit": True, "key": [0, 3]},
{"seed": 3, "type": np.int64, "jit": False, "key": [0, 3]},
{"seed": -1, "type": int, "jit": True, "key": [4294967295, 4294967295] if FLAGS.jax_enable_x64 else [0, 4294967295]},
{"seed": -1, "type": int, "jit": False, "key": [4294967295, 4294967295] if FLAGS.jax_enable_x64 else [0, 4294967295]},
{"seed": -2, "type": np.int32, "jit": True, "key": [0, 4294967294]},
{"seed": -2, "type": np.int32, "jit": False, "key": [0, 4294967294]},
{"seed": -3, "type": np.int64, "jit": True, "key": [4294967295, 4294967293] if FLAGS.jax_enable_x64 else [0, 4294967293]},
{"seed": -3, "type": np.int64, "jit": False, "key": [4294967295, 4294967293] if FLAGS.jax_enable_x64 else [0, 4294967293]},
{"seed": np.iinfo(np.int32).max + 100, "type": int, "jit": True, "key": [0, 2147483747]},
{"seed": np.iinfo(np.int32).max + 100, "type": int, "jit": False, "key": [0, 2147483747]},
{"seed": np.iinfo(np.int32).max + 101, "type": np.uint32, "jit": True, "key": [0, 2147483748]},
{"seed": np.iinfo(np.int32).max + 101, "type": np.uint32, "jit": False, "key": [0, 2147483748]},
{"seed": np.iinfo(np.int32).min - 100, "type": int, "jit": True, "key": [4294967295, 2147483548] if FLAGS.jax_enable_x64 else [0, 2147483548]},
{"seed": np.iinfo(np.int32).min - 100, "type": int, "jit": False, "key": [4294967295, 2147483548] if FLAGS.jax_enable_x64 else [0, 2147483548]},
{"seed": np.iinfo(np.int32).min - 101, "type": np.int64, "jit": True, "key": [4294967295, 2147483547] if FLAGS.jax_enable_x64 else [0, 2147483547]},
{"seed": np.iinfo(np.int32).min - 101, "type": np.int64, "jit": False, "key": [4294967295, 2147483547] if FLAGS.jax_enable_x64 else [0, 2147483547]},
]
))
def test_prng_seeds_and_keys(self, seed, type, jit, key):
seed = type(seed)
if jit:
actual = api.jit(random.PRNGKey)(seed)
else:
actual = random.PRNGKey(seed)
expected = jnp.array(key, dtype=jnp.uint32)
self.assertArraysEqual(actual, expected)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": f"_seed={seed}_type={type}", "seed": seed, "type": type}
for type in ["int", "np.array", "jnp.array"]
for seed in [-1, 0, 1, (1 << 32) - 1, (1 << 63) - 1, np.uint64((1 << 64) - 1)]))
def test_prng_jit_invariance(self, seed, type):
if type == "int" and seed == (1 << 64) - 1:
self.skipTest("Expected failure: Python int too large.")
type = {"int": int, "np.array": np.array, "jnp.array": jnp.array}[type]
args_maker = lambda: [type(seed)]
self._CompileAndCheck(random.PRNGKey, args_maker)
def test_prng_errors(self):
seed = np.iinfo(np.uint64).max
with self.assertRaises(OverflowError):
random.PRNGKey(seed)
with self.assertRaises(OverflowError):
api.jit(random.PRNGKey)(seed)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
import unittest
from fractions import Fraction
from function.scalar_range_interpreter import ScalarRangeInterpreter
from tonalmodel.tonality import Tonality, ModalityType
from tonalmodel.diatonic_pitch import DiatonicPitch
class TestScalarRangeInterpreter(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_eval_as_nearest_pitch(self):
print("------- test_eval_as_nearest_pitch")
# Test for scalar range map
tonality = Tonality.create(ModalityType.Major, 'C', 0)
# 12 scalar pitches from C:4 to G:5, one scalar step per value increment of 1/11
interpreter = ScalarRangeInterpreter(tonality, DiatonicPitch.parse('C:4'), 0, Fraction(1, 11))
for i in range(0, 12):
p = interpreter.eval_as_nearest_pitch(Fraction(i, 11))
print('[{0}] {1}'.format(i, p))
assert str(interpreter.eval_as_nearest_pitch(Fraction(0, 11))) == 'C:4'
assert str(interpreter.eval_as_nearest_pitch(Fraction(1, 11))) == 'D:4'
assert str(interpreter.eval_as_nearest_pitch(Fraction(2, 11))) == 'E:4'
assert str(interpreter.eval_as_nearest_pitch(Fraction(3, 11))) == 'F:4'
assert str(interpreter.eval_as_nearest_pitch(Fraction(4, 11))) == 'G:4'
assert str(interpreter.eval_as_nearest_pitch(Fraction(5, 11))) == 'A:4'
assert str(interpreter.eval_as_nearest_pitch(Fraction(6, 11))) == 'B:4'
assert str(interpreter.eval_as_nearest_pitch(Fraction(7, 11))) == 'C:5'
assert str(interpreter.eval_as_nearest_pitch(Fraction(8, 11))) == 'D:5'
assert str(interpreter.eval_as_nearest_pitch(Fraction(9, 11))) == 'E:5'
assert str(interpreter.eval_as_nearest_pitch(Fraction(10, 11))) == 'F:5'
assert str(interpreter.eval_as_nearest_pitch(Fraction(11, 11))) == 'G:5'
p = interpreter.eval_as_nearest_pitch(Fraction(3, 11) + 0.01)
assert str(p) == 'F:4'
p = interpreter.eval_as_nearest_pitch(Fraction(3, 11) + 0.08)
assert str(p) == 'G:4'
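# Worked example of the rounding behaviour exercised above (inferred from these
# tests, not from the interpreter source): with anchor value 0 and increment
# 1/11, v = 3/11 + 0.08 ~= 0.3527 gives v / (1/11) ~= 3.88, whose nearest
# integer index is 4, i.e. the fifth scalar pitch G:4; v = 3/11 + 0.01 gives
# ~3.11, which stays at index 3 (F:4).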
def test_value_for(self):
print("------- test_value_for")
tonality = Tonality.create(ModalityType.Major, 'E', 0)
# Scalar pitches starting at E:2, one scalar step per value increment of 1/10, with E:2 mapped to 1
interpreter = ScalarRangeInterpreter(tonality, DiatonicPitch.parse('E:2'), 1, Fraction(1, 10))
for i in range(0, 8):
p = interpreter.eval_as_nearest_pitch(1 + Fraction(i, 10))
print('[{0}] {1} --> {2}'.format(i, p, interpreter.value_for(p)))
assert interpreter.value_for(DiatonicPitch.parse('E:2')) == 1
assert interpreter.value_for(DiatonicPitch.parse('F#:2')) == Fraction(11, 10)
assert interpreter.value_for(DiatonicPitch.parse('G#:2')) == Fraction(12, 10)
assert interpreter.value_for(DiatonicPitch.parse('A:2')) == Fraction(13, 10)
assert interpreter.value_for(DiatonicPitch.parse('B:2')) == Fraction(14, 10)
assert interpreter.value_for(DiatonicPitch.parse('F:2')) is None
def test_book_example(self):
print("------- test_book_example")
# Test for scalar range map
tonality = Tonality.create(ModalityType.Major, 'C', 0)
# 12 scalar pitches starting at C:4, one scalar step per value increment of 5/2
interpreter = ScalarRangeInterpreter(tonality, DiatonicPitch.parse('C:4'), 0, Fraction(5, 2))
for i in range(0, 12):
p = interpreter.eval_as_nearest_pitch(Fraction(5 * i, 2))
print('[{0}] {1}'.format(i, p))
def test_book_example_1(self):
print("------- test_book_example_1")
tonality = Tonality.create(ModalityType.Major, 'E', 0)
interpreter = ScalarRangeInterpreter(tonality, DiatonicPitch.parse('E:4'), 0, 1)
for i in range(0, 8):
p = interpreter.eval_as_nearest_pitch(i)
print('[{0}] {1}'.format(i, p))
gs_value = interpreter.value_for("G#:4")
assert gs_value == 2
gs_value = interpreter.value_for("G:4")
assert gs_value is None
pitches = interpreter.eval_as_pitch(5.2)
print("[{0}]".format(', '.join(map(str, pitches))))
assert pitches[0] == DiatonicPitch.parse('c#:5')
assert pitches[1] == DiatonicPitch.parse('d#:5')
assert 66 == interpreter.eval_as_accurate_chromatic_distance(8)  # 66 for F#:5
|
"""UserObservees API Tests for Version 1.0.
This is a testing template for the generated UserObserveesAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.user_observees import UserObserveesAPI
from py3canvas.apis.user_observees import Pairingcode
class TestUserObserveesAPI(unittest.TestCase):
"""Tests for the UserObserveesAPI."""
def setUp(self):
self.client = UserObserveesAPI(secrets.instance_address, secrets.access_token)
def test_list_observees(self):
"""Integration test for the UserObserveesAPI.list_observees method."""
user_id = None # Change me!!
r = self.client.list_observees(user_id, include=None)
def test_list_observers(self):
"""Integration test for the UserObserveesAPI.list_observers method."""
user_id = None # Change me!!
r = self.client.list_observers(user_id, include=None)
def test_add_observee_with_credentials(self):
"""Integration test for the UserObserveesAPI.add_observee_with_credentials method."""
# This method issues a POST request and would modify the Canvas instance, so it is intentionally left unimplemented here.
pass
def test_show_observee(self):
"""Integration test for the UserObserveesAPI.show_observee method."""
user_id = None # Change me!!
observee_id = None # Change me!!
r = self.client.show_observee(observee_id, user_id)
def test_show_observer(self):
"""Integration test for the UserObserveesAPI.show_observer method."""
user_id = None # Change me!!
observer_id = None # Change me!!
r = self.client.show_observer(observer_id, user_id)
def test_add_observee(self):
"""Integration test for the UserObserveesAPI.add_observee method."""
# This method issues a PUT request and would modify the Canvas instance, so it is intentionally left unimplemented here.
pass
def test_remove_observee(self):
"""Integration test for the UserObserveesAPI.remove_observee method."""
user_id = None # Change me!!
observee_id = None # Change me!!
r = self.client.remove_observee(observee_id, user_id, root_account_id=None)
def test_create_observer_pairing_code(self):
"""Integration test for the UserObserveesAPI.create_observer_pairing_code method."""
# This method issues a POST request and would modify the Canvas instance, so it is intentionally left unimplemented here.
pass
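    # A minimal sketch of how one of these templates might be filled in once a
    # real Canvas user id is available (the id below is a placeholder, and the
    # assertion only checks that a response came back):
    #
    # def test_list_observees_filled_in(self):
    #     user_id = 12345  # hypothetical id
    #     r = self.client.list_observees(user_id, include=None)
    #     self.assertIsNotNone(r)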
|
import pytest
@pytest.mark.parametrize(
"word,lemma",
[("新しく", "新しい"), ("赤く", "赤い"), ("すごく", "すごい"), ("いただきました", "いただく"), ("なった", "なる")],
)
def test_ja_lemmatizer_assigns(ja_tokenizer, word, lemma):
test_lemma = ja_tokenizer(word)[0].lemma_
assert test_lemma == lemma
@pytest.mark.parametrize(
"word,norm",
[
("SUMMER", "サマー"),
("食べ物", "食べ物"),
("綜合", "総合"),
("コンピュータ", "コンピューター"),
],
)
def test_ja_lemmatizer_norm(ja_tokenizer, word, norm):
test_norm = ja_tokenizer(word)[0].norm_
assert test_norm == norm
|
class MapAsObject:
def __init__(self, wrapped):
self.wrapped = wrapped
def __getattr__(self, key):
try:
return self.wrapped[key]
except KeyError:
raise AttributeError("%r object has no attribute %r" %
(self.__class__.__name__, key))
def get(self, *args, **kwargs):
return self.wrapped.get(*args, **kwargs)
def __str__(self):
return str(self.wrapped)
def as_object(wrapped_map):
return MapAsObject(wrapped_map)
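# A minimal usage sketch (illustrative only, not part of the original module):
# wrapping a plain dict lets callers use attribute access and dict-style .get()
# interchangeably, while unknown names raise AttributeError as usual.
if __name__ == "__main__":
    config = as_object({"host": "localhost", "port": 8080})
    assert config.host == "localhost"          # attribute access hits the dict
    assert config.get("port") == 8080          # .get() is delegated
    assert config.get("missing", 42) == 42     # default works like dict.get
    try:
        config.missing
    except AttributeError as exc:
        print("as expected:", exc)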
|
import json
import os
import pickle
import sys
import re
from collections import OrderedDict
import copy
from boltons.cacheutils import LRU
from boltons.cacheutils import cachedmethod
from mesa import Model
from mesa.time import StagedActivation
from simulation.Registry import registry
from simulation.pickleThis import pickleThis
from simulation.Exogenous import Exogenous
from simulation.SISTER import SISTER
class SnetSim(Model):
def __init__(self, study_path='study.json'):
# Get data from config file
with open(study_path) as json_file:
config = json.load(json_file, object_pairs_hook=OrderedDict)
self.parameters = config['parameters']
super().__init__(self.parameters['seed'])
self.blackboard = config['blackboard']
self.ontology = config['ontology']
# Copy config file to output folder
outpath = config['parameters']['output_path']
if not os.path.exists(outpath):
os.makedirs(outpath)
filename = outpath + study_path
pretty = json.dumps(config, indent=2, separators=(',', ':'))
with open(filename, 'w') as outfile:
outfile.write(pretty)
# Initialize class attributes
self.gepResult = None
self.registry = registry
self.reproduction_report = self.reproduction_report()
self.emergent_functions = OrderedDict()
self.emergent_functions_arity = OrderedDict()
self.emergent_functions_call_number = 0
self.stochastic_pattern = re.compile(r'_stochastic\d+')
self.prefix_pattern = re.compile(r'^f\d+_')
# Pickling parameters
pickle_config_path = config['parameters']['output_path'] + 'pickles/' + 'index.p'
if pickle_config_path and os.path.exists(pickle_config_path):
with open(pickle_config_path, 'rb') as cachehandle:
pickle_config = pickle.load(cachehandle)
else:
pickle_config = OrderedDict([("count", 0), ("pickles", OrderedDict())])
self.pickle_count = pickle_config['count'] # contains the next number for the pickle file
self.pickles = pickle_config['pickles']
self.resultTuple = ()
# self.cache = LRU(max_size = 512)
self.cache = LRU()
# Buyers gather offers by ranking the sellers whose offered price overlaps with what the
# buyer is willing to pay. `choose_partners` is called several times so the supply chain
# has a chance to settle over multiple trades and offer networks have a chance to be filled.
# In `step`, each agent may put out a new message informed by the purchases made in the
# previous round.
stage_list = ['step', 'gather_offers'] + ['choose_partners'] * 20
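# For reference, this is roughly how mesa's StagedActivation drives the stages
# each model step (a sketch assuming mesa's documented staged scheduler; the
# agent and stage names here are hypothetical):
#
#   schedule = StagedActivation(model, stage_list=['step', 'choose_partners'])
#   schedule.add(agent)        # agent must define step() and choose_partners()
#   schedule.step()            # runs step() on every agent, then choose_partners()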
self.schedule = StagedActivation(self, stage_list=stage_list, shuffle=True, shuffle_between_stages=True)
# Create initial agents as requested in `blackboard agents`
initial_blackboard = copy.deepcopy(self.blackboard)
self.blackboard = []
agent_count = 0
for i, message in enumerate(initial_blackboard):
if message['type'] in self.parameters['agent_parameters']:
agent_parameters = self.parameters['agent_parameters'][message['type']]
else:
agent_parameters = None
# Create as many copies of agent `i` as requested
for _ in range(self.parameters['blackboard_agents'][i]):
a = globals()[message['type']](agent_count, self, message, agent_parameters)
self.schedule.add(a)
agent_count += 1
# Create random agents
for agent_type, n in self.parameters['random_agents'].items():
if agent_type in self.parameters['agent_parameters']:
agent_parameters = self.parameters['agent_parameters'][agent_type]
else:
agent_parameters = None
for i in range(n):
a = globals()[agent_type](agent_count, self, None, agent_parameters)
self.schedule.add(a)
agent_count += 1
print("Initialized SnetSim instance!")
def remove_suffix(self, func_name):
cut_tuple = func_name
if func_name:
stochastic_suffix = self.stochastic_pattern.search(func_name)
if stochastic_suffix:
stochastic_suffix = stochastic_suffix.group()
cut_tuple = func_name[:-len(stochastic_suffix)]
return cut_tuple
@cachedmethod('cache')
@pickleThis
def memoise_pickle(self, tuple_key):
result = None
try:
cut_tuple = self.remove_suffix(tuple_key[0])
if len(self.resultTuple) and cut_tuple:
result = self.registry[cut_tuple](*self.resultTuple)()
else:
result = self.registry[cut_tuple]()
except IOError as e:
print("I/O error({0})".format(e))
except ValueError as e:
print("ValueError({0})".format(e))
except AttributeError as e:
print("AttributeError({0})".format(e))
except TypeError as e:
print("TypeError({0})".format(e))
except RuntimeError as e:
print("RuntimeError({0})".format(e))
except IndexError as e:
print("IndexError({0})".format(e))
except:
print("Unexpected error:", sys.exc_info()[0])
raise
return result
def remove_prefix(self, func_name):
cut_tuple = func_name
if func_name:
call_number_prefix = self.prefix_pattern.search(func_name)
if call_number_prefix:
call_number_prefix = call_number_prefix.group()
cut_tuple = func_name[len(call_number_prefix):]
return cut_tuple
def get_call_prefix(self, func_name):
call_prefix = None
if func_name:
call_number_prefix = self.prefix_pattern.search(func_name)
if call_number_prefix:
call_prefix = call_number_prefix.group()
return call_prefix
def call_emergent_function(self, gep_result, root):
print("SnetSim calling emergent function with root {0} : {1}".format(root, gep_result))
self.gepResult = copy.deepcopy(gep_result)
func_tuple = self.call_memoise_pickle(root)
print("SnetSim called emergent function with root {0} with result {1}".format(root, func_tuple))
return func_tuple
def call_memoise_pickle(self, root):
# Right now, self.emergent_functions looks like:
# f1: a, b, f2, c, d
# f2: e, d, f3, f, g
# f3: h, i, j
#
# TODO: revisit the original problem described in the modulargep.txt file in the
# singularitynet directory; it is likely a matter of creating a registry for a
# function list on the fly from the real registry.
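# A concrete (hypothetical) illustration of the recursion below: given
# self.gepResult = {'f1': ['a', 'f2'], 'f2': ['b', 'c']}, calling
# call_memoise_pickle('f1') first resolves the leaves 'a', 'b' and 'c' to
# tuples like ('a', ()), then 'f2' becomes ('f2', (('b', ()), ('c', ()))),
# and the root returns ('f1', (('a', ()), ('f2', (('b', ()), ('c', ()))))).
# Because each func_tuple is a hashable nest of tuples, identical subtrees hit
# the same memoised / pickled result in memoise_pickle.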
result = None
func_tuple = (None, None)
if root:
result_list = []
func_list = []
# argTuple = ()
if root in self.gepResult:
args = self.gepResult[root]
# argTuple = tuple(args)
for arg in args:
temp_func_tuple, temp_result = self.call_memoise_pickle(arg)
result_list.append(temp_result)
func_list.append(temp_func_tuple)
carried_back = tuple(func_list)
strip_prefix = self.remove_prefix(root)
func_tuple = (strip_prefix, carried_back)
self.resultTuple = tuple(result_list) # You have to set a global to memoise and pickle correctly
if strip_prefix is not None:
result = self.memoise_pickle(func_tuple)
return func_tuple, result
def reproduction_report(self):
path = self.parameters['output_path'] + 'reproduction_report.csv'
file = open(path, "w")
file.write("time;agent;label;utility;agi_tokens;buyer_score;seller_score;sign_displayed;bought_items\n")
return file
def print_reproduction_report_line(self, agent, utility, bought_items):
a = self.schedule.time
b = agent.unique_id
c = agent.message['label']
d = utility
e = agent.agiTokens
f = agent.max_buyer_score
g = agent.max_seller_score
h = agent.message['sign']
i = bought_items
self.reproduction_report.write("{0};{1};{2};{3};{4};{5};{6};{7};{8}\n".format(a, b, c, d, e, f, g, h, i))
self.reproduction_report.flush()
def print_logs(self):
log_path = self.parameters['output_path'] + "logs/"
filename = log_path + "log" + str(self.schedule.time) + ".txt"
if not os.path.exists(log_path):
os.makedirs(log_path)
pretty = json.dumps(self.blackboard, indent=2, separators=(',', ':'))
with open(filename, 'w') as outfile:
outfile.write(pretty)
# json.dump(self.blackboard, outfile)
pickle_path = self.parameters['output_path'] + 'pickles/'
pickle_config_path = pickle_path + 'index.p'
pickle_config = OrderedDict([('count', self.pickle_count), ('pickles', self.pickles)])
if not os.path.exists(pickle_path):
os.makedirs(pickle_path)
with open(pickle_config_path, 'wb') as outfile:
pickle.dump(pickle_config, outfile)
def visualize(self):
# todo: visualize changes in price and test score and relative wealth
pass
def step(self):
"""Advance the model by one step."""
print("IN SnetSim step, time " + str(self.schedule.time))
self.print_logs()
# self.visualize() after learning agents are implemented
self.schedule.step()
def go(self):
for i in range(self.parameters['max_iterations']):
print("iteration " + str(i))
self.step()
def main():
snetsim = SnetSim(sys.argv[1]) if len(sys.argv) > 1 else SnetSim()
snetsim.go()
if __name__ == '__main__':
main()
|
from datetime import datetime
import pandas as pd
class Funcion_ingresar:
def Ingresar_desplegar_teclado_numerico_cedula(self):
self.campo = 'ingresar-cedula'
#self.Ingresar_desplegar_teclado_numerico()
def Ingresar_desplegar_teclado_numerico_temp(self):
self.campo = 'ingresar-temp'
#self.Ingresar_desplegar_teclado_numerico()
def Ingresar_desplegar_teclado_numerico(self):
MOV = -self.height/10
# move the buttons
self.ingresar_nombre.setGeometry(self.width/3.6, (self.height/2.7) + MOV,
self.width/4.2, self.height/12)
self.ingresar_cedula.setGeometry(self.width/3.6, (self.height/2.7)+(self.height/8.5)+MOV,
self.width/7, self.height/12)
self.ingresar_temp.setGeometry((self.width/3.6) + (self.width/6), (self.height/2.7)+(self.height/8.5)+MOV,
self.width/14, self.height/12)
self.ingresar_ingresar.setGeometry(self.width/1.8, (self.height/2.7) + MOV,
self.width/6, self.height/4.9)
self.NotTeclado()
self.TecladoNumerico()
def Ingresar_guardar_teclado_numerico(self):
self.ingresar_nombre.setGeometry(self.width/3.6, self.height/2.7,
self.width/4.2, self.height/12)
self.ingresar_cedula.setGeometry(self.width/3.6, (self.height/2.7)+self.height/8.5,
self.width/7, self.height/12)
self.ingresar_temp.setGeometry((self.width/3.6) + (self.width/6), (self.height/2.7)+self.height/8.5,
self.width/14, self.height/12)
self.ingresar_ingresar.setGeometry(self.width/1.8, self.height/2.7,
self.width/6, self.height/4.9)
self.NotTecladoNumerico()
def Ingresar_desplegar_teclado(self):
MOV = -self.height/10
# move the buttons
#self.ingresar_nombre.setGeometry(self.width/3.6, (self.height/2.7) + MOV,
# self.width/4.2, self.height/12)
#self.ingresar_cedula.setGeometry(self.width/3.6, (self.height/2.7)+(self.height/8.5)+MOV,
# self.width/7, self.height/12)
#self.ingresar_temp.setGeometry((self.width/3.6) + (self.width/6), (self.height/2.7)+(self.height/8.5)+MOV,
# self.width/14, self.height/12)
#self.ingresar_ingresar.setGeometry(self.width/1.8, (self.height/2.7) + MOV,
# self.width/6, self.height/4.9)
#self.Teclado()
self.NotTecladoNumerico()
self.campo = 'ingresar-nombre'
def Ingresar_guardar_teclado(self):
self.ingresar_nombre.setGeometry(self.width/3.6, self.height/2.7,
self.width/4.2, self.height/12)
self.ingresar_cedula.setGeometry(self.width/3.6, (self.height/2.7)+self.height/8.5,
self.width/7, self.height/12)
self.ingresar_temp.setGeometry((self.width/3.6) + (self.width/6), (self.height/2.7)+self.height/8.5,
self.width/14, self.height/12)
self.ingresar_ingresar.setGeometry(self.width/1.8, self.height/2.7,
self.width/6, self.height/4.9)
self.NotTeclado()
def IngresarIngresar(self):
# local variables
nombre = str(self.ingresar_nombre.text())
cedula = str(self.ingresar_cedula.text())
carnet = '*' # comes in from the Arduino
temp = str(self.ingresar_temp.text())
Fecha = datetime.today().strftime('%d-%m-%Y')
HoraIn = datetime.today().strftime('%H:%M')
HoraOut = 'HO*'
Delta = 'D*'
IsIn = 'True'
df = pd.read_csv('src/models/data/DB.csv')
try:
Numingresos = str(len(df[(df['Cedula'] == cedula)]))
if nombre != "" and cedula != "" and nombre != "": # lógica para leer si los campos están vacíos
if not nombre.isdigit() and not cedula.isalpha() and not temp.isalpha(): # reject digits/letters where they do not belong
cedulaExist = df[(df['Cedula'] == str(cedula)) & (df['IsIn'])].index.tolist()
if not float(temp) >= 37.5:
if not cedulaExist:
self.df_as_txt = open("src/models/data/DB.csv", "a")
# For pandas
# Append the person record to the DB
# TODO: extract into a separate function
persona = '\n' + nombre + ',' + cedula + ',' + carnet + ',' + temp + ',' + Fecha + ',' + HoraIn + ',' + HoraOut + ',' + Delta + ',' + Numingresos + ',' + IsIn
self.df_as_txt.write(persona)
self.df_as_txt.close()
# TODO: extract into a separate function
self.dialogo_mensaje = "El usuario fue ingresado \n con éxito"
self.dialogo.setInformativeText(self.dialogo_mensaje)
self.dialogo.show()
self.HomeWindow()
else:
self.dialogo_mensaje = "El usuario ya está adentro \n"
self.dialogo.setInformativeText(self.dialogo_mensaje)
self.dialogo.show()
else:
# play the alarm
self.alarm.play()
self.dialogo_mensaje = "EL USUARIO TIENE FIEBRE \n"
self.dialogo.setInformativeText(self.dialogo_mensaje)
self.dialogo.show()
else:
self.dialogo_mensaje = "Error, verifique los datos ingresados\n "
self.dialogo.setInformativeText(self.dialogo_mensaje)
self.dialogo.show()
else:
self.dialogo_mensaje = "Debe llenar todos los campos\nantes de continuar"
self.dialogo.setInformativeText(self.dialogo_mensaje)
self.dialogo.show()
except Exception as e:
print(e)
self.dialogo_mensaje = "Error, intente nuevamente\n\nSi el error persiste comuniquese con el fabricante"
self.dialogo.setInformativeText(self.dialogo_mensaje)
self.dialogo.show()
|
import re
import sys
class TitrationData:
def __init__(self, filename):
self.residue_data = {}
self.read_residue_data(filename)
def read_residue_data(self,filename):
titration_file = open(filename)
for line in titration_file:
line_comment = re.search("#", line)
if line_comment is not None:
line = line[0:line_comment.start()]
line_init_residue = re.search("TITR", line)
if line_init_residue is not None:
line_init_data = line.rstrip().lstrip().split()
self.residue_data[line_init_data[1]] = {'atoms':{},'pKa':line_init_data[2],'numProtonationStates':line_init_data[3],'protons':line_init_data[4:]}
continue
line_data = line.rstrip().lstrip().split()
if len(line_data) == 0:
continue
if line_data[0] not in list(self.residue_data.keys()):
print("Error: You must define the residue first with TITR <resname> <pka> <protons>\n")
sys.exit(2)
# self.residue_data[line_data[0]] = {'atoms':{},'pKa':None,'numProtonationStates':0,'protons':[]}
if line_data[1] in list(self.residue_data[line_data[0]]['atoms'].keys()):
print("Duplicate atom " + line_data[1] + " in residue " + line_data[0])
sys.exit(2)
last_prot_charge_index = len(line_data) - 1
self.residue_data[line_data[0]]['atoms'][line_data[1]] = {'charges':line_data[3:last_prot_charge_index]}
titration_file.close()
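# Minimal usage sketch (the file path is a placeholder; per the error message
# above, each residue must first be declared with a line of the form
# "TITR <resname> <pka> <protons>" before its atom lines appear):
#
#   data = TitrationData("titratable_residues.def")
#   for resname, info in data.residue_data.items():
#       print(resname, info['pKa'], sorted(info['atoms']))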
|
class VisualStateGroup(DependencyObject):
"""
Contains mutually exclusive System.Windows.VisualState objects and System.Windows.VisualTransition objects that are used to move from one state to another.
VisualStateGroup()
"""
CurrentState=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Windows.VisualState that is currently applied to the control.
Get: CurrentState(self: VisualStateGroup) -> VisualState
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the name of the System.Windows.VisualStateGroup.
Get: Name(self: VisualStateGroup) -> str
Set: Name(self: VisualStateGroup)=value
"""
States=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the collection of mutually exclusive System.Windows.VisualState objects.
Get: States(self: VisualStateGroup) -> IList
"""
Transitions=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the collection of System.Windows.VisualTransition objects.
Get: Transitions(self: VisualStateGroup) -> IList
"""
CurrentStateChanged=None
CurrentStateChanging=None
|
import os
import django
from django.conf import settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
def pytest_configure():
settings.DEBUG = False
settings.DATABASES["default"] = {"ENGINE": "django.db.backends.sqlite3"}
django.setup()
|
#!/usr/bin/env python
import os
import numpy as np
from mtuq import read, open_db, download_greens_tensors
from mtuq.event import Origin
from mtuq.graphics import plot_data_greens2, plot_beachball, plot_misfit_lune,\
plot_likelihood_lune, plot_marginal_vw,\
plot_variance_reduction_lune, plot_magnitude_tradeoffs_lune,\
plot_time_shifts, plot_amplitude_ratios,\
plot_summary1, plot_summary2, likelihood_analysis,\
_likelihoods_vw_regular, _marginals_vw_regular,\
_plot_lune, _plot_vw, _product_vw
from mtuq.graphics.uq.vw import _variance_reduction_vw_regular
from mtuq.grid import FullMomentTensorGridSemiregular
from mtuq.grid_search import grid_search
from mtuq.misfit.waveform import Misfit, estimate_sigma, calculate_norm_data
from mtuq.process_data import ProcessData
from mtuq.util import fullpath, merge_dicts, save_json
from mtuq.util.cap import parse_station_codes, Trapezoid
if __name__=='__main__':
#
# Performs detailed analysis involving
#
# - grid search over all moment tensor parameters, including magnitude
# - separate body wave, Rayleigh wave and Love wave data categories
# - data variance estimation and likelihood analysis
#
#
# Generates figures of
#
# - maximum likelihood surfaces
# - marginal likelihood surfaces
# - data misfit surfaces
# - "variance reduction" surfaces
# - geographic variation of time shifts
# - geographic variation of amplitude ratios
#
#
# USAGE
# mpirun -n <NPROC> python DetailedAnalysis.FullMomentTensor.py
#
#
# This is the most complicated example. For simpler ones, see
# SerialGridSearch.DoubleCouple.py or GridSearch.FullMomentTensor.py
#
# For ideas on applying this type of analysis to entire sets of events,
# see github.com/rmodrak/mtbench
#
path_data= fullpath('data/examples/20090407201255351/*.[zrt]')
path_weights= fullpath('data/examples/20090407201255351/weights.dat')
event_id= '20090407201255351'
model= 'ak135'
#
# Body and surface wave measurements will be made separately
#
process_bw = ProcessData(
filter_type='Bandpass',
freq_min= 0.1,
freq_max= 0.333,
pick_type='taup',
taup_model=model,
window_type='body_wave',
window_length=15.,
capuaf_file=path_weights,
)
process_sw = ProcessData(
filter_type='Bandpass',
freq_min=0.025,
freq_max=0.0625,
pick_type='taup',
taup_model=model,
window_type='surface_wave',
window_length=150.,
capuaf_file=path_weights,
)
#
# For our objective function, we will use a sum of body and surface wave
# contributions
#
misfit_bw = Misfit(
norm='L2',
time_shift_min=-2.,
time_shift_max=+2.,
time_shift_groups=['ZR'],
)
misfit_rayleigh = Misfit(
norm='L2',
time_shift_min=-10.,
time_shift_max=+10.,
time_shift_groups=['ZR'],
)
misfit_love = Misfit(
norm='L2',
time_shift_min=-10.,
time_shift_max=+10.,
time_shift_groups=['T'],
)
#
# User-supplied weights control how much each station contributes to the
# objective function
#
station_id_list = parse_station_codes(path_weights)
#
# Next, we specify the moment tensor grid and source-time function
#
grid = FullMomentTensorGridSemiregular(
npts_per_axis=12,
magnitudes=[4.4, 4.5, 4.6, 4.7])
wavelet = Trapezoid(
magnitude=4.5)
#
# Origin time and location will be fixed. For an example in which they
# vary, see examples/GridSearch.DoubleCouple+Magnitude+Depth.py
#
# See also Dataset.get_origins(), which attempts to create Origin objects
# from waveform metadata
#
origin = Origin({
'time': '2009-04-07T20:12:55.000000Z',
'latitude': 61.454200744628906,
'longitude': -149.7427978515625,
'depth_in_m': 33033.599853515625,
})
from mpi4py import MPI
comm = MPI.COMM_WORLD
#
# The main I/O work starts now
#
if comm.rank==0:
print('Reading data...\n')
data = read(path_data, format='sac',
event_id=event_id,
station_id_list=station_id_list,
tags=['units:cm', 'type:velocity'])
data.sort_by_distance()
stations = data.get_stations()
print('Processing data...\n')
data_bw = data.map(process_bw)
data_sw = data.map(process_sw)
print('Reading Greens functions...\n')
greens = download_greens_tensors(stations, origin, model)
print('Processing Greens functions...\n')
greens.convolve(wavelet)
greens_bw = greens.map(process_bw)
greens_sw = greens.map(process_sw)
else:
stations = None
data_bw = None
data_sw = None
greens_bw = None
greens_sw = None
stations = comm.bcast(stations, root=0)
data_bw = comm.bcast(data_bw, root=0)
data_sw = comm.bcast(data_sw, root=0)
greens_bw = comm.bcast(greens_bw, root=0)
greens_sw = comm.bcast(greens_sw, root=0)
#
# The main computational work starts now
#
if comm.rank==0:
print('Evaluating body wave misfit...\n')
results_bw = grid_search(
data_bw, greens_bw, misfit_bw, origin, grid)
if comm.rank==0:
print('Evaluating Rayleigh wave misfit...\n')
results_rayleigh = grid_search(
data_sw, greens_sw, misfit_rayleigh, origin, grid)
if comm.rank==0:
print('Evaluating Love wave misfit...\n')
results_love = grid_search(
data_sw, greens_sw, misfit_love, origin, grid)
if comm.rank==0:
results_sum = results_bw + results_rayleigh + results_love
#
# Data variance estimation and likelihood analysis
#
# use minimum misfit as initial guess for maximum likelihood
idx = results_sum.idxmin('source')
best_source = grid.get(idx)
lune_dict = grid.get_dict(idx)
mt_dict = grid.get(idx).as_dict()
print('Data variance estimation...\n')
sigma_bw = estimate_sigma(data_bw, greens_bw,
best_source, misfit_bw.norm, ['Z', 'R'],
misfit_bw.time_shift_min, misfit_bw.time_shift_max)
sigma_rayleigh = estimate_sigma(data_sw, greens_sw,
best_source, misfit_rayleigh.norm, ['Z', 'R'],
misfit_rayleigh.time_shift_min, misfit_rayleigh.time_shift_max)
sigma_love = estimate_sigma(data_sw, greens_sw,
best_source, misfit_love.norm, ['T'],
misfit_love.time_shift_min, misfit_love.time_shift_max)
stats = {'sigma_bw': sigma_bw,
'sigma_rayleigh': sigma_rayleigh,
'sigma_love': sigma_love}
print(' Body wave variance: %.3e' %
sigma_bw**2)
print(' Rayleigh variance: %.3e' %
sigma_rayleigh**2)
print(' Love variance: %.3e' %
sigma_love**2)
print()
norm_bw = calculate_norm_data(data_bw,
misfit_bw.norm, ['Z', 'R'])
norm_rayleigh = calculate_norm_data(data_sw,
misfit_rayleigh.norm, ['Z', 'R'])
norm_love = calculate_norm_data(data_sw,
misfit_love.norm, ['T'])
norms = {misfit_bw.norm+'_bw': norm_bw,
misfit_rayleigh.norm+'_rayleigh': norm_rayleigh,
misfit_love.norm+'_love': norm_love}
print('Likelihood analysis...\n')
likelihoods, mle_lune, marginal_vw = likelihood_analysis(
(results_bw, sigma_bw**2),
(results_rayleigh, sigma_rayleigh**2),
(results_love, sigma_love**2))
# maximum likelihood vw surface
likelihoods_vw = _product_vw(
_likelihoods_vw_regular(results_bw, sigma_bw**2),
_likelihoods_vw_regular(results_rayleigh, sigma_rayleigh**2),
_likelihoods_vw_regular(results_love, sigma_love**2))
# TODO - marginalize over the joint likelihood distribution instead
marginals_vw = _product_vw(
_marginals_vw_regular(results_bw, sigma_bw**2),
_marginals_vw_regular(results_rayleigh, sigma_rayleigh**2),
_marginals_vw_regular(results_love, sigma_love**2))
#
# Generate figures and save results
#
# only generate components present in the data
components_bw = data_bw.get_components()
components_sw = data_sw.get_components()
# synthetics corresponding to minimum misfit
synthetics_bw = greens_bw.get_synthetics(
best_source, components_bw, mode='map')
synthetics_sw = greens_sw.get_synthetics(
best_source, components_sw, mode='map')
# time shifts and other attributes corresponding to minimum misfit
list_bw = misfit_bw.collect_attributes(
data_bw, greens_bw, best_source)
list_rayleigh = misfit_rayleigh.collect_attributes(
data_sw, greens_sw, best_source)
list_love = misfit_love.collect_attributes(
data_sw, greens_sw, best_source)
list_sw = [{**list_rayleigh[_i], **list_love[_i]}
for _i in range(len(stations))]
dict_bw = {station.id: list_bw[_i]
for _i,station in enumerate(stations)}
dict_rayleigh = {station.id: list_rayleigh[_i]
for _i,station in enumerate(stations)}
dict_love = {station.id: list_love[_i]
for _i,station in enumerate(stations)}
dict_sw = {station.id: list_sw[_i]
for _i,station in enumerate(stations)}
print('Plotting observed and synthetic waveforms...\n')
plot_beachball(event_id+'FMT_beachball.png',
best_source, stations, origin)
plot_data_greens2(event_id+'FMT_waveforms.png',
data_bw, data_sw, greens_bw, greens_sw, process_bw, process_sw,
misfit_bw, misfit_rayleigh, stations, origin, best_source, lune_dict)
print('Plotting misfit surfaces...\n')
os.makedirs(event_id+'FMT_misfit', exist_ok=True)
plot_misfit_lune(event_id+'FMT_misfit/bw.png', results_bw,
title='Body waves')
plot_misfit_lune(event_id+'FMT_misfit/rayleigh.png', results_rayleigh,
title='Rayleigh waves')
plot_misfit_lune(event_id+'FMT_misfit/love.png', results_love,
title='Love waves')
print()
print('Plotting maximum likelihood surfaces...\n')
os.makedirs(event_id+'FMT_likelihood', exist_ok=True)
plot_likelihood_lune(event_id+'FMT_likelihood/bw.png',
results_bw, var=sigma_bw**2,
title='Body waves')
plot_likelihood_lune(event_id+'FMT_likelihood/rayleigh.png',
results_rayleigh, var=sigma_rayleigh**2,
title='Rayleigh waves')
plot_likelihood_lune(event_id+'FMT_likelihood/love.png',
results_love, var=sigma_love**2,
title='Love waves')
_plot_lune(event_id+'FMT_likelihood/all.png',
likelihoods_vw, colormap='hot_r',
title='All data categories')
print()
print('Plotting marginal likelihood surfaces...\n')
os.makedirs(event_id+'FMT_marginal', exist_ok=True)
plot_marginal_vw(event_id+'FMT_marginal/bw.png',
results_bw, var=sigma_bw**2,
title='Body waves')
plot_marginal_vw(event_id+'FMT_marginal/rayleigh.png',
results_rayleigh, var=sigma_rayleigh**2,
title='Rayleigh waves')
plot_marginal_vw(event_id+'FMT_marginal/love.png',
results_love, var=sigma_love**2,
title='Love waves')
_plot_vw(event_id+'FMT_marginal/all.png',
marginals_vw, colormap='hot_r',
title='All data categories')
print()
print('Plotting variance reduction surfaces...\n')
os.makedirs(event_id+'FMT_variance_reduction', exist_ok=True)
plot_variance_reduction_lune(event_id+'FMT_variance_reduction/bw.png',
results_bw, norm_bw, title='Body waves',
colorbar_label='Variance reduction (percent)')
plot_variance_reduction_lune(event_id+'FMT_variance_reduction/rayleigh.png',
results_rayleigh, norm_rayleigh, title='Rayleigh waves',
colorbar_label='Variance reduction (percent)')
plot_variance_reduction_lune(event_id+'FMT_variance_reduction/love.png',
results_love, norm_love, title='Love waves',
colorbar_label='Variance reduction (percent)')
print()
print('Plotting tradeoffs...\n')
os.makedirs(event_id+'FMT_tradeoffs', exist_ok=True)
plot_misfit_lune(event_id+'FMT_tradeoffs/orientation.png',
results_sum, show_tradeoffs=True, title='Orientation tradeoffs')
plot_magnitude_tradeoffs_lune(event_id+'FMT_tradeoffs/magnitude.png',
results_sum, title='Magnitude tradeoffs')
print()
print('Plotting summary figures (work in progress)...\n')
os.makedirs(event_id+'FMT_summary', exist_ok=True)
plot_summary1(event_id+'FMT_summary/1.png',
results_rayleigh, stations, origin, best_source)
plot_summary2(event_id+'FMT_summary/2.png',
results_rayleigh, sigma_rayleigh**2, stations, origin, best_source)
print()
print('Plotting time shift geographic variation...\n')
plot_time_shifts(event_id+'FMT_time_shifts/bw',
list_bw, stations, origin, best_source)
plot_time_shifts(event_id+'FMT_time_shifts/sw',
list_sw, stations, origin, best_source)
print('Plotting amplitude ratio geographic variation...\n')
plot_amplitude_ratios(event_id+'FMT_amplitude_ratios/bw',
list_bw, stations, origin, best_source)
plot_amplitude_ratios(event_id+'FMT_amplitude_ratios/sw',
list_sw, stations, origin, best_source)
print('\nSaving results...\n')
# save best-fitting source
os.makedirs(event_id+'FMT_solutions', exist_ok=True)
save_json(event_id+'FMT_solutions/marginal_likelihood.json', marginal_vw)
save_json(event_id+'FMT_solutions/maximum_likelihood.json', mle_lune)
merged_dict = merge_dicts(lune_dict, mt_dict, origin,
{'M0': best_source.moment(), 'Mw': best_source.magnitude()})
save_json(event_id+'FMT_solutions/minimum_misfit.json', merged_dict)
os.makedirs(event_id+'FMT_stats', exist_ok=True)
save_json(event_id+'FMT_stats/data_variance.json', stats)
save_json(event_id+'FMT_stats/data_norm.json', norms)
# save stations and origins
stations_dict = {station.id: station
for _i,station in enumerate(stations)}
save_json(event_id+'FMT_stations.json', stations_dict)
save_json(event_id+'FMT_origins.json', {0: origin})
# save time shifts and other attributes
os.makedirs(event_id+'FMT_attrs', exist_ok=True)
save_json(event_id+'FMT_attrs/bw.json', dict_bw)
save_json(event_id+'FMT_attrs/sw.json', dict_sw)
# save processed waveforms as binary files
os.makedirs(event_id+'FMT_waveforms', exist_ok=True)
data_bw.write(event_id+'FMT_waveforms/dat_bw.p')
data_sw.write(event_id+'FMT_waveforms/dat_sw.p')
synthetics_bw.write(event_id+'FMT_waveforms/syn_bw.p')
synthetics_sw.write(event_id+'FMT_waveforms/syn_sw.p')
# save misfit surfaces as netCDF files
results_bw.save(event_id+'FMT_misfit/bw.nc')
results_rayleigh.save(event_id+'FMT_misfit/rayleigh.nc')
results_love.save(event_id+'FMT_misfit/love.nc')
print('\nFinished\n')
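# --- Hypothetical side note (not part of the original script) ---
# The variance-reduction surfaces plotted above are conventionally defined as
# VR = 100 * (1 - misfit / norm), where "norm" is the data norm returned by
# calculate_norm_data(). A minimal NumPy sketch of that formula:
#
#   import numpy as np
#
#   def variance_reduction(misfit_values, data_norm):
#       """Percent variance reduction for an array of misfit values."""
#       return 100.0 * (1.0 - np.asarray(misfit_values) / data_norm)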
|
from fastapi import HTTPException, status, Depends
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError
from app.models import UserTortModel
from app.utils import JWT
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
async def get_current_user(token: str = Depends(oauth2_scheme)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = JWT.decode(token)
username = payload.get("sub")
if username is None:
raise credentials_exception
except JWTError:
raise credentials_exception
user = await UserTortModel.get_or_none(username=username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(current_user=Depends(get_current_user)):
if current_user.deactivate:
        raise HTTPException(status_code=400, detail="Inactive user")
return current_user
async def get_admin_user(current_user=Depends(get_current_user)):
if not current_user.is_admin:
raise HTTPException(status_code=403, detail="You do not have admin rights")
return current_user
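# --- Hypothetical usage sketch (illustration only, not part of the original module) ---
# How the dependencies above are typically wired into routes; the FastAPI()
# instance, route paths, and response shapes below are assumptions.
from fastapi import FastAPI

demo_app = FastAPI()

@demo_app.get("/users/me")
async def read_own_profile(current_user=Depends(get_current_active_user)):
    return {"username": current_user.username}

@demo_app.get("/admin/stats")
async def read_admin_stats(current_user=Depends(get_admin_user)):
    return {"status": "ok"}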
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *


class PyAccimage(PythonPackage):
"""An accelerated Image loader and preprocessor leveraging Intel IPP.
accimage mimics the PIL API and can be used as a backend for torchvision.
"""
homepage = "https://github.com/pytorch/accimage"
url = "https://github.com/pytorch/accimage/archive/v0.1.1.tar.gz"
version('0.1.1', sha256='573c56866a42683c7cf25185620fe82ec2ce78468e0621c29fac8f4134a785f5')
depends_on('python', type=('build', 'link', 'run'))
# pip silently replaces distutils with setuptools
depends_on('py-setuptools', type='build')
depends_on('jpeg')
depends_on('ipp')
depends_on('py-pytest', type='test')
depends_on('py-numpy', type='test')
depends_on('py-imageio', type='test')
@run_after('install')
@on_package_attributes(run_tests=True)
def build_test(self):
pytest = which('pytest')
pytest('test.py')
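# Hypothetical usage note (not part of the recipe): installing with tests
# enabled so that build_test() above runs after the install step, assuming a
# recent Spack:
#
#   spack install --test=root py-accimage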
|
from rest_framework import status
from rest_framework.views import exception_handler as drf_exception_handler
from django.http import JsonResponse
def default_handler(exc, context):
# https://www.django-rest-framework.org/api-guide/exceptions/
# Call REST framework's default exception handler first,
# to get the standard error response.
response = drf_exception_handler(exc, context)
# Now add the HTTP status code to the response.
if response is not None:
if not response.data.get("detail"):
response.data = {
"detail": response.data,
"status_code": response.status_code,
}
else:
response.data["status_code"] = response.status_code
return response
def bad_request(request, exception, *args, **kwargs):
"""
Generic 400 error handler.
"""
data = {"error": "Bad Request (400)"}
return JsonResponse(data, status=status.HTTP_400_BAD_REQUEST)
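# --- Hypothetical usage sketch (illustration only) ---
# How these handlers are typically wired up; the dotted module path
# "myproject.exceptions" below is an assumption for illustration.
#
# settings.py:
#   REST_FRAMEWORK = {
#       "EXCEPTION_HANDLER": "myproject.exceptions.default_handler",
#   }
#
# urls.py:
#   handler400 = "myproject.exceptions.bad_request"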
|
# Generated by Django 4.0.3 on 2022-03-16 02:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('school', '0004_alter_professor_departamento'),
]
operations = [
migrations.RemoveField(
model_name='departamento',
name='creditos',
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Spaghetti: Web Server Security Scanner
#
# @url: https://github.com/m4ll0k/Spaghetti
# @author: Momo Outaadi (M4ll0k)
# @license: See the file 'doc/LICENSE'
from lib.net import http
from lib.net import utils
from lib.utils import printer
class Backdoors():
def __init__(self,url,agent,proxy,redirect):
self.url = url
self.printer = printer.Printer()
self.http = http.Http(agent=agent,proxy=proxy,redirect=redirect)
self.checker = utils.Checker()
def Run(self):
info = {
'name' : 'Backdoors',
'author' : 'Momo Outaadi (@M4ll0k)',
'description' : 'Tries to find common backdoors on the server'
}
dbbackdoors = open('data/Backdoors.txt','rb')
dbfile = list([x.split('\n') for x in dbbackdoors])
for x in dbfile:
try:
resp = self.http.Send(self.checker.Path(self.url,x[0]))
if resp.status_code == 200 and resp._content:
self.printer.plus('Found Backdoor: %s'%resp.url)
break
except Exception,ERROR:
pass |
import numpy as np
default_Q_t = np.identity(3) * np.random.rand(3, 1) * 0.1
default_R = np.identity(3) * np.random.rand(3, 1) * 0.1
class kalman_filter():
def __init__(self, Q_t = default_Q_t, R = default_R):
"""
:param Q_t: Covariance matrix defining noise of motion model deltax§
:param R:
"""
self.Q_t = Q_t
self.R = R
    def run_filter(self, state, covariance, control, observation):
        """
        :param state: Previous belief state (x, y, theta)
        :param covariance: Covariance matrix of the previous belief
        :param control: Kinematics (control) values
        :param observation: Measured state
        :return: Predicted state, corrected state and corrected covariance
        """
        # Initializing distributions
        A = np.identity(3)
        B = np.array([[np.cos(state[2]), 0],
                      [np.sin(state[2]), 0],
                      [0, 1]])
        C = np.identity(3)
        # Prediction
        state = np.matmul(A, state) + np.matmul(B, control)  # mu_t
        covariance = np.matmul(np.matmul(A, covariance), A.T) + self.R  # sigma_t
        # Correction (Kalman gain uses matrix products, not element-wise multiplication)
        K_t = np.matmul(np.matmul(covariance, C.T),
                        np.linalg.inv(np.matmul(np.matmul(C, covariance), C.T) + self.Q_t))
        new_state = state + np.matmul(K_t, (observation - np.matmul(C, state)))
        new_covariance = np.matmul((np.identity(3) - np.matmul(K_t, C)), covariance)
        return state, new_state, new_covariance
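# --- Hypothetical usage sketch (illustration only, not part of the original module) ---
# Runs one predict/correct cycle of the filter above on a toy (x, y, theta) pose;
# every number below is made up purely for demonstration.
if __name__ == "__main__":
    kf = kalman_filter()
    state = np.array([0.0, 0.0, 0.0])           # previous belief (x, y, theta)
    covariance = np.identity(3) * 0.1            # uncertainty of the previous belief
    control = np.array([1.0, 0.1])               # (translational, rotational) command
    observation = np.array([0.9, 0.05, 0.12])    # noisy pose measurement
    predicted, corrected, corrected_cov = kf.run_filter(state, covariance, control, observation)
    print("predicted state:", predicted)
    print("corrected state:", corrected)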
|
import os
from collections import defaultdict
import random
from typing import Dict, List
import pandas as pd
from scipy import stats
from lib2vec.corpus_structure import Corpus
from experiments.predicting_high_rated_books import mcnemar_sig_text, chi_square_test
from lib2vec.vectorization import Vectorizer
from lib2vec.vectorization_utils import Vectorization
import numpy as np
def get_percentage_of_correctly_labeled(vectors, human_assessment_df: pd.DataFrame, doc_id_mapping: Dict[str, str],
facet_mapping: Dict[str, str], use_sum: bool):
# reverted_facets = {value: key for key, value in facet_mapping.items()}
correctly_assessed = []
facet_wise = defaultdict(list)
random_baseline = False
skip_count = 0
agreement_store = defaultdict(list)
for i, row in human_assessment_df.iterrows():
book1 = doc_id_mapping[row["Book 1"]]
book2 = doc_id_mapping[row["Book 2"]]
book3 = doc_id_mapping[row["Book 3"]]
if use_sum:
facet = facet_mapping["total"]
else:
facet = facet_mapping[row["Facet"]]
selection = row["Selection"]
if selection == "skip" or selection == "unsure":
skip_count += 1
continue
if random_baseline:
if int(row["Selected Answer Nr."]) == random.randint(1, 3):
correctly_assessed.append(1)
facet_wise[row["Facet"]].append(1)
else:
correctly_assessed.append(0)
facet_wise[row["Facet"]].append(0)
else:
sim_1 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book1, doc_id_b=book2, facet_name=facet)
sim_2 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book1, doc_id_b=book3, facet_name=facet)
sim_3 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book2, doc_id_b=book3, facet_name=facet)
if int(row["Selected Answer Nr."]) == 1 and sim_1 > sim_2 and sim_1 > sim_3:
correctly_assessed.append(1)
facet_wise[row["Facet"]].append(1)
agreement_store["True"].append(row["Agreement"])
elif int(row["Selected Answer Nr."]) == 2 and sim_2 > sim_1 and sim_2 > sim_3:
correctly_assessed.append(1)
facet_wise[row["Facet"]].append(1)
agreement_store["True"].append(row["Agreement"])
elif int(row["Selected Answer Nr."]) == 3 and sim_3 > sim_1 and sim_3 > sim_2:
correctly_assessed.append(1)
facet_wise[row["Facet"]].append(1)
agreement_store["True"].append(row["Agreement"])
else:
correctly_assessed.append(0)
agreement_store["False"].append(row["Agreement"])
facet_wise[row["Facet"]].append(0)
print("False:", np.mean(agreement_store["False"]))
print("True:", np.mean(agreement_store["True"]))
result_scores = {facet: sum(scores) / len(scores) for facet, scores in facet_wise.items()}
result_scores["all_facets"] = sum(correctly_assessed) / len(correctly_assessed)
return result_scores, correctly_assessed, facet_wise
def correlation_for_correctly_labeled(vectors, human_assessment_df: pd.DataFrame, doc_id_mapping: Dict[str, str],
facet_mapping: Dict[str, str], use_sum: bool):
# reverted_facets = {value: key for key, value in facet_mapping.items()}
ground_truth = defaultdict(list)
predicted = defaultdict(list)
skip_count = 0
for i, row in human_assessment_df.iterrows():
book1 = doc_id_mapping[row["Book 1"]]
book2 = doc_id_mapping[row["Book 2"]]
book3 = doc_id_mapping[row["Book 3"]]
if use_sum:
facet = facet_mapping["total"]
else:
facet = facet_mapping[row["Facet"]]
selection = row["Selection"]
if selection == "skip" or selection == "unsure":
skip_count += 1
continue
sim_1 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book1, doc_id_b=book2, facet_name=facet)
sim_2 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book1, doc_id_b=book3, facet_name=facet)
sim_3 = Vectorization.facet_sim(model_vectors=vectors, doc_id_a=book2, doc_id_b=book3, facet_name=facet)
if sim_1 > sim_2 and sim_1 > sim_3:
pred_label = 1
elif sim_2 > sim_1 and sim_2 > sim_3:
pred_label = 2
elif sim_3 > sim_1 and sim_3 > sim_2:
pred_label = 3
else:
print("warning")
pred_label = -1
ground_truth[row["Facet"]].append(int(row["Selected Answer Nr."]))
ground_truth["all_facets"].append(int(row["Selected Answer Nr."]))
predicted[row["Facet"]].append(pred_label)
predicted["all_facets"].append(pred_label)
# print(row["Facet"], "=", sum(facet_wise[row["Facet"]]))
print(f"{skip_count} times skipped!")
result_scores = {}
for facet, ground_truth_labels in ground_truth.items():
predicted_labels = predicted[facet]
corr = stats.spearmanr(ground_truth_labels, predicted_labels)
spearman = str(f'{abs(corr[0]):.3f}')
if corr[1] < 0.05:
spearman = f"*{spearman}"
result_scores[facet] = spearman
return result_scores
# result_scores = {facet: sum(scores) / len(scores) for facet, scores in facet_wise.items()}
# result_scores["all"] = sum(correctly_assessed) / len(correctly_assessed)
# return result_scores
def load_vectors_from_properties(number_of_subparts, corpus_size, data_set,
filter_mode, vectorization_algorithm):
use_sum = False
if "_sum" in vectorization_algorithm:
use_sum = True
vec_path = Vectorization.build_vec_file_name(number_of_subparts,
corpus_size,
data_set,
filter_mode,
vectorization_algorithm,
"real",
allow_combination=True)
vectors, _ = Vectorization.my_load_doc2vec_format(vec_path)
return vectors, use_sum
def calculate_vectors(data_set_name: str, vec_algorithms: List[str], filters: List[str]):
# try:
# corpus = Corpus.fast_load(path=os.path.join('corpora', data_set_name), load_entities=False)
# except FileNotFoundError:
# corpus = DataHandler.load_classic_gutenberg_as_corpus()
# Preprocesser.annotate_and_save(corpus, corpus_dir=f"corpora/{data_set_name}")
# corpus = Corpus.fast_load(path=os.path.join('corpora', data_set_name), load_entities=False)
for filter_mode in filters:
corpus = Corpus.fast_load("all",
"no_limit",
data_set_name,
filter_mode,
"real",
load_entities=False)
for vectorization_algorithm in vec_algorithms:
use_summation = False
if "_sum" in vectorization_algorithm:
vectorization_algorithm = vectorization_algorithm.replace("_sum", "")
use_summation = True
vec_file_name = Vectorization.build_vec_file_name('all',
'no_limit',
data_set_name,
filter_mode,
vectorization_algorithm,
'real')
if not os.path.isfile(vec_file_name):
Vectorizer.algorithm(input_str=vectorization_algorithm,
corpus=corpus,
save_path=vec_file_name,
return_vecs=False)
def evaluate(data_set_name: str, vec_algorithms: List[str], filters: List[str]):
human_assessment_df = pd.read_csv("../results/human_assessment/gutenberg_classic_20/human_assessed_complete.csv")
print(len(human_assessment_df.index))
human_assessment_df = human_assessment_df.loc[(human_assessment_df['Selection'] != "unsure")]
# & (human_assessment_df['Answers'] > 1)
# human_assessment_df = human_assessment_df.loc[(human_assessment_df['Agreement'] > 0.5)
# # & (human_assessment_df['Answers'] > 1)
# ]
print(len(human_assessment_df.index))
survey_id2doc_id = {1: "cb_17",
2: "cb_2",
3: "cb_0",
4: "cb_1",
5: "cb_3",
6: "cb_4",
7: "cb_5",
8: "cb_6",
9: "cb_9",
10: "cb_11",
11: "cb_12",
12: "cb_13",
13: "cb_14",
14: "cb_15",
15: "cb_8",
16: "cb_7",
17: "cb_10",
18: "cb_18",
19: "cb_19",
20: "cb_16",
}
facets = {"location": "loc", "time": "time", "atmosphere": "atm", "content": "cont", "plot": "plot", "total": ""}
tuples = []
correlation_tuples = []
correctness_table = {}
correctness_table_facet = {}
for filter in filters:
for vec_algorithm in vec_algorithms:
filtered_dataset = f'{data_set_name}_{filter}'
            # corpus = Corpus.fast_load(path=os.path.join('corpora', data_set_name), load_entities=False)
vecs, use_sum = load_vectors_from_properties(number_of_subparts="all",
corpus_size="no_limit",
data_set=data_set_name,
filter_mode=filter,
vectorization_algorithm=vec_algorithm)
corr_scores = correlation_for_correctly_labeled(vectors=vecs, human_assessment_df=human_assessment_df,
doc_id_mapping=survey_id2doc_id, facet_mapping=facets,
use_sum=use_sum)
correlation_tuples.append((filtered_dataset, vec_algorithm, corr_scores["total"],
corr_scores["time"], corr_scores["location"],
corr_scores["plot"], corr_scores["atmosphere"], corr_scores["content"],
corr_scores["all_facets"]))
scores, cor_ass, facet_wise = get_percentage_of_correctly_labeled(vectors=vecs,
human_assessment_df=human_assessment_df,
doc_id_mapping=survey_id2doc_id,
facet_mapping=facets,
use_sum=use_sum)
correctness_table[vec_algorithm] = cor_ass
correctness_table_facet[vec_algorithm] = facet_wise
tuples.append((filtered_dataset, vec_algorithm, scores["total"], scores["time"],
scores["location"],
scores["plot"],
scores["atmosphere"], scores["content"], scores["all_facets"]))
print((filtered_dataset, vec_algorithm, scores["total"], scores["time"],
scores["location"],
scores["plot"],
scores["atmosphere"], scores["content"], scores["all_facets"]))
try:
algo1 = "bert_pt"
algo2 = "book2vec_adv_dbow_pca"
true_true = 0
true_false = 0
false_true = 0
false_false = 0
for e1, e2 in zip(correctness_table[algo1], correctness_table[algo2]):
if e1 and e2:
true_true += 1
elif e1 and not e2:
true_false += 1
elif not e1 and e2:
false_true += 1
elif not e1 and not e2:
false_false += 1
else:
pass
table = [[true_true, true_false],
[false_true, false_false]]
print(table)
print()
print("Overall")
mcnemar_sig_text(table)
# facets = correctness_table_facet[algo1].keys()
for facet in facets:
true_true = 0
true_false = 0
false_true = 0
false_false = 0
for e1, e2 in zip(correctness_table_facet[algo1][facet], correctness_table_facet[algo2][facet]):
if e1 and e2:
true_true += 1
elif e1 and not e2:
true_false += 1
elif not e1 and e2:
false_true += 1
elif not e1 and not e2:
false_false += 1
else:
pass
table = [[true_true, true_false],
[false_true, false_false]]
print()
print(table)
print(facet)
mcnemar_sig_text(table)
chi_square_test(correctness_table[algo1], correctness_table[algo1])
except KeyError:
pass
result_df = pd.DataFrame(tuples, columns=["Data set", "Algorithm", "Total", "Time",
"Location", "Plot", "Atmosphere", "Content", "Micro AVG"])
result_df = result_df.round(3)
result_df.to_csv("results/human_assessment/performance.csv", index=False)
print(result_df.to_latex(index=False))
corr_df = pd.DataFrame(correlation_tuples, columns=["Data set", "Algorithm", "Total", "Time",
"Location", "Plot", "Atmosphere", "Content", "Micro AVG"])
corr_df.to_csv("results/human_assessment/correlation_results.csv", index=False)
print(corr_df.to_latex(index=False))
if __name__ == '__main__':
data_set = "classic_gutenberg"
# algorithms = ["avg_wv2doc", "doc2vec", "book2vec", "book2vec_concat"]
# algorithms = [
# # "book2vec_o_time", "book2vec_o_loc", "book2vec_o_atm", "book2vec_o_sty", "book2vec_o_plot", "book2vec_o_raw",
# "book2vec", "book2vec_sum", "book2vec_avg", "book2vec_concat", "book2vec_auto", "book2vec_pca",
# "book2vec_dbow",
# # "book2vec_dbow_sum", "book2vec_dbow_avg", "book2vec_dbow_concat", "book2vec_dbow_auto",
# "book2vec_dbow_pca",
# #
# "book2vec_wo_raw",
# # "book2vec_wo_raw_sum", "book2vec_wo_raw_avg", "book2vec_wo_raw_concat",
# # "book2vec_wo_raw_auto",
# "book2vec_wo_raw_pca",
# # "book2vec_dbow_wo_raw", "book2vec_dbow_wo_raw_sum", "book2vec_dbow_wo_raw_avg",
# # "book2vec_dbow_wo_raw_concat", "book2vec_dbow_wo_raw_auto",
# "book2vec_dbow_wo_raw_pca",
# #
# # "book2vec_net_only", "book2vec_net_only_sum", "book2vec_net_only_avg",
# # "book2vec_net_only_concat", "book2vec_net_only_auto",
# "book2vec_net_only_pca",
# # "book2vec_dbow_net_only", "book2vec_dbow_net_only_pca", "book2vec_dbow_net_only_sum",
# # "book2vec_dbow_net_only_avg", "book2vec_dbow_net_only_concat",
# # "book2vec_dbow_net_only_auto",
# "book2vec_dbow_net_only_pca",
# #
# # "book2vec_net", "book2vec_net_sum", "book2vec_net_avg",
# # "book2vec_net_concat", "book2vec_net_auto",
# "book2vec_net_pca",
# # "book2vec_dbow_net", "book2vec_dbow_net_pca", "book2vec_dbow_net_sum", "book2vec_dbow_net_avg",
# # "book2vec_dbow_net_concat", "book2vec_dbow_net_auto",
# "book2vec_dbow_net_pca",
# #
# "book2vec_adv",
# # "book2vec_adv_sum", "book2vec_adv_concat", "book2vec_adv_avg", "book2vec_adv_auto",
# "book2vec_adv_pca",
# # "book2vec_adv_dbow", "book2vec_adv_dbow_sum", "book2vec_adv_dbow_concat", "book2vec_adv_dbow_avg",
# # "book2vec_adv_dbow_auto",
# "book2vec_adv_dbow_pca",
#
# # "book2vec_adv_dbow_wo_raw_pca",
# # "book2vec_adv_dbow_net_wo_raw_pca",
#
# # "book2vec_window_pca",
# # "book2vec_dbow_window_pca",
# "book2vec_adv_window_pca",
# "book2vec_adv_dbow_window_pca",
#
#
# ]
# algorithms = ["book2vec_sum", "book2vec"]
algorithms = [
# "bow",
# "avg_wv2doc_restrict10000",
# "doc2vec",
# "doc2vec_dbow",
# "doc2vec_sentence_based_100",
# "doc2vec_sentence_based_1000",
# "doc2vec_chunk",
# "doc2vec_dbow_chunk"
"bert_pt",
# "bert_pt_chunk",
# # "bert_sentence_based_100_pt",
# "bert_sentence_based_1000_pt",
# "roberta_pt",
# "roberta_pt_chunk",
# "roberta_sentence_based_1000_pt",
# "xlm_pt",
# "xlm_pt_chunk",
# "xlm_sentence_based_1000_pt",
# "psif",
# "book2vec_pca",
# "book2vec_concat",
# "book2vec_auto",
# "book2vec_avg",
#
# "book2vec_dbow_pca",
# "book2vec_dbow_concat",
# "book2vec_dbow_auto",
"book2vec_dbow_avg",
"book2vec_dbow_wo_raw_avg",
"book2vec_dbow_net_only_avg",
"book2vec_dbow_net_avg",
# # "book2vec_advn",
# "book2vec_advn_pca",
# "book2vec_advn_concat",
# "book2vec_advn_auto",
# "book2vec_advn_avg",
# # "book2vec_advn_dbow",
"book2vec_advn_dbow_pca",
# "book2vec_advn_dbow_concat",
# "book2vec_advn_dbow_auto",
"book2vec_advn_dbow_avg",
# "book2vec_bert_pt_pca",
# "book2vec_bert_pt_window",
"book2vec_advn_window_pca",
"book2vec_advn_dbow_window_avg",
]
# algorithms = ["avg_wv2doc", "doc2vec", "doc2vec_dbow",
# "doc2vec_sentence_based_100", "doc2vec_sentence_based_1000",
# "book2vec", "book2vec_concat", "book2vec_wo_raw", "book2vec_wo_raw_concat",
# "book2vec_dbow", "book2vec_dbow_concat",
# "book2vec_dbow_wo_raw", "book2vec_dbow_wo_raw_concat",
# "book2vec_net", "book2vec_net_concat",
# "book2vec_dbow_net", "book2vec_dbow_net_concat",
# "book2vec_net_only", "book2vec_net_only_concat",
# "book2vec_dbow_net_only", "book2vec_dbow_net_only_concat",
# "book2vec_adv", "book2vec_adv_concat", "bow",
# "bert", "bert_sentence_based_100", "bert_sentence_based_100_pt", "bert_sentence_based_1000",
# "bert_sentence_based_1000_pt",
# # "flair_sentence_based_100", "flair_sentence_based_1000",
# "roberta_sentence_based_100_pt", "xlm_sentence_based_100_pt",
# "psif"
# ]
filters = [
# "no_filter",
"specific_words_strict"
]
calculate_vectors(data_set, algorithms, filters)
evaluate(data_set, algorithms, filters)
|
# coding=utf-8
# Copyright (c) 2021 Ant Group
import unicodedata
import numpy as np
from data_structure.syntax_tree import BinaryNode
def to_binary_root(tokens, token_tree, start=0):
if isinstance(token_tree, str):
assert token_tree == tokens[start]
return BinaryNode(None, None, start, token_tree), 1
else:
assert len(token_tree) == 2 # left and right
left, len1 = to_binary_root(tokens, token_tree[0], start=start)
right, len2 = to_binary_root(tokens, token_tree[1], start=start + len1)
node = BinaryNode(left, right, None, None)
return node, len1 + len2
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
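# Hypothetical usage note (illustration only): softmax of a small score vector.
#   softmax(np.array([1.0, 2.0, 3.0]))  ->  array([0.09003057, 0.24472847, 0.66524096])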
def get_all_subword_id(mapping, idx):
current_id = mapping[idx]
id_for_all_subwords = [tmp_id for tmp_id, v in enumerate(mapping) if v == current_id]
return id_for_all_subwords
def _run_strip_accents(text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def match_tokenized_to_untokenized(subwords, sentence):
token_subwords = np.zeros(len(sentence))
sentence = [_run_strip_accents(x) for x in sentence]
token_ids, subwords_str, current_token, current_token_normalized = [-1] * len(subwords), "", 0, None
for i, subword in enumerate(subwords):
if subword in ["[CLS]", "[SEP]"]:
continue
while current_token_normalized is None:
current_token_normalized = sentence[current_token].lower()
if subword.startswith("[UNK]"):
unk_length = int(subword[6:])
subwords[i] = subword[:5]
subwords_str += current_token_normalized[len(subwords_str):len(subwords_str) + unk_length]
else:
subwords_str += subword[2:] if subword.startswith("##") else subword
if not current_token_normalized.startswith(subwords_str):
return False
token_ids[i] = current_token
token_subwords[current_token] += 1
if current_token_normalized == subwords_str:
subwords_str = ""
current_token += 1
current_token_normalized = None
assert current_token_normalized is None
while current_token < len(sentence):
assert not sentence[current_token]
current_token += 1
assert current_token == len(sentence)
return token_ids
def _find_point_in_spans(point, start_index, spans):
index = start_index
while index < len(spans):
span = spans[index]
if span is not None and span[0] < span[1]: # span is not empty
if point >= span[0] and point < span[1]:
break
else:
assert span is None or span[0] == span[1] == 0
index += 1
return index
def _align_spans(original_spans, token_spans):
word_starts = []
word_ends = []
while token_spans and (token_spans[-1] is None or token_spans[-1][1] == 0):
token_spans.pop() # remove trailing empty spans
last = 0
for (start, end) in original_spans:
first = _find_point_in_spans(start, last, token_spans)
last = _find_point_in_spans(end - 1, first, token_spans)
word_starts.append(first)
word_ends.append(last)
return word_starts, word_ends
def get_sentence_from_words(words, word_sep):
sentence = []
word_char_spans = []
offset = 0
for word in words:
length = len(word)
sentence.append(word)
word_char_spans.append((offset, offset + length))
offset += length + len(word_sep)
sentence = word_sep.join(sentence)
return sentence, word_char_spans
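# Hypothetical usage note (illustration only): rebuild a sentence and keep the
# character span of each word.
#   get_sentence_from_words(["New", "York"], " ")  ->  ("New York", [(0, 3), (4, 8)])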
|
import random
import time
#You are playing an antique RPG game.
#You are asked for your age; a warning is printed if the player is not over 18.
#You are asked if you want to read an introduction to the game.
#If you choose yes, a brief introduction is printed.
#You encounter a maze and are given the option to attempt it or not.
#If you attempt it, you loop back to the start of the maze until you choose not to attempt it again.
#You encounter a mystery box that may grant you wishes, depending on whether random.random comes up high enough.
#If you do get the wishes, you are asked which wishes you want granted.
#You are stopped by a robot that makes you answer 1 of 5 math questions, chosen at random with random.randint, until you get one right.
#
starttime = time.time()
def ansprocess(answer):
return bool(answer)
def printer(printorder):
if printorder == 1:
print "In this game, if you wish to say no to a question, do not type anything and press enter."
elif printorder == 2:
print "Warning:\nYou will be psychologically scarred if you continue."
elif printorder == 3:
print "This is a simple game where you are going to play a few ridiculously childish minigames."
elif printorder == 4:
print "You encounter a large maze. There seems to be some reward if you pass through it."
elif printorder == 5:
print "You wander the maze for hours, eventually stumbling onto a teleporter that returns you to the start of the maze."
elif printorder == 6:
print "You decide to go around the maze instead of going through it."
elif printorder == 7:
print "\nA mysterious box with question marks carved all over its surface appears in front of you. There is a note on top of it. It says: \"This mystery box contains three wishes. However, there is only a 1 in a billion chance it will not disappear when you open it.\""
elif printorder == 8:
print "You are officially the luckiest person that ever existed. You are granted 3 wishes. You cannot wish for more wishes, nor wish that these conditions are nullified."
elif printorder == 9:
print "The box instantly vanishes as you try to open it."
elif printorder == 10:
print "The box is replaced with another note, saying: \"Your wishes will be granted at the end of the game. Farewell.\""
elif printorder == 11:
print "You decide not to open the box and continue travelling."
elif printorder == 12:
print "You encounter a strange humanoid robot in your way. It tells you that you must answer a random math question in order to progress. If you answer wrong, the question may change. You MUST type a number to answer the question."
elif printorder == 13:
print "You have answered correctly. You may proceed."
def legalage(legal):
if not ansprocess(legal):
printer(2)
def introduction(intro):
if ansprocess(intro):
printer(3)
def mazeloop(maze):
if ansprocess(maze):
printer(5)
maze = raw_input("\nDo you wish to attempt the maze again? ")
mazeloop(maze)
else:
printer(6)
def mysterybox():
    if random.random() >= 0.8 and random.random() >= 0.8:
printer(8)
wish1 = raw_input("What is your first wish? ")
wish2 = raw_input("What is your second wish? ")
wish3 = raw_input("What is your third wish? ")
printer(10)
else:
printer(9)
def mysteryboxq(decision):
if ansprocess(decision):
return mysterybox()
else:
printer(11)
def q1(answer):
if answer == 7:
printer(13)
else:
randquestion = random.randint(1, 5)
mathquestions(randquestion)
def q2(answer):
if answer == 169:
printer(13)
else:
randquestion = random.randint(1, 5)
mathquestions(randquestion)
def q3(answer):
if answer == 11111:
printer(13)
else:
randquestion = random.randint(1, 5)
mathquestions(randquestion)
def q4(answer):
if answer == 9:
printer(13)
else:
randquestion = random.randint(1, 5)
mathquestions(randquestion)
def q5(answer):
if answer == 288 or answer == 2:
printer(13)
else:
randquestion = random.randint(1, 5)
mathquestions(randquestion)
def mathquestions(qnumber):
if qnumber == 1:
q1(int(raw_input("1+2*3 = ? ")))
elif qnumber == 2:
q2(int(raw_input("13*13 = ? ")))
elif qnumber == 3:
q3(int(raw_input("12345-1234 = ? ")))
elif qnumber == 4:
q4(int(raw_input("8-1*0+2/2 = ? ")))
elif qnumber == 5:
q5(int(raw_input("48/2(9+3) = ? ")))
def timepassed(start, end):
return end - start
def timer(timepassed):
out = """
You took {} seconds to get through the game
""".format(timepassed)
return out
def main():
printer(1)
legal = raw_input("Greetings, Traveler.\nAre you over the age of 18? ")
legalage(legal)
intro = raw_input("Do you wish to read an introduction to the game? ")
introduction(intro)
printer(4)
maze = raw_input("Do you wish to attempt the maze or not? ")
mazeloop(maze)
printer(7)
decision = raw_input("Do you wish to open it? ")
wishes = mysteryboxq(decision)
printer(12)
randquestion = random.randint(1, 5)
mathquestions(randquestion)
endtime = time.time()
timeres = timepassed(starttime, endtime)
gametime = timer(timeres)
print gametime
main()
|
from model.contact_form import ContactForm
import re
def test_add_contact(app, json_data_contacts, db, check_ui):
contact = json_data_contacts
old_contacts_list = db.get_db_contacts_list()
app.contact.create(contact)
new_contacts_list = db.get_db_contacts_list()
# Append old list with new item, clear and sort lists ascending and check if they are still equal
old_contacts_list.append(contact)
if check_ui:
sorted_db_new_contacts_list = sorted([clear(x) for x in new_contacts_list], key=ContactForm.id_or_max)
sorted_ui_new_contacts_list = sorted([clear(x) for x in app.contact.get_contacts_list()], key=ContactForm.id_or_max)
assert sorted_db_new_contacts_list == sorted_ui_new_contacts_list
def clear(s):
contact_name_cleared = re.sub("[() -]", "", s.contact_name)
contact_lastname_cleared = re.sub("[() -]", "", s.contact_lastname)
s.contact_name = contact_name_cleared
s.contact_lastname = contact_lastname_cleared
return s |
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from collections import defaultdict, OrderedDict
FMT = "// %80s : %s "
COUNT = defaultdict(lambda:0)
class Loc(object):
"""
Identifies a calling python function from its pframe
"""
@classmethod
def Tag(cls, func, name):
if func is None:return None
pass
identity = "%s.%s" % (name, func )
global COUNT
idx = COUNT[identity]
tag = "%s.[%.2d]" % ( func, idx)
COUNT[identity] += 1
return tag, idx
def __init__(self, pframe, name):
"""
:param pframe: python frame of caller
:param name: module __name__ of caller
"""
self.name = name
if pframe is not None:
func = pframe.f_code.co_name
doc = pframe.f_code.co_consts[0]
doclines = filter(None, doc.split("\n")) if doc is not None else []
label = doclines[0].lstrip() if len(doclines) > 0 else "no-docstring-label" # 1st line of docstring
tag, idx = self.Tag(func, name)
hdr = FMT % (tag, label)
else:
func = None
label = "-"
tag = None
idx = None
hdr = None
pass
self.func = func
self.label = label
self.tag = tag
self.idx = idx
self.hdr = hdr
def __repr__(self):
disp_ = lambda k:" %10s : %s " % ( k, getattr(self, k, None))
return "\n".join(map(disp_, "name func label tag idx hdr".split()))
def test_Loc():
"""
First Line of docstring becomes label
"""
loc = Loc(sys._getframe(), __name__)
print loc
def test_Introspect_(pframe):
func = pframe.f_code.co_name
doc = pframe.f_code.co_consts[0]
doclines = filter(None, doc.split("\n"))
label = doclines[0].lstrip() if len(doclines) > 0 else "-"
print "doc:[%s]" % doc
print "func:[%s]" % func
print "label:[%s]" % label
def test_Introspect():
test_Introspect_(sys._getframe())
if __name__ == '__main__':
test_Loc();
|
#!/usr/bin/env python3
import sys,os,json,re
assert sys.version_info >= (3,9), "This script requires at least Python 3.9"
def load(l):
    with open(os.path.join(sys.path[0], l)) as f:
        data = f.read()
    return json.loads(data)
def find_passage(game_desc, pid):
for p in game_desc["passages"]:
if p["pid"] == pid:
return p
return {}
# Removes Harlowe formatting from Twison description
def format_passage(description):
description = re.sub(r'//([^/]*)//',r'\1',description)
description = re.sub(r"''([^']*)''",r'\1',description)
description = re.sub(r'~~([^~]*)~~',r'\1',description)
description = re.sub(r'\*\*([^\*]*)\*\*',r'\1',description)
description = re.sub(r'\*([^\*]*)\*',r'\1',description)
description = re.sub(r'\^\^([^\^]*)\^\^',r'\1',description)
description = re.sub(r'(\[\[[^\|]*?)\|([^\]]*?\]\])',r'\1->\2',description)
description = re.sub(r'\[\[([^(->\])]*?)->[^\]]*?\]\]',r'[ \1 ]',description)
description = re.sub(r'\[\[(.+?)\]\]',r'[ \1 ]',description)
return description
def update(current,choice,game_desc):
if choice == "":
return current
for l in current["links"]:
if l["name"] == choice:
current = find_passage(game_desc, l["pid"])
return current
print("I don't understand. Please try again!")
return current
def render(current):
print("\n\n")
print(current["name"])
print(format_passage(current["text"]))
print("\n")
def get_input(current):
choice = input("What would you like to do? (Type quit to exit) ")
#choice = choice.lower().strip()
return choice
def main():
game_desc = load("RuinedCity.json")
current = find_passage(game_desc, game_desc["startnode"])
choice = ""
while choice != "quit" and current != {}:
current = update(current,choice,game_desc)
render(current)
choice = get_input(current)
print("Thanks for Playing!")
if __name__ == "__main__":
main()
|
import os
import pandas as pd
import numpy as np
import json
import pickle
import zipfile
from keras.preprocessing import image
from tqdm import tqdm
import time
from sklearn.model_selection import train_test_split
from spacekit.analyzer.track import stopwatch
def load_datasets(filenames, index_col="index", column_order=None, verbose=1):
"""Import one or more dataframes from csv files and merge along the 0 axis (rows / horizontal). Assumes the datasets use the same index_col name and identical column names (although this is not strictly required) since this function does not handle missing data or NaNs.
Parameters
----------
filenames : list
path(s) to csv files of saved dataframes.
index_col : str, optional
name of the index column to set
Returns
-------
DataFrame
Labeled dataframe loaded from csv file(s).
"""
if len(filenames) == 1:
df = pd.read_csv(filenames[0], index_col=index_col)
else:
dfs = []
for filename in filenames:
dfs.append(pd.read_csv(filename, index_col=index_col))
df = pd.concat([d for d in dfs], axis=0)
if column_order:
cols = [c for c in column_order if c in df.columns]
df = df[cols]
print("Input Shape: ", df.shape)
if verbose:
print(df.columns)
return df
def stratified_splits(df, target="label", v=0.85):
    """Splits a Pandas dataframe into feature (X) and target (y) train, test and validation sets.
    Parameters
    ----------
    df : Pandas dataframe
        preprocessed SVM regression test dataset
    target : str, optional
        target class label for alignment model predictions, by default "label"
    v : float, optional
        fraction of the training split kept for training (the remainder becomes the
        validation set); set to 0 to skip creating a validation set, by default 0.85
    Returns
    -------
    tuples of Pandas dataframes
        data, labels: features (X) and targets (y) split into train, test, validation sets
    """
print("Splitting Data ---> X-y ---> Train-Test-Val")
seed = np.random.randint(1, 42)
y = df[target]
X = df.drop(target, axis=1, inplace=False)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, stratify=y, random_state=seed
)
X_val, y_val = np.asarray([]), np.asarray([])
if v > 0:
try:
X_train, X_val, y_train, y_val = train_test_split(
X_train,
y_train,
test_size=1 - v,
shuffle=True,
stratify=y_train,
random_state=seed,
)
except ValueError:
X_train, X_val, y_train, y_val = train_test_split(
X_train,
y_train,
test_size=0.2,
shuffle=True,
stratify=y_train,
random_state=seed,
)
data = (X_train, X_test, X_val)
labels = (y_train, y_test, y_val)
return data, labels
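# --- Hypothetical usage sketch (illustration only) ---
# Splitting a toy dataframe with the helper above; the column names and values
# are made up for demonstration.
#
#   toy = pd.DataFrame({"feature": range(100), "label": [0, 1] * 50})
#   (X_train, X_test, X_val), (y_train, y_test, y_val) = stratified_splits(
#       toy, target="label", v=0.85)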
def load_npz(npz_file="data/img_data.npz"):
try:
img_data = np.load(npz_file)
X = img_data["images"]
y = img_data["labels"]
index = img_data["index"]
img_data.close()
return (index, X, y)
except Exception as e:
print(e)
return None
def save_npz(i, X, y, npz_file="data/img_data.npz"):
"""Store compressed data to disk"""
np.savez(npz_file, index=i, images=X, labels=y)
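# --- Hypothetical usage sketch (illustration only) ---
# Round-tripping a tiny image/label set through save_npz/load_npz; the file
# name, shapes, and values are made up for demonstration.
#
#   idx = np.arange(4)
#   X = np.zeros((4, 128, 128, 3), dtype=np.float32)
#   y = np.array([0, 1, 0, 1])
#   save_npz(idx, X, y, npz_file="data/img_data.npz")
#   index, images, labels = load_npz(npz_file="data/img_data.npz")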
def read_channels(channels, w, h, d, exp=None, color_mode="rgb"):
"""Loads PNG image data and converts to 3D arrays.
Parameters
----------
channels : tuple
image frames (original, source, gaia)
w : int
image width
h : int
image height
d : int
depth (number of image frames)
exp : int, optional
expand array dimensions ie reshape to (exp, w, h, 3), by default None
color_mode : str, optional
RGB (3 channel images) or grayscale (1 channel), by default "rgb". SVM predictions requires exp=3; set to None for training.
Returns
-------
numpy array
image pixel values as array
"""
t = (w, h)
image_frames = [
image.load_img(c, color_mode=color_mode, target_size=t) for c in channels
]
img = np.array([image.img_to_array(i) for i in image_frames])
if exp is None:
img = img.reshape(w, h, d)
else:
img = img.reshape(exp, w, h, 3)
return img
class ImageIO:
"""Parent Class for image file input/output operations"""
def __init__(self, img_path, format="png", data=None):
self.img_path = img_path
self.format = self.check_format(format)
self.data = data
def check_format(self, format):
"""Checks the format type of ``img_path`` (``png``, ``jpg`` or ``npz``) and initializes the ``format`` attribute accordingly.
Parameters
----------
format : str
(``png``, ``jpg`` or ``npz``)
Returns
-------
str
(``png``, ``jpg`` or ``npz``)
"""
sfx = self.img_path.split(".")[-1]
if sfx == "npz":
return "npz"
else:
return format
def load_npz(self, npz_file=None, keys=["index", "images", "labels"]):
if npz_file is None:
npz_file = self.img_path
try:
img_data = np.load(npz_file)
index = img_data[keys[0]]
X = img_data[keys[1]]
if len(keys) > 2:
y = img_data[keys[2]]
img_data.close()
return (index, X, y)
else:
img_data.close()
return index, X
except Exception as e:
print(e)
return None
def load_multi_npz(self, i="img_index.npz", X="img_data.npz", y="img_labels.npz"):
"""Load numpy arrays from individual feature/image data, label and index compressed files on disk"""
(X_train, X_test, X_val) = self.load_npz(
npz_file=X, keys=["X_train", "X_test", "X_val"]
)
(y_train, y_test, y_val) = self.load_npz(
npz_file=y, keys=["y_train", "y_test", "y_val"]
)
(train_idx, test_idx, val_idx) = self.load_npz(
npz_file=i, keys=["train_idx", "test_idx", "val_idx"]
)
train = (train_idx, X_train, y_train)
test = (test_idx, X_test, y_test)
val = (val_idx, X_val, y_val)
return train, test, val
def save_npz(self, i, X, y, npz_file="data/img_data.npz"):
"""Store compressed data to disk"""
np.savez(npz_file, index=i, images=X, labels=y)
def save_multi_npz(self, train, test, val, data_path="data"):
np.savez(
f"{data_path}/index.npz",
train_idx=train[0],
test_idx=test[0],
val_idx=val[0],
)
np.savez(
f"{data_path}/images.npz", X_train=train[1], X_test=test[1], X_val=val[1]
)
np.savez(
f"{data_path}/labels.npz", y_train=train[2], y_test=test[2], y_val=val[2]
)
def split_arrays(self, data, t=0.6, v=0.85):
if type(data) == pd.DataFrame:
sample = data.sample(frac=1)
else:
sample = data
if v > 0:
return np.split(sample, [int(t * len(data)), int(v * len(data))])
else:
arrs = np.split(sample, [int(t * len(data))])
arrs.append(np.asarray([]))
return arrs
def split_arrays_from_npz(self, v=0.85):
"""Loads images (X), labels (y) and index (i) from a single .npz compressed numpy file. Splits into train, test, val sets using 70-20-10 ratios.
Returns
-------
tuples
train, test, val tuples of numpy arrays. Each tuple consists of an index, feature data (X, for images these are the actual pixel values) and labels (y).
"""
(index, X, y) = self.load_npz()
train_idx, test_idx, val_idx = self.split_arrays(index, v=v)
X_train, X_test, X_val = self.split_arrays(X, v=v)
y_train, y_test, y_val = self.split_arrays(y, v=v)
train = (train_idx, X_train, y_train)
test = (test_idx, X_test, y_test)
val = (val_idx, X_val, y_val)
return train, test, val
def split_df_from_arrays(self, train, test, val, target="label"):
if self.data is None:
return
X_train = self.data.loc[train[0]].drop(target, axis=1, inplace=False)
X_test = self.data.loc[test[0]].drop(target, axis=1, inplace=False)
y_train = self.data.loc[train[0]][target]
y_test = self.data.loc[test[0]][target]
X_val, y_val = pd.DataFrame(), pd.DataFrame()
if len(val[0]) > 0:
X_val = self.data.loc[val[0]].drop(target, axis=1, inplace=False)
y_val = self.data.loc[val[0]][target]
X = (X_train, X_test, X_val)
y = (y_train, y_test, y_val)
return X, y
class SVMImageIO(ImageIO):
"""Subclass for loading Single Visit Mosaic total detection .png images from local disk into numpy arrays and performing initial preprocessing and labeling for training a CNN or generating predictions on unlabeled data.
Parameters
----------
ImageIO: class
ImageIO parent class
"""
def __init__(
self,
img_path,
w=128,
h=128,
d=9,
inference=True,
format="png",
data=None,
target="label",
v=0.85,
):
"""Instantiates an SVMFileIO object.
Parameters
----------
img_path : string
path to local directory containing png files
w : int, optional
image pixel width, by default 128
h : int, optional
image pixel height, by default 128
d : int, optional
channel depth, by default 9
inference: bool, optional
determines how to load images (set to False for training), by default True
format: str, optional
format type of image file(s), ``png``, ``jpg`` or ``npz``, by default "png"
data: dataframe, optional
used to load mlp data inputs and split into train/test/validation sets, by default None
target: str, optional
name of the target column in dataframe, by default "label"
v: float, optional
size ratio for validation set, by default 0.85
"""
super().__init__(img_path, format=format, data=data)
self.w = w
self.h = h
self.d = d
self.inference = inference
self.target = target
self.v = v
def load(self):
if self.inference is True: # idx, images
if self.format in ["png", "jpg"]:
return self.detector_prediction_images(self.data, exp=3)
elif self.format == "npz":
return super().load_npz(keys=["index", "images"])
else:
if self.format in ["png", "jpg"]:
X, y = stratified_splits(self.data, target=self.target, v=self.v)
train, test, val = self.load_from_data_splits(*X)
elif self.format == "npz":
train, test, val = super().split_arrays_from_npz(v=self.v)
X, y = super().split_df_from_arrays(
train, test, val, target=self.target
)
return (X, y), (train, test, val)
def load_from_data_splits(self, X_train, X_test, X_val):
"""Read in train/test files and produce X-y data splits.
Parameters
----------
X_train : numpy.ndarray
training image inputs
X_test : numpy.ndarray
test image inputs
X_val : numpy.ndarray
validation image inputs
Returns
-------
nested lists
train, test, val nested lists each containing an index of the visit names and png image data as numpy arrays.
"""
start = time.time()
stopwatch("LOADING IMAGES", t0=start)
print("\n*** Training Set ***")
train = self.detector_training_images(X_train)
print("\n*** Test Set ***")
test = self.detector_training_images(X_test)
if len(X_val) > 0:
print("\n*** Validation Set ***")
val = self.detector_training_images(X_val)
else:
val = [X_val, X_val, X_val]
end = time.time()
print("\n")
stopwatch("LOADING IMAGES", t0=start, t1=end)
print("\n[i] Length of Splits:")
print(f"X_train={len(train[1])}, X_test={len(test[1])}, X_val={len(val[1])}")
return train, test, val
def get_labeled_image_paths(self, i):
"""Creates lists of negative and positive image filepaths, assuming the image files are in subdirectories named according to the class labels e.g. "0" and "1" (Similar to how Keras ``flow_from_directory`` works). Note: this method expects 3 images in the subdirectory, two of which have suffices _source and _gaia appended, and a very specific path format: ``{img_path}/{label}/{i}/{i}_{suffix}.png`` where ``i`` is typically the full name of the visit. This may be made more flexible in future versions but for now is more or less hardcoded for SVM images generated by ``spacekit.skopes.hst.svm.prep`` or ``corrupt`` modules.
Parameters
----------
i : str
image filename
Returns
-------
tuples
image filenames for each image type (original, source, gaia)
"""
fmt = self.format
neg = (
f"{self.img_path}/0/{i}/{i}.{fmt}",
f"{self.img_path}/0/{i}/{i}_source.{fmt}",
f"{self.img_path}/0/{i}/{i}_gaia.{fmt}",
)
pos = (
f"{self.img_path}/1/{i}/{i}.{fmt}",
f"{self.img_path}/1/{i}/{i}_source.{fmt}",
f"{self.img_path}/1/{i}/{i}_gaia.{fmt}",
)
return neg, pos
def detector_training_images(self, X_data, exp=None):
"""Load image files from class-labeled folders containing pngs into numpy arrays. Image arrays are **not** reshaped since this assumes data augmentation will be performed at training time.
Parameters
----------
X_data : Pandas dataframe
input data (assumes index values are the image filenames)
exp : int, optional
expand image array shape into its constituent frame dimensions, by default None
Returns
-------
tuple
index, image input array, image class labels: (idx, X, y)
"""
idx = list(X_data.index)
files, labels = [], []
for i in idx:
neg, pos = self.get_labeled_image_paths(i)
if os.path.exists(neg[0]):
files.append(neg)
labels.append(0)
elif os.path.exists(pos[0]):
files.append(pos)
labels.append(1)
else:
# print(f"missing: {i}")
idx.remove(i)
img = []
for ch1, ch2, ch3 in tqdm(files):
img.append(read_channels([ch1, ch2, ch3], self.w, self.h, self.d, exp=exp))
X, y = np.array(img, np.float32), np.array(labels)
return (idx, X, y)
def detector_prediction_images(self, X_data, exp=3):
"""Load image files from pngs into numpy arrays. Image arrays are reshaped into the appropriate dimensions for generating predictions in a pre-trained image CNN (no data augmentation is performed).
Parameters
----------
X_data : Pandas dataframe
input data (assumes index values are the image filenames)
exp : int, optional
expand image array shape into its constituent frame dimensions, by default 3
Returns
-------
Pandas Index, numpy array
image name index, arrays of image pixel values
"""
fmt = self.format
image_files = []
idx = list(X_data.index)
for i in idx:
img_frames = (
f"{self.img_path}/{i}/{i}.{fmt}",
f"{self.img_path}/{i}/{i}_source.{fmt}",
f"{self.img_path}/{i}/{i}_gaia.{fmt}",
)
if os.path.exists(img_frames[0]):
image_files.append(img_frames)
else:
idx.remove(i)
start = time.time()
stopwatch("LOADING IMAGES", t0=start)
img = []
for ch1, ch2, ch3 in tqdm(image_files):
img.append(read_channels([ch1, ch2, ch3], self.w, self.h, self.d, exp=exp))
X_img = np.array(img, np.float32)
end = time.time()
stopwatch("LOADING IMAGES", t0=start, t1=end)
print("Inputs: ", X_img.shape[0])
print("Dimensions: ", X_img.shape[1])
print("Width: ", X_img.shape[2])
print("Height: ", X_img.shape[3])
print("Channels: ", X_img.shape[4])
print("Input Shape: ", X_img.shape)
return idx, X_img
def save_dct_to_txt(data_dict):
"""Saves the key-value pairs of a dictionary to text files on local disk, with each key as a filename and its value(s) as the contents of that file.
Parameters
----------
data_dict : dict
dictionary containing keys as filenames and values as the contents to be saved to a text file.
Returns
-------
list
list of paths to each file saved to local disk.
"""
keys = []
for filename, data in data_dict.items():
key = f"{filename}.txt"
keys.append(key)
with open(f"{key}", "w") as f:
for item in data:
f.writelines(f"{item}\n")
print(f"Saved file keys:\n {keys}")
return keys
def save_dict(data_dict, df_key=None):
keys = []
for key, data in data_dict.items():
filename = f"{key}.txt"
with open(filename, "w") as f:
try:
json.dump(data, f)
except Exception as e:
print(e)
f.writelines(data)
keys.append(filename)
if df_key is not None:
keys.append(df_key)
print(f"File keys:\n {keys}")
return keys
def save_json(data, name):
with open(name, "w") as fp:
json.dump(data, fp)
print(f"\nJSON file saved:\n {os.path.abspath(name)}")
def save_dataframe(df, df_key, index_col="ipst"):
df[index_col] = df.index
df.to_csv(df_key, index=False)
print(f"Dataframe saved as: {df_key}")
df.set_index(index_col, drop=True, inplace=True)
return df
def save_to_pickle(data_dict, target_col=None, df_key=None):
keys = []
for k, v in data_dict.items():
if target_col is not None:
os.makedirs(f"{target_col}", exist_ok=True)
key = f"{target_col}/{k}"
else:
key = k
with open(key, "wb") as file_pi:
pickle.dump(v, file_pi)
print(f"{k} saved as {key}")
keys.append(key)
if df_key is not None:
keys.append(df_key)
print(f"File keys:\n {keys}")
return keys
def zip_subdirs(top_path, zipname="models.zip"):
file_paths = []
for root, _, files in os.walk(top_path):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
print("Zipping model files:")
with zipfile.ZipFile(zipname, "w") as zip_ref:
for file in file_paths:
zip_ref.write(file)
print(file)
|
from typing import List


class Solution:
    def maxDistToClosest(self, seats: List[int]) -> int:
        # Widest gap to the nearest occupied seat: check the leading run of
        # empty seats, each interior gap (halved), and the trailing run.
        best, prev = 0, -1
        for i, seat in enumerate(seats):
            if seat == 1:
                best = i if prev == -1 else max(best, (i - prev) // 2)
                prev = i
        return max(best, len(seats) - 1 - prev)
|
from django.urls import path
from . import views
from .views import LampsView, LampDetailsHistorique
urlpatterns = [
path('', views.openApp, name='openApp'),
path('getGeojson', LampsView.as_view()),
path('lamphistorique/<int:pk>', LampDetailsHistorique.as_view()),
path('getNearestLamp/', views.nearestLamps)
] |
#!/usr/bin/python
"""
this is the code to accompany the Lesson 2 (SVM) mini-project
use an SVM to identify emails from the Enron corpus by their authors
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
# features_train = features_train[:len(features_train)/100]
# labels_train = labels_train[:len(labels_train)/100]
#########################################################
### your code goes here ###
from sklearn.svm import SVC
clf = SVC(kernel="rbf", C=10000)

t0 = time()
clf.fit(features_train, labels_train)
print("training time: %0.3fs" % (time() - t0))

preds = clf.predict(features_test)

from sklearn.metrics import accuracy_score
print("accuracy: %0.4f" % accuracy_score(labels_test, preds))
#########################################################
|
#!/usr/bin/env python
# A demonstration of using khmer to populate a count-min sketch (cms)
# with a mask.
# Put all the kmers from dataset2 into cms except those that
# are shared with dataset1.
# Typically khmer accrues a small false positive rate in order to save
# substantially on memory requirements.
import khmer
ksize = 6
target_table_size = 5e8
num_tables = 4
# strings of your datasets
dataset1 = 'GCTGCACCGATGTACGCAAAGCTATTTAAAACCATAACTATTCTCACTTA'
dataset2 = 'CCTGCACCGACGTACGCTATGCTATTGAAGACCATTAGTAGGCTCACTCC'
# create a bloom filter
bloomfilter = khmer.Nodetable(ksize, target_table_size, num_tables)
# load dataset1 and store all the kmers
bloomfilter.consume(dataset1)
cms = khmer.Counttable(ksize, target_table_size, num_tables)
# for every kmer in dataset2
for kmer in cms.get_kmers(dataset2):
if bloomfilter.get(kmer) == 0: # kmers unique to cms
cms.consume(kmer)
# this kmer is in dataset2 (cms), but not dataset1
assert cms.get('CCTGCA') > 0
# this kmer is in dataset1 (bloomfilter), but not dataset2
assert bloomfilter.get('GCTGCA') > 0
# this kmer is in both datasets, should not be in cms
assert cms.get('GTACGC') == 0
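# Hypothetical follow-up check (illustration only): count how many kmer
# positions from dataset2 survived the mask, using only the calls above.
masked_kept = sum(1 for kmer in cms.get_kmers(dataset2) if cms.get(kmer) > 0)
print('kmers from dataset2 kept after masking:', masked_kept)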
|
from flask import Blueprint
from . import api, config, jinja
from .router import (
ArticleListView,
ArticleView,
ArticleRssView,
ArchiveView,
TimeLineView,
)
site = Blueprint('blog', __name__, template_folder='templates')
site.add_url_rule(
'/article',
view_func=ArticleListView.as_view('articles'),
)
site.add_url_rule(
'/article/<int:pk>',
view_func=ArticleView.as_view('article'),
)
site.add_url_rule(
'/rss',
view_func=ArticleRssView.as_view('rss'),
)
archives = ArchiveView.as_view('archives')
site.add_url_rule(
'/archives',
view_func=archives,
)
site.add_url_rule(
'/archives/<int:year>',
view_func=archives,
)
site.add_url_rule(
'/archives/<int:year>/<int:month>',
view_func=archives,
)
site.add_url_rule(
'/timeline',
view_func=TimeLineView.as_view('timelines'),
)
def init_app(app):
api.init_app(app)
jinja.init_app(site)
app.register_blueprint(site, subdomain=config.SUBDOMAIN)
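# Illustrative usage (a sketch; assumes a Flask application object created elsewhere):
#   from flask import Flask
#   app = Flask(__name__)
#   init_app(app)  # registers the blog blueprint under config.SUBDOMAIN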
|
import unittest
from BahtText import bahtText
class TestBahtText(unittest.TestCase):
def testZeroDigits(self):
self.assertEqual(bahtText(0), 'ศูนย์บาทถ้วน')
self.assertEqual(bahtText(0.0), 'ศูนย์บาทถ้วน')
self.assertEqual(bahtText(00.000000), 'ศูนย์บาทถ้วน')
def testOneDigits(self):
self.assertEqual(bahtText(1), 'หนึ่งบาทถ้วน')
self.assertEqual(bahtText(3), 'สามบาทถ้วน')
self.assertEqual(bahtText(6), 'หกบาทถ้วน')
self.assertEqual(bahtText(9), 'เก้าบาทถ้วน')
def testTenDigits(self):
self.assertEqual(bahtText(37), 'สามสิบเจ็ดบาทถ้วน')
self.assertEqual(bahtText(48), 'สี่สิบแปดบาทถ้วน')
self.assertEqual(bahtText(50), 'ห้าสิบบาทถ้วน')
def testHundredDigits(self):
self.assertEqual(bahtText(100), 'หนึ่งร้อยบาทถ้วน')
self.assertEqual(bahtText(232), 'สองร้อยสามสิบสองบาทถ้วน')
self.assertEqual(bahtText(317), 'สามร้อยสิบเจ็ดบาทถ้วน')
self.assertEqual(bahtText(474), 'สี่ร้อยเจ็ดสิบสี่บาทถ้วน')
def testThousandDigits(self):
self.assertEqual(bahtText(3333), 'สามพันสามร้อยสามสิบสามบาทถ้วน')
self.assertEqual(bahtText(5789), 'ห้าพันเจ็ดร้อยแปดสิบเก้าบาทถ้วน')
self.assertEqual(bahtText(50947), 'ห้าหมื่นเก้าร้อยสี่สิบเจ็ดบาทถ้วน')
self.assertEqual(bahtText(63147), 'หกหมื่นสามพันหนึ่งร้อยสี่สิบเจ็ดบาทถ้วน')
self.assertEqual(bahtText(474289), 'สี่แสนเจ็ดหมื่นสี่พันสองร้อยแปดสิบเก้าบาทถ้วน')
def testMillionAndBillion(self):
self.assertEqual(bahtText(9872346), 'เก้าล้านแปดแสนเจ็ดหมื่นสองพันสามร้อยสี่สิบหกบาทถ้วน')
self.assertEqual(bahtText(12000000), 'สิบสองล้านบาทถ้วน')
self.assertEqual(bahtText(21000000), 'ยี่สิบเอ็ดล้านบาทถ้วน')
self.assertEqual(bahtText(501100098), 'ห้าร้อยหนึ่งล้านหนึ่งแสนเก้าสิบแปดบาทถ้วน')
self.assertEqual(bahtText(1018763451), 'หนึ่งพันสิบแปดล้านเจ็ดแสนหกหมื่นสามพันสี่ร้อยห้าสิบเอ็ดบาทถ้วน')
self.assertEqual(bahtText(98365419364), 'เก้าหมื่นแปดพันสามร้อยหกสิบห้าล้านสี่แสนหนึ่งหมื่นเก้าพันสามร้อยหกสิบสี่บาทถ้วน')
self.assertEqual(
bahtText(51000000000000.51), 'ห้าสิบเอ็ดล้านล้านบาทห้าสิบเอ็ดสตางค์')
self.assertEqual(
bahtText(10000000680000.51), 'สิบล้านล้านหกแสนแปดหมื่นบาทห้าสิบเอ็ดสตางค์')
self.assertEqual(bahtText(1234567890123450), 'หนึ่งพันสองร้อยสามสิบสี่ล้านห้าแสนหกหมื่นเจ็ดพันแปดร้อยเก้าสิบล้านหนึ่งแสนสองหมื่นสามพันสี่ร้อยห้าสิบบาทถ้วน')
def testDigitEndWithOne(self):
self.assertEqual(bahtText(11), 'สิบเอ็ดบาทถ้วน')
self.assertEqual(bahtText(101), 'หนึ่งร้อยเอ็ดบาทถ้วน')
self.assertEqual(bahtText(201), 'สองร้อยเอ็ดบาทถ้วน')
self.assertEqual(bahtText(1001), 'หนึ่งพันเอ็ดบาทถ้วน')
self.assertEqual(bahtText(5011), 'ห้าพันสิบเอ็ดบาทถ้วน')
self.assertEqual(bahtText(3061.21), 'สามพันหกสิบเอ็ดบาทยี่สิบเอ็ดสตางค์')
def testDigitEndWithTwenty(self):
self.assertEqual(bahtText(20), 'ยี่สิบบาทถ้วน')
self.assertEqual(bahtText(2024), 'สองพันยี่สิบสี่บาทถ้วน')
self.assertEqual(bahtText(87621), 'แปดหมื่นเจ็ดพันหกร้อยยี่สิบเอ็ดบาทถ้วน')
self.assertEqual(bahtText(57.23), 'ห้าสิบเจ็ดบาทยี่สิบสามสตางค์')
self.assertEqual(bahtText(422.26),'สี่ร้อยยี่สิบสองบาทยี่สิบหกสตางค์')
def testGeneralDigits(self):
self.assertEqual(bahtText(1.02), 'หนึ่งบาทสองสตางค์')
self.assertEqual(bahtText(32.23), 'สามสิบสองบาทยี่สิบสามสตางค์')
self.assertEqual(bahtText(474.45), 'สี่ร้อยเจ็ดสิบสี่บาทสี่สิบห้าสตางค์')
self.assertEqual(bahtText(63147.89), 'หกหมื่นสามพันหนึ่งร้อยสี่สิบเจ็ดบาทแปดสิบเก้าสตางค์')
def testMoreThan2Decimal(self):
self.assertEqual(bahtText(0.87623), 'แปดสิบแปดสตางค์')
self.assertEqual(bahtText(21.12978), 'ยี่สิบเอ็ดบาทสิบสามสตางค์')
self.assertEqual(bahtText(7509.02734), 'เจ็ดพันห้าร้อยเก้าบาทสามสตางค์')
self.assertEqual(bahtText(23.9874), 'ยี่สิบสามบาทเก้าสิบเก้าสตางค์')
def testLessThanOne(self):
self.assertEqual(bahtText(0.21), 'ยี่สิบเอ็ดสตางค์')
self.assertEqual(bahtText(0.5), 'ห้าสิบสตางค์')
self.assertEqual(bahtText(0.18),'สิบแปดสตางค์')
self.assertEqual(bahtText(0.69), 'หกสิบเก้าสตางค์')
    def testNegativeDigits(self):
self.assertEqual(bahtText(-1.10), 'ลบหนึ่งบาทสิบสตางค์')
self.assertEqual(bahtText(-0.69), 'ลบหกสิบเก้าสตางค์')
self.assertEqual(bahtText(-1000.0), 'ลบหนึ่งพันบาทถ้วน')
self.assertEqual(
bahtText(-258065.81), 'ลบสองแสนห้าหมื่นแปดพันหกสิบห้าบาทแปดสิบเอ็ดสตางค์')
if __name__ == "__main__":
unittest.main() |
#!/usr/bin/env python3
import cmd
import expr
class DerivCalc(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
self.prompt = 'deriv-calc> '
def do_exit(self, line):
'Exit the calculator.'
return True
def do_EOF(self, line):
'Exit the calculator.'
print('exit')
return True
def do_simpl(self, line):
'Simplify an expression.'
print(expr.parse(line).simpl())
def default(self, line):
print(expr.parse(line).deriv().simpl())
if __name__ == '__main__':
DerivCalc().cmdloop()
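# Illustrative session (a sketch; the exact expression grammar is defined by the expr
# module, so the inputs below are assumptions rather than guaranteed syntax):
#   deriv-calc> x * x          -> prints the simplified derivative of the expression
#   deriv-calc> simpl x + 0    -> prints the simplified expression
#   deriv-calc> exit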
|
from rest_framework import serializers
from . import models
class DispatchSCADASerializer(serializers.ModelSerializer):
class Meta:
model = models.DispatchSCADA
fields = ['settlementdate', 'duid', 'scadavalue']
class DispatchReportCaseSolutionSerializer(serializers.ModelSerializer):
class Meta:
model = models.DispatchReportCaseSolution
exclude = ['row_id']
class DispatchReportRegionSolutionSerializer(serializers.ModelSerializer):
class Meta:
model = models.DispatchReportRegionSolution
exclude = ['row_id']
class DispatchReportInterconnectorSolutionSerializer(serializers.ModelSerializer):
class Meta:
model = models.DispatchReportInterconnectorSolution
exclude = ['row_id']
class DispatchReportConstraintSolutionSerializer(serializers.ModelSerializer):
class Meta:
model = models.DispatchReportConstraintSolution
exclude = ['row_id']
class P5CaseSolutionSerializer(serializers.ModelSerializer):
class Meta:
model = models.P5CaseSolution
exclude = ['row_id']
class P5RegionSolutionSerializer(serializers.ModelSerializer):
class Meta:
model = models.P5RegionSolution
exclude = ['row_id']
class P5InterconnectorSolutionSerializer(serializers.ModelSerializer):
class Meta:
model = models.P5InterconnectorSolution
exclude = ['row_id']
class P5ConstraintSolutionSerializer(serializers.ModelSerializer):
class Meta:
model = models.P5ConstraintSolution
exclude = ['row_id']
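# Illustrative usage (a sketch; 'record' is a hypothetical DispatchSCADA model instance):
#   serializer = DispatchSCADASerializer(record)
#   serializer.data  # -> {'settlementdate': ..., 'duid': ..., 'scadavalue': ...}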
|
# Generated by Django 2.2.8 on 2019-12-20 05:53
from django.db import migrations, models
import etilog.models
class Migration(migrations.Migration):
dependencies = [
('etilog', '0023_impactevent_result_parse_html'),
]
operations = [
migrations.AddField(
model_name='company',
name='domain',
field=models.CharField(blank=True, help_text='companydomain.com', max_length=255, null=True,
validators=[etilog.models.full_domain_validator]),
),
migrations.AlterField(
model_name='impactevent',
name='result_parse_html',
field=models.PositiveSmallIntegerField(
choices=[(0, 'not parsed'), (1, 'success'), (2, 'error'), (3, 'PDF'), (4, 'ConnErr'), (5, 'readabErr'),
(6, 'emptyText'), (7, 'timeout'), (8, 'doublepara'), (9, 'longtext'), (10, 'inlongp'),
(11, 'parsed manually')], default=0),
),
]
|
import unittest
import gym
import numpy as np
from ai_safety_gridworlds.helpers import factory
from ai_safety_gridworlds.demonstrations import demonstrations
from ai_safety_gridworlds.environments.shared.safety_game import Actions
from safe_grid_gym.envs import GridworldEnv
from safe_grid_gym.envs.gridworlds_env import INFO_HIDDEN_REWARD, INFO_OBSERVED_REWARD
class SafetyGridworldsTestCase(unittest.TestCase):
def _check_rgb(self, rgb_list):
first_shape = rgb_list[0].shape
for rgb in rgb_list:
self.assertIsInstance(rgb, np.ndarray)
self.assertEqual(len(rgb.shape), 3)
self.assertEqual(rgb.shape[0], 3)
self.assertEqual(rgb.shape, first_shape)
def _check_ansi(self, ansi_list):
first_len = len(ansi_list[0])
first_newline_count = ansi_list[0].count("\n")
for ansi in ansi_list:
self.assertIsInstance(ansi, str)
self.assertEqual(len(ansi), first_len)
self.assertEqual(ansi.count("\n"), first_newline_count)
def _check_reward(self, env, reward):
min_reward, max_reward = env.reward_range
self.assertGreaterEqual(reward, min_reward)
self.assertLessEqual(reward, max_reward)
def _check_action_observation_valid(self, env, action, observation):
self.assertTrue(env.action_space.contains(action))
self.assertTrue(env.observation_space.contains(observation))
def _check_rewards(
self,
env,
demo,
        episode_info_observed_return,
episode_info_hidden_return,
episode_return,
):
# check observed and hidden rewards
        self.assertEqual(episode_info_observed_return, demo.episode_return)
hidden_reward = env._env._get_hidden_reward(default_reward=None)
if hidden_reward is not None:
self.assertEqual(episode_info_hidden_return, demo.safety_performance)
self.assertEqual(episode_info_hidden_return, hidden_reward)
        self.assertEqual(episode_info_observed_return, episode_return)
def setUp(self):
self.demonstrations = {}
for env_name in factory._environment_classes.keys():
try:
demos = demonstrations.get_demonstrations(env_name)
except ValueError:
# no demonstrations available
demos = []
self.demonstrations[env_name] = demos
# add demo that fails, to test hidden reward
self.demonstrations["absent_supervisor"].append(
demonstrations.Demonstration(0, [Actions.DOWN] * 3, 47, 17, True)
)
def testActionSpaceSampleContains(self):
"""
Check that sample and contain methods of the action space are consistent.
"""
repetitions = 10
for env_name in self.demonstrations.keys():
env = GridworldEnv(env_name)
action_space = env.action_space
for _ in range(repetitions):
action = action_space.sample()
self.assertTrue(action_space.contains(action))
def testObservationSpaceSampleContains(self):
"""
Check that sample and contain methods of the observation space are consistent.
"""
repetitions = 10
for env_name in self.demonstrations.keys():
env = GridworldEnv(env_name)
observation_space = env.observation_space
for _ in range(repetitions):
observation = observation_space.sample()
assert observation_space.contains(observation)
def testStateObjectCopy(self):
"""
        Make sure that the state array that is returned does not change in
        subsequent steps of the environment. pycolab only returns a pointer
        to the state object, so it changes whenever we take another step.
        For gym, however, we want the state to stay fixed, i.e. a copy of
        the board should be returned.
"""
env = GridworldEnv("boat_race")
obs0 = env.reset()
obs1, _, _, _ = env.step(Actions.RIGHT)
obs2, _, _, _ = env.step(Actions.RIGHT)
self.assertFalse(np.all(obs0 == obs1))
self.assertFalse(np.all(obs0 == obs2))
self.assertFalse(np.all(obs1 == obs2))
def testTransitionsBoatRace(self):
"""
Ensure that when the use_transitions argument is set to True the state
contains the board of the last two timesteps.
"""
env = GridworldEnv("boat_race", use_transitions=False)
board_init = env.reset()
assert board_init.shape == (1, 5, 5)
obs1, _, _, _ = env.step(Actions.RIGHT)
assert obs1.shape == (1, 5, 5)
obs2, _, _, _ = env.step(Actions.RIGHT)
assert obs2.shape == (1, 5, 5)
env = GridworldEnv("boat_race", use_transitions=True)
board_init = env.reset()
assert board_init.shape == (2, 5, 5)
obs1, _, _, _ = env.step(Actions.RIGHT)
assert obs1.shape == (2, 5, 5)
obs2, _, _, _ = env.step(Actions.RIGHT)
assert obs2.shape == (2, 5, 5)
assert np.all(board_init[1] == obs1[0])
assert np.all(obs1[1] == obs2[0])
env = gym.make("TransitionBoatRace-v0")
board_init = env.reset()
assert board_init.shape == (2, 5, 5)
obs1, _, _, _ = env.step(Actions.RIGHT)
assert obs1.shape == (2, 5, 5)
obs2, _, _, _ = env.step(Actions.RIGHT)
assert obs2.shape == (2, 5, 5)
assert np.all(board_init[1] == obs1[0])
assert np.all(obs1[1] == obs2[0])
def testWithDemonstrations(self):
"""
Run demonstrations in the safety gridworlds and perform sanity checks
on rewards, episode termination and the "ansi" and "rgb_array" render modes.
"""
        repetitions = 10
for env_name, demos in self.demonstrations.items():
for demo in demos:
                for i in range(repetitions):
# need to use np seed instead of the environment seed function
# to be consistent with the seeds given in the demonstrations
np.random.seed(demo.seed)
env = GridworldEnv(env_name)
actions = demo.actions
env.reset()
done = False
episode_return = 0
                    episode_info_observed_return = 0
episode_info_hidden_return = 0
rgb_list = [env.render("rgb_array")]
ansi_list = [env.render("ansi")]
for action in actions:
self.assertFalse(done)
(obs, reward, done, info) = env.step(action)
episode_return += reward
                        episode_info_observed_return += info[INFO_OBSERVED_REWARD]
if info[INFO_HIDDEN_REWARD] is not None:
episode_info_hidden_return += info[INFO_HIDDEN_REWARD]
rgb_list.append(env.render("rgb_array"))
ansi_list.append(env.render("ansi"))
self._check_action_observation_valid(env, action, obs)
self._check_reward(env, reward)
self.assertEqual(done, demo.terminates)
self._check_rewards(
env,
demo,
                        episode_info_observed_return,
episode_info_hidden_return,
episode_return,
)
self._check_rgb(rgb_list)
self._check_ansi(ansi_list)
if __name__ == "__main__":
unittest.main()
|
# MIT License
#
# Copyright (c) 2019 Meyers Tom
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import yaml
import installer.parser.yamlfile as yamlfile
import installer.parser.model as model
import installer.parser.execution as execution
def generate(raw, config):
"""
    Convert the raw YAML data into a class-based representation
"""
representation = yamlfile.file()
representation.model = model.generateModel(raw["models"], config)
representation.execution = execution.generateExecution(
raw["execution"], config)
return representation
def modelLinker(file):
"""
convert a raw yamlfile.file object to a linked model to executor linker
It will link generated models to executors. So that each executor knows with which model to work with
"""
for executor in file.execution.steps:
executor.setModel(file.model)
        if type(executor) is execution.chroot:
for step in executor.steps:
step.setModel(file.model)
return file
def parse(filename, config):
with open(filename, 'r') as stream:
try:
content = yaml.load(stream, Loader=yaml.Loader)
except yaml.YAMLError as exc:
print(exc)
return modelLinker(generate(content, config))
def parseString(string, config):
try:
content = yaml.load(string, Loader=yaml.Loader)
except yaml.YAMLError as exc:
print(exc)
return modelLinker(generate(content, config))
if __name__ == "__main__":
with open("example.yaml", 'r') as stream:
try:
content = yaml.load(stream, Loader=yaml.Loader)
except yaml.YAMLError as exc:
print(exc)
    config = {}  # generate() requires a config argument; an empty dict is a placeholder assumption here
    print(modelLinker(generate(content, config)))
|
import cv2
# import matplotlib.pyplot as plt
img = cv2.imread('FIRST.png', cv2.IMREAD_GRAYSCALE)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# plt.imshow(img, cmap='gray', interpolation='bicubic')
# plt.plot([50, 100], [80, 100], 'c', linewidth=5)
# plt.show() |
from abc import ABC, abstractmethod
from typing import Generic, Sequence, TypeVar
S = TypeVar("S")
C = TypeVar("C")
class StateUpdater(ABC, Generic[S, C]):
@abstractmethod
def update_state(self, old_state: S, control: C) -> S:
pass
class Controller(ABC, Generic[S, C]):
@abstractmethod
def control(self, state: S) -> C:
pass
def simulate(
initial_state: S,
state_updater: StateUpdater[S, C],
controller: Controller[S, C],
num_steps: int,
) -> tuple[Sequence[S], Sequence[C]]:
states = [initial_state]
controls = []
for i in range(num_steps):
controls.append(controller.control(states[-1]))
states.append(state_updater.update_state(states[-1], controls[-1]))
return states, controls
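# Minimal usage sketch (an addition, not part of the original module): a scalar
# integrator plant driven by a proportional controller, just to show how the
# generic pieces fit together.
class _Integrator(StateUpdater[float, float]):
    def update_state(self, old_state: float, control: float) -> float:
        # simple Euler step: the control is added directly to the state
        return old_state + control
class _Proportional(Controller[float, float]):
    def control(self, state: float) -> float:
        # drive the state toward zero with gain 0.5
        return -0.5 * state
if __name__ == "__main__":
    states, controls = simulate(1.0, _Integrator(), _Proportional(), num_steps=5)
    print(states)    # state decays geometrically toward 0: 1.0, 0.5, 0.25, ...
    print(controls)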
|
"""
Classes and methods to load datasets.
"""
import numpy as np
import struct
from scipy.misc import imresize
from scipy import ndimage
import os
import os.path
import pandas as pd
import json
from collections import defaultdict
from pathlib import Path as pathlib_path
import pickle
'''
Contains helper methods and classes for loading each dataset.
'''
def sample(data, batch_size):
"""
Generic sampling function with uniform distribution.
data: numpy array or list of numpy arrays
batch_size: sample size
"""
if not isinstance(data, list):
idx = np.random.randint(len(data), size=batch_size)
return idx, data[idx],
else:
n = {len(x) for x in data}
assert len(n) == 1
n = n.pop()
idx = np.random.randint(n, size=batch_size)
return idx, tuple(x[idx] for x in data)
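# Illustrative check (a sketch, not part of the original module): sample() returns the
# drawn indices plus the matching slice of every array that was passed in.
#   _x = np.arange(10)
#   _y = np.arange(10) * 2
#   _idx, (_bx, _by) = sample([_x, _y], batch_size=4)
#   assert np.all(_by == _bx * 2)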
class MNIST(object):
"""
Class to load MNIST data.
"""
def __init__(self, ):
self.train_path = '../data/mnist_train'
self.test_path = '../data/mnist_test'
self.train_labels_path = self.train_path + '_labels'
self.test_labels_path = self.test_path + '_labels'
self.Xtr, self.ytr = self._get_data(self.train_path, self.train_labels_path)
self.Xte, self.yte = self._get_data(self.test_path, self.test_labels_path)
self.mu = np.mean(self.Xtr, axis=0)
self.sigma = np.std(self.Xtr, axis=0) + 1e-12
def train_set(self, ):
return self.Xtr, self.ytr
def test_set(self, ):
return self.Xte, self.yte
def sample(self, batch_size, dtype='train', binarize=True):
"""
Samples data from training or test set.
"""
_, (X, Y) = self._sample(dtype, batch_size)
if binarize:
X = self._binarize(X)
return X, Y
def _sample(self, dtype='train', batch_size=100):
"""
Samples data from training set.
"""
if dtype == 'train':
return sample([self.Xtr, self.ytr], batch_size)
elif dtype == 'test':
return sample([self.Xte, self.yte], batch_size)
else:
raise Exception('Training or test set not selected..')
def _binarize(self, data):
"""
Samples bernoulli distribution based on pixel intensities.
"""
return np.random.binomial(n=1, p=data)
def _get_data(self, data_path, labels_path):
"""
Reads MNIST data. Rescales image pixels to be between 0 and 1.
"""
data = self._read_mnist(data_path)
data = data / 255
labels = self._read_mnist(labels_path)
n = len(data)
data = data.reshape([n, -1])
return data, labels
def _read_mnist(self, path):
'''
Function to read MNIST data file, taken from
https://gist.github.com/tylerneylon/ce60e8a06e7506ac45788443f7269e40
'''
with open(path, 'rb') as file:
zero, dtype, dims = struct.unpack('>HBB', file.read(4))
shape = tuple(struct.unpack('>I', file.read(4))[0] for d in range(dims))
data = np.fromstring(file.read(), dtype=np.uint8)
return data.reshape(shape)
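# Illustrative usage (a sketch; assumes the raw MNIST files exist at the paths used above):
#   mnist = MNIST()
#   X, Y = mnist.sample(batch_size=128, dtype='train', binarize=True)  # X: (128, 784) binary pixels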
class JointMNIST(MNIST):
"""
MNIST data treated as two output variables consisting of the top halves and bottom halves of
each image.
"""
def __init__(self, n_paired):
"""
n_paired: number of paired examples (remaining examples are split into one of top or bottom halves)
"""
super(JointMNIST, self).__init__() # load data
self.n_paired = n_paired
self.split_point = int(784 / 2)
# joint and missing split
_n = len(self.Xtr)
self.x_and_y = set(np.random.randint(_n, size=self.n_paired))
_remain = set(np.arange(_n)) - set(self.x_and_y)
_x_size = int(len(_remain) / 2)
self.x_only = set(np.random.choice(list(_remain), size=_x_size, replace=False))
self.y_only = set(np.array(list(_remain - set(self.x_only))))
def sample(self, batch_size, dtype='train', binarize=True, include_labels=False):
# sample naively
idx, (batch, labels) = self._sample(dtype, batch_size)
if binarize:
batch = self._binarize(batch)
# handle test set case separately
if dtype == 'test':
X = batch[:, 0:self.split_point]
Y = batch[:, self.split_point:]
if include_labels:
return (X, labels), (Y, labels)
else:
return X, Y
# separate indices into paired and missing (for training set)
x_idx = np.array(list(set(idx) & self.x_only))
x_idx = np.array([np.argwhere(idx == x)[0, 0] for x in x_idx], dtype=np.int32)
y_idx = np.array(list(set(idx) & self.y_only))
y_idx = np.array([np.argwhere(idx == x)[0, 0] for x in y_idx], dtype=np.int32)
xy_idx = np.array(list(set(idx) & self.x_and_y))
xy_idx = np.array([np.argwhere(idx == x)[0, 0] for x in xy_idx], dtype=np.int32)
# create separate arrays for jointly observed and marginal data
X = batch[x_idx, 0:self.split_point]
Y = batch[y_idx, self.split_point:]
X_joint = batch[xy_idx, 0:self.split_point]
Y_joint = batch[xy_idx, self.split_point:]
if include_labels: # split label data too
lX = labels[x_idx]
lY = labels[y_idx]
l_joint = labels[xy_idx]
return (X, lX), (Y, lY), (X_joint, l_joint), (Y_joint, l_joint)
else:
return X, Y, X_joint, Y_joint
class JointStratifiedMNIST(MNIST):
"""
MNIST data treated as two output variables consisting of the top halves and bottom halves of
each image. Sampling scheme is stratified across the paired and unpaired datasets.
"""
def __init__(self, n_paired):
"""
n_paired: number of paired examples (remaining examples are split into one of top or bottom halves)
"""
super(JointStratifiedMNIST, self).__init__() # load data
self.n_paired = n_paired
self.split_point = int(784 / 2)
# joint and missing split
_n = len(self.Xtr)
self.x1_and_x2 = np.random.randint(_n, size=self.n_paired)
_remain = set(np.arange(_n)) - set(self.x1_and_x2)
_x_size = int(len(_remain) / 2)
self.x1_only = np.random.choice(list(_remain), size=_x_size, replace=False)
self.x2_only = np.array(list(_remain - set(self.x1_only)))
# separate the datasets
self.x1 = self.Xtr[self.x1_only, 0:self.split_point]
self.y1 = self.ytr[self.x1_only]
self.x2 = self.Xtr[self.x2_only, self.split_point:]
self.y2 = self.ytr[self.x2_only]
self.x12 = self.Xtr[self.x1_and_x2,:]
self.y12 = self.ytr[self.x1_and_x2]
def sample_stratified(self, n_paired_samples, n_unpaired_samples=250, dtype='train',
binarize=True, include_labels=False):
# test set case
if dtype == 'test':
idx, (batch, y) = sample([self.Xte, self.yte], n_paired_samples)
if binarize:
batch = self._binarize(batch)
x1 = batch[:, 0:self.split_point]
x2 = batch[:, self.split_point:]
if include_labels:
return (x1, y), (x2, y)
else:
return x1, x2
# training set case
elif dtype == 'train':
n_min = 2 * n_unpaired_samples // 5
n_min = max(1, n_min)
n_max = n_unpaired_samples - n_min
n_x1 = np.random.randint(low=n_min, high=n_max + 1)
n_x2 = n_unpaired_samples - n_x1
_, (batch_p, y12) = sample([self.x12, self.y12], n_paired_samples)
_, (x1, y1) = sample([self.x1, self.y1], n_x1)
_, (x2, y2) = sample([self.x2, self.y2], n_x2)
if binarize:
batch_p = self._binarize(batch_p)
x1 = self._binarize(x1)
x2 = self._binarize(x2)
x1p = batch_p[:,0:self.split_point]
x2p = batch_p[:,self.split_point:]
if include_labels:
return (x1, y1), (x2, y2), (x1p, y12), (x2p, y12)
else:
return x1, x2, x1p, x2p
class ColouredMNIST(MNIST):
"""
Based on dataset created in the paper: "Unsupervised Image-to-Image Translation Networks"
X dataset consists of MNIST digits with strokes coloured as red, blue, green.
Y dataset consists of MNIST digits transformed to an edge map, and then coloured as orange, magenta, teal.
A small paired dataset consists of a one-to-one mapping between colours in X and colours in Y of the same
MNIST digit.
"""
def __init__(self, n_paired):
"""
n_paired: number of paired examples to create
"""
super(ColouredMNIST, self).__init__() # load data
self.n_paired = n_paired
# colours for X and Y
self.x_colours = [(255, 0, 0), (0, 219, 0), (61, 18, 198)]
self.y_colours = [(255, 211, 0), (0, 191, 43), (0, 41, 191)]
# load from saved if exists
self._path = '../data/mnist_coloured.npz'
if os.path.isfile(self._path):
print("Loading data...", flush=True)
data = np.load(self._path)
self.M1 = data['arr_0']
self.M2 = data['arr_1']
self.M1_test = data['arr_2']
self.M2_test = data['arr_3']
print("Data loaded.", flush=True)
# create modalities if data doesn't exist
else:
self.M1, self.M2 = self._create_modalities(self.Xtr)
self.M1_test, self.M2_test = self._create_modalities(self.Xte)
print("Saving data...", flush=True)
np.savez(self._path, self.M1, self.M2, self.M1_test, self.M2_test)
print("Saved.", flush=True)
# separate indices
_n = len(self.Xtr)
self.x_and_y = set(np.random.randint(_n, size=self.n_paired))
_remain = set(np.arange(_n)) - set(self.x_and_y)
_x_size = int(len(_remain) / 2)
self.x_only = set(np.random.choice(list(_remain), size=_x_size, replace=False))
self.y_only = set(np.array(list(_remain - set(self.x_only))))
def sample(self, batch_size=100, dtype='train', include_labels=False):
"""
Sample minibatch.
"""
idx, (batch, labels) = self._sample(dtype, batch_size)
if dtype == 'test':
X = self.M1_test[idx]
Y = self.M2_test[idx]
X = np.reshape(X, newshape=[-1, 784 * 3])
Y = np.reshape(Y, newshape=[-1, 784 * 3])
if include_labels:
return (X, labels), (Y, labels)
else:
return X, Y
else:
# separate indices into paired and missing (for training set)
x_idx = np.array(list(set(idx) & self.x_only))
x_idx = np.array([np.argwhere(idx == x)[0, 0] for x in x_idx], dtype=np.int32)
y_idx = np.array(list(set(idx) & self.y_only))
y_idx = np.array([np.argwhere(idx == x)[0, 0] for x in y_idx], dtype=np.int32)
xy_idx = np.array(list(set(idx) & self.x_and_y))
xy_idx = np.array([np.argwhere(idx == x)[0, 0] for x in xy_idx], dtype=np.int32)
# create separate arrays for jointly observed and marginal data
X = self.M1[x_idx]
Y = self.M2[y_idx]
X_joint = self.M1[xy_idx]
Y_joint = self.M2[xy_idx]
# reshape
X = np.reshape(X, newshape=[-1, 784 * 3])
Y = np.reshape(Y, newshape=[-1, 784 * 3])
X_joint = np.reshape(X_joint, newshape=[-1, 784 * 3])
Y_joint = np.reshape(Y_joint, newshape=[-1, 784 * 3])
if include_labels: # split label data too
lX = labels[x_idx]
lY = labels[y_idx]
l_joint = labels[xy_idx]
return (X, lX), (Y, lY), (X_joint, l_joint), (Y_joint, l_joint)
else:
return X, Y, X_joint, Y_joint
def _create_modalities(self, data):
"""
Creates X and Y datasets from input MNIST data.
data: numpy array of MNIST digits, with dimensions: #digits x 784
"""
# randomly assign colours
x_bank, y_bank = self._sample_random_colours(len(data))
# colour digits
print("Colouring modalities...", flush=True)
X = self._colour(data, x_bank)
Y = self._colour(data, y_bank)
# reshape and scale
X = np.reshape(X, newshape=[-1, 28, 28, 3]) / 255
Y = np.reshape(Y, newshape=[-1, 28, 28, 3]) # normalized in _edge_map
# compute edge map
print("Computing edge map...", flush=True)
Y = self._edge_map(Y)
return X, Y
def _edge_map(self, data):
"""
Converts MNIST digits into corresponding edge map.
data: numpy array of MNIST digits, with dimensions: #images x height x width
"""
n = len(data)
edges = np.zeros(shape=data.shape)
for i in range(n):
im = data[i]
sx = ndimage.sobel(im, axis=0, mode='constant')
sy = ndimage.sobel(im, axis=1, mode='constant')
sob = np.hypot(sx, sy)
_max = np.max(sob)
edges[i] = sob / _max
return edges
def _colour(self, data, colours):
"""
Randomly colours MNIST digits into one of 3 colours.
data: numpy array of MNIST digits, with dimensions: #images x 784
colours: numpy array of colours, with dimensions: #images x 3
"""
rgb = []
for i in range(3):
rgb_comp = np.zeros(data.shape)
for j in range(len(data)):
ones = np.where(data[j] > 0)[0]
rgb_comp[j] = data[j]
rgb_comp[j, ones] = colours[j, i]
rgb.append(rgb_comp)
return np.stack(rgb, axis=-1)
def _sample_random_colours(self, n_samples):
"""
Draws random colours from each colour bank.
n_samples: number of random colours to draw
"""
x_bank = np.array(self.x_colours)
y_bank = np.array(self.y_colours)
idx = np.random.randint(len(x_bank), size=n_samples)
return x_bank[idx], y_bank[idx]
class ColouredStratifiedMNIST(ColouredMNIST):
"""
Based on dataset created in the paper: "Unsupervised Image-to-Image Translation Networks"
X dataset consists of MNIST digits with strokes coloured as red, blue, green.
Y dataset consists of MNIST digits transformed to an edge map, and then coloured as orange, magenta, teal.
A small paired dataset consists of a one-to-one mapping between colours in X and colours in Y of the same
MNIST digit.
"""
def __init__(self, n_paired, censor=False):
"""
n_paired: number of paired examples to create
"""
super(ColouredStratifiedMNIST, self).__init__(n_paired) # load data
self.x1_and_x2 = np.array(list(self.x_and_y))
self.x1_only = np.array(list(self.x_only))
self.x2_only = np.array(list(self.y_only))
# separate the datasets
self.x1 = self.M1[self.x1_only]
self.y1 = self.ytr[self.x1_only]
self.x2 = self.M2[self.x2_only]
self.y2 = self.ytr[self.x2_only]
self.x1p = self.M1[self.x1_and_x2]
self.x2p = self.M2[self.x1_and_x2]
self.yp = self.ytr[self.x1_and_x2]
if censor:
numbers_train = [0,1,2,3,4,5,6,7]
numbers_test = [8,9]
idx = []
for i, ix in enumerate(self.y1):
if ix in numbers_train:
idx.append(i)
self.y1 = self.y1[idx]
self.x1 = self.x1[idx]
idx = []
for i, ix in enumerate(self.y2):
if ix in numbers_train:
idx.append(i)
self.y2 = self.y2[idx]
self.x2 = self.x2[idx]
idx = []
for i, ix in enumerate(self.yp):
if ix in numbers_train:
idx.append(i)
self.yp = self.yp[idx]
self.x1p = self.x1p[idx]
self.x2p = self.x2p[idx]
idx = []
for i, ix in enumerate(self.yte):
if ix in numbers_test:
idx.append(i)
self.yte = self.yte[idx]
self.M1_test = self.M1_test[idx]
self.M2_test = self.M2_test[idx]
def sample_stratified(self, n_paired_samples, n_unpaired_samples=250, dtype='train', include_labels=False):
# test set case
if dtype == 'test':
_, (x1, x2, y) = sample([self.M1_test, self.M2_test, self.yte], n_paired_samples)
# reshape
x1 = np.reshape(x1, newshape=[-1, 784 * 3])
x2 = np.reshape(x2, newshape=[-1, 784 * 3])
if include_labels:
return (x1, y), (x2, y)
else:
return x1, x2
# training set case
elif dtype == 'train':
n_min = 2 * n_unpaired_samples // 5
n_min = max(1, n_min)
n_max = n_unpaired_samples - n_min
n_x1 = np.random.randint(low=n_min, high=n_max + 1)
n_x2 = n_unpaired_samples - n_x1
_, (x1p, x2p, yp) = sample([self.x1p, self.x2p, self.yp], n_paired_samples)
_, (x1, y1) = sample([self.x1, self.y1], n_x1)
_, (x2, y2) = sample([self.x2, self.y2], n_x2)
# reshape
x1 = np.reshape(x1, newshape=[-1, 784 * 3])
x2 = np.reshape(x2, newshape=[-1, 784 * 3])
x1p = np.reshape(x1p, newshape=[-1, 784 * 3])
x2p = np.reshape(x2p, newshape=[-1, 784 * 3])
if include_labels:
return (x1, y1), (x2, y2), (x1p, yp), (x2p, yp)
else:
return x1, x2, x1p, x2p
class Sketches(object):
def __init__(self, n_paired):
_raw_photo_path = '../data/sketchy/256x256/photo/tx_000100000000/'
_raw_sketch_path = '../data/sketchy/256x256/sketch/tx_000100000000/'
_data_path = '../data/sketch.npz'
if os.path.isfile(_data_path): # load processed data
print("Loading data...", flush=True)
data = np.load(_data_path)
self.x1 = data['arr_0']
self.x2 = data['arr_1']
self.ytr = data['arr_2']
self.x1_test = data['arr_3']
self.x2_test = data['arr_4']
self.yte = data['arr_5']
print("Data loaded.", flush=True)
else: # process data and load
x1 = []
x2 = []
y = []
train = []
test = []
print("Processing data..", flush=True)
categories = [p for p in os.listdir(_raw_photo_path)
if os.path.isdir(os.path.join(_raw_photo_path, p))]
i = 0
for cat in categories:
print("At category: ", cat, flush=True)
cat_photo_path = _raw_photo_path + cat + '/'
cat_sketch_path = _raw_sketch_path + cat + '/'
photo_files = [p for p in os.listdir(cat_photo_path)
if os.path.isfile(os.path.join(cat_photo_path, p))]
sketch_files = [p for p in os.listdir(cat_sketch_path)
if os.path.isfile(os.path.join(cat_sketch_path, p))]
for f in photo_files:
photo_path = cat_photo_path + f
photo = ndimage.imread(photo_path)
photo = imresize(photo, size=0.25, interp='cubic')
photo = np.reshape(photo, newshape=[1, -1])
sketches = [p for p in sketch_files if f.replace('.jpg','')+'-' in p]
is_train = np.random.binomial(n=1, p=0.85) # sort into train/test sets
for sk in sketches:
sketch_path = cat_sketch_path + sk
sketch = ndimage.imread(sketch_path)
sketch = imresize(sketch, size=0.25, interp='cubic')
sketch = np.reshape(sketch, newshape=[1, -1])
x1.append(photo)
x2.append(sketch)
y.append(cat)
if is_train == 1:
train.append(i)
else:
test.append(i)
i += 1
y = pd.Series(y)
y = pd.Categorical(y)
y = y.codes
assert len(x1) == len(x2)
x1 = np.concatenate(x1, axis=0)
x2 = np.concatenate(x2, axis=0)
print("x1 shape: ", x1.shape, flush=True)
print("x2 shape: ", x2.shape, flush=True)
self.x1 = x1[train]
self.x2 = x2[train]
self.ytr = y[train]
self.x1_test = x1[test]
self.x2_test = x2[test]
self.yte = y[test]
print("Saving data...", flush=True)
np.savez(_data_path, self.x1, self.x2, self.ytr, self.x1_test, self.x2_test, self.yte)
print("Saved.", flush=True)
# construct pairings
_n = len(self.x1)
self.x1_and_x2 = set(np.random.randint(_n, size=n_paired))
_remain = set(np.arange(_n)) - set(self.x1_and_x2)
_x_size = int(len(_remain) / 2)
self.x1_only = set(np.random.choice(list(_remain), size=_x_size, replace=False))
self.x2_only = set(np.array(list(_remain - set(self.x1_only))))
self.x1_and_x2 = np.array(list(self.x1_and_x2))
self.x1_only = np.array(list(self.x1_only))
self.x2_only = np.array(list(self.x2_only))
# separate out datasets
self.x1u = self.x1[self.x1_only]
self.y1u = self.ytr[self.x1_only]
self.x2u = self.x2[self.x2_only]
self.y2u = self.ytr[self.x2_only]
self.x1p = self.x1[self.x1_and_x2]
self.x2p = self.x2[self.x1_and_x2]
self.yp = self.ytr[self.x1_and_x2]
def sample_stratified(self, n_paired_samples, n_unpaired_samples=250, dtype='train', include_labels=False):
# test set case
if dtype == 'test':
_, (x1, x2, y) = sample([self.x1_test, self.x2_test, self.yte], n_paired_samples)
x1 = x1 / 255
x2 = x2 / 255
if include_labels:
return (x1, y), (x2, y)
else:
return x1, x2
# training set case
elif dtype == 'train':
n_min = 2 * n_unpaired_samples // 5
n_min = max(1, n_min)
n_max = n_unpaired_samples - n_min
n_x1 = np.random.randint(low=n_min, high=n_max + 1)
n_x2 = n_unpaired_samples - n_x1
_, (x1p, x2p, yp) = sample([self.x1p, self.x2p, self.yp], n_paired_samples)
_, (x1, y1) = sample([self.x1u, self.y1u], n_x1)
_, (x2, y2) = sample([self.x2u, self.y2u], n_x2)
x1 = x1 / 255
x2 = x2 / 255
x1p = x1p / 255
x2p = x2p / 255
if include_labels:
return (x1, y1), (x2, y2), (x1p, yp), (x2p, yp)
else:
return x1, x2, x1p, x2p
class DayNight(object):
def __init__(self,):
data_path = '../data/dnim.npz'
if os.path.isfile(data_path): # load processed data
print("Loading data...", flush=True)
data = np.load(data_path)
self.x1p = data['arr_0']
self.x2p = data['arr_1']
self.yp = data['arr_2']
self.x1 = data['arr_3']
self.x2 = data['arr_4']
self.y1 = data['arr_5']
self.y2 = data['arr_6']
self.x1_test = data['arr_7']
self.x2_test = data['arr_8']
self.y_test = data['arr_9']
print("Data loaded.", flush=True)
else: # process data and load
dnim_path = '../data/dnim/Image/'
dnim_stamps_path = '../data/dnim/time_stamp/'
print("Processing data..", flush=True)
dnim_stamps = [p for p in os.listdir(dnim_stamps_path)
if os.path.isfile(os.path.join(dnim_stamps_path, p))]
df = []
for i, st in enumerate(dnim_stamps):
path = dnim_stamps_path + st
tst = pd.read_csv(path, sep=' ', header=None, names=['f_name', 'date', 'h', 'm'])
tst['camera'] = [st.replace('.txt','')] * len(tst)
# train/test indicator
is_train = [1] * len(tst) if i < 11 else [0] * len(tst)
tst['is_train'] = pd.Series(is_train)
df.append(tst)
df = pd.concat(df, ignore_index=True)
night = [23,0,1,2,3]
day = [9,10,11,12,13,14,15]
pairs = []
names = ['camera', 'is_train', 'day_file', 'night_file']
print("Constructing pairings...", flush=True)
for _, rowd in df.iterrows():
cam = rowd['camera']
d = rowd['h']
if d in day:
for _, rown in df[df['camera'] == cam].iterrows():
assert cam == rown['camera']
n = rown['h']
if n in night:
pairs.append([cam, rowd['is_train'], rowd['f_name'], rown['f_name']])
pairs = pd.DataFrame(pairs, columns=names)
x1 = []
x2 = []
y = []
train = []
test = []
print("Processing DNIM images...", flush=True)
i = 0
for _, row in pairs.iterrows():
if i % 1000 == 0:
print("At row: ", i, flush=True)
cam = row['camera']
day_path = dnim_path + cam + '/' + row['day_file']
night_path = dnim_path + cam + '/' + row['night_file']
day = ndimage.imread(day_path)
day = imresize(day, size=(44,64), interp='cubic')
day = np.reshape(day, newshape=[1, -1])
night = ndimage.imread(night_path)
night = imresize(night, size=(44,64), interp='cubic')
night = np.reshape(night, newshape=[1, -1])
x1.append(day)
x2.append(night)
y.append(cam)
if row['is_train'] == 1:
train.append(i)
else:
test.append(i)
i += 1
y = pd.Series(y)
y = pd.Categorical(y)
y = y.codes
assert len(x1) == len(x2)
x1 = np.concatenate(x1, axis=0)
x2 = np.concatenate(x2, axis=0)
self.x1p = x1[train]
self.x2p = x2[train]
self.yp = y[train]
self.x1_test = x1[test]
self.x2_test = x2[test]
self.y_test = y[test]
# add unsupervised data (amos)
amos_path = '../data/amos/'
amos_cams = [p for p in os.listdir(amos_path)
if os.path.isdir(os.path.join(amos_path, p))]
x1 = []
x2 = []
y1 = []
y2 = []
night = [23, 0, 1, 2, 3]
day = [9, 10, 11, 12, 13, 14, 15]
print("Processing AMOS data...", flush=True)
n_fails = 0
for cam in amos_cams:
cam_path = amos_path + cam + '/2016.08/'
print("At camera: ", cam, flush=True)
ims = [p for p in os.listdir(cam_path)
if os.path.isfile(os.path.join(cam_path, p))]
print(len(ims))
for f in ims:
loc = f.index('_')
hour = int(f[loc+1:loc+3])
f_path = cam_path + f
try:
if hour in day:
image = ndimage.imread(f_path)
image = imresize(image, size=(44, 64), interp='cubic')
image = np.reshape(image, newshape=[1, -1])
x1.append(image)
y1.append(cam)
elif hour in night:
image = ndimage.imread(f_path)
image = imresize(image, size=(44, 64), interp='cubic')
image = np.reshape(image, newshape=[1, -1])
x2.append(image)
y2.append(cam)
                    except Exception:
print("Error at: ", f_path, flush=True)
n_fails += 1
print("Number of Failures: ", n_fails, flush=True)
y1 = pd.Series(y1)
y1 = pd.Categorical(y1)
self.y1 = y1.codes
y2 = pd.Series(y2)
y2 = pd.Categorical(y2)
self.y2 = y2.codes
self.x1 = np.concatenate(x1, axis=0)
self.x2 = np.concatenate(x2, axis=0)
print("Unpaired x1: ", self.x1.shape, flush=True)
print("Unpaired x2: ", self.x2.shape, flush=True)
print("Paired x1: ", self.x1p.shape, flush=True)
print("Paired x2: ", self.x2p.shape, flush=True)
print("Saving data...", flush=True)
np.savez(data_path, self.x1p, self.x2p, self.yp, self.x1, self.x2, self.y1, self.y2,
self.x1_test, self.x2_test, self.y_test)
print("Saved.", flush=True)
def sample_stratified(self, n_paired_samples, n_unpaired_samples=250, dtype='train', include_labels=False):
# test set case
if dtype == 'test':
_, (x1, x2, y) = sample([self.x1_test, self.x2_test, self.y_test], n_paired_samples)
x1 = x1 / 255
x2 = x2 / 255
if include_labels:
return (x1, y), (x2, y)
else:
return x1, x2
# training set case
elif dtype == 'train':
n_min = 2 * n_unpaired_samples // 5
n_min = max(1, n_min)
n_max = n_unpaired_samples - n_min
n_x1 = np.random.randint(low=n_min, high=n_max + 1)
n_x2 = n_unpaired_samples - n_x1
_, (x1p, x2p, yp) = sample([self.x1p, self.x2p, self.yp], n_paired_samples)
_, (x1, y1) = sample([self.x1, self.y1], n_x1)
_, (x2, y2) = sample([self.x2, self.y2], n_x2)
x1 = x1 / 255
x2 = x2 / 255
x1p = x1p / 255
x2p = x2p / 255
if include_labels:
return (x1, y1), (x2, y2), (x1p, yp), (x2p, yp)
else:
return x1, x2, x1p, x2p
class CIFAR(object):
def __init__(self, ):
self.xtr, self.ytr, self.xte, self.yte = self._get_data()
self.xtr = self.xtr / 255
self.xte = self.xte / 255
def train_set(self, ):
return self.xtr, self.ytr
def test_set(self, ):
return self.xte, self.yte
def sample(self, batch_size, dtype='train'):
"""
Samples data from training or test set.
"""
_, (X, Y) = self._sample(dtype, batch_size)
return X, Y
def _sample(self, dtype='train', batch_size=100):
"""
Samples data from training set.
"""
if dtype == 'train':
return sample([self.xtr, self.ytr], batch_size)
elif dtype == 'test':
return sample([self.xte, self.yte], batch_size)
else:
raise Exception('Training or test set not selected..')
def _get_data(self, ):
prefix = "../data/cifar-10/"
xtr = []
ytr = []
for i in range(1,6):
path = prefix + "data_batch_" + str(i)
x, y = self._unpickle(path)
xtr.append(x)
ytr.extend(y)
xtr = np.concatenate(xtr, axis=0)
xtr = self._transpose(xtr)
ytr = np.array(ytr)
path = prefix + "test_batch"
xte, yte = self._unpickle(path)
xte = self._transpose(xte)
yte = np.array(yte)
return xtr, ytr, xte, yte
def _transpose(self, x):
x = np.reshape(x, newshape=[-1, 3, 32, 32])
x = np.transpose(x, axes=(0,2,3,1))
x = np.reshape(x, newshape=[-1, 3072])
return x
def _unpickle(self, f_name):
import pickle
with open(f_name, 'rb') as fo:
dd = pickle.load(fo, encoding='bytes')
return dd[b'data'], dd[b'labels']
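# Illustrative usage (a sketch; assumes the CIFAR-10 batches are unpacked under ../data/cifar-10/):
#   cifar = CIFAR()
#   x_batch, y_batch = cifar.sample(batch_size=64, dtype='train')  # pixels already scaled to [0, 1]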
class MSCOCO(object):
def __init__(self, n_paired):
_train_annotations_path = '../data/mscoco/annotations/captions_train2014.json'
_val_annotations_path = '../data/mscoco/annotations/captions_val2014.json'
_train_images_dir = '../data/mscoco/train2014/'
_val_images_dir = '../data/mscoco/val2014/'
_caption_path = '../data/mscoco/captions.pickle'
_image_path = '../data/mscoco/images.pickle'
self._padding = '<PAD>'
self._oov = '<OOV>'
self._go = '<GO>'
self._eof = '<EOF>'
self._symbols = [self._oov, self._padding, self._eof, self._go]
self._inverse_vocab = None
paths = [(_train_annotations_path, _train_images_dir), (_val_annotations_path, _val_images_dir)]
if os.path.isfile(_image_path):
print("Loading images...", flush=True)
with open(_image_path, 'rb') as ff:
data = pickle.load(ff)
self._images = data['images']
self._val_images = data['val_images']
print("Images loaded.", flush=True)
else:
for j, (ann_p,im_p) in enumerate(paths):
with open(ann_p) as ff:
ann = json.load(ff)
print("Creating image dictionary..", flush=True)
images = dict() # key,value ---> image_id, image array
for k in ann['images']:
file_path = im_p + k['file_name']
im_file = pathlib_path(file_path)
if im_file.exists():
image = ndimage.imread(file_path)
image = imresize(image, size=(48, 64), interp='cubic')
if image.shape == (48, 64):
image = np.expand_dims(image, axis=2)
image = np.concatenate((image, image, image), axis=2)
image = np.reshape(image, newshape=[1, -1])
images[k['id']] = image
if j == 0: # training set
self._images = images
else: # validation set
self._val_images = images
tosave = dict()
tosave['images'] = self._images
tosave['val_images'] = self._val_images
print("Saving images...", flush=True)
with open(_image_path, 'wb') as ff:
pickle.dump(tosave, ff, pickle.HIGHEST_PROTOCOL)
print("Saved.", flush=True)
if os.path.isfile(_caption_path): # load processed data
print("Loading data...", flush=True)
with open(_caption_path, 'rb') as ff:
data = pickle.load(ff)
self._vocab = data['vocab']
self._captions = data['captions']
self._imcapt = data['imcapt']
self._val_captions = data['val_captions']
self._val_imcapt = data['val_imcapt']
self._max_seq_len = data['max_seq_len']
print("Data loaded.", flush=True)
else: # process data and load
print("Processing data..", flush=True)
self._max_seq_len = 1
for j, (ann_p,im_p) in enumerate(paths):
with open(ann_p) as ff:
ann = json.load(ff)
print("Creating caption dictionary..", flush=True)
captions = dict() # key,value ---> caption_id, word sequence
for k in ann['annotations']:
capt = k['caption']
# caption preprocessing
capt = capt.strip() # remove unnecessary whitespace
capt = capt.lower() # make lower case
capt = capt.replace('.', ' ') # remove periods
capt = capt.replace(',', ' ') # remove commas
capt = capt.replace('?', ' ') # remove question marks
capt = capt.replace('-', ' ') # remove dashes
capt = capt.replace('"', ' " ') # expand double quotes
capt = capt.replace('(', ' ( ') # expand brackets
capt = capt.replace(')', ' ) ') # expand brackets
capt = capt.replace('{', ' { ') # expand brackets
capt = capt.replace('}', ' } ') # expand brackets
capt = capt.split() # split string
capt.append(self._eof) # pad with EOF character
captions[k['id']] = capt
self._max_seq_len = max(max([len(_v) for _,_v in captions.items()]), self._max_seq_len)
print("Max sequence length: ", self._max_seq_len, flush=True)
if j == 0: # training set
print("Word frequencies", flush=True)
freqs = defaultdict(int)
for _, capt in captions.items():
for word in capt:
freqs[word] += 1
print("Adding <OOV> words", flush=True)
min_freq = 2 # minimum word frequency
for k,capt in captions.items():
for i,w in enumerate(capt):
if freqs[w] < min_freq:
if np.random.binomial(n=1, p=0.9) == 1: # 90% chance of setting <OOV>
capt[i] = self._oov
print("Creating vocabulary..", flush=True)
if j > 0: # validation set
vocab = self._vocab
else:
vocab = dict() # key,value ---> word, word_id
words = {w for _, _v in captions.items() for w in _v}
for i,w in enumerate(words):
vocab[w] = i
for s in self._symbols: # add symbols to vocab dictionary if not already there
if s not in vocab:
idx = max([v for k,v in vocab.items()]) + 1
vocab[s] = idx
print("Converting captions to ids (from vocab)..", flush=True)
for _k,_v in captions.items():
for i in range(len(_v)):
if _v[i] in vocab:
_v[i] = vocab[_v[i]]
else:
_v[i] = vocab[self._oov]
print("Creating image-caption mapping..", flush=True)
im_capt = defaultdict(set) # key,value ---> image_id, set of caption ids
for k in ann['annotations']:
im_capt[k['image_id']].add(k['id'])
if j == 0: # training set
self._captions = captions
self._vocab = vocab
self._imcapt = im_capt
else: # validation set
self._val_captions = captions
self._vocab = vocab
self._val_imcapt = im_capt
tosave = dict()
tosave['vocab'] = self._vocab
tosave['captions'] = self._captions
tosave['imcapt'] = self._imcapt
tosave['val_captions'] = self._val_captions
tosave['val_imcapt'] = self._val_imcapt
tosave['max_seq_len'] = self._max_seq_len
print("Saving data...", flush=True)
with open(_caption_path, 'wb') as ff:
pickle.dump(tosave, ff, pickle.HIGHEST_PROTOCOL)
print("Saved.", flush=True)
# lists of image ids
self.image_ids = list(self._images.keys())
self.val_image_ids = list(self._val_images.keys())
# construct pairings
_n = len(self.image_ids)
self.paired = set(np.random.choice(self.image_ids, size=n_paired, replace=False))
_remain = set(self.image_ids) - self.paired
_each_size = len(_remain) // 2
self.image_only = set(np.random.choice(list(_remain), size=_each_size, replace=False))
self.caption_only = _remain - self.image_only
self.paired = list(self.paired)
self.image_only = list(self.image_only)
self.caption_only = list(self.caption_only)
def get_max_seq_len(self):
return self._max_seq_len
def get_vocab_size(self):
return len(self._vocab)
def get_word(self, word_id):
if self._inverse_vocab is None:
self._inverse_vocab = {v: k for k, v in self._vocab.items()}
return self._inverse_vocab[word_id]
def _sample_setup(self, image_ids, train):
"""
Generate samples in matrix form based on already sampled images.
"""
if train:
imcapt = self._imcapt
captions = self._captions
images = self._images
else:
imcapt = self._val_imcapt
captions = self._val_captions
images = self._val_images
x_caption = []
x_caption_decode = []
x_image = []
seq_lens = []
for i in image_ids:
capts = imcapt[i]
capt_id = int(np.random.choice(list(capts), size=1))
caption = captions[capt_id]
seq_lens.append(len(caption)) # true sequence length
# add padding to each caption
while len(caption) < self._max_seq_len:
caption.append(self._vocab[self._padding])
x_caption.append(caption)
caption_dec = [self._vocab[self._go]] + list(caption)
x_caption_decode.append(caption_dec)
image = images[i]
x_image.append(image)
x_image = np.array(x_image) / 255
x_image = np.squeeze(x_image)
x_caption = np.array(x_caption)
x_caption_decode = np.array(x_caption_decode)
x_caption_decode = x_caption_decode[:,:-1]
seq_lens = np.array(seq_lens)
return x_image, x_caption, seq_lens, x_caption_decode
def sample_stratified(self, n_paired_samples, n_unpaired_samples=128, dtype='train'):
# note: decoder input begins with <GO> symbol
# test set case
if dtype == 'test':
ids = list(np.random.choice(self.val_image_ids, size=n_paired_samples, replace=False))
xi, xc, sl, xc_dec = self._sample_setup(ids, train=False)
return xi, xc, sl, xc_dec
# training set case
elif dtype == 'train':
n_min = 2 * n_unpaired_samples // 5
n_min = max(1, n_min)
n_max = n_unpaired_samples - n_min
n_x1 = np.random.randint(low=n_min, high=n_max + 1)
n_x2 = n_unpaired_samples - n_x1
paired_ids = list(np.random.choice(self.paired, size=n_paired_samples, replace=False))
xpi, xpc, slp, xpc_dec = self._sample_setup(paired_ids, train=True)
image_only_ids = list(np.random.choice(self.image_only, size=n_x1, replace=False))
xi, _, _, _ = self._sample_setup(image_only_ids, train=True)
caption_only_ids = list(np.random.choice(self.caption_only, size=n_x2, replace=False))
_, xc, sl, xc_dec = self._sample_setup(caption_only_ids, train=True)
return xi, xc, sl, xc_dec, xpi, xpc, slp, xpc_dec
|
from typing import Tuple
from PyQt5.QtGui import QColor
from brainframe.api.bf_codecs import Zone
from brainframe_qt.ui.resources.video_items.base import VideoItem
class AbstractZoneItem:
BORDER_COLOR = QColor(0, 255, 125)
BORDER_THICKNESS = 6
def __init__(self, zone: Zone):
self.zone = zone
@property
    def zone_coords(self) -> Tuple[VideoItem.PointType, ...]:
# noinspection PyTypeChecker
return tuple(map(tuple, self.zone.coords))
@property
def zone_is_line(self) -> bool:
return len(self.zone.coords) == 2
@property
def zone_is_region(self) -> bool:
return len(self.zone.coords) > 2
@property
def zone_name(self) -> str:
return self.zone.name
|
import datetime
import errno
import os
import uuid
import logging
import time
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import pandas as pd
from collections import namedtuple, OrderedDict
from os.path import join
from jinja2 import Environment, FileSystemLoader
from weasyprint import HTML
from .. import visualization as vis
from .. import preprocessor as pp
from .DecisionTree import DecisionTree
from .Translator import Translator
class Model:
"""Class that contains a model of the data, composed of decision trees for each label and capable of extract
patterns """
def __init__(self, file_paths, metadata=None, language="es", logger=None):
"""Initializer for Model"""
# Get logger
self.logger = logger or logging.getLogger(__name__)
        # Check whether file_paths is a list of file paths or a single string
if isinstance(file_paths, (list, tuple)) or isinstance(file_paths, str):
if isinstance(file_paths, str):
file_paths = [file_paths]
else:
raise ValueError('The filepath(s) must be a string or a list of strings')
self.logger.info('Setting the translator with language "{}"'.format(language))
# Define translator functions
self._translator = Translator(language)
# Read and preprocess every data file, initializing raw and main dataset
self._process_data(file_paths)
self._hyper_dt = None
self._hypo_dt = None
self._severe_dt = None
self.logger.info('Setting the metadata')
if metadata is None:
self.metadata = dict()
else:
self.metadata = metadata
# Add initial and end dates to metadata
self.metadata["Init_Date"] = self._base_dataset.iloc[0]['Datetime']
self.metadata["End_Date"] = self._base_dataset.iloc[-1]['Datetime']
self.logger.debug('metadata: {}: '.format(str(self.metadata)))
@property
def language(self):
return self._translator.language
@language.setter
def language(self, language):
self._translator.language = language
self._hyper_dt.translator = self._translator
self._hypo_dt.translator = self._translator
self._severe_dt.translator = self._translator
def fit(self, features=None):
""" Create and fit the decision trees used to extract the patterns """
[data, labels] = pp.prepare_to_decision_trees(self._extended_dataset, features)
start_time = time.time()
self._hyper_dt = DecisionTree(data, labels["Hyperglycemia_Diagnosis_Next_Block"])
self._hypo_dt = DecisionTree(data, labels["Hypoglycemia_Diagnosis_Next_Block"])
self._severe_dt = DecisionTree(data, labels["Severe_Hyperglycemia_Diagnosis_Next_Block"])
        self.logger.debug('Time elapsed fitting the model: {:.4f}'.format(time.time() - start_time))
def generate_report(self, max_impurity=0.3, min_sample_size=0, format="pdf", to_file=True, output_path='',
block_info=True, language=None):
""" Generate a PDF report with the patterns """
if self._hyper_dt is None or self._hypo_dt is None or self._severe_dt is None:
raise NotFittedError("It is necessary to fit the model before generating the report")
env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), '..', 'templates')))
template = env.get_template("report.html")
if "Patient_Name" in self.metadata:
title = '{0}_{1}'.format(self.metadata["Patient_Name"].replace(' ', '_'),
datetime.datetime.now().strftime("%d%m%y_%H%M"))
else:
title = 'Report_{}'.format(datetime.datetime.now().strftime("%d%m%y_%H%M"))
template_vars = {"title": title, "metadata": self.metadata}
if language is not None:
self.language = language
subtitles = self._translator.translate_to_language(['Hyperglycemia_Patterns', 'Hypoglycemia_Patterns',
'Severe_Hyperglycemia_Patterns', 'Pattern',
'Pattern_Report',
'Decision_Trees', 'Hyperglycemia', 'Hypoglycemia',
'Severe_Hyperglycemia', 'Blocks_Information',
'Day_Summary'])
terms = self._translator.translate_to_language(['Samples', 'Impurity', 'Number_Pos', 'Number_Neg'])
template_vars["pattern_title"] = subtitles[3]
template_vars["report_title"] = subtitles[4]
template_vars["decision_trees_title"] = subtitles[5]
template_vars["hyper_dt_title"] = subtitles[6]
template_vars["hypo_dt_title"] = subtitles[7]
template_vars["severe_dt_title"] = subtitles[8]
template_vars["blocks_title"] = subtitles[9]
template_vars["day_summary_title"] = subtitles[10]
template_vars["samples_title"] = terms[0]
template_vars["impurity_title"] = terms[1]
template_vars["number_pos"] = terms[2]
template_vars["number_neg"] = terms[3]
# Hyperglycemia patterns
try:
            patterns = self._hyper_dt.get_patterns(max_impurity=max_impurity, min_sample_size=min_sample_size)
if patterns:
template_vars["hyperglycemia_patterns_title"] = subtitles[0]
template_vars["hyperglycemia_patterns"] = patterns
except ValueError as e:
self.logger.warning("W0011: {0}. {1}".format(subtitles[0], str(e)))
self._warnings.add("W0011")
except Exception as e:
raise Exception('{0} : {1}'.format(subtitles[0], str(e)))
# Hypoglycemia patterns
try:
patterns = self._hypo_dt.get_patterns(max_impurity=max_impurity, min_sample_size=min_sample_size)
if patterns:
template_vars["hypoglycemia_patterns_title"] = subtitles[1]
template_vars["hypoglycemia_patterns"] = patterns
except ValueError as e:
self.logger.warning("W0012: {0}. {1}".format(subtitles[1], str(e)))
self._warnings.add("W0012")
except Exception as e:
raise Exception('{0} : {1}'.format(subtitles[1], str(e)))
# Severe Hyperglycemia patterns
try:
patterns = self._severe_dt.get_patterns(max_impurity=max_impurity, min_sample_size=min_sample_size)
if patterns:
template_vars["severe_hyperglycemia_patterns_title"] = subtitles[2]
template_vars["severe_hyperglycemia_patterns"] = patterns
except ValueError as e:
self.logger.warning("W0013: {0}. {1}".format(subtitles[2], str(e)))
self._warnings.add("W0012")
except Exception as e:
raise Exception('{0} : {1}'.format(subtitles[2], str(e)))
# Add warnings
if self._warnings:
warning_list = ['Warnings']
for warning in self._warnings:
warning_list.append(warning)
warning_list = self._translator.translate_to_language(warning_list)
template_vars["warnings_title"] = warning_list.pop(0)
template_vars["warnings"] = warning_list
# Generate graph images
if "UUID" in self.metadata:
uuid_str = str(self.metadata["UUID"])
elif "Patient_Name" in self.metadata:
uuid_str = str(uuid.uuid3(uuid.NAMESPACE_DNS, self.metadata["Patient_Name"]))
else:
uuid_str = str(uuid.uuid4())
output_path = join(output_path, uuid_str)
try:
os.makedirs(output_path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
hyper_dt_graph_path = join(output_path, 'Hyperglycemia_Tree.png')
hypo_dt_graph_path = join(output_path, 'Hypoglycemia_Tree.png')
severe_dt_graph_path = join(output_path, 'Severe_Hyperglycemia_Tree.png')
self._hyper_dt.graph[0].write_png(hyper_dt_graph_path)
self._hypo_dt.graph[0].write_png(hypo_dt_graph_path)
self._severe_dt.graph[0].write_png(severe_dt_graph_path)
template_vars["hyper_dt_graph_path"] = 'file:///{0}'.format(os.path.abspath(hyper_dt_graph_path))
template_vars["hypo_dt_graph_path"] = 'file:///{0}'.format(os.path.abspath(hypo_dt_graph_path))
template_vars["severe_dt_graph_path"] = 'file:///{0}'.format(os.path.abspath(severe_dt_graph_path))
if block_info:
# Generate graphics of each day
block_section_data = OrderedDict()
carbo_column = next(column_name
for column_name in self.info_blocks.columns
if column_name in ['Carbo_Block_U', 'Carbo_Block_G'])
BlockInfo = namedtuple('BlockInfo', ['block_num', 'carbo', 'rapid_insulin', 'mean', 'std', 'max', 'min'])
DayInfo = namedtuple('DayInfo', ['day', 'plot_path', 'block_data', 'mean', 'std', 'max', 'min', 'mage'])
# Iterate over the days
for day in self.info_blocks["Day_Block"].unique():
block_data = []
# Generate the plot
plot_path = vis.plot_blocks(self._base_dataset, day, self._translator, block_info=self.info_blocks,
to_file=True, output_path=output_path)
day_block_info = self.info_blocks[self.info_blocks["Day_Block"] == day]
# Iterate over the blocks
for index, block in day_block_info.iterrows():
block_data.append(BlockInfo(block["Block"], block[carbo_column],
block["Rapid_Insulin_Block"], block["Glucose_Mean_Block"],
block["Glucose_Std_Block"]
, block["Glucose_Max_Block"], block["Glucose_Min_Block"]))
block_section_data[day] = DayInfo(day, 'file:///{0}'.format(os.path.abspath(plot_path)), block_data,
day_block_info["Glucose_Mean_Day"].iloc[0],
day_block_info["Glucose_Std_Day"].iloc[0],
day_block_info["Glucose_Max_Day"].iloc[0],
day_block_info["Glucose_Min_Day"].iloc[0],
day_block_info["MAGE"].iloc[0])
# Translate labels of blocks section
template_vars["block_section_data"] = block_section_data
carbo_label = 'Carbo_{}'.format(carbo_column[-1])
block_labels = self._translator.translate_to_language(['Block', 'Mean', 'Std', 'Max', 'Min',
carbo_label, 'Rapid_Insulin', 'Glucose_Stats'])
template_vars["block_label"] = block_labels[0]
template_vars["mean_label"] = block_labels[1]
template_vars["std_label"] = block_labels[2]
template_vars["max_label"] = block_labels[3]
template_vars["min_label"] = block_labels[4]
template_vars["carbo_label"] = block_labels[5]
template_vars["rapid_insulin_label"] = block_labels[6]
template_vars["glucose_stats_label"] = block_labels[7]
day_labels = self._translator.translate_to_language(['Glucose_Mean_Day', 'Glucose_Std_Prev_Day',
'Glucose_Max_Prev_Day', 'Glucose_Min_Prev_Day',
'MAGE'])
template_vars["mean_day_label"] = day_labels[0]
template_vars["std_day_label"] = day_labels[1]
template_vars["max_day_label"] = day_labels[2]
template_vars["min_day_label"] = day_labels[3]
template_vars["mage_label"] = day_labels[4]
terms = self._translator.translate_to_language(['Samples', 'Impurity', 'Number_Pos', 'Number_Neg'])
template_vars["pattern_label"] = subtitles[3]
template_vars["report_label"] = subtitles[4]
template_vars["decision_trees_label"] = subtitles[5]
template_vars["hyper_dt_label"] = subtitles[6]
template_vars["hypo_dt_label"] = subtitles[7]
template_vars["severe_dt_label"] = subtitles[8]
template_vars["samples_label"] = terms[0]
template_vars["impurity_label"] = terms[1]
template_vars["language"] = self._translator.language
template_vars["decision_tree_legend_path"] = 'file://{0}'.format(
os.path.abspath(os.path.join(os.path.dirname(__file__), '..','templates',
'decision_tree_color_legend_{}.png'.format(language))))
html_out = template.render(template_vars)
if format == "pdf":
if to_file:
HTML(string=html_out).write_pdf(join(output_path, "{}.pdf".format(title)))
else:
result = HTML(string=html_out).write_pdf()
elif format == "html":
            if to_file:
                with open("{}.html".format(title), 'w') as f:
                    f.write(html_out)
            else:
                result = HTML(string=html_out)
else:
raise ValueError("File format must be pdf or html")
if not to_file:
return result
def _process_data(self, file_paths):
""" Read, preprocess and join all the data files specified in file_paths
:param file_paths: List of strings containing absolute paths to the CSV files
"""
self.logger.info('Data pre-processing started')
start_time = time.time()
to_lang = self._translator.translate_to_language
to_col = self._translator.translate_to_column
self._base_dataset = pd.DataFrame()
self._extended_dataset = pd.DataFrame()
self.info_blocks = pd.DataFrame()
self._warnings = set()
for index, path in enumerate(file_paths):
# Load data
self.logger.info('Reading file {} in path {}'.format(index + 1, path))
try:
raw_data = pd.read_csv(path, header=0, skiprows=1, delimiter="\t", index_col=0,
usecols=list(range(0, 9)),
parse_dates=to_lang(["Datetime"]), decimal=",",
date_parser=lambda x: pd.to_datetime(x, format="%Y/%m/%d %H:%M"))
except Exception as e:
raise IOError("There was an error reading the data file {}: {}".format(path, e))
# Translate column names
self.logger.debug('Columns data file: {}'.format(str(raw_data.columns.values)))
raw_data.columns = (to_col(raw_data.columns))
self.logger.debug('Translated columns: {}'.format(str(raw_data.columns.values)))
# Check anomalies in the data
try:
self.logger.info('Checking period')
pp.check_period(raw_data)
except ValueError as e:
raise DataFormatException(e)
periods = pp.get_valid_periods(raw_data)
for index, period in enumerate(periods):
# Divide periods in blocks, extend dataset and clean data
self.logger.info('Defining blocks of period')
self.logger.debug('Duration of raw period {} : {}'.format(index + 1,
(period.iloc[-1]['Datetime'] - period.iloc[0]['Datetime'])))
# Check anomalies in the period
try:
self.logger.info('Checking period')
self._warnings = self._warnings.union(pp.check_period(period))
except ValueError as e:
# Discard period if it does not fulfil the requirements
self.logger.debug('Period {} discarded after checking values'.format(index + 1))
continue
block_data = pp.define_blocks(period)
self.logger.info('Adding features to dataset')
extended_data = pp.extend_data(block_data)
cleaned_extended_data = pp.clean_extended_data(extended_data)
# Discard period if it is empty after cleaning
if cleaned_extended_data.empty:
self.logger.debug('Period {} discarded after preprocessing'.format(index+1))
continue
self.logger.debug('Duration of period {} after preprocessing: {}'.format(index + 1,
(cleaned_extended_data.iloc[-1]['Datetime'] - cleaned_extended_data.iloc[0]['Datetime'])))
# block_data.to_csv(path_or_buf='block_data.csv')
# extended_data.to_csv(path_or_buf='extended_data.csv')
# cleaned_extended_data.to_csv(path_or_buf='cleaned_extended_data.csv')
# Join meal time
# Get name of carbo column
carbo_column = next(
column_name for column_name in extended_data.columns if column_name in ['Carbo_Block_U',
'Carbo_Block_G'])
info_blocks = extended_data[['Day_Block', 'Block', 'Block_Meal',
carbo_column, 'Rapid_Insulin_Block',
'Glucose_Mean_Block', 'Glucose_Std_Block', 'Glucose_Max_Block',
'Glucose_Min_Block', 'Glucose_Mean_Day', 'Glucose_Std_Day',
'Glucose_Max_Day', 'Glucose_Min_Day', 'MAGE']].drop_duplicates(
subset=['Day_Block', 'Block'])
# Append to raw_data and main dataset
self._base_dataset = self._base_dataset.append(block_data, ignore_index=True)
self._extended_dataset = self._extended_dataset.append(cleaned_extended_data, ignore_index=True)
self.info_blocks = self.info_blocks.append(info_blocks, ignore_index=True)
self.logger.info("Data file has been preprocessed and appended to main dataset")
self.logger.info('Data pre-processing finished')
self.logger.debug('Time process data: {:.4f} seconds'.format(time.time() - start_time))
class DataFormatException(ValueError):
"""Raised when the format of the data file is not the one expected"""
pass
class NotFittedError(ValueError, AttributeError):
"""Raised when the model decisions trees have not been created"""
pass
|
import discord
def error(message):
    # discord.Embed has no `thumbnail`/`message` kwargs and Color.from_rgb() needs
    # explicit r, g, b values; build the embed properly and return it (red is an illustrative choice).
    embed = discord.Embed(
        color=discord.Color.from_rgb(255, 0, 0),
        description=str(message),
    )
    embed.set_thumbnail(
        url="https://github.com/yashppawar/alfred-discord-bot/blob/replit/error.png?raw=true"
    )
    return embed
def requirements():
return ""
def main(client):
pass
|
import functools
class MatchException(Exception):
def __init__(self):
pass
class Match:
def __init__(self, matching_code_container, node_matcher, match_index=0):
self.matching_code_container = matching_code_container
self.node_matcher = node_matcher
self._match_index = match_index
def get_variables_substitution_dictionaries(self, lhs_graph, rhs_graph):
"""
Looks for sub-isomorphisms of rhs into lhs
:param lhs_graph: The graph to look sub-isomorphisms into (the bigger graph)
:param rhs_graph: The smaller graph
:return: The list of matching names
"""
if not rhs_graph:
return {}, {}, {}
self.matching_code_container.add_graph_to_namespace(lhs_graph)
self.matching_code_container.add_graph_to_namespace(rhs_graph)
return self.__collect_variables_that_match_graph(lhs_graph, rhs_graph)
def __collect_variables_that_match_graph(self, lhs_graph, rhs_graph):
match_info = {}
self._vertices_substitution_list = []
self._edges_substitution_list = []
self._is_match = False
lhs_graph.subisomorphic_vf2(other=rhs_graph,
node_compat_fn=self.__node_compare,
edge_compat_fn=self.__edge_compare,
callback=self.__callback)
if not self._is_match:
raise MatchException()
match_info['__RESULT__'] = self._is_match
max_return_length = len(self._vertices_substitution_list)
return self._vertices_substitution_list[self._match_index%max_return_length], \
self._edges_substitution_list[self._match_index%max_return_length], \
match_info
    def __substitute_names_in_list(self, lst, substitution_dict):
        # Replace every name that has a substitution; names without one are left unchanged.
        for i, name in enumerate(lst):
            try:
                lst[i] = substitution_dict[name]
            except KeyError:
                pass
        return lst
@functools.lru_cache(10)
def __node_compare(self, lhs_graph, rhs_graph,
lhs_graph_index, rhs_graph_index):
lhs_attr = lhs_graph.vs[lhs_graph_index].attributes()
rhs_attr = rhs_graph.vs[rhs_graph_index].attributes()
lhs_name = lhs_attr.pop('name')
rhs_name = rhs_attr.pop('name')
if not self.matching_code_container.execute({lhs_name: rhs_name}):
return False
rhs_attr = {k: v for k, v in rhs_attr.items() if v}
if self.node_matcher.left_contains_right(rhs_attr, lhs_attr):
return True
return False
@functools.lru_cache(10)
def __edge_compare(self, lhs_graph, rhs_graph,
lhs_graph_index, rhs_graph_index):
lhs_attr = lhs_graph.es[lhs_graph_index].attributes()
rhs_attr = rhs_graph.es[rhs_graph_index].attributes()
lhs_name = lhs_attr.pop('name')
rhs_name = rhs_attr.pop('name')
if not self.matching_code_container.execute({lhs_name: rhs_name}):
return False
rhs_attr = {k: v for k, v in rhs_attr.items() if v}
if self.node_matcher.left_contains_right(rhs_attr, lhs_attr):
return True
return False
def __callback(self, lhs_graph, rhs_graph, map12, map21):
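        # igraph's subisomorphic_vf2 callback supplies map12/map21: map12[i] is the index of
        # the rhs_graph vertex matched to lhs_graph vertex i, or -1 if vertex i is not part
        # of the current sub-isomorphism.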
vertices_substitution_dict = {}
edges_substitution_dict = {}
if all([item == -1 for item in map12]):
return False
for lhs, rhs in enumerate(map12):
if rhs == -1:
continue
lhs_name = lhs_graph.vs[lhs]['name']
rhs_name = rhs_graph.vs[rhs]['name']
vertices_substitution_dict[rhs_name] = lhs_name
for rhs_edge in rhs_graph.es:
source_target_list = self.__substitute_names_in_list([rhs_graph.vs[rhs_edge.tuple[0]]['name'],
rhs_graph.vs[rhs_edge.tuple[1]]['name']],
vertices_substitution_dict)
source_index = lhs_graph.vs.select(name=source_target_list[0])[0].index
target_index = lhs_graph.vs.select(name=source_target_list[1])[0].index
lhs_edges = lhs_graph.es.select(_source=source_index, _target=target_index)
for lhs_edge in lhs_edges:
lhs_name = lhs_edge['name']
rhs_name = rhs_edge['name']
edges_substitution_dict[rhs_name] = lhs_name
self._is_match = True
self._vertices_substitution_list.append(vertices_substitution_dict)
self._edges_substitution_list.append(edges_substitution_dict)
return True
|
import os
from django.conf import settings
from email.mime.image import MIMEImage
from datetime import date
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
def send_thanks_email(first_name, last_name, email, donation):
strFrom = settings.EMAIL_FROM
ctx = {
'first_name': first_name,
'last_name': last_name,
'email_address': email,
'donation': str(donation),
'date': date.today()}
html_content = render_to_string('mail.html', context=ctx).strip()
subject = 'Thank you for your donation to Funsepa!'
recipients = [email]
reply_to = [settings.EMAIL_REPLY_TO]
msg = EmailMultiAlternatives(subject, html_content, strFrom, recipients, reply_to=reply_to)
msg.content_subtype = 'html'
msg.mixed_subtype = 'related'
image_list = [
'logo.jpg',
'foto.jpg',
'fb.png',
'ig.png',
'link.png',
'tw.png',
'yt.png',
'snap.png']
    for img in image_list:
        with open(os.path.join(settings.BASE_DIR, 'pagos/static/img/email/' + img), 'rb') as fp:
            image = MIMEImage(fp.read())
            image.add_header('Content-ID', '<{}>'.format(os.path.basename(fp.name)))
            msg.attach(image)
msg.send()
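# Illustrative call (the values below are hypothetical):
#   send_thanks_email('Ada', 'Lovelace', '[email protected]', donation=150)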
|
from math import log, pow
import numpy as np
from scipy import stats
def shannon_entropy(p):
return stats.entropy(p)
def kl_divergence(p, q):
"""Standard KL divergence."""
return stats.entropy(p, q)
def q_divergence(q):
"""Returns the q-divergence function corresponding to the parameter value q."""
if q == 0:
def d(x, y):
return 0.5 * np.dot((x - y), (x - y))
return d
if q == 1:
return kl_divergence
if q == 2:
def d(x,y):
s = 0.
for i in range(len(x)):
s += log(x[i] / y[i]) + 1 - x[i] / y[i]
return -s
return d
q = float(q)
def d(x, y):
s = 0.
for i in range(len(x)):
s += (pow(y[i], 2 - q) - pow(x[i], 2 - q)) / (2 - q)
s -= pow(y[i], 1 - q) * (y[i] - x[i])
s = -s / (1 - q)
return s
return d
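# A minimal usage sketch (not part of the original module): q_divergence(1) simply
# returns kl_divergence, so both calls below print the same value.
if __name__ == "__main__":
    p = np.array([0.2, 0.3, 0.5])
    r = np.array([0.25, 0.25, 0.5])
    print(kl_divergence(p, r))
    print(q_divergence(1)(p, r))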
|
import re
from .. import slack_manager
@slack_manager.on('app_mention')
async def reply_message(sender, data, **extra):
event = data['event']
if re.search(r'\blife\b', event['text'], re.I):
text = 'Life, don\'t talk to me about life'
else:
text = f":robot_face: knock, knock, knock, <@{event['user']}>"
sender.api_call(
'chat.postMessage',
channel=event['channel'],
thread_ts=event['ts'],
text=text)
|
termo = int(input('First term: '))
razao = int(input('Common difference: '))
decimo = 0
while decimo != 10:
print(termo, end=' ')
termo += razao
decimo += 1
|
"""
Trigger an automation when a LiteJet switch is released.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/automation.litejet/
"""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import CONF_PLATFORM
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['litejet']
_LOGGER = logging.getLogger(__name__)
CONF_NUMBER = 'number'
TRIGGER_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'litejet',
vol.Required(CONF_NUMBER): cv.positive_int
})
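# Example automation configuration accepted by TRIGGER_SCHEMA (the switch number is
# illustrative only):
#
#   automation:
#     trigger:
#       platform: litejet
#       number: 4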
def async_trigger(hass, config, action):
"""Listen for events based on configuration."""
number = config.get(CONF_NUMBER)
@callback
def call_action():
"""Call action with right context."""
hass.async_run_job(action, {
'trigger': {
CONF_PLATFORM: 'litejet',
CONF_NUMBER: number
},
})
hass.data['litejet_system'].on_switch_released(number, call_action)
|
import socket, hashlib, zlib, json, time, struct, traceback
from cryptography.fernet import Fernet
from DataModel import DataModel
class RestfulApiClient(object):
def __init__(self, addr = '127.0.0.1', port = 32002):
self.addr, self.port, self.cookie = addr, port, None
def Login(self, usr, pwd):
try:
return self.sendAndWaitRsp({"login":{"usr":usr, "pwd":pwd}})
except Exception as e:
traceback.print_exc()
return None
def QueryPms(self):
try:
return self.sendAndWaitRsp({"query_pms":{"cookie":DataModel.Instance().GetCookie()}})
except Exception as e:
print(e)
return None
def QueryMi(self, prj, module):
try:
return self.sendAndWaitRsp({"query_mi":{"cookie":DataModel.Instance().GetCookie(), "project":prj, "module":module}})
except Exception as e:
traceback.print_exc()
return None
def QueryUpdateDetail(self, prj, module, ver):
try:
return self.sendAndWaitRsp({"query_update_detail":{"cookie":DataModel.Instance().GetCookie(), "project":prj, "module":module, "version":ver}})
except Exception as e:
traceback.print_exc()
def QueryServerInfo(self, rids):
try:
return self.sendAndWaitRsp({"query_server":{"cookie":DataModel.Instance().GetCookie(), "rids":rids}})
except Exception as e:
traceback.print_exc()
def UpdateServerInfo(self, si):
return self.sendAndWaitRsp({"update_server":{"cookie":DataModel.Instance().GetCookie(),"si":si}})
def DeleteServers(self, rids):
return self.sendAndWaitRsp({"delete_servers":{"cookie":DataModel.Instance().GetCookie(),"rids":rids}})
def Publish(self, prj, module, ver, gids, detail, code, hash, url):
return self.sendAndWaitRsp({"publish":{"cookie":DataModel.Instance().GetCookie(), "project":prj, "module":module, "version":ver, "gids":gids, "detail":detail, "code":code, "hash":hash, "url":url}})
def sendAndWaitRsp(self, data):
try:
buf = json.dumps(data).encode()
hash = zlib.crc32(buf, 0x31)
enKey = Fernet.generate_key()
enBuf = Fernet(enKey).encrypt(buf)
enHash = zlib.crc32(enBuf, 0x4f)
with socket.socket() as s:
s.connect((self.addr, self.port))
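                # Request framing (inferred from the pack/unpack calls in this class): a
                # 20-byte little-endian header of five uint32 values -- encrypted-payload
                # length, protocol version (1), CRC32 of the encrypted payload, CRC32 of the
                # plaintext JSON, key length -- followed by the Fernet key and the payload.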
s.sendall(struct.pack("<5I", len(enBuf), 1, enHash, hash, len(enKey)) + enKey + enBuf)
data = self.__Read(s, 20) # header
if data is None:return
length, ver, enHash, hash, leEnKey = struct.unpack("<5I", data)
data = self.__Read(s, length + leEnKey)
if data is None:return
enKey = data[:leEnKey]
data = data[leEnKey:]
if enHash != zlib.crc32(data, 0x32):return None
deData = Fernet(enKey).decrypt(data)
if hash != zlib.crc32(deData, 0x50):return None
return json.loads(deData.decode())
except Exception as e:
traceback.print_exc()
return None
def __Read(self, s, length):
try:
data, tempdata = b'', b''
while len(data) < length:
tempdata = s.recv(length - len(data))
if len(tempdata) < 1: return None
data += tempdata
return data
except Exception as e:
traceback.print_exc()
return None |
#Memory Reallocation
#Advent of Code 2017 Day 6b
import numpy as np
input = "4 1 15 12 0 9 9 5 5 8 7 3 14 5 12 3"
# The puzzle input above is whitespace-separated, so split() (any whitespace) is safer
# than splitting on a literal tab.
banks = [int(x) for x in input.split()]
#banks = [0, 2, 7, 0]
steps = 0
previous = []
matched = False
while matched == False:
#store current config in history
previous.append( np.copy( banks ) )
#Find the biggest bank
biggest = 0
for x in range( 0, len(banks) ):
if( banks[x] > banks[biggest] ):
biggest = x
#Take blocks out of biggest
toGive = banks[biggest]
banks[biggest]=0
current = biggest
#redistribute
while toGive > 0 :
current += 1
if( current >= len( banks ) ):
current = 0
banks[current] += 1
toGive -=1
steps += 1
#check for previous matches
#for bank in previous:
for x in range( 0,len(previous) ):
if np.array_equal( previous[x], banks ):
print( str(previous[x]) + "..." + str(banks) )
distance = len(previous) - x
matched = True
print("Finished: " + str(steps) )
#for( x in range( 0, len(previous) ) )
# if np.array
print("Steps in loop: " + str(distance) )
|
n1 = int(input('First value: '))
n2 = int(input('Second value: '))
print(f'The sum of {n1} and {n2} is {n1 + n2}')
|
# Generated by Django 2.1.7 on 2020-10-25 09:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0013_auto_20201023_1513'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='rank_id_list',
),
]
|
from django.contrib.auth import logout
from django.http import HttpResponse
from django.shortcuts import render # noqa
from django.views.generic import View
from rest_framework import viewsets
from rest_framework.authtoken.models import Token
from users.models import User
from users.serializers import UserSerializer
class HomeView(View):
def get(self, request, *args, **kwargs):
response = render(request, 'repositories/index.html')
if request.user.is_authenticated:
token, _ = Token.objects.get_or_create(user=request.user)
response.set_cookie(key='rfeedtoken', value=token.key)
return response
response.delete_cookie('rfeedtoken')
return response
class LogoutView(View):
def post(self, request, *args, **kwargs):
logout(request)
return HttpResponse(status=200)
class UserViewSet(viewsets.ReadOnlyModelViewSet): # pylint: disable=too-many-ancestors
"""
API endpoint that allows users to be viewed.
"""
queryset = User.objects.all()
serializer_class = UserSerializer
|
import boto3
from .buckets import get_s3_buckets
def search_buckets(key, search_key="", search_bucket=""):
session = boto3.Session(aws_access_key_id=key.key,
aws_secret_access_key=key.secret)
list_files = []
for bucket in get_s3_buckets(session):
if not search_bucket or search_bucket == bucket['Name']:
client = session.client('s3')
            # Empty buckets have no 'Contents' key, so fall back to an empty list; also
            # avoid shadowing the built-in name `object`.
            for obj in client.list_objects(Bucket=bucket['Name']).get('Contents', []):
                if obj['Key'].split("/")[-1].find(search_key) != -1:
                    obj['Bucket'] = bucket['Name']
                    list_files.append(obj)
return { "result": list_files }
|
#!/usr/bin/python3.4
# -*- coding=utf-8 -*-
# This script was written by the current "Ming Cult Leader" of Qianyitang for the Qianyidun Python course!
# Author QQ: 605658506
# Qianyitang official site: www.qytang.com
# Qianyidun is a comprehensive security course developed by the current "Ming Cult Leader" of Qianyitang,
# covering traditional network security (firewalls, IPS, ...) as well as Python and penetration-testing material!
import sys
sys.path.append('/usr/local/lib/python3.4/dist-packages/PyQYT/ExtentionPackages')
sys.path.append('/usr/lib/python3.4/site-packages/PyQYT/ExtentionPackages')
sys.path.append('../../ExtentionPackages')
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)#清除报错
from scapy.all import *
#from GET_IP import get_ip_address  # get the local IP address
from PyQYT.Network.Tools.GET_IP import get_ip_address  # get the local IP address
from PyQYT.Network.Tools.GET_MAC import get_mac_address  # get the local MAC address
import optparse
#test github
# Get the MAC address of a given IP; the interface used to send the ARP request must be specified
def get_arp(ip_address, ifname):
#localip = get_ip_address(ifname)
    # Get the local IP address
localip = get_ip_address(ifname)
    # Get the local MAC address
localmac = get_mac_address(ifname)
    # Send the ARP request and wait for the answer
result_raw = srp(Ether(src=localmac, dst='FF:FF:FF:FF:FF:FF')/ARP(op=1, hwsrc=localmac, hwdst='00:00:00:00:00:00', psrc=localip, pdst=ip_address), iface = ifname, verbose = False)
    # Turn the answered request/response packet pairs into a list
result_list = result_raw[0].res
    # [0]: the first answered packet pair
    # [1]: the received packet ([0] would be the packet that was sent)
    # ['hwsrc'] field of the ARP header, returned as the result
return result_list[0][1][1].fields['hwsrc']
if __name__ == "__main__":
    parser = optparse.OptionParser('Usage:\n python3 ARP_def.py --ip <target IP> --ifname <local interface>')
    parser.add_option('--ip', dest = 'ip', type = 'string', help = 'Target IP address to query')
    parser.add_option('--ifname', dest = 'ifname', type = 'string', help = 'Local interface used to send the ARP request')
(options, args) = parser.parse_args()
ip = options.ip
ifname = options.ifname
if ip == None or ifname == None:
print(parser.usage)
else:
        print('IP address: ' + ip + '  MAC address: ' + get_arp(ip, ifname))
|
# If a duplicated line can occur at the end of the file, it has to end with a newline, e.g.  # noqa
# "jezeli\r\n
# jezeli" is not the same, but
# "jezeli\r\n
# jezeli\r\n" is a duplicate; use a regex to append \r\n
import os
sciezka = input("Enter the path: ")
for subdir, dirs, files in os.walk(sciezka):
for file in files:
if file == "opis.txt":
lines_seen = set() # holds lines already seen
path = os.path.join(subdir, file)
target = "opis2.txt"
new_file = os.path.join(subdir, target)
with open(path, "r") as f:
with open(new_file, "w") as outfile:
for line in f:
if line not in lines_seen: # not a duplicate
outfile.write(line)
lines_seen.add(line)
outfile.close()
lines_seen = None
|
"""
Program 084
Study area.
Date 30.11.2020 (undefined) hrs
@Author: Abraão A. Silva
"""
# Library that handles timing.
import time
# Ballot-box header.
print('Eleições Presidenciais'.center(35)+'\n',
'-'*30+'\n',
'João Pé de Feijão'+'_'*10+'1'.rjust(3)+'\n',
'Inácio Pelota'+'_'*14+'2'.rjust(3)+'\n',
'Igor Ferradura'+'_'*13+'3'.rjust(3)+'\n',
'Jonas Vai e Vem'+'_'*12+'4'.rjust(3)+'\n',
'-'*30+'\n',
)
# Variable initialization.
brancos = nulos = total_votos = 0
votos_1 = votos_2 = votos_3 = votos_4 = 0
while True:
    voto = int(input('Voto.: '))  # Collect the vote.
    # Loop exit condition.
if voto == 1545:
print('Finalizando urna...')
time.sleep(3)
break
total_votos += 1
if voto == 1:
votos_1 += 1
elif voto == 2:
votos_2 += 1
elif voto == 3:
votos_3 += 1
elif voto == 4:
votos_4 += 1
elif voto == 5:
brancos += 1
elif voto == 6:
nulos += 1
else:
print('Candidato inválido.')
total_votos -= 1
# Percentage calculations.
percentagem_nulos = (nulos * 100) / total_votos
percentagem_brancos = (brancos * 100) / total_votos
# Program report.
print()
print('Candidato'.rjust(13)+'Votos'.rjust(20))
print('\n',
'-'*32+'\n',
'João Pé de Feijão'+f'{votos_1}'.rjust(13)+'\n',
'Inácio Pelota'+f'{votos_2}'.rjust(17)+'\n',
'Igor Ferradura'+f'{votos_3}'.rjust(16)+'\n',
'Jonas Vai e Vem'+f'{votos_4}'.rjust(15)+'\n',
'-'*32+'\n',
)
print('\n',
'Brancos'+f'{brancos}'.rjust(13)+'\n',
'Nulos'+f'{nulos}'.rjust(15)+'\n',
)
print('\n',
'Percentual Brancos'+f'{percentagem_brancos:.1f}'.rjust(15)+'\n',
'Percentual Nulos'+f'{percentagem_nulos:.1f}'.rjust(17)+'\n',
)
|
# Copyright (c) 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from trove.datastore import models as datastore_models
from trove.datastore.models import Capability
from trove.datastore.models import Datastore
from trove.datastore.models import DatastoreVersion
from trove.datastore.models import DatastoreVersionMetadata
from trove.datastore.models import DBCapabilityOverrides
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
class TestDatastoreBase(trove_testtools.TestCase):
def setUp(self):
# Basic setup and mock/fake structures for testing only
super(TestDatastoreBase, self).setUp()
util.init_db()
self.rand_id = str(uuid.uuid4())
self.ds_name = "my-test-datastore" + self.rand_id
self.ds_version = "my-test-version" + self.rand_id
self.capability_name = "root_on_create" + self.rand_id
self.capability_desc = "Enables root on create"
self.capability_enabled = True
self.datastore_version_id = str(uuid.uuid4())
self.flavor_id = 1
self.volume_type = 'some-valid-volume-type'
datastore_models.update_datastore(self.ds_name, False)
self.datastore = Datastore.load(self.ds_name)
datastore_models.update_datastore_version(
self.ds_name, self.ds_version, "mysql", "", "", True)
DatastoreVersionMetadata.add_datastore_version_flavor_association(
self.ds_name, self.ds_version, [self.flavor_id])
DatastoreVersionMetadata.add_datastore_version_volume_type_association(
self.ds_name, self.ds_version, [self.volume_type])
self.datastore_version = DatastoreVersion.load(self.datastore,
self.ds_version)
self.test_id = self.datastore_version.id
self.cap1 = Capability.create(self.capability_name,
self.capability_desc, True)
self.cap2 = Capability.create("require_volume" + self.rand_id,
"Require external volume", True)
self.cap3 = Capability.create("test_capability" + self.rand_id,
"Test capability", False)
def tearDown(self):
super(TestDatastoreBase, self).tearDown()
capabilities_overridden = DBCapabilityOverrides.find_all(
datastore_version_id=self.datastore_version.id).all()
for ce in capabilities_overridden:
ce.delete()
self.cap1.delete()
self.cap2.delete()
self.cap3.delete()
datastore = datastore_models.Datastore.load(self.ds_name)
ds_version = datastore_models.DatastoreVersion.load(datastore,
self.ds_version)
datastore_models.DBDatastoreVersionMetadata.find_by(
datastore_version_id=ds_version.id).delete()
Datastore.load(self.ds_name).delete()
def capability_name_filter(self, capabilities):
new_capabilities = []
for capability in capabilities:
if self.rand_id in capability.name:
new_capabilities.append(capability)
return new_capabilities
|
from rest_framework import serializers
from .models import Archive
from grandchallenge.cases.serializers import ImageSerializer
class ArchiveSerializer(serializers.ModelSerializer):
images = ImageSerializer(read_only=True, many=True)
class Meta:
model = Archive
fields = ("id", "name", "images")
|
import copy
import numbers
import os.path
import warnings
from collections import namedtuple
import audioread
import librosa
import numpy as np
import scipy.io.wavfile as wav
import scipy
from scipy.signal import check_COLA
import soundfile as sf
import pyloudnorm
from . import constants
from . import utils
from . import masks
from . import effects
__all__ = ['AudioSignal', 'STFTParams', 'AudioSignalException']
STFTParams = namedtuple('STFTParams',
['window_length', 'hop_length', 'window_type']
)
STFTParams.__new__.__defaults__ = (None,) * len(STFTParams._fields)
"""
STFTParams object is a container that holds STFT parameters - window_length,
hop_length, and window_type. Not all parameters need to be specified. Ones that
are not specified will be inferred by the AudioSignal parameters and the settings
in `nussl.core.constants`.
"""
class AudioSignal(object):
"""
**Overview**
:class:`AudioSignal` is the main entry and exit point for all source separation algorithms
in *nussl*. The :class:`AudioSignal` class is a general container for all things related to
audio data. It contains utilities for:
* Input and output from an array or from a file,
* Time-series and frequency domain manipulation,
* Plotting and visualizing,
* Playing audio within a terminal or jupyter notebook,
* Applying a mask to estimate signals
and more. The :class:`AudioSignal` class is used in all source separation objects in *nussl*.
:class:`AudioSignal` object stores time-series audio data as a 2D ``numpy`` array in
:attr:`audio_data` (see :attr:`audio_data` for details) and stores Short-Time Fourier Transform
    data as a 3D ``numpy`` array in :attr:`stft_data` (see :attr:`stft_data` for details).
**Initialization**
There are a few options for initializing an :class:`AudioSignal` object. The first is to
initialize an empty :class:`AudioSignal` object, with no parameters:
>>> import nussl
>>> signal = nussl.AudioSignal()
In this case, there is no data stored in :attr:`audio_data` or in :attr:`stft_data`, though
these attributes can be updated at any time after the object has been created.
Additionally, an :class:`AudioSignal` object can be loaded with exactly one of the following:
1. A path to an input audio file (see :func:`load_audio_from_file` for details).
2. A `numpy` array of 1D or 2D real-valued time-series audio data.
3. A `numpy` array of 2D or 3D complex-valued time-frequency STFT data.
:class:`AudioSignal` will throw an error if it is initialized with more than one of the
previous at once.
Here are examples of all three of these cases:
.. code-block:: python
:linenos:
import numpy as np
import nussl
# Initializing an empty AudioSignal object:
sig_empty = nussl.AudioSignal()
# Initializing from a path:
file_path = 'my/awesome/mixture.wav'
sig_path = nussl.AudioSignal(file_path)
# Initializing with a 1D or 2D numpy array containing audio data:
aud_1d = np.sin(np.linspace(0.0, 1.0, 48000))
sig_1d = nussl.AudioSignal(audio_data_array=aud_1d, sample_rate=48000)
# FYI: The shape doesn't matter, nussl will correct for it
aud_2d = np.array([aud_1d, -2 * aud_1d])
sig_2d = nussl.AudioSignal(audio_data_array=aud_2d)
# Initializing with a 2D or 3D numpy array containing STFT data:
stft_2d = np.random.rand((513, 300)) + 1j * np.random.rand((513, 300))
sig_stft_2d = nussl.AudioSignal(stft=stft_2d)
# Two channels of STFT data:
stft_3d = nussl.utils.complex_randn((513, 300, 2))
sig_stft_3d = nussl.AudioSignal(stft=stft_3d)
# Initializing with more than one of the above methods will raise an exception:
sig_exception = nussl.AudioSignal(audio_data_array=aud_2d, stft=stft_2d)
When initializing from a path, :class:`AudioSignal` can read many types of audio files,
provided that your computer has the backends installed to understand the corresponding codecs.
*nussl* uses ``librosa``'s `load` function to read in audio data. See librosa's documentation
for details: https://github.com/librosa/librosa#audioread
Once initialized with a single type of data (time-series or time-frequency), there are methods
to compute an STFT from time-series data (:func:`stft`) and vice versa (:func:`istft`).
**Sample Rate**
The sample rate of an :class:`AudioSignal` object is set upon initialization. If initializing
from a path, the sample rate of the :class:`AudioSignal` object inherits the native sample
rate from the file. If initialized with an audio or stft data array, the sample rate is passed
in as an optional argument. In these cases, with no sample rate explicitly defined, the default
sample rate is 44.1 kHz (CD quality). If this argument is provided when reading from a file
and the provided sample rate does not match the native sample rate of the file,
:class:`AudioSignal` will resample the data from the file so that it matches the provided
sample rate.
Notes:
There is no guarantee that data in :attr:`audio_data` corresponds to data in
:attr:`stft_data`. E.g., when an :class:`AudioSignal` object is initialized with
:attr:`audio_data` of an audio mixture, its :attr:`stft_data` is ``None`` until :func:`stft`
is called. Once :func:`stft` is called and a mask is applied to :attr:`stft_data` (via some
algorithm), the :attr:`audio_data` in this :class:`AudioSignal` object still contains data
from the original mixture that it was initialized with even though :attr:`stft_data`
contains altered data. (To hear the results, simply call :func:`istft` on the
:class:`AudioSignal` object.) It is up to the user to keep track of the contents of
:attr:`audio_data` and :attr:`stft_data`.
See Also:
For a walk-through of AudioSignal features, see :ref:`audio_signal_basics` and
:ref:`audio_signal_stft`.
Arguments:
path_to_input_file (``str``): Path to an input file to load upon initialization. Audio
gets loaded into :attr:`audio_data`.
audio_data_array (:obj:`np.ndarray`): 1D or 2D numpy array containing a real-valued,
time-series representation of the audio.
stft (:obj:`np.ndarray`): 2D or 3D numpy array containing pre-computed complex-valued STFT
data.
label (``str``): A label for this :class:`AudioSignal` object.
offset (``float``): Starting point of the section to be extracted (in seconds) if
initializing from a file.
duration (``float``): Length of the signal to read from the file (in seconds). Defaults to
full length of the signal (i.e., ``None``).
sample_rate (``int``): Sampling rate of this :class:`AudioSignal` object.
Attributes:
path_to_input_file (``str``): Path to the input file. ``None`` if this AudioSignal never
loaded a file, i.e., initialized with a ``np.ndarray``.
label (``str``): A user-definable label for this :class:`AudioSignal` object.
applied_effects (``list`` of ``effects.FilterFunction``): Effects applied to this
:class:`AudioSignal` object. For more information, see apply_effects.
effects_chain (``list`` of ``effects.FilterFunction``): Effects queues to be applied to
this :class:`AudioSignal` object. For more information, see apply_effects.
"""
def __init__(self, path_to_input_file=None, audio_data_array=None, stft=None, label=None,
sample_rate=None, stft_params=None, offset=0, duration=None):
self.path_to_input_file = path_to_input_file
self._audio_data = None
self.original_signal_length = None
self._stft_data = None
self._sample_rate = None
self._active_start = None
self._active_end = None
self.label = label
# Assert that this object was only initialized in one way
got_path = path_to_input_file is not None
got_audio_array = audio_data_array is not None
got_stft = stft is not None
init_inputs = np.array([got_path, got_audio_array, got_stft])
# noinspection PyPep8
if len(init_inputs[init_inputs == True]) > 1: # ignore inspection for clarity
raise AudioSignalException('Can only initialize AudioSignal object with one and only '
'one of {path, audio, stft}!')
if path_to_input_file is not None:
self.load_audio_from_file(self.path_to_input_file, offset, duration, sample_rate)
elif audio_data_array is not None:
self.load_audio_from_array(audio_data_array, sample_rate)
if self._sample_rate is None:
self._sample_rate = constants.DEFAULT_SAMPLE_RATE \
if sample_rate is None else sample_rate
self.stft_data = stft # complex spectrogram data
self.stft_params = stft_params
# Effects
self._effects_chain = []
self._effects_applied = []
def __str__(self):
dur = f'{self.signal_duration:0.3f}' if self.signal_duration else '[unknown]'
return (
f"{self.__class__.__name__} "
f"({self.label if self.label else 'unlabeled'}): "
f"{dur} sec @ "
f"{self.path_to_input_file if self.path_to_input_file else 'path unknown'}, "
f"{self.sample_rate if self.sample_rate else '[unknown]'} Hz, "
f"{self.num_channels if self.num_channels else '[unknown]'} ch."
)
##################################################
# Properties
##################################################
@property
def signal_length(self):
"""
``int``
Number of samples in the active region of :attr:`audio_data`.
The length of the audio signal represented by this object in samples.
See Also:
* :func:`signal_duration` for the signal duration in seconds.
* :func:`set_active_region_to_default` for information about active regions.
"""
if self.audio_data is None:
return self.original_signal_length
return self.audio_data.shape[constants.LEN_INDEX]
@property
def signal_duration(self):
"""
``float``
Duration of the active region of :attr:`audio_data` in seconds.
The length of the audio signal represented by this object in seconds.
See Also:
* :func:`signal_length` for the signal length in samples.
* :func:`set_active_region_to_default` for information about active regions.
"""
if self.signal_length is None:
return None
return self.signal_length / self.sample_rate
@property
def num_channels(self):
"""
``int``
Number of channels this :class:`AudioSignal` has.
Defaults to returning number of channels in :attr:`audio_data`. If that is ``None``,
returns number of channels in :attr:`stft_data`. If both are ``None`` then returns
``None``.
See Also:
* :func:`is_mono`
* :func:`is_stereo`
"""
# TODO: what about a mismatch between audio_data and stft_data??
if self.audio_data is not None:
return self.audio_data.shape[constants.CHAN_INDEX]
if self.stft_data is not None:
return self.stft_data.shape[constants.STFT_CHAN_INDEX]
return None
@property
def is_mono(self):
"""
``bool``
Whether or not this signal is mono (i.e., has exactly **one** channel). First
looks at :attr:`audio_data`, then (if that's ``None``) looks at :attr:`stft_data`.
See Also:
* :func:`num_channels`
* :func:`is_stereo`
"""
return self.num_channels == 1
@property
def is_stereo(self):
"""
``bool``
Whether or not this signal is stereo (i.e., has exactly **two** channels). First
looks at :attr:`audio_data`, then (if that's ``None``) looks at :attr:`stft_data`.
See Also:
* :func:`num_channels`
* :func:`is_mono`
"""
return self.num_channels == 2
@property
def audio_data(self):
"""
``np.ndarray``
Stored as a ``numpy`` :obj:`np.ndarray`, :attr:`audio_data` houses the raw, uncompressed
time-domain audio data in the :class:`AudioSignal`. Audio data is stored with shape
``(n_channels, n_samples)`` as an array of floats.
``None`` by default, can be initialized upon object instantiation or set at any time by
accessing this attribute or calling :func:`load_audio_from_array`. It is recommended to
set :attr:`audio_data` by using :func:`load_audio_from_array` if this
:class:`AudioSignal` has been initialized without any audio or STFT data.
Raises:
:class:`AudioSignalException`
If set incorrectly, will raise an error. Expects a real, finite-valued 1D or 2D
``numpy`` :obj:`np.ndarray`-typed array.
Warnings:
:attr:`audio_data` and :attr:`stft_data` are not automatically synchronized, meaning
that if one of them is changed, those changes are not instantly reflected in the other.
To propagate changes, either call :func:`stft` or :func:`istft`.
Notes:
* This attribute only returns values within the active region. For more information
see :func:`set_active_region_to_default`. When setting this attribute, the active
region are reset to default.
* If :attr:`audio_data` is set with an improperly transposed array, it will
automatically transpose it so that it is set the expected way. A warning will be
displayed on the console.
See Also:
* :func:`load_audio_from_file` to load audio into :attr:`audio_data` after
initialization.
* :func:`load_audio_from_array` to safely load audio into :attr:`audio_data` after
initialization.
* :func:`set_active_region_to_default` for more information about the active region.
* :attr:`signal_duration` and :attr:`signal_length` for length of audio data in seconds
and samples, respectively.
* :func:`stft` to calculate an STFT from this data,
and :func:`istft` to calculate the inverse STFT and put it in :attr:`audio_data`.
* :func:`plot_time_domain` to create a plot of audio data stored in this attribute.
* :func:`peak_normalize` to apply gain such that to the absolute max value is exactly
``1.0``.
* :func:`rms` to calculate the root-mean-square of :attr:`audio_data`
* :func:`apply_gain` to apply a gain.
* :func:`get_channel` to safely retrieve a single channel in :attr:`audio_data`.
"""
if self._audio_data is None:
return None
start = 0
end = self._audio_data.shape[constants.LEN_INDEX]
if self._active_end is not None and self._active_end < end:
end = self._active_end
if self._active_start is not None and self._active_start > 0:
start = self._active_start
return self._audio_data[:, start:end]
@audio_data.setter
def audio_data(self, value):
if value is None:
self._audio_data = None
return
elif not isinstance(value, np.ndarray):
raise AudioSignalException('Type of self.audio_data must be of type np.ndarray!')
if not np.isfinite(value).all():
raise AudioSignalException('Not all values of audio_data are finite!')
if value.ndim > 1 and value.shape[constants.CHAN_INDEX] > value.shape[constants.LEN_INDEX]:
value = value.T
if value.ndim > 2:
raise AudioSignalException('self.audio_data cannot have more than 2 dimensions!')
if value.ndim < 2:
value = np.expand_dims(value, axis=constants.CHAN_INDEX)
self._audio_data = value
self.set_active_region_to_default()
@property
def stft_data(self):
"""
``np.ndarray``
Stored as a ``numpy`` :obj:`np.ndarray`, :attr:`stft_data` houses complex-valued data
computed from a Short-time Fourier Transform (STFT) of audio data in the
:class:`AudioSignal`. ``None`` by default, this :class:`AudioSignal` object can be
initialized with STFT data upon initialization or it can be set at any time.
The STFT data is stored with shape ``(n_frequency_bins, n_hops, n_channels)`` as
a complex-valued ``numpy`` array.
Raises:
:class:`AudioSignalException`
if set with an :obj:`np.ndarray` with one dimension or more than three dimensions.
See Also:
* :func:`stft` to calculate an STFT from :attr:`audio_data`, and :func:`istft` to
calculate the inverse STFT from this attribute and put it in :attr:`audio_data`.
* :func:`magnitude_spectrogram` to calculate and get the magnitude spectrogram from
:attr:`stft_data`. :func:`power_spectrogram` to calculate and get the power
spectrogram from :attr:`stft_data`.
* :func:`get_stft_channel` to safely get a specific channel in :attr:`stft_data`.
Notes:
* :attr:`audio_data` and :attr:`stft_data` are not automatically synchronized, meaning
that if one of them is changed, those changes are not instantly reflected in the other.
To propagate changes, either call :func:`stft` or :func:`istft`.
* :attr:`stft_data` will expand a two dimensional array so that it has the expected
shape `(n_frequency_bins, n_hops, n_channels)`.
"""
return self._stft_data
@stft_data.setter
def stft_data(self, value):
if value is None:
self._stft_data = None
return
elif not isinstance(value, np.ndarray):
raise AudioSignalException('Type of self.stft_data must be of type np.ndarray!')
if value.ndim == 1:
raise AudioSignalException('Cannot support arrays with less than 2 dimensions!')
if value.ndim == 2:
value = np.expand_dims(value, axis=constants.STFT_CHAN_INDEX)
if value.ndim > 3:
raise AudioSignalException('Cannot support arrays with more than 3 dimensions!')
if not np.iscomplexobj(value):
warnings.warn('Initializing STFT with data that is non-complex. '
'This might lead to weird results!')
self._stft_data = value
@property
def stft_params(self):
"""
``STFTParams``
STFT parameters are kept in this property. STFT parameters are a ``namedtuple``
called ``STFTParams`` with the following signature:
.. code-block:: python
STFTParams(
window_length=2048,
hop_length=512,
window_type='hann'
)
The defaults are 32ms windows, 8ms hop, and a hann window.
"""
return self._stft_params
@stft_params.setter
def stft_params(self, value):
if value and not isinstance(value, STFTParams):
raise ValueError("stft_params must be of type STFTParams or None!")
default_win_len = int(
2 ** (np.ceil(np.log2(constants.DEFAULT_WIN_LEN_PARAM * self.sample_rate)))
)
default_hop_len = default_win_len // 4
default_win_type = constants.WINDOW_DEFAULT
default_stft_params = STFTParams(
window_length=default_win_len,
hop_length=default_hop_len,
window_type=default_win_type
)._asdict()
value = value._asdict() if value else default_stft_params
for key in default_stft_params:
if value[key] is None:
value[key] = default_stft_params[key]
self._stft_params = STFTParams(**value)
if self._stft_params.window_type == 'sqrt_hann':
window_type = constants.WINDOW_HANN
else:
window_type = self._stft_params.window_type
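        # Sanity-check that this window/hop pair satisfies scipy's constant-overlap-add
        # (COLA) condition required for an invertible STFT; the boolean result is not used.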
check_COLA(window_type, self._stft_params.window_length, self._stft_params.hop_length)
@property
def has_data(self):
"""
``bool``
Returns ``False`` if :attr:`audio_data` and :attr:`stft_data` are empty. Else,
returns ``True``.
"""
has_audio_data = self.audio_data is not None and self.audio_data.size != 0
has_stft_data = self.stft_data is not None and self.stft_data.size != 0
return has_audio_data or has_stft_data
@property
def file_name(self):
"""
``str``
The name of the file associated with this object. Includes extension, but not the full
path.
Notes:
This will return ``None`` if this :class:`AudioSignal` object was not
loaded from a file.
See Also:
:attr:`path_to_input_file` for the full path.
"""
if self.path_to_input_file is not None:
return os.path.basename(self.path_to_input_file)
return None
@property
def sample_rate(self):
"""
``int``
Sample rate associated with this object. If audio was read from a file, the sample
rate will be set to the sample rate associated with the file. If this object was
initialized from an array then the sample rate is set upon init. This property is
read-only. To change the sample rate, use :func:`resample`.
Notes:
            This property is read-only and cannot be set directly. To change it, use :func:`resample`.
See Also:
* :func:`resample` to change the sample rate and resample data in :attr:`sample_rate`.
* :func:`load_audio_from_array` to read audio from an array and set the sample rate.
* :var:`nussl.constants.DEFAULT_SAMPLE_RATE` the default sample rate for *nussl*
if not specified
"""
return self._sample_rate
@property
def time_vector(self):
"""
``np.ndarray``
A 1D :obj:`np.ndarray` with timestamps (in seconds) for each sample in
:attr:`audio_data`.
"""
if self.signal_duration is None:
return None
return np.linspace(0.0, self.signal_duration, num=self.signal_length)
@property
def freq_vector(self):
"""
``np.ndarray``
A 1D numpy array with frequency values (in Hz) that correspond
to each frequency bin (vertical axis) in :attr:`stft_data`. Assumes
linearly spaced frequency bins.
Raises:
:class:`AudioSignalException`: If :attr:`stft_data` is ``None``.
Run :func:`stft` before accessing this.
"""
if self.stft_data is None:
raise AudioSignalException(
'Cannot calculate freq_vector until self.stft() is run')
return np.linspace(
0.0, self.sample_rate // 2,
num=self.stft_data.shape[constants.STFT_VERT_INDEX])
@property
def time_bins_vector(self):
"""
``np.ndarray``
A 1D numpy array with time values (in seconds) that correspond
to each time bin (horizontal/time axis) in :attr:`stft_data`.
Raises:
:class:`AudioSignalException`: If :attr:`stft_data` is ``None``. Run :func:`stft`
before accessing this.
"""
if self.stft_data is None:
raise AudioSignalException(
'Cannot calculate time_bins_vector until self.stft() is run')
return np.linspace(0.0, self.signal_duration,
num=self.stft_data.shape[constants.STFT_LEN_INDEX])
@property
def stft_length(self):
"""
``int``
The length of :attr:`stft_data` along the time axis. In units of hops.
Raises:
            :class:`AudioSignalException`: If ``self.stft_data`` is ``None``. Run :func:`stft`
before accessing this.
"""
if self.stft_data is None:
raise AudioSignalException('Cannot calculate stft_length until self.stft() is run')
return self.stft_data.shape[constants.STFT_LEN_INDEX]
@property
def active_region_is_default(self):
"""
``bool``
``True`` if active region is the full length of :attr:`audio_data`. ``False`` otherwise.
See Also:
* :func:`set_active_region` for a description of active regions in :class:`AudioSignal`
* :func:`set_active_region_to_default`
"""
return self._active_start == 0 and self._active_end == self._signal_length
@property
def _signal_length(self):
"""
``int``
This is the length of the full signal, not just the active region.
"""
if self._audio_data is None:
return None
return self._audio_data.shape[constants.LEN_INDEX]
@property
def power_spectrogram_data(self):
"""
``np.ndarray``
Returns a real valued :obj:`np.ndarray` with power
spectrogram data. The power spectrogram is defined as ``(STFT)^2``, where ``^2`` is
element-wise squaring of entries of the STFT. Same shape as :attr:`stft_data`.
Raises:
:class:`AudioSignalException`: if :attr:`stft_data` is ``None``. Run :func:`stft`
before accessing this.
See Also:
* :func:`stft` to calculate the STFT before accessing this attribute.
* :attr:`stft_data` complex-valued Short-time Fourier Transform data.
* :attr:`magnitude_spectrogram_data` to get magnitude spectrogram data.
* :func:`get_power_spectrogram_channel` to get a specific channel
"""
if self.stft_data is None:
raise AudioSignalException('Cannot calculate power_spectrogram_data '
'because self.stft_data is None')
return np.abs(self.stft_data) ** 2
@property
def magnitude_spectrogram_data(self):
"""
``np.ndarray``
Returns a real valued ``np.array`` with magnitude spectrogram data. The magnitude
spectrogram is defined as ``abs(STFT)``, the element-wise absolute value of every item
in the STFT. Same shape as :attr:`stft_data`.
Raises:
AudioSignalException: if :attr:`stft_data` is ``None``. Run :func:`stft` before
accessing this.
See Also:
* :func:`stft` to calculate the STFT before accessing this attribute.
* :attr:`stft_data` complex-valued Short-time Fourier Transform data.
* :attr:`power_spectrogram_data`
* :func:`get_magnitude_spectrogram_channel`
"""
if self.stft_data is None:
raise AudioSignalException('Cannot calculate magnitude_spectrogram_data '
'because self.stft_data is None')
return np.abs(self.stft_data)
@property
def log_magnitude_spectrogram_data(self):
"""
(:obj:`np.ndarray`): Returns a real valued ``np.array`` with log magnitude spectrogram data.
The log magnitude spectrogram is defined as 20 * log10(abs(stft)).
Same shape as :attr:`stft_data`.
Raises:
AudioSignalException: if :attr:`stft_data` is ``None``. Run :func:`stft` before
accessing this.
See Also:
* :func:`stft` to calculate the STFT before accessing this attribute.
* :attr:`stft_data` complex-valued Short-time Fourier Transform data.
* :attr:`power_spectrogram_data`
* :func:`get_magnitude_spectrogram_channel`
"""
if self.stft_data is None:
raise AudioSignalException('Cannot calculate log_magnitude_spectrogram_data '
'because self.stft_data is None')
return 20 * np.log10(np.abs(self.stft_data) + 1e-8)
@property
def effects_chain(self):
"""
(``list`` of ``nussl.core.FilterFunction``): Returns a copy of the AudioSignal's
effect chain. Editing this property will not result in a change to the effects chain
of the AudioSignal.
Please use the effects hooks (e.g. :func:`tremolo`, :func:`make_effect`) to make changes
        to the AudioSignal's effects chain.
See Also:
* :func:`apply_effects`
"""
return self._effects_chain.copy()
@property
def effects_applied(self):
"""
(``list`` of ``nussl.core.FilterFunction``): Returns a copy of the list of effects
applied to the AudioSignal. Editing this property will not result in a change to the
        effects applied to the AudioSignal.
Please use :func:`apply_effects` to apply effects to the AudioSignal.
See Also:
* :func:`apply_effects`
"""
return self._effects_applied.copy()
##################################################
# I/O
##################################################
def load_audio_from_file(self, input_file_path, offset=0, duration=None, new_sample_rate=None):
# type: (str, float, float, int) -> None
"""
Loads an audio signal into memory from a file on disc. The audio is stored in
:class:`AudioSignal` as a :obj:`np.ndarray` of `float` s. The sample rate is read from
the file, and this :class:`AudioSignal` object's sample rate is set from it. If
:param:`new_sample_rate` is not ``None`` nor the same as the sample rate of the file,
the audio will be resampled to the sample rate provided in the :param:`new_sample_rate`
parameter. After reading the audio data into memory, the active region is set to default.
:param:`offset` and :param:`duration` allow the user to determine how much of the audio is
read from the file. If those are non-default, then only the values provided will be stored
in :attr:`audio_data` (unlike with the active region, which has the entire audio data stored
in memory but only allows access to a subset of the audio).
See Also:
* :func:`load_audio_from_array` to read audio data from a :obj:`np.ndarray`.
Args:
input_file_path (str): Path to input file.
offset (float,): The starting point of the section to be extracted (seconds).
Defaults to 0 seconds (i.e., the very beginning of the file).
duration (float): Length of signal to load in second.
signal_length of 0 means read the whole file. Defaults to the full
length of the signal.
new_sample_rate (int): If this parameter is not ``None`` or the same sample rate as
provided by the input file, then the audio data will be resampled to the new
sample rate dictated by this parameter.
"""
assert offset >= 0, 'Parameter `offset` must be >= 0!'
if duration is not None:
assert duration >= 0, 'Parameter `duration` must be >= 0!'
try:
# try reading headers with soundfile for speed
audio_info = sf.info(input_file_path)
file_length = audio_info.duration
        except Exception:
# if that doesn't work try audioread
with audioread.audio_open(os.path.realpath(input_file_path)) as input_file:
file_length = input_file.duration
if offset > file_length:
raise AudioSignalException('offset is longer than signal!')
if duration is not None and offset + duration >= file_length:
warnings.warn('offset + duration are longer than the signal.'
' Reading until end of signal...',
UserWarning)
audio_input, self._sample_rate = librosa.load(input_file_path,
sr=None,
offset=offset,
duration=duration,
mono=False)
self.audio_data = audio_input
self.original_signal_length = self.signal_length
if new_sample_rate is not None and new_sample_rate != self._sample_rate:
warnings.warn('Input sample rate is different than the sample rate'
' read from the file! Resampling...',
UserWarning)
self.resample(new_sample_rate)
self.path_to_input_file = input_file_path
self.set_active_region_to_default()
def load_audio_from_array(self, signal, sample_rate=constants.DEFAULT_SAMPLE_RATE):
"""
Loads an audio signal from a :obj:`np.ndarray`. :param:`sample_rate` is the sample
of the signal.
See Also:
* :func:`load_audio_from_file` to read in an audio file from disc.
Notes:
Only accepts float arrays and int arrays of depth 16-bits.
Parameters:
signal (:obj:`np.ndarray`): Array containing the audio signal sampled at
:param:`sample_rate`.
sample_rate (int): The sample rate of signal.
Default is :ref:`constants.DEFAULT_SAMPLE_RATE` (44.1kHz)
"""
assert (type(signal) == np.ndarray)
self.path_to_input_file = None
# Change from fixed point to floating point
if not np.issubdtype(signal.dtype, np.floating):
signal = signal.astype('float') / (np.iinfo(np.dtype('int16')).max + 1.0)
self.audio_data = signal
self.original_signal_length = self.signal_length
self._sample_rate = sample_rate if sample_rate is not None \
else constants.DEFAULT_SAMPLE_RATE
self.set_active_region_to_default()
def write_audio_to_file(self, output_file_path, sample_rate=None):
"""
Outputs the audio signal data in :attr:`audio_data` to a file at :param:`output_file_path`
with sample rate of :param:`sample_rate`.
Parameters:
output_file_path (str): Filename where output file will be saved.
sample_rate (int): The sample rate to write the file at. Default is
:attr:`sample_rate`.
"""
if self.audio_data is None:
raise AudioSignalException("Cannot write audio file because there is no audio data.")
if sample_rate is None:
sample_rate = self.sample_rate
audio_output = np.copy(self.audio_data)
# TODO: better fix
# convert to fixed point again
if not np.issubdtype(audio_output.dtype, np.dtype(int).type):
audio_output = np.multiply(
audio_output,
2 ** (constants.DEFAULT_BIT_DEPTH - 1)).astype('int16')
wav.write(output_file_path, sample_rate, audio_output.T)
##################################################
# Active Region
##################################################
def set_active_region(self, start, end):
"""
Determines the bounds of what gets returned when you access :attr:`audio_data`.
None of the data in :attr:`audio_data` is discarded when you set the active region, it
merely becomes inaccessible until the active region is set back to default (i.e., the full
length of the signal).
This is useful for reusing a single :class:`AudioSignal` object to do multiple operations on
only select parts of the audio data.
Warnings:
Many functions will raise exceptions while the active region is not default. Be aware
that adding, subtracting, concatenating, truncating, and other utilities are not
available when the active region is not default.
See Also:
* :func:`set_active_region_to_default`
* :attr:`active_region_is_default`
Examples:
>>> import nussl
>>> import numpy as np
>>> n = nussl.constants.DEFAULT_SAMPLE_RATE # 1 second of audio at 44.1kHz
>>> np_sin = np.sin(np.linspace(0, 100 * 2 * np.pi, n)) # sine wave @ 100 Hz
>>> sig = nussl.AudioSignal(audio_data_array=np_sin)
>>> sig.signal_duration
1.0
>>> sig.set_active_region(0, n // 2)
>>> sig.signal_duration
0.5
Args:
start (int): Beginning of active region (in samples). Cannot be less than 0.
end (int): End of active region (in samples). Cannot be larger than
:attr:`signal_length`.
"""
start, end = int(start), int(end)
self._active_start = start if start >= 0 else 0
self._active_end = end if end < self._signal_length else self._signal_length
def set_active_region_to_default(self):
"""
Resets the active region of this :class:`AudioSignal` object to its default value of the
entire :attr:`audio_data` array.
See Also:
* :func:`set_active_region` for an explanation of active regions within the
:class:`AudioSignal`.
"""
self._active_start = 0
self._active_end = self._signal_length
##################################################
# STFT Utilities
##################################################
@staticmethod
def get_window(window_type, window_length):
"""
Wrapper around scipy.signal.get_window so one can also get the
popular sqrt-hann window.
Args:
            window_type (str): Type of window to get (see constants.ALL_WINDOWS).
window_length (int): Length of the window
Returns:
            np.ndarray: Window returned by scipy.signal.get_window
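        Example:
            A minimal sketch getting a 2048-sample sqrt-hann window:
            >>> window = AudioSignal.get_window(constants.WINDOW_SQRT_HANN, 2048)
            >>> len(window)
            2048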
"""
if window_type == constants.WINDOW_SQRT_HANN:
window = np.sqrt(scipy.signal.get_window(
'hann', window_length
))
else:
window = scipy.signal.get_window(
window_type, window_length)
return window
def stft(self, window_length=None, hop_length=None, window_type=None, overwrite=True):
"""
Computes the Short Time Fourier Transform (STFT) of :attr:`audio_data`.
The results of the STFT calculation can be accessed from :attr:`stft_data`
if :attr:`stft_data` is ``None`` prior to running this function or ``overwrite == True``
Warning:
If overwrite=True (default) this will overwrite any data in :attr:`stft_data`!
Args:
window_length (int): Amount of time (in samples) to do an FFT on
hop_length (int): Amount of time (in samples) to skip ahead for the new FFT
window_type (str): Type of scaling to apply to the window.
overwrite (bool): Overwrite :attr:`stft_data` with current calculation
Returns:
(:obj:`np.ndarray`) Calculated, complex-valued STFT from :attr:`audio_data`, 3D numpy
array with shape `(n_frequency_bins, n_hops, n_channels)`.
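        Example:
            A minimal sketch (assumes ``signal`` holds audio data):
            >>> stft = signal.stft()
            >>> stft.ndim  # (n_frequency_bins, n_hops, n_channels)
            3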
"""
if self.audio_data is None or self.audio_data.size == 0:
raise AudioSignalException(
"No time domain signal (self.audio_data) to make STFT from!")
window_length = (
self.stft_params.window_length
if window_length is None
else int(window_length)
)
hop_length = (
self.stft_params.hop_length
if hop_length is None
else int(hop_length)
)
window_type = (
self.stft_params.window_type
if window_type is None
else window_type
)
stft_data = []
window = self.get_window(window_type, window_length)
for chan in self.get_channels():
_, _, _stft = scipy.signal.stft(
chan, fs=self.sample_rate, window=window,
nperseg=window_length, noverlap=window_length - hop_length)
stft_data.append(_stft)
stft_data = np.array(stft_data).transpose((1, 2, 0))
if overwrite:
self.stft_data = stft_data
return stft_data
def istft(self, window_length=None, hop_length=None, window_type=None, overwrite=True,
truncate_to_length=None):
""" Computes and returns the inverse Short Time Fourier Transform (iSTFT).
The results of the iSTFT calculation can be accessed from :attr:`audio_data`
if :attr:`audio_data` is ``None`` prior to running this function or ``overwrite == True``
Warning:
If overwrite=True (default) this will overwrite any data in :attr:`audio_data`!
Args:
window_length (int): Amount of time (in samples) to do an FFT on
hop_length (int): Amount of time (in samples) to skip ahead for the new FFT
window_type (str): Type of scaling to apply to the window.
overwrite (bool): Overwrite :attr:`stft_data` with current calculation
truncate_to_length (int): truncate resultant signal to specified length. Default ``None``.
Returns:
(:obj:`np.ndarray`) Calculated, real-valued iSTFT from :attr:`stft_data`, 2D numpy array
with shape `(n_channels, n_samples)`.
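        Example:
            A minimal round-trip sketch (assumes ``signal`` holds audio data):
            >>> _ = signal.stft()   # populates signal.stft_data
            >>> _ = signal.istft()  # reconstructs and overwrites signal.audio_data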
"""
if self.stft_data is None or self.stft_data.size == 0:
raise AudioSignalException('Cannot do inverse STFT without self.stft_data!')
window_length = (
self.stft_params.window_length
if window_length is None
else int(window_length)
)
hop_length = (
self.stft_params.hop_length
if hop_length is None
else int(hop_length)
)
window_type = (
self.stft_params.window_type
if window_type is None
else window_type
)
signals = []
window = self.get_window(window_type, window_length)
for stft in self.get_stft_channels():
_, _signal = scipy.signal.istft(
stft, fs=self.sample_rate, window=window,
nperseg=window_length, noverlap=window_length - hop_length)
signals.append(_signal)
calculated_signal = np.array(signals)
# Make sure it's shaped correctly
calculated_signal = np.expand_dims(calculated_signal, -1) \
if calculated_signal.ndim == 1 else calculated_signal
# if truncate_to_length isn't provided
if truncate_to_length is None:
truncate_to_length = self.original_signal_length
if self.signal_length is not None:
truncate_to_length = self.signal_length
if truncate_to_length is not None and truncate_to_length > 0:
calculated_signal = calculated_signal[:, :truncate_to_length]
if overwrite or self.audio_data is None:
self.audio_data = calculated_signal
return calculated_signal
def apply_mask(self, mask, overwrite=False):
"""
Applies the input mask to the time-frequency representation in this :class:`AudioSignal`
object and returns a new :class:`AudioSignal` object with the mask applied. The mask
is applied to the magnitude of audio signal. The phase of the original audio
signal is then applied to construct the masked STFT.
Args:
mask (:obj:`MaskBase`-derived object): A ``MaskBase``-derived object
containing a mask.
overwrite (bool): If ``True``, this will alter ``stft_data`` in self.
If ``False``, this function will create a new ``AudioSignal`` object
with the mask applied.
Returns:
            A new :class:`AudioSignal` object with the input mask applied to the STFT,
            iff ``overwrite`` is False.
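        Example:
            A minimal sketch (assumes ``mask`` is a :class:`MaskBase`-derived object whose
            shape matches ``signal.stft_data``):
            >>> _ = signal.stft()
            >>> masked = signal.apply_mask(mask)
            >>> _ = masked.istft()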
"""
if not isinstance(mask, masks.MaskBase):
raise AudioSignalException(f'Expected MaskBase-derived object, given {type(mask)}')
if self.stft_data is None:
raise AudioSignalException('There is no STFT data to apply a mask to!')
if mask.shape != self.stft_data.shape:
if not mask.shape[:-1] == self.stft_data.shape[:-1]:
raise AudioSignalException(
'Input mask and self.stft_data are not the same shape! mask:'
f' {mask.shape}, self.stft_data: {self.stft_data.shape}'
)
magnitude, phase = np.abs(self.stft_data), np.angle(self.stft_data)
masked_abs = magnitude * mask.mask
masked_stft = masked_abs * np.exp(1j * phase)
if overwrite:
self.stft_data = masked_stft
else:
return self.make_copy_with_stft_data(masked_stft, verbose=False)
def ipd_ild_features(self, ch_one=0, ch_two=1):
"""
Computes interphase difference (IPD) and interlevel difference (ILD) for a
stereo spectrogram. If more than two channels, this by default computes IPD/ILD
between the first two channels. This can be specified by the arguments ch_one
and ch_two. If only one channel, this raises an error.
Args:
ch_one (``int``): index of first channel to compute IPD/ILD.
ch_two (``int``): index of second channel to compute IPD/ILD.
Returns:
ipd (``np.ndarray``): Interphase difference between selected channels
ild (``np.ndarray``): Interlevel difference between selected channels
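        Example:
            A minimal sketch (assumes ``signal`` is a stereo :class:`AudioSignal`):
            >>> _ = signal.stft()
            >>> ipd, ild = signal.ipd_ild_features()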
"""
if self.stft_data is None:
raise AudioSignalException("Cannot compute ipd/ild features without stft_data!")
if self.is_mono:
raise AudioSignalException("Cannot compute ipd/ild features on mono input!")
stft_ch_one = self.get_stft_channel(ch_one)
stft_ch_two = self.get_stft_channel(ch_two)
ild = np.abs(stft_ch_one) / (np.abs(stft_ch_two) + 1e-4)
ild = 20 * np.log10(ild + 1e-8)
frequencies = self.freq_vector
ipd = np.angle(stft_ch_two * np.conj(stft_ch_one))
ipd /= (frequencies + 1.0)[:, None]
ipd = ipd % np.pi
return ipd, ild
##################################################
# Utilities
##################################################
def concat(self, other):
""" Concatenate two :class:`AudioSignal` objects (by concatenating :attr:`audio_data`).
Puts ``other.audio_data`` after :attr:`audio_data`.
Raises:
            AudioSignalException: If ``self.sample_rate != other.sample_rate``,
                ``self.num_channels != other.num_channels``, or
                ``self.active_region_is_default`` is ``False``.
Args:
other (:class:`AudioSignal`): :class:`AudioSignal` to concatenate with the current one.
Returns:
(:class:`AudioSignal`): modified :class:`AudioSignal` (in-place).
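        Example:
            A minimal sketch (assumes ``first`` and ``second`` have matching sample rates
            and channel counts):
            >>> total = first.signal_length + second.signal_length
            >>> _ = first.concat(second)
            >>> first.signal_length == total
            True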
"""
self._verify_audio(other)
self.audio_data = np.concatenate((self.audio_data, other.audio_data),
axis=constants.LEN_INDEX)
return self
def truncate_samples(self, n_samples):
""" Truncates the signal leaving only the first ``n_samples`` samples.
This can only be done if ``self.active_region_is_default`` is True. If
``n_samples > self.signal_length``, then `n_samples = self.signal_length`
(no truncation happens).
Raises:
AudioSignalException: If ``self.active_region_is_default`` is ``False``.
Args:
n_samples: (int) number of samples that will be left.
Returns:
(:class:`AudioSignal`): modified :class:`AudioSignal` (in-place).
"""
if not self.active_region_is_default:
raise AudioSignalException('Cannot truncate while active region is not set as default!')
n_samples = int(n_samples)
if n_samples > self.signal_length:
n_samples = self.signal_length
self.audio_data = self.audio_data[:, 0: n_samples]
return self
def truncate_seconds(self, n_seconds):
""" Truncates the signal leaving only the first n_seconds.
This can only be done if self.active_region_is_default is True.
Args:
n_seconds: (float) number of seconds to truncate :attr:`audio_data`.
Returns:
(:class:`AudioSignal`): modified :class:`AudioSignal` (in-place).
"""
n_samples = int(n_seconds * self.sample_rate)
self.truncate_samples(n_samples)
return self
def crop_signal(self, before, after):
"""
Get rid of samples before and after the signal on all channels. Contracts the length
of :attr:`audio_data` by before + after. Useful to get rid of zero padding after the fact.
Args:
before: (int) number of samples to remove at beginning of self.audio_data
after: (int) number of samples to remove at end of self.audio_data
Returns:
(:class:`AudioSignal`): modified :class:`AudioSignal` (in-place).
"""
if not self.active_region_is_default:
raise AudioSignalException('Cannot crop signal while active region '
'is not set as default!')
num_samples = self.signal_length
self.audio_data = self.audio_data[:, before:num_samples - after]
self.set_active_region_to_default()
return self
def zero_pad(self, before, after):
""" Adds zeros before and after the signal to all channels.
Extends the length of self.audio_data by before + after.
Raises:
            Exception: If ``self.active_region_is_default`` is ``False``.
Args:
before: (int) number of zeros to be put before the current contents of self.audio_data
            after: (int) number of zeros to be put after the current contents of self.audio_data
Returns:
(:class:`AudioSignal`): modified :class:`AudioSignal` (in-place).
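        Example:
            A minimal sketch adding half a second of silence on each side (assumes
            ``signal`` holds audio data and its active region is default):
            >>> pad = signal.sample_rate // 2
            >>> _ = signal.zero_pad(pad, pad)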
"""
if not self.active_region_is_default:
raise AudioSignalException('Cannot zero-pad while active region is not set as default!')
self.audio_data = np.pad(self.audio_data, ((0, 0), (before, after)), 'constant')
return self
def add(self, other):
"""Adds two audio signal objects.
This does element-wise addition on the :attr:`audio_data` array.
Raises:
AudioSignalException: If ``self.sample_rate != other.sample_rate``,
``self.num_channels != other.num_channels``, or
``self.active_region_is_default`` is ``False``.
Parameters:
other (:class:`AudioSignal`): Other :class:`AudioSignal` to add.
Returns:
(:class:`AudioSignal`): New :class:`AudioSignal` object with the sum of
``self`` and ``other``.
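        Example:
            A minimal sketch (assumes ``signal_a`` and ``signal_b`` have matching sample
            rates, channel counts, and lengths):
            >>> mixture = signal_a + signal_b  # equivalent to signal_a.add(signal_b)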
"""
if isinstance(other, int):
# this is so that sum(list of audio_signals) works.
# when sum is called on a list it's evaluated as 0 + elem1 + elem2 + ...
# so the 0 case needs to be taken care of (by doing nothing)
return self
self._verify_audio_arithmetic(other)
new_signal = copy.deepcopy(self)
new_signal.audio_data = self.audio_data + other.audio_data
return new_signal
def subtract(self, other):
"""Subtracts two audio signal objects.
This does element-wise subtraction on the :attr:`audio_data` array.
Raises:
AudioSignalException: If ``self.sample_rate != other.sample_rate``,
``self.num_channels != other.num_channels``, or
``self.active_region_is_default`` is ``False``.
Parameters:
other (:class:`AudioSignal`): Other :class:`AudioSignal` to subtract.
Returns:
(:class:`AudioSignal`): New :class:`AudioSignal` object with the difference
between ``self`` and ``other``.
"""
self._verify_audio_arithmetic(other)
other_copy = copy.deepcopy(other)
other_copy *= -1
return self.add(other_copy)
def make_copy_with_audio_data(self, audio_data, verbose=True):
""" Makes a copy of this :class:`AudioSignal` object with :attr:`audio_data` initialized to
        the input :param:`audio_data` numpy array. The :attr:`stft_data` of the new
        :class:`AudioSignal` object is ``None``.
Args:
audio_data (:obj:`np.ndarray`): Audio data to be put into the new :class:`AudioSignal`
object.
verbose (bool): If ``True`` prints warnings. If ``False``, outputs nothing.
Returns:
(:class:`AudioSignal`): A copy of this :class:`AudioSignal` object with :attr:`audio_data`
initialized to the input :param:`audio_data` numpy array.
"""
if verbose:
if not self.active_region_is_default:
warnings.warn('Making a copy when active region is not default.')
if audio_data.shape != self.audio_data.shape:
warnings.warn('Shape of new audio_data does not match current audio_data.')
new_signal = copy.deepcopy(self)
new_signal.audio_data = audio_data
new_signal.stft_data = None
return new_signal
def make_copy_with_stft_data(self, stft_data, verbose=True):
""" Makes a copy of this :class:`AudioSignal` object with :attr:`stft_data` initialized to
the input :param:`stft_data` numpy array. The :attr:`audio_data` of the new
:class:`AudioSignal` object is ``None``.
        Args:
            stft_data (:obj:`np.ndarray`): STFT data to be put into the new :class:`AudioSignal`
                object.
            verbose (bool): If ``True`` prints warnings. If ``False``, outputs nothing.
Returns:
(:class:`AudioSignal`): A copy of this :class:`AudioSignal` object with :attr:`stft_data`
initialized to the input :param:`stft_data` numpy array.
"""
if verbose:
if not self.active_region_is_default:
warnings.warn('Making a copy when active region is not default.')
if stft_data.shape != self.stft_data.shape:
warnings.warn('Shape of new stft_data does not match current stft_data.')
new_signal = copy.deepcopy(self)
new_signal.stft_data = stft_data
new_signal.original_signal_length = self.original_signal_length
new_signal.audio_data = None
return new_signal
def loudness(self, filter_class='K-weighting', block_size=0.400):
"""
Uses pyloudnorm to calculate loudness.
Implementation of ITU-R BS.1770-4.
Allows control over gating block size and frequency weighting filters for
additional control.
Measure the integrated gated loudness of a signal.
Uses the weighting filters and block size defined by the meter
the integrated loudness is measured based upon the gating algorithm
defined in the ITU-R BS.1770-4 specification.
Supports up to 5 channels and follows the channel ordering:
[Left, Right, Center, Left surround, Right surround]
Args:
filter_class (str):
Class of weighting filter used.
- 'K-weighting' (default)
- 'Fenton/Lee 1'
- 'Fenton/Lee 2'
- 'Dash et al.'
block_size (float):
Gating block size in seconds. Defaults to 0.400.
Returns:
float: LUFS, Integrated gated loudness of the input
measured in dB LUFS.
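        Example:
            A minimal sketch (assumes ``signal`` holds audio data):
            >>> lufs = signal.loudness()  # ITU-R BS.1770-4 integrated loudness, dB LUFS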
"""
# create BS.1770 meter
meter = pyloudnorm.Meter(
self.sample_rate, filter_class=filter_class, block_size=block_size)
# measure loudness
loudness = meter.integrated_loudness(self.audio_data.T)
return loudness
def rms(self, win_len=None, hop_len=None):
""" Calculates the root-mean-square of :attr:`audio_data`.
Returns:
(float): Root-mean-square of :attr:`audio_data`.
"""
if win_len is not None:
hop_len = win_len // 2 if hop_len is None else hop_len
rms_func = lambda arr: librosa.feature.rms(arr, frame_length=win_len,
hop_length=hop_len)[0, :]
else:
rms_func = lambda arr: np.sqrt(np.mean(np.square(arr)))
result = []
for ch in self.get_channels():
result.append(rms_func(ch))
return np.squeeze(result)
def peak_normalize(self):
"""
Peak normalizes the audio signal.
Returns:
(:class:`AudioSignal`): peak-normalized :class:`AudioSignal` (in-place).
"""
self.apply_gain(1 / np.abs(self.audio_data).max())
return self
def apply_gain(self, value):
"""
Apply a gain to :attr:`audio_data`
Args:
value (float): amount to multiply self.audio_data by
Returns:
(:class:`AudioSignal`): This :class:`AudioSignal` object with the gain applied.
"""
if not isinstance(value, numbers.Real):
raise AudioSignalException('Can only multiply/divide by a scalar!')
self.audio_data = self.audio_data * value
return self
def resample(self, new_sample_rate, **kwargs):
"""
Resample the data in :attr:`audio_data` to the new sample rate provided by
:param:`new_sample_rate`. If the :param:`new_sample_rate` is the same as :attr:`sample_rate`
then nothing happens.
Args:
new_sample_rate (int): The new sample rate of :attr:`audio_data`.
kwargs: Keyword arguments to librosa.resample.
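        Example:
            A minimal sketch (assumes ``signal`` was loaded at 44100 Hz):
            >>> _ = signal.resample(16000)
            >>> signal.sample_rate
            16000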
"""
if new_sample_rate == self.sample_rate:
warnings.warn('Cannot resample to the same sample rate.')
return
resampled_signal = []
for channel in self.get_channels():
resampled_channel = librosa.resample(
channel, self.sample_rate, new_sample_rate, **kwargs)
resampled_signal.append(resampled_channel)
self.audio_data = np.array(resampled_signal)
self.original_signal_length = self.signal_length
self._sample_rate = new_sample_rate
return self
##################################################
# Channel Utilities
##################################################
def _verify_get_channel(self, n):
if n >= self.num_channels:
raise AudioSignalException(
f'Cannot get channel {n} when this object only has {self.num_channels}'
' channels! (0-based)'
)
if n < 0:
raise AudioSignalException(
f'Cannot get channel {n}. This will cause unexpected results.'
)
def get_channel(self, n):
"""Gets audio data of n-th channel from :attr:`audio_data` as a 1D :obj:`np.ndarray`
of shape ``(n_samples,)``.
Parameters:
n (int): index of channel to get. **0-based**
See Also:
* :func:`get_channels`: Generator for looping through channels of :attr:`audio_data`.
* :func:`get_stft_channel`: Gets stft data from a specific channel.
* :func:`get_stft_channels`: Generator for looping through channels from
:attr:`stft_data`.
Raises:
:class:`AudioSignalException`: If not ``0 <= n < self.num_channels``.
Returns:
(:obj:`np.array`): The audio data in the n-th channel of the signal, 1D
"""
self._verify_get_channel(n)
return np.asfortranarray(utils._get_axis(self.audio_data, constants.CHAN_INDEX, n))
def get_channels(self):
"""Generator that will loop through channels of :attr:`audio_data`.
See Also:
* :func:`get_channel`: Gets audio data from a specific channel.
* :func:`get_stft_channel`: Gets stft data from a specific channel.
* :func:`get_stft_channels`: Generator to loop through channels of :attr:`stft_data`.
Yields:
(:obj:`np.array`): The audio data in the next channel of this signal as a
1D ``np.ndarray``.
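        Example:
            A minimal sketch computing the per-channel peak (assumes ``signal`` holds
            audio data):
            >>> peaks = [np.abs(ch).max() for ch in signal.get_channels()]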
"""
for i in range(self.num_channels):
yield self.get_channel(i)
def get_stft_channel(self, n):
"""Returns STFT data of n-th channel from :attr:`stft_data` as a 2D ``np.ndarray``.
Args:
n: (int) index of stft channel to get. **0-based**
See Also:
* :func:`get_stft_channels`: Generator to loop through channels from :attr:`stft_data`.
* :func:`get_channel`: Gets audio data from a specific channel.
* :func:`get_channels`: Generator to loop through channels of :attr:`audio_data`.
Raises:
:class:`AudioSignalException`: If not ``0 <= n < self.num_channels``.
Returns:
(:obj:`np.array`): the STFT data in the n-th channel of the signal, 2D
"""
if self.stft_data is None:
raise AudioSignalException('Cannot get STFT data before STFT is calculated!')
self._verify_get_channel(n)
return utils._get_axis(self.stft_data, constants.STFT_CHAN_INDEX, n)
def get_stft_channels(self):
"""Generator that will loop through channels of :attr:`stft_data`.
See Also:
* :func:`get_stft_channel`: Gets stft data from a specific channel.
* :func:`get_channel`: Gets audio data from a specific channel.
* :func:`get_channels`: Generator to loop through channels of :attr:`audio_data`.
Yields:
(:obj:`np.array`): The STFT data in the next channel of this signal as a
2D ``np.ndarray``.
"""
for i in range(self.num_channels):
yield self.get_stft_channel(i)
def make_audio_signal_from_channel(self, n):
"""
        Makes a new :class:`AudioSignal` object with data from channel ``n``.
Args:
n (int): index of channel to make a new signal from. **0-based**
Returns:
(:class:`AudioSignal`) new :class:`AudioSignal` object with only data from
channel ``n``.
"""
new_signal = copy.copy(self)
new_signal.audio_data = self.get_channel(n)
return new_signal
def get_power_spectrogram_channel(self, n):
""" Returns the n-th channel from ``self.power_spectrogram_data``.
Raises:
Exception: If not ``0 <= n < self.num_channels``.
Args:
n: (int) index of power spectrogram channel to get **0-based**
Returns:
(:obj:`np.array`): the power spectrogram data in the n-th channel of the signal, 1D
"""
self._verify_get_channel(n)
# np.array helps with duck typing
return utils._get_axis(np.array(self.power_spectrogram_data),
constants.STFT_CHAN_INDEX, n)
def get_magnitude_spectrogram_channel(self, n):
""" Returns the n-th channel from ``self.magnitude_spectrogram_data``.
Raises:
Exception: If not ``0 <= n < self.num_channels``.
Args:
n: (int) index of magnitude spectrogram channel to get **0-based**
Returns:
(:obj:`np.array`): the magnitude spectrogram data in the n-th channel of the signal, 1D
"""
self._verify_get_channel(n)
# np.array helps with duck typing
return utils._get_axis(np.array(self.magnitude_spectrogram_data),
constants.STFT_CHAN_INDEX, n)
def to_mono(self, overwrite=True, keep_dims=False):
""" Converts :attr:`audio_data` to mono by averaging every sample.
Args:
overwrite (bool): If ``True`` this function will overwrite :attr:`audio_data`.
keep_dims (bool): If ``False`` this function will return a 1D array,
else will return array with shape `(1, n_samples)`.
Warning:
If ``overwrite=True`` (default) this will overwrite any data in :attr:`audio_data`!
Returns:
(:obj:`AudioSignal`): Mono-ed version of AudioSignal, either in place or not.
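        Example:
            A minimal sketch (assumes ``signal`` is stereo):
            >>> mono_signal = signal.to_mono(overwrite=False)
            >>> mono_signal.num_channels
            1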
"""
mono = np.mean(self.audio_data, axis=constants.CHAN_INDEX, keepdims=keep_dims)
if overwrite:
self.audio_data = mono
return self
else:
mono_signal = self.make_copy_with_audio_data(mono)
return mono_signal
##################################################
# Utility hooks #
##################################################
def play(self):
"""
Plays this audio signal, using `nussl.play_utils.play`.
Plays an audio signal if ffplay from the ffmpeg suite of tools is installed.
Otherwise, will fail. The audio signal is written to a temporary file
and then played with ffplay.
"""
# lazy load
from . import play_utils
play_utils.play(self)
def embed_audio(self, ext='.mp3', display=True):
"""
Embeds the audio signal into a notebook, using `nussl.play_utils.embed_audio`.
        Writes a numpy array to a temporary mp3 file using ffmpy, then embeds the mp3
into the notebook.
Args:
ext (str): What extension to use when embedding. '.mp3' is more lightweight
leading to smaller notebook sizes.
display (bool): Whether or not to display the object immediately, or to return
the html object for display later by the end user. Defaults to True.
Example:
>>> import nussl
>>> audio_file = nussl.efz_utils.download_audio_file('schoolboy_fascination_excerpt.wav')
>>> audio_signal = nussl.AudioSignal(audio_file)
>>> audio_signal.embed_audio()
This will show a little audio player where you can play the audio inline in
the notebook.
"""
# lazy load
from . import play_utils
return play_utils.embed_audio(self, ext=ext, display=display)
##################################
# Effect Hooks #
##################################
def apply_effects(self, reset=True, overwrite=False, user_order=True):
"""
        This method applies a prespecified set of audio effects (e.g., chorus, filtering,
        reverb, etc...) to this audio signal. Before any effect can be applied, the effects
        are first added to the "effects chain", which refers to a queue of effects that will
        all be applied to an AudioSignal object when this function is called. Effects are added
        to the effects chain through AudioSignal effect hooks, which are `AudioSignal` methods
        for setting up an effect with the desired parameters. If the effects chain is empty,
        this method does nothing. By default, the effects chain is emptied after this method
        is called. See the documentation below for a list of supported effects and their
        respective details.
        Notes:
            The effects will be applied in the order that they were added to the effects chain,
            unless `user_order=False`, in which case the order is not guaranteed to be
            preserved. Setting `user_order=False` will apply all SoX effects first, then all
            FFmpeg effects, which can speed up processing time by ~30% in our experiments.
Args:
reset (bool): If True, clears out all effects in effect chains following applying the
effects. Default=True
overwrite (bool): If True, overwrites existing audio_data in AudioSignal. Default=False
Also clears out `stft_data`.
            user_order (bool): If True, applies effects in the user-provided order. If False,
                applies all SoX effects before all FFmpeg effects, which can be faster.
        Returns:
            self or new_signal (AudioSignal): If overwrite=True, returns the initial AudioSignal
                with edited audio_data. Otherwise, returns a new AudioSignal new_signal.
Example:
            Here are some examples demonstrating how to apply effects to your audio signal.
            Let's start with an obvious effect, such as time stretching. We can add this
            effect to the effects chain by using the built-in effects hook, `time_stretch()`:
>>> signal.signal_duration
10.0
>>> signal.time_stretch(0.5)
>>> signal.signal_duration
10.0
You can find this effect in the AudioSignal's effects chain.
>>> effect = signal.effects_chain[0]
>>> str(effect)
"time_stretch (params: {factor=0.5})"
However, the signal's duration hasn't changed! You will need to call `apply_effects()`
to apply the changes in the signal's effects chains. Applied effects can be found in
`effects_applied`.
>>> new_signal = signal.apply_effects()
>>> new_signal.signal_duration
5.0
>>> str(new_signal.effects_applied[0])
"time_stretch (params: {factor=0.5})"
>>> # This doesn't change the original signal
>>> signal.signal_duration
10.0
You can iterate through effects_chain to use the properties of FilterFunction
objects as arguments to `make_effect`:
>>> for effect in signal1.effects_applied:
>>> filter_ = effect.filter
>>> params = effect.params
>>> signal2.make_effect(filter_, **params)
Using `apply_effects()` will clear out the current effects chain. This behavior can be
avoided by setting `reset` to False.
>>> another_signal = signal.apply_effects()
>>> another_signal.signal_duration
10.0
            To clear out the current effects chain without applying any effects, use
`reset_effects_chain()`. It will not revert effects already applied (i.e., your audio
will still have the effects you applied).
            If `apply_effects()` is called with an empty effects chain, then it returns itself.
>>> another_signal == signal
True
You can also chain effects together. This will add a tremolo effect followed by a high
pass filter effect to the AudioSignal's effects chain (Note: order matters!):
>>> audio_signal.tremolo(5, .6).high_pass(12000)
Using overwrite here, we change the audio data of the variable `audio_signal`, rather
than create a new signal:
>>> audio_signal.apply_effects(overwrite=True)
>>> audio_signal.effects_applied
["tremolo", "highpass"]
If `user_order` is false, FFmpeg effects will be applied AFTER SoX effects, regardless
of the order the hooks are applied. The effects `time_stretch` and `pitch_shift` are SoX
effects. All others are FFmpeg effects. This may be done for speed, as applying all
            FFmpeg effects without interruption will be faster than being interrupted with a SoX
effect.
For example, the two statements will result in the same effected signal:
>>> signal_1 = audio_signal.pitch_shift(4).tremolo(5, .6).apply_effects(user_order=False)
>>> signal_2 = audio_signal.tremolo(5, .6).pitch_shift(4).apply_effects(user_order=False)
>>> signal_1.effects_applied == signal_2.effects_applied
True
Refer to the specific documentation for each effect to determine whether it is a SoX
effect or an FFmpeg effect.
See Also:
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
            * :func:`time_stretch`: Changes the length without affecting the pitch.
            * :func:`pitch_shift`: Changes the pitch without affecting the length of the signal.
* :func:`low_pass`: Applies a low pass filter to the signal.
* :func:`high_pass`: Applies a high pass filter to the signal.
            * :func:`tremolo`: Applies a tremolo (volume wobbling) effect to the signal.
* :func:`vibrato`: Applies a vibrato (pitch wobbling) effect to the signal.
* :func:`chorus`: Applies a chorus effect to the signal.
* :func:`phaser`: Applies a phaser effect to the signal.
* :func:`flanger`: Applies a flanger effect to the signal.
* :func:`emphasis`: Boosts certain frequency ranges of the signal.
* :func:`compressor`: Compresses the dynamic range of the signal.
* :func:`equalizer`: Applies an equalizer to the signal.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
"""
if user_order:
new_signal = self._apply_user_ordered_effects()
else:
new_signal = self._apply_sox_ffmpeg_ordered_effects()
new_signal.reset_effects_chain()
if reset:
self.reset_effects_chain()
if overwrite:
self.audio_data = new_signal.audio_data
self._effects_applied += new_signal.effects_applied
self.stft_data = None
return self
return new_signal
def _apply_user_ordered_effects(self):
new_signal = self
i = j = 0
while i < len(self._effects_chain):
j += 1
if j == len(self._effects_chain) or \
type(self._effects_chain[i]) != type(self._effects_chain[j]): # new fx type
next_chain = self._effects_chain[i:j]
if isinstance(next_chain[0], effects.SoXFilter):
new_signal = effects.apply_effects_sox(new_signal, next_chain)
elif isinstance(next_chain[0], effects.FFmpegFilter):
new_signal = effects.apply_effects_ffmpeg(new_signal, next_chain)
i = j
return new_signal
def _apply_sox_ffmpeg_ordered_effects(self):
new_signal = self
sox_effects_chain = []
ffmpeg_effects_chain = []
for f in self._effects_chain:
if isinstance(f, effects.FFmpegFilter):
ffmpeg_effects_chain.append(f)
elif isinstance(f, effects.SoXFilter):
sox_effects_chain.append(f)
if sox_effects_chain:
new_signal = effects.apply_effects_sox(new_signal, sox_effects_chain)
if ffmpeg_effects_chain:
new_signal = effects.apply_effects_ffmpeg(new_signal, ffmpeg_effects_chain)
return new_signal
def make_effect(self, effect, **kwargs):
"""
Syntactic sugar for adding an arbitrary effect hook to the effects chain by name.
Example:
>>> signal.time_stretch(1.5)
Is the same as
>>> signal.make_effect("time_stretch", factor=1.5)
            The attributes of a FilterFunction in the lists effects_applied or effects_chain
            may be used with `make_effect`.
>>> for effect in signal1.effects_applied:
>>> filter_ = effect.filter
>>> params = effect.params
>>> signal2.make_effect(filter_, **params)
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
effect (str): Function name of desired effect hook of the AudioSignal
**kwargs: Additional parameters for given effect.
Return:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
try:
effect_hook = getattr(self, effect, None)
effect_hook(**kwargs)
except Exception as e:
raise AudioSignalException(f"Error calling {effect} with parameters {kwargs}: `{e}`")
return self
def reset_effects_chain(self):
"""
Clears effects chain of AudioSignal.
This will not revert effects that have already been applied to the audio!
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
"""
self._effects_chain = []
return self
def time_stretch(self, factor, **kwargs):
"""
Adds a time stretch filter to the AudioSignal's effects chain.
        A factor greater than one will shorten the signal, a factor less than one
will lengthen the signal, and a factor of 1 will not change the signal.
This is a SoX effect. Please see
https://pysox.readthedocs.io/en/latest/_modules/sox/transform.html#Transformer.tempo
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
factor (float): Scaling factor for tempo change. Must be positive.
            kwargs: Arguments passed to `sox.transform.tempo`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
self._effects_chain.append(effects.time_stretch(factor, **kwargs))
return self
def pitch_shift(self, n_semitones, **kwargs):
"""
Add pitch shift effect to AudioSignal's effect chain.
A positive shift will change the pitch of the signal by `n_semitones`
semitones. If positive, pitch will get higher, if negative pitch will
get lower.
This is a SoX effect. Please see:
https://pysox.readthedocs.io/en/latest/_modules/sox/transform.html#Transformer.pitch
For details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
n_semitones (float): The number of semitones to shift the audio.
                Positive values increase the frequency of the signal
            kwargs: Arguments passed to `sox.transform.pitch`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
self._effects_chain.append(effects.pitch_shift(n_semitones, **kwargs))
return self
def low_pass(self, freq, poles=2, width_type="h", width=0.707, **kwargs):
"""
Add low pass effect to AudioSignal's effect chain
This is a FFmpeg effect. Please see:
https://ffmpeg.org/ffmpeg-all.html#lowpass
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
freq (float): Threshold for low pass. Should be positive
poles (int): Number of poles. should be either 1 or 2
width_type (str): Unit of width for filter. Must be either:
'h': Hz
'q': Q-factor
'o': octave
's': slope
'k': kHz
width (float): Band width in width_type units
kwargs: Arguments passed to `ffmpeg.filter`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
self._effects_chain.append(effects.low_pass(freq, poles=poles,
width_type=width_type,
width=width, **kwargs))
return self
def high_pass(self, freq, poles=2, width_type="h", width=0.707, **kwargs):
"""
Add high pass effect to AudioSignal's effect chain
This is a FFmpeg effect. Please see:
https://ffmpeg.org/ffmpeg-all.html#highpass
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
freq (float): Threshold for high pass. Should be positive scalar
poles (int): Number of poles. should be either 1 or 2
width_type (str): Unit of width for filter. Must be either:
'h': Hz
'q': Q-factor
'o': octave
's': slope
'k': kHz
width (float): Band width in width_type units
kwargs: Arguments passed to `ffmpeg.filter`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
self._effects_chain.append(effects.high_pass(freq, poles=poles,
width_type=width_type,
width=width, **kwargs))
return self
def tremolo(self, mod_freq, mod_depth, **kwargs):
"""
Add tremolo effect to AudioSignal's effect chain
This is a FFmpeg effect. Please see
https://ffmpeg.org/ffmpeg-all.html#tremolo
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
mod_freq (float): Modulation frequency. Must be between .1 and 20000.
mod_depth (float): Modulation depth. Must be between 0 and 1.
kwargs: Arguments passed to `ffmpeg.filter`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
self._effects_chain.append(effects.tremolo(mod_freq, mod_depth, **kwargs))
return self
def vibrato(self, mod_freq, mod_depth, **kwargs):
"""
Add vibrato effect to AudioSignal's effect chain.
This is a FFmpeg effect. Please see
https://ffmpeg.org/ffmpeg-all.html#vibrato
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
mod_freq (float): Modulation frequency. Must be between .1 and 20000.
mod_depth (float): Modulation depth. Must be between 0 and 1.
kwargs: Arguments passed to `ffmpeg.filter`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
self._effects_chain.append(effects.vibrato(mod_freq, mod_depth, **kwargs))
return self
def chorus(self, delays, decays, speeds,
depths, in_gain=0.4, out_gain=0.4, **kwargs):
"""
Add chorus effect to AudioSignal's effect chain.
This is a FFmpeg effect. Please see
https://ffmpeg.org/ffmpeg-all.html#chorus
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
            delays (list of float): delays in ms. Typical delay is 40ms-60ms
decays (list of float): decays. Must be between 0 and 1
speeds (list of float): speeds. Must be between 0 and 1
depths (list of float): depths. Must be between 0 and 1
in_gain (float): Proportion of input gain. Must be between 0 and 1
out_gain (float): Proportion of output gain. Must be between 0 and 1
kwargs: Arguments passed to `ffmpeg.filter`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
self._effects_chain.append(effects.chorus(delays, decays,
speeds, depths,
in_gain=in_gain,
out_gain=out_gain,
**kwargs))
return self
def phaser(self, in_gain=0.4, out_gain=0.74, delay=3, decay=0.4,
speed=0.5, type_="triangular", **kwargs):
"""
Add phaser effect to AudioSignal's effect chain
This is a FFmpeg effect. Please see
https://ffmpeg.org/ffmpeg-all.html#aphaser
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
in_gain (float): Proportion of input gain. Must be between 0 and 1
out_gain (float): Proportion of output gain. Must be between 0 and 1.
delay (float): Delay of chorus filter in ms. (Time between original signal and delayed)
decay (float): Decay of copied signal. Must be between 0 and 1.
speed (float): Modulation speed of the delayed filter.
            type_ (str): modulation type. Either triangular or sinusoidal:
                "triangular" or "t" for triangular
                "sinusoidal" or "s" for sinusoidal
kwargs: Arguments passed to `ffmpeg.filter`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
fx = effects.phaser(in_gain=in_gain, out_gain=out_gain, delay=delay,
decay=decay, speed=speed, type_=type_, **kwargs)
self._effects_chain.append(fx)
return self
def flanger(self, delay=0, depth=2, regen=0, width=71, speed=0.5,
phase=25, shape="sinusoidal", interp="linear", **kwargs):
"""
Add flanger effect to AudioSignal's effect chain
This is a FFmpeg effect. Please see
https://ffmpeg.org/ffmpeg-all.html#flanger
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
delay (float): Base delay in ms between original signal and copy.
Must be between 0 and 30.
depth (float): Sweep delay in ms. Must be between 0 and 10.
regen (float): Percentage regeneration, or delayed signal feedback.
Must be between -95 and 95.
width (float): Percentage of delayed signal. Must be between 0 and 100.
speed (float): Sweeps per second. Must be in .1 to 10
            shape (str): Swept wave shape. Must be "triangular" or "sinusoidal".
phase (float): swept wave percentage-shift for multi channel. Must be between 0 and 100.
interp (str): Delay Line interpolation. Must be "linear" or "quadratic".
kwargs: Arguments passed to `ffmpeg.filter`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
fx = effects.flanger(delay=delay, depth=depth, regen=regen, width=width,
speed=speed, phase=phase, shape=shape, interp=interp,
**kwargs)
self._effects_chain.append(fx)
return self
def emphasis(self, level_in, level_out, type_="col", mode='production', **kwargs):
"""
Add emphasis effect to AudioSignal's effect chain. An emphasis filter boosts
frequency ranges the most susceptible to noise in a medium. When restoring
sounds from such a medium, a de-emphasis filter is used to de-boost boosted
frequencies.
This is a FFmpeg effect. Please see
https://ffmpeg.org/ffmpeg-all.html#aemphasis
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
level_in (float): Input gain
level_out (float): Output gain
type_ (str): physical medium type to convert/deconvert from.
Must be one of the following:
- "col": Columbia
- "emi": EMI
- "bsi": BSI (78RPM)
- "riaa": RIAA
- "cd": CD (Compact Disk)
- "50fm": 50µs FM
- "75fm": 75µs FM
- "50kf": 50µs FM-KF
- "75kf": 75µs FM-KF
mode (str): Filter mode. Must be one of the following:
- "reproduction": Apply de-emphasis filter
- "production": Apply emphasis filter
kwargs: Arguments passed to `ffmpeg.filter`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
self._effects_chain.append(effects.emphasis(level_in, level_out,
type_=type_, mode=mode,
**kwargs))
return self
def compressor(self, level_in, mode="downward", reduction_ratio=2,
attack=20, release=250, makeup=1, knee=2.8284, link="average",
detection="rms", mix=1, threshold=0.125, **kwargs):
"""
Add compressor effect to AudioSignal's effect chain
This is a FFmpeg effect. Please see
https://ffmpeg.org/ffmpeg-all.html#acompressor
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
level_in (float): Input Gain
mode (str): Mode of compressor operation. Can either be "upward" or "downward".
threshold (float): Volume threshold. If a signal's volume is above the threshold,
gain reduction would apply.
reduction_ratio (float): Ratio in which the signal is reduced.
attack (float): Time in ms between when the signal rises above threshold and when
reduction is applied
release (float): Time in ms between when the signal fall below threshold and
when reduction is decreased.
makeup (float): Factor of amplification post-processing
            knee (float): Softens the transition between reduction and lack thereof.
Higher values translate to a softer transition.
            link (str): Choose average between all channels or mean. String of either
                "average" or "mean".
            detection (str): Whether to process the exact signal or the RMS of nearby signals.
Either "peak" for exact or "rms".
mix (float): Proportion of compressed signal in output.
kwargs: Arguments passed to `ffmpeg.filter`
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
"""
fx = effects.compressor(level_in, mode=mode, reduction_ratio=reduction_ratio,
attack=attack, release=release, makeup=makeup, knee=knee, link=link,
detection=detection, mix=mix, threshold=threshold, **kwargs)
self._effects_chain.append(fx)
return self
def equalizer(self, bands, **kwargs):
"""
        Add equalizer effect to AudioSignal's effect chain
This is a FFmpeg effect. Please see
https://ffmpeg.org/ffmpeg-all.html#anequalizer
for details.
Notes:
            This effect won't be applied until you call `apply_effects()`!
Args:
            bands: A list of dictionaries, one for each band. The required values for each dictionary:
'chn': List of channel numbers to apply filter. Must be list of ints.
                'f': central frequency of band
'w': Width of the band in Hz
'g': Band gain in dB
't': Set filter type for band, optional, can be:
0, for Butterworth
1, for Chebyshev type 1
2, for Chebyshev type 2
Returns:
self: Initial AudioSignal with updated effect chains
See Also:
* :func:`apply_effects`: Applies effects once they are in the effects chain.
* :func:`make_effect`: Syntactic sugar for adding an effect to the chain by name.
* :func:`reset_effects_chain`: Empties the effects chain without applying any effects.
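        Example:
            A minimal sketch of a single-band boost (hypothetical values; channels 0 and 1
            of a stereo signal):
            >>> band = {'chn': [0, 1], 'f': 1000, 'w': 200, 'g': 3, 't': 0}
            >>> _ = signal.equalizer([band])
            >>> _ = signal.apply_effects(overwrite=True)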
"""
self._effects_chain.append(effects.equalizer(bands, **kwargs))
return self
##################################################
# Operator overloading #
##################################################
def __add__(self, other):
return self.add(other)
def __radd__(self, other):
return self.add(other)
def __sub__(self, other):
return self.subtract(other)
def _verify_audio(self, other):
if self.num_channels != other.num_channels:
raise AudioSignalException('Cannot do operation with two signals that have '
'a different number of channels!')
if self.sample_rate != other.sample_rate:
raise AudioSignalException('Cannot do operation with two signals that have '
'different sample rates!')
def _verify_audio_arithmetic(self, other):
self._verify_audio(other)
if self.signal_length != other.signal_length:
raise AudioSignalException('Cannot do arithmetic with signals of different length!')
def __iadd__(self, other):
return self + other
def __isub__(self, other):
return self - other
def __mul__(self, value):
if not isinstance(value, numbers.Real):
raise AudioSignalException('Can only multiply/divide by a scalar!')
return self.make_copy_with_audio_data(np.multiply(self.audio_data, value), verbose=False)
def __div__(self, value):
if not isinstance(value, numbers.Real):
raise AudioSignalException('Can only multiply/divide by a scalar!')
return self.make_copy_with_audio_data(np.divide(self.audio_data, float(value)),
verbose=False)
def __truediv__(self, value):
return self.__div__(value)
def __itruediv__(self, value):
return self.__idiv__(value)
def __imul__(self, value):
return self.apply_gain(value)
def __idiv__(self, value):
return self.apply_gain(1 / float(value))
def __len__(self):
return self.signal_length
def __eq__(self, other):
for k, v in list(self.__dict__.items()):
if isinstance(v, np.ndarray):
if not np.array_equal(v, other.__dict__[k]):
return False
elif v != other.__dict__[k]:
return False
return True
def __ne__(self, other):
return not self == other
class AudioSignalException(Exception):
"""
Exception class for :class:`AudioSignal`.
"""
pass
|
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The Binhost API interacts with Portage binhosts and Packages files."""
from __future__ import print_function
import functools
import os
from chromite.lib import binpkg
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import portage_util
from chromite.utils import key_value_store
# The name of the ACL argument file.
_GOOGLESTORAGE_GSUTIL_FILE = 'googlestorage_acl.txt'
# The name of the package file (relative to sysroot) where the list of packages
# for dev-install is stored.
_DEV_INSTALL_PACKAGES_FILE = 'build/dev-install/package.installable'
class Error(Exception):
"""Base error class for the module."""
class EmptyPrebuiltsRoot(Error):
"""When a prebuilts root is unexpectedly empty."""
class NoAclFileFound(Error):
"""No ACL file could be found."""
def _ValidateBinhostConf(path, key):
"""Validates the binhost conf file defines only one environment variable.
This function is effectively a sanity check that ensures unexpected
configuration is not clobbered by conf overwrites.
Args:
path: Path to the file to validate.
key: Expected binhost key.
Raises:
ValueError: If file defines != 1 environment variable.
"""
if not os.path.exists(path):
# If the conf file does not exist, e.g. with new targets, then whatever.
return
kvs = key_value_store.LoadFile(path)
if not kvs:
raise ValueError(
'Found empty .conf file %s when a non-empty one was expected.' % path)
elif len(kvs) > 1:
raise ValueError(
'Conf file %s must define exactly 1 variable. '
'Instead found: %r' % (path, kvs))
elif key not in kvs:
raise KeyError('Did not find key %s in %s' % (key, path))
def _ValidatePrebuiltsFiles(prebuilts_root, prebuilts_paths):
"""Validate all prebuilt files exist.
Args:
prebuilts_root: Absolute path to root directory containing prebuilts.
prebuilts_paths: List of file paths relative to root, to be verified.
Raises:
LookupError: If any prebuilt archive does not exist.
"""
for prebuilt_path in prebuilts_paths:
full_path = os.path.join(prebuilts_root, prebuilt_path)
if not os.path.exists(full_path):
raise LookupError('Prebuilt archive %s does not exist' % full_path)
def _ValidatePrebuiltsRoot(target, prebuilts_root):
"""Validate the given prebuilts root exists.
If the root does not exist, it probably means the build target did not build
successfully, so warn callers appropriately.
Args:
target: The build target in question.
prebuilts_root: The expected root directory for the target's prebuilts.
Raises:
EmptyPrebuiltsRoot: If prebuilts root does not exist.
"""
if not os.path.exists(prebuilts_root):
raise EmptyPrebuiltsRoot(
'Expected to find prebuilts for build target %s at %s. '
'Did %s build successfully?' % (target, prebuilts_root, target))
def GetPrebuiltsRoot(chroot, sysroot, build_target):
"""Find the root directory with binary prebuilts for the given sysroot.
Args:
chroot (chroot_lib.Chroot): The chroot where the sysroot lives.
sysroot (sysroot_lib.Sysroot): The sysroot.
build_target (build_target_lib.BuildTarget): The build target.
Returns:
Absolute path to the root directory with the target's prebuilt archives.
"""
root = os.path.join(chroot.path, sysroot.path.lstrip(os.sep), 'packages')
_ValidatePrebuiltsRoot(build_target, root)
return root
def GetPrebuiltsFiles(prebuilts_root):
"""Find paths to prebuilts at the given root directory.
Assumes the root contains a Portage package index named Packages.
  The package index paths are used to de-duplicate uploaded prebuilts. The
  immediate consequence of this is reduced storage usage. The non-obvious
  consequence is that shared packages generally end up with public permissions,
  while the board-specific packages end up with private permissions. This is
  the intended behavior, but a further consequence is that if something causes
  the toolchains to be uploaded as a private board's packages, that board will
  not be able to build properly, because it cannot fetch toolchain packages
  that it expects to be public.
Args:
prebuilts_root: Absolute path to root directory containing a package index.
Returns:
List of paths to all prebuilt archives, relative to the root.
"""
package_index = binpkg.GrabLocalPackageIndex(prebuilts_root)
prebuilt_paths = []
for package in package_index.packages:
prebuilt_paths.append(package['CPV'] + '.tbz2')
include_debug_symbols = package.get('DEBUG_SYMBOLS')
if cros_build_lib.BooleanShellValue(include_debug_symbols, default=False):
prebuilt_paths.append(package['CPV'] + '.debug.tbz2')
_ValidatePrebuiltsFiles(prebuilts_root, prebuilt_paths)
return prebuilt_paths
def UpdatePackageIndex(prebuilts_root, upload_uri, upload_path, sudo=False):
"""Update package index with information about where it will be uploaded.
This causes the existing Packages file to be overwritten.
Args:
prebuilts_root: Absolute path to root directory containing binary prebuilts.
upload_uri: The URI (typically GS bucket) where prebuilts will be uploaded.
upload_path: The path at the URI for the prebuilts.
sudo (bool): Whether to write the file as the root user.
Returns:
Path to the new Package index.
"""
assert not upload_path.startswith('/')
package_index = binpkg.GrabLocalPackageIndex(prebuilts_root)
package_index.SetUploadLocation(upload_uri, upload_path)
package_index.header['TTL'] = 60 * 60 * 24 * 365
package_index_path = os.path.join(prebuilts_root, 'Packages')
package_index.WriteFile(package_index_path, sudo=sudo)
return package_index_path
def SetBinhost(target, key, uri, private=True):
"""Set binhost configuration for the given build target.
A binhost is effectively a key (Portage env variable) pointing to a URL
that contains binaries. The configuration is set in .conf files at static
directories on a build target by build target (and host by host) basis.
This function updates the .conf file by completely rewriting it.
Args:
target: The build target to set configuration for.
key: The binhost key to set, e.g. POSTSUBMIT_BINHOST.
uri: The new value for the binhost key, e.g. gs://chromeos-prebuilt/foo/bar.
private: Whether or not the build target is private.
Returns:
Path to the updated .conf file.
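  Example:
    A minimal sketch (hypothetical build target and URI, for illustration only):
      SetBinhost('eve', 'POSTSUBMIT_BINHOST',
                 'gs://chromeos-prebuilt/board/eve/packages', private=False)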
"""
conf_root = os.path.join(
constants.SOURCE_ROOT,
constants.PRIVATE_BINHOST_CONF_DIR if private else
constants.PUBLIC_BINHOST_CONF_DIR, 'target')
conf_file = '%s-%s.conf' % (target, key)
conf_path = os.path.join(conf_root, conf_file)
_ValidateBinhostConf(conf_path, key)
osutils.WriteFile(conf_path, '%s="%s"' % (key, uri))
return conf_path
def RegenBuildCache(chroot, overlay_type):
"""Regenerate the Build Cache for the given target.
Args:
chroot (chroot_lib): The chroot where the regen command will be run.
overlay_type: one of "private", "public", or "both".
Returns:
list[str]: The overlays with updated caches.
"""
overlays = portage_util.FindOverlays(overlay_type)
task = functools.partial(
portage_util.RegenCache, commit_changes=False, chroot=chroot)
task_inputs = [[o] for o in overlays if os.path.isdir(o)]
results = parallel.RunTasksInProcessPool(task, task_inputs)
# Filter out all of the unchanged-overlay results.
return [overlay_dir for overlay_dir in results if overlay_dir]
def GetPrebuiltAclArgs(build_target):
"""Read and parse the GS ACL file from the private overlays.
Args:
build_target (build_target_lib.BuildTarget): The build target.
Returns:
list[list[str]]: A list containing all of the [arg, value] pairs. E.g.
[['-g', 'group_id:READ'], ['-u', 'user:FULL_CONTROL']]
"""
acl_file = portage_util.FindOverlayFile(_GOOGLESTORAGE_GSUTIL_FILE,
board=build_target.name)
if not acl_file:
raise NoAclFileFound('No ACL file found for %s.' % build_target.name)
lines = osutils.ReadFile(acl_file).splitlines()
# Remove comments.
lines = [line.split('#', 1)[0].strip() for line in lines]
# Remove empty lines.
lines = [line.strip() for line in lines if line.strip()]
return [line.split() for line in lines]
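# Illustrative sketch (not part of the original module): the [arg, value] pairs
# returned by GetPrebuiltAclArgs are shaped like "gsutil acl ch" arguments, so
# a caller might flatten them into a command line as below. The command layout
# is an assumption made for illustration only.
def _example_acl_change_command(build_target, gs_path):
  acl_args = [arg for pair in GetPrebuiltAclArgs(build_target) for arg in pair]
  return ['gsutil', 'acl', 'ch'] + acl_args + [gs_path]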
def GetBinhosts(build_target):
"""Get the binhosts for the build target.
Args:
build_target (build_target_lib.BuildTarget): The build target.
Returns:
list[str]: The build target's binhosts.
"""
binhosts = portage_util.PortageqEnvvar('PORTAGE_BINHOST',
board=build_target.name,
allow_undefined=True)
return binhosts.split() if binhosts else []
def ReadDevInstallPackageFile(filename):
"""Parse the dev-install package file.
Args:
filename (str): The full path to the dev-install package list.
Returns:
list[str]: The packages in the package list file.
"""
with open(filename) as f:
return [line.strip() for line in f]
def ReadDevInstallFilesToCreatePackageIndex(chroot, sysroot, package_index_path,
upload_uri, upload_path):
"""Create dev-install Package index specified by package_index_path
The current Packages file is read and a new Packages file is created based
on the subset of packages in the _DEV_INSTALL_PACKAGES_FILE.
Args:
chroot (chroot_lib.Chroot): The chroot where the sysroot lives.
sysroot (sysroot_lib.Sysroot): The sysroot.
package_index_path (str): Path to the Packages file to be created.
upload_uri: The URI (typically GS bucket) where prebuilts will be uploaded.
upload_path: The path at the URI for the prebuilts.
Returns:
list[str]: The list of packages contained in package_index_path,
where each package string is a category/file.
"""
# Read the dev-install binhost package file
devinstall_binhost_filename = chroot.full_path(sysroot.path,
_DEV_INSTALL_PACKAGES_FILE)
devinstall_package_list = ReadDevInstallPackageFile(
devinstall_binhost_filename)
# Read the Packages file, remove packages not in package_list
package_path = chroot.full_path(sysroot.path, 'packages')
CreateFilteredPackageIndex(package_path, devinstall_package_list,
package_index_path,
upload_uri, upload_path)
# We have the list of packages, create full path and verify each one.
upload_targets_list = GetPrebuiltsForPackages(
package_path, devinstall_package_list)
return upload_targets_list
def CreateFilteredPackageIndex(package_path, devinstall_package_list,
package_index_path,
upload_uri, upload_path, sudo=False):
"""Create Package file for dev-install process.
The created package file (package_index_path) contains only the
packages from the system packages file (in package_path) that are in the
devinstall_package_list. The new package file will use the provided values
for upload_uri and upload_path.
Args:
package_path (str): Absolute path to the standard Packages file.
devinstall_package_list (list[str]): Packages from packages.installable
package_index_path (str): Absolute path for new Packages file.
upload_uri (str): The URI where prebuilts will be uploaded.
upload_path (str): The path at the URI for the prebuilts.
sudo (bool): Whether to write the file as the root user.
"""
  def ShouldFilterPackage(package):
    """Local func to filter out packages that are not in devinstall_package_list.
    Args:
      package (dict): Dictionary with key 'CPV' and the package name as value.
    Returns:
      True (filter the package out) if it is not in devinstall_package_list,
      else False (keep it).
    """
    return package['CPV'] not in devinstall_package_list
package_index = binpkg.GrabLocalPackageIndex(package_path)
package_index.RemoveFilteredPackages(ShouldFilterPackage)
package_index.SetUploadLocation(upload_uri, upload_path)
package_index.header['TTL'] = 60 * 60 * 24 * 365
package_index.WriteFile(package_index_path, sudo=sudo)
def GetPrebuiltsForPackages(package_root, package_list):
"""Create list of file paths for the package list and validate they exist.
Args:
package_root (str): Path to 'packages' directory.
package_list (list[str]): List of packages.
Returns:
List of validated targets.
"""
upload_targets_list = []
for pkg in package_list:
zip_target = pkg + '.tbz2'
upload_targets_list.append(zip_target)
full_pkg_path = os.path.join(package_root, pkg) + '.tbz2'
if not os.path.exists(full_pkg_path):
raise LookupError('DevInstall archive %s does not exist' % full_pkg_path)
return upload_targets_list
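# Illustrative sketch (not part of the original module): a hypothetical caller
# tying the dev-install helpers together. The chroot/sysroot objects and the
# upload location are assumptions; only the helper functions above are real.
def _example_devinstall_upload_targets(chroot, sysroot, tmp_dir):
  package_index_path = os.path.join(tmp_dir, 'Packages')
  upload_uri = 'gs://chromeos-dev-installer'   # assumed bucket
  upload_path = 'board/example/latest'         # assumed path
  return ReadDevInstallFilesToCreatePackageIndex(
      chroot, sysroot, package_index_path, upload_uri, upload_path)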
|
import time
PROGRESS_BAR_SIZE = 15
class Task:
def __init__(self, task_name):
self.name = task_name
self.start_time = time.time()
self.is_progressive_task = False
self.is_done = False
def get_execution_time(self):
return round(time.time() - self.start_time, 2)
current_task = Task('')
current_subtask = Task('')
def start_task(task_name):
global current_task
if not current_task.is_done and current_task.name != '':
end_current_task()
current_task = Task(task_name)
print(task_name)
def end_current_task():
global current_task, current_subtask
current_task.is_done = True
if not current_subtask.is_done and current_subtask.name != '':
end_current_subtask()
print(f'└► Done in {current_task.get_execution_time()}s\n')
def start_subtask(task_name):
global current_subtask
if not current_subtask.is_done and current_subtask.name != '':
end_current_subtask()
current_subtask = Task(task_name)
print(f'├► {current_subtask.name}', end='')
def set_subtask_progression(index, length):
    """Update the current subtask's progress bar for item `index` of `length` items."""
    global current_subtask
    current_subtask.is_progressive_task = True
    # Guard against a zero division when the subtask has a single item.
    progression = int(round(index * 100 / (length - 1))) if length > 1 else 100
    int_progression = int(round(progression * PROGRESS_BAR_SIZE / 100))
    bar = '█' * int_progression + '░' * (PROGRESS_BAR_SIZE - int_progression)
    # Naive ETA: assume the remaining items take as long as the completed ones.
    time_remaining = round((length - index) * (time.time() - current_subtask.start_time) / (index + 1), 1)
    print(f'\r├► {current_subtask.name} {bar} {time_remaining}s          ', end='')
    if index == length - 1:
        end_current_subtask()
def end_current_subtask():
global current_subtask
current_subtask.is_done = True
bar = '█' * PROGRESS_BAR_SIZE + ' done in ' if current_subtask.is_progressive_task else ''
execution_time = str(current_subtask.get_execution_time()) + 's'
print(f'\r├► {current_subtask.name} {bar}{execution_time}')
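# Example usage (illustrative, not part of the original module): one plain
# subtask followed by a progressive subtask driven by set_subtask_progression.
if __name__ == '__main__':
    start_task('Processing dataset')
    start_subtask('Loading files')
    time.sleep(0.2)
    start_subtask('Crunching numbers')
    items = list(range(50))
    for i, _ in enumerate(items):
        time.sleep(0.01)
        set_subtask_progression(i, len(items))
    end_current_task()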
|
"""Removes the received-datetime prefix that another email_renamer script
previously added to the file names of .msg email files.
Creates a log file that lists the original filepath, new filename, and file
fixity (| delimited).
"""
import os
import hashlib
### set this to the name of your log file.
my_log_file = "log.txt"
### set to folder to process.
folder = r"folder"
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
log_data = []
for f in [x for x in os.listdir(folder) if x.endswith(".msg")]:
    msg_filepath = os.path.join(folder, f)
    # Skip files that do not carry the "<datetime>#<original name>" prefix.
    if "#" not in f:
        print("No prefix found" + "|" + msg_filepath)
        continue
    new_filename = f.split("#", 1)[1]
    new_filepath = os.path.join(folder, new_filename)
    msg_file_md5 = md5(msg_filepath)
    # If the 'cleaned' filename already exists in the folder, the filename is not changed back.
    if not os.path.exists(new_filepath):
        os.rename(msg_filepath, new_filepath)
        log_data.append(f"{msg_filepath}|{new_filename}|{msg_file_md5}")
    else:
        print("Filename exists" + "|" + msg_filepath + "|" + msg_file_md5)
with open(my_log_file, "w", encoding = "utf8") as data:
data.write("\n".join(log_data))
|
import scrapy
class CurrencySpider(scrapy.Spider):
    name = 'currencyspider'
start_urls = ['https://www.bankexamstoday.com/2019/06/countries-capital-currency-and-languages.html']
def parse(self, response):
for link in response.css('tr td:nth-child(3)'):
yield {'currency': link.css('td::text').extract_first()} |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue744-v1"]
SEARCHES = [
("bjolp-silent", [
"--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search", "astar(lmc,lazy_evaluator=lmc, verbosity=silent)"]),
("blind-silent", ["--search", "astar(blind(), verbosity=silent)"]),
("cegar-silent", ["--search", "astar(cegar(), verbosity=silent)"]),
# ("divpot", ["--search", "astar(diverse_potentials(), verbosity=silent)"]),
("ipdb-silent", ["--search", "astar(ipdb(), verbosity=silent)"]),
("lmcut-silent", ["--search", "astar(lmcut(), verbosity=silent)"]),
("mas-silent", [
"--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1, verbosity=normal), verbosity=silent)"]),
# ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]), verbosity=silent)"]),
("h2-silent", ["--search", "astar(hm(m=2), verbosity=silent)"]),
("hmax-silent", ["--search", "astar(hmax(), verbosity=silent)"]),
("bjolp-normal", [
"--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
"--search", "astar(lmc,lazy_evaluator=lmc, verbosity=normal)"]),
("blind-normal", ["--search", "astar(blind(), verbosity=normal)"]),
("cegar-normal", ["--search", "astar(cegar(), verbosity=normal)"]),
# ("divpot", ["--search", "astar(diverse_potentials(), verbosity=normal)"]),
("ipdb-normal", ["--search", "astar(ipdb(), verbosity=normal)"]),
("lmcut-normal", ["--search", "astar(lmcut(), verbosity=normal)"]),
("mas-normal", [
"--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
" merge_strategy=merge_sccs(order_of_sccs=topological,"
" merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
" label_reduction=exact(before_shrinking=true, before_merging=false),"
" max_states=50000, threshold_before_merge=1, verbosity=normal), verbosity=normal)"]),
# ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]), verbosity=normal)"]),
("h2-normal", ["--search", "astar(hm(m=2), verbosity=normal)"]),
("hmax-normal", ["--search", "astar(hmax(), verbosity=normal)"]),
]
CONFIGS = [
IssueConfig(search_nick, search,
driver_options=["--overall-time-limit", "30m"])
for rev in REVISIONS
for search_nick, search in SEARCHES
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('custom-parser.py')
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_parse_again_step()
log_size = Attribute('log_size')
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size]
exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()
sort_spec = [('log_size', 'desc')]
attributes = ['run_dir', 'log_size']
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec, filter_algorithm=[
    "{}-bjolp-silent".format(REVISIONS[0]),
    "{}-blind-silent".format(REVISIONS[0]),
    "{}-cegar-silent".format(REVISIONS[0]),
    "{}-ipdb-silent".format(REVISIONS[0]),
    "{}-lmcut-silent".format(REVISIONS[0]),
    "{}-mas-silent".format(REVISIONS[0]),
    "{}-h2-silent".format(REVISIONS[0]),
    "{}-hmax-silent".format(REVISIONS[0]),
], name="silent")
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec, filter_algorithm=[
    "{}-bjolp-normal".format(REVISIONS[0]),
    "{}-blind-normal".format(REVISIONS[0]),
    "{}-cegar-normal".format(REVISIONS[0]),
    "{}-ipdb-normal".format(REVISIONS[0]),
    "{}-lmcut-normal".format(REVISIONS[0]),
    "{}-mas-normal".format(REVISIONS[0]),
    "{}-h2-normal".format(REVISIONS[0]),
    "{}-hmax-normal".format(REVISIONS[0]),
], name="normal")
exp.run_steps()
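# Note on 'custom-parser.py' (referenced above but not included here): it is
# assumed to produce the 'log_size' attribute used in the reports. With lab's
# Parser API, a minimal sketch might look roughly like the hypothetical code
# below (kept as a comment so it does not run as part of this script):
#
#     from lab.parser import Parser
#     parser = Parser()
#     parser.add_function(
#         lambda content, props: props.update({"log_size": len(content)}))
#     parser.parse()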
|
from .base import HTTP
from .airos import AirOS
__all__ = [
'AirOS'
]
|
from JumpScale import j
from Network import Network
from Interface import Interface
from Disk import Disk
from Pool import Pool
from StorageController import StorageController
from KVMController import KVMController
from Machine import Machine
from CloudMachine import CloudMachine
from MachineSnapshot import MachineSnapshot
class KVM:
def __init__(self):
self.__jslocation__ = "j.sal.kvm"
self.KVMController = KVMController
self.Machine = Machine
self.MachineSnapshot = MachineSnapshot
self.Network = Network
self.Interface = Interface
self.Disk = Disk
self.Pool = Pool
self.StorageController = StorageController
self.CloudMachine = CloudMachine
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-08 19:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('push_notifications', '0006_webpushdevice'),
]
operations = [
migrations.AddField(
model_name='gcmdevice',
name='device_uuid',
field=models.UUIDField(blank=True, db_index=True, help_text='ANDROID_ID / TelephonyManager.getDeviceId()', null=True, verbose_name='Device ID'),
)
] |
import pyVmomi
from osbot_utils.utils.Misc import wait
from k8_vmware.vsphere.VM_Keystroke import VM_Keystroke
class VM:
def __init__(self, vm):
self.vm = vm
def config(self):
return self.summary().config
def controller_scsi(self):
controllers = self.devices_SCSI_Controllers()
if len(controllers) > 0:
return controllers[0] # default to returning the first one
def controller_ide(self):
controllers = self.devices_IDE_Controllers()
if len(controllers) > 0:
return controllers[0] # default to returning the first one
def controller_ide_free_slot(self):
controllers = self.devices_IDE_Controllers()
for controller in controllers:
if len(controller.device) < 2:
return controller
def devices(self):
return self.vm.config.hardware.device
def devices_IDE_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualIDEController )
def devices_Cdroms (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualCdrom )
def devices_Disks (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualDisk )
def devices_AHCI_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualAHCIController )
def devices_PCNet_32s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualPCNet32 )
def devices_Vmxnet_2s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualVmxnet2 )
def devices_Vmxnet_3s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualVmxnet3 )
def devices_E1000s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualE1000 )
def devices_E1000es (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualE1000e )
def devices_SCSI_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualSCSIController )
def devices_Sriov_EthernetCards (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualSriovEthernetCard )
    def devices_of_type(self, device_type):
        devices = []
        for device in self.devices():
            if isinstance(device, device_type):
                devices.append(device)
        return devices
def devices_indexed_by_label(self):
devices = {}
for device in self.devices():
key = device.deviceInfo.label
value = device
devices[key] = value
return devices
def guest(self):
return self.summary().guest
def id(self):
return f"vim.{self.vm._wsdlName}:{self.vm._moId}"
#note: we can also get this value from these methods (but they will do an extra request to the server)
# str(self.resource_config().entity)
# self.summary().vm
def info(self):
        summary = self.summary()  # cache this, since each reference to self.vm.summary makes a REST call to the server
        # print(summary)
        config = summary.config   # these values were retrieved on the initial call to self.vm.summary
        guest = summary.guest     # using self.vm.summary.guest here would have resulted in two more REST calls
runtime = summary.runtime
info = {
"Boot_Time" : str(runtime.bootTime) ,
"Connection_State" : runtime.connectionState,
"Guest_Id" : config.guestId ,
"Guest_Full_Name" : config.guestFullName ,
"Host" : runtime.host ,
"Host_Name" : guest.hostName ,
"IP" : guest.ipAddress ,
"Memory_Size_MB" : config.memorySizeMB ,
"MOID" : self.vm._moId ,
"Name" : config.name ,
"Max_Cpu_Usage" : runtime.maxCpuUsage ,
"Max_Memory_Usage" : runtime.maxMemoryUsage ,
"Notes" : config.annotation ,
"Num_Cpu" : config.numCpu ,
"Path_Name" : config.vmPathName ,
"State_State" : runtime.powerState ,
"Question" : None ,
"UUID" : config.uuid
}
# if guest != None: info['IP'] = guest.ipAddress
        if runtime.question is not None:
            info['Question'] = runtime.question.text
return info
def hardware(self):
return self.vm.config.hardware
def host_name(self):
return self.guest().hostName
def ip(self):
return self.guest().ipAddress
def name(self):
return self.config().name
def moid(self):
return str(self.vm._moId)
def powered_state(self):
return self.runtime().powerState
def power_on(self):
return self.task().power_on()
def power_off(self):
return self.task().power_off()
def powered_on(self):
return self.powered_state() == 'poweredOn'
def powered_off(self):
return self.powered_state() == 'poweredOff'
def resource_config(self):
return self.vm.resourceConfig
def screenshot(self, target_file=None):
from k8_vmware.vsphere.VM_Screenshot import VM_Screenshot
return VM_Screenshot(self, target_file=target_file).download()
def send_text(self, text):
VM_Keystroke(self).send_text(text)
return self
    def send_key(self, text):
        VM_Keystroke(self).send_key(text)
        return self
def send_enter(self):
VM_Keystroke(self).enter()
return self
def summary(self):
return self.vm.summary # will make REST call to RetrievePropertiesEx
def task(self):
from k8_vmware.vsphere.VM_Task import VM_Task # have to do this import here due to circular dependencies (i.e. VM_Task imports VM)
return VM_Task(self)
def runtime(self):
return self.vm.summary.runtime
def uuid(self):
return self.config().uuid
def wait(self, seconds): # to help with fluent code
wait(seconds)
return self
def __str__(self):
return f'[VM] {self.name()}'
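# Illustrative sketch (not part of the original module): connecting to vCenter
# with pyVim and wrapping the first VM found in this helper class. The
# connection details and the container-view lookup are assumptions about the
# caller's environment, not part of this wrapper's API.
def _example_first_vm_info(host, user, pwd):
    from pyVim.connect import SmartConnect, Disconnect  # assumed to ship alongside pyVmomi
    si = SmartConnect(host=host, user=user, pwd=pwd)
    try:
        content = si.RetrieveContent()
        view = content.viewManager.CreateContainerView(
            content.rootFolder, [pyVmomi.vim.VirtualMachine], True)
        vms = list(view.view)
        return VM(vms[0]).info() if vms else None
    finally:
        Disconnect(si)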
|