# Implement a program that receives a temperature from the user in degrees Celsius or Fahrenheit. Before reading the temperature, ask the user whether they want to enter it in Celsius or in Fahrenheit. If the input is in Celsius, the program must return the temperature in Fahrenheit; if the input is in Fahrenheit, it must return the temperature in Celsius. F = C x 1.8 + 32
opcao = int(input('\nOLÁ!\n Para inserir em Celsius, digite [1]; para Fahrenheit, [2]: '))
temp = float(input('\nDigite a Temperatura: '))
def celsius_fahrenheit(temperatura):
    print(f'\nTemperatura convertida em Fahrenheit: {(temperatura * 1.8) + 32}\n')
def fahrenheit_celsius(temperatura):
    print(f'\nTemperatura convertida em Celsius: {(temperatura - 32) / 1.8}\n')
if opcao == 1:
    celsius_fahrenheit(temp)
elif opcao == 2:
    fahrenheit_celsius(temp) |
#!/usr/bin/python
import sys, os, re, string
dotExtRe = re.compile(r"\.[a-zA-Z]+")
includeRe = re.compile(r"^#[ \t]*include[ \t]*")
for file in sys.argv[1:]:
inf = open(file)
myIncludes = [ ]
for x in inf.readlines():
m = includeRe.search(x)
if m is not None:
x = x[m.regs[0][1]:-1]
if x[0] == '"':
x = x[1:-1]
if x not in myIncludes:
myIncludes.append(x)
inf.close()
m = dotExtRe.search(file)
assert m is not None
dotOFile = file[:m.regs[0][0]] + ".o"
sys.stdout.write(dotOFile + ": " + file)
for x in myIncludes:
sys.stdout.write(" " + x)
sys.stdout.write("\n")
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import dsz
import dsz.cmd
import dsz.data
import dsz.lp
class Plugins(dsz.data.Task):
def __init__(self, cmd=None):
dsz.data.Task.__init__(self, cmd)
def _LoadData(self):
try:
self.Local = Plugins.Local(dsz.cmd.data.Get('Local', dsz.TYPE_OBJECT)[0])
except:
self.Local = None
try:
self.Remote = Plugins.Remote(dsz.cmd.data.Get('Remote', dsz.TYPE_OBJECT)[0])
except:
self.Remote = None
return
class Local(dsz.data.DataBean):
def __init__(self, obj):
try:
self.address = dsz.cmd.data.ObjectGet(obj, 'address', dsz.TYPE_STRING)[0]
except:
self.address = None
self.Plugin = list()
try:
for x in dsz.cmd.data.ObjectGet(obj, 'Plugin', dsz.TYPE_OBJECT):
self.Plugin.append(Plugins.Local.Plugin(x))
except:
pass
return
class Plugin(dsz.data.DataBean):
def __init__(self, obj):
try:
self.core = dsz.cmd.data.ObjectGet(obj, 'core', dsz.TYPE_BOOL)[0]
except:
self.core = None
try:
self.ReallyLoaded = dsz.cmd.data.ObjectGet(obj, 'ReallyLoaded', dsz.TYPE_BOOL)[0]
except:
self.ReallyLoaded = None
try:
self.loadCount = dsz.cmd.data.ObjectGet(obj, 'loadCount', dsz.TYPE_INT)[0]
except:
self.loadCount = None
try:
self.id = dsz.cmd.data.ObjectGet(obj, 'id', dsz.TYPE_INT)[0]
except:
self.id = None
try:
self.name = dsz.cmd.data.ObjectGet(obj, 'name', dsz.TYPE_STRING)[0]
except:
self.name = None
try:
self.loaderInfo = dsz.cmd.data.ObjectGet(obj, 'loaderInfo', dsz.TYPE_STRING)[0]
except:
self.loaderInfo = None
self.RegisteredApis = list()
try:
for x in dsz.cmd.data.ObjectGet(obj, 'RegisteredApis', dsz.TYPE_OBJECT):
self.RegisteredApis.append(Plugins.Local.Plugin.RegisteredApis(x))
except:
pass
self.AcquiredApis = list()
try:
for x in dsz.cmd.data.ObjectGet(obj, 'AcquiredApis', dsz.TYPE_OBJECT):
self.AcquiredApis.append(Plugins.Local.Plugin.AcquiredApis(x))
except:
pass
try:
self.Version = Plugins.Local.Plugin.Version(dsz.cmd.data.ObjectGet(obj, 'Version', dsz.TYPE_OBJECT)[0])
except:
self.Version = None
return
class RegisteredApis(dsz.data.DataBean):
def __init__(self, obj):
try:
self.interface = dsz.cmd.data.ObjectGet(obj, 'interface', dsz.TYPE_INT)[0]
except:
self.interface = None
try:
self.provider = dsz.cmd.data.ObjectGet(obj, 'provider', dsz.TYPE_INT)[0]
except:
self.provider = None
return
class AcquiredApis(dsz.data.DataBean):
def __init__(self, obj):
try:
self.interface = dsz.cmd.data.ObjectGet(obj, 'interface', dsz.TYPE_INT)[0]
except:
self.interface = None
try:
self.provider = dsz.cmd.data.ObjectGet(obj, 'provider', dsz.TYPE_INT)[0]
except:
self.provider = None
try:
self.providedBy = dsz.cmd.data.ObjectGet(obj, 'providedBy', dsz.TYPE_STRING)[0]
except:
self.providedBy = None
return
class Version(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Lla = Plugins.Local.Plugin.Version.Lla(dsz.cmd.data.ObjectGet(obj, 'Lla', dsz.TYPE_OBJECT)[0])
except:
self.Lla = None
try:
self.Module = Plugins.Local.Plugin.Version.Module(dsz.cmd.data.ObjectGet(obj, 'Module', dsz.TYPE_OBJECT)[0])
except:
self.Module = None
try:
self.BuildEnvironment = Plugins.Local.Plugin.Version.BuildEnvironment(dsz.cmd.data.ObjectGet(obj, 'BuildEnvironment', dsz.TYPE_OBJECT)[0])
except:
self.BuildEnvironment = None
return
class Lla(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Major = dsz.cmd.data.ObjectGet(obj, 'Major', dsz.TYPE_INT)[0]
except:
self.Major = None
try:
self.Minor = dsz.cmd.data.ObjectGet(obj, 'Minor', dsz.TYPE_INT)[0]
except:
self.Minor = None
try:
self.Revision = dsz.cmd.data.ObjectGet(obj, 'Revision', dsz.TYPE_INT)[0]
except:
self.Revision = None
return
class Module(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Major = dsz.cmd.data.ObjectGet(obj, 'Major', dsz.TYPE_INT)[0]
except:
self.Major = None
try:
self.Minor = dsz.cmd.data.ObjectGet(obj, 'Minor', dsz.TYPE_INT)[0]
except:
self.Minor = None
try:
self.Revision = dsz.cmd.data.ObjectGet(obj, 'Revision', dsz.TYPE_INT)[0]
except:
self.Revision = None
try:
self.Flags = Plugins.Local.Plugin.Version.Module.Flags(dsz.cmd.data.ObjectGet(obj, 'Flags', dsz.TYPE_OBJECT)[0])
except:
self.Flags = None
return
class Flags(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Value = dsz.cmd.data.ObjectGet(obj, 'Value', dsz.TYPE_INT)[0]
except:
self.Value = None
try:
self.Target = dsz.cmd.data.ObjectGet(obj, 'Target', dsz.TYPE_BOOL)[0]
except:
self.Target = None
try:
self.Lp = dsz.cmd.data.ObjectGet(obj, 'Lp', dsz.TYPE_BOOL)[0]
except:
self.Lp = None
return
class BuildEnvironment(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Major = dsz.cmd.data.ObjectGet(obj, 'Major', dsz.TYPE_INT)[0]
except:
self.Major = None
try:
self.Minor = dsz.cmd.data.ObjectGet(obj, 'Minor', dsz.TYPE_INT)[0]
except:
self.Minor = None
try:
self.Revision = dsz.cmd.data.ObjectGet(obj, 'Revision', dsz.TYPE_INT)[0]
except:
self.Revision = None
try:
self.TypeValue = dsz.cmd.data.ObjectGet(obj, 'TypeValue', dsz.TYPE_INT)[0]
except:
self.TypeValue = None
try:
self.Type = dsz.cmd.data.ObjectGet(obj, 'Type', dsz.TYPE_STRING)[0]
except:
self.Type = None
return
class Remote(dsz.data.DataBean):
def __init__(self, obj):
try:
self.address = dsz.cmd.data.ObjectGet(obj, 'address', dsz.TYPE_STRING)[0]
except:
self.address = None
self.Plugin = list()
try:
for x in dsz.cmd.data.ObjectGet(obj, 'Plugin', dsz.TYPE_OBJECT):
self.Plugin.append(Plugins.Remote.Plugin(x))
except:
pass
return
class Plugin(dsz.data.DataBean):
def __init__(self, obj):
try:
self.core = dsz.cmd.data.ObjectGet(obj, 'core', dsz.TYPE_BOOL)[0]
except:
self.core = None
try:
self.ReallyLoaded = dsz.cmd.data.ObjectGet(obj, 'ReallyLoaded', dsz.TYPE_BOOL)[0]
except:
self.ReallyLoaded = None
try:
self.loadCount = dsz.cmd.data.ObjectGet(obj, 'loadCount', dsz.TYPE_INT)[0]
except:
self.loadCount = None
try:
self.id = dsz.cmd.data.ObjectGet(obj, 'id', dsz.TYPE_INT)[0]
except:
self.id = None
try:
self.name = dsz.cmd.data.ObjectGet(obj, 'name', dsz.TYPE_STRING)[0]
except:
self.name = None
try:
self.loaderInfo = dsz.cmd.data.ObjectGet(obj, 'loaderInfo', dsz.TYPE_STRING)[0]
except:
self.loaderInfo = None
self.RegisteredApis = list()
try:
for x in dsz.cmd.data.ObjectGet(obj, 'RegisteredApis', dsz.TYPE_OBJECT):
self.RegisteredApis.append(Plugins.Remote.Plugin.RegisteredApis(x))
except:
pass
self.AcquiredApis = list()
try:
for x in dsz.cmd.data.ObjectGet(obj, 'AcquiredApis', dsz.TYPE_OBJECT):
self.AcquiredApis.append(Plugins.Remote.Plugin.AcquiredApis(x))
except:
pass
try:
self.Version = Plugins.Remote.Plugin.Version(dsz.cmd.data.ObjectGet(obj, 'Version', dsz.TYPE_OBJECT)[0])
except:
self.Version = None
return
class RegisteredApis(dsz.data.DataBean):
def __init__(self, obj):
try:
self.interface = dsz.cmd.data.ObjectGet(obj, 'interface', dsz.TYPE_INT)[0]
except:
self.interface = None
try:
self.provider = dsz.cmd.data.ObjectGet(obj, 'provider', dsz.TYPE_INT)[0]
except:
self.provider = None
return
class AcquiredApis(dsz.data.DataBean):
def __init__(self, obj):
try:
self.interface = dsz.cmd.data.ObjectGet(obj, 'interface', dsz.TYPE_INT)[0]
except:
self.interface = None
try:
self.provider = dsz.cmd.data.ObjectGet(obj, 'provider', dsz.TYPE_INT)[0]
except:
self.provider = None
try:
self.providedBy = dsz.cmd.data.ObjectGet(obj, 'providedBy', dsz.TYPE_STRING)[0]
except:
self.providedBy = None
return
class Version(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Lla = Plugins.Remote.Plugin.Version.Lla(dsz.cmd.data.ObjectGet(obj, 'Lla', dsz.TYPE_OBJECT)[0])
except:
self.Lla = None
try:
self.Module = Plugins.Remote.Plugin.Version.Module(dsz.cmd.data.ObjectGet(obj, 'Module', dsz.TYPE_OBJECT)[0])
except:
self.Module = None
try:
self.BuildEnvironment = Plugins.Remote.Plugin.Version.BuildEnvironment(dsz.cmd.data.ObjectGet(obj, 'BuildEnvironment', dsz.TYPE_OBJECT)[0])
except:
self.BuildEnvironment = None
return
class Lla(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Major = dsz.cmd.data.ObjectGet(obj, 'Major', dsz.TYPE_INT)[0]
except:
self.Major = None
try:
self.Minor = dsz.cmd.data.ObjectGet(obj, 'Minor', dsz.TYPE_INT)[0]
except:
self.Minor = None
try:
self.Revision = dsz.cmd.data.ObjectGet(obj, 'Revision', dsz.TYPE_INT)[0]
except:
self.Revision = None
return
class Module(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Major = dsz.cmd.data.ObjectGet(obj, 'Major', dsz.TYPE_INT)[0]
except:
self.Major = None
try:
self.Minor = dsz.cmd.data.ObjectGet(obj, 'Minor', dsz.TYPE_INT)[0]
except:
self.Minor = None
try:
self.Revision = dsz.cmd.data.ObjectGet(obj, 'Revision', dsz.TYPE_INT)[0]
except:
self.Revision = None
try:
self.Flags = Plugins.Remote.Plugin.Version.Module.Flags(dsz.cmd.data.ObjectGet(obj, 'Flags', dsz.TYPE_OBJECT)[0])
except:
self.Flags = None
return
class Flags(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Value = dsz.cmd.data.ObjectGet(obj, 'Value', dsz.TYPE_INT)[0]
except:
self.Value = None
try:
self.Target = dsz.cmd.data.ObjectGet(obj, 'Target', dsz.TYPE_BOOL)[0]
except:
self.Target = None
try:
self.Lp = dsz.cmd.data.ObjectGet(obj, 'Lp', dsz.TYPE_BOOL)[0]
except:
self.Lp = None
return
class BuildEnvironment(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Major = dsz.cmd.data.ObjectGet(obj, 'Major', dsz.TYPE_INT)[0]
except:
self.Major = None
try:
self.Minor = dsz.cmd.data.ObjectGet(obj, 'Minor', dsz.TYPE_INT)[0]
except:
self.Minor = None
try:
self.Revision = dsz.cmd.data.ObjectGet(obj, 'Revision', dsz.TYPE_INT)[0]
except:
self.Revision = None
try:
self.TypeValue = dsz.cmd.data.ObjectGet(obj, 'TypeValue', dsz.TYPE_INT)[0]
except:
self.TypeValue = None
try:
self.Type = dsz.cmd.data.ObjectGet(obj, 'Type', dsz.TYPE_STRING)[0]
except:
self.Type = None
return
dsz.data.RegisterCommand('Plugins', Plugins)
PLUGINS = Plugins
plugins = Plugins |
#!/usr/bin/python3
# Copyright (c) 2016-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import sys
import os
import json
import datetime
import uuid as uuid_gen
import yaml
import collections
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column
from sqlalchemy import Boolean
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import DateTime
from sqlalchemy import exc
import fm_log as log
LOG = log.get_logger(__name__)
Base = declarative_base()
class EventSuppression(Base):
__tablename__ = 'event_suppression'
created_at = Column('created_at', DateTime)
id = Column('id', Integer, primary_key=True, nullable=False)
uuid = Column('uuid', String(36), unique=True)
alarm_id = Column('alarm_id', String(255), unique=True)
description = Column('description', String(255))
suppression_status = Column('suppression_status', String(255))
set_for_deletion = Column('set_for_deletion', Boolean)
mgmt_affecting = Column('mgmt_affecting', String(255))
degrade_affecting = Column('degrade_affecting', String(255))
class ialarm(Base):
__tablename__ = 'alarm'
id = Column(Integer, primary_key=True, nullable=False)
alarm_id = Column('alarm_id', String(255), index=True)
class event_log(Base):
__tablename__ = 'event_log'
id = Column(Integer, primary_key=True, nullable=False)
event_log_id = Column('event_log_id', String(255), index=True)
state = Column(String(255))
def prettyDict(dict):
output = json.dumps(dict, sort_keys=True, indent=4)
return output
def get_events_yaml_filename():
events_yaml_name = os.environ.get("EVENTS_YAML")
if events_yaml_name is not None and os.path.isfile(events_yaml_name):
return events_yaml_name
return "/etc/fm/events.yaml"
#
# Main
#
if len(sys.argv) < 2:
msg = 'Postgres credentials required as argument.'
LOG.error(msg)
sys.exit(msg)
postgresql_credentials = str(sys.argv[1])
# Set up logging:
current_file_name = __file__
current_file_name = current_file_name[2:] # remove leading characters "./"
# Set up sqlalchemy:
try:
meta = sqlalchemy.MetaData()
engine = sqlalchemy.create_engine(postgresql_credentials)
meta.bind = engine
except exc.SQLAlchemyError as exp:
LOG.error(exp)
sys.exit(exp)
Session = sessionmaker(bind=engine)
session = Session()
# Convert events.yaml to dict:
LOG.info("Converting events.yaml to dict: ")
EVENT_TYPES_FILE = get_events_yaml_filename()
if not os.path.isfile(EVENT_TYPES_FILE):
LOG.error("file %s doesn't exist. Finishing" % (EVENT_TYPES_FILE))
sys.exit(-1)
with open(EVENT_TYPES_FILE, 'r') as stream:
event_types = yaml.safe_load(stream)
for alarm_id in list(event_types.keys()):
if isinstance(alarm_id, float):
# force 3 digits after the decimal point,
# to include trailing zero's (ex.: 200.010)
formatted_alarm_id = "{:.3f}".format(alarm_id)
event_types[formatted_alarm_id] = event_types.pop(alarm_id)
event_types = collections.OrderedDict(sorted(event_types.items()))
yaml_event_list = []
uneditable_descriptions = {'100.114', '200.007', '200.02', '200.021', '200.022', '800.002'}
# Parse events.yaml dict, and add any new alarm to event_suppression table:
LOG.info("Parsing events.yaml and adding any new alarm to event_suppression table: ")
for event_type in event_types:
if event_types.get(event_type).get('Type') == "Alarm":
event_created_at = datetime.datetime.now()
event_uuid = str(uuid_gen.uuid4())
string_event_type = str(event_type)
yaml_event_list.append(string_event_type)
if str(event_type) not in uneditable_descriptions:
event_description = (event_types.get(event_type)
.get('Description'))
else:
event_description = event_types.get(event_type).get('Description')
event_description = str(event_description)
event_description = (event_description[:250] + ' ...') \
if len(event_description) > 250 else event_description
try:
event_supp = session.query(EventSuppression) \
.filter_by(alarm_id=string_event_type).first()
except exc.SQLAlchemyError as exp:
LOG.error(exp)
event_mgmt_affecting = str(event_types.get(event_type).get(
'Management_Affecting_Severity', 'warning'))
event_degrade_affecting = str(event_types.get(event_type).get(
'Degrade_Affecting_Severity', 'none'))
if event_supp:
event_supp.description = event_description
event_supp.mgmt_affecting = event_mgmt_affecting
event_supp.degrade_affecting = event_degrade_affecting
else:
event_supp = EventSuppression(created_at=event_created_at,
uuid=event_uuid,
alarm_id=string_event_type,
description=event_description,
suppression_status='unsuppressed',
set_for_deletion=False,
mgmt_affecting=event_mgmt_affecting,
degrade_affecting=event_degrade_affecting)
session.add(event_supp)
LOG.info("Created Event Type: %s in event_suppression table." % (string_event_type))
try:
session.commit()
except exc.SQLAlchemyError as exp:
LOG.error(exp)
event_supp = session.query(EventSuppression)
alarms = session.query(ialarm)
events = session.query(event_log).filter(event_log.state != 'log')
alarm_ids_in_use = set()
for alarm in alarms:
alarm_ids_in_use.add(alarm.alarm_id)
for event in events:
alarm_ids_in_use.add(event.event_log_id)
for event_type in event_supp:
if event_type.alarm_id not in yaml_event_list:
if event_type.alarm_id not in alarm_ids_in_use:
event_supp = session.query(EventSuppression) \
.filter_by(alarm_id=event_type.alarm_id).first()
session.delete(event_supp)
LOG.info("Deleted Event Type: %s from event_suppression table." % (event_type.alarm_id))
else:
event_supp.suppression_status = 'unsuppressed'
event_supp.set_for_deletion = True
LOG.info("Event Type: %s no longer in events.yaml, but still used by alarm in database." % (event_type.alarm_id))
LOG.info("Event Type: %s marked as set for deletion in event_suppression table." % (event_type.alarm_id))
try:
session.commit()
except exc.SQLAlchemyError as exp:
LOG.error(exp)
session.close()
LOG.debug("Normally exiting from: %s" % (__file__))
|
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
class CustomTokenSerializer(TokenObtainPairSerializer):
@classmethod
def get_token(cls, user):
token = super().get_token(user)
token['role'] = user.role.name
return token |
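# A minimal wiring sketch for the CustomTokenSerializer above (URL name and view
# class name are illustrative): rest_framework_simplejwt adds the extra 'role'
# claim once a token view points at the custom serializer.
from rest_framework_simplejwt.views import TokenObtainPairView

class CustomTokenObtainPairView(TokenObtainPairView):
    # Issue tokens through the serializer defined above so they carry 'role'.
    serializer_class = CustomTokenSerializer

# urls.py (illustrative):
#   path('api/token/', CustomTokenObtainPairView.as_view(), name='token_obtain_pair')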
from enums import *
from board import Board
from piece import Piece
def clone_board_state(board):
return [list(x) for x in board.state]
class Move(object):
def __init__(self, board, start_coord, end_coord, attacked = None, attacked_count = 0):
self.white_plays = board.white_plays
self.state = board.state
self.start_coord = start_coord
self.end_coord = end_coord
self.attacked = attacked
self.attacked_count = attacked_count
def get_board(self):
return Board(white_plays = self.white_plays, board_state = self.state, apply_move = self) |
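# A minimal usage sketch for the Move class above (the coordinates and the
# Board() construction are assumptions; Board and Piece come from the sibling
# modules imported at the top):
#
#   board = Board()                      # assumed default constructor
#   move = Move(board, (2, 3), (3, 4))   # hypothetical start/end coordinates
#   next_board = move.get_board()        # Board built with apply_move=move
#
# clone_board_state() above can be used first if a detached copy of
# board.state is wanted.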
#!/usr/bin/env python
import time
import DalekV2DriveV2
import DalekSpi
import RPi.GPIO as GPIO # Import GPIO divers
GPIO.setwarnings(False)
DalekV2DriveV2.init()
DalekSpi.init()
# this gets the spi reading and gets rid of bad readings and
# readings that are out of range due to the motors and acceleration
def getStartingMag():
# used for all timings in this function.
timerTime = 0.4
magOffset = 3000
print("\n-getStartingMag()")
DalekV2DriveV2.stop()
time.sleep(timerTime)
currentMag = -1
while not (0 <= currentMag <= 360): # ensure we get a valid reading; it must be between 0 and 360
currentMag = DalekSpi.getMag()
print("--currentMag:{}\n-------------\n".format(currentMag))
currentMag += magOffset # add the offset value
return currentMag
def getMag(clockwise=True ,currentHeading=None):
# used for all timings in this function.
timerTime = 0.4
# The value we add to all readings to
# get over the 360 to 0 rollover and all the
# calculations that would be needed.
# use a high number so it stays above Zero
magOffset = 3000
currentMag = -1 # set a value that we can not get.
print("\n1---getMag({},{})".format(clockwise, currentHeading - magOffset))
if currentHeading == None:
return getStartingMag()
else:
previousMagReading = currentHeading - magOffset # subtract off the offset to get previous reading.
print("\n2---getMag({},{})".format(direction, previousMagReading - magOffset))
if previousMagReading > 360:
floorDivisionOfPreviousMagReading = previousMagReading // 360 # should be 1,2 or 3
previousMagReading = previousMagReading % 360 # between 0 and 360
magOffset += (floorDivisionOfPreviousMagReading * 360) # add the back for using later
print("\n getMag() previousMagReading > 360 previousMagReading:{} magOffset:{} ".format( previousMagReading, magOffset))
#####################
## TODO this should now work for clockwise
# now do anticlockwise
# now we can get a new value.
DalekSpi.stop()
time.sleep(timerTime) # settle the bot for better reading
if clockwise: # Clockwise
currentMag = DalekSpi.getMag()
if 0 <= currentMag <= 360: # is between 0 and 360
# is between previous reading and an upper limit; this prevents errors if
# the mag is affected by other factors.
if previousMagReading <= currentMag <= (previousMagReading +30):
currentMag += magOffset
print("---Clockwise {}".format(currentMag))
# you have rolled over the 360 mark
# e.g. previous 355, new 5: 355 <= (5 + 360) = 365 <= (355 + 30) = 385 -> True
elif previousMagReading <= (currentMag + 360 ) <= (previousMagReading +30):
currentMag += 360 + magOffset
print("---Clockwise Rollover{}".format(currentMag))
else:
print("---error in mag reading out of range currentMag:{}".format(currentMag))
else:
print("--error in mag reading > 360 value:{}".format(currentMag))
return currentMag
else: # anti Clockwise
pass
currentMagPlay = 5  # tolerance window in degrees; assumed value, not defined in the original
magMax = currentMag + currentMagPlay
minMag = currentMag - currentMagPlay
# while the mag reading is not between the range we set
# keep trying for a more accurate reading.
while not (minMag < currentMag <= magMax):
DalekSpi.stop()
time.sleep(timerTime)
currentMag = DalekSpi.getMag() + magOffset
return currentMag
def DalekTurn(degreesToTurn):
magOffset = 3000
# used for all timings in this function.
timerTime = 0.4
fastModeSpeed = 60
normalModeSpeed = 25
print("\n---------")
print("DalekTurn({})".format(degreesToTurn))
startHeading = getStartingMag()
currentHeading = startHeading
endHeading = startHeading + degreesToTurn
print("\n################ \nStartHeading:{} CurrentHeading:{} EndHeading:{}".format(
(startHeading - magOffset), (currentHeading- magOffset), (endHeading - magOffset)))
# used to hold any pass of the 360/0 point
pass360Point=0
# turn counter clockwise
if degreesToTurn < 0:
print("turn counter clockwise")
counter = 0
while endHeading <= currentHeading:
if ( currentHeading - endHeading >= 60):
print("#### FAST MODE ##### ")
## aim 30 degrees short of endHeading so it does not overshoot.
pass360Point = turnAntiClockwise(currentHeading, endHeading + 30, timerTime, pass360Point, fastModeSpeed)
time.sleep(timerTime)
currentHeading = getMag(True,currentHeading)
pass360Point = turnAntiClockwise(currentHeading, endHeading, timerTime, pass360Point, normalModeSpeed)
time.sleep(timerTime)
currentHeading = getMag(True,currentHeading)
print(" currentHeading Heading:{} should be:{} pass360Point:{}"
.format((currentHeading - magOffset),( (endHeading- magOffset) - pass360Point),pass360Point))
counter += 1  # give up after a few passes so the loop cannot spin forever
if counter == 5:
    break
# turn clockwise
elif degreesToTurn > 0:
counter = 0
while endHeading >= currentHeading:
if (endHeading - currentHeading >= 60):
print("#### FAST MODE ##### ")
## aim 30 degrees short of endHeading so it does not overshoot.
pass360Point = turnClockwise(currentHeading, endHeading - 30, timerTime, pass360Point, fastModeSpeed)
time.sleep(timerTime)
currentHeading = getMag(True,currentHeading)
pass360Point = turnClockwise(currentHeading, endHeading, timerTime, pass360Point, normalModeSpeed)
time.sleep(timerTime)
currentHeading = getMag(True,currentHeading)
print(" currentHeading Heading:{} should be:{} pass360Point:{}".format((currentHeading - magOffset),( (endHeading- magOffset) - pass360Point),pass360Point))
counter += 1  # give up after a few passes so the loop cannot spin forever
if counter == 5:
    break
# you entered 0 so exit
else:
pass
DalekV2DriveV2.stop()
time.sleep(timerTime)
# mag = getMag() - magOffset
print("-- End Heading:{} should be:{}".format((getMag() - magOffset),( (endHeading- magOffset) - pass360Point)))
# this is a private function used in the DalekTurn function
def turnClockwise(currentHeading, endHeading, timerTime, pass360Point,_speed):
magOffset = 3000
print("---- turnClockwise\n speed{}".format(_speed))
minval = currentHeading
while currentHeading <= endHeading:
time.sleep(timerTime) # pause for the sensor to settle
checkforPass360 =getMag(True,currentHeading)
DalekV2DriveV2.spinRight(_speed)
print(" checkforPass360: {}".format(checkforPass360 - magOffset))
# take a little off it to account for reading error.
# 3 < (350 -100) or 3 < 250 is true
# 350 < 250 is false, not passed zero point
# you won't move more than 100 degrees in 0.3 seconds
if checkforPass360 < (currentHeading -100):
checkforPass360 = checkforPass360 + 360 # you have passed 360
pass360Point = pass360Point + 360 # add to the return value.
if currentHeading < checkforPass360:
currentHeading = checkforPass360 # this is now your new value
print(" currentHeading: {}".format(currentHeading - magOffset))
DalekV2DriveV2.stop()
print("---------------------exit turnClockwise\n" )
return pass360Point
# this is a private function used in the DalekTurn function
def turnAntiClockwise(currentHeading, endHeading, timerTime, pass360Point,speed):
magOffset = 3000
print("--------------------- turnAntiClockwise\n speed{}".format(speed))
while currentHeading >= endHeading:
time.sleep(timerTime)
checkforPass360 =getMag()
DalekV2DriveV2.spinLeft(speed)
print(" checkforPass360: {}".format(checkforPass360 - magOffset))
# take a little off it to account for reading error.
if checkforPass360 > (currentHeading + 100):
checkforPass360 = checkforPass360 - 360
pass360Point = -360
currentHeading = checkforPass360
print(" currentHeading: {}".format(currentHeading - magOffset))
DalekV2DriveV2.stop()
print("---------------------exit turnAntiClockwise\n")
return pass360Point
def test():
magOffset = 3000
# DalekTurn(-90)
# print("#########################\n")
# time.sleep(stop)
DalekTurn(90)
#print("#########################\n")
print("#########################\n")
# time.sleep(stop)
# DalekTurn(-45)
# print("#########################\n")
# time.sleep(stop)
# DalekTurn(45)
# DalekTurn(-25)
# DalekTurn(65)
mag = getMag()  # fresh reading (includes the magOffset)
startval = mag - magOffset
endval = getMag(True, mag) - magOffset
print("\n\n########################\nstart:{} End{}".format(startval,endval))
test()
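# A standalone sketch of the rollover bookkeeping getMag() performs above,
# under the same assumptions: headings carry a +3000 offset and a single
# clockwise step never exceeds 30 degrees between readings.
def _next_clockwise_heading(previous, raw_reading, mag_offset=3000):
    """Return the new offset heading, adding 360 when the 360->0 point is crossed."""
    prev_raw = previous - mag_offset
    if prev_raw <= raw_reading <= prev_raw + 30:
        return raw_reading + mag_offset        # normal step, no rollover
    if prev_raw <= raw_reading + 360 <= prev_raw + 30:
        return raw_reading + 360 + mag_offset  # crossed the 360 -> 0 point
    return previous                            # out-of-range reading, keep the old value
# e.g. a previous raw heading of 355 and a new raw reading of 5 is treated as 365:
#   _next_clockwise_heading(355 + 3000, 5) == 365 + 3000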
|
import numpy as np
import os
from gensim.models import KeyedVectors
from keras.layers import add, Bidirectional, Concatenate, CuDNNGRU
from keras.layers import Dense, Embedding, Input, SpatialDropout1D
from keras.models import Model
from keras.optimizers import Adam
from keras.regularizers import l2
from src.BiGRU_experiments.masking import Camouflage, SymmetricMasking
from src.BiGRU_experiments.attension import Attention
from src.BiGRU_experiments.dropout import TimestepDropout
from gensim.downloader import base_dir
EMBEDDINGS_PATH = os.path.join(base_dir, 'glove-wiki-gigaword-200')
def pretrained_embedding():
"""
:return: A Model with an embeddings layer
"""
inputs = Input(shape=(None,), dtype='int32')
embeddings = KeyedVectors.load_word2vec_format(EMBEDDINGS_PATH, binary=False)
word_encodings_weights = np.concatenate((np.zeros((1, embeddings.syn0.shape[-1]), dtype=np.float32),
embeddings.syn0), axis=0)
embeds = Embedding(len(word_encodings_weights), word_encodings_weights.shape[-1],
weights=[word_encodings_weights], trainable=False)(inputs)
return Model(inputs=inputs, outputs=embeds, name='embedding')
def compile_bigrus_attention(shape, n_hidden_layers, hidden_units_size, dropout_rate, word_dropout_rate, lr, mode):
"""
Compiles a BiGRU based on the given parameters
:param mode: Depending on your choice : ['Single Task', 'Multi Task-1', 'Multi Task-5'].
:param shape: The input shape
:param n_hidden_layers: How many stacked Layers you want.
:param hidden_units_size: size of hidden units, as a list
:param dropout_rate: The percentage of inputs to dropout
:param word_dropout_rate: The percentage of time steps to dropout
:param lr: learning rate
:return: The compiled model ready to be trained
"""
# Document Feature Representation
doc_inputs = Input(shape=(shape[1],), name='doc_inputs')
pretrained_encodings = pretrained_embedding()
doc_embeddings = pretrained_encodings(doc_inputs)
# Apply variational dropout
drop_doc_embeddings = SpatialDropout1D(dropout_rate, name='feature_dropout')(doc_embeddings)
encodings = TimestepDropout(word_dropout_rate, name='word_dropout')(drop_doc_embeddings)
# Bi-GRUs over token embeddings
for i in range(n_hidden_layers):
grus = Bidirectional(
CuDNNGRU(hidden_units_size, return_sequences=True), name='bidirectional_grus_{}'.format(i))(encodings)
grus = Camouflage(mask_value=0.0)([grus, encodings])
if i == 0:
encodings = SpatialDropout1D(dropout_rate)(grus)
else:
encodings = add([grus, encodings])
encodings = SpatialDropout1D(dropout_rate)(encodings)
# Attention over BI-GRU (context-aware) embeddings
# Mask encodings before attention
grus_outputs = SymmetricMasking(mask_value=0, name='masking')([encodings, encodings])
doc_encoding = Attention(kernel_regularizer=l2(),
bias_regularizer=l2(),
return_attention=False,
name='self_attention')(grus_outputs)
model = None
# Final output (projection) layer
if mode == 'Single Task':
outputs = Dense(1, activation='linear', name='outputs')(doc_encoding)
model = Model(inputs=doc_inputs, outputs=[outputs])
elif mode == 'Multi Task-1':
outputs = Dense(5, activation='linear', name='outputs')(doc_encoding)
model = Model(inputs=doc_inputs, outputs=[outputs])
elif mode == 'Multi Task-5':
output_q1 = Dense(1, activation='linear', name='output_Q1')(doc_encoding)
output_q2 = Dense(1, activation='linear', name='output_Q2')(doc_encoding)
output_q3 = Dense(1, activation='linear', name='output_Q3')(doc_encoding)
output_q4 = Dense(1, activation='linear', name='output_Q4')(doc_encoding)
output_q5 = Dense(1, activation='linear', name='output_Q5')(doc_encoding)
model = Model(inputs=doc_inputs,
outputs=[Concatenate()([output_q1, output_q2, output_q3, output_q4, output_q5])])
# Wrap up model + Compile with optimizer and loss function
# model = Model(inputs=doc_inputs, outputs=[outputs])
model.compile(optimizer=Adam(lr=lr, clipvalue=5.0), loss='mse', loss_weights=None)
return model
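# A minimal usage sketch; every hyperparameter value below is an illustrative
# assumption (training also needs the GloVe embeddings on disk and a
# CuDNN-capable GPU for CuDNNGRU):
if __name__ == '__main__':
    model = compile_bigrus_attention(
        shape=(None, 500),       # documents padded/truncated to 500 tokens
        n_hidden_layers=2,
        hidden_units_size=128,
        dropout_rate=0.3,
        word_dropout_rate=0.1,
        lr=1e-3,
        mode='Single Task')
    model.summary()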
|
import torch
from ReplayBuffer import ReplayBuffer
from CombinedReplayBuffer import CombinedReplayBuffer
import torch.optim as optim
from ranger import Ranger
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
POLICY_LEARNING_RATE = 3e-4
Q_LEARNING_RATE = 3e-4
ALPHA_LEARNING_RATE = 3e-4
POLICY_OPTIM = optim.Adam # Ranger
Q_OPTIM = optim.Adam # Ranger
ALPHA_OPTIM = optim.Adam # Ranger
GAMMA = 0.99
TAU = 0.005
LOGSTD_MIN = -20
LOGSTD_MAX = 2
INITIAL_REPLAY_SIZE = 10000
REPLAY_SIZE = 1000000
REPLAY_BUFFER = ReplayBuffer
HIDDEN_SIZE = 256
BATCH_SIZE = 256
NUM_ITERATIONS = 10000000
EVAL_FREQ = 5000
NUM_EVAL_GAMES = 10
SUMMARY_FREQ = 1000
SAVE_FREQ = 500000
MAX_STEPS = 1000
NUM_TRAINS_PER_TRAIN_LOOP = 1000
NUM_EXPL_STEPS_PER_TRAIN_LOOP = 1000
MUNCHAUSEN = False
M_ALPHA = 0.9
M_TAU = 0.03
M_L0 = -1
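# A sketch of how a training script might consume these settings (the module
# name and constructor signatures below are assumptions, not part of this file):
#
#   import config
#   buffer = config.REPLAY_BUFFER(config.REPLAY_SIZE)   # assumed signature
#   policy_optimizer = config.POLICY_OPTIM(policy.parameters(),
#                                          lr=config.POLICY_LEARNING_RATE)
#   q_optimizer = config.Q_OPTIM(q_net.parameters(), lr=config.Q_LEARNING_RATE)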
|
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from pathlib import Path
def julia_set(c: complex, iterations: int, x_limit: float, y_limit: float, ppl: int = 1000, x_origin: float = 0., y_origin: float = 0.) -> np.ndarray:
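    # Escape radius: once |z| exceeds R = (1 + sqrt(1 + 4|c|)) / 2, the orbit of
    # z -> z**2 + c must diverge, because |z**2 + c| >= |z|**2 - |c| > |z| for |z| > R.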
threshold = (1 + np.sqrt(1 + 4 * np.abs(c))) / 2
n_x = int(ppl * x_limit)
n_y = int(ppl * y_limit)
y, x = np.ogrid[y_limit:-y_limit:n_y * 1j, -x_limit:x_limit:n_x * 1j]
points = (x + x_origin) + 1j * (y + y_origin)
iterations_to_diverge = np.ones(points.shape) * iterations
is_diverged = np.zeros(points.shape, dtype=bool)
for iteration in range(iterations):
points = np.square(points) + c
points_distance = np.abs(points)
new_diverged = (points_distance > threshold)
is_diverged[new_diverged] = True
iterations_to_diverge[new_diverged] = iteration
points[is_diverged] = 0
return iterations_to_diverge
def show_julia_set(iterations_to_diverge: np.ndarray, file_name: str = None) -> None:
fig = plt.figure(figsize=np.array(iterations_to_diverge.shape)[::-1] / 100)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(iterations_to_diverge, cmap='twilight_shifted')
Path("images").mkdir(parents=True, exist_ok=True)
if file_name:
plt.savefig(file_name)
# plt.show()
plt.close()
def make_zoom_video():
c = -0.7 + .3j
# x_origin, y_origin = 2.087805e-9, (0.2499 + 3.53e-10)
x_origin, y_origin = 2.0877695e-9, (0.2499 + 3.53e-10)
length_limit = 1
zoom_factor = 1.1
ppl = 1000
iterations = 200
frames = 320
dir_name = f'videos/julia_c={c}_itr={iterations}_l={length_limit}_zoom={zoom_factor}_temp_temp'
Path(dir_name).mkdir(parents=True, exist_ok=True)
for frame in tqdm(range(frames)):
iterations_to_diverge = julia_set(
c=c,
iterations=int(iterations),
x_limit=length_limit,
y_limit=length_limit,
ppl=ppl,
x_origin=x_origin,
y_origin=y_origin
)
show_julia_set(iterations_to_diverge, f"{dir_name}/{frame}.jpg")
length_limit /= zoom_factor
ppl *= zoom_factor
iterations += 0.5
if __name__ == '__main__':
make_zoom_video()
# c = -0.7 + .3j
# iterations = 1000
# iterations_to_diverge = julia_set(
# c=c,
# iterations=iterations,
# x_limit=1.55,
# y_limit=0.95,
# ppl=1000,
# )
# file_name = f'images/julia_c={c}_itr={iterations}_w={iterations_to_diverge.shape[1]}_h={iterations_to_diverge.shape[0]}.jpg'
# show_julia_set(iterations_to_diverge, file_name)
|
#
# Copyright 2016 Dohop hf.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test logstash_notifier
"""
try:
from unittest2 import TestCase
except ImportError:
from unittest import TestCase
from logstash_notifier import get_value_from_input
from .utilities import BaseSupervisorTestCase, record, get_config
class SupervisorLoggingTestCase(BaseSupervisorTestCase):
"""
Test logging.
"""
def test_logging(self):
"""
Test logging.
"""
logstash = self.run_logstash()
try:
environment = {
'LOGSTASH_SERVER': logstash.server_address[0],
'LOGSTASH_PORT': str(logstash.server_address[1]),
'LOGSTASH_PROTO': 'udp',
'COVERAGE_PROCESS_START': '.coveragerc'
}
config = get_config()
self.run_supervisor(environment, config)
self.messages(clear_buffer=True, wait_for=2)
try:
self.run_supervisorctl(['stop', 'messages'])
expected = [
record('PROCESS_STATE_STOPPED', 'STOPPING'),
]
received = self.messages(clear_buffer=True, wait_for=1)
self.assertEqual(received, expected)
self.run_supervisorctl(['start', 'messages'])
expected = [
record('PROCESS_STATE_STARTING', 'STOPPED'),
record('PROCESS_STATE_RUNNING', 'STARTING'),
]
received = self.messages(clear_buffer=True, wait_for=2)
self.assertEqual(received, expected)
self.run_supervisorctl(['restart', 'messages'])
expected = [
record('PROCESS_STATE_STOPPED', 'STOPPING'),
record('PROCESS_STATE_STARTING', 'STOPPED'),
record('PROCESS_STATE_RUNNING', 'STARTING'),
]
received = self.messages(clear_buffer=True, wait_for=3)
self.assertEqual(received, expected)
finally:
self.shutdown_supervisor()
finally:
self.shutdown_logstash()
class SupervisorEnvironmentLoggingTestCase(BaseSupervisorTestCase):
"""
Test case for logging extra environment variables
"""
def _test_environment_logging(self, include=None):
"""
test logging of env variables
"""
logstash = self.run_logstash()
try:
environment = {
'LOGSTASH_SERVER': logstash.server_address[0],
'LOGSTASH_PORT': str(logstash.server_address[1]),
'LOGSTASH_PROTO': 'udp',
'COVERAGE_PROCESS_START': '.coveragerc'
}
if include is not None:
environment.update(include)
config = get_config(arguments='--include FRUITS VEGETABLES')
self.run_supervisor(environment, config)
self.messages(clear_buffer=True, wait_for=2)
try:
self.run_supervisorctl(['stop', 'messages'])
received = self.messages(clear_buffer=True, wait_for=1)
# should only have the 'stopping' message
self.assertTrue(len(received) == 1)
message = received[0]
yield message
finally:
self.shutdown_supervisor()
finally:
self.shutdown_logstash()
def test_not_present(self):
"""
If the logger is configured to add two environment variables, FRUITS
and VEGETABLES, but neither is set, we shouldn't get anything extra
"""
for message in self._test_environment_logging({}):
# should have no additional added values since we asked for an
# empty dict to be added
self.assertTrue('user_data' not in message)
def test_only_one_value_set(self):
"""
If only one of them is set, we should only see that one in the logged
message
"""
env = {
'FRUITS': 'pineapple,raspberry,kiwi'
}
for message in self._test_environment_logging(env):
self.assertTrue('user_data' in message)
self.assertDictEqual(env, message['user_data'])
def test_both_values_set(self):
"""
If both of them are set, we should get both returned in the logged
message
"""
env = {
'FRUITS': 'pineapple,raspberry,kiwi',
'VEGETABLES': 'sweet potato,leek,mushroom'
}
for message in self._test_environment_logging(env):
self.assertTrue('user_data' in message)
self.assertDictEqual(env, message['user_data'])
class SupervisorKeyvalsLoggingTestCase(BaseSupervisorTestCase):
"""
Test case for logging user data keyvals
"""
def _test_environment_logging(self):
"""
test logging of user data keyvals
"""
logstash = self.run_logstash()
try:
environment = {
'LOGSTASH_SERVER': logstash.server_address[0],
'LOGSTASH_PORT': str(logstash.server_address[1]),
'LOGSTASH_PROTO': 'udp',
'COVERAGE_PROCESS_START': '.coveragerc'
}
config = get_config(
arguments='--include '
'bears="polar,brown,black" '
'notbears="unicorn,griffin,sphinx,otter"'
)
self.run_supervisor(environment, config)
self.messages(clear_buffer=True, wait_for=2)
try:
self.run_supervisorctl(['stop', 'messages'])
received = self.messages(clear_buffer=True, wait_for=1)
# should only have the 'stopping' message
self.assertTrue(len(received) == 1)
message = received[0]
yield message
finally:
self.shutdown_supervisor()
finally:
self.shutdown_logstash()
def test_get_user_data(self):
"""
Get the user data passed to logstash_notifier
"""
for message in self._test_environment_logging():
self.assertTrue('user_data' in message)
user_data = {
'bears': "polar,brown,black",
'notbears': "unicorn,griffin,sphinx,otter"
}
self.assertDictEqual(
user_data,
message['user_data']
)
class SupervisorOutPutLoggingTestCase(BaseSupervisorTestCase):
"""
Test capturing stdout/stderr logs.
"""
def test_output_logging(self):
"""
Test stdout is captured in logs when capture-output argument is set.
"""
logstash = self.run_logstash()
try:
environment = {
'LOGSTASH_SERVER': logstash.server_address[0],
'LOGSTASH_PORT': str(logstash.server_address[1]),
'LOGSTASH_PROTO': 'udp',
'COVERAGE_PROCESS_START': '.coveragerc'
}
config = get_config(
arguments='--capture-output',
events='PROCESS_LOG'
)
self.run_supervisor(environment, config)
try:
expected = [{
'@version': '1',
'channel': 'stdout',
'eventname': 'PROCESS_LOG_STDOUT',
'groupname': 'messages',
'level': 'INFO',
'logger_name': 'supervisor',
'message': 'Test 0\n',
'path': './logstash_notifier/__init__.py',
'processname': 'messages',
'tags': [],
'type': 'logstash',
}]
received = self.messages(clear_buffer=True, wait_for=1)
self.assertEqual(received, expected)
finally:
self.shutdown_supervisor()
finally:
self.shutdown_logstash()
class SupervisorAppendNewLineTestCase(BaseSupervisorTestCase):
"""
Test appending newlines to log messages.
"""
def test_newline_logging(self):
"""
Test a newline is appended when relevant config option is set.
"""
logstash = self.run_logstash()
try:
environment = {
'LOGSTASH_SERVER': logstash.server_address[0],
'LOGSTASH_PORT': str(logstash.server_address[1]),
'LOGSTASH_PROTO': 'udp',
'COVERAGE_PROCESS_START': '.coveragerc'
}
config = get_config(
arguments='--append-newline',
)
self.run_supervisor(environment, config)
self.messages(clear_buffer=True, wait_for=2)
try:
self.run_supervisorctl(['stop', 'messages'])
# Base test case - note added newline
expected = [
record('PROCESS_STATE_STOPPED', 'STOPPING'),
]
# Keep the buffer, needed in next test.
received = self.messages(clear_buffer=False, wait_for=1)
self.assertEqual(received, expected)
# Raw message test case
for message in self.get_message_buffer():
self.assertTrue(message.endswith("\n"))
finally:
self.shutdown_supervisor()
finally:
self.shutdown_logstash()
|
"""Helper script to generate a pylint badge"""
from pathlib import Path
from pylint.lint import Run
ROOT_DIR = Path(__file__).resolve().parent.parent
DIRECTORY_TO_LINT = (ROOT_DIR / "sam-application").name
# pylint: disable=too-many-return-statements
def get_color(score: float) -> str:
"""Return a colour reference from a pylint score"""
colors = {
"brightgreen": "#4c1",
"green": "#97CA00",
"yellow": "#dfb317",
"yellowgreen": "#a4a61d",
"orange": "#fe7d37",
"red": "#e05d44",
"bloodred": "#ff0000",
"blue": "#007ec6",
"grey": "#555",
"gray": "#555",
"lightgrey": "#9f9f9f",
"lightgray": "#9f9f9f",
}
if score > 9:
return colors["brightgreen"]
if score > 8:
return colors["green"]
if score > 7.5:
return colors["yellowgreen"]
if score > 6.6:
return colors["yellow"]
if score > 5.0:
return colors["orange"]
if score > 0.00:
return colors["red"]
return colors["bloodred"]
def main():
"""main function"""
score = round(Run([DIRECTORY_TO_LINT], exit=False).linter.stats["global_note"], 2)
# pylint: disable=line-too-long
template = '<svg xmlns="http://www.w3.org/2000/svg" width="85" height="20"><linearGradient id="a" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><rect rx="3" width="85" height="20" fill="#555"/><rect rx="3" x="50" width="35" height="20" fill="{color}"/><path fill="{color}" d="M50 0h4v20h-4z"/><rect rx="3" width="85" height="20" fill="url(#a)"/><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="25" y="15" fill="#010101" fill-opacity=".3">pylint</text><text x="25" y="14">pylint</text><text x="67" y="15" fill="#010101" fill-opacity=".3">{score}</text><text x="67" y="14">{score}</text></g></svg>'
color = get_color(float(score))
filename = DIRECTORY_TO_LINT + ".svg"
filepath = f".github/{filename}"
with open(filepath, "w") as score_file:
score_file.write(template.format(score=score, color=color))
if __name__ == "__main__":
main()
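# Typical (assumed) invocation from the repository root:
#   python scripts/pylint_badge.py   # hypothetical script path
# which lints ./sam-application and writes the badge to .github/sam-application.svg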
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Convert the output of CombineSnpCounts.py to gene level counts
============================================================================
AUTHOR: Michael D Dacre, [email protected]
ORGANIZATION: Stanford University
LICENSE: MIT License, property of Stanford, use as you wish
CREATED: 2016-32-20 13:05
Last modified: 2016-05-27 16:05
DESCRIPTION: Takes output, pre-filtered by FDR, and returns gene-level
data where the pvalue is the median of the pvalues of the
child SNPs.
============================================================================
"""
import os
import sys
import argparse
import gzip
import bz2
import random
from collections import defaultdict
# Progress bar
from subprocess import check_output
from tqdm import tqdm
# Handle python objects
try:
import cPickle as pickle
except ImportError:
import pickle
# Math and calculations
import math
import bisect
import numpy
import scipy.stats
import pandas
import logme
#################################################
# Classes to hold exon information for lookup #
#################################################
class Chrom(list):
"""A list of genes on one chromosome."""
def find(self, loc, strand=None):
"""Search in every gene range."""
loc = int(loc)
for exon in self:
if exon.find(loc, strand):
return exon.gene
return None
def __repr__(self):
"""Print a list of genes."""
astr = []
for gene in self:
astr += [repr(gene)]
return '; '.join(astr)
class Exon(object):
"""A single exon, gene points to a Gene object."""
def __init__(self, gene, start, end, strand):
"""Create an Exon, gene must be an existing Gene object."""
self.start = int(start)
self.end = int(end)
self.strand = strand
assert isinstance(gene, Gene)
self.gene = gene
# Set a back link to us
self.gene.exons.append(self)
def find(self, loc, strand=None):
"""Return True if loc in self. If strand provided, check that."""
if strand and not strand == self.strand:
return False
return self.start <= loc < self.end
def __repr__(self):
return "Exon<{}({}:{})>".format(self.gene.name, self.start, self.end)
##########################################
# Class to hold Genes and their counts #
##########################################
class Gene(object):
"""A single gene, holds SNPs."""
def __init__(self, name, trans_id=''):
"""Create an empty Gene object."""
self.name = name
self.trans_id = trans_id
# All the SNPs in this Gene
self.snps = []
# All the exons
self.exons = []
# Raw counts
self.mat_counts = 0
self.pat_counts = 0
self.other_counts = 0
# Winners
self.mat_win = 0
self.pat_win = 0
self.has_ase = 0
self.no_ase = 0
self.weird = 0
self.failed = 0
# Significant metrics for all SNPs in this Gene
self.pval = None
self.win = None
def sum_counts(self):
"""Add up all counts in snps."""
self.mat_counts = 0
self.pat_counts = 0
self.other_counts = 0
for snp in self.snps:
self.mat_counts += snp.mat_counts
self.pat_counts += snp.pat_counts
self.other_counts += snp.other_counts
def calc_pval(self):
"""Take the median of the pvalues of all SNPs as our pvalue.
We only include SNPs with no Ns or non-parental alleles.
"""
pvals = []
for snp in self.snps:
pvals.append(snp.pval)
pvals = [p for p in pvals if isinstance(p, float)]
if pvals:
self.pval = numpy.median(pvals)
else:
self.pval = numpy.nan
def calc_winner(self):
"""Sum winners, try to pick Gene-level winner.
Only chooses Gene-level winner if all SNPs agree.
Sets self.win to one of:
'mat', 'pat', 'WEIRD', '?', or 'NS' (for non-significant)
Ratio calculations use total SNP counts, not the sum of the parental
alleles.
"""
for snp in self.snps:
if not hasattr(snp, 'win') or not snp.win:
snp.calc_winner()
if not snp.win:
continue
if snp.win == 'M':
self.mat_win += 1
elif snp.win == 'P':
self.pat_win += 1
elif snp.win == '?':
self.failed += 1 # Not bothering with greater res now.
if snp.cls == 'Sym':
self.no_ase += 1
elif snp.cls == 'Asym':
self.has_ase += 1
elif snp.cls == 'Weird':
self.weird += 1
# Winner must have more than 60% of alleles with gene-level
# significance
if not self.pval:
self.calc_pval()
if not self.pval:
logme.log('No pvalue for gene {}'.format(self), 'debug')
self.win = 'NA'
return
if self.weird/len(self) > 0.4:
self.win = 'WEIRD'
elif self.mat_win > self.pat_win and self.mat_win/len(self) > 0.6:
self.win = 'mat'
elif self.pat_win > self.mat_win and self.pat_win/len(self) > 0.6:
self.win = 'pat'
else:
self.win = '?'
def __len__(self):
"""How many SNPs are in this gene."""
return len(self.snps)
def __repr__(self):
"""Summary info."""
self.sum_counts()
return "{}(mat:{};pat:{};other:{})".format(self.name,
self.mat_counts,
self.pat_counts,
self.other_counts)
###############################
# Class to hold SNP alleles #
###############################
class SNP(object):
"""A SNP.
Contains::
chrm -- chromosome
pos -- position
mat_allele -- The maternal allele
pat_allele -- The paternal allele
counts -- Dict of raw counts for each base indexed by ATCG
win -- M/P/?
cls -- Sym/Asym/Weird -- Asym: ASE
pval -- binomial pvalue
gene -- the parent Gene class
mat_counts
pat_counts
other_counts
"""
def __init__(self, gene, snpinfo):
"""Create a SNP object.
:gene: A gene name refering to a Gene object
:snpinfo: A tuple from alleleseq
"""
self.gene = gene
self.gene.snps.append(self)
# Get info
(self.chrm, self.pos, ref, mat_gtyp, pat_gtyp, c_gtyp, phase,
self.mat_allele, self.pat_allele, cA, cC, cG, cT,
self.win, self.cls, pval, BindingSite,
cnv) = snpinfo
self.pval = float(pval)
# Set counts
self.counts = {'A': int(cA), 'C': int(cC), 'G': int(cG), 'T': int(cT)}
# Assign maternal/paternal
self.mat_counts = self.counts[self.mat_allele]
self.pat_counts = self.counts[self.pat_allele]
# Count others
count = list(self.counts)
count.remove(self.mat_allele)
count.remove(self.pat_allele)
self.other_counts = self.counts[count.pop()] + self.counts[count.pop()]
def __len__(self):
"""The total number of SNPs."""
return sum(self.counts.values())
def __repr__(self):
"""Summary info."""
return "SNP<(mat:{};pat:{};other:{})>".format(
self.mat_counts, self.pat_counts,
self.other_counts)
###############################################################################
# Parse the Bed File #
###############################################################################
def parse_gene_bed(bed_file):
"""Return a defaultdict of Chrom objects for lookup.
To lookup, just run exons[chromsome].find(location) (where exons is the
defaultdict returned by this function).
NOTE: Uses entire gene, not exons. That was more useful for this
application.
:returns: defaultdict(exons), dict(genes)
"""
# Initialize the list exons for lookup
exons = defaultdict(Chrom)
# Initialize a dictionary of genes
genes = {}
count = 0
with open_zipped(bed_file) as fin:
for line in fin:
count += 1
if line.startswith('#'):
continue
fields = line.rstrip().split('\t')
chrom = chr2num(fields[0])
try:
start = int(fields[1])+1 # Enforce 1-base, bed is 0-based
except IndexError:
print(count)
end = int(fields[2])+1
gene = fields[3]
trans = gene
strand = fields[5]
if gene not in genes:
genes[gene] = Gene(gene, trans)
# Assign exons
starts = [start+int(i) for i in fields[11].rstrip(',').split(',')]
lengths = [int(i) for i in fields[10].rstrip(',').split(',')]
assert len(starts) == len(lengths)
assert len(starts) == int(fields[9])
for strt in starts:
exon = Exon(genes[gene], strt,
strt+lengths[starts.index(strt)], strand)
exons[chrom].append(exon)
return exons, genes
###############################################################################
# Main Function #
###############################################################################
def get_gene_counts(bed_file, alleleseq_output, chrom_to_num=False,
logfile=sys.stderr):
"""Return a list of Gene objects from all snps in exons.
:chrom_to_num: If true, convert 'chr1' to 1
"""
logme.log('Parsing gene bed')
exons, genes = parse_gene_bed(bed_file)
# Stats
total_count = 0
not_in_gene = 0
snps = []
# Parse the file
logme.log('Parsing alleleseq output')
lines = int(check_output(['wc', '-l', alleleseq_output]).decode().split()[0])
with open_zipped(alleleseq_output) as fin:
# File format test
header = fin.readline()
if not header.startswith('chrm'):
raise Exception("Invalid alleleseq file format")
# Loop through the file
siter = tqdm(fin, unit='snps', total=lines) if 'PS1' in os.environ \
else fin
for line in siter:
snpinfo = line.rstrip().split('\t')
total_count += 1
chrm = chr2num(snpinfo[0]) if chrom_to_num else snpinfo[0]
gene = exons[chrm].find(int(snpinfo[1]))
# Skip everything not in a gene
if gene is not None:
# The SNP adds itself to the genes list
s = SNP(gene, snpinfo)
snps.append(s)
else:
not_in_gene += 1
newgenes = {}
for name, gene in genes.items():
if gene:
gene.sum_counts()
gene.calc_pval()
gene.calc_winner()
newgenes[name] = gene
return newgenes
###############################################################################
# AlleleSeq's Binomial Test Functions from binom.py #
###############################################################################
def binomtest(x, n, p):
"""Run a binomial test with scipy unless n*p>50, then use normal_approx."""
#return (scipy.stats.binom_test(x, n, p), normal_approx(x, n, p))
if n*p > 50:
return normal_approx(x, n, p)
else:
return scipy.stats.binom_test(x, n, p)
def normal_approx(x, n, p):
"""A different implementation of the binomial test?."""
if abs(x-n*p) < 1e-5:
return 1.0
u=p*n
s=math.sqrt(n*p*(1-p))
norm=scipy.stats.distributions.norm(u,s)
if x<n*p:
pval=2*norm.cdf(x+.5) # add 0.5 for continuity correction
else:
pval=2*(1-norm.cdf(x-.5))
return pval
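# For example, binomtest(80, 200, 0.5) takes the normal_approx() branch because
# n*p = 100 > 50, while binomtest(8, 20, 0.5) falls through to
# scipy.stats.binom_test(8, 20, 0.5).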
###############################################################################
# Calc FDR for genes use AlleleSeq Algorithm #
###############################################################################
class binomMemo(object):
"""Do a binomial test with a definied range."""
def __init__(self, n):
"""Create a binomial range."""
self.n=n
self.cache=[[binomtest(j, i, 0.5) for j in range(i+1)] for i in range(n)]
def binomtest(self, a, cnt):
"""Do a binomial test."""
if cnt<self.n:
return self.cache[cnt][a]
else:
return binomtest(a, cnt, 0.5)
def simpval(cnt,bm):
"""Simulate a binomial pvalue from cnt."""
a=sum([random.randint(0,1) for i in range(cnt)])
pval=bm.binomtest(a, cnt)
return pval
def simpval2(cnt,bm):
"""Simulate a binomial pvalue from cnt."""
a=sum([random.randint(0,1) for i in range(cnt)])
# pval=bm.binomtest(a, cnt)
return a
def calc_fdr(pvals, target=0.1, sims=5, verbose=False):
"""Return the highest pvalue that beats an FDR of 'target'.
I have kept most of the bloat from the original algorithm, and only removed
lines that had to be removed, all of these were just file handling lines.
:pvals: A tuple of (mat_count, pat_count, p-value). Used to simulate new
pvalue set.
:target: The FDR cutoff to beat.
:sims: The number of simulations to do when calculating the random set of
pvalues.
:verbose: Print extra information.
:returns: The pvalue that beats the FDR target for this count set.
"""
bestFDR=bestPV=None
random.seed(0) # This is set in the original algorithm
# print "#"," ".join(sys.argv)
# print "pval\tP\tFP\tFDR"
bm=binomMemo(60)
# h=getInterestingHetsAnnotations.Handler(ifile, hasHeader=True)
# n=h.getCount() # Returns the number of lines in the file
# g=h.getAllAnnotationsGenerator(); # All this returns is the infile as {chr => {pos => rest of the file as a tuple}}
n = len(pvals)
act_pvals=numpy.zeros(n) # pval as reported in counts file
cnt_sums=numpy.zeros(n, dtype=int) # sum of major and minor alleles
# for each hetSNP, get the count of major and minor allele from the input file
for i, t in enumerate(pvals):
mat, pat, pval = t
act_pvals[i] = float(pval) # Append p-value to the array
counts = [mat, pat] # Create a list of counts
counts = [int(e) for e in counts] # Make them integers
counts = sorted(counts, reverse=True)[0:2] # Keep only the top two
cnt_sums[i] = sum(counts) # Sum the top two counts
act_pvals = sorted(act_pvals)
# For every line in the input file, calculate a random pvalue. Repeat this
# sims times. Sims is often 5.
sim_pvals=numpy.array([ sorted([simpval(cnt_sums[j],bm) for j in range(n)]) for i in range(sims)])
#sim_pvals_means=numpy.mean(sim_pvals, 0)
pvs=[e*0.001 for e in range(10)]+[e*0.01 for e in range(1,10)]+[e*0.1 for e in range(1,10)]
# for a given test pv, find the number of actual pvals that are smaller, and the number of sim pvals that are smaller.
# FDR is the ratio
sys.stderr.write("pval\tpos_in_actual\tmean_sim_pos\tFDR\n")
for pv in pvs:
# Get what position the pvalue from pvs is in in the actual pvalues
# from the input file.
Nact=bisect.bisect(act_pvals, pv)
# For every simulated pvalue set, find the position of the pvalue from
# pvs in that set, then take the mean of all simulations.
mean_Nsims=numpy.mean([bisect.bisect(sim_pvals[i], pv) for i in range(sims)])
# The false discovery rate is the position of the pvalue from pvs in
# the simulated pvalue set divided by the position of the same pvalue
# in the actual pvalue set from the infile, plus 1.
FDR=mean_Nsims/(Nact+1)
sys.stderr.write("%f\t%s\t%f\t%f\n" % (pv, Nact, mean_Nsims, FDR))
# This is my attempt to find the act_pval that corresponds best to the desired target FDR.
# This version walks from largest observed pvalue to the smallest.
if target:
last_FDR=last_pv=0.0
for Nact, pv in sorted(enumerate(act_pvals), reverse=True):
# For every simulated pvalue set, find the position of the pvalue from
# the actual pvalues in the simulated pvalues, then take the mean
# of all simulations.
mean_Nsims=numpy.mean([bisect.bisect(sim_pvals[i], pv) for i in range(sims)])
# The false discovery rate is the position of the pvalue from the
# actual data in the simulated pvalue set divided by the position
# we are in the list of pvalues (walking from largest to smallest)
FDR=mean_Nsims/(Nact+1)
if verbose:
sys.stderr.write("test %d %f %f %f\n" % (
Nact,mean_Nsims,FDR, pv))
# As soon as we get an FDR that is less than the target (usually
# 0.1), that is our 'bestFDR': the largest p-value that beats our
# target FDR.
if not bestFDR and FDR < target:
sys.stderr.write("target %f\n" % target)
sys.stderr.write("before %f %f\n" % (last_FDR, last_pv))
sys.stderr.write("after %f %f\n" % (FDR, pv))
bestFDR = FDR; bestPV = pv
last_FDR=FDR; last_pv=pv
sys.stderr.write("Target {} FDR {} pv {}\n".format(target,
bestFDR,
bestPV))
return bestFDR
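# A minimal usage sketch (the counts and p-values below are made up):
#   pvals = [(12, 3, 0.035), (7, 8, 0.79), (20, 2, 0.0004)]
#   best_fdr = calc_fdr(pvals, target=0.1, sims=5)
# As written, the function returns the best FDR found; the matching p-value
# cutoff (bestPV) is reported on stderr.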
###############################################################################
# Create pandas dataframe from genes #
###############################################################################
def genes_to_df(genes, ind):
"""Make a pandas dataframe from a dictionary of genes.
Dataframe has the following columns::
Counts::
'Mat_Counts' -- Total number of maternal counts for this gene
'Pat_Counts' -- Total number of paternal counts for this gene
'N_Counts' -- Total number of reads with N in the SNP position
'Others' -- Total number of reads with a non-parental allele
Gene-level summary::
'Winner' -- The overall winner ('mat' or 'pat')
'pval' -- The pvalue of that association (binomial)
SNP-level information::
'SNPs' -- Total number of SNPs in this gene
        'Mat_wins' -- Total number of SNPs with maternal wins
        'Pat_wins' -- Total number of SNPs with paternal wins
'Not_Sig' -- Total number of SNPs that weren't significant
'Weird' -- Total number of SNPs with non-parental allele
'Failed' -- Total number of SNPs that failed for some reason,
(usually due to Ns in the sequence)
"""
ind = str(ind)
# Populate dictionaries from every gene
df_dict = {}
if not genes:
raise Exception('Genes must have at least one entry.')
for name, gene in genes.items():
gene.sum_counts()
df_dict[ind + '_' + name] = {'Mat_Counts': gene.mat_counts,
'Pat_Counts': gene.pat_counts,
'Others': gene.other_counts,
'SNPs': len(gene),
'Winner': gene.win,
'pval': gene.pval,
'Mat_wins': gene.mat_win,
'Pat_wins': gene.pat_win,
'Weird': gene.weird,
'Failed': gene.failed,
'TX': name,
'Tissue ID': ind}
column_order = ['TX', 'Tissue ID', 'Mat_Counts', 'Pat_Counts', 'Others',
'SNPs', 'Winner', 'pval', 'Mat_wins', 'Pat_wins',
'Weird', 'Failed']
df = pandas.DataFrame.from_dict(df_dict, orient='index')
df.index.name = 'IDX'
df = df[column_order]
return df
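# Example usage (hypothetical; `genes` would normally come from get_gene_counts()
# earlier in this script, and 'sample1' is a made-up tissue ID):
#
#     df = genes_to_df(genes, ind='sample1')
#     df[df.pval < 0.05].to_csv('significant_genes.tsv', sep='\t')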
def chr2num(chrom):
"""Make chr# #."""
return chrom[3:] if chrom.startswith('chr') else chrom
def open_zipped(infile, mode='r'):
""" Return file handle of file regardless of zipped or not
Text mode enforced for compatibility with python2 """
mode = mode[0] + 't'
p2mode = mode
if hasattr(infile, 'write'):
return infile
if isinstance(infile, str):
if infile.endswith('.gz'):
return gzip.open(infile, mode)
if infile.endswith('.bz2'):
if hasattr(bz2, 'open'):
return bz2.open(infile, mode)
else:
return bz2.BZ2File(infile, p2mode)
return open(infile, p2mode)
def main(argv=None):
"""Run as a script."""
if not argv:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Positional arguments
parser.add_argument('exon_positions_bed',
help="A bed file of exons to include.")
parser.add_argument('alleleseq_output',
                        help="The output of CombineSnpCounts.py, filtered " +
                        "by FDR")
parser.add_argument('-i', '--ind', help='Individual name')
parser.add_argument('-n', '--tonum', action='store_true',
help='Convert chr# to #')
# Optional Files
optfiles = parser.add_argument_group('Optional Files')
optfiles.add_argument('-o', '--outfile', default=sys.stdout,
help="Output file, Default STDOUT")
optfiles.add_argument('-l', '--logfile', default=sys.stderr,
help="Log File, Default STDERR (append mode)")
optfiles.add_argument('--data',
help="Output raw gene dictionary to this file.")
optfiles.add_argument('--pandas',
help="Output a pickled pandas dataframe here.")
# FDR Calulations
fdrcalc = parser.add_argument_group('FDR Calculation')
fdrcalc.add_argument('--filter-fdr', action='store_true',
help="Filter the output by FDR")
fdrcalc.add_argument('-f', '--fdr-cutoff', type=float, default=0.1,
metavar='', help="FDR cutoff (Default 0.1).")
fdrcalc.add_argument('-s', '--simulations', type=int, default=10,
metavar='',
help="# simulations for FDR calculation " +
"(Default: 10)")
args = parser.parse_args(argv)
ind = args.ind if args.ind \
else os.path.basename(args.alleleseq_output).split('.')[0]
genes = get_gene_counts(args.exon_positions_bed, args.alleleseq_output,
args.tonum)
giter = tqdm(genes.values(), unit='genes') if 'PS1' in os.environ \
else genes.values()
for gene in giter:
gene.sum_counts()
if args.data:
with open(args.data, 'wb') as fout:
pickle.dump(genes, fout)
df = genes_to_df(genes, ind)
fdr_pval = calc_fdr(
[tuple(x) for x in df[['Mat_Counts', 'Pat_Counts', 'pval']].values],
target=args.fdr_cutoff, sims=args.simulations)
logme.log('In ind {} p-values smaller than {} beat FDR of {}'
.format(ind, fdr_pval, args.fdr_cutoff), 'info')
# Filter by FDR if requested
if args.filter_fdr:
logme.log('Filtering genes by FDR less than {}'
.format(args.fdr_cutoff), 'info')
df = df[df.pval < fdr_pval]
if args.pandas:
df.to_pickle(args.pandas)
with open_zipped(args.outfile, 'w') as fout:
df.to_csv(fout, sep='\t')
return 0
if __name__ == '__main__' and '__file__' in globals():
sys.exit(main())
|
# Generated by Django 3.1.7 on 2021-10-19 12:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dm_page', '0086_auto_20211019_1140'),
]
operations = [
migrations.RemoveField(
model_name='organisation',
name='additional_name',
),
migrations.RemoveField(
model_name='organisation',
name='profile',
),
migrations.RemoveField(
model_name='paramètre',
name='institut_image',
),
migrations.RemoveField(
model_name='paramètre',
name='institut_post_code',
),
migrations.RemoveField(
model_name='paramètre',
name='institut_street_name',
),
migrations.RemoveField(
model_name='paramètre',
name='institut_title',
),
migrations.RemoveField(
model_name='paramètre',
name='institut_town',
),
migrations.RemoveField(
model_name='paramètre',
name='object_description',
),
migrations.RemoveField(
model_name='paramètre',
name='object_title',
),
migrations.RemoveField(
model_name='paramètre',
name='president',
),
migrations.RemoveField(
model_name='paramètre',
name='president_signature',
),
migrations.AddField(
model_name='organisation',
name='institut_image',
field=models.FileField(blank=True, null=True, upload_to='', verbose_name="Image de l'Institut"),
),
migrations.AddField(
model_name='organisation',
name='institut_post_code',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Code Postal'),
),
migrations.AddField(
model_name='organisation',
name='institut_street_name',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Rue'),
),
migrations.AddField(
model_name='organisation',
name='institut_title',
field=models.CharField(blank=True, max_length=300, null=True, verbose_name='Institution'),
),
migrations.AddField(
model_name='organisation',
name='institut_town',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Commune'),
),
migrations.AddField(
model_name='organisation',
name='name',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Nom'),
),
migrations.AddField(
model_name='organisation',
name='object_description',
field=models.TextField(blank=True, null=True, verbose_name="Description de l'Objet"),
),
migrations.AddField(
model_name='organisation',
name='object_title',
field=models.CharField(blank=True, max_length=300, null=True, verbose_name="Titre de l'Objet"),
),
migrations.AddField(
model_name='organisation',
name='president',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Président'),
),
migrations.AddField(
model_name='organisation',
name='president_signature',
field=models.FileField(blank=True, null=True, upload_to='', verbose_name='Signature du Président'),
),
migrations.AddField(
model_name='organisation',
name='used_for_receipt',
field=models.BooleanField(default=False),
),
]
|
# A Wavenet For Speech Denoising - Dario Rethage - 19.05.2017
# Util.py
# Utility functions for dealing with audio signals and training a Denoising Wavenet
import os
import numpy as np
import json
import warnings
import scipy.signal
import scipy.stats
import soundfile as sf
import keras
def l1_l2_loss(y_true, y_pred, l1_weight, l2_weight):
loss = 0
if l1_weight != 0:
loss += l1_weight*keras.objectives.mean_absolute_error(y_true, y_pred)
if l2_weight != 0:
loss += l2_weight * keras.objectives.mean_squared_error(y_true, y_pred)
return loss
def compute_receptive_field_length(stacks, dilations, filter_length, target_field_length):
    half_filter_length = (filter_length - 1) // 2
length = 0
for d in dilations:
length += d*half_filter_length
length = 2*length
length = stacks * length
length += target_field_length
return length
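# Worked example (illustrative values): with stacks=2, dilations=[1, 2, 4, 8],
# filter_length=3 and target_field_length=1, half_filter_length is 1, so the
# receptive field is 2 * 2 * (1 + 2 + 4 + 8) + 1 = 61 samples.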
def snr_db(rms_amplitude_A, rms_amplitude_B):
return 20.0*np.log10(rms_amplitude_A/rms_amplitude_B)
def wav_to_float(x):
try:
max_value = np.iinfo(x.dtype).max
min_value = np.iinfo(x.dtype).min
except:
max_value = np.finfo(x.dtype).max
min_value = np.finfo(x.dtype).min
x = x.astype('float64', casting='safe')
x -= min_value
x /= ((max_value - min_value) / 2.)
x -= 1.
return x
def float_to_uint8(x):
x += 1.
x /= 2.
uint8_max_value = np.iinfo('uint8').max
x *= uint8_max_value
x = x.astype('uint8')
return x
def keras_float_to_uint8(x):
x += 1.
x /= 2.
uint8_max_value = 255
x *= uint8_max_value
return x
def linear_to_ulaw(x, u=255):
x = np.sign(x) * (np.log(1 + u * np.abs(x)) / np.log(1 + u))
return x
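# Worked example (illustrative): linear_to_ulaw(0.5) with u=255 gives
# log(1 + 255*0.5) / log(256) ~= 0.88, showing how mu-law companding gives
# small amplitudes proportionally more of the output range than large ones.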
def keras_linear_to_ulaw(x, u=255.0):
x = keras.backend.sign(x) * (keras.backend.log(1 + u * keras.backend.abs(x)) / keras.backend.log(1 + u))
return x
def uint8_to_float(x):
max_value = np.iinfo('uint8').max
min_value = np.iinfo('uint8').min
x = x.astype('float32', casting='unsafe')
x -= min_value
x /= ((max_value - min_value) / 2.)
x -= 1.
return x
def keras_uint8_to_float(x):
max_value = 255
min_value = 0
x -= min_value
x /= ((max_value - min_value) / 2.)
x -= 1.
return x
def ulaw_to_linear(x, u=255.0):
y = np.sign(x) * (1 / float(u)) * (((1 + float(u)) ** np.abs(x)) - 1)
return y
def keras_ulaw_to_linear(x, u=255.0):
y = keras.backend.sign(x) * (1 / u) * (((1 + u) ** keras.backend.abs(x)) - 1)
return y
def one_hot_encode(x, num_values=256):
if isinstance(x, int):
x = np.array([x])
if isinstance(x, list):
x = np.array(x)
return np.eye(num_values, dtype='uint8')[x.astype('uint8')]
def one_hot_decode(x):
return np.argmax(x, axis=-1)
def preemphasis(signal, alpha=0.95):
return np.append(signal[0], signal[1:] - alpha * signal[:-1])
def binary_encode(x, max_value):
if isinstance(x, int):
x = np.array([x])
if isinstance(x, list):
x = np.array(x)
width = np.ceil(np.log2(max_value)).astype(int)
return (((x[:, None] & (1 << np.arange(width)))) > 0).astype(int)
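# Example (illustrative): binary_encode(3, max_value=8) uses width = ceil(log2(8)) = 3
# and returns [[1, 1, 0]] (least-significant bit first), whereas
# one_hot_encode(3, num_values=4) returns [[0, 0, 0, 1]].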
def get_condition_input_encode_func(representation):
if representation == 'binary':
return binary_encode
else:
return one_hot_encode
def ensure_keys_in_dict(keys, dictionary):
if all (key in dictionary for key in keys):
return True
return False
def get_subdict_from_dict(keys, dictionary):
return dict((k, dictionary[k]) for k in keys if k in dictionary)
def pretty_json_dump(values, file_path=None):
if file_path is None:
        print(json.dumps(values, sort_keys=True, indent=4, separators=(',', ': ')))
else:
json.dump(values, open(file_path, 'w'), sort_keys=True, indent=4, separators=(',', ': '))
def read_wav(filename):
# Reads in a wav audio file, takes the first channel, converts the signal to float64 representation
audio_signal, sample_rate = sf.read(filename)
if audio_signal.ndim > 1:
audio_signal = audio_signal[:, 0]
if audio_signal.dtype != 'float64':
audio_signal = wav_to_float(audio_signal)
return audio_signal, sample_rate
def load_wav(wav_path, desired_sample_rate):
sequence, sample_rate = read_wav(wav_path)
sequence = ensure_sample_rate(sequence, desired_sample_rate, sample_rate)
return sequence
def write_wav(x, filename, sample_rate):
print("Writing WAV to " + filename)
if type(x) != np.ndarray:
x = np.array(x)
with warnings.catch_warnings():
warnings.simplefilter("error")
sf.write(filename, x, sample_rate)
def ensure_sample_rate(x, desired_sample_rate, file_sample_rate):
if file_sample_rate != desired_sample_rate:
return scipy.signal.resample_poly(x, desired_sample_rate, file_sample_rate)
return x
def rms(x):
return np.sqrt(np.mean(np.square(x), axis=-1))
def normalize(x):
max_peak = np.max(np.abs(x))
return x / max_peak
def get_subsequence_with_speech_indices(full_sequence):
signal_magnitude = np.abs(full_sequence)
chunk_length = 800
chunks_energies = []
    for i in range(0, len(signal_magnitude), chunk_length):
chunks_energies.append(np.mean(signal_magnitude[i:i + chunk_length]))
threshold = np.max(chunks_energies) * .1
onset_chunk_i = 0
for i in range(0, len(chunks_energies)):
if chunks_energies[i] >= threshold:
onset_chunk_i = i
break
termination_chunk_i = len(chunks_energies)
for i in range(len(chunks_energies) - 1, 0, -1):
if chunks_energies[i] >= threshold:
termination_chunk_i = i
break
num_pad_chunks = 4
onset_chunk_i = np.max((0, onset_chunk_i - num_pad_chunks))
termination_chunk_i = np.min((len(chunks_energies), termination_chunk_i + num_pad_chunks))
return [onset_chunk_i*chunk_length, (termination_chunk_i+1)*chunk_length]
def extract_subsequence_with_speech(full_sequence):
indices = get_subsequence_with_speech_indices(full_sequence)
return full_sequence[indices[0]:indices[1]]
def dir_contains_files(path):
for f in os.listdir(path):
if not f.startswith('.'):
return True
return False
|
from rdkit import Chem
import os
import os.path as osp
import shutil
from ogb.utils import smiles2graph
from ogb.utils.torch_util import replace_numpy_with_torchtensor
from ogb.utils.url import decide_download, download_url, extract_zip
import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Data
class PyGPCQM4MDataset(InMemoryDataset):
def __init__(self, root = 'dataset', smiles2graph = smiles2graph, transform=None, pre_transform = None):
self.original_root = root
self.smiles2graph = smiles2graph
self.folder = osp.join(root, 'pcqm4m')
self.download_name = 'pcqm4m-folder'
self.version = 1
self.url = f'http://ogb-data.stanford.edu/data/lsc/{self.download_name}.zip'
# check version and update if necessary
if osp.isdir(self.folder) and (not osp.exists(osp.join(self.folder, f'RELEASE_v{self.version}.txt'))):
print('PCQM4M dataset has been updated.')
if input('Will you update the dataset now? (y/N)\n').lower() == 'y':
shutil.rmtree(self.folder)
super(PyGPCQM4MDataset, self).__init__(self.folder, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
return 'data.csv.gz'
@property
def processed_file_names(self):
return 'geometric_data_processed.pt'
def download(self):
if decide_download(self.url):
path = download_url(self.url, self.original_root)
extract_zip(path, self.original_root)
os.unlink(path)
try:
shutil.rmtree(self.folder)
except:
pass
shutil.move(osp.join(self.original_root, self.download_name), self.folder)
else:
print('Stop download.')
exit(-1)
def process(self):
data_df = pd.read_csv(osp.join(self.raw_dir, 'data.csv.gz'))
smiles_list = data_df['smiles']
homolumogap_list = data_df['homolumogap']
print('Converting SMILES strings into graphs...')
data_list = []
for i in tqdm(range(len(smiles_list))):
data = Data()
smiles = smiles_list[i]
homolumogap = homolumogap_list[i]
graph = self.smiles2graph(smiles)
assert(len(graph['edge_feat']) == graph['edge_index'].shape[1])
assert(len(graph['node_feat']) == graph['num_nodes'])
data.__num_nodes__ = int(graph['num_nodes'])
data.edge_index = torch.from_numpy(graph['edge_index']).to(torch.int64)
data.edge_attr = torch.from_numpy(graph['edge_feat']).to(torch.int64)
data.x = torch.from_numpy(graph['node_feat']).to(torch.int64)
data.y = torch.Tensor([homolumogap])
data_list.append(data)
# double-check prediction target
split_dict = self.get_idx_split()
assert(all([not torch.isnan(data_list[i].y)[0] for i in split_dict['train']]))
assert(all([not torch.isnan(data_list[i].y)[0] for i in split_dict['valid']]))
assert(all([torch.isnan(data_list[i].y)[0] for i in split_dict['test']]))
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
print('Saving...')
torch.save((data, slices), self.processed_paths[0])
def get_idx_split(self):
split_dict = replace_numpy_with_torchtensor(torch.load(osp.join(self.root, 'split_dict.pt')))
return split_dict
class PCQM4MEvaluator:
def __init__(self):
'''
Evaluator for the PCQM4M dataset
Metric is Mean Absolute Error
'''
pass
def eval(self, input_dict):
'''
y_true: numpy.ndarray or torch.Tensor of shape (num_graphs,)
y_pred: numpy.ndarray or torch.Tensor of shape (num_graphs,)
y_true and y_pred need to be of the same type (either numpy.ndarray or torch.Tensor)
'''
assert('y_pred' in input_dict)
assert('y_true' in input_dict)
y_pred, y_true = input_dict['y_pred'], input_dict['y_true']
assert((isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray))
or
(isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor)))
assert(y_true.shape == y_pred.shape)
assert(len(y_true.shape) == 1)
if isinstance(y_true, torch.Tensor):
return {'mae': torch.mean(torch.abs(y_pred - y_true)).cpu().item()}
else:
return {'mae': float(np.mean(np.absolute(y_pred - y_true)))}
def save_test_submission(self, input_dict, dir_path):
'''
save test submission file at dir_path
'''
assert('y_pred' in input_dict)
y_pred = input_dict['y_pred']
if not osp.exists(dir_path):
os.makedirs(dir_path)
filename = osp.join(dir_path, 'y_pred_pcqm4m')
assert(isinstance(filename, str))
assert(isinstance(y_pred, np.ndarray) or isinstance(y_pred, torch.Tensor))
assert(y_pred.shape == (377423,))
if isinstance(y_pred, torch.Tensor):
y_pred = y_pred.numpy()
y_pred = y_pred.astype(np.float32)
np.savez_compressed(filename, y_pred = y_pred)
if __name__ == '__main__':
# dataset = PyGPCQM4MDataset()
# print(dataset[0])
# print(dataset.get_idx_split())
evaluator = PCQM4MEvaluator()
y_true = torch.randn(100)
y_pred = torch.randn(100)
result = evaluator.eval({'y_true': y_true, 'y_pred': y_pred})
print(result)
y_pred = torch.randn(377423)
evaluator.save_test_submission({'y_pred': y_pred}, 'result')
|
from .base_path_processor import BasePathProcessor
from .simple_path_processor import SimplePathProcessor
|
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# usage: rule_gperf.py INPUT_FILE OUTPUT_DIR
# INPUT_FILE is a path to DocTypeStrings.gperf, HTMLEntityNames.gperf, or
# ColorData.gperf.
# OUTPUT_DIR is where the gperf-generated .cpp file should be placed. Because
# some users want a .c file instead of a .cpp file, the .cpp file is copied
# to .c when done.
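#
# Example invocation (paths are illustrative):
#   python rule_gperf.py WebCore/html/DocTypeStrings.gperf out/DerivedSources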
import posixpath
import shutil
import subprocess
import sys
assert len(sys.argv) == 3
inputFile = sys.argv[1]
outputDir = sys.argv[2]
gperfCommands = {
'DocTypeStrings.gperf': [
'-CEot', '-L', 'ANSI-C', '-k*', '-N', 'findDoctypeEntry',
'-F', ',PubIDInfo::eAlmostStandards,PubIDInfo::eAlmostStandards'
],
'HTMLEntityNames.gperf': [
'-a', '-L', 'ANSI-C', '-C', '-G', '-c', '-o', '-t', '-k*',
'-N', 'findEntity', '-D', '-s', '2'
],
'ColorData.gperf': [
'-CDEot', '-L', 'ANSI-C', '-k*', '-N', 'findColor', '-D', '-s', '2'
],
}
inputName = posixpath.basename(inputFile)
assert inputName in gperfCommands
(inputRoot, inputExt) = posixpath.splitext(inputName)
outputCpp = posixpath.join(outputDir, inputRoot + '.cpp')
#command = ['gperf', '--output-file', outputCpp]
command = ['gperf']
command.extend(gperfCommands[inputName])
command.append(inputFile)
ofile = open(outputCpp, 'w')
# Do it. check_call is new in 2.5, so simulate its behavior with call and
# assert.
returnCode = subprocess.call(command, stdout=ofile.fileno())
assert returnCode == 0
outputC = posixpath.join(outputDir, inputRoot + '.c')
shutil.copyfile(outputCpp, outputC)
|
import peewee as pw
from flask import Blueprint
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from myfunds.core.models import Account
from myfunds.core.models import Category
from myfunds.core.models import Currency
from myfunds.core.models import JointLimit
from myfunds.core.models import JointLimitParticipant
from myfunds.web import auth
from myfunds.web import notify
from myfunds.web import utils
from myfunds.web.forms import AddJointLimitForm
from myfunds.web.forms import DeleteJointLimitForm
bp = Blueprint("joint_limits", __name__, template_folder="templates")
@bp.route("/joint-limits")
@auth.login_required
@auth.superuser_required
def index():
# fmt: off
currencies = Currency.select().order_by(Currency.code_alpha)
participants = (
JointLimit
.select(
JointLimit.id,
pw.fn.COUNT(
pw.fn.DISTINCT(JointLimitParticipant.category.account.id)
).alias("participants"),
)
.join(JointLimitParticipant)
.join(Category)
.join(Account)
.group_by(JointLimit.id)
)
limits = (
JointLimit
.select(
JointLimit,
Currency,
pw.Value(participants.c.participants).alias("participants"),
)
.join(Currency)
.switch()
.join(
participants,
pw.JOIN.LEFT_OUTER,
on=(JointLimit.id == participants.c.id)
)
.order_by(JointLimit.name)
)
# fmt: on
return render_template(
"joint_limits/view.html", currencies=currencies, limits=limits
)
@bp.route("/joint-limits/new", methods=["POST"])
@auth.login_required
@auth.superuser_required
def new():
redirect_url = url_for("joint_limits.index")
form = AddJointLimitForm(request.form)
utils.validate_form(form, redirect_url)
currency_id = form.currency_id.data
name = form.name.data
amount = form.amount.data
currency = Currency.get_or_none(id=currency_id)
if currency is None:
notify.error("Currency not found.")
return redirect(redirect_url)
JointLimit.create(currency=currency, name=name, amount=amount)
notify.info("New limit was created.")
return redirect(redirect_url)
@bp.route("/joint-limits/delete", methods=["POST"])
@auth.login_required
@auth.superuser_required
def delete():
redirect_url = url_for("joint_limits.index")
form = DeleteJointLimitForm(request.form)
utils.validate_form(form, redirect_url)
limit_id = form.limit_id.data
limit = JointLimit.get_or_none(id=limit_id)
if limit is None:
notify.error("Limit not found.")
return redirect(redirect_url)
limit.delete_instance()
notify.info(f"Limit '{limit.name}' was deleted.")
return redirect(redirect_url)
|
from tkinter import *
from password_generator import PasswordGenerator
master = Tk()
master.title("Password Generator")
master.resizable(0,0)
password = PasswordGenerator()
password.minlen = 8
password.maxlen = 9
password.minuchars = 2
password.minlchars = 2
password.minnumbers = 2
password.minschars = 2
def create():
result = password.generate()
pw.insert(0, result)
def delete1():
pw.delete(0,END)
label1 = Label(master, text="Password Generator", font=("Helvetica", 15, "bold"))
pw = Entry(master)
Generate = Button(master, text="Generate", command=create)
clear = Button(master, text="Clear", command=delete1)
label1.grid(row=0, column=0, padx=50)
pw.grid(row=1, column=0, padx=50)
Generate.grid(row=2, column=0, ipadx=30, ipady=15, padx=50, pady=30)
clear.grid(row=3, column=0, ipadx=35, ipady=15, padx=50)
master.mainloop() |
# Develop logic that reads a person's weight and height, calculates their BMI and shows their status according to the table below
# Below 18.5: underweight; between 18.5 and 25: ideal weight; 25 to 30: overweight; 30 to 40: obese; above 40: morbidly obese
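# Worked example: weight 70 kg and height 1.75 m give BMI = 70 / (1.75 * 1.75) ~= 22.86,
# which falls in the "ideal weight" band of the table above.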
peso = float(input('Qual é o seu peso? (KG)'))
altura = float(input('Qual é a sua altura? (m)'))
IMC = peso / (altura * altura)
print('O IMC dessa pessoa é de {:.2f}'.format(IMC))
if IMC < 18.5:
    print('Abaixo do peso')
elif IMC < 25:
    print('PARABÉNS, você está na faixa de PESO NORMAL')
elif IMC < 30:
    print('Procure um nutricionista, você está no SOBREPESO')
elif IMC < 40:
    print('Procure um médico, você esta no grau OBESO')
else:
    print('PERIGO !!, Obesidade morbida')
|
"""
Create a multibar file from our labeled file and our tree. We will do this at different taxonomic levels
and different label levels.
For each label level we have, we'll create a single file.
"""
import os
import sys
import argparse
from ete3 import Tree
def read_labels(lf, col, verbose=False):
"""
Read the labels file and return a dict with tree labels and values
:param lf: labels file
:param col: the column to use
:param verbose: extra output
:return: a dict of the leaves and their labels and a dict of the labels and their counts
"""
ret = {}
counts = {}
with open(lf, 'r') as f:
for l in f:
p = l.strip().split("\t")
if len(p) <= col:
continue
if not p[col]:
continue
ret[p[0]] = p[col]
counts[p[col]] = counts.get(p[col], 0) + 1
return ret, counts
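# Example (hypothetical labels file): a tab-separated line such as
#   leaf_17<TAB>Eukaryota<TAB>Actinopterygii
# read with col=1 yields ret = {'leaf_17': 'Eukaryota'} and counts = {'Eukaryota': 1}.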
def write_directory(treefile, data, counts, taxa, outputdir, colors, legend, legendshape, proportions, verbose=False):
"""
Write a directory with one multibar file per type
:param treefile: The tree file to parse
:param data: The data dict with leaves and labels
:param counts: the counts of label frequency
:param outputdir: the directory to create
:param colors: the array of colors to choose from
:param legend: the legend name
:param legendshape: the legend shape
:param verbose: more output
:return:
"""
allkeys = list(counts.keys())
if len(allkeys) > len(colors):
sys.stderr.write("ERROR: Not enough colors. We have {} keys and {} colors\n".format(len(allkeys), len(colors)))
sys.exit(-1)
keycolors = {x: colors[allkeys.index(x)] for x in allkeys}
if not os.path.exists(outputdir):
try:
os.mkdir(outputdir)
except Exception as e:
sys.stderr.write("Cannot make directory: {}\n".format(outputdir))
sys.stderr.write("{}\n".format(e))
sys.exit(-1)
if verbose:
sys.stderr.write("Reading tree\n")
tree = Tree(treefile, quoted_node_names=True, format=1)
if verbose:
sys.stderr.write(f"Creating output files in {outputdir}\n")
for k in counts:
fnme = k.replace(' ', '_')
outputf = os.path.join(outputdir, fnme + ".multibar.txt")
with open(outputf, 'w') as out:
out.write("DATASET_MULTIBAR\nSEPARATOR COMMA\n")
out.write("DATASET_LABEL,{} counts\n".format(k))
out.write("FIELD_COLORS,{}\n".format(keycolors[k]))
out.write("FIELD_LABELS,{}\n".format(k))
out.write("WIDTH,50\n")
out.write("DATASET_SCALE,0-{}-{}\n".format(k, keycolors[k]))
out.write("HEIGHT_FACTOR,50\n")
out.write("SHOW_INTERNAL,1\n")
out.write("ALIGN_FIELDS,1\n")
out.write("COLOR,{}\n".format(keycolors[k]))
out.write("DATA\n")
for n in tree.traverse("preorder"):
if taxa in n.name:
leafcount = 0
for l in n.get_leaves():
if l.name in data and data[l.name] == k:
leafcount += 1
elif l.name in data and verbose:
sys.stderr.write("Skipped {} as it is a {} and we're a {}\n".format(l.name, data[l.name], k))
elif verbose:
sys.stderr.write("No {}\n".format(l.name))
if proportions:
leafcount /= counts[k]
out.write("{},{}\n".format(n.name, leafcount))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument('-f', help='The labeled leaves file from fastq2ids.py', required=True)
parser.add_argument('-t', help='Newick tree file', required=True)
parser.add_argument('-d', help='Output directory where to write the files', required=True)
parser.add_argument('-n', help='Column in the labeled leaves file to use. 0 indexed', required=True, type=int)
    parser.add_argument('-l', help='Color strip legend (e.g. Kingdom, Fish, Species)', required=True)
parser.add_argument('-x', help='taxa to use for the labels', required=True)
parser.add_argument('-s', help='Legend shape (a number). Default = 1', default="1", type=str)
parser.add_argument('-p', help='Display proportion of counts not counts', action='store_true')
parser.add_argument('-c', help='Colors to use. These will be prepended to our default list', action='append')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999']
if args.c:
colors = args.c + colors
data, counts = read_labels(args.f, args.n, args.v)
taxa= args.x
if not taxa.startswith('r_'):
taxa = "r_{}".format(taxa)
write_directory(args.t, data, counts, taxa, args.d, colors, args.l, args.s, args.p, args.v) |
#!/usr/bin/python3 -OO
# Copyright 2009-2020 The SABnzbd-Team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.utils.sleepless - Keep macOS awake by setting power assertions
"""
import objc
from Foundation import NSBundle
# https://developer.apple.com/documentation/iokit/iopowersources.h?language=objc
IOKit = NSBundle.bundleWithIdentifier_("com.apple.framework.IOKit")
functions = [
("IOPMAssertionCreateWithName", b"i@i@o^i"),
("IOPMAssertionRelease", b"vi"),
]
objc.loadBundleFunctions(IOKit, globals(), functions)
# Keep track of the assertion ID at the module-level
assertion_id = None
def keep_awake(reason):
"""Tell OS to stay awake. One argument: text to send to OS.
Stays in effect until next 'allow_sleep' call.
Multiple calls allowed.
"""
global assertion_id
# Each assertion needs to be released, so make sure to only set it once
if not assertion_id:
kIOPMAssertionTypeNoIdleSleep = "NoIdleSleepAssertion"
kIOPMAssertionLevelOn = 255
errcode, assertion_id = IOPMAssertionCreateWithName(
kIOPMAssertionTypeNoIdleSleep, kIOPMAssertionLevelOn, reason, None
)
return errcode == 0
return True
def allow_sleep():
""" Allow OS to go to sleep """
global assertion_id
if assertion_id:
IOPMAssertionRelease(assertion_id)
assertion_id = None
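if __name__ == "__main__":
    # Minimal manual check (illustrative; requires macOS with the pyobjc bindings above):
    # hold a no-idle-sleep assertion for a few seconds, then release it.
    import time
    print("Assertion created:", keep_awake("sleepless self-test"))
    time.sleep(5)
    allow_sleep()
    print("Assertion released")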
|
'''
Pupil-diameter data is stored in a csv file. Here it is wrapped into a pseudo-"EEG"
MNE data structure so the same computations can be reused, giving a unified
interface that makes later data fusion easier.
'''
import pandas as pd
import numpy as np
import mne
def create_mne_object_from_pupil_diameter(data, sample_rate=60):
data = np.array([data])
# ch_types = ['eye']
ch_types = ['eeg']
ch_names = ['pupil_diameter']
info = mne.create_info(ch_names=ch_names, sfreq=sample_rate, ch_types=ch_types)
raw = mne.io.RawArray(data, info)
return raw
def get_average_psd(sub_raw_obj, fmin, fmax):
'''
This method returns a the average log psd feature for a MNE raw object
Arguments:
sub_raw_obj: a raw object from MNE library
fmin: the minium frequency you are intreseted
fmax: the maximum frequency you are intreseted
Returns:
average_psd: the required psd features, numpy array like.
shape: (the number of the features, )
'''
psds, freq = mne.time_frequency.psd_multitaper(sub_raw_obj, fmin=fmin, fmax=fmax, n_jobs=4, verbose='ERROR')
# preventing overflow
psds[psds <= 0] = 1
psds = 10 * np.log10(psds)
average_psd = np.mean(psds, axis=1)
return average_psd
def Eye_extract_average_psd_from_a_trial(file_path, save_path, average_second=1, overlap=0.5):
assert overlap >= 0 and overlap < 1
data = pd.DataFrame(pd.read_csv(file_path))
data = data['PupilDiameter']
raw_obj = create_mne_object_from_pupil_diameter(data=data, sample_rate=60)
# print(raw_obj)
total_time = int(raw_obj.times.max())
features = []
    move = average_second * (1 - overlap)  # determine the step size
    # print("total_time:\n", total_time, "\nmove:\n", move)  # index: 0->237 (199*2-1 values)
    # i = 1
    for start_second in np.arange(0, total_time, move):  # 120 s of data, so the index runs 0->119
if start_second + average_second > total_time:
break
sub_raw_obj = raw_obj.copy().crop(start_second, start_second + average_second)
try:
first_psd = get_average_psd(sub_raw_obj, fmin=0.0, fmax=0.5)
# print("first_psd: ", type(first_psd), ", ", first_psd)
second_psd = get_average_psd(sub_raw_obj, fmin=0.5, fmax=1.0)
# print("second_psd: ", type(second_psd), ", ", second_psd)
except:
print("Eye error!!!")
return save_path
feature = np.concatenate((first_psd, second_psd), axis=None)
features.append(feature)
npy_features = np.array(features)
# print("npy_features:\n", npy_features.shape) # (x, 2)
np.save(save_path+'Eye.npy', npy_features)
# print("features:\n", type(feature), "\n", features, "\nlen: ", len(features))
return True
def test():
# data = pd.DataFrame(pd.read_csv('P1-Rec1-All-Data-New_Section_2.csv'))
# data = pd.DataFrame(pd.read_csv('P11-Rec1-All-Data-New_Section_30.csv'))
# data = data['PupilDiameter']
Eye_extract_average_psd_from_a_trial(file_path='P11-Rec1-All-Data-New_Section_30.csv', save_path='', average_second=1, overlap=0.5)
if __name__ == "__main__":
# myFT1()
test()
|
import numpy as np
from dataclasses import dataclass
from typing import Any
from typing import Optional
from torch.utils import data as torch_data
from lso.data import data as lso_data
@dataclass
class NumpyData(lso_data.Data, torch_data.Dataset):
x: np.array
objective: Optional[np.array] = None
features: Optional[np.array] = None
def __add__(self, other: Any) -> "NumpyData":
if not isinstance(other, NumpyData):
return NotImplemented
if not self.x.shape[1:] == other.x.shape[1:]:
raise ValueError(f'x shapes of added Data: {self}, {other}'
f' do not match: {self.x.shape}, {other.x.shape}.')
x = np.concatenate((self.x, other.x))
if self.objective is None or other.objective is None:
objective = None
else:
if not self.objective.shape[1:] == other.objective.shape[1:]:
raise ValueError(f'objective shapes of added Data: {self}, {other}'
f' do not match: {self.objective.shape}, {other.objective.shape}.')
objective = np.concatenate((self.objective, other.objective))
if self.features is None or other.features is None:
features = None
else:
if not self.features.shape[1:] == other.features.shape[1:]:
raise ValueError(f'features shapes of added Data: {self}, {other}'
f' do not match: {self.features.shape}, {other.features.shape}.')
features = np.concatenate((self.features, other.features))
return type(self)(x=x, objective=objective, features=features)
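# Example (illustrative; assumes the lso_data.Data base class accepts these fields):
#
#     d = NumpyData(x=np.zeros((2, 3))) + NumpyData(x=np.ones((5, 3)))
#     d.x.shape  # -> (7, 3); objective/features stay None unless both operands provide them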
@dataclass
class NumpyLatent(lso_data.Latent, torch_data.Dataset):
z: np.array
objective: Optional[np.array] = None
features: Optional[np.array] = None
def __add__(self, other: Any) -> "NumpyLatent":
if not isinstance(other, NumpyLatent):
return NotImplemented
if not self.z.shape[1:] == other.z.shape[1:]:
raise ValueError(f'Z shapes of added latents: {self}, {other}'
f' do not match: {self.z.shape}, {other.z.shape}.')
z = np.concatenate((self.z, other.z))
if self.objective is None or other.objective is None:
objective = None
else:
if not self.objective.shape[1:] == other.objective.shape[1:]:
raise ValueError(f'objective shapes of added latents: {self}, {other}'
f' do not match: {self.objective.shape}, {other.objective.shape}.')
objective = np.concatenate((self.objective, other.objective))
if self.features is None or other.features is None:
features = None
else:
if not self.features.shape[1:] == other.features.shape[1:]:
raise ValueError(f'features shapes of added latents: {self}, {other}'
f' do not match: {self.features.shape}, {other.features.shape}.')
features = np.concatenate((self.features, other.features))
return type(self)(z=z, objective=objective, features=features)
|
# Create lists and import using csv.
import csv
train_sad = []
train_anger = []
train_joy = []
train_trust = []
train_fear = []
train_surprise = []
train_disgust = []
train_anticipation = []
#Open and import the training set emotions.txt
with open("TrainingSetEmotions.txt", "r") as trainingsetemotions:
next(trainingsetemotions)
reader = csv.reader(trainingsetemotions,delimiter='\t')
for sad,anger,joy,trust,fear,surprise,disgust,anticipation in reader:
train_sad.append(sad)
train_anger.append(anger)
train_joy.append(joy)
train_trust.append(trust)
train_fear.append(fear)
train_surprise.append(surprise)
train_disgust.append(disgust)
train_anticipation.append(anticipation)
|
from __future__ import absolute_import
from __future__ import print_function
import operator
import six
from six.moves import range
class HotCounter(object):
def __init__(self, vs=None, limit=20):
if vs is None:
vs = []
self.limit = limit
self.total = {}
self.updates = {}
self._max = 0
for v in vs:
self.add(v)
def add(self, v):
c = self.updates.get(v, 0) + 1
self.updates[v] = c
if c > self._max:
self._max = c
if len(self.updates) > self.limit * 5 and self._max > 5:
self._merge()
def _merge(self):
for k, c in six.iteritems(self.updates):
if c > 1:
self.total[k] = self.total.get(k, 0) + c
self._max = 0
self.updates = {}
if len(self.total) > self.limit * 5:
self.total = dict(self.top(self.limit * 3))
def update(self, o):
self._merge()
if isinstance(o, HotCounter):
o._merge()
for k, c in six.iteritems(o.total):
self.total[k] = self.total.get(k, 0) + c
def top(self, limit):
return sorted(list(self.total.items()), key=operator.itemgetter(1), reverse=True)[:limit]
def test():
import random
import math
t = HotCounter()
for j in range(10):
c = HotCounter()
for i in range(10000):
v = int(math.sqrt(random.randint(0, 1000000)))
c.add(v)
t.update(c)
for k, v in t.top(20):
print(k, v)
if __name__ == '__main__':
test()
|
import json
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# Ad-hoc table objects for the bulk_insert calls below; the table names and columns
# are inferred from the inserted data and may need adjusting to the real schema.
goals = sa.table('goals', sa.column('goal'), sa.column('name'))
teachers = sa.table(
    'teachers',
    sa.column('name'), sa.column('about'), sa.column('rating'),
    sa.column('picture'), sa.column('price'),
)
with open('data.json', 'r') as jf:
data = json.load(jf)
bulk_list = []
for goal, name in data['goals'].items():
bulk_list.append({'goal': goal, 'name': name})
op.bulk_insert(goals, bulk_list)
bulk_list = []
for t in data['teachers']:
bulk_list.append({
'name': t['name'],
'about': t['about'],
'rating': t['rating'],
'picture': t['picture'],
'price': t['price'],
})
op.bulk_insert(teachers, bulk_list)
|
# Generated by Django 2.1.1 on 2018-10-05 04:25
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('api', '0022_merge_20181004_0912'),
]
operations = [
migrations.RemoveField(
model_name='accommodation',
name='addr_city',
),
migrations.RemoveField(
model_name='accommodation',
name='addr_number',
),
migrations.RemoveField(
model_name='accommodation',
name='addr_state',
),
migrations.RemoveField(
model_name='accommodation',
name='addr_street',
),
migrations.AlterField(
model_name='accommodation',
name='title',
field=models.CharField(default=uuid.UUID('822956a6-0e67-4e90-b94f-bcad363471d8'), max_length=100),
),
]
|
import logging
import colorlog
class CustomLogger:
FORMAT = '[%(asctime)s | PID: %(process)d | %(name)s - %(levelname)-8s] %(message)s'
dict_level = {
'debug' : logging.DEBUG,
'info' : logging.INFO,
'warning' : logging.WARNING,
'error' : logging.ERROR,
'critical' : logging.CRITICAL
}
def create_logger(self, log_name = __name__, log_file = None, low_level = 'info',
datefmt='%d-%b-%y %H:%M:%S', alay=False):
used_format = self.FORMAT
if alay:
logger = colorlog.getLogger(log_name)
bold_seq = '\033[1m'
c_format = (
f'{bold_seq}'
'%(log_color)s'
f'{used_format}'
)
c_handler = colorlog.StreamHandler()
c_handler.setFormatter(colorlog.ColoredFormatter(c_format, datefmt=datefmt))
logger.addHandler(c_handler)
logger.setLevel(self.dict_level[low_level])
else:
# Handlers
logger = logging.getLogger(log_name)
c_handler = logging.StreamHandler()
c_format = logging.Formatter(used_format, datefmt=datefmt)
c_handler.setFormatter(c_format)
c_handler.setLevel(self.dict_level[low_level])
logger.addHandler(c_handler)
logger.setLevel(self.dict_level[low_level])
if log_file is not None:
f_handler = logging.FileHandler(log_file)
f_format = logging.Formatter(used_format, datefmt=datefmt)
f_handler.setFormatter(f_format)
f_handler.setLevel(self.dict_level[low_level])
logger.addHandler(f_handler)
return logger
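if __name__ == '__main__':
    # Small demonstration (illustrative only; not part of the original module).
    log = CustomLogger().create_logger(log_name='demo', low_level='debug', alay=True)
    log.debug('colored debug message')
    log.warning('colored warning message')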
|
from jax import random, vmap
from jax.example_libraries.optimizers import adam
import jax.numpy as jnp
import numpy as np
import numpy.typing as npt
import numpyro
import numpyro.distributions as dist
from numpyro.infer import SVI, Trace_ELBO
from numpyro.infer.autoguide import AutoNormal
from scipy.stats import beta, norm
from sklearn.linear_model import LogisticRegression
def compute_logit(α, θ, x):
return α + jnp.dot(θ, x)
def noninformative_model(X: npt.NDArray, regions: npt.NDArray, y: npt.NDArray):
n, p = X.shape
n_regions = len(np.unique(regions))
# noninformative hyper-priors for the one-level hierarchical model
μ_a = numpyro.sample('μ_a', dist.Normal(0., 100.))
σ_a = numpyro.sample('σ_a', dist.HalfCauchy(5.))
μ_t = numpyro.sample('μ_t', dist.Normal(0., 100.))
σ_t = numpyro.sample('σ_t', dist.HalfCauchy(5.))
μ = jnp.repeat(μ_t, repeats=p)
Σ = jnp.diag(jnp.repeat(σ_t, repeats=p))
# Create an intercept for each administrative region
with numpyro.plate('regions', n_regions):
α = numpyro.sample('α', dist.Normal(μ_a, σ_a))
θ = numpyro.sample('θ', dist.MultivariateNormal(μ, Σ))
β = vmap(compute_logit, in_axes=0)(α[regions], θ[regions], X)
with numpyro.plate('samples', n):
numpyro.sample('y', dist.Bernoulli(logits=β), obs=y)
def compute_region_params(y: npt.NDArray):
m = y.mean()
a = len(y) * m
b = len(y) - a
samples = beta.rvs(a, b, size=1000, random_state=17)
logit_samples = np.log(samples / (1 - samples))
μ, σ = norm.fit(logit_samples)
return μ, σ
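# Worked example (illustrative): if y has 100 entries with mean 0.3, then a = 30 and
# b = 70, and the fitted normal is centred roughly at logit(0.3) ~= -0.85, giving an
# empirical prior for the region intercepts on the logit scale.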
def compute_feature_params(X: npt.NDArray, y: npt.NDArray):
model = LogisticRegression(fit_intercept=False, random_state=17, max_iter=500)
model.fit(X, y)
θ = model.coef_
    w = X @ θ.T
    w = jnp.exp(w) / jnp.square(1 + jnp.exp(w))  # p * (1 - p) for each sample
    return θ, X.T @ jnp.diag(w.flatten()) @ X  # Fisher-information-style precision matrix
def empirical_model(X: npt.NDArray, regions: npt.NDArray, y: npt.NDArray,
a: float, b: float, μ: npt.NDArray, τ: npt.NDArray):
n, p = X.shape
n_regions = len(np.unique(regions))
# Define the empircal hyper-priors for the hierarchical model
μ_a = numpyro.sample('μ_a', dist.Normal(a, b))
σ_a = numpyro.sample('σ_a', dist.HalfCauchy(5.))
μ_t = numpyro.sample('μ_t', dist.MultivariateNormal(μ, precision_matrix=τ))
σ_t = numpyro.sample('σ_t', dist.HalfCauchy(5.))
Σ = jnp.diag(jnp.repeat(σ_t, repeats=p))
with numpyro.plate('regions', n_regions):
α = numpyro.sample('α', dist.Normal(μ_a, σ_a))
θ = numpyro.sample('θ', dist.MultivariateNormal(μ_t, Σ))
β = vmap(compute_logit, in_axes=0)(α[regions], θ[regions], X)
with numpyro.plate('samples', n):
numpyro.sample('y', dist.Bernoulli(logits=β), obs=y)
def fit_model(X: npt.NDArray, regions: npt.NDArray, y:npt.NDArray, **kwargs):
# Giving sensible defaults for SVI arguments, but allowing user to pass
# different ones if they want
default_kwargs = {'seed': 17, 'prior': 'noninformative', 'step_size': 0.005,
'n_steps': 10000}
kwargs = {**default_kwargs, **kwargs}
rng_key = random.PRNGKey(kwargs['seed'])
if kwargs['prior'] == 'noninformative':
guide = AutoNormal(noninformative_model)
model = SVI(noninformative_model, guide,
adam(step_size=kwargs['step_size']), Trace_ELBO())
result = model.run(rng_key, kwargs['n_steps'], X, regions, y)
else:
a, b = compute_region_params(y)
μ, τ = compute_feature_params(X, y)
guide = AutoNormal(empirical_model)
model = SVI(empirical_model, guide,
adam(step_size=kwargs['step_size']), Trace_ELBO())
result = model.run(rng_key, kwargs['n_steps'], X, regions, y, a, b, μ, τ)
return result, guide
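if __name__ == '__main__':
    # Smoke test on synthetic data (illustrative only; the shapes, seed and step count
    # below are assumptions, not part of the original module).
    rng = np.random.default_rng(17)
    X_demo = rng.normal(size=(200, 3))
    regions_demo = rng.integers(0, 4, size=200)
    y_demo = (rng.random(200) < 0.5).astype(int)
    result, guide = fit_model(X_demo, regions_demo, y_demo, n_steps=200)
    print(sorted(result.params.keys()))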
|
#!/usr/bin/env python3
# This script starts a `qemu-system` to run the tests and then safely shutdown
# the VM.
#
# Inspired by https://github.com/CTSRD-CHERI/cheribuild/tree/master/test-scripts
import argparse
import os
from run_tests_common import boot_cheribsd, run_tests_main
def run_cheri_examples_tests(qemu: boot_cheribsd.QemuCheriBSDInstance, args: argparse.Namespace) -> bool:
if args.sysroot_dir is not None:
boot_cheribsd.set_ld_library_path_with_sysroot(qemu)
boot_cheribsd.info("Running tests for cheri-examples")
# This is the BUILD_DIR for the tests, not for this script
os.system("BUILD_DIR=bin ./tests/run_tests.sh")
return True
if __name__ == '__main__':
run_tests_main(test_function=run_cheri_examples_tests, need_ssh=True, should_mount_builddir=False) |
import os
import pandas as pd
import matplotlib.pyplot as plt
from examples.cartpole_example.cartpole_dynamics import RAD_TO_DEG, DEG_TO_RAD
if __name__ == '__main__':
#df_model = pd.read_csv(os.path.join("data", "pendulum_data_PID.csv"))
#df_nn = pd.read_csv(os.path.join("data", "pendulum_data_PID_NN_model.csv"))
df_meas = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_val.csv"))
df_nn = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_val_NN_model.csv"))
fig,axes = plt.subplots(3,1, figsize=(10,10), sharex=True)
axes[0].plot(df_meas['time'], df_meas['p'], "k", label='p system')
axes[0].plot(df_nn['time'], df_nn['p'], "r", label='p NN')
axes[0].set_title("Position (m)")
axes[0].set_ylim(-10, 10.0)
axes[1].plot(df_meas['time'], df_meas['theta'] * RAD_TO_DEG, "k", label='theta system')
axes[1].plot(df_nn['time'], df_nn['theta']*RAD_TO_DEG, "r", label='theta NN')
    axes[2].plot(df_meas['time'], df_meas['u'], label="u system")
    axes[2].plot(df_nn['time'], df_nn['u'], label="u NN")
for ax in axes:
ax.grid(True)
ax.legend()
|
"""paicli: A CLI tool for PAI (Platform for AI).
Author: Sotetsu KOYAMADA
"""
from __future__ import unicode_literals
from prompt_toolkit.application import Application
from prompt_toolkit.interface import CommandLineInterface
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout.containers import Window
from prompt_toolkit.shortcuts import create_eventloop
from prompt_toolkit.filters import IsDone
from prompt_toolkit.layout.controls import TokenListControl
from prompt_toolkit.layout.containers import ConditionalContainer, ScrollOffsets, HSplit
from prompt_toolkit.layout.dimension import LayoutDimension as D
from prompt_toolkit.token import Token
from prompt_toolkit.styles import style_from_dict
# Reference
#
# examples/custom-token-list-control.py:
# https://github.com/jonathanslenders/python-prompt-toolkit/pull/427/commits/2b75d1835eee49c8881c1d8d7e107237227eda95#diff-a4228333fe0e9eca1e3149e15ae2d8de
#
def select_choices_interactively(choices):
# choices = [job['name'] for job in job_content]
app = get_app(choices)
eventloop = create_eventloop()
try:
cli = CommandLineInterface(application=app, eventloop=eventloop)
selected_choice = cli.run(reset_current_buffer=False)
finally:
eventloop.close()
return selected_choice
def get_app(choices):
class InquirerControl(TokenListControl):
selected_option_index = 0
answered = False
choices = []
def __init__(self, choices, **kwargs):
self.choices = choices
super(InquirerControl, self).__init__(self._get_choice_tokens, **kwargs)
@property
def choice_count(self):
return len(self.choices)
def _get_choice_tokens(self, cli):
tokens = []
T = Token
def append(index, label):
selected = (index == self.selected_option_index)
tokens.append((T.Selected if selected else T, '> ' if selected else ' '))
if selected:
tokens.append((Token.SetCursorPosition, ''))
tokens.append((T.Selected if selected else T, '%-24s' % label))
tokens.append((T, '\n'))
for i, choice in enumerate(self.choices):
append(i, choice)
tokens.pop() # Remove last newline.
return tokens
def get_selection(self):
return self.choices[self.selected_option_index]
ic = InquirerControl(choices)
def get_prompt_tokens(cli):
tokens = []
if ic.answered:
cli.return_value = lambda: ic.get_selection()
return tokens
layout = HSplit([
Window(height=D.exact(0),
content=TokenListControl(get_prompt_tokens, align_center=False)),
ConditionalContainer(
Window(
ic,
width=D.exact(43),
height=D(min=3),
scroll_offsets=ScrollOffsets(top=1, bottom=1)
),
filter=~IsDone())])
manager = KeyBindingManager.for_prompt()
@manager.registry.add_binding(Keys.ControlQ, eager=True)
@manager.registry.add_binding(Keys.ControlC, eager=True)
def _(event):
event.cli.set_return_value(None)
@manager.registry.add_binding(Keys.Down, eager=True)
@manager.registry.add_binding(Keys.ControlN, eager=True)
def move_cursor_down(event):
ic.selected_option_index = (
(ic.selected_option_index + 1) % ic.choice_count)
@manager.registry.add_binding(Keys.Up, eager=True)
@manager.registry.add_binding(Keys.ControlP, eager=True)
def move_cursor_up(event):
ic.selected_option_index = (
(ic.selected_option_index - 1) % ic.choice_count)
@manager.registry.add_binding(Keys.Enter, eager=True)
def set_answer(event):
ic.answered = True
event.cli.set_return_value(None)
inquirer_style = style_from_dict({
Token.QuestionMark: '#5F819D',
Token.Selected: '#FF9D00',
Token.Instruction: '',
Token.Answer: '#FF9D00 bold',
Token.Question: 'bold',
})
app = Application(
layout=layout,
key_bindings_registry=manager.registry,
mouse_support=True,
style=inquirer_style
)
return app
|
"""
Computer vision.
"""
from . import mussels
__all__ = [
"mussels",
]
|
import unittest
from intrepyd.iec611312py.parsest import parse_st
from intrepyd.iec611312py.variable import Variable
from intrepyd.iec611312py.stmtprinter import StmtPrinter
from intrepyd.iec611312py.summarizer import Summarizer
from intrepyd.iec611312py.datatype import Primitive
from intrepyd.iec611312py.expression import VariableOcc, Ite
from intrepyd.iec611312py.statement import Assignment
boolType = Primitive('BOOL')
class TestSTSummarizer(unittest.TestCase):
def _run_test(self, program, name2var, expected_assignments, expected_extra_assignments):
statements = parse_st(program, name2var, {})
self._run_test_helper(statements, expected_assignments, expected_extra_assignments)
def _run_test_helper(self, statements, expected_assignments, expected_extra_assignments):
summarizer = Summarizer()
summary = summarizer.summarize_stmt_block(statements)
actual = {}
for assignment in summary:
printer = StmtPrinter()
assignment.rhs.accept(printer)
actual[assignment.lhs.var.name] = printer.result
self.assertEqual(expected_assignments, actual)
actual = {}
for assignment in summarizer.assignments:
printer = StmtPrinter()
assignment.rhs.accept(printer)
actual[assignment.lhs.var.name] = printer.result
self.assertEqual(expected_extra_assignments, actual)
def test_1(self):
name2var = {
'a' : Variable('a', boolType, Variable.LOCAL),
'b' : Variable('b', boolType, Variable.LOCAL),
'c' : Variable('c', boolType, Variable.LOCAL)
}
program = """
a := TRUE;
b := FALSE;
c := a AND b;
"""
expected_assignments = {
'a': 'TRUE',
'b': 'FALSE',
'c': '(a___1 AND b___2)'
}
expected_extra_assignments = {
'a___1': 'TRUE',
'b___2': 'FALSE'
}
self._run_test(program, name2var, expected_assignments, expected_extra_assignments)
def test_2(self):
name2var = {
'a' : Variable('a', boolType, Variable.LOCAL),
'b' : Variable('b', boolType, Variable.LOCAL),
'c' : Variable('c', boolType, Variable.LOCAL)
}
program = """
a := TRUE;
b := FALSE;
a := b;
c := (a AND b);
"""
expected_assignments = {
'a': 'b___1',
'b': 'FALSE',
'c': '(a___2 AND b___3)'
}
expected_extra_assignments = {
'a___2': 'b___1',
'b___1': 'FALSE',
'b___3': 'FALSE'
}
self._run_test(program, name2var, expected_assignments, expected_extra_assignments)
def test_3(self):
name2var = {
'a' : Variable('a', boolType, Variable.LOCAL),
'b' : Variable('b', boolType, Variable.LOCAL),
'c' : Variable('c', boolType, Variable.LOCAL)
}
program = """
a := b;
c := a;
"""
expected_assignments = {
'a': 'b',
'c': 'a___1',
}
expected_extra_assignments = {
'a___1': 'b'
}
self._run_test(program, name2var, expected_assignments, expected_extra_assignments)
def test_4(self):
name2var = {
'a' : Variable('a', boolType, Variable.LOCAL),
'b' : Variable('b', boolType, Variable.LOCAL),
'c' : Variable('c', boolType, Variable.LOCAL)
}
program = """
a := b AND FALSE;
c := a;
a := c AND TRUE;
"""
expected_assignments = {
'a': '(c___2 AND TRUE)',
'c': 'a___1',
}
expected_extra_assignments = {
'c___2': 'a___1',
'a___1': '(b AND FALSE)'
}
self._run_test(program, name2var, expected_assignments, expected_extra_assignments)
def test_5(self):
name2var = {
'a' : VariableOcc(Variable('a', boolType, Variable.LOCAL)),
'b' : VariableOcc(Variable('b', boolType, Variable.LOCAL)),
'c' : VariableOcc(Variable('c', boolType, Variable.LOCAL)),
'd' : VariableOcc(Variable('d', boolType, Variable.LOCAL)),
'e' : VariableOcc(Variable('e', boolType, Variable.LOCAL)),
'f' : VariableOcc(Variable('f', boolType, Variable.LOCAL))
}
statements = [
Assignment(name2var['a'], Ite(name2var['b'], name2var['c'], name2var['d'])),
Assignment(name2var['e'], name2var['a']),
Assignment(name2var['f'], Ite(name2var['a'], name2var['a'], name2var['a'])),
]
expected_assignments = {
'a': 'ite(b, c, d)',
'e': 'a___1',
'f': 'ite(a___2, a___3, a___4)',
}
expected_extra_assignments = {
'a___1': 'ite(b, c, d)',
'a___2': 'ite(b, c, d)',
'a___3': 'ite(b, c, d)',
'a___4': 'ite(b, c, d)'
}
self._run_test_helper(statements, expected_assignments, expected_extra_assignments)
def test_6(self):
name2var = {
'a' : Variable('a', boolType, Variable.LOCAL),
'b' : Variable('b', boolType, Variable.LOCAL),
'c' : Variable('c', boolType, Variable.LOCAL)
}
program = """
a := a AND b;
"""
expected_assignments = {
'a': '(a AND b)'
}
self._run_test(program, name2var, expected_assignments, {})
def test_7(self):
name2var = {
'a' : VariableOcc(Variable('a', boolType, Variable.LOCAL)),
'b' : VariableOcc(Variable('b', boolType, Variable.LOCAL)),
'c' : VariableOcc(Variable('c', boolType, Variable.LOCAL)),
}
statements = [
Assignment(name2var['a'], Ite(name2var['b'], name2var['a'], name2var['c']))
]
expected_assignments = {
'a': 'ite(b, a, c)',
}
self._run_test_helper(statements, expected_assignments, {})
if __name__ == "__main__":
unittest.main()
|
"""HTTP Navigation Manager."""
from configparser import ConfigParser
from typing import Dict, List
import logging
import random
from OSIx.core.base_module import BaseModule
from OSIx.core.http_manager import HttpNavigationManager
logger = logging.getLogger()
class HttpNavigationManagerHandler(BaseModule):
"""HTTP Navigation Manager."""
def run(self, config: ConfigParser, args: Dict, data: Dict) -> None:
"""Execute Module."""
# Check if Not have a Selected UA
if 'web_navigation' not in data or 'user_agent' not in data['web_navigation']:
data.update({'web_navigation': {'user_agent': self.__choose_ua(config)}})
# Initialize Internal Modules
HttpNavigationManager.init(data)
def __choose_ua(self, config: ConfigParser) -> str:
"""Choose a Random UA."""
if config:
ua_list: List[str] = config['WEB_NAVIGATION']['useragent_list'].split('\n')
return ua_list[random.Random().randint(1, len(ua_list) - 1)] # nosec
raise Exception("System Configuration Not Initialized")
|
#!/usr/bin/env python
import re
import yaml
def camel_to_snake(name):
""" Convert CamelCase names to snake_case """
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
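# Examples: camel_to_snake('CamelCase') -> 'camel_case',
#           camel_to_snake('HTTPServerError') -> 'http_server_error'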
with open('z.yaml') as yaml_file:
y = yaml.load(yaml_file, Loader=yaml.FullLoader)
for n in y:
s = camel_to_snake(n)
print(f"{n}:{s}\n{y[n]}")
file_name = f"{s}.yaml"
d = dict()
d[n] = y[n]
with open(file_name, 'w') as out:
yaml.dump(d, out, default_flow_style=False)
|
import random
guess_num = random.randint(1, 100)
playing = True
while(playing):
    guess_num = random.randint(1, 100)  # pick a fresh number each round
    difficulty = input(
        "What is your preference for difficulty\nhard or medium or easy\n>")
    if(difficulty == "h" or difficulty == "hard" or difficulty == "1"):
life = 5
elif(difficulty == "m" or difficulty == "medium" or difficulty == 2):
life = 7
else:
life = 10
while(True):
if(life < 1):
print("you ran out of life\n")
            if(input("Would you like to give it another go\n'y' for yes\n>") == "y"):
break
else:
playing = False
break
user_guess = int(input("what is your guess"))
if(user_guess > guess_num):
print("too high\n")
life -= 1
print(f"you lost a life,\n you have {life} remaining")
elif(user_guess < guess_num):
print("too low\n")
print(f"you lost a life,\n you have {life} remaining")
        else:
            print("you win\n")
            if(input("Would you like to give it another go\n'y' for yes\n>") == "y"):
                break
            else:
                playing = False
                break
print("thanx for playing :>")
|
import configparser
from pymongo import MongoClient
import os
class Properties:
def __init__(self):
pass
def load_properties():
# Read configuration properties
config = configparser.ConfigParser()
base_path = os.path.dirname(os.path.realpath(__file__))
config.read(os.path.join(base_path, 'properties.ini'))
properties = Properties()
properties.CHANNELS = config.get('general', 'channels')
properties.DATABASE_ENDPOINT = config.get('database', 'endpoint')
properties.SAPO_ENDPOINT = config.get('sapo', 'endpoint')
properties.SAPO_IMAGE = config.get('sapo', 'image')
properties.SAPO_NS = config.get('sapo', 'ns')
properties.OMDB_ENDPOINT = config.get('omdb', 'endpoint')
properties.OMDB_KEY = config.get('omdb', 'key')
properties.GOOGLE_ENDPOINT = config.get('google', 'endpoint')
properties.GOOGLE_KEY = config.get('google', 'key')
properties.GOOGLE_CX = config.get('google', 'cx')
return properties
CONFIG = load_properties()
def load_database():
connection = MongoClient(CONFIG.DATABASE_ENDPOINT)
return connection.dev
db = load_database()
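# Illustrative properties.ini layout; the section and key names mirror the
# config.get() calls above, the values are placeholders:
#
#   [general]
#   channels = channel_a,channel_b
#   [database]
#   endpoint = mongodb://localhost:27017/
#   [sapo]
#   endpoint = https://example.invalid/sapo
#   image = https://example.invalid/sapo/image
#   ns = sapo-namespace
#   [omdb]
#   endpoint = https://example.invalid/omdb
#   key = <omdb-api-key>
#   [google]
#   endpoint = https://example.invalid/google
#   key = <google-api-key>
#   cx = <custom-search-cx>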
|
import time
import board
from adafruit_pyportal import PyPortal
from adafruit_bitmap_font import bitmap_font
from adafruit_display_text.label import Label
import json
from secrets import secrets
import gc
# Set up where we'll be fetching data from. Presumes a JSON structure of:
# [{"status":"At home",
# "date":"- Monday, May 1, 9:54 PM",
# "graph":"https://host/graph.png"}]
DATA_SOURCE = secrets['pyportal_source']
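# `secrets` is the usual CircuitPython secrets.py dict; a minimal sketch of the
# entry this script relies on (URL is a placeholder):
#
#   secrets = {
#       'pyportal_source': 'https://example.invalid/status.json',
#       # ... plus the ssid/password entries PyPortal normally expects
#   }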
# Status graph details
image_json_path = [0, 'graph']
image_size = (297, 122)
image_position = (11, 0)
image_refresh_time = 3600
# Status text details
font_path = "/fonts/DejaVuSansMono-14.bdf"
text_wrap = 38
line_spacing = 0.75
status_dict = {'status': {'position': (5, 174),
'color': 0xffffff,
'length': 140,
'json_path': [0, 'status'],
'format': '{0}'},
'date': {'position': (5, 230),
'color': 0x4f29b4,
'length': text_wrap,
'json_path': [0, 'date'],
'format': '{{0: >{0}}}'.format(text_wrap)}
# "monospaced right justify"
}
status_refresh_time = 30
# Should be no need for modifications past this point.
# Initialize font
big_font = bitmap_font.load_font(font_path)
big_font.load_glyphs(b' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`1234567890-=~!@#$%^&*()_+[]{},./<>?;:\\|\'"')
# Initialize PyPortal
pyportal = PyPortal(debug=False,
url=DATA_SOURCE,
image_json_path=image_json_path,
image_resize=image_size,
image_position=image_position,
status_neopixel=board.NEOPIXEL,
)
# Initialize status text areas
text_areas = {}
for k, entry in status_dict.items():
pos = entry['position']
color = entry['color']
length = entry['length']
textarea = Label(big_font, text=length*' ')
textarea._line_spacing = line_spacing
textarea.x = pos[0]
textarea.y = pos[1]
textarea.color = color
pyportal.splash.append(textarea)
text_areas[k] = textarea
# Initialize looping and change conditions
refresh_time = None
raw_status_dict_old = {}
pyportal.get_local_time()
start_time = time.monotonic()
while True:
print("{0} since start".format(time.monotonic()-start_time))
# Periodically, sync time and grab a new image
if (not refresh_time) or (time.monotonic() - refresh_time) > image_refresh_time:
try:
print("Getting time from internet")
pyportal.get_local_time()
refresh_time = time.monotonic()
print("Grabbing new image")
value = pyportal.fetch()
print("Response is", value)
except RuntimeError as e:
print("Some error occured, retrying! -", e)
continue
# Every time, grab the status JSON and check for changes to text area contents
try:
gc.collect()
print("Grabbing new status")
json_file = DATA_SOURCE.split('/')[-1]
if pyportal._sdcard:
json_file = '/sd/' + json_file
pyportal.wget(url=DATA_SOURCE,
filename=json_file,
chunk_size=512)
with open(json_file, 'r') as f:
lines = f.readlines()
json_str = ' '.join(lines)
j = json.loads(json_str)
# Check JSON for changes to text area contents
raw_status_dict = {}
for k in status_dict.keys():
json_path = status_dict[k]['json_path']
json_traversed = pyportal._json_traverse(j, json_path)
text = '\n'.join(pyportal.wrap_nicely(json_traversed, text_wrap))
raw_status_dict[k] = text
if raw_status_dict == raw_status_dict_old:
print("No changes in json text")
else:
print("At least one thing changed in json text")
# Update changed text areas
for k, v in raw_status_dict.items():
if not(v == raw_status_dict_old.get(k)):
print("Status item '{0}' changed from '{1}' to '{2}'".format(k, raw_status_dict_old.get(k), v))
text_areas[k].text = status_dict[k]['format'].format(v)
# Update status dictionary for next iteration
raw_status_dict_old = raw_status_dict
except (RuntimeError, ValueError) as e:
print("Some error occured, retrying! -", e)
time.sleep(status_refresh_time) |
""" Defines the CloudNoiseModel class and supporting functions """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
import copy as _copy
import itertools as _itertools
import collections as _collections
import scipy.sparse as _sps
import warnings as _warnings
from . import operation as _op
from . import spamvec as _sv
from . import povm as _povm
from . import qubitgraph as _qgraph
from . import labeldicts as _ld
from . import opfactory as _opfactory
from ..tools import optools as _gt
from ..tools import basistools as _bt
from ..tools import internalgates as _itgs
from .implicitmodel import ImplicitOpModel as _ImplicitOpModel
from .layerlizard import ImplicitLayerLizard as _ImplicitLayerLizard
from .verbosityprinter import VerbosityPrinter as _VerbosityPrinter
from .basis import BuiltinBasis as _BuiltinBasis, ExplicitBasis as _ExplicitBasis
from .label import Label as _Lbl, CircuitLabel as _CircuitLabel
from ..tools.basisconstructors import sqrt2, id2x2, sigmax, sigmay, sigmaz
def _iter_basis_inds(weight):
""" Iterate over product of `weight` non-identity Pauli 1Q basis indices """
basisIndList = [[1, 2, 3]] * weight # assume pauli 1Q basis, and only iterate over non-identity els
for basisInds in _itertools.product(*basisIndList):
yield basisInds
def basisProductMatrix(sigmaInds, sparse):
""" Construct the Pauli product matrix from the given `sigmaInds` """
sigmaVec = (id2x2 / sqrt2, sigmax / sqrt2, sigmay / sqrt2, sigmaz / sqrt2)
M = _np.identity(1, 'complex')
for i in sigmaInds:
M = _np.kron(M, sigmaVec[i])
return _sps.csr_matrix(M) if sparse else M
class CloudNoiseModel(_ImplicitOpModel):
"""
A noisy n-qubit model using a low-weight and geometrically local
error model with a common "global idle" operation.
"""
@classmethod
def build_from_hops_and_weights(cls, nQubits, gate_names, nonstd_gate_unitaries=None,
custom_gates=None, availability=None,
qubit_labels=None, geometry="line",
maxIdleWeight=1, maxSpamWeight=1, maxhops=0,
extraWeight1Hops=0, extraGateWeight=0, sparse=False,
sim_type="auto", parameterization="H+S",
spamtype="lindblad", addIdleNoiseToAllGates=True,
errcomp_type="gates", independent_clouds=True, verbosity=0):
"""
Create a n-qubit model using a low-weight and geometrically local
error model with a common "global idle" operation.
This type of model is referred to as a "cloud noise" model because
noise specific to a gate may act on a neighborhood or cloud around
the gate's target qubits. This type of model is generally useful
for performing GST on a multi-qubit system.
Parameters
----------
nQubits : int
The number of qubits
gate_names : list
A list of string-type gate names (e.g. `"Gx"`) either taken from
the list of builtin "standard" gate names given above or from the
keys of `nonstd_gate_unitaries`. These are the typically 1- and 2-qubit
gates that are repeatedly embedded (based on `availability`) to form
the resulting model.
nonstd_gate_unitaries : dict, optional
A dictionary of numpy arrays which specifies the unitary gate action
of the gate names given by the dictionary's keys. As an advanced
behavior, a unitary-matrix-returning function which takes a single
argument - a tuple of label arguments - may be given instead of a
single matrix to create an operation *factory* which allows
continuously-parameterized gates. This function must also return
an empty/dummy unitary when `None` is given as its argument.
custom_gates : dict
A dictionary that associates with gate labels
:class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`
objects. These objects describe the full action of the gate or
primitive-layer they're labeled by (so if the model represents
states by density matrices these objects are superoperators, not
unitaries), and override any standard construction based on builtin
gate names or `nonstd_gate_unitaries`. Keys of this dictionary must
be string-type gate *names* -- they cannot include state space labels
-- and they must be *static* (have zero parameters) because they
represent only the ideal behavior of each gate -- the cloudnoise
operations represent the parameterized noise. To fine-tune how this
noise is parameterized, call the :class:`CloudNoiseModel` constructor
directly.
availability : dict, optional
A dictionary whose keys are the same gate names as in
`gatedict` and whose values are lists of qubit-label-tuples. Each
qubit-label-tuple must have length equal to the number of qubits
the corresponding gate acts upon, and causes that gate to be
embedded to act on the specified qubits. For example,
`{ 'Gx': [(0,),(1,),(2,)], 'Gcnot': [(0,1),(1,2)] }` would cause
the 1-qubit `'Gx'`-gate to be embedded three times, acting on qubits
0, 1, and 2, and the 2-qubit `'Gcnot'`-gate to be embedded twice,
acting on qubits 0 & 1 and 1 & 2. Instead of a list of tuples,
values of `availability` may take the special values:
- `"all-permutations"` and `"all-combinations"` equate to all possible
permutations and combinations of the appropriate number of qubit labels
(determined by the gate's dimension).
- `"all-edges"` equates to all the vertices, for 1Q gates, and all the
edges, for 2Q gates of the geometry.
- `"arbitrary"` or `"*"` means that the corresponding gate can be placed
on any target qubits via an :class:`EmbeddingOpFactory` (uses less
memory but slower than `"all-permutations"`).
If a gate name (a key of `gatedict`) is not present in `availability`,
the default is `"all-edges"`.
qubit_labels : tuple, optional
The circuit-line labels for each of the qubits, which can be integers
and/or strings. Must be of length `nQubits`. If None, then the
integers from 0 to `nQubits-1` are used.
geometry : {"line","ring","grid","torus"} or QubitGraph
The type of connectivity among the qubits, specifying a
graph used to define neighbor relationships. Alternatively,
a :class:`QubitGraph` object with node labels equal to
`qubit_labels` may be passed directly.
maxIdleWeight : int, optional
The maximum-weight for errors on the global idle gate.
maxSpamWeight : int, optional
The maximum-weight for SPAM errors when `spamtype == "lindblad"`.
maxhops : int
The locality constraint: for a gate, errors (of weight up to the
maximum weight for the gate) are allowed to occur on the gate's
target qubits and those reachable by hopping at most `maxhops` times
from a target qubit along nearest-neighbor links (defined by the
`geometry`).
extraWeight1Hops : int, optional
Additional hops (adds to `maxhops`) for weight-1 errors. A value > 0
can be useful for allowing just weight-1 errors (of which there are
relatively few) to be dispersed farther from a gate's target qubits.
For example, a crosstalk-detecting model might use this.
extraGateWeight : int, optional
Additional weight, beyond the number of target qubits (taken as a "base
weight" - i.e. weight 2 for a 2Q gate), allowed for gate errors. If
this equals 1, for instance, then 1-qubit gates can have up to weight-2
errors and 2-qubit gates can have up to weight-3 errors.
sparse : bool, optional
Whether the embedded Lindblad-parameterized gates within the constructed
`nQubits`-qubit gates are sparse or not. (This is determined by whether
they are constructed using sparse basis matrices.) When sparse, these
Lindblad gates take up less memory, but their action is slightly slower.
Usually it's fine to leave this as the default (False), except when
considering particularly high-weight terms (b/c then the Lindblad gates
are higher dimensional and sparsity has a significant impact).
sim_type : {"auto","matrix","map","termorder:<N>"}
The type of forward simulation (probability computation) to use for the
returned :class:`Model`. That is, how should the model compute
operation sequence/circuit probabilities when requested. `"matrix"` is better
for small numbers of qubits, `"map"` is better for larger numbers. The
`"termorder"` option is designed for even larger numbers. Usually,
the default of `"auto"` is what you want.
parameterization : {"P", "P terms", "P clifford terms"}
Where *P* can be any Lindblad parameterization base type (e.g. CPTP,
H+S+A, H+S, S, D, etc.) This is the type of parameterization to use in
the constructed model. Types without any "terms" suffix perform
usual density-matrix evolution to compute circuit probabilities. The
other "terms" options compute probabilities using a path-integral
approach designed for larger numbers of qubits (experts only).
spamtype : { "static", "lindblad", "tensorproduct" }
Specifies how the SPAM elements of the returned `Model` are formed.
Static elements are ideal (perfect) operations with no parameters, i.e.
no possibility for noise. Lindblad SPAM operations are the "normal"
way to allow SPAM noise, in which case error terms up to weight
`maxSpamWeight` are included. Tensor-product operations require that
the state prep and POVM effects have a tensor-product structure; the
"tensorproduct" mode exists for historical reasons and is *deprecated*
in favor of `"lindblad"`; use it only if you know what you're doing.
addIdleNoiseToAllGates: bool, optional
Whether the global idle should be added as a factor following the
ideal action of each of the non-idle gates.
errcomp_type : {"gates","errorgens"}
How errors are composed when creating layer operations in the created
model. `"gates"` means that the errors on multiple gates in a single
layer are composed as separate and subsequent processes. Specifically,
the layer operation has the form `Composed(target,idleErr,cloudErr)`
where `target` is a composition of all the ideal gate operations in the
layer, `idleErr` is idle error (`.operation_blks['layers']['globalIdle']`),
and `cloudErr` is the composition (ordered as layer-label) of cloud-
noise contributions, i.e. a map that acts as the product of exponentiated
error-generator matrices. `"errorgens"` means that layer operations
have the form `Composed(target, error)` where `target` is as above and
`error` results from composing the idle and cloud-noise error
*generators*, i.e. a map that acts as the exponentiated sum of error
generators (ordering is irrelevant in this case).
independent_clouds : bool, optional
Currently this must be set to True. In a future version, setting to
true will allow all the clouds of a given gate name to have a similar
cloud-noise process, mapped to the full qubit graph via a stencil.
verbosity : int, optional
An integer >= 0 dictating how much output to send to stdout.
"""
printer = _VerbosityPrinter.build_printer(verbosity)
if custom_gates is None: custom_gates = {}
if nonstd_gate_unitaries is None: nonstd_gate_unitaries = {}
std_unitaries = _itgs.get_standard_gatename_unitaries()
#Get evotype
_, evotype = _gt.split_lindblad_paramtype(parameterization)
assert(evotype in ("densitymx", "svterm", "cterm")), "State-vector evolution types not allowed."
gatedict = _collections.OrderedDict()
for name in gate_names:
if name in custom_gates:
gatedict[name] = custom_gates[name]
else:
U = nonstd_gate_unitaries.get(name, std_unitaries.get(name, None))
if U is None:
raise KeyError("'%s' gate unitary needs to be provided by `nonstd_gate_unitaries` arg" % name)
if callable(U): # then assume a function: args -> unitary
U0 = U(None) # U fns must return a sample unitary when passed None to get size.
gatedict[name] = _opfactory.UnitaryOpFactory(U, U0.shape[0], evotype=evotype)
else:
gatedict[name] = _bt.change_basis(_gt.unitary_to_process_mx(U), "std", "pp")
# assume evotype is a densitymx or term type
#Add anything from custom_gates directly if it wasn't added already
for lbl, gate in custom_gates.items():
if lbl not in gate_names: gatedict[lbl] = gate
if qubit_labels is None:
qubit_labels = tuple(range(nQubits))
if not independent_clouds:
raise NotImplementedError("Non-independent noise clouds are not supported yet!")
if isinstance(geometry, _qgraph.QubitGraph):
qubitGraph = geometry
else:
qubitGraph = _qgraph.QubitGraph.common_graph(nQubits, geometry, directed=False,
qubit_labels=qubit_labels)
printer.log("Created qubit graph:\n" + str(qubitGraph))
#Process "auto" sim_type
if sim_type == "auto":
if evotype in ("svterm", "cterm"): sim_type = "termorder:1"
else: sim_type = "map" if nQubits > 2 else "matrix"
assert(sim_type in ("matrix", "map") or sim_type.startswith("termorder") or sim_type.startswith("termgap"))
#Global Idle
if maxIdleWeight > 0:
printer.log("Creating Idle:")
global_idle_layer = _build_nqn_global_noise(
qubitGraph, maxIdleWeight, sparse,
sim_type, parameterization, errcomp_type, printer - 1)
else:
global_idle_layer = None
#SPAM
if spamtype == "static" or maxSpamWeight == 0:
if maxSpamWeight > 0:
_warnings.warn(("`spamtype == 'static'` ignores the supplied "
"`maxSpamWeight=%d > 0`") % maxSpamWeight)
prep_layers = [_sv.ComputationalSPAMVec([0] * nQubits, evotype)]
povm_layers = {'Mdefault': _povm.ComputationalBasisPOVM(nQubits, evotype)}
elif spamtype == "tensorproduct":
_warnings.warn("`spamtype == 'tensorproduct'` is deprecated!")
basis1Q = _BuiltinBasis("pp", 4)
prep_factors = []; povm_factors = []
from ..construction import basis_build_vector
v0 = basis_build_vector("0", basis1Q)
v1 = basis_build_vector("1", basis1Q)
# Historical use of TP for non-term-based cases?
# - seems we could remove this. FUTURE REMOVE?
povmtyp = rtyp = "TP" if parameterization in \
("CPTP", "H+S", "S", "H+S+A", "S+A", "H+D+A", "D+A", "D") \
else parameterization
for i in range(nQubits):
prep_factors.append(
_sv.convert(_sv.StaticSPAMVec(v0), rtyp, basis1Q))
povm_factors.append(
_povm.convert(_povm.UnconstrainedPOVM(([
('0', _sv.StaticSPAMVec(v0)),
('1', _sv.StaticSPAMVec(v1))])), povmtyp, basis1Q))
prep_layers = [_sv.TensorProdSPAMVec('prep', prep_factors)]
povm_layers = {'Mdefault': _povm.TensorProdPOVM(povm_factors)}
elif spamtype == "lindblad":
prepPure = _sv.ComputationalSPAMVec([0] * nQubits, evotype)
prepNoiseMap = _build_nqn_global_noise(
qubitGraph, maxSpamWeight, sparse, sim_type, parameterization, errcomp_type, printer - 1)
prep_layers = [_sv.LindbladSPAMVec(prepPure, prepNoiseMap, "prep")]
povmNoiseMap = _build_nqn_global_noise(
qubitGraph, maxSpamWeight, sparse, sim_type, parameterization, errcomp_type, printer - 1)
povm_layers = {'Mdefault': _povm.LindbladPOVM(povmNoiseMap, None, "pp")}
else:
raise ValueError("Invalid `spamtype` argument: %s" % spamtype)
weight_maxhops_tuples_1Q = [(1, maxhops + extraWeight1Hops)] + \
[(1 + x, maxhops) for x in range(1, extraGateWeight + 1)]
cloud_maxhops_1Q = max([mx for wt, mx in weight_maxhops_tuples_1Q]) # max of max-hops
weight_maxhops_tuples_2Q = [(1, maxhops + extraWeight1Hops), (2, maxhops)] + \
[(2 + x, maxhops) for x in range(1, extraGateWeight + 1)]
cloud_maxhops_2Q = max([mx for wt, mx in weight_maxhops_tuples_2Q]) # max of max-hops
def build_cloudnoise_fn(lbl):
gate_nQubits = len(lbl.sslbls)
if gate_nQubits not in (1, 2):
raise ValueError("Only 1- and 2-qubit gates are supported. %s acts on %d qubits!"
% (str(lbl.name), gate_nQubits))
weight_maxhops_tuples = weight_maxhops_tuples_1Q if len(lbl.sslbls) == 1 else weight_maxhops_tuples_2Q
return _build_nqn_cloud_noise(
lbl.sslbls, qubitGraph, weight_maxhops_tuples,
errcomp_type=errcomp_type, sparse=sparse, sim_type=sim_type,
parameterization=parameterization, verbosity=printer - 1)
def build_cloudkey_fn(lbl):
cloud_maxhops = cloud_maxhops_1Q if len(lbl.sslbls) == 1 else cloud_maxhops_2Q
cloud_inds = tuple(qubitGraph.radius(lbl.sslbls, cloud_maxhops))
cloud_key = (tuple(lbl.sslbls), tuple(sorted(cloud_inds))) # (sets are unhashable)
return cloud_key
return cls(nQubits, gatedict, availability, qubit_labels, geometry,
global_idle_layer, prep_layers, povm_layers,
build_cloudnoise_fn, build_cloudkey_fn,
sim_type, evotype, errcomp_type,
addIdleNoiseToAllGates, sparse, printer)
def __init__(self, nQubits, gatedict, availability=None,
qubit_labels=None, geometry="line",
global_idle_layer=None, prep_layers=None, povm_layers=None,
build_cloudnoise_fn=None, build_cloudkey_fn=None,
sim_type="map", evotype="densitymx", errcomp_type="gates",
addIdleNoiseToAllGates=True, sparse=False, verbosity=0):
"""
Create a n-qubit model using a low-weight and geometrically local
error model with a common "global idle" operation.
This constructor relies on factory functions being passed to it
which generate the cloud-noise operators - noise that is specific
to a gate but may act on a neighborhood or cloud around the gate's
target qubits.
Parameters
----------
nQubits : int
The number of qubits
gatedict : dict
A dictionary (an `OrderedDict` if you care about insertion order) that
associates with string-type gate names (e.g. `"Gx"`) :class:`LinearOperator`,
`numpy.ndarray`, or :class:`OpFactory` objects. When the objects may act on
fewer than the total number of qubits (determined by their dimension/shape) then
they are repeatedly embedded into `nQubits`-qubit gates as specified by their
`availability`. These operations represent the ideal target operations, and
thus, any `LinearOperator` or `OpFactory` objects must be *static*, i.e., have
zero parameters.
availability : dict, optional
A dictionary whose keys are the same gate names as in
`gatedict` and whose values are lists of qubit-label-tuples. Each
qubit-label-tuple must have length equal to the number of qubits
the corresponding gate acts upon, and causes that gate to be
embedded to act on the specified qubits. For example,
`{ 'Gx': [(0,),(1,),(2,)], 'Gcnot': [(0,1),(1,2)] }` would cause
the 1-qubit `'Gx'`-gate to be embedded three times, acting on qubits
0, 1, and 2, and the 2-qubit `'Gcnot'`-gate to be embedded twice,
acting on qubits 0 & 1 and 1 & 2. Instead of a list of tuples,
values of `availability` may take the special values:
- `"all-permutations"` and `"all-combinations"` equate to all possible
permutations and combinations of the appropriate number of qubit labels
(determined by the gate's dimension).
- `"all-edges"` equates to all the vertices, for 1Q gates, and all the
edges, for 2Q gates of the geometry.
- `"arbitrary"` or `"*"` means that the corresponding gate can be placed
on any target qubits via an :class:`EmbeddingOpFactory` (uses less
memory but slower than `"all-permutations"`).
If a gate name (a key of `gatedict`) is not present in `availability`,
the default is `"all-edges"`.
qubit_labels : tuple, optional
The circuit-line labels for each of the qubits, which can be integers
and/or strings. Must be of length `nQubits`. If None, then the
integers from 0 to `nQubits-1` are used.
geometry : {"line","ring","grid","torus"} or QubitGraph
The type of connectivity among the qubits, specifying a
graph used to define neighbor relationships. Alternatively,
a :class:`QubitGraph` object with node labels equal to
`qubit_labels` may be passed directly.
global_idle_layer : LinearOperator
A global idle operation which acts on all the qubits and
is, if `addIdleNoiseToAllGates=True`, composed with the
actions of specific gates to form the layer operation of
any circuit layer.
prep_layers, povm_layers : None or operator or dict or list, optional
The SPAM operations as n-qubit layer operations. If `None`, then
no preps (or POVMs) are created. If a dict, then the keys are
labels and the values are layer operators. If a list, then the
elements are layer operators and the labels will be assigned as
"rhoX" and "MX" where X is an integer starting at 0. If a single
layer operation is given, then this is used as the sole prep or
POVM and is assigned the label "rho0" or "Mdefault" respectively.
build_cloudnoise_fn : function, optional
A function which takes a single :class:`Label` as an argument and
returns the cloud-noise operation for that primitive layer
operation. Note that if `errcomp_type="gates"` the returned
operator should be a superoperator whereas if
`errcomp_type="errorgens"` then the returned operator should be
an error generator (not yet exponentiated).
build_cloudkey_fn : function, optional
A function which takes a single :class:`Label` as an argument and
returns a "cloud key" for that primitive layer. The "cloud" is the
set of qubits that the error (the operator returned from
`build_cloudnoise_fn`) touches -- and the "key" returned from this
function is meant to identify that cloud. This is used to keep track
of which primitive layer-labels correspond to the same cloud - e.g.
the cloud-key for ("Gx",2) and ("Gy",2) might be the same and could
be processed together when selecting sequences that amplify the parameters
in the cloud-noise operations for these two labels. The return value
should be something hashable with the property that two noise
operations which act on the same qubits should have the same cloud key.
sim_type : {"matrix","map","termorder:<N>"}
The type of forward simulation (probability computation) to use for the
returned :class:`Model`. That is, how should the model compute
operation sequence/circuit probabilities when requested. `"matrix"` is better
for small numbers of qubits, `"map"` is better for larger numbers. The
`"termorder"` option is designed for even larger numbers. Usually,
the default of `"auto"` is what you want.
evotype : {"densitymx","statevec","stabilizer","svterm","cterm"}
The evolution type.
errcomp_type : {"gates","errorgens"}
How errors are composed when creating layer operations in the created
model. `"gates"` means that the errors on multiple gates in a single
layer are composed as separate and subsequent processes. Specifically,
the layer operation has the form `Composed(target,idleErr,cloudErr)`
where `target` is a composition of all the ideal gate operations in the
layer, `idleErr` is idle error (`.operation_blks['layers']['globalIdle']`),
and `cloudErr` is the composition (ordered as layer-label) of cloud-
noise contributions, i.e. a map that acts as the product of exponentiated
error-generator matrices. `"errorgens"` means that layer operations
have the form `Composed(target, error)` where `target` is as above and
`error` results from composing the idle and cloud-noise error
*generators*, i.e. a map that acts as the exponentiated sum of error
generators (ordering is irrelevant in this case).
addIdleNoiseToAllGates: bool, optional
Whether the global idle should be added as a factor following the
ideal action of each of the non-idle gates.
sparse : bool, optional
Whether embedded Lindblad-parameterized gates within the constructed
`nQubits`-qubit gates are sparse or not.
verbosity : int, optional
An integer >= 0 dictating how much output to send to stdout.
"""
if qubit_labels is None:
qubit_labels = tuple(range(nQubits))
if availability is None:
availability = {}
# Build gate dictionaries. A value of `gatedict` can be an array, a LinearOperator, or an OpFactory.
# For later processing, we'll create mm_gatedict to contain each item as a ModelMember. For cloud-
# noise models, these gate operations should be *static* (no parameters) as they represent the target
# operations and all noise (and parameters) are assumed to enter through the cloudnoise members.
StaticDenseOp = _get_Static_factory(sim_type, evotype) # always a *gate*
mm_gatedict = _collections.OrderedDict() # static *target* ops as ModelMembers
#REMOVE self.gatedict = _collections.OrderedDict() # static *target* ops (unused) as numpy arrays
for gn, gate in gatedict.items():
if isinstance(gate, _op.LinearOperator):
assert(gate.num_params() == 0), "Only *static* ideal operators are allowed in `gatedict`!"
#REMOVE self.gatedict[gn] = gate.todense()
if gate._evotype != evotype and isinstance(gate, _op.StaticDenseOp):
# special case: we'll convert static ops to the right evotype (convenient)
mm_gatedict[gn] = StaticDenseOp(gate, "pp")
else:
mm_gatedict[gn] = gate
elif isinstance(gate, _opfactory.OpFactory):
assert(gate.num_params() == 0), "Only *static* ideal factories are allowed in `gatedict`!"
# don't store factories in self.gatedict for now (no good dense representation)
mm_gatedict[gn] = gate
else: # presumably a numpy array or something like it:
#REMOVE self.gatedict[gn] = _np.array(gate)
mm_gatedict[gn] = StaticDenseOp(gate, "pp")
assert(mm_gatedict[gn]._evotype == evotype)
#Set other members
self.nQubits = nQubits
self.availability = availability
self.qubit_labels = qubit_labels
self.geometry = geometry
#TODO REMOVE unneeded members
#self.maxIdleWeight = maxIdleWeight
#self.maxSpamWeight = maxSpamWeight
#self.maxhops = maxhops
#self.extraWeight1Hops = extraWeight1Hops
#self.extraGateWeight = extraGateWeight
self.sparse = sparse
#self.parameterization = parameterization
#self.spamtype = spamtype
self.addIdleNoiseToAllGates = addIdleNoiseToAllGates
self.errcomp_type = errcomp_type
#REMOVE
##Process "auto" sim_type
#_, evotype = _gt.split_lindblad_paramtype(parameterization)
#assert(evotype in ("densitymx", "svterm", "cterm")), "State-vector evolution types not allowed."
#if sim_type == "auto":
# if evotype in ("svterm", "cterm"): sim_type = "termorder:1"
# else: sim_type = "map" if nQubits > 2 else "matrix"
assert(sim_type in ("matrix", "map") or sim_type.startswith("termorder") or sim_type.startswith("termgap"))
qubit_dim = 2 if evotype in ('statevec', 'stabilizer') else 4
if not isinstance(qubit_labels, _ld.StateSpaceLabels): # allow user to specify a StateSpaceLabels object
qubit_sslbls = _ld.StateSpaceLabels(qubit_labels, (qubit_dim,) * len(qubit_labels), evotype=evotype)
else:
qubit_sslbls = qubit_labels
qubit_labels = [lbl for lbl in qubit_sslbls.labels[0] if qubit_sslbls.labeldims[lbl] == qubit_dim]
#Only extract qubit labels from the first tensor-product block...
if global_idle_layer is None:
self.addIdleNoiseToAllGates = False # there is no idle noise to add!
lizardArgs = {'add_idle_noise': self.addIdleNoiseToAllGates,
'errcomp_type': errcomp_type, 'dense_rep': not sparse}
super(CloudNoiseModel, self).__init__(qubit_sslbls, "pp", {}, CloudNoiseLayerLizard,
lizardArgs, sim_type=sim_type, evotype=evotype)
flags = {'auto_embed': False, 'match_parent_dim': False,
'match_parent_evotype': True, 'cast_to_type': None}
self.prep_blks['layers'] = _ld.OrderedMemberDict(self, None, None, flags)
self.povm_blks['layers'] = _ld.OrderedMemberDict(self, None, None, flags)
self.operation_blks['layers'] = _ld.OrderedMemberDict(self, None, None, flags)
self.operation_blks['gates'] = _ld.OrderedMemberDict(self, None, None, flags)
self.operation_blks['cloudnoise'] = _ld.OrderedMemberDict(self, None, None, flags)
self.instrument_blks['layers'] = _ld.OrderedMemberDict(self, None, None, flags)
self.factories['layers'] = _ld.OrderedMemberDict(self, None, None, flags)
self.factories['gates'] = _ld.OrderedMemberDict(self, None, None, flags)
self.factories['cloudnoise'] = _ld.OrderedMemberDict(self, None, None, flags)
printer = _VerbosityPrinter.build_printer(verbosity)
geometry_name = "custom" if isinstance(geometry, _qgraph.QubitGraph) else geometry
printer.log("Creating a %d-qubit local-noise %s model" % (nQubits, geometry_name))
if isinstance(geometry, _qgraph.QubitGraph):
qubitGraph = geometry
else:
qubitGraph = _qgraph.QubitGraph.common_graph(nQubits, geometry, directed=False,
qubit_labels=qubit_labels)
printer.log("Created qubit graph:\n" + str(qubitGraph))
if global_idle_layer is None:
pass
elif callable(global_idle_layer):
self.operation_blks['layers'][_Lbl('globalIdle')] = global_idle_layer()
else:
self.operation_blks['layers'][_Lbl('globalIdle')] = global_idle_layer
# a dictionary of "cloud" objects
# keys = cloud identifiers, e.g. (target_qubit_indices, cloud_qubit_indices) tuples
# values = list of gate-labels giving the gates (primitive layers?) associated with that cloud (necessary?)
self.clouds = _collections.OrderedDict()
#Get gates availability
primitive_ops = []
gates_and_avail = _collections.OrderedDict()
for gateName, gate in mm_gatedict.items(): # gate is a static ModelMember (op or factory)
gate_nQubits = int(round(_np.log2(gate.dim) / 2)) if (evotype in ("densitymx", "svterm", "cterm")) \
else int(round(_np.log2(gate.dim))) # evotype in ("statevec","stabilizer")
availList = self.availability.get(gateName, 'all-edges')
if availList == 'all-combinations':
availList = list(_itertools.combinations(qubit_labels, gate_nQubits))
elif availList == 'all-permutations':
availList = list(_itertools.permutations(qubit_labels, gate_nQubits))
elif availList == 'all-edges':
if gate_nQubits == 1:
availList = [(i,) for i in qubit_labels]
elif gate_nQubits == 2:
availList = qubitGraph.edges(double_for_undirected=True)
else:
raise NotImplementedError(("I don't know how to place a %d-qubit gate "
"on graph edges yet") % gate_nQubits)
elif availList in ('arbitrary', '*'):
availList = [('*', gate_nQubits)] # let a factory determine what's "available"
self.availability[gateName] = tuple(availList)
gates_and_avail[gateName] = (gate, availList)
ssAllQ = qubit_sslbls # labls should also be node-names of qubitGraph
EmbeddedDenseOp = _op.EmbeddedDenseOp if sim_type == "matrix" else _op.EmbeddedOp
for gn, (gate, availList) in gates_and_avail.items():
#Note: gate was taken from mm_gatedict, and so is a static op or factory
gate_is_factory = isinstance(gate, _opfactory.OpFactory)
if gate_is_factory:
self.factories['gates'][_Lbl(gn)] = gate
else:
self.operation_blks['gates'][_Lbl(gn)] = gate
for inds in availList: # inds are target qubit labels
#Target operation
if inds[0] == '*':
printer.log("Creating %dQ %s gate on arbitrary qubits!!" % (inds[1], gn))
self.factories['layers'][_Lbl(gn)] = _opfactory.EmbeddingOpFactory(
ssAllQ, gate, dense=bool(sim_type == "matrix"), num_target_labels=inds[1])
# add any primitive ops for this embedding factory?
else:
printer.log("Creating %dQ %s gate on qubits %s!!" % (len(inds), gn, inds))
assert(_Lbl(gn, inds) not in gatedict), \
("Cloudnoise models do not accept primitive-op labels, e.g. %s, in `gatedict` as this dict "
"specfies the ideal target gates. Perhaps make the cloudnoise depend on the target qubits "
"of the %s gate?") % (str(_Lbl(gn, inds)), gn)
if gate_is_factory:
self.factories['layers'][_Lbl(gn, inds)] = _opfactory.EmbeddedOpFactory(
ssAllQ, inds, gate, dense=bool(sim_type == "matrix"))
# add any primitive ops for this factory?
else:
self.operation_blks['layers'][_Lbl(gn, inds)] = EmbeddedDenseOp(
ssAllQ, inds, gate)
primitive_ops.append(_Lbl(gn, inds))
#Cloudnoise operation
if build_cloudnoise_fn is not None:
if inds[0] == '*':
cloudnoise = build_cloudnoise_fn(_Lbl(gn))
assert(isinstance(cloudnoise, _opfactory.EmbeddingOpFactory)), \
("`build_cloudnoise_fn` must return an EmbeddingOpFactory for gate %s"
" with arbitrary availability") % gn
self.factories['cloudnoise'][_Lbl(gn)] = cloudnoise
else:
cloudnoise = build_cloudnoise_fn(_Lbl(gn, inds))
if isinstance(cloudnoise, _opfactory.OpFactory):
self.factories['cloudnoise'][_Lbl(gn, inds)] = cloudnoise
else:
self.operation_blks['cloudnoise'][_Lbl(gn, inds)] = cloudnoise
#REMOVE
#_build_nqn_cloud_noise(
# (i,), qubitGraph, weight_maxhops_tuples_1Q,
# errcomp_type=errcomp_type, sparse=sparse, sim_type=sim_type,
# parameterization=parameterization, verbosity=printer - 1)
#cloud_inds = tuple(qubitGraph.radius((i,), cloud_maxhops))
#cloud_key = ((i,), tuple(sorted(cloud_inds))) # (sets are unhashable)
if inds[0] != '*' and build_cloudkey_fn is not None:
# TODO: is there any way to get a default "key", e.g. the
# qubits touched by the corresponding cloudnoise op?
# need a way to identify a cloud (e.g. Gx and Gy gates on some qubit will have the *same* cloud)
cloud_key = build_cloudkey_fn(_Lbl(gn, inds))
if cloud_key not in self.clouds: self.clouds[cloud_key] = []
self.clouds[cloud_key].append(_Lbl(gn, inds))
#keep track of the primitive-layer labels in each cloud,
# used to specify which gate parameters should be amplifiable by germs for a given cloud (?) TODO CHECK
#SPAM (same as for local noise model)
if prep_layers is None:
pass # no prep layers
elif isinstance(prep_layers, dict):
for rhoname, layerop in prep_layers.items():
self.prep_blks['layers'][_Lbl(rhoname)] = layerop
elif isinstance(prep_layers, _op.LinearOperator): # just a single layer op
self.prep_blks['layers'][_Lbl('rho0')] = prep_layers
else: # assume prep_layers is an iterable of layers, e.g. isinstance(prep_layers, (list,tuple)):
for i, layerop in enumerate(prep_layers):
self.prep_blks['layers'][_Lbl("rho%d" % i)] = layerop
if povm_layers is None:
pass # no povms
elif isinstance(povm_layers, _povm.POVM): # just a single povm - must precede 'dict' test!
self.povm_blks['layers'][_Lbl('Mdefault')] = povm_layers
elif isinstance(povm_layers, dict):
for povmname, layerop in povm_layers.items():
self.povm_blks['layers'][_Lbl(povmname)] = layerop
else: # assume povm_layers is an iterable of layers, e.g. isinstance(povm_layers, (list,tuple)):
for i, layerop in enumerate(povm_layers):
self.povm_blks['layers'][_Lbl("M%d" % i)] = layerop
#REMOVE
#if spamtype == "static" or maxSpamWeight == 0:
# if maxSpamWeight > 0:
# _warnings.warn(("`spamtype == 'static'` ignores the supplied "
# "`maxSpamWeight=%d > 0`") % maxSpamWeight)
# self.prep_blks['layers'][_Lbl('rho0')] = _sv.ComputationalSPAMVec([0] * nQubits, evotype)
# self.povm_blks['layers'][_Lbl('Mdefault')] = _povm.ComputationalBasisPOVM(nQubits, evotype)
#
#elif spamtype == "tensorproduct":
#
# _warnings.warn("`spamtype == 'tensorproduct'` is deprecated!")
# basis1Q = _BuiltinBasis("pp", 4)
# prep_factors = []; povm_factors = []
#
# from ..construction import basis_build_vector
#
# v0 = basis_build_vector("0", basis1Q)
# v1 = basis_build_vector("1", basis1Q)
#
# # Historical use of TP for non-term-based cases?
# # - seems we could remove this. FUTURE REMOVE?
# povmtyp = rtyp = "TP" if parameterization in \
# ("CPTP", "H+S", "S", "H+S+A", "S+A", "H+D+A", "D+A", "D") \
# else parameterization
#
# for i in range(nQubits):
# prep_factors.append(
# _sv.convert(_sv.StaticSPAMVec(v0), rtyp, basis1Q))
# povm_factors.append(
# _povm.convert(_povm.UnconstrainedPOVM(([
# ('0', _sv.StaticSPAMVec(v0)),
# ('1', _sv.StaticSPAMVec(v1))])), povmtyp, basis1Q))
#
# # # Noise logic refactored from construction.nqnoiseconstruction.build_nqnoise_model
# # if prepNoise is not None:
# # if isinstance(prepNoise,tuple): # use as (seed, strength)
# # seed,strength = prepNoise
# # rndm = _np.random.RandomState(seed)
# # depolAmts = _np.abs(rndm.random_sample(nQubits)*strength)
# # else:
# # depolAmts = prepNoise[0:nQubits]
# # for amt,vec in zip(depolAmts,prep_factors): vec.depolarize(amt)
#
# # if povmNoise is not None:
# # if isinstance(povmNoise,tuple): # use as (seed, strength)
# # seed,strength = povmNoise
# # rndm = _np.random.RandomState(seed)
# # depolAmts = _np.abs(rndm.random_sample(nQubits)*strength)
# # else:
# # depolAmts = povmNoise[0:nQubits]
# # for amt,povm in zip(depolAmts,povm_factors): povm.depolarize(amt)
#
# self.prep_blks['layers'][_Lbl('rho0')] = _sv.TensorProdSPAMVec('prep', prep_factors)
# self.povm_blks['layers'][_Lbl('Mdefault')] = _povm.TensorProdPOVM(povm_factors)
#
#elif spamtype == "lindblad":
#
# prepPure = _sv.ComputationalSPAMVec([0] * nQubits, evotype)
# prepNoiseMap = _build_nqn_global_noise(
# qubitGraph, maxSpamWeight, sparse, sim_type, parameterization, errcomp_type, printer - 1)
# self.prep_blks['layers'][_Lbl('rho0')] = _sv.LindbladSPAMVec(prepPure, prepNoiseMap, "prep")
#
# povmNoiseMap = _build_nqn_global_noise(
# qubitGraph, maxSpamWeight, sparse, sim_type, parameterization, errcomp_type, printer - 1)
# self.povm_blks['layers'][_Lbl('Mdefault')] = _povm.LindbladPOVM(povmNoiseMap, None, "pp")
#
#else:
# raise ValueError("Invalid `spamtype` argument: %s" % spamtype)
self.set_primitive_op_labels(primitive_ops)
self.set_primitive_prep_labels(tuple(self.prep_blks['layers'].keys()))
self.set_primitive_povm_labels(tuple(self.povm_blks['layers'].keys()))
#(no instruments)
printer.log("DONE! - created Model with dim=%d and op-blks=" % self.dim)
for op_blk_lbl, op_blk in self.operation_blks.items():
printer.log(" %s: %s" % (op_blk_lbl, ', '.join(map(str, op_blk.keys()))))
def get_clouds(self):
"""
Returns the set of cloud-sets used when creating sequences which
amplify the parameters of this model.
"""
return self.clouds
def _get_Lindblad_factory(sim_type, parameterization, errcomp_type):
""" Returns a function that creates a Lindblad-type gate appropriate
given the simulation type and parameterization """
_, evotype = _gt.split_lindblad_paramtype(parameterization)
if errcomp_type == "gates":
if evotype == "densitymx":
cls = _op.LindbladDenseOp if sim_type == "matrix" \
else _op.LindbladOp
elif evotype in ("svterm", "cterm"):
assert(sim_type.startswith("termorder"))
cls = _op.LindbladOp
else:
raise ValueError("Cannot create Lindblad gate factory for ", sim_type, parameterization)
#Just call cls.from_operation_matrix with appropriate evotype
def _f(opMatrix, # unitaryPostfactor=None,
proj_basis="pp", mxBasis="pp", relative=False):
unitaryPostfactor = None # we never use this in gate construction
p = parameterization
if relative:
if parameterization == "CPTP": p = "GLND"
elif "S" in parameterization: p = parameterization.replace("S", "s")
elif "D" in parameterization: p = parameterization.replace("D", "d")
return cls.from_operation_obj(opMatrix, p, unitaryPostfactor,
proj_basis, mxBasis, truncate=True)
return _f
elif errcomp_type == "errorgens":
def _f(errorGen,
proj_basis="pp", mxBasis="pp", relative=False):
p = parameterization
if relative:
if parameterization == "CPTP": p = "GLND"
elif "S" in parameterization: p = parameterization.replace("S", "s")
elif "D" in parameterization: p = parameterization.replace("D", "d")
_, evotype, nonham_mode, param_mode = _op.LindbladOp.decomp_paramtype(p)
return _op.LindbladErrorgen.from_error_generator(errorGen, proj_basis, proj_basis,
param_mode, nonham_mode, mxBasis,
truncate=True, evotype=evotype)
return _f
else: raise ValueError("Invalid `errcomp_type`: %s" % errcomp_type)
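# Usage sketch for the factory returned above (illustrative; mirrors how
# `Lindblad(...)` is called in _build_nqn_global_noise / _build_nqn_cloud_noise):
#
#   Lindblad = _get_Lindblad_factory("matrix", "H+S", "gates")
#   termErr = Lindblad(_np.identity(4, 'd'), proj_basis="pp", mxBasis="pp")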
def _get_Static_factory(sim_type, evotype):
""" Returns a function that creates a static-type gate appropriate
given the simulation and parameterization """
if evotype == "densitymx":
if sim_type == "matrix":
return lambda g, b: _op.StaticDenseOp(g, evotype)
elif sim_type == "map":
return lambda g, b: _op.StaticDenseOp(g, evotype) # TODO: create StaticGateMap?
elif evotype in ("svterm", "cterm"):
assert(sim_type.startswith("termorder") or sim_type.startswith("termgap"))
def _f(opMatrix, mxBasis="pp"):
return _op.LindbladOp.from_operation_matrix(
None, opMatrix, None, None, mxBasis=mxBasis, evotype=evotype)
# a LindbladDenseOp with None as ham_basis and nonham_basis => no parameters
return _f
raise ValueError("Cannot create Static gate factory for ", sim_type, evotype)
def _build_nqn_global_noise(qubitGraph, maxWeight, sparse=False, sim_type="matrix",
parameterization="H+S", errcomp_type="gates", verbosity=0):
"""
Create a "global" idle gate, meaning one that acts on all the qubits in
`qubitGraph`. The gate will have up to `maxWeight` errors on *connected*
(via the graph) sets of qubits.
Parameters
----------
qubitGraph : QubitGraph
A graph giving the geometry (nearest-neighbor relations) of the qubits.
maxWeight : int
The maximum weight errors to include in the resulting gate.
sparse : bool, optional
Whether the embedded Lindblad-parameterized gates within the constructed
gate are represented as sparse or dense matrices. (This is determined by
whether they are constructed using sparse basis matrices.)
sim_type : {"matrix","map","termorder:<N>"}
The type of forward simulation (probability computation) being used by
the model this gate is destined for. This affects what type of
gate objects (e.g. `ComposedDenseOp` vs `ComposedOp`) are created.
parameterization : str
The type of parameterization for the constructed gate. E.g. "H+S",
"H+S terms", "H+S clifford terms", "CPTP", etc.
errcomp_type : {"gates","errorgens"}
How errors are composed when creating layer operations in the associated
model. See :method:`CloudnoiseModel.__init__` for details.
verbosity : int, optional
An integer >= 0 dictating how much output to send to stdout.
Returns
-------
LinearOperator
"""
assert(maxWeight <= 2), "Only `maxWeight` equal to 0, 1, or 2 is supported"
if errcomp_type == "gates":
if sim_type == "matrix":
Composed = _op.ComposedDenseOp
Embedded = _op.EmbeddedDenseOp
else:
Composed = _op.ComposedOp
Embedded = _op.EmbeddedOp
elif errcomp_type == "errorgens":
Composed = _op.ComposedErrorgen
Embedded = _op.EmbeddedErrorgen
else: raise ValueError("Invalid `errcomp_type`: %s" % errcomp_type)
Lindblad = _get_Lindblad_factory(sim_type, parameterization, errcomp_type)
#constructs a gate or errorgen based on value of errcomp_type
printer = _VerbosityPrinter.build_printer(verbosity)
printer.log("*** Creating global idle ***")
termops = [] # gates or error generators to compose
qubit_labels = qubitGraph.get_node_names()
qubit_dim = 4 # cloud noise models always use density matrices, so not '2' here
ssAllQ = _ld.StateSpaceLabels(qubit_labels, (qubit_dim,) * len(qubit_labels))
nQubits = qubitGraph.nqubits
possible_err_qubit_inds = _np.arange(nQubits)
nPossible = nQubits
for wt in range(1, maxWeight + 1):
printer.log("Weight %d: %d possible qubits" % (wt, nPossible), 2)
basisEl_Id = basisProductMatrix(_np.zeros(wt, _np.int64), sparse)
if errcomp_type == "gates":
wtNoErr = _sps.identity(4**wt, 'd', 'csr') if sparse else _np.identity(4**wt, 'd')
elif errcomp_type == "errorgens":
wtNoErr = _sps.csr_matrix((4**wt, 4**wt)) if sparse else _np.zeros((4**wt, 4**wt), 'd')
else: raise ValueError("Invalid `errcomp_type`: %s" % errcomp_type)
wtBasis = _BuiltinBasis('pp', 4**wt, sparse=sparse)
for err_qubit_inds in _itertools.combinations(possible_err_qubit_inds, wt):
if len(err_qubit_inds) == 2 and not qubitGraph.is_directly_connected(err_qubit_inds[0], err_qubit_inds[1]):
continue # TO UPDATE - check whether all wt indices are a connected subgraph
errbasis = [basisEl_Id]
errbasis_lbls = ['I']
for err_basis_inds in _iter_basis_inds(wt):
error = _np.array(err_basis_inds, _np.int64) # length == wt
basisEl = basisProductMatrix(error, sparse)
errbasis.append(basisEl)
errbasis_lbls.append(''.join(["IXYZ"[i] for i in err_basis_inds]))
printer.log("Error on qubits %s -> error basis of length %d" % (err_qubit_inds, len(errbasis)), 3)
errbasis = _ExplicitBasis(errbasis, errbasis_lbls, real=True, sparse=sparse)
termErr = Lindblad(wtNoErr, proj_basis=errbasis, mxBasis=wtBasis)
err_qubit_global_inds = err_qubit_inds
fullTermErr = Embedded(ssAllQ, [qubit_labels[i] for i in err_qubit_global_inds], termErr)
assert(fullTermErr.num_params() == termErr.num_params())
printer.log("Lindblad gate w/dim=%d and %d params -> embedded to gate w/dim=%d" %
(termErr.dim, termErr.num_params(), fullTermErr.dim))
termops.append(fullTermErr)
if errcomp_type == "gates":
return Composed(termops)
elif errcomp_type == "errorgens":
errgen = Composed(termops)
LindbladOp = _op.LindbladDenseOp if sim_type == "matrix" \
else _op.LindbladOp
return LindbladOp(None, errgen, dense_rep=not sparse)
else: assert(False)
def _build_nqn_cloud_noise(target_qubit_inds, qubitGraph, weight_maxhops_tuples,
errcomp_type="gates", sparse=False, sim_type="matrix",
parameterization="H+S", verbosity=0):
"""
Create an n-qubit gate that is a composition of:
`targetOp(target_qubits) -> idle_noise(all_qubits) -> loc_noise(local_qubits)`
where `idle_noise` is given by the `idle_noise` argument and `loc_noise` is
given by the rest of the arguments. `loc_noise` can be implemented either
by a single (n-qubit) embedded Lindblad gate with all relevant error
generators, or as a composition of embedded single-error-term Lindblad gates
(see param `errcomp_type`).
The local noise consists of terms up to a maximum weight acting on the qubits
reachable by a given maximum number of hops (along the nearest-
neighbor edges of `qubitGraph`) from the target qubits.
Parameters
----------
target_qubit_inds : list
The indices of the target qubits.
qubitGraph : QubitGraph
A graph giving the geometry (nearest-neighbor relations) of the qubits.
weight_maxhops_tuples : iterable
A list of `(weight,maxhops)` 2-tuples specifying which error weights
should be included and what region of the graph (as a `maxhops` from
the set of target qubits) should have errors of the given weight applied
to it.
errcomp_type : {"gates","errorgens"}
How errors are composed when creating layer operations in the associated
model. See :method:`CloudnoiseModel.__init__` for details.
sparse : bool, optional
Whether the embedded Lindblad-parameterized gates within the constructed
gate are represented as sparse or dense matrices. (This is determined by
whether they are constructed using sparse basis matrices.)
sim_type : {"matrix","map","termorder:<N>"}
The type of forward simulation (probability computation) being used by
the model this gate is destined for. This affects what type of
gate objects (e.g. `ComposedDenseOp` vs `ComposedOp`) are created.
parameterization : str
The type of parameterization for the constructed gate. E.g. "H+S",
"H+S terms", "H+S clifford terms", "CPTP", etc.
verbosity : int, optional
An integer >= 0 dictating how much output to send to stdout.
Returns
-------
LinearOperator
"""
if sim_type == "matrix":
ComposedDenseOp = _op.ComposedDenseOp
EmbeddedDenseOp = _op.EmbeddedDenseOp
else:
ComposedDenseOp = _op.ComposedOp
EmbeddedDenseOp = _op.EmbeddedOp
if errcomp_type == "gates":
Composed = ComposedDenseOp
Embedded = EmbeddedDenseOp
elif errcomp_type == "errorgens":
Composed = _op.ComposedErrorgen
Embedded = _op.EmbeddedErrorgen
else: raise ValueError("Invalid `errcomp_type`: %s" % errcomp_type)
Lindblad = _get_Lindblad_factory(sim_type, parameterization, errcomp_type)
#constructs a gate or errorgen based on value of errcomp_type
printer = _VerbosityPrinter.build_printer(verbosity)
printer.log("Creating local-noise error factor (%s)" % errcomp_type)
# make a composed-gate of embedded single-basis-element Lindblad-gates or -errorgens,
# one for each specified error term
loc_noise_termops = [] # list of gates to compose
qubit_labels = qubitGraph.get_node_names()
qubit_dim = 4 # cloud noise models always use density matrices, so not '2' here
ssAllQ = _ld.StateSpaceLabels(qubit_labels, (qubit_dim,) * len(qubit_labels))
for wt, maxHops in weight_maxhops_tuples:
## loc_noise_errinds = [] # list of basis indices for all local-error terms
possible_err_qubit_inds = _np.array(qubitGraph.radius(target_qubit_inds, maxHops),
_np.int64) # we know node labels are integers
nPossible = len(possible_err_qubit_inds) # also == "nLocal" in this case
basisEl_Id = basisProductMatrix(_np.zeros(wt, _np.int64), sparse) # identity basis el
if errcomp_type == "gates":
wtNoErr = _sps.identity(4**wt, 'd', 'csr') if sparse else _np.identity(4**wt, 'd')
elif errcomp_type == "errorgens":
wtNoErr = _sps.csr_matrix((4**wt, 4**wt)) if sparse else _np.zeros((4**wt, 4**wt), 'd')
else: raise ValueError("Invalid `errcomp_type`: %s" % errcomp_type)
wtBasis = _BuiltinBasis('pp', 4**wt, sparse=sparse)
printer.log("Weight %d, max-hops %d: %d possible qubits" % (wt, maxHops, nPossible), 3)
# print("DB: possible qubits = ", possible_err_qubit_inds,
# " (radius of %d around %s)" % (maxHops,str(target_qubit_inds)))
for err_qubit_local_inds in _itertools.combinations(list(range(nPossible)), wt):
# err_qubit_inds are in range [0,nPossible-1] qubit indices
#Future: check that err_qubit_inds marks qubits that are connected
errbasis = [basisEl_Id]
errbasis_lbls = ['I']
for err_basis_inds in _iter_basis_inds(wt):
error = _np.array(err_basis_inds, _np.int64) # length == wt
basisEl = basisProductMatrix(error, sparse)
errbasis.append(basisEl)
errbasis_lbls.append(''.join(["IXYZ"[i] for i in err_basis_inds]))
err_qubit_global_inds = possible_err_qubit_inds[list(err_qubit_local_inds)]
printer.log("Error on qubits %s -> error basis of length %d" % (err_qubit_global_inds, len(errbasis)), 4)
errbasis = _ExplicitBasis(errbasis, errbasis_lbls, real=True, sparse=sparse)
termErr = Lindblad(wtNoErr, proj_basis=errbasis, mxBasis=wtBasis, relative=True)
fullTermErr = Embedded(ssAllQ, [qubit_labels[i] for i in err_qubit_global_inds], termErr)
assert(fullTermErr.num_params() == termErr.num_params())
printer.log("Lindblad gate w/dim=%d and %d params -> embedded to gate w/dim=%d" %
(termErr.dim, termErr.num_params(), fullTermErr.dim))
loc_noise_termops.append(fullTermErr)
fullCloudErr = Composed(loc_noise_termops)
return fullCloudErr
class CloudNoiseLayerLizard(_ImplicitLayerLizard):
"""
The layer lizard class for a :class:`CloudNoiseModel`, which
creates layers by composing perfect target gates, global idle error,
and local "cloud" errors.
The value of `model._lizardArgs['errcomp_type']` determines which of two
composition strategies are employed. When the errcomp_type is `"gates"`,
the errors on multiple gates in a single layer are composed as separate
and subsequent processes. Specifically, the layer operation has the form
`Composed(target,idleErr,cloudErr)` where `target` is a composition of all
the ideal gate operations in the layer, `idleErr` is idle error
(`.operation_blks['layers']['globalIdle']`), and `cloudErr` is the
composition (ordered as layer-label) of cloud-noise contributions, i.e. a
map that acts as the product of exponentiated error-generator matrices.
`"errorgens"`, on the other hand, means that layer operations have the form
`Composed(target, error)` where `target` is as above and `error` results
from composing the idle and cloud-noise error *generators*, i.e. a map that
acts as the exponentiated sum of error generators (ordering is irrelevant in
this case).
"""
def get_prep(self, layerlbl):
return self.prep_blks['layers'][layerlbl] # prep_blks['layers'] are full prep ops
def get_effect(self, layerlbl):
if layerlbl in self.effect_blks['layers']:
return self.effect_blks['layers'][layerlbl] # effect_blks['layer'] are full effect ops
else:
# See if this effect label could correspond to a *marginalized* POVM, and
# if so, create the marginalized POVM and add its effects to self.effect_blks['layers']
if isinstance(layerlbl, _Lbl): # this should always be the case...
povmName = _gt.eLabelToPOVM(layerlbl)
if povmName in self.povm_blks['layers']:
# implicit creation of marginalized POVMs whereby an existing POVM name is used with sslbls that
# are not present in the stored POVM's label.
mpovm = _povm.MarginalizedPOVM(self.povm_blks['layers'][povmName],
self.model.state_space_labels, layerlbl.sslbls) # cache in FUTURE?
mpovm_lbl = _Lbl(povmName, layerlbl.sslbls)
self.effect_blks['layers'].update(mpovm.simplify_effects(mpovm_lbl))
assert(layerlbl in self.effect_blks['layers']), "Failed to create marginalized effect!"
return self.effect_blks['layers'][layerlbl]
raise KeyError("Could not build effect for '%s' label!" % str(layerlbl))
def get_operation(self, layerlbl):
dense = bool(self.model._sim_type == "matrix") # whether dense matrix gates should be created
if isinstance(layerlbl, _CircuitLabel):
return self.get_circuitlabel_op(layerlbl, dense)
add_idle_noise = self.model._lizardArgs['add_idle_noise']
errcomp_type = self.model._lizardArgs['errcomp_type']
dense_rep = self.model._lizardArgs['dense_rep'] or dense
# can't create dense-rep LindbladOps with dense_rep=False
Composed = _op.ComposedDenseOp if dense else _op.ComposedOp
Lindblad = _op.LindbladDenseOp if dense else _op.LindbladOp
Sum = _op.ComposedErrorgen
#print("DB: CloudNoiseLayerLizard building gate %s for %s w/comp-type %s" %
# (('matrix' if dense else 'map'), str(oplabel), self.errcomp_type) )
components = layerlbl.components
if len(components) == 0: # or layerlbl == 'Gi': # OLD: special case: 'Gi' acts as global idle!
return self.simpleop_blks['layers']['globalIdle'] # idle!
#Compose target operation from layer's component labels, which correspond
# to the perfect (embedded) target ops in op_blks
if len(components) > 1:
targetOp = Composed([self.get_layer_component_targetop(l) for l in components], dim=self.model.dim,
evotype=self.model._evotype)
else: targetOp = self.get_layer_component_targetop(components[0])
ops_to_compose = [targetOp]
if errcomp_type == "gates":
if add_idle_noise: ops_to_compose.append(self.simpleop_blks['layers']['globalIdle'])
component_cloudnoise_ops = self.get_layer_component_cloudnoises(components)
if len(component_cloudnoise_ops) > 0:
if len(component_cloudnoise_ops) > 1:
localErr = Composed(component_cloudnoise_ops,
dim=self.model.dim, evotype=self.model._evotype)
else:
localErr = component_cloudnoise_ops[0]
ops_to_compose.append(localErr)
elif errcomp_type == "errorgens":
#We compose the target operations to create a
            # final target op, and compose this with a *single* Lindblad gate which has as
# its error generator the composition (sum) of all the factors' error gens.
errorGens = [self.simpleop_blks['layers']['globalIdle'].errorgen] if add_idle_noise else []
errorGens.extend(self.get_layer_component_cloudnoises(components))
if len(errorGens) > 0:
if len(errorGens) > 1:
error = Lindblad(None, Sum(errorGens, dim=self.model.dim,
evotype=self.model._evotype),
dense_rep=dense_rep)
else:
error = Lindblad(None, errorGens[0], dense_rep=dense_rep)
ops_to_compose.append(error)
else:
raise ValueError("Invalid errcomp_type in CloudNoiseLayerLizard: %s" % errcomp_type)
ret = Composed(ops_to_compose, dim=self.model.dim,
evotype=self.model._evotype)
self.model._init_virtual_obj(ret) # so ret's gpindices get set
return ret
def get_layer_component_targetop(self, complbl):
if isinstance(complbl, _CircuitLabel):
raise NotImplementedError("Cloud noise models cannot simulate circuits with partial-layer subcircuits.")
# In the FUTURE, could easily implement this for errcomp_type == "gates", but it's unclear what to
            # do for the "errorgens" case - how do we get an error generator of an entire (multi-layer) sub-circuit?
# Maybe we just need to expand the label and create a composition of those layers?
elif complbl in self.simpleop_blks['layers']:
return self.simpleop_blks['layers'][complbl]
else:
return _opfactory.op_from_factories(self.model.factories['layers'], complbl)
def get_layer_component_cloudnoises(self, complbl_list):
"""
Get any present cloudnoise ops from a list of components. This function processes
a list rather than an item because it's OK if some components don't have
corresponding cloudnoise ops - we just leave those off.
"""
ret = []
for complbl in complbl_list:
if complbl in self.simpleop_blks['cloudnoise']:
ret.append(self.simpleop_blks['cloudnoise'][complbl])
else:
try:
ret.append(_opfactory.op_from_factories(self.model.factories['cloudnoise'], complbl))
except KeyError: pass # OK if cloudnoise doesn't exist (means no noise)
return ret
|
from .text_detector import *
from .text_recognizer import *
|
from shutil import make_archive, copytree, rmtree, unpack_archive, move
import json
import decimal
import datetime
import uuid
import os
import rdb.models.image as Image
import rdb.models.mlModel as MLModel
import rdb.models.feature as Feature
import rdb.models.featureSet as FeatureSet
import rdb.models.environment as Environment
from rdb.rdb import db
from flask import g
PACKAGING_PATH_PREFIX = '/ketos/environments_data/packaging/'
METADATA_DIR = '/ketos_metadata'
def alchemyencoder(obj):
"""JSON encoder function for SQLAlchemy special classes."""
if isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, decimal.Decimal):
return float(obj)
def get_packaging_path(model):
return PACKAGING_PATH_PREFIX + model.environment.container_name
def package_model(model):
# build relevant paths
packaging_path = get_packaging_path(model)
packaging_path_tmp = packaging_path + '/' + model.ml_model_name
metadata_path = packaging_path_tmp + METADATA_DIR
root_dir = model.environment.get_data_directory() + '/' + model.ml_model_name
# initial cleanup
if os.path.isdir(packaging_path_tmp):
rmtree(packaging_path_tmp)
# create root directory if needed
if not os.path.isdir(root_dir):
os.makedirs(root_dir, mode=0o777)
# temporarily copy model data to packaging path
copytree(root_dir, packaging_path_tmp)
# create directory for metadata
os.makedirs(metadata_path, mode=0o777)
# create metadata json file
with open(metadata_path + '/metadata.json', 'w') as metadata:
metadata.write('[')
# write image data to json file
image = model.environment.base_image
json.dump(image.as_dict(), metadata, default=alchemyencoder)
metadata.write(',')
# write environment data to json file
env = model.environment
env_print = env.as_dict()
env_print['container_id'] = "xxxxxxxxxxxx"
env_print['container_name'] = "xxxxxxxxxxxx"
env_print['jupyter_token'] = "xxxxxxxxxxxx"
env_print['jupyter_port'] = "xxxxxxxxxxxxx"
json.dump(env_print, metadata, default=alchemyencoder)
metadata.write(',')
# write model data to json file
model_print = model.as_dict()
model_print['container_id'] = "xxxxxxxxxxxx"
model_print['container_name'] = "xxxxxxxxxxxx"
model_print['jupyter_token'] = "xxxxxxxxxxxx"
model_print['jupyter_port'] = "xxxxxxxxxxxxx"
json.dump(model_print, metadata, default=alchemyencoder)
# write feature set data to json file
feature_set = model.feature_set
if feature_set:
metadata.write(',')
# write single feature data from feature set to json file
metadata.write('[')
features = feature_set.features
if features:
count = 0
for f in features:
json.dump(f.as_dict(), metadata, default=alchemyencoder)
count = count + 1
if count != len(features):
metadata.write(',')
metadata.write(']')
metadata.write(',')
json.dump(feature_set.as_dict(), metadata, default=alchemyencoder)
metadata.write(']')
# zip data to archive
archive_path = packaging_path + '/' + model.ml_model_name
if os.path.exists(archive_path + '.zip'):
os.remove(archive_path + '.zip')
make_archive(archive_path, 'zip', packaging_path_tmp)
# remove temporary data
rmtree(packaging_path_tmp)
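# Layout sketch of the metadata.json written above and consumed by load_model below
# (the last two entries appear only when the model has a feature set):
#   [image, environment, model, [feature, ...], feature_set]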
def load_model(file, environment_id=None, feature_set_id=None, raise_abort=True):
# generate temporary path to save file to
tmp_uuid = str(uuid.uuid4().hex)
tmp_path = '/tmp/' + tmp_uuid
# create temporary directory
os.makedirs(tmp_path, mode=0o777)
# save zip-file to temporary directory and unzip it
file.save(tmp_path + '/' + file.filename)
unpack_archive(tmp_path + '/' + file.filename, tmp_path, 'zip')
os.remove(tmp_path + '/' + file.filename)
# load metadata
metadata = None
with open(tmp_path + METADATA_DIR + '/metadata.json', 'r') as infile:
metadata = json.load(infile)
# first of all: get the image of the environment to create
i = metadata[0]
create_image = Image.get_by_name(image_name=i['name'])
# create and start the new environment
env_created = None
e = metadata[1]
if not environment_id or environment_id <= 0:
env_created = Environment.create(name=e['name'], desc=e['description'], image_id=create_image.id)
else:
env_created = Environment.get(environment_id, raise_abort=raise_abort)
# create the model which is to be loaded
m = metadata[2]
model_created = MLModel.create(name=m['name'], desc=m['description'], env_id=env_created.id, create_example_model=False, feature_set_id=None)
if len(metadata) > 3:
# create the features
features_created = list()
features = metadata[3]
for f in features:
# do not create duplicate features
feature = Feature.get_by_res_par_val(resource=f['resource'], parameter_name=f['parameter_name'], value=f['value'])
if not feature:
feature = Feature.create(resource=f['resource'], parameter_name=f['parameter_name'], value=f['value'], name=f['name'], desc=f['description'])
features_created.append(feature)
# create the feature set with the features and model assigned
feature_set_created = None
fs = metadata[4]
if not feature_set_id or feature_set_id <= 0:
feature_set_created = FeatureSet.create(name=fs['name'], desc=fs['description'])
else:
feature_set_created = FeatureSet.get(feature_set_id, raise_abort=raise_abort)
feature_set_created.features = features_created
feature_set_created.ml_models.append(model_created)
db.session.commit()
# remove temporarily created directory and files
if os.path.isdir(env_created.get_data_directory() + '/' + model_created.ml_model_name):
rmtree(env_created.get_data_directory() + '/' + model_created.ml_model_name)
os.makedirs(env_created.get_data_directory() + '/' + model_created.ml_model_name, mode=0o777)
for filename in os.listdir(tmp_path):
move(tmp_path + '/' + filename, env_created.get_data_directory() + '/' + model_created.ml_model_name)
rmtree(env_created.get_data_directory() + '/' + model_created.ml_model_name + METADATA_DIR)
def get_suitable_environments(file):
metadata = json.load(file)
image_metadata = metadata[0]
image = Image.get_by_name(image_name=image_metadata['name'])
return Environment.get_by_image_id(image.id)
def get_suitable_feature_sets(file):
metadata = json.load(file)
if len(metadata) <= 3:
return None
features_metadata = metadata[3]
feature_sets = FeatureSet.get_all_for_user(g.user.id)
ret = list()
cnt_features = len(features_metadata)
for fs in feature_sets:
cnt_match = 0
for f in fs.features:
for f_metadata in features_metadata:
if f_metadata['resource'] == f.resource and f_metadata['parameter_name'] == f.parameter_name and f_metadata['value'] == f.value:
cnt_match += 1
break
if cnt_match == cnt_features:
ret.append(fs)
return ret
|
"""
Visualize the apparent quasar proper motions caused by the acceleration of the solar system barycentre
Use the results from Gaia Collaboration, Klioner, et al. (2020) to visualize the apparent proper motions of quasars
caused by the acceleration of the solar system barycentre (also known as 'Galactic aberration'). Use equation (4) from
the paper to calculate the apparent proper motions.
Anthony Brown Nov 2020 - Dec 2020
"""
import argparse
import astropy.units as u
import astropy_healpix.healpy as hp
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
from astropy.coordinates import Galactic
from astropy_healpix import HEALPix
from matplotlib.gridspec import GridSpec
from matplotlib.patches import ArrowStyle
def make_plot(args):
"""
Take the steps to make the plot.
Parameters
----------
    args : dict
        Command line arguments (as returned by parse_command_line_arguments)
Returns
-------
Nothing
"""
basename = 'PMmap-qso-galactic-aberration'
gx = 5.04
gy = -0.10
gz = -0.29
if args['quiver']:
hplevel = 3
else:
hplevel = 5
nside = hp.order2nside(hplevel)
npix = hp.nside2npix(nside)
ahp = HEALPix(nside=nside, order='nested', frame=Galactic())
hpindices = np.arange(npix)
skycoords = ahp.healpix_to_skycoord(hpindices)
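    # Apparent proper motion field induced by the acceleration of the solar system
    # barycentre (equation 4 of Gaia Collaboration, Klioner, et al. 2020), with
    # (gx, gy, gz) the Galactic Cartesian components of the aberration vector set above:
    #   pm_l* = -gx*sin(l) + gy*cos(l)
    #   pm_b  = -gx*sin(b)*cos(l) - gy*sin(b)*sin(l) + gz*cos(b)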
pm_l_cosb = -gx * np.sin(skycoords.l.to(u.rad)) + gy * np.cos(skycoords.l.to(u.rad))
pm_b = -gx * np.sin(skycoords.b.to(u.rad)) * np.cos(skycoords.l.to(u.rad)) \
- gy * np.sin(skycoords.b.to(u.rad)) * np.sin(skycoords.l.to(u.rad)) \
+ gz * np.cos(skycoords.b.to(u.rad))
pmtot = np.sqrt(pm_l_cosb ** 2 + pm_b ** 2)
backgr = plt.imread('../star-trail-animation/sky-images/GaiaSky-colour-2k.png')
default_proj = ccrs.PlateCarree()
sky_proj = ccrs.Mollweide()
fig = plt.figure(figsize=(16, 9), dpi=120, frameon=False, tight_layout={'pad': 0.01})
gs = GridSpec(1, 1, figure=fig)
ax = fig.add_subplot(gs[0, 0], projection=sky_proj)
ax.imshow(np.fliplr(backgr), transform=default_proj, zorder=-1, origin='upper')
veccolor = plt.cm.get_cmap('tab10').colors[9]
linecolor = plt.cm.get_cmap('tab10').colors[9]
if args['quiver']:
vscale = np.median(pmtot) / 50
ax.quiver(skycoords.l.value, skycoords.b.value, pm_l_cosb, pm_b, transform=default_proj, angles='xy',
scale=vscale, scale_units='dots', color=veccolor, headwidth=4, headlength=4, headaxislength=3.5)
else:
if args['colourstreams']:
ax.streamplot(skycoords.l.value, skycoords.b.value, pm_l_cosb, pm_b, transform=default_proj, linewidth=2.0,
density=2, color=pmtot, cmap='viridis', maxlength=0.5, arrowsize=1,
arrowstyle=ArrowStyle.Fancy(head_length=1.0, head_width=.4, tail_width=.4))
elif args['lwcode'] > 0:
ax.streamplot(skycoords.l.value, skycoords.b.value, pm_l_cosb, pm_b, transform=default_proj,
linewidth=args['lwcode'] * pmtot / np.median(pmtot), density=2, color=linecolor,
maxlength=0.5,
arrowsize=1, arrowstyle=ArrowStyle.Fancy(head_length=1.0, head_width=.4, tail_width=.4))
else:
ax.streamplot(skycoords.l.value, skycoords.b.value, pm_l_cosb, pm_b, transform=default_proj, linewidth=1.5,
density=2.5, color=linecolor, maxlength=0.5, arrowsize=1,
arrowstyle=ArrowStyle.Fancy(head_length=1.0, head_width=.4, tail_width=.4))
# ax.gridlines()
ax.invert_xaxis()
if args['pdfOutput']:
plt.savefig(basename + '.pdf')
elif args['pngOutput']:
plt.savefig(basename + '.png')
else:
plt.show()
def parse_command_line_arguments():
"""
Set up command line parsing.
"""
parser = argparse.ArgumentParser("Produce all-sky proper motion map.")
parser.add_argument('--vectors', action="store_true", dest="quiver", help="Plot vectors instead of streamlines")
parser.add_argument('--colourcode', action='store_true', dest='colourstreams', help="""Plot streamlines colour coded
by magnitude of proper motion""")
parser.add_argument('--lwcode', type=float, default=0.0, help="""Plot streamlines with the width indicating the
magnitude of proper motion. Scale the widths by the factor provided""")
parser.add_argument("-p", action="store_true", dest="pdfOutput", help="Make PDF plot")
parser.add_argument("-b", action="store_true", dest="pngOutput", help="Make PNG plot")
args = vars(parser.parse_args())
return args
if __name__ == '__main__':
cmdargs = parse_command_line_arguments()
make_plot(cmdargs)
|
"""Provides data structures for encapsulating loss data."""
import numpy
class Loss:
"""Encapsulates training loss data.
.. py:attribute:: label
A string that will be used in graph legends for this loss data.
.. py:attribute:: loss_values
A numpy.ndarray containing the training loss data.
.. py:attribute:: precision_values
A numpy.ndarray containing the training precision data.
"""
def __init__(
self, label: str, loss_values: numpy.ndarray, precision_values: numpy.ndarray
):
self.label = label
self.loss_values = loss_values
self.precision_values = precision_values
LossList = list
def has_invalid_values(loss: Loss) -> bool:
"""Determine if loss or precision data has invalid values.
    :param Loss loss: The Loss object whose loss and precision values are checked
        for invalid values.
    :returns: True is returned if the data has at least one invalid value. False is
        returned if all values in the data are valid.
:rtype: bool
This function will tell you if the data has any values that are NaN,
+infinity, or -infinity.
"""
return numpy.any(numpy.logical_not(numpy.isfinite(loss.loss_values))) or numpy.any(
numpy.logical_not(numpy.isfinite(loss.precision_values))
)
def sort_by_loss(losses: LossList, algorithm: str) -> None:
"""Sort the loss data according to the specified algorithm.
:param LossList losses: The list of loss data to sort. This list is sorted
in place.
:param str algorithm: The algorithm to use for sorting. See the loss
configuration item ``sort_algorithm`` for acceptable values.
:returns: None
This function sorts the loss data, based on loss values (not precision
values). The list is sorted from best to worst. For loss, best is always the
lowest. The list below defines what is compared to determine what is lowest.
For the examples described below, assume two sets of loss data, with the
given values:
.. code-block::
baseline = [ 5, 2, 1, 2 ]
new_loss = [ 4, 3, 2, 1 ]
:last: Comparisons are made between the last value in each list of loss
data. Using this algorithm for the example data, ``new_loss`` will be
        sorted ahead of ``baseline``: ``baseline[-1]`` is 2, while
        ``new_loss[-1]`` is 1.
"""
if algorithm == "last":
losses.sort(key=lambda l: l.loss_values[-1])
def sort_by_precision(losses: LossList, algorithm: str) -> None:
"""Sort the loss data according to the specified algorithm.
:param LossList losses: The list of loss data to sort. This list is sorted
in place.
:param str algorithm: The algorithm to use for sorting. See the loss
configuration item ``sort_algorithm`` for acceptable values.
:returns: None
This function sorts the loss data, based on precision values (not loss
values). The list is sorted from best to worst. For precision, best is
always the highest. The list below defines what is compared to determine
what is highest. For the examples described below, assume two sets of
precision data, with the given values:
.. code-block::
baseline = [ 5, 2, 1, 2 ]
new_loss = [ 4, 3, 2, 1 ]
:last: Comparisons are made between the last value in each list of loss
data. Using this algorithm for the example data, ``baseline`` will be
        sorted ahead of ``new_loss``: ``baseline[-1]`` is 2, while
        ``new_loss[-1]`` is 1.
"""
if algorithm == "last":
losses.sort(key=lambda l: l.precision_values[-1], reverse=True)
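# Minimal usage sketch (illustrative values mirroring the docstring examples above):
#   baseline = Loss("baseline", numpy.array([5, 2, 1, 2]), numpy.array([5, 2, 1, 2]))
#   new_loss = Loss("new_loss", numpy.array([4, 3, 2, 1]), numpy.array([4, 3, 2, 1]))
#   losses = [baseline, new_loss]
#   sort_by_loss(losses, "last")       # new_loss first: its last loss value (1) is lowest
#   sort_by_precision(losses, "last")  # baseline first: its last precision value (2) is highest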
|
# Solution of;
# Project Euler Problem 691: Long substring with many repetitions
# https://projecteuler.net/problem=691
#
# Given a character string $s$, we define $L(k,s)$ to be the length of the
# longest substring of $s$ which appears at least $k$ times in $s$, or $0$ if
# such a substring does not exist. For example,
# $L(3,\text{“bbabcabcabcacba”})=4$ because of the three occurrences of the
# substring $\text{“abca”}$, and $L(2,\text{“bbabcabcabcacba”})=7$ because of
# the repeated substring $\text{“abcabca”}$. Note that the occurrences can
# overlap. Let $a_n$, $b_n$ and $c_n$ be the $0/1$ sequences defined by:
#   $a_0 = 0$
#   $a_{2n} = a_{n}$
#   $a_{2n+1} = 1 - a_{n}$
#   $b_n = \lfloor\frac{n+1}{\varphi}\rfloor - \lfloor\frac{n}{\varphi}\rfloor$
#     (where $\varphi$ is the golden ratio)
#   $c_n = a_n + b_n - 2 a_n b_n$
# and $S_n$ the
# character string $c_0\ldots c_{n-1}$. You are given that $L(2,S_{10})=5$,
# $L(3,S_{10})=2$, $L(2,S_{100})=14$, $L(4,S_{100})=6$, $L(2,S_{1000})=86$,
# $L(3,S_{1000}) = 45$, $L(5,S_{1000}) = 31$, and that the sum of non-zero
# $L(k,S_{1000})$ for $k\ge 1$ is $2460$. Find the sum of non-zero
# $L(k,S_{5000000})$ for $k\ge 1$.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 691
timed.caller(dummy, n, i, prob_id)
|
import asyncio
from unittest import mock
import pytest
from distributed.core import ConnectionPool
from distributed.utils_comm import gather_from_workers, pack_data, retry, subs_multiple
from distributed.utils_test import BrokenComm, gen_cluster
def test_pack_data():
data = {"x": 1}
assert pack_data(("x", "y"), data) == (1, "y")
assert pack_data({"a": "x", "b": "y"}, data) == {"a": 1, "b": "y"}
assert pack_data({"a": ["x"], "b": "y"}, data) == {"a": [1], "b": "y"}
def test_subs_multiple():
data = {"x": 1, "y": 2}
assert subs_multiple((sum, [0, "x", "y", "z"]), data) == (sum, [0, 1, 2, "z"])
assert subs_multiple((sum, [0, ["x", "y", "z"]]), data) == (sum, [0, [1, 2, "z"]])
dsk = {"a": (sum, ["x", "y"])}
assert subs_multiple(dsk, data) == {"a": (sum, [1, 2])}
# Tuple key
data = {"x": 1, ("y", 0): 2}
dsk = {"a": (sum, ["x", ("y", 0)])}
assert subs_multiple(dsk, data) == {"a": (sum, [1, 2])}
@gen_cluster(client=True)
async def test_gather_from_workers_permissive(c, s, a, b):
rpc = await ConnectionPool()
x = await c.scatter({"x": 1}, workers=a.address)
data, missing, bad_workers = await gather_from_workers(
{"x": [a.address], "y": [b.address]}, rpc=rpc
)
assert data == {"x": 1}
assert list(missing) == ["y"]
class BrokenConnectionPool(ConnectionPool):
async def connect(self, *args, **kwargs):
return BrokenComm()
@gen_cluster(client=True)
async def test_gather_from_workers_permissive_flaky(c, s, a, b):
x = await c.scatter({"x": 1}, workers=a.address)
rpc = await BrokenConnectionPool()
data, missing, bad_workers = await gather_from_workers({"x": [a.address]}, rpc=rpc)
assert missing == {"x": [a.address]}
assert bad_workers == [a.address]
def test_retry_no_exception(cleanup):
n_calls = 0
retval = object()
async def coro():
nonlocal n_calls
n_calls += 1
return retval
async def f():
return await retry(coro, count=0, delay_min=-1, delay_max=-1)
assert asyncio.run(f()) is retval
assert n_calls == 1
def test_retry0_raises_immediately(cleanup):
    # test that using count=0 (no retries) raises after the first call
n_calls = 0
async def coro():
nonlocal n_calls
n_calls += 1
raise RuntimeError(f"RT_ERROR {n_calls}")
async def f():
return await retry(coro, count=0, delay_min=-1, delay_max=-1)
with pytest.raises(RuntimeError, match="RT_ERROR 1"):
asyncio.run(f())
assert n_calls == 1
def test_retry_does_retry_and_sleep(cleanup):
# test the retry and sleep pattern of `retry`
n_calls = 0
class MyEx(Exception):
pass
async def coro():
nonlocal n_calls
n_calls += 1
raise MyEx(f"RT_ERROR {n_calls}")
sleep_calls = []
async def my_sleep(amount):
sleep_calls.append(amount)
return
async def f():
return await retry(
coro,
retry_on_exceptions=(MyEx,),
count=5,
delay_min=1.0,
delay_max=6.0,
jitter_fraction=0.0,
)
with mock.patch("asyncio.sleep", my_sleep):
with pytest.raises(MyEx, match="RT_ERROR 6"):
asyncio.run(f())
assert n_calls == 6
assert sleep_calls == [0.0, 1.0, 3.0, 6.0, 6.0]
|
from ... pyaz_utils import _call_az
def add(name, ip_address=None, no_wait=None, resource_group=None, subnet=None, vnet_name=None):
'''
Required Parameters:
- name -- Name of the Vault.
Optional Parameters:
- ip_address -- IPv4 address or CIDR range. Can supply a list: --ip-address ip1 [ip2]...
- no_wait -- Do not wait for the long-running operation to finish.
- resource_group -- Proceed only if Key Vault belongs to the specified resource group.
- subnet -- Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.
- vnet_name -- Name of a virtual network.
'''
return _call_az("az keyvault network-rule add", locals())
def remove(name, ip_address=None, no_wait=None, resource_group=None, subnet=None, vnet_name=None):
'''
Required Parameters:
- name -- Name of the Vault.
Optional Parameters:
- ip_address -- IPv4 address or CIDR range.
- no_wait -- Do not wait for the long-running operation to finish.
- resource_group -- Proceed only if Key Vault belongs to the specified resource group.
- subnet -- Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.
- vnet_name -- Name of a virtual network.
'''
return _call_az("az keyvault network-rule remove", locals())
def list(name, resource_group=None):
'''
Required Parameters:
- name -- Name of the Vault.
Optional Parameters:
- resource_group -- Proceed only if Key Vault belongs to the specified resource group.
'''
return _call_az("az keyvault network-rule list", locals())
def wait(name, created=None, custom=None, deleted=None, exists=None, interval=None, resource_group=None, timeout=None, updated=None):
'''
Place the CLI in a waiting state until a condition of the vault is met.
Required Parameters:
- name -- Name of the Vault.
Optional Parameters:
- created -- wait until created with 'provisioningState' at 'Succeeded'
- custom -- Wait until the condition satisfies a custom JMESPath query. E.g. provisioningState!='InProgress', instanceView.statuses[?code=='PowerState/running']
- deleted -- wait until deleted
- exists -- wait until the resource exists
- interval -- polling interval in seconds
- resource_group -- Proceed only if Key Vault belongs to the specified resource group.
- timeout -- maximum wait in seconds
- updated -- wait until updated with provisioningState at 'Succeeded'
'''
return _call_az("az keyvault network-rule wait", locals())
|
import numpy as np
import keras
import logging
import sys
import pandas as pd
from data.base import DataObject
from keras.layers import Input
from keras.layers import Conv1D
from keras.layers import GRU
from keras.layers import MaxPooling1D
from keras.models import Model
from keras.layers import Flatten, Dense
from keras.layers import Embedding
from keras.layers import Dropout
from keras import backend as K
from keras import regularizers
from sklearn.metrics import roc_auc_score
import matplotlib
import matplotlib.pyplot as pyplot
def printn(string):
sys.stdout.write(string)
sys.stdout.flush()
def euclidean_distance(vects):
eps = 1e-08
x, y = vects
return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), eps))
def eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
def contrastive_loss(y_true, y_pred):
margin = 1
return K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
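# The function above implements the standard contrastive-loss formulation:
#   mean over the batch of  y * d^2 + (1 - y) * max(margin - d, 0)^2
# where d is the predicted distance and y == 1 marks a similar pair.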
def _dg_embed_layers(k_poll, u_poll):
#combining layers here for flexibility
k_output = Flatten()(k_poll)
k_output = Dense(64, activation='elu')(k_output)
u_output = Flatten()(u_poll)
u_output = Dense(64, activation='elu')(u_output)
return k_output, u_output
def _dg_cnn_base(data_builder, embed_dim, activation='relu'):
k_input = Input(shape=(data_builder.target_doc_len,), dtype='int32', name="k_input")
u_input = Input(shape=(data_builder.target_doc_len,), dtype='int32', name="u_input")
embedding_layer = Embedding(input_length=data_builder.target_doc_len,
input_dim=data_builder.vocabulary_size + 1,
output_dim=embed_dim,
weights=[data_builder.embed_matrix],
trainable=False)
k_embedded_seq = embedding_layer(k_input)
u_embedded_seq = embedding_layer(u_input)
conv_first = Conv1D(filters=128, kernel_size=5, activation='relu')
poll_first = MaxPooling1D(pool_size=596)
k_cov = conv_first(k_embedded_seq)
k_poll = poll_first(k_cov)
u_cov = conv_first(u_embedded_seq)
u_poll = poll_first(u_cov)
return k_input, u_input, k_poll, u_poll
def rnn_yifan(data_builder, embed_dim=100):
logging.info("BUILDING RNN USING CONCATENATION")
embedding_layer = Embedding(input_length=data_builder.target_doc_len,
input_dim=data_builder.vocabulary_size + 1,
output_dim=100,
weights=[data_builder.embed_matrix],
trainable=False,
mask_zero=True,
name="embedding_layer")
k_input = Input(shape=(data_builder.target_doc_len,), dtype='int32', name="k_doc_input")
k_embedded_seq = embedding_layer(k_input)
u_input = Input(shape=(data_builder.target_doc_len,), dtype='int32', name="u_doc_input")
u_embedded_seq = embedding_layer(u_input)
# shared first conv
gru_layer = GRU(units=64, name="gru_layer", dropout=0.5, recurrent_dropout=0.5,
kernel_regularizer = regularizers.l2(0.02),
activity_regularizer = regularizers.l2(0.02))
k_feat = gru_layer(k_embedded_seq)
u_feat = gru_layer(u_embedded_seq)
# d_layer = Dense(8, activation='relu')
all_feat = keras.layers.concatenate([k_feat, u_feat])
all_feat = Dropout(rate=0.5)(all_feat)
all_feat = Dense(32, activation='relu', kernel_regularizer=regularizers.l2(0.02))(all_feat)
model = Model([k_input, u_input], all_feat)
return model
def dg_cnn_yifan(data_builder, embed_dim=100):
k_input, u_input, k_poll, u_poll = _dg_cnn_base(data_builder, embed_dim)
x = keras.layers.subtract([k_poll, u_poll])
    output = Flatten()(x)  # flatten the subtracted (k - u) feature difference computed above
model = Model([k_input, u_input], output)
return model
def dg_cnn_dainis(data_builder, embed_dim=100):
k_input, u_input, k_poll, u_poll = _dg_cnn_base(data_builder, embed_dim)
k_output, u_output = _dg_embed_layers(k_poll, u_poll)
model = Model([k_input, u_input], [k_output, u_output])
return model
def load_data(data_builder):
train = data_builder.train_data
test = data_builder.test_data
pair_combo, y1, y2, y_combo = make_pairs(train)
return (train), (pair_combo, y1, y2, y_combo), (test.value, test.label_doc)
def make_pairs(data_object: DataObject):
source_pair = []
target_pair = []
source_l = []
target_l = []
value_frame = data_object.value
label_frame = data_object.label_doc
for trs in range(len(value_frame)):
for trt in range(trs + 1, len(value_frame)):
source_pair.append(value_frame.iloc[[trs]])
target_pair.append(value_frame.iloc[[trt]])
source_l.append(label_frame[trs])
target_l.append(label_frame[trt])
source_pair = pd.concat(source_pair, axis=0)
target_pair = pd.concat(target_pair, axis=0)
source_l = np.array(source_l)
target_l = np.array(target_l)
source_pair.columns = ["s_k_doc", "s_u_doc"]
target_pair.columns = ["t_k_doc", "t_u_doc"]
source_pair = source_pair.reset_index(drop=True)
target_pair = target_pair.reset_index(drop=True)
pair_combo = source_pair.join(target_pair, how='outer')
y_combo = source_l == target_l
y_combo = y_combo.astype(int)
return pair_combo, source_l, target_l, y_combo
def training_the_model(model:Model, orig_train:DataObject, train, test, epochs=80, batch_size=256):
pair_combo, y_src, y_tgt, y_combo = train
test_value, test_label = test
train_auc_list = []
test_auc_list = []
pyplot.ion()
# fig = pyplot.figure()
# ax = fig.add_subplot(111)
# train_line, = ax.plot([1])
# test_line, = ax.plot([1])
# ax.legend(['train', 'test'], loc='upper right')
# pyplot.draw()
print('Training the model - Epochs '+str(epochs))
best_acc = 0
if batch_size > len(y_tgt):
        print('Lowering batch size to %d; the number of inputs is too small for it.' % len(y_tgt))
batch_size = len(y_tgt)
for e in range(epochs):
print(str(e) + '->')
for i in range(len(y_tgt) // batch_size):
# flipping stuff here
from_sample = i * batch_size
to_sample = (i + 1) * batch_size
loss1 = model.train_on_batch([
np.array(pair_combo["s_k_doc"].iloc[from_sample:to_sample].tolist()),
np.array(pair_combo["s_u_doc"].iloc[from_sample:to_sample].tolist()),
np.array(pair_combo["t_k_doc"].iloc[from_sample:to_sample].tolist()),
np.array(pair_combo["t_u_doc"].iloc[from_sample:to_sample].tolist())
],
[ y_src[from_sample:to_sample], y_combo[from_sample:to_sample] ])
# loss2 = model.train_on_batch([
# np.array(pair_combo["t_k_doc"].iloc[from_sample:to_sample].tolist()),
# np.array(pair_combo["t_u_doc"].iloc[from_sample:to_sample].tolist()),
# np.array(pair_combo["s_k_doc"].iloc[from_sample:to_sample].tolist()),
# np.array(pair_combo["s_u_doc"].iloc[from_sample:to_sample].tolist()),
# ],
# [ y_tgt[from_sample:to_sample], y_combo[from_sample:to_sample] ])
pred_output = model.predict([np.array(test_value["k_doc"][:100].tolist()), np.array(test_value["u_doc"][:100].tolist()),
np.array(test_value["k_doc"][:100].tolist()), np.array(test_value["u_doc"][:100].tolist())])
test_auc = roc_auc_score(test_label[:100], pred_output[0])
pred_output = model.predict([np.stack(orig_train.value["k_doc"][:100].as_matrix()),
np.stack(orig_train.value["u_doc"][:100].as_matrix()),
np.stack(orig_train.value["k_doc"][:100].as_matrix()),
np.stack(orig_train.value["u_doc"][:100].as_matrix())])
train_auc = roc_auc_score(orig_train.label_doc[:100], pred_output[0])
train_auc_list.append(train_auc)
test_auc_list.append(test_auc)
# train_line.set_data([range(len(train_auc_list)), train_auc_list])
# test_line.set_data([range(len(train_auc_list)), test_auc_list])
# ax.set_xlim(left=0, right=len(train_auc_list))
# ax.set_ylim(bottom=0, top=1.0)
# pyplot.draw()
# pyplot.pause(0.02)
print("step: " + str(i) + " : " + str(train_auc) + " : " + str(test_auc))
pyplot.plot(train_auc_list)
pyplot.plot(test_auc_list)
pyplot.legend(['train', 'test'], loc='upper right')
pyplot.show()
output = model.predict([np.array(test_value["k_doc"].tolist()), np.array(test_value["u_doc"].tolist()),
np.array(test_value["k_doc"].tolist()), np.array(test_value["u_doc"].tolist()) ])
acc_v = np.array(output[0] > 0.5).astype(int).squeeze() == test_label
acc = np.count_nonzero(acc_v) / len(output[0])
logging.info("ACCU: " + str(acc))
train_auc = roc_auc_score(test_label, output[0])
logging.info("ROC : " + str(train_auc))
# if best_acc < acc:
# best_acc = acc
# logging.info("BEST ACCU: " + str(acc))
# pyplot.ioff()
# pyplot.show()
return best_acc
|
# Generated by Django 3.2.11 on 2022-01-25 03:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0082_alter_userprofile_lang"),
]
operations = [
migrations.AddField(
model_name="editor",
name="wp_block_hash",
field=models.CharField(
blank=True,
default="",
editable=False,
help_text="A hash that is generated with a user's block data",
max_length=255,
),
),
]
|
#!/usr/bin/env python
import os, sys, re
import unicodecsv as csv
index = None
def get_csv_path(repo):
# NOTE: this assumes that everything is stored using a specific naming
# convention: /usr/local/aclu/[repo]/data/data.csv (20180601/dphiffer)
return "/usr/local/aclu/%s/data/data.csv" % repo
def get_index(repo):
global index
if index and repo in index:
return index[repo]
if not index:
index = {}
if not repo in index:
index[repo] = {
'count': 0,
'lookup': {},
'list': []
}
csv_path = get_csv_path(repo)
if not os.path.isfile(csv_path):
return index[repo]
else:
with open(csv_path, 'rb') as csv_file:
reader = csv.reader(csv_file)
row_num = 0
for row in reader:
if row_num == 0:
header = row
else:
id = row[0]
path = row[1]
name = row[2]
match = re.search(r"\d+$", id)
if match:
number = int(match.group(0))
                        index[repo]['count'] = number
index[repo]['lookup'][path] = row
index[repo]['list'].append(row)
row_num = row_num + 1
index[repo]['count'] = row_num
return index[repo]
def save_index(repo):
csv_path = get_csv_path(repo)
with open(csv_path, 'wb') as csv_file:
writer = csv.writer(csv_file, delimiter=',', encoding='utf-8')
headers = ['id', 'path', 'name']
writer.writerow(headers)
for line in index[repo]['list']:
writer.writerow(line)
csv_file.close()
def get_id(repo, type, path, name):
index = get_index(repo)
if path in index['lookup']:
return index['lookup'][path][0]
else:
number = index['count']
index['count'] = index['count'] + 1
id = "aclu/%s/%s:%d" % (repo, type, number)
record = [id, path, name]
index['lookup'][path] = record
index['list'].append(record)
return id
|
import json
from py42 import settings
from py42._compat import str
from py42.sdk.queries.query_filter import create_eq_filter_group
from py42.services import BaseService
from py42.services.util import get_all_pages
class AlertService(BaseService):
_uri_prefix = u"/svc/api/v1/{0}"
_CREATED_AT = u"CreatedAt"
_RULE_METADATA = u"ruleMetadata"
_SEARCH_KEY = u"alerts"
def __init__(self, connection, user_context):
super(AlertService, self).__init__(connection)
self._user_context = user_context
def search(self, query, page_num=1, page_size=None):
query.page_number = page_num - 1
if page_size:
query.page_size = page_size
query = self._add_tenant_id_if_missing(query)
uri = self._uri_prefix.format(u"query-alerts")
return self._connection.post(uri, data=query)
def get_search_page(self, query, page_num, page_size):
query.page_number = page_num - 1
query.page_size = page_size
uri = self._uri_prefix.format(u"query-alerts")
query = self._add_tenant_id_if_missing(query)
return self._connection.post(uri, data=query)
def search_all_pages(self, query):
return get_all_pages(
self.get_search_page,
self._SEARCH_KEY,
query=query,
page_size=query.page_size,
)
def get_details(self, alert_ids):
if not isinstance(alert_ids, (list, tuple)):
alert_ids = [alert_ids]
tenant_id = self._user_context.get_current_tenant_id()
uri = self._uri_prefix.format(u"query-details")
data = {u"tenantId": tenant_id, u"alertIds": alert_ids}
results = self._connection.post(uri, json=data)
return _convert_observation_json_strings_to_objects(results)
def update_state(self, state, alert_ids, note=None):
if not isinstance(alert_ids, (list, tuple)):
alert_ids = [alert_ids]
tenant_id = self._user_context.get_current_tenant_id()
uri = self._uri_prefix.format(u"update-state")
data = {
u"tenantId": tenant_id,
u"alertIds": alert_ids,
u"note": note,
u"state": state,
}
return self._connection.post(uri, json=data)
def _add_tenant_id_if_missing(self, query):
query_dict = json.loads(str(query))
tenant_id = query_dict.get(u"tenantId", None)
if tenant_id is None:
query_dict[u"tenantId"] = self._user_context.get_current_tenant_id()
return json.dumps(query_dict)
else:
return str(query)
def get_rules_page(
self, page_num, groups=None, sort_key=None, sort_direction=None, page_size=None
):
# This API expects the first page to start with zero.
page_num = page_num - 1
page_size = page_size or settings.items_per_page
data = {
u"tenantId": self._user_context.get_current_tenant_id(),
u"groups": groups or [],
u"groupClause": u"AND",
u"pgNum": page_num,
u"pgSize": page_size,
u"srtKey": sort_key,
u"srtDirection": sort_direction,
}
uri = self._uri_prefix.format(u"rules/query-rule-metadata")
return self._connection.post(uri, json=data)
def get_all_rules(self, sort_key=_CREATED_AT, sort_direction=u"DESC"):
return get_all_pages(
self.get_rules_page,
self._RULE_METADATA,
groups=None,
sort_key=sort_key,
sort_direction=sort_direction,
)
def get_all_rules_by_name(
self, rule_name, sort_key=_CREATED_AT, sort_direction=u"DESC"
):
return get_all_pages(
self.get_rules_page,
self._RULE_METADATA,
groups=[json.loads(str(create_eq_filter_group(u"Name", rule_name)))],
sort_key=sort_key,
sort_direction=sort_direction,
)
def get_rule_by_observer_id(
self, observer_id, sort_key=_CREATED_AT, sort_direction=u"DESC"
):
results = get_all_pages(
self.get_rules_page,
self._RULE_METADATA,
groups=[
json.loads(str(create_eq_filter_group(u"ObserverRuleId", observer_id)))
],
sort_key=sort_key,
sort_direction=sort_direction,
)
return next(results)
def update_note(self, alert_id, note):
tenant_id = self._user_context.get_current_tenant_id()
uri = self._uri_prefix.format(u"add-note")
data = {
u"tenantId": tenant_id,
u"alertId": alert_id,
u"note": note,
}
return self._connection.post(uri, json=data)
def get_aggregate_data(self, alert_id):
uri = self._uri_prefix.format(u"query-details-aggregate")
data = {
u"alertId": alert_id,
}
return self._connection.post(uri, json=data)
def _convert_observation_json_strings_to_objects(results):
for alert in results[u"alerts"]:
if u"observations" in alert:
for observation in alert[u"observations"]:
try:
observation[u"data"] = json.loads(observation[u"data"])
except Exception:
continue
return results
|
def basis2(n,uv):
# Shape function values and their derivatives for a given point in the
# in the reference triangle with 3 or 6 nodes
#
# Inputs: n number of nodes in the triangle
# uv coordinates of the pont (size 2x1)
# Outputs: N shape function values (size n x 1)
# dN shape function derivative values (size n x 2)
import numpy as np
# Coordinates in the reference element
u = uv[0,:]
v = uv[1,:]
o = 0.0*u
# 3-node triangle
if n == 3:
N = [[]]*3;
dN = [[]]*2;
N[0] = u;
N[1] = v;
N[2] = 1-u-v;
dN[0] = [o+1.0, o, o-1.0];
dN[1] = [o, o+1.0, o-1.0];
#6-node triangle
else:
if n == 6:
N = [[]]*6;
dN = [[]]*2;
w = 1-u-v;
            # N = [(2*u-1).*u ; (2*v-1).*v ; (2*w-1).*w ; 4*u.*v ; 4*v.*w ; 4*w.*u ];
            # dN(:,1) = [4*u-1 ; 0 ; 1-4*w ; 4*v ; -4*v ; 4*(w-u)];
            # dN(:,2) = [0 ; 4*v-1 ; 1-4*w ; 4*u ; 4*(w-v) ; -4*u ];
            N = [ (2*u-1)*u , (2*v-1)*v , (2*w-1)*w , 4*u*v , 4*v*w , 4*w*u ];
            dN[0] = [ 4*u-1 , o , 1-4*w , 4*v , -4*v , 4*(w-u) ];
            dN[1] = [ o , 4*v-1 , 1-4*w , 4*u , 4*(w-v) , -4*u ];
else:
print(str(n), '-node shape functions not implemented for triangles')
return np.array(N), np.array(dN)
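if __name__ == '__main__':
    # Minimal self-check sketch (assumes basis2 above): evaluate the 3-node shape
    # functions at the centroid (u = v = 1/3) of the reference triangle.
    # uv is a 2 x npoints array.
    import numpy as np
    uv = np.array([[1.0 / 3.0], [1.0 / 3.0]])
    N, dN = basis2(3, uv)
    print(N)   # each entry should be ~1/3 (partition of unity: N.sum(axis=0) == 1)
    print(dN)  # constant derivatives, as expected for linear shape functions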
|
import os
import json
import redis
redis_connection = redis.Redis(decode_responses=True)
if not os.path.isdir('logs_json'):
os.mkdir('logs_json')
for key in redis_connection.scan_iter('*'):
file_name = 'logs_json/' + '_'.join(key.split(':')) + '.json'
print(key)
data = redis_connection.lrange(key, 0, -1)[::-1]
with open(file_name, "w+") as f:
f.write('[')
f.write(',\n'.join(data))
f.write(']\n')
|
import discord
from cogs.utils.Cache import cache
class RequestView(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
@discord.ui.button(label="Received", style=discord.ButtonStyle.blurple, emoji='📩')
async def receive_button_callback(self, button, interaction):
pass
@discord.ui.button(label='Process', style=discord.ButtonStyle.blurple, emoji='✏')
async def process_button_callback(self, button, interaction):
pass
@discord.ui.button(label='Dismiss', style=discord.ButtonStyle.danger, emoji='❌')
async def dismiss_button_callback(self, button, interaction):
pass
@discord.ui.button(label='Finish', style=discord.ButtonStyle.green, emoji='✅')
async def finish_button_callback(self, button, interaction):
pass
class SetupView(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
@discord.ui.button(label='Archive', style=discord.ButtonStyle.green, emoji='✅')
async def toggle_archive_button_callback(self, button, interaction):
if button.style is discord.ButtonStyle.green:
button.style = discord.ButtonStyle.danger
button.emoji = '❌'
cache.cache['Guild']['archive'] = False
await interaction.response.edit_message(view=self)
else:
button.style = discord.ButtonStyle.green
button.emoji = '✅'
cache.cache['Guild']['archive'] = True
await interaction.response.edit_message(view=self)
@discord.ui.button(label='Delete All', style=discord.ButtonStyle.blurple, emoji='👮♂️')
async def add_role_button_callback(self, button, interaction):
await cache.category.channels[0].delete()
await cache.category.delete()
await cache.category_archive.delete()
cache.cache['Guild']['setup'] = False
|
"""
Code for sidebar navigation inclusion tags for blog.
:author: Douglas Daly
:date: 1/4/2018
"""
#
# Imports
#
from django import template
from ..models import Post, Category, Tag, Author
#
# Tag Definitions
#
register = template.Library()
@register.inclusion_tag("blog/tags/sidebar_menu.html")
def sidebar_menu(sort_by="date"):
"""Tag for side menu links"""
heading_objects = True
if sort_by == "date":
ret = __sidebar_menu_helper_date()
elif sort_by == "categories":
categories = Category.objects.all()
all_posts = Post.get_displayable()
ret = list()
for category in categories:
posts = all_posts.filter(category=category).order_by('title')
if len(posts) <= 0:
continue
temp = list()
for post in posts:
temp.append((post.title, post.get_absolute_url()))
ret.append((category, temp))
elif sort_by == "tags":
heading_objects = False
all_tags = Tag.objects.all()
all_posts = Post.get_displayable()
ret = list()
letters = all_tags.values_list("_category", flat=True).distinct()
for letter in letters:
tags = all_tags.filter(_category=letter)
temp = list()
for tag in tags:
has_posts = all_posts.filter(tags=tag).exists()
if not has_posts:
continue
temp.append((tag.name, tag.get_absolute_url()))
if temp:
ret.append((letter, temp))
elif sort_by == "authors":
heading_objects = False
ret = list()
all_authors = Author.get_displayable()
for author in all_authors:
posts = author.get_all_posts()
if len(posts) <= 0:
continue
temp = list()
for post in posts:
temp.append((post.title, post.get_absolute_url()))
ret.append((author.get_display_name(), temp))
else:
ret = None
return {
"sidemenu_sort": sort_by,
"sidemenu_dict": ret,
"sidemenu_heading_objects": heading_objects,
}
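# Template usage sketch (the {% load %} library name depends on this module's
# filename, which is not shown here, so treat it as a placeholder):
#   {% load blog_tags %}
#   {% sidebar_menu "categories" %}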
@register.filter(is_safe=True)
def smallnavbtn(target_nav, current_nav):
"""Tag for small nav items"""
if current_nav is not None and target_nav == current_nav:
return "btn-primary"
return "btn-secondary"
#
# Helper Functions
#
class YearHelper(object):
"""
Small helper object for displaying posts by year
"""
def __init__(self, year):
self.name = str(year)
self.slug = "Y" + str(year)
def __sidebar_menu_helper_date(previews=False):
"""Helper to get all posts by year"""
ret = list()
all_posts = Post.get_displayable(previews=previews)
date_years = all_posts.dates('display_date', 'year').distinct()
for year in reversed(date_years):
posts = all_posts.filter(published=True, display_date__year=year.year)
if len(posts) <= 0:
continue
t_year = YearHelper(year.year)
temp = list()
for post in posts:
temp.append((post.title, post.get_absolute_url()))
ret.append((t_year, temp))
return ret
|
import numpy as np
import matplotlib.pyplot as plt
import cos_doubles
x = np.arange(0, 2 * np.pi, 0.1)
y = np.empty_like(x)
cos_doubles.cos_doubles_func(x, y)
plt.plot(x, y)
plt.show()
|
# TODO: Use batch API instead of sending 1-by-1: https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/samples/batch/python
import azure.cognitiveservices.speech as speechsdk
import os
from tqdm import tqdm
from glob import glob
import time
LANGUAGE = 'en-IN'
SPEECH_CONFIG = speechsdk.SpeechConfig(subscription="<paste-speech-key-here>", region="<paste-speech-region-here>", speech_recognition_language=LANGUAGE)
def log(text):
# print(text)
return
def recognize_single_utterance(local_file_path):
audio_input = speechsdk.AudioConfig(filename=local_file_path)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=SPEECH_CONFIG, audio_config=audio_input, language=LANGUAGE)
result = speech_recognizer.recognize_once()
return result.text
def recognize_continuous(local_file_path):
'''
Modified from: https://github.com/Azure-Samples/cognitive-services-speech-sdk/blob/4f9ee79c2287a5a00dcd1a50112cd43694aa7286/samples/python/console/speech_sample.py#L321
'''
audio_config = speechsdk.audio.AudioConfig(filename=local_file_path)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=SPEECH_CONFIG, audio_config=audio_config, language=LANGUAGE)
done = False
transcript = ''
def stop_cb(evt):
"""callback that signals to stop continuous recognition upon receiving an event `evt`"""
log('CLOSING on {}'.format(evt))
nonlocal done
done = True
def recognized_cb(evt):
"""When recognizing phase is complete for a single instance, it returns a final utterance before proceeding next"""
log('RECOGNIZED: {}'.format(evt))
nonlocal transcript
transcript += ' ' + evt.result.text
# Connect callbacks to the events fired by the speech recognizer
speech_recognizer.recognizing.connect(lambda evt: log('RECOGNIZING: {}'.format(evt)))
speech_recognizer.recognized.connect(recognized_cb)
speech_recognizer.session_started.connect(lambda evt: log('SESSION STARTED: {}'.format(evt)))
speech_recognizer.session_stopped.connect(lambda evt: log('SESSION STOPPED {}'.format(evt)))
speech_recognizer.canceled.connect(lambda evt: log('CANCELED {}'.format(evt)))
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# Start continuous speech recognition
speech_recognizer.start_continuous_recognition()
while not done:
time.sleep(.5)
speech_recognizer.stop_continuous_recognition()
return transcript.strip()
def recognize_bulk_wav(wav_folder, output_folder, overwrite=True):
print('Running STT for audio files in:', wav_folder)
os.makedirs(output_folder, exist_ok=True)
audio_files = sorted(glob(os.path.join(wav_folder, '*.wav')))
for audio_file in tqdm(audio_files, unit='transcript'):
txt_file = os.path.join(output_folder, os.path.basename(audio_file).replace('.wav', '.txt'))
if not overwrite and os.path.isfile(txt_file):
continue
transcript = recognize_continuous(audio_file)
if not transcript:
print('Failed for:', audio_file)
continue
with open(txt_file, 'w', encoding='utf-8') as f:
f.write(transcript)
return
if __name__ == '__main__':
recognize_bulk_wav('wav_folder', 'output_ms')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0011_element_concept'),
]
operations = [
migrations.AddField(
model_name='element',
name='action',
field=models.TextField(null=True, blank=True),
),
migrations.AddField(
model_name='element',
name='audio',
field=models.TextField(null=True, blank=True),
),
migrations.AddField(
model_name='element',
name='image',
field=models.TextField(null=True, blank=True),
),
migrations.AddField(
model_name='element',
name='mime_type',
field=models.CharField(max_length=128, null=True, blank=True),
),
migrations.AddField(
model_name='element',
name='required',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='element',
name='element_type',
field=models.CharField(blank=True, max_length=12, null=True, choices=[(b'DATE', b'DATE'), (b'ENTRY', b'ENTRY'), (b'SELECT', b'SELECT'), (b'MULTI_SELECT', b'MULTI_SELECT'), (b'RADIO', b'RADIO'), (b'PICTURE', b'PICTURE'), (b'PLUGIN', b'PLUGIN'), (b'ENTRY_PLUGIN', b'ENTRY_PLUGIN')]),
),
]
|
from locust import TaskSet, User
from appian_locust.helper import ENV
from .mock_client import CustomLocust
from .mock_reader import read_mock_file
from appian_locust import AppianTaskSet
from appian_locust.uiform import (ComponentNotFoundException,
ChoiceNotFoundException, InvalidComponentException,
SailUiForm)
import os
import unittest
CURR_FILE_PATH = os.path.dirname(os.path.realpath(__file__))
# Set these values to an integration endpoint for etoe testing,
integration_url = ""
auth = ["a", "b"]
class TestAppImport(unittest.TestCase):
def setUp(self) -> None:
record_mode = True if integration_url else False
self.custom_locust = CustomLocust(User(ENV), integration_url=integration_url, record_mode=record_mode)
parent_task_set = TaskSet(self.custom_locust)
setattr(parent_task_set, "host", integration_url)
setattr(parent_task_set, "auth", auth)
setattr(self.custom_locust, "record_mode", True)
self.task_set = AppianTaskSet(parent_task_set)
ENV.stats.clear_all()
def test_app_importer_e_to_e(self) -> None:
self.task_set.on_start()
path_to_file = os.path.join(CURR_FILE_PATH, "resources", "Constant Test App.zip")
if not os.path.exists(path_to_file):
raise Exception(f"file not found {path_to_file}")
# Order of execution is, navigate to /design, click import, upload doc, fill upload field, click import again
if not integration_url:
self.custom_locust.enqueue_response(200, read_mock_file("design_landing_page.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_click_import_button_resp_init.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_soap_app_upload_response.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_upload_to_file_upload_field_doc_resp.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_click_import_button_resp_final.json"))
self.task_set.appian.app_importer.import_app(path_to_file)
def test_app_importer_e_to_e_for_inspect_and_import(self) -> None:
self.task_set.on_start()
path_to_file = os.path.join(CURR_FILE_PATH, "resources", "Constant Test App.zip")
if not os.path.exists(path_to_file):
raise Exception(f"file not found {path_to_file}")
# Order of execution is, navigate to /design, click import, upload doc, fill upload field, click import again
if not integration_url:
self.custom_locust.enqueue_response(200, read_mock_file("design_landing_page.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_click_import_button_resp_init.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_soap_app_upload_response.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_upload_to_file_upload_field_doc_resp.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_inspection_results_resp.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_click_import_button_resp_final.json"))
self.task_set.appian.app_importer.import_app(app_file_path=path_to_file, inspect_and_import=True)
def test_app_importer_e_to_e_with_cust_file(self) -> None:
self.task_set.on_start()
path_to_app = os.path.join(CURR_FILE_PATH, "resources", "Constant Test App.zip")
path_to_cust_file = os.path.join(CURR_FILE_PATH, "resources", "Constant Test App.properties")
for path_to_file in [path_to_app, path_to_cust_file]:
if not os.path.exists(path_to_file):
raise Exception(f"file not found {path_to_file}")
if not integration_url:
self.custom_locust.enqueue_response(200, read_mock_file("design_landing_page.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_click_import_button_resp_init.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_soap_app_upload_response.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_upload_to_file_upload_field_doc_resp.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_check_import_cust_box.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_constant_props_upload_response.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_upload_to_file_upload_field_doc_resp.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_click_import_button_resp_final.json"))
self.task_set.appian.app_importer.import_app(path_to_app, customization_file_path=path_to_cust_file)
def test_app_importer_e_to_e_with_cust_file_error(self) -> None:
self.task_set.on_start()
path_to_app = os.path.join(CURR_FILE_PATH, "resources", "Constant Test App.zip")
path_to_cust_file = os.path.join(CURR_FILE_PATH, "resources", "Constant Test App.properties")
for path_to_file in [path_to_app, path_to_cust_file]:
if not os.path.exists(path_to_file):
raise Exception(f"file not found {path_to_file}")
if not integration_url:
self.custom_locust.enqueue_response(200, read_mock_file("design_landing_page.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_click_import_button_resp_init.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_soap_app_upload_response.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_upload_to_file_upload_field_doc_resp.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_check_import_cust_box.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_constant_props_upload_response.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_upload_to_file_upload_field_doc_resp.json"))
self.custom_locust.enqueue_response(200, read_mock_file("design_click_import_failed_validation.json"))
with self.assertRaises(Exception) as e:
self.task_set.appian.app_importer.import_app(path_to_app, customization_file_path=path_to_cust_file)
if __name__ == '__main__':
unittest.main()
|
"""Import-Related Tasks"""
import time
import uuid
from celery import shared_task
from django.conf import settings
from django.core.exceptions import ValidationError
import requests
import unicodecsv
import newrelic.agent
from accounts.utils_user import create_user
from mailer.mailserver import deliver
from user_import.models import UserImport, ImportRecord, ImportRecordStatus
def load_file(file_field):
"""Generate the path to a local file for a filefield
Working with local files is great! You can scan through them easily and
manipulate them much faster than if they're remote objects. There are two
ways a file in django could be local: it could be part of a local-based
storage (and thus have a `path` attribute) or it could be downloaded and
put into a temporary folder. This function addresses both instances.
Args:
file_field: a file field on an object
Returns:
path to file
string
"""
try:
return file_field.path
except NotImplementedError:
filename = '/tmp/{uuid}'.format(uuid=uuid.uuid4())
request = requests.get(file_field.url, stream=True)
with open(filename, 'wb') as file_obj:
for chunk in request.iter_content(chunk_size=1024):
if chunk:
file_obj.write(chunk)
return filename
@shared_task
def ingest_import(user_import_id):
"""Ingest an import file"""
user_import = UserImport.objects.get(pk=user_import_id)
newrelic.agent.add_custom_parameter(
'organization_id', user_import.organization.pk)
newrelic.agent.add_custom_parameter(
'user_import_id', user_import_id)
user_import.status = 'ingesting'
user_import.save()
try:
filename = load_file(user_import.file)
with open(filename, 'rb') as file_obj:
reader = unicodecsv.DictReader(file_obj, encoding='utf-8-sig')
if reader.fieldnames != [u'first_name',
u'last_name',
u'email',
u'address']:
raise Exception('Invalid fields.')
count = 0
new_records = []
for row in reader:
count += 1
new_records.append(ImportRecord(
user_import=user_import,
first_name=row.get('first_name', ''),
last_name=row.get('last_name', ''),
email=row.get('email', ''),
address=row.get('address', '')))
if not count % 5000:
ImportRecord.objects.bulk_create(new_records)
# Empty the list
new_records = []
# Import any records not imported in the "by-5000" iterator
ImportRecord.objects.bulk_create(new_records)
            user_import.ingested = count  # total rows read, not just the final partial batch
user_import.save()
trigger_import.delay(user_import.pk)
except Exception as error:
user_import.status = 'failed'
user_import.note = unicode(error)
user_import.save()
alert_failed_import.delay(user_import.pk)
raise
@shared_task
def alert_failed_import(user_import_id):
"""Alert someone that an import failed"""
user_import = UserImport.objects.get(pk=user_import_id)
newrelic.agent.add_custom_parameter(
'organization_id', user_import.organization.pk)
newrelic.agent.add_custom_parameter(
'user_import_id', user_import_id)
deliver(
user_import.uploader.email,
settings.DEFAULT_FROM_EMAIL,
u'Import Processing Failure {}'.format(user_import.name),
user_import.note)
@shared_task
def trigger_import(user_import_id):
"""Queue up all pending records that need to be imported"""
user_import = UserImport.objects.get(id=user_import_id)
newrelic.agent.add_custom_parameter(
'organization_id', user_import.organization_id)
newrelic.agent.add_custom_parameter(
'user_import_id', user_import_id)
imported_records = ImportRecordStatus.objects.filter(
user_import_id=user_import_id).values_list(
'import_record_id', flat=True)
import_records = ImportRecord.objects.filter(
user_import_id=user_import_id).exclude(
id__in=imported_records).only('pk')
total_records = import_records.count()
user_import.status = 'creating'
user_import.save()
current_record = 1
for record in import_records:
final = bool(current_record == total_records)
import_user.delay(record.pk, current_record, final)
current_record += 1
@shared_task
def import_user(import_record_id, current_record, final=False):
"""Import a specific user"""
import_record = ImportRecord.objects.select_related(
'user_import', 'user_import__organization').get(pk=import_record_id)
newrelic.agent.add_custom_parameter(
'organization_id', import_record.user_import.organization_id)
newrelic.agent.add_custom_parameter(
'user_import_id', import_record.user_import.pk)
status_record = ImportRecordStatus()
status_record.user_import_id = import_record.user_import_id
status_record.import_record = import_record
# Overly broad exception handling system to handle all possible exceptions.
# We're basically logging exceptions to the database instead of to stdout
# or NewRelic, because digging through NewRelic to try to address issues
# would be a nightmare. We should clean this up later.
# pylint: disable=broad-except
try:
status_record.account = None
status_record.status = 'success'
status_record.account = create_user(
import_record.user_import.organization,
import_record.email,
import_record.address,
import_record.first_name,
import_record.last_name)
except ValidationError as error:
status_record.status = 'failed'
status_record.error_type = 'ValidationError'
status_record.account = None
status_record.note = unicode(error.message)
except Exception as error:
status_record.status = 'failed'
status_record.error_type = type(error)
status_record.note = unicode(error.message)
status_record.account = None
status_record.save()
# Every 250 records or on the final record update the status of the job
if not current_record % 250 or final:
user_import = import_record.user_import
# If it's the final record, sleep for 10 seconds to let other tasks
# complete
if final:
time.sleep(10)
user_import.status = 'finished'
all_import_statuses = ImportRecordStatus.objects.filter(
user_import=user_import)
user_import.total_succeeded = all_import_statuses.filter(
status='success').count()
user_import.total_failed = all_import_statuses.filter(
status='failed').count()
import_record.user_import.save()
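# Sketch (illustration only, not wired into the tasks above) of the chunked
# bulk_create pattern used by ingest_import: flush every `chunk_size` rows and
# once more at the end, keeping a running total for the ingested count.
def _bulk_create_in_chunks(rows, chunk_size=5000):
    pending = []
    total = 0
    for row in rows:
        pending.append(row)
        total += 1
        if len(pending) >= chunk_size:
            ImportRecord.objects.bulk_create(pending)
            pending = []
    if pending:
        # Create any records not yet flushed by the "by-chunk_size" loop
        ImportRecord.objects.bulk_create(pending)
    return total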
|
import argparse
import cv2
import numpy as np
import os
import tensorflow as tf
import time
from atom import Element
from atom.messages import LogLevel, Response
from autolab_core import YamlConfig
from keras.backend.tensorflow_backend import set_session
from mrcnn import model as modellib
from sd_maskrcnn.config import MaskConfig
from threading import Thread
MODES = set(["depth", "both"])
MODEL_PATHS = {
"depth": "models/sd_maskrcnn.h5",
"both": "models/ydd.h5",
}
PUBLISH_RATE = 10 # Hz
NUM_OF_COLORS = 640
SEGMENT_SCORE = 0.98
class SDMaskRCNNEvaluator:
def __init__(self,
mode="both",
input_size=512,
scaling_factor=2,
config_path="sd-maskrcnn/cfg/benchmark.yaml"):
self.element = Element("instance-segmentation")
self.input_size = input_size
self.scaling_factor = scaling_factor
self.config_path = config_path
self.mode = mode
# Streaming of masks is disabled by default to prevent consumption of resources
self.stream_enabled = False
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
config.gpu_options.visible_device_list = "0"
set_session(tf.Session(config=config))
self.set_mode(b"both")
# Initiate tensorflow graph before running threads
self.get_masks()
self.element.command_add("segment", self.segment, 10000)
self.element.command_add("get_mode", self.get_mode, 100)
self.element.command_add("set_mode", self.set_mode, 10000)
self.element.command_add("stream", self.set_stream, 100)
t = Thread(target=self.element.command_loop, daemon=True)
t.start()
self.publish_segments()
def get_mode(self, _):
"""
Returns the current mode of the algorithm (both or depth).
"""
return Response(self.mode)
def set_mode(self, data):
"""
Sets the mode of the algorithm and loads the corresponding weights.
'both' means that the algorithm is considering grayscale and depth data.
'depth' means that the algorithm only considers depth data.
"""
mode = data.decode().strip().lower()
if mode not in MODES:
return Response(f"Invalid mode {mode}")
self.mode = mode
config = YamlConfig(self.config_path)
inference_config = MaskConfig(config['model']['settings'])
inference_config.GPU_COUNT = 1
inference_config.IMAGES_PER_GPU = 1
model_path = MODEL_PATHS[self.mode]
model_dir, _ = os.path.split(model_path)
self.model = modellib.MaskRCNN(
mode=config['model']['mode'], config=inference_config, model_dir=model_dir)
self.model.load_weights(model_path, by_name=True)
self.element.log(LogLevel.INFO, f"Loaded weights from {model_path}")
return Response(f"Mode switched to {self.mode}")
def set_stream(self, data):
"""
Sets streaming of segmented masks to true or false.
"""
data = data.decode().strip().lower()
if data == "true":
self.stream_enabled = True
elif data == "false":
self.stream_enabled = False
else:
return Response(f"Expected bool, got {type(data)}.")
return Response(f"Streaming set to {self.stream_enabled}")
def inpaint(self, img, missing_value=0):
"""
Fills the missing values of the depth data.
"""
# cv2 inpainting doesn't handle the border properly
# https://stackoverflow.com/questions/25974033/inpainting-depth-map-still-a-black-image-border
img = cv2.copyMakeBorder(img, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
mask = (img == missing_value).astype(np.uint8)
# Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.
scale = np.abs(img).max()
img = img.astype(np.float32) / scale # Has to be float32, 64 not supported.
img = cv2.inpaint(img, mask, 1, cv2.INPAINT_NS)
# Back to original size and value range.
img = img[1:-1, 1:-1]
img = img * scale
return img
def normalize(self, img, max_dist=1000):
"""
Scales the range of the data to be in 8-bit.
Also shifts the values so that maximum is 255.
"""
img = np.clip(img / max_dist, 0, 1) * 255
img = np.clip(img + (255 - img.max()), 0, 255)
return img.astype(np.uint8)
def scale_and_square(self, img, scaling_factor, size):
"""
Scales the image by scaling_factor and creates a border around the image to match size.
Reducing the size of the image tends to improve the output of the model.
"""
img = cv2.resize(
img, (int(img.shape[1] / scaling_factor), int(img.shape[0] / scaling_factor)),
interpolation=cv2.INTER_NEAREST)
v_pad, h_pad = (size - img.shape[0]) // 2, (size - img.shape[1]) // 2
img = cv2.copyMakeBorder(img, v_pad, v_pad, h_pad, h_pad, cv2.BORDER_REPLICATE)
return img
def unscale(self, results, scaling_factor, size):
"""
Takes the results of the model and transforms them back into the original dimensions of the input image.
"""
masks = results["masks"].astype(np.uint8)
masks = cv2.resize(
masks, (int(masks.shape[1] * scaling_factor), int(masks.shape[0] * scaling_factor)),
interpolation=cv2.INTER_NEAREST)
v_pad, h_pad = (masks.shape[0] - size[0]) // 2, (masks.shape[1] - size[1]) // 2
masks = masks[v_pad:-v_pad, h_pad:-h_pad]
rois = results["rois"] * scaling_factor
for roi in rois:
roi[0] = min(max(0, roi[0] - v_pad), size[0])
roi[1] = min(max(0, roi[1] - h_pad), size[1])
roi[2] = min(max(0, roi[2] - v_pad), size[0])
roi[3] = min(max(0, roi[3] - h_pad), size[1])
return masks, rois
def publish_segments(self):
"""
Publishes visualization of segmentation masks continuously.
"""
self.colors = []
for i in range(NUM_OF_COLORS):
self.colors.append((np.random.rand(3) * 255).astype(int))
while True:
if not self.stream_enabled:
time.sleep(1 / PUBLISH_RATE)
continue
start_time = time.time()
scores, masks, rois, color_img = self.get_masks()
masked_img = np.zeros(color_img.shape).astype("uint8")
contour_img = np.zeros(color_img.shape).astype("uint8")
if masks is not None and scores.size != 0:
number_of_masks = masks.shape[-1]
# Calculate the areas of masks
mask_areas = []
for i in range(number_of_masks):
width = np.abs(rois[i][0] - rois[i][2])
height = np.abs(rois[i][1] - rois[i][3])
mask_area = width * height
mask_areas.append(mask_area)
np_mask_areas = np.array(mask_areas)
mask_indices = np.argsort(np_mask_areas)
                # Add masks in order of their areas.
for i in mask_indices:
if (scores[i] > SEGMENT_SCORE):
indices = np.where(masks[:, :, i] == 1)
masked_img[indices[0], indices[1], :] = self.colors[i]
# Smoothen masks
masked_img = cv2.medianBlur(masked_img, 15)
                # Find contours and draw boundaries.
gray_image = cv2.cvtColor(masked_img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray_image, 50, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
# Draw contours:
for contour in contours:
area = cv2.contourArea(contour)
cv2.drawContours(contour_img, contour, -1, (255, 255, 255), 5)
masked_img = cv2.addWeighted(color_img, 0.6, masked_img, 0.4, 0)
masked_img = cv2.bitwise_or(masked_img, contour_img)
_, color_serialized = cv2.imencode(".tif", masked_img)
self.element.entry_write(
"color_mask", {"data": color_serialized.tobytes()}, maxlen=30)
time.sleep(max(0, (1 / PUBLISH_RATE) - (time.time() - start_time)))
def get_masks(self):
"""
Gets the latest data from the realsense, preprocesses it and returns the
segmentation masks, bounding boxes, and scores for each detected object.
"""
color_data = self.element.entry_read_n("realsense", "color", 1)
depth_data = self.element.entry_read_n("realsense", "depth", 1)
try:
color_data = color_data[0]["data"]
depth_data = depth_data[0]["data"]
        except (IndexError, KeyError):
            raise Exception("Could not get data. Is the realsense element running?")
depth_img = cv2.imdecode(np.frombuffer(depth_data, dtype=np.uint16), -1)
original_size = depth_img.shape[:2]
depth_img = self.scale_and_square(depth_img, self.scaling_factor, self.input_size)
depth_img = self.inpaint(depth_img)
depth_img = self.normalize(depth_img)
        # Decode the color frame up front so it can be returned in both modes
        color_img = cv2.imdecode(np.frombuffer(color_data, dtype=np.uint16), 1)
        if self.mode == "both":
            gray_img = cv2.imdecode(np.frombuffer(color_data, dtype=np.uint16), 0)
gray_img = self.scale_and_square(gray_img, self.scaling_factor, self.input_size)
input_img = np.zeros((self.input_size, self.input_size, 3))
input_img[..., 0] = gray_img
input_img[..., 1] = depth_img
input_img[..., 2] = depth_img
else:
input_img = np.stack((depth_img, ) * 3, axis=-1)
# Get results and unscale
results = self.model.detect([input_img], verbose=0)[0]
masks, rois = self.unscale(results, self.scaling_factor, original_size)
if masks.ndim < 2 or results["scores"].size == 0:
masks = None
results["scores"] = None
elif masks.ndim == 2:
masks = np.expand_dims(masks, axis=-1)
return results["scores"], masks, rois, color_img
def segment(self, _):
"""
Command for getting the latest segmentation masks and returning the results.
"""
scores, masks, rois, color_img = self.get_masks()
# Encoded masks in TIF format and package everything in dictionary
encoded_masks = []
if masks is not None and scores is not None:
for i in range(masks.shape[-1]):
_, encoded_mask = cv2.imencode(".tif", masks[..., i])
encoded_masks.append(encoded_mask.tobytes())
response_data = {
"rois": rois.tolist(),
"scores": scores.tolist(),
"masks": encoded_masks
}
else:
response_data = {
"rois": [],
"scores": [],
"masks": []
}
return Response(response_data, serialize=True)
if __name__ == "__main__":
evaluator = SDMaskRCNNEvaluator()
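# Standalone sketch (illustration only, not executed by the element above) of the
# resize-pad/crop round trip performed by scale_and_square() and unscale(): the
# frame is shrunk, padded to a square model input, and the padding is cropped
# back out afterwards. Odd (size - shape) differences leave the padded image one
# pixel short of `size`, a caveat shared with the methods above.
def _pad_crop_round_trip(size=512, scaling_factor=2):
    img = np.zeros((480, 640), dtype=np.uint8)
    small = cv2.resize(
        img, (img.shape[1] // scaling_factor, img.shape[0] // scaling_factor),
        interpolation=cv2.INTER_NEAREST)
    v_pad, h_pad = (size - small.shape[0]) // 2, (size - small.shape[1]) // 2
    padded = cv2.copyMakeBorder(small, v_pad, v_pad, h_pad, h_pad, cv2.BORDER_REPLICATE)
    restored = padded[v_pad:-v_pad, h_pad:-h_pad]
    return restored.shape == small.shape  # True for these dimensions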
|
from scraper import *
# Currently just a script to run a few demo cases for sanity checking purposes
# TODO: programmatically compare results
# Ordinary case
print(get_card_price("tarmogoyf"))
# 'Splinter' is a card with a name that is a substring of another
# this should return the correct card
print(get_card_price("splinter"))
# testing that you can search with random unicode characters
print(get_card_price("jace, vryn's prodigy"))
# testing search-by-set
print(get_card_price('Wasteland', 'Tempest'))
|
# vim: tabstop=2 shiftwidth=2 softtabstop=2 expandtab autoindent smarttab
from manipulator import *
import random
# sv_swarm
def testInteger():
name = 'int1'
pmin = -10
pmax = 10
# vals = [random.randint(1, 100) for i in range(pmin, pmax+1)]
p = IntegerParameter(name, pmin, pmax)
v = -10
pos = {name:0}
gb = {name:4}
lb = {name:9}
p.sv_swarm(pos, gb, lb, 0.5, 0.3, 0.3, v)
print pos
def testBoolean():
name = 'bool1'
p = BooleanParameter(name)
v = 0
pos = {name:0}
gb = {name:1}
lb = {name:0}
p.sv_swarm(pos, gb, lb, 0.5, 0.3, 0.3)
def testPermutation():
name = 'perm1'
p = PermutationParameter(name, range(10))
pos = {name: [7,3,1,4,2,5,8, 0,9,6]}
print pos
gb = {name: [3,1,4,2,5,7,8,6,9,0]}
lb = {name: [3,1,4,7,5,2,8, 0,9,6]}
p.sv_swarm(pos, gb, lb, 0, 0.5, 0.5, 'CX')
print pos
def testBooleanArray(func):
name = 'BA'
p = BooleanArray(name, 8)
pos = {name: numpy.array([1,0,0,1,0,0,1,1])}
gb = {name: numpy.array([0,0,0,0,1,1,1,1])}
lb = {name: numpy.array([1,0,0,1,0,1,1,0])}
print pos, gb, lb
getattr(p, func)(pos, gb, lb)
# p.sv_cross(pos, gb, lb, 0.3)
# p.sv_swarm(pos, gb, lb, 0.5, 0.3, 0.3)
# p.randomize(pos)
# print p.seed_value()
# p.sv_swarm_parallel(pos, gb, lb, 0.5, 0.3, 0.3)
print pos
def testFloatArray():
pass
testBooleanArray('sv_cross')
|
import os
def check_file_exists(full_path):
if not os.path.isfile(full_path):
message = "File NOT found \n"+full_path
raise Exception(message) |
import argparse
import ast
import json
import sys
def normalize(input):
data = []
with open(input) as f:
for line in f:
data.append(json.loads(line))
return data
def in_memory_normalize(input):
# may be OOM killed
    r = normalize(input)
jr = ast.literal_eval(json.dumps(r, sort_keys=True, indent=2))
nr = str(jr).replace("'", "\"")
print(nr)
def streaming_normalize(input):
data = []
# get the total line number
totalLine = sum(1 for line in open(input))
if totalLine > 1:
print("[\n")
#
with open(input) as f:
curLine = 0
for line in f:
curLine = curLine + 1
l = line.rstrip()
if l.endswith(','):
a = l[0:-1]
l = a
line=l
try:
a=json.loads(line)
except Exception as e:
#print(line)
continue
jr = ast.literal_eval(json.dumps(a, sort_keys=True, indent=2))
nr = str(jr).replace("'", "\"")
print(nr)
if curLine < totalLine:
print(",")
else:
print("]")
return data
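# Minimal usage sketch (illustration only): given a file whose lines are JSON
# objects (optionally ending with a trailing comma), streaming_normalize prints
# them back as one JSON array without loading the whole file into memory.
def _demo_streaming_normalize():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
        tmp.write('{"b": 1, "a": 2},\n')
        tmp.write('{"c": 3}\n')
        path = tmp.name
    streaming_normalize(path)  # prints "[", the two objects separated by ",", then "]"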
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="Specify the input Yaml file")
args = parser.parse_args()
if args.input is None:
print("Input file is not specified!")
else:
#in_memory_normalize(args.input)
streaming_normalize(args.input)
|
import numpy as np
a = intBuffer.data[1] + 0  # add zero to force a copy; otherwise a and b would change in the loop below
b = intBuffer.data[3] + 0
for i in range(0,intBuffer.data.shape[0]):
intBuffer.data[i] = i*i
|
# Copyright 2022 Shuhei Nitta. All rights reserved.
import os
from pathlib import Path
DATA_DIR = Path(os.environ.get("YA3_REPORT_DATA_DIR", "data"))
DATAFILE_SUFFIX = os.environ.get("YA3_REPORT_DATAFILE_SUFFIX", ".csv.gz")
|
class InvalidQueryException(BaseException):
pass
|
# Generated by Django 2.2.24 on 2021-12-02 09:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0002_customtext_homepage'),
]
operations = [
migrations.CreateModel(
name='Demo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('emailid', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.EmailField(max_length=244)),
],
),
migrations.AddField(
model_name='homepage',
name='username',
field=models.TextField(blank=True, null=True),
),
]
|
from django.http import HttpResponse, HttpResponseRedirect # noqa: 401
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from collections import Counter
import uuid
from .models import Choice, Question, Product
def home(request):
products = Product.objects.all()[:3]
return render(request, "home.html",{
'products':products
})
def about(request):
return render(request, "about.html",{})
def product(request, pid):
product = Product.objects.get(id=pid)
return render(request, "product.html",{
'product':product
})
def thanks(request):
products = {}
total = 0.00
transactionId = uuid.uuid1()
if request.COOKIES.get('basket'):
ids = request.COOKIES.get('basket').split(',')
products = Product.objects.filter(id__in=ids)
quantities = []
for p in products:
p.quantity = ids.count(str(p.id))
total = round(total + float(p.price) * p.quantity,2)
return render(request, "thanks.html",{
'products':products,
'total':total+5.99,
'transactionId': transactionId
})
def contact(request):
return render(request, "contact.html",{})
def payment(request):
products = {}
total = 0.00
transactionId = uuid.uuid1()
if request.COOKIES.get('basket'):
ids = request.COOKIES.get('basket').split(',')
products = Product.objects.filter(id__in=ids)
quantities = []
for p in products:
p.quantity = ids.count(str(p.id))
total = round(total + float(p.price) * p.quantity,2)
return render(request, "payment.html",{
'products':products,
'total':total+5.99,
'transactionId': transactionId
})
def products(request):
products = Product.objects.all()
return render(request, "products.html",{
'products':products
})
def basket(request):
products = {}
total = 0.00
if request.COOKIES.get('basket'):
ids = request.COOKIES.get('basket').split(',')
products = Product.objects.filter(id__in=ids)
quantities = []
for p in products:
p.quantity = ids.count(str(p.id))
total = round(total + float(p.price) * p.quantity,2)
return render(request, "basket.html",{
'products':products,
'total':total
})
def searchResults(request):
products = Product.objects.all()
results = len(products)
return render(request, "searchResults.html",{
'products':products,
'results':results
})
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions."""
return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(
reverse('polls:results', args=(question.id,))
)
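# The thanks, payment and basket views above repeat the same cookie parsing and
# total computation. A possible shared helper is sketched below for illustration
# only (it is not wired into the views; the name is hypothetical).
def _basket_summary(request):
    products = Product.objects.none()
    total = 0.00
    if request.COOKIES.get('basket'):
        ids = request.COOKIES.get('basket').split(',')
        products = Product.objects.filter(id__in=ids)
        for p in products:
            p.quantity = ids.count(str(p.id))
            total = round(total + float(p.price) * p.quantity, 2)
    return products, total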
|
import re
from imbi.endpoints import base
class _RequestHandlerMixin:
ID_KEY = ['id']
FIELDS = ['id', 'project_type_ids', 'name', 'fact_type', 'data_type',
'description', 'ui_options', 'weight']
DEFAULTS = {
'data_type': 'string',
'fact_type': 'free-form',
'weight': 0
}
GET_SQL = re.sub(r'\s+', ' ', """\
SELECT id, created_at, created_by, last_modified_at, last_modified_by,
project_type_ids, name, fact_type, data_type, description,
ui_options, weight
FROM v1.project_fact_types
WHERE id=%(id)s""")
class CollectionRequestHandler(_RequestHandlerMixin,
base.CollectionRequestHandler):
NAME = 'fact-types'
ITEM_NAME = 'fact-type'
COLLECTION_SQL = re.sub(r'\s+', ' ', """\
SELECT id, project_type_ids, name, fact_type, data_type, description,
ui_options, weight
FROM v1.project_fact_types
ORDER BY name, project_type_ids""")
POST_SQL = re.sub(r'\s+', ' ', """\
INSERT INTO v1.project_fact_types
(project_type_ids, created_by, name, fact_type, data_type,
description, ui_options, weight)
VALUES (%(project_type_ids)s, %(username)s, %(name)s,
%(fact_type)s, %(data_type)s, %(description)s,
%(ui_options)s, %(weight)s)
RETURNING id""")
class RecordRequestHandler(_RequestHandlerMixin, base.AdminCRUDRequestHandler):
NAME = 'fact-type'
DELETE_SQL = 'DELETE FROM v1.project_fact_types WHERE id=%(id)s'
PATCH_SQL = re.sub(r'\s+', ' ', """\
UPDATE v1.project_fact_types
SET last_modified_at=CURRENT_TIMESTAMP,
last_modified_by=%(username)s,
project_type_ids=%(project_type_ids)s,
name=%(name)s,
fact_type=%(fact_type)s,
data_type=%(data_type)s,
description=%(description)s,
ui_options=%(ui_options)s,
weight=%(weight)s
WHERE id=%(id)s""")
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains and evaluates unprocessing neural network.
Unprocessing Images for Learned Raw Denoising
http://timothybrooks.com/tech/unprocessing
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v1 as tf
from unprocessing import dataset
from unprocessing import estimator
from unprocessing import network
from tensorflow.contrib import training as contrib_training
FLAGS = flags.FLAGS
flags.DEFINE_string(
'model_dir',
None,
'Location at which to save model logs and checkpoints.')
flags.DEFINE_string(
'train_pattern',
None,
'Pattern for directory containing source JPG images for training.')
flags.DEFINE_string(
'test_pattern',
None,
'Pattern for directory containing source JPG images for testing.')
flags.DEFINE_integer(
'image_size',
256,
'Width and height to crop training and testing frames. '
'Must be a multiple of 16',
lower_bound=16)
flags.DEFINE_integer(
'batch_size',
16,
'Training batch size.',
lower_bound=1)
flags.DEFINE_float(
'learning_rate',
2e-5,
'Learning rate for Adam optimization.',
lower_bound=0.0)
flags.register_validator(
'image_size',
lambda image_size: image_size % 16 == 0,
    message='\'image_size\' must be a multiple of 16.')
flags.mark_flag_as_required('model_dir')
flags.mark_flag_as_required('train_pattern')
flags.mark_flag_as_required('test_pattern')
def main(_):
inference_fn = network.inference
hparams = contrib_training.HParams(learning_rate=FLAGS.learning_rate)
model_fn = estimator.create_model_fn(inference_fn, hparams)
config = tf.estimator.RunConfig(FLAGS.model_dir)
tf_estimator = tf.estimator.Estimator(model_fn=model_fn, config=config)
train_dataset_fn = dataset.create_dataset_fn(
FLAGS.train_pattern,
height=FLAGS.image_size,
width=FLAGS.image_size,
batch_size=FLAGS.batch_size)
eval_dataset_fn = dataset.create_dataset_fn(
FLAGS.test_pattern,
height=FLAGS.image_size,
width=FLAGS.image_size,
batch_size=FLAGS.batch_size)
train_spec, eval_spec = estimator.create_train_and_eval_specs(
train_dataset_fn, eval_dataset_fn)
tf.logging.set_verbosity(tf.logging.INFO)
tf.estimator.train_and_evaluate(tf_estimator, train_spec, eval_spec)
if __name__ == '__main__':
tf.app.run(main)
|
"""
This module provides utilities to write cross-version python code.
"""
import sys
py_version = sys.version_info
if py_version.major == 3:
BASE_STRING_TYPE = str
else:
BASE_STRING_TYPE = basestring
if py_version.major == 3 and py_version.minor >= 5:
from http import HTTPStatus as HTTPStatus
elif py_version.major == 3 and py_version.minor < 5:
import http.client as HTTPStatus
else:
import httplib as HTTPStatus
def is_string(value):
"""
Returns whether the specified value is a version independent string.
:param value:
        Value we want to evaluate
:return:
Boolean indicating if the value is a string. In Python 2.7 is an
object derived from ``basestring`` and in Python 3.x is an instance
of ``str``.
"""
return isinstance(value, BASE_STRING_TYPE)
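# Hedged usage sketch (added for illustration): is_string() accepts both plain
# and unicode strings on Python 2 and plain str on Python 3, but never numbers.
def _is_string_examples():
    assert is_string("plain str")
    assert is_string(u"unicode literal")
    assert not is_string(42)
    return True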
|
"""
Contains data utils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.SaltRemover import SaltRemover
from rdkit.Chem.FilterCatalog import *
import numpy as np
import pandas as pd
"""
Computes tanimoto dissimilarity array between two feature matrices.
Compares each row of X with each row of Y.
"""
def tanimoto_dissimilarity(X, Y, X_batch_size=50, Y_batch_size=50):
n_features = X.shape[-1]
if X.ndim == 1:
X = X.reshape(-1, n_features)
if Y.ndim == 1:
Y = Y.reshape(-1, n_features)
tan_sim = []
X_total_batches = X.shape[0] // X_batch_size + 1
Y_total_batches = Y.shape[0] // Y_batch_size + 1
for X_batch_i in range(X_total_batches):
X_start_idx = X_batch_i*X_batch_size
X_end_idx = min((X_batch_i+1)*X_batch_size, X.shape[0])
X_batch = X[X_start_idx:X_end_idx,:]
for Y_batch_i in range(Y_total_batches):
Y_start_idx = Y_batch_i*Y_batch_size
Y_end_idx = min((Y_batch_i+1)*Y_batch_size, Y.shape[0])
Y_batch = Y[Y_start_idx:Y_end_idx,:]
# adapted from: https://github.com/deepchem/deepchem/blob/2531eca8564c1dc68910d791b0bcd91fd586afb9/deepchem/trans/transformers.py#L752
            # pairwise intersection counts: sum of bitwise AND for each (x, y) pair
            numerator = np.dot(X_batch, Y_batch.T).flatten()
            # pairwise union counts: n_features minus positions where both bits are 0
            denominator = n_features - np.dot(1-X_batch, (1-Y_batch).T).flatten()
tan_sim.append(numerator / denominator)
tan_sim = np.hstack(tan_sim)
return 1.0 - tan_sim
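# Minimal sanity-check sketch (illustration only): the dot-product formulation in
# tanimoto_dissimilarity() matches a naive per-pair intersection/union computation
# on small binary matrices. The helper below is not called anywhere in this module.
def _tanimoto_sanity_check():
    rng = np.random.RandomState(0)
    X_demo = rng.randint(0, 2, size=(3, 16))
    Y_demo = rng.randint(0, 2, size=(2, 16))
    X_demo[:, 0] = 1  # guarantee a non-empty union for every pair
    Y_demo[:, 0] = 1
    fast = tanimoto_dissimilarity(X_demo, Y_demo).reshape(3, 2)
    naive = np.zeros((3, 2))
    for i in range(3):
        for j in range(2):
            intersection = np.sum(np.logical_and(X_demo[i], Y_demo[j]))
            union = np.sum(np.logical_or(X_demo[i], Y_demo[j]))
            naive[i, j] = 1.0 - float(intersection) / union
    return np.allclose(fast, naive)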
"""
Computes tanimoto dissimilarity between two vectors.
"""
def feature_dist_func_dict():
return {"tanimoto_dissimilarity": tanimoto_dissimilarity}
"""
Returns indices of duplicated smiles from x_smiles in y_smiles.
"""
def get_duplicate_smiles_in1d(x_smiles, y_smiles, smiles_are_canonical=True):
x_canon_smiles = x_smiles
y_canon_smiles = y_smiles
if not smiles_are_canonical:
x_canon_smiles = np.array([Chem.MolToSmiles(Chem.MolFromSmiles(x)) for x in x_smiles])
y_canon_smiles = np.array([Chem.MolToSmiles(Chem.MolFromSmiles(y)) for y in y_smiles])
y_duplicates = np.in1d(y_canon_smiles, x_canon_smiles)
idx_to_drop = list(np.arange(len(y_canon_smiles))[y_duplicates])
return idx_to_drop
"""
Computes avg cluster dissimilarity/distance of candidate clusters towards selected clusters.
clusters_ordered_ids is the ordering of clusters_avg_dissimilarity.
Assumes feature distance function returns array with distances between rows of X and Y.
"""
def get_avg_cluster_dissimilarity(clusters,
features,
selected_cluster_ids,
candidate_cluster_ids,
feature_dist_func=tanimoto_dissimilarity,
candidate_cluster_batch_size=2056):
clusters_ordered_ids = candidate_cluster_ids #[:] no need to make a copy
#clusters_avg_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),))
selected_cid_instances = np.in1d(clusters, selected_cluster_ids)
cluster_dist_means_list = []
total_batches = candidate_cluster_ids.shape[0] // candidate_cluster_batch_size + 1
for batch_i in range(total_batches):
start_idx = batch_i*candidate_cluster_batch_size
end_idx = min((batch_i+1)*candidate_cluster_batch_size, candidate_cluster_ids.shape[0])
candidate_batch = candidate_cluster_ids[start_idx:end_idx]
candidate_cid_instances = np.in1d(clusters, candidate_batch)
candidate_cluster_rep = np.repeat(clusters[candidate_cid_instances], len(clusters[selected_cid_instances]))
candidate_cluster_dist = feature_dist_func(features[selected_cid_instances,:], features[candidate_cid_instances,:])
dist_df = pd.DataFrame(data=np.hstack([candidate_cluster_dist.reshape(-1,1),
candidate_cluster_rep.reshape(-1,1)]),
columns=['dist', 'candidate_group'])
cluster_dist_means_list.append(dist_df.groupby('candidate_group').mean().loc[candidate_batch].values.flatten())
clusters_avg_dissimilarity = np.hstack(cluster_dist_means_list)
return clusters_ordered_ids, clusters_avg_dissimilarity
"""
----
curr_clusters_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),))
for selected_cid in selected_cluster_ids:
selected_cid_instances = np.where(clusters == selected_cid)[0]
candidate_cid_instances = np.in1d(clusters, candidate_cluster_ids)
candidate_cluster_rep = np.repeat(clusters[candidate_cid_instances], len(selected_cid_instances))
candidate_cluster_dist = feature_dist_func(features[selected_cid_instances,:], features[candidate_cid_instances,:])
dist_df = pd.DataFrame(data=np.hstack([candidate_cluster_dist.reshape(-1,1),
candidate_cluster_rep.reshape(-1,1)]),
columns=['dist', 'group'])
cluster_dist_means = dist_df.groupby('group').mean().values.flatten()
sorted_idx = np.argsort(candidate_cluster_ids)
rev_sorted_idx = np.zeros(len(candidate_cluster_ids), dtype=int)
rev_sorted_idx[sorted_idx] = np.arange(len(candidate_cluster_ids)) # adapted from: https://stackoverflow.com/a/10831155
curr_clusters_dissimilarity[:] = cluster_dist_means[rev_sorted_idx]
clusters_avg_dissimilarity += curr_clusters_dissimilarity
clusters_avg_dissimilarity /= len(selected_cluster_ids)
"""
"""
Computes avg cluster dissimilarity/distance of candidate clusters towards selected clusters.
Uses a disk-stored np.memmap matrix storing the instance dissimilarities.
"""
def get_avg_cluster_dissimilarity_from_file(clusters,
memmap_filename,
n_instances,
selected_cluster_ids,
candidate_cluster_ids,
candidate_cluster_batch_size=2056,
batched_clusters_method=True):
dissimilarity_matrix = np.memmap(memmap_filename, shape=(n_instances, n_instances),
dtype='float16', mode='r')
clusters_ordered_ids = candidate_cluster_ids[:]
clusters_avg_dissimilarity = np.zeros(shape=(len(candidate_cluster_ids),))
cluster_dist_means_list = []
selected_cid_instances = np.in1d(clusters, selected_cluster_ids)
if batched_clusters_method:
total_batches = candidate_cluster_ids.shape[0] // candidate_cluster_batch_size + 1
for batch_i in range(total_batches):
start_idx = batch_i*candidate_cluster_batch_size
end_idx = min((batch_i+1)*candidate_cluster_batch_size, candidate_cluster_ids.shape[0])
candidate_batch = candidate_cluster_ids[start_idx:end_idx]
candidate_cid_instances = np.in1d(clusters, candidate_batch)
candidate_cluster_rep = np.repeat(clusters[candidate_cid_instances], len(clusters[selected_cid_instances]))
dm_slice = dissimilarity_matrix[candidate_cid_instances, :][:,selected_cid_instances]
dm_slice = dm_slice.flatten().reshape(-1,1)
dist_df = pd.DataFrame(data=np.hstack([dm_slice,
candidate_cluster_rep.reshape(-1,1)]),
columns=['dist', 'candidate_group'])
cluster_dist_means_list.append(dist_df.groupby('candidate_group').mean().loc[candidate_batch].values.flatten())
else:
for ccid in clusters_ordered_ids:
ccid_instances_idx = np.where(clusters == ccid)[0]
dm_slice = dissimilarity_matrix[ccid_instances_idx, :][:,selected_cid_instances]
cluster_dist_means_list.append(np.mean(dm_slice))
clusters_avg_dissimilarity = np.hstack(cluster_dist_means_list)
del dissimilarity_matrix
return clusters_ordered_ids, clusters_avg_dissimilarity
"""
Computes dissimilarity matrix for a given row of features.
"""
def get_dissimilarity_matrix(features,
feature_dist_func=tanimoto_dissimilarity):
row_count = features.shape[0]
dissimilarity_matrix = np.zeros(shape=(row_count, row_count))
for i in range(row_count):
for j in range(row_count):
dissimilarity_matrix[i,j] = feature_dist_func(features[i:i+1,:], features[j:j+1,:])
return dissimilarity_matrix
"""
Returns a dissimilarity matrix slice from a disk-stored np.memmap matrix.
"""
def get_dissimilarity_matrix_from_file(instances_idx,
memmap_filename,
n_instances):
dissimilarity_matrix = np.memmap(memmap_filename, shape=(n_instances, n_instances),
dtype='float16', mode='r')
dm_slice = dissimilarity_matrix[instances_idx, :][:,instances_idx]
del dissimilarity_matrix
return dm_slice |
st = input('Input number')
try:
num = int(st)
result = num *100
print('%d, %d' % (num, result))
except:
print('Invalid Number')
print('end') |
#!/usr/bin/env python3
import os
from aws_cdk import core
from ds_dashboard.spoke import SpokeStack
app = core.App()
usecase_stack = SpokeStack(
app,
"ds-dashboard-spoke-stack",
)
app.synth()
|
# -*- coding: utf-8 -*-
import unittest
from ...util.byteutil import ByteUtil
# from ...util.hexutil import HexUtil
class ByteUtilTest(unittest.TestCase):
def test_split(self):
# okm = HexUtil.decodeHex('02a9aa6c7dbd64f9d3aa92f92a277bf54609dadf0b00'
# '828acfc61e3c724b84a7bfbe5efb603030526742e3ee'
# '89c7024e884e440f1ff376bb2317b2d64deb7c8322f4'
# 'c5015d9d895849411ba1d793a827')
data = [i for i in range(0, 80)]
a_data = [i for i in range(0, 32)]
b_data = [i for i in range(32, 64)]
c_data = [i for i in range(64, 80)]
a, b, c = ByteUtil.split(data, 32, 32, 16)
self.assertEqual(a, a_data)
self.assertEqual(b, b_data)
self.assertEqual(c, c_data)
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
# It is a tool to compare the inference results of the original model and optimized model.
import sys
import argparse
import numpy as np
import os
import random
from pathlib import Path
import statistics
import onnx
import onnx.utils
import psutil
import csv
import timeit
from datetime import datetime
from onnx import ModelProto, TensorProto, numpy_helper
from OnnxModel import OnnxModel
from bert_test_data import get_bert_inputs, generate_test_data, output_test_data
from bert_perf_test import create_session, onnxruntime_inference, setup_openmp_environ
def run_model(model_path, all_inputs, use_gpu, use_openmp, disable_optimization):
    # onnxruntime must be imported after the OpenMP environment variables are set,
    # so the import is delayed until inside this function.
import onnxruntime
graph_optimization_level = None
if disable_optimization:
graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
intra_op_num_threads = 1 if use_openmp else psutil.cpu_count(logical=False)
session = create_session(model_path, use_gpu, intra_op_num_threads, graph_optimization_level)
output_names = [output.name for output in session.get_outputs()]
results, latency_list = onnxruntime_inference(session, all_inputs, output_names)
return results, latency_list, output_names
def compare(baseline_results, treatment_results, verbose, rtol=1e-3, atol=1e-4):
# Validate the output of baseline and treatment, to make sure the results are similar.
diff_count = 0
max_rel_diff = 0
max_abs_diff = 0
for test_case_id, results in enumerate(baseline_results):
case_passed = True
for i in range(len(results)):
treatment_output = treatment_results[test_case_id][i]
rel_diff = np.amax(np.abs((treatment_output - results[i]) / results[i]))
abs_diff = np.amax(np.abs(treatment_output - results[i]))
max_rel_diff = max(max_rel_diff, rel_diff)
max_abs_diff = max(max_abs_diff, abs_diff)
if not np.allclose(results[i].tolist(), treatment_output.tolist(), rtol=rtol, atol=atol):
if case_passed:
case_passed = False
diff_count += 1
if verbose:
print("case {} output {}".format(test_case_id, i))
print("baseline={}\ntreatment={}".format(results[i].tolist(), treatment_output))
print("rel_diff={} abs_diff={}".format(rel_diff, abs_diff))
if diff_count == 0:
print("100% passed for {} random inputs given thresholds (rtol={}, atol={}).".format(len(baseline_results), rtol, atol))
else:
print("{} out of {} results not passed for thresholds (rtol={}, atol={}).".format(diff_count, len(baseline_results), rtol, atol))
print("maximum absolute difference={}".format(max_abs_diff))
print("maximum relative difference={}".format(max_rel_diff))
def run_test(baseline_model, optimized_model, output_dir, batch_size, sequence_length, use_gpu, test_cases, seed, use_openmp, verbose, rtol, atol):
# Try deduce input names from optimized model.
input_ids, segment_ids, input_mask = get_bert_inputs(optimized_model)
# Use random mask length for accuracy test. It might introduce slight inflation in latency reported in this script.
all_inputs = generate_test_data(batch_size, sequence_length, test_cases, seed, verbose, input_ids, segment_ids, input_mask, random_mask_length=True)
# OpenMP environment variables must be set before the very first "import onnxruntime"
if use_openmp:
setup_openmp_environ(omp_num_threads=psutil.cpu_count(logical=False), omp_wait_policy='ACTIVE')
else:
setup_openmp_environ(omp_num_threads=1, omp_wait_policy='ACTIVE')
baseline_results, baseline_latency, output_names = run_model(baseline_model, all_inputs, use_gpu, use_openmp, disable_optimization=True)
if verbose:
print("baseline average latency (all optimizations disabled): {} ms".format(statistics.mean(baseline_latency) * 1000))
if output_dir is not None:
for i, inputs in enumerate(all_inputs):
output_test_data(output_dir, i, inputs)
treatment_results, treatment_latency, treatment_output_names = run_model(optimized_model, all_inputs, use_gpu, use_openmp, disable_optimization=False)
if verbose:
print("treatment average latency: {} ms".format(statistics.mean(treatment_latency) * 1000))
# Validate the output of baseline and treatment, to make sure the results are similar.
compare(baseline_results, treatment_results, verbose, rtol, atol)
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--baseline_model', required=True, type=str,
help="baseline onnx model path.")
parser.add_argument('--optimized_model', required=True, type=str, default=None,
help="path of the optimized model. It shall have same inputs as the baseline model.")
parser.add_argument('--output_dir', required=False, type=str, default=None,
help="output test data path. If not specified, test data will not be saved.")
parser.add_argument('--batch_size', required=True, type=int,
help="batch size of input")
parser.add_argument('--sequence_length', required=True, type=int,
help="maximum sequence length of input")
parser.add_argument('--rtol', required=False, type=float, default=1e-3,
help="relative tolerance")
parser.add_argument('--atol', required=False, type=float, default=1e-4,
help="absolute tolerance")
parser.add_argument('--samples', required=False, type=int, default=100,
help="number of test cases to be generated")
parser.add_argument('--seed', required=False, type=int, default=3,
help="random seed")
parser.add_argument('--use_gpu', required=False, action='store_true', help="use GPU")
parser.set_defaults(use_gpu=False)
parser.add_argument('--openmp', required=False, action='store_true', help="use openmp")
parser.set_defaults(openmp=False)
parser.add_argument('--verbose', required=False, action='store_true', help="print verbose information")
parser.set_defaults(verbose=False)
args = parser.parse_args()
return args
def main():
args = parse_arguments()
if args.output_dir is not None:
# create the output directory if not existed
path = Path(args.output_dir)
path.mkdir(parents=True, exist_ok=True)
run_test(
args.baseline_model,
args.optimized_model,
args.output_dir,
args.batch_size,
args.sequence_length,
args.use_gpu,
args.samples,
args.seed,
args.openmp,
args.verbose,
args.rtol,
args.atol)
if __name__ == "__main__":
main()
|
import ccobra
class Matching(ccobra.CCobraModel):
"""
This model returns True if the mood of the prediction matches the mood of the conclusion
for the syllogisms AE1, AE2, EA1, EA2
"""
def __init__(self, name='Matching'):
super(Matching, self).__init__(
name, ['syllogistic'], ['verify'])
def predict(self, item, **kwargs):
enc_conclusion = ccobra.syllogistic.encode_response(item.choices[0], item.task)
if enc_conclusion in ["Eac", "Eca"]:
return True
return False
|
import itertools
from termtable.utils import RamCacheWrapper
_COLOUR_LISTS = {
'basic': [
('00', '000000'),
('01', '800000'),
('02', '008000'),
('03', '808000'),
('04', '000080'),
('05', '800080'),
('06', '008080'),
('07', 'c0c0c0'),
('08', '808080'),
('09', 'ff0000'),
('10', '00ff00'),
('11', 'ffff00'),
('12', '0000ff'),
('13', 'ff00ff'),
('14', '00ffff'),
('15', 'ffffff'),
],
'extended': [
('16', '000000'),
('17', '00005f'),
('18', '000087'),
('19', '0000af'),
('20', '0000d7'),
('21', '0000ff'),
('22', '005f00'),
('23', '005f5f'),
('24', '005f87'),
('25', '005faf'),
('26', '005fd7'),
('27', '005fff'),
('28', '008700'),
('29', '00875f'),
('30', '008787'),
('31', '0087af'),
('32', '0087d7'),
('33', '0087ff'),
('34', '00af00'),
('35', '00af5f'),
('36', '00af87'),
('37', '00afaf'),
('38', '00afd7'),
('39', '00afff'),
('40', '00d700'),
('41', '00d75f'),
('42', '00d787'),
('43', '00d7af'),
('44', '00d7d7'),
('45', '00d7ff'),
('46', '00ff00'),
('47', '00ff5f'),
('48', '00ff87'),
('49', '00ffaf'),
('50', '00ffd7'),
('51', '00ffff'),
('52', '5f0000'),
('53', '5f005f'),
('54', '5f0087'),
('55', '5f00af'),
('56', '5f00d7'),
('57', '5f00ff'),
('58', '5f5f00'),
('59', '5f5f5f'),
('60', '5f5f87'),
('61', '5f5faf'),
('62', '5f5fd7'),
('63', '5f5fff'),
('64', '5f8700'),
('65', '5f875f'),
('66', '5f8787'),
('67', '5f87af'),
('68', '5f87d7'),
('69', '5f87ff'),
('70', '5faf00'),
('71', '5faf5f'),
('72', '5faf87'),
('73', '5fafaf'),
('74', '5fafd7'),
('75', '5fafff'),
('76', '5fd700'),
('77', '5fd75f'),
('78', '5fd787'),
('79', '5fd7af'),
('80', '5fd7d7'),
('81', '5fd7ff'),
('82', '5fff00'),
('83', '5fff5f'),
('84', '5fff87'),
('85', '5fffaf'),
('86', '5fffd7'),
('87', '5fffff'),
('88', '870000'),
('89', '87005f'),
('90', '870087'),
('91', '8700af'),
('92', '8700d7'),
('93', '8700ff'),
('94', '875f00'),
('95', '875f5f'),
('96', '875f87'),
('97', '875faf'),
('98', '875fd7'),
('99', '875fff'),
('100', '878700'),
('101', '87875f'),
('102', '878787'),
('103', '8787af'),
('104', '8787d7'),
('105', '8787ff'),
('106', '87af00'),
('107', '87af5f'),
('108', '87af87'),
('109', '87afaf'),
('110', '87afd7'),
('111', '87afff'),
('112', '87d700'),
('113', '87d75f'),
('114', '87d787'),
('115', '87d7af'),
('116', '87d7d7'),
('117', '87d7ff'),
('118', '87ff00'),
('119', '87ff5f'),
('120', '87ff87'),
('121', '87ffaf'),
('122', '87ffd7'),
('123', '87ffff'),
('124', 'af0000'),
('125', 'af005f'),
('126', 'af0087'),
('127', 'af00af'),
('128', 'af00d7'),
('129', 'af00ff'),
('130', 'af5f00'),
('131', 'af5f5f'),
('132', 'af5f87'),
('133', 'af5faf'),
('134', 'af5fd7'),
('135', 'af5fff'),
('136', 'af8700'),
('137', 'af875f'),
('138', 'af8787'),
('139', 'af87af'),
('140', 'af87d7'),
('141', 'af87ff'),
('142', 'afaf00'),
('143', 'afaf5f'),
('144', 'afaf87'),
('145', 'afafaf'),
('146', 'afafd7'),
('147', 'afafff'),
('148', 'afd700'),
('149', 'afd75f'),
('150', 'afd787'),
('151', 'afd7af'),
('152', 'afd7d7'),
('153', 'afd7ff'),
('154', 'afff00'),
('155', 'afff5f'),
('156', 'afff87'),
('157', 'afffaf'),
('158', 'afffd7'),
('159', 'afffff'),
('160', 'd70000'),
('161', 'd7005f'),
('162', 'd70087'),
('163', 'd700af'),
('164', 'd700d7'),
('165', 'd700ff'),
('166', 'd75f00'),
('167', 'd75f5f'),
('168', 'd75f87'),
('169', 'd75faf'),
('170', 'd75fd7'),
('171', 'd75fff'),
('172', 'd78700'),
('173', 'd7875f'),
('174', 'd78787'),
('175', 'd787af'),
('176', 'd787d7'),
('177', 'd787ff'),
('178', 'd7af00'),
('179', 'd7af5f'),
('180', 'd7af87'),
('181', 'd7afaf'),
('182', 'd7afd7'),
('183', 'd7afff'),
('184', 'd7d700'),
('185', 'd7d75f'),
('186', 'd7d787'),
('187', 'd7d7af'),
('188', 'd7d7d7'),
('189', 'd7d7ff'),
('190', 'd7ff00'),
('191', 'd7ff5f'),
('192', 'd7ff87'),
('193', 'd7ffaf'),
('194', 'd7ffd7'),
('195', 'd7ffff'),
('196', 'ff0000'),
('197', 'ff005f'),
('198', 'ff0087'),
('199', 'ff00af'),
('200', 'ff00d7'),
('201', 'ff00ff'),
('202', 'ff5f00'),
('203', 'ff5f5f'),
('204', 'ff5f87'),
('205', 'ff5faf'),
('206', 'ff5fd7'),
('207', 'ff5fff'),
('208', 'ff8700'),
('209', 'ff875f'),
('210', 'ff8787'),
('211', 'ff87af'),
('212', 'ff87d7'),
('213', 'ff87ff'),
('214', 'ffaf00'),
('215', 'ffaf5f'),
('216', 'ffaf87'),
('217', 'ffafaf'),
('218', 'ffafd7'),
('219', 'ffafff'),
('220', 'ffd700'),
('221', 'ffd75f'),
('222', 'ffd787'),
('223', 'ffd7af'),
('224', 'ffd7d7'),
('225', 'ffd7ff'),
('226', 'ffff00'),
('227', 'ffff5f'),
('228', 'ffff87'),
('229', 'ffffaf'),
('230', 'ffffd7'),
('231', 'ffffff'),
],
'greyscale': [
('232', '080808'),
('233', '121212'),
('234', '1c1c1c'),
('235', '262626'),
('236', '303030'),
('237', '3a3a3a'),
('238', '444444'),
('239', '4e4e4e'),
('240', '585858'),
('241', '626262'),
('242', '6c6c6c'),
('243', '767676'),
('244', '808080'),
('245', '8a8a8a'),
('246', '949494'),
('247', '9e9e9e'),
('248', 'a8a8a8'),
('249', 'b2b2b2'),
('250', 'bcbcbc'),
('251', 'c6c6c6'),
('252', 'd0d0d0'),
('253', 'dadada'),
('254', 'e4e4e4'),
('255', 'eeeeee'),
]
}
def _build_map(colour_tuples):
bins = {}
l = None
for tup in colour_tuples:
if l is None:
l = len(tup[1])
component = tup[1][:2]
if component not in bins:
bins[component] = []
bins[component].append((tup[0], tup[1][2:]))
if l == 2:
for c in [key for key in bins]:
bins[c] = int(bins[c][0][0])
else:
for c in [key for key in bins]:
bins[c] = _build_map(bins[c])
return bins
def mult_colspace_to_hex(value):
return str(hex(int(value * 255)))[2:].rjust(2, '0')
def hex_colspace_to_mult(hex_str):
value = int(hex_str, 16)
return float(value)/255.0
def hex_col_to_mult(hex_col, default_alpha=1):
hex_components = [hex_col[1:3], hex_col[3:5], hex_col[5:7]]
if len(hex_col) == 9:
hex_components.append(hex_col[7:9])
else:
hex_components.append(mult_colspace_to_hex(default_alpha))
return [hex_colspace_to_mult(s) for s in hex_components]
def mult_col_to_hex(mult_col):
return '#{}'.format(''.join(mult_colspace_to_hex(comp) for comp in mult_col))
@RamCacheWrapper
def blend_colours(fg_hex_col, bg_hex_col, default_fg_col='#ffffff', default_bg_col='#000000', default_fg_alpha=0.9, default_bg_alpha=1.0):
if not fg_hex_col or fg_hex_col < 0:
fg_hex_col = default_fg_col
if not bg_hex_col or bg_hex_col < 0:
bg_hex_col = default_bg_col
fg_col = hex_col_to_mult(fg_hex_col, default_fg_alpha)
bg_col = hex_col_to_mult(bg_hex_col, default_bg_alpha)
res_alpha = 1.0 - (1.0 - fg_col[3]) * (1.0 - bg_col[3])
res_col = [(fg_col[i] * fg_col[3] + bg_col[i] * bg_col[3] * (1 - fg_col[3])) / res_alpha for i in xrange(3)]
res_col.append(res_alpha)
return mult_col_to_hex(res_col)
def colour_distance(hex_col_1, hex_col_2):
m1 = hex_col_to_mult(hex_col_1)[:3]
m2 = hex_col_to_mult(hex_col_2)[:3]
return sum((comp1 - comp2) ** 2 for comp1, comp2 in zip(m1, m2))
_BASIC_MAP = _build_map(_COLOUR_LISTS['basic'])
_EXTENDED_MAP = _build_map(_COLOUR_LISTS['extended'])
_GREYSCALE_MAP = _build_map(_COLOUR_LISTS['greyscale'])
_TERM_COL_MAP = dict(itertools.chain(*[_COLOUR_LISTS[key] for key in _COLOUR_LISTS]))
@RamCacheWrapper
def get_closest_term_colour(hex_col, cmap=None, include_details=False):
if hex_col.startswith('#'):
hex_col = hex_col[1:]
if len(hex_col) > 6:
hex_col = hex_col[:6]
if cmap is None:
if hex_col[0:2] == hex_col[2:4] and hex_col[2:4] == hex_col[4:6]:
return get_closest_term_colour(hex_col, cmap=_GREYSCALE_MAP, include_details=include_details)
return get_closest_term_colour(hex_col, cmap=_EXTENDED_MAP, include_details=include_details)
component = hex_col[:2]
c_val = int(component, 16)
closest = min((max(c_val - int(c, 16), int(c, 16) - c_val), c) for c in cmap)[1]
if isinstance(cmap[closest], int):
if include_details:
return cmap[closest], closest
return cmap[closest]
if include_details:
term_col, hex_part = get_closest_term_colour(hex_col[2:], cmap[closest], include_details=True)
if len(hex_col) == 6:
return term_col, '#' + closest + hex_part
return term_col, closest + hex_part
return get_closest_term_colour(hex_col[2:], cmap[closest])
def term_col_to_hex(term_col):
return '#' + _TERM_COL_MAP[str(term_col)]
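# Hedged usage sketch (added for illustration): map an arbitrary hex colour to
# the nearest 256-colour terminal index and back to its reference hex value.
def _closest_term_colour_examples():
    exact = get_closest_term_colour('#d75f00')   # an exact xterm colour -> 166
    nearby = get_closest_term_colour('#d05c10')  # a nearby colour snaps to the same bin
    return exact, nearby, term_col_to_hex(exact)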
|
"""
This script determines whether a 450 Hz camera can observe multiple frames
during the partial occultation of a star by the moon.
If the beginning and end times of a partial occultation could be measured accurately,
it could be used to determine the radius of the star and hence the radius of
its transiting planets.
"""
import numpy as np
import matplotlib.pyplot as pl
import astropy.units as u
from astropy import log
LUNAR_PERIOD = 27.3 * u.day
LUNAR_RADIUS = 1737.4 * u.km
LUNAR_DISTANCE = 384472 * u.km
RADII_MIN, RADII_MAX = 0.1, 30
DISTANCE_MIN, DISTANCE_MAX = 1, 100
def angular_diameter(radius, distance):
"""Returns the apparent (angular) diameter of an object on the sky [arcsec],
given its intrinsic radius and distance.
Parameters
----------
radius : astropy `Quantity` object
distance : astropy `Quantity` object
Returns
-------
angular_diameter : astropy `Quantity` object
apparent diameter in arcseconds
"""
result = 2 * np.arctan2(2 * radius, 2. * distance)
return result.to(u.arcsec)
def angular_speed_of_the_moon():
"""Returns the angular speed of the moon in the sky, relative to the stars."""
return (360.*u.deg / LUNAR_PERIOD).to(u.arcsec / u.second)
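# Worked example (illustration only): a Sun-sized star at 10 pc subtends roughly
# 1e-3 arcsec, and at the ~0.55 arcsec/s lunar rate the limb crosses it in about
# a millisecond or two, i.e. less than a single 450 Hz frame.
def occultation_frames(radius=1 * u.solRad, distance=10 * u.pc, frame_rate=450 / u.second):
    """Returns the (dimensionless) number of frames captured during one crossing."""
    duration = angular_diameter(radius, distance) / angular_speed_of_the_moon()
    return (duration * frame_rate).decompose()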
if __name__ == '__main__':
log.info("The moon moves at {:.2f}".format(angular_speed_of_the_moon()))
log.info("The apparent diameter of the moon is {:.2f}".format(angular_diameter(LUNAR_RADIUS, LUNAR_DISTANCE)))
radii = np.linspace(RADII_MIN, RADII_MAX, 100) * u.solRad
distances = np.linspace(DISTANCE_MIN, DISTANCE_MAX, 100) * u.pc
grid = np.meshgrid(radii, distances)
durations = angular_diameter(grid[0], grid[1]) / angular_speed_of_the_moon()
hz_for_10_samples = 450 * durations.value
pl.figure()
pl.imshow(hz_for_10_samples,
extent=(RADII_MIN, RADII_MAX, DISTANCE_MIN, DISTANCE_MAX),
origin="lower",
aspect="auto",
vmin=0, vmax=10,
interpolation="nearest",
label="duration [s]",
cmap="ocean_r")
cbar = pl.colorbar()
cbar.ax.set_ylabel("# Frames", fontsize=20)
pl.xlabel("Stellar radius [sol rad]", fontsize=20)
pl.ylabel("Distance to the star [pc]", fontsize=20)
pl.legend()
pl.title("# Frames obtained by a 450 Hz camera during\n"
"the partial occulation of a star by the moon",
fontsize=20)
pl.tight_layout()
pl.savefig("the-answer.pdf")
pl.close()
|
import re
import unittest
from stix_shifter.stix_translation import stix_translation
from stix_shifter_utils.utils.error_response import ErrorCode
translation = stix_translation.StixTranslation()
def _remove_timestamp_from_query_ver1(queries):
pattern = r'\s*AND\s*EventTime\s*BETWEEN\s*\\"\d{4}-\d{2}-\d{2}T\d{2}:' \
r'\d{2}:\d{2}\.\d{3}Z\\"\s*AND\s*\\"\d{4}-\d{2}-\d{2}T\d{2}:' \
r'\d{2}:\d{2}\.\d{3}Z\\"",\s*"fromDate":\s*"\d{4}-\d{2}-\d{2}T\d{2}:' \
r'\d{2}:\d{2}\.\d{3}Z",\s*"toDate":\s*"\d{4}-\d{2}-\d{2}T\d{2}:' \
r'\d{2}:\d{2}\.\d{3}Z",\s*"limit":\s*10000'
if isinstance(queries, list):
return [re.sub(pattern, '', str(query)) for query in queries]
elif isinstance(queries, str):
return re.sub(pattern, '', queries)
def _remove_timestamp_from_query_ver2(queries):
pattern = r'\s*AND\s*EventTime\s*BETWEEN\s*\\"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:' \
r'\d{2}\.\d{3}Z\\"\s*AND\s*\\"\d{4}-\d{2}-\d{2}T\d{2}:' \
r'\d{2}:\d{2}\.\d{3}Z\\"\)",\s*"fromDate":\s*"\d{4}-\d{2}-\d{2}T\d{2}:' \
r'\d{2}:\d{2}\.\d{3}Z",\s*"toDate":\s*"\d{4}-\d{2}-\d{2}T\d{2}:' \
r'\d{2}:\d{2}\.\d{3}Z",\s*"limit":\s*10000'
if isinstance(queries, list):
return [re.sub(pattern, '', str(query)) for query in queries]
elif isinstance(queries, str):
return re.sub(pattern, '', queries)
class TestQueryTranslator(unittest.TestCase):
"""
class to perform unit test case for sentinelone translate query
"""
if __name__ == "__main__":
unittest.main()
def _test_query_assertions(self, query, queries):
"""
to assert the each query in the list against expected result
"""
self.assertIsInstance(queries, list)
self.assertIsInstance(query, dict)
self.assertIsInstance(query['queries'], list)
for index, each_query in enumerate(query.get('queries'), start=0):
self.assertEqual(each_query, queries[index])
def test_ipv4_query(self):
""" test to check ipv4 stix pattern to native data source query """
stix_pattern = "[ipv4-addr:value = '164.132.169.172']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(srcIp = \\"164.132.169.172\\" '
'OR dstIp = \\"164.132.169.172\\" '
'OR srcMachineIP = \\"164.132.169.172\\") AND EventTime '
'BETWEEN \\"2022-04-15T12:33:32.255Z\\" '
'AND \\"2022-04-15T12:38:32.255Z\\"", '
'"fromDate": "2022-04-15T12:33:32.255Z", '
'"toDate": "2022-04-15T12:38:32.255Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_network_traffic_query(self):
""" test to check network traffic stix pattern to native data source query """
stix_pattern = "[network-traffic:dst_port= 3389]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "dstPort = \\"3389\\" AND EventTime '
'BETWEEN \\"2022-02-22T09:16:24.526Z\\" AND \\"2022-02-22T09:21:24.526Z\\"", '
'"fromDate": "2022-02-22T09:16:24.526Z", '
'"toDate": "2022-02-22T09:21:24.526Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_network_query_greater_than(self):
""" test to check network traffic stix pattern to native data source query """
stix_pattern = "[network-traffic:dst_port> 3000]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "dstPort > \\"3000\\" AND EventTime '
'BETWEEN \\"2022-02-22T09:18:09.727Z\\" AND \\"2022-02-22T09:23:09.727Z\\"", '
'"fromDate": "2022-02-22T09:18:09.727Z", '
'"toDate": "2022-02-22T09:23:09.727Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_network_query_not_equals(self):
""" test to check network traffic stix pattern to native data source query """
stix_pattern = "[network-traffic:dst_port!= 22]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "dstPort != \\"22\\" AND EventTime '
'BETWEEN \\"2022-02-22T09:20:09.933Z\\" AND \\"2022-02-22T09:25:09.933Z\\"", '
'"fromDate": "2022-02-22T09:20:09.933Z", '
'"toDate": "2022-02-22T09:25:09.933Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_network_query_less_than(self):
""" test to check network traffic stix pattern to native data source query """
stix_pattern = "[network-traffic:dst_port< 22]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "dstPort < \\"22\\" AND EventTime '
'BETWEEN \\"2022-02-22T09:21:54.124Z\\" '
'AND \\"2022-02-22T09:26:54.124Z\\"", '
'"fromDate": "2022-02-22T09:21:54.124Z", '
'"toDate": "2022-02-22T09:26:54.124Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_network_query_lessthan_or_equals(self):
""" test to check network traffic stix pattern to native data source query """
stix_pattern = "[network-traffic:dst_port<= 22]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "dstPort <= \\"22\\" AND EventTime '
'BETWEEN \\"2022-02-22T09:23:42.790Z\\" '
'AND \\"2022-02-22T09:28:42.790Z\\"", '
'"fromDate": "2022-02-22T09:23:42.790Z", '
'"toDate": "2022-02-22T09:28:42.790Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_network_query_greaterthan_or_equals(self):
""" test to check network traffic stix pattern to native data source query """
stix_pattern = "[network-traffic:dst_port>= 22]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = [
'{"query": "dstPort >= \\"22\\" AND EventTime BETWEEN \\"2022-02-20T12:16:36.638Z\\" '
'AND \\"2022-02-20T12:21:36.638Z\\"", "fromDate": "2022-02-20T12:16:36.638Z", '
'"toDate": "2022-02-20T12:21:36.638Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_network_query_in_operator(self):
""" test to check network traffic stix pattern to native data source query """
stix_pattern = "[network-traffic:dst_port IN (80,3389)]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = [
'{"query": "dstPort IN (\\"80\\",\\"3389\\") AND EventTime '
'BETWEEN \\"2022-02-20T12:23:23.975Z\\" AND \\"2022-02-20T12:28:23.975Z\\"", '
'"fromDate": "2022-02-20T12:23:23.975Z", '
'"toDate": "2022-02-20T12:28:23.975Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_process_like_operator(self):
""" test to check process stix pattern to native data source query """
stix_pattern = "[process:name LIKE 'svchost.exe']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(srcProcName in contains anycase (\\"svchost.exe\\") '
'OR srcProcParentName in contains anycase (\\"svchost.exe\\") '
'OR tgtProcName in contains anycase (\\"svchost.exe\\")) '
'AND EventTime BETWEEN \\"2022-03-17T06:49:36.915Z\\" '
'AND \\"2022-03-17T06:54:36.915Z\\"", '
'"fromDate": "2022-03-17T06:49:36.915Z", '
'"toDate": "2022-03-17T06:54:36.915Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_process_matches_operator(self):
""" test to check process stix pattern to native data source query """
stix_pattern = "[process:name MATCHES 'svchost.exe']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(srcProcName regexp \\"svchost.exe\\" '
'OR srcProcParentName regexp \\"svchost.exe\\" '
'OR tgtProcName regexp \\"svchost.exe\\") '
'AND EventTime BETWEEN \\"2022-03-17T06:53:00.681Z\\" '
'AND \\"2022-03-17T06:58:00.681Z\\"", '
'"fromDate": "2022-03-17T06:53:00.681Z", '
'"toDate": "2022-03-17T06:58:00.681Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_process_created_query(self):
""" test to check process stix pattern to native data source query """
stix_pattern = "[process:created >= '2019-09-04T09:29:29.0882Z']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(srcProcStartTime >= \\"2019-09-04T09:29:29.0882Z\\" '
'OR tgtProcStartTime >= \\"2019-09-04T09:29:29.0882Z\\" '
'OR srcProcParentStartTime >= \\"2019-09-04T09:29:29.0882Z\\") '
'AND EventTime BETWEEN \\"2022-04-15T12:45:11.518Z\\" '
'AND \\"2022-04-15T12:50:11.518Z\\"", '
'"fromDate": "2022-04-15T12:45:11.518Z", '
'"toDate": "2022-04-15T12:50:11.518Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_query_from_multiple_comparison_expressions_joined_by_and(self):
""" test to check multiple comparison stix pattern to native data source query """
stix_pattern = "[ x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os = 'windows' AND " \
"file:extensions.'x-sentinelone-file'.file_type IN ('PE')]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(tgtFileType IN (\\"PE\\") '
'AND endpointOs = \\"WINDOWS\\") AND EventTime '
'BETWEEN \\"2022-03-11T10:34:07.700Z\\" '
'AND \\"2022-03-11T10:39:07.700Z\\"", '
'"fromDate": "2022-03-11T10:34:07.700Z", '
'"toDate": "2022-03-11T10:39:07.700Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_file_query(self):
""" test to check file stix pattern to native data source query """
stix_pattern = "[file:name LIKE 'WindowsApplication1']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "fileFullName in contains anycase (\\"WindowsApplication1\\") '
'AND EventTime BETWEEN \\"2022-03-17T04:25:39.478Z\\" '
'AND \\"2022-03-17T04:30:39.478Z\\"", '
'"fromDate": "2022-03-17T04:25:39.478Z", '
'"toDate": "2022-03-17T04:30:39.478Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_query_from_morethan_two_comparison_expressions_joined_by_and(self):
""" test to check more than two comparison expressions """
stix_pattern = "[user-account:account_login LIKE 'ADMINISTRATOR' " \
"AND x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os = 'windows' " \
"AND x-sentinelone-indicator:indicator_name = 'PreloadInjection']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(indicatorName = \\"PreloadInjection\\" '
'AND (endpointOs = \\"WINDOWS\\" '
'AND loginsUserName in contains anycase (\\"ADMINISTRATOR\\"))) '
'AND EventTime BETWEEN \\"2022-04-15T12:48:51.738Z\\" '
'AND \\"2022-04-15T12:53:51.738Z\\"", '
'"fromDate": "2022-04-15T12:48:51.738Z", '
'"toDate": "2022-04-15T12:53:51.738Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_multiple_observation_query(self):
""" test to check multiple observation query """
stix_pattern = "([file: hashes.MD5 = '0bbd4d92d3a0178463ef6e0ad46c986a' " \
"AND file:extensions.'x-sentinelone-file'.file_extension = 'log'" \
" AND x-oca-event:action = 'File Rename'] AND " \
"[file:extensions.'x-sentinelone-file'.file_type = 'PE'] AND " \
"[ x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"agent_version = '21.6.6.1200' " \
"AND network-traffic:extensions.'x-sentinelone-network-action'." \
"connection_status ='SUCCESS' ])" \
"START t'2019-10-01T00:00:00.030Z' STOP t'2021-10-07T00:00:00.030Z' "
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
queries = ['{"query": "((eventType = \\"FILE RENAME\\" AND '
'(tgtFileExtension = \\"log\\" AND '
'(tgtFileMd5 = \\"0bbd4d92d3a0178463ef6e0ad46c986a\\" OR '
'tgtFileOldMd5 = \\"0bbd4d92d3a0178463ef6e0ad46c986a\\" OR '
'srcProcImageMd5 = \\"0bbd4d92d3a0178463ef6e0ad46c986a\\" OR '
'tgtProcImageMd5 = \\"0bbd4d92d3a0178463ef6e0ad46c986a\\"))) '
'AND EventTime BETWEEN \\"2019-10-01T00:00:00.030Z\\" '
'AND \\"2021-10-07T00:00:00.030Z\\") '
'OR (tgtFileType = \\"PE\\" AND EventTime '
'BETWEEN \\"2019-10-01T00:00:00.030Z\\" AND \\"2021-10-07T00:00:00.030Z\\") '
'OR ((netConnStatus = \\"SUCCESS\\" AND agentVersion = \\"21.6.6.1200\\") '
'AND EventTime BETWEEN \\"2019-10-01T00:00:00.030Z\\" '
'AND \\"2021-10-07T00:00:00.030Z\\")", '
'"fromDate": "2019-10-01T00:00:00.030Z", '
'"toDate": "2021-10-07T00:00:00.030Z", "limit": 10000}']
self._test_query_assertions(query, queries)
def test_negate_query(self):
""" test to check negate query """
stix_pattern = "[x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os NOT IN('windows')]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "endpointOs NOT IN (\\"WINDOWS\\") AND EventTime '
'BETWEEN \\"2022-02-25T11:17:57.613Z\\" AND \\"2022-02-25T11:22:57.613Z\\"", '
'"fromDate": "2022-02-25T11:17:57.613Z", '
'"toDate": "2022-02-25T11:22:57.613Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_boolean_true_value_query(self):
""" test to check boolean true query """
stix_pattern = "[user-account:is_privileged = 'true']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "loginIsAdministratorEquivalent is true AND EventTime '
'BETWEEN \\"2022-02-20T13:17:01.361Z\\" AND \\"2022-02-20T13:22:01.361Z\\"", '
'"fromDate": "2022-02-20T13:17:01.361Z", '
'"toDate": "2022-02-20T13:22:01.361Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_boolean_false_value_query(self):
""" test to check boolean false query """
stix_pattern = "[user-account:is_privileged = 'false']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = [
'{"query": "loginIsAdministratorEquivalent is false AND EventTime '
'BETWEEN \\"2022-02-20T13:18:21.764Z\\" AND \\"2022-02-20T13:23:21.764Z\\"", '
'"fromDate": "2022-02-20T13:18:21.764Z", '
'"toDate": "2022-02-20T13:23:21.764Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_merge_similar_element_timestamp_query(self):
""" test to check similar element timestamp query """
stix_pattern = "[network-traffic:src_port = 62024 AND " \
"network-traffic:protocols[*] = 'tcp'] START " \
"t'2019-10-01T08:43:10.003Z' STOP t'2019-11-30T10:43:10.005Z' "
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
queries = ['{"query": "(netProtocolName = \\"tcp\\" AND srcPort = \\"62024\\") '
'AND EventTime BETWEEN \\"2019-10-01T08:43:10.003Z\\" '
'AND \\"2019-11-30T10:43:10.005Z\\"", '
'"fromDate": "2019-10-01T08:43:10.003Z", '
'"toDate": "2019-11-30T10:43:10.005Z", "limit": 10000}']
self._test_query_assertions(query, queries)
def test_is_reversed_parm_query(self):
""" test to check reversed parameter query """
stix_pattern = "[process:extensions.'x-sentinelone-process'." \
"publisher = 'MICROSOFT WINDOWS PUBLISHER' " \
"AND network-traffic:src_port = 62024 ] START " \
"t'2019-10-01T08:43:10.003Z' STOP t'2019-11-30T10:43:10.005Z' "
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
queries = ['{"query": "(srcPort = \\"62024\\" '
'AND (srcProcPublisher = \\"MICROSOFT WINDOWS PUBLISHER\\" '
'OR tgtProcPublisher = \\"MICROSOFT WINDOWS PUBLISHER\\")) '
'AND EventTime BETWEEN \\"2019-10-01T08:43:10.003Z\\" '
'AND \\"2019-11-30T10:43:10.005Z\\"", '
'"fromDate": "2019-10-01T08:43:10.003Z", '
'"toDate": "2019-11-30T10:43:10.005Z", "limit": 10000}']
self._test_query_assertions(query, queries)
def test_multiple_observation_with_qualifier_query(self):
""" test to check multiple observation qualifier query """
stix_pattern = "[file:size > 10 ] START t'2022-01-01T00:00:00.030Z' " \
"STOP t'2022-02-28T00:00:00.030Z' AND [ " \
"file:extensions.'x-sentinelone-file'." \
"file_description = 'Windows Push " \
"Notifications User Service_2d02eb' AND " \
"x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os = 'windows'] "
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver2(query['queries'])
queries = ['{"query": "(tgtFileSize > \\"10\\" AND EventTime '
'BETWEEN \\"2022-01-01T00:00:00.030Z\\" '
'AND \\"2022-02-28T00:00:00.030Z\\") '
'OR ((endpointOs = \\"WINDOWS\\" AND '
'tgtFileDescription = \\"Windows Push Notifications '
'User Service_2d02eb\\") AND EventTime '
'BETWEEN \\"2022-03-03T06:11:48.907Z\\" '
'AND \\"2022-03-03T06:16:48.907Z\\")", '
'"fromDate": "2022-01-01T00:00:00.030Z", '
'"toDate": "2022-03-03T06:16:48.907Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver2(queries)
self._test_query_assertions(query, queries)
def test_not_include_filter_query(self):
"""test to check not include filter query"""
stix_pattern = "[domain-name:value!='dc-integrations.traps.paloaltonetworks.com']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(dnsRequest != \\"dc-integrations.traps.paloaltonetworks.com\\" '
'OR dnsResponse != \\"dc-integrations.traps.paloaltonetworks.com\\" '
'OR loginAccountDomain != \\"dc-integrations.traps.paloaltonetworks.com\\") '
'AND EventTime BETWEEN \\"2022-04-15T12:42:44.494Z\\" '
'AND \\"2022-04-15T12:47:44.494Z\\"", "fromDate": "2022-04-15T12:42:44.494Z", '
'"toDate": "2022-04-15T12:47:44.494Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_creation_with_in_operator_query(self):
"""test to check in operator query"""
stix_pattern = "[process:pid IN (443) ]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(srcProcPid IN (\\"443\\") OR tgtProcPid IN (\\"443\\") '
'OR srcProcParentPid IN (\\"443\\")) AND EventTime '
'BETWEEN \\"2022-02-22T16:10:35.305Z\\" AND \\"2022-02-22T16:15:35.305Z\\"", '
'"fromDate": "2022-02-22T16:10:35.305Z", '
'"toDate": "2022-02-22T16:15:35.305Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_negate_for_like_operator(self):
"""test to check negate for like query"""
stix_pattern = "[file:extensions.'x-sentinelone-file'." \
"file_description NOT LIKE 'Windows']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "NOT tgtFileDescription in contains anycase (\\"Windows\\") '
'AND EventTime BETWEEN \\"2022-03-17T04:33:13.901Z\\" '
'AND \\"2022-03-17T04:38:13.901Z\\"", '
'"fromDate": "2022-03-17T04:33:13.901Z", '
'"toDate": "2022-03-17T04:38:13.901Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_negate_for_greater_than_or_equals_operator(self):
"""test to check negate greater than or equal query"""
stix_pattern = "[network-traffic:dst_port NOT >= 22]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "dstPort < \\"22\\" AND EventTime '
'BETWEEN \\"2022-02-25T11:25:55.150Z\\" AND \\"2022-02-25T11:30:55.150Z\\"", '
'"fromDate": "2022-02-25T11:25:55.150Z", '
'"toDate": "2022-02-25T11:30:55.150Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_negate_for_less_than_operator(self):
"""test to check negate for lessthan query"""
stix_pattern = "[network-traffic:dst_port NOT < 22]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "dstPort >= \\"22\\" AND EventTime '
'BETWEEN \\"2022-02-25T11:28:23.103Z\\" '
'AND \\"2022-02-25T11:33:23.103Z\\"", '
'"fromDate": "2022-02-25T11:28:23.103Z", '
'"toDate": "2022-02-25T11:33:23.103Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_unmapped_attribute_handling_with_and(self):
"""test to check unmapped attribute"""
stix_pattern = "[url:value = 'http://www.testaddress.com' AND unmapped:attribute = 'something']"
result = translation.translate('sentinelone', 'query', '{}', stix_pattern)
assert result['success'] is False
assert ErrorCode.TRANSLATION_MAPPING_ERROR.value == result['code']
assert 'Unable to map the following STIX objects and properties' in result['error']
def test_invalid_stix_pattern(self):
"""test to check invalid stix pattern"""
stix_pattern = "[not_a_valid_pattern]"
result = translation.translate('cybereason', 'query', '{}', stix_pattern, {'validate_pattern': 'true'})
assert result['success'] is False
assert ErrorCode.TRANSLATION_STIX_VALIDATION.value == result['code']
assert stix_pattern[1:-1] in result['error']
def test_for_match_operator(self):
"""test to check regex operator query"""
stix_pattern = "[process:name MATCHES '[a-zA-Z0-9_%]+[.exe]']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(srcProcName regexp \\"[a-zA-Z0-9_%]+[.exe]\\" '
'OR srcProcParentName regexp \\"[a-zA-Z0-9_%]+[.exe]\\" '
'OR tgtProcName regexp \\"[a-zA-Z0-9_%]+[.exe]\\") '
'AND EventTime BETWEEN \\"2022-03-17T06:45:46.514Z\\" '
'AND \\"2022-03-17T06:50:46.514Z\\"", '
'"fromDate": "2022-03-17T06:45:46.514Z", '
'"toDate": "2022-03-17T06:50:46.514Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_for_multiple_observation_with_timestamp(self):
"""test to multiple start stop qualifier observation query"""
stix_pattern = "[file: hashes.MD5 = '0bbd4d92d3a0178463ef6e0ad46c986a']START " \
"t'2022-01-05T00:00:00.030Z' STOP t'2022-02-20T00:00:00.030Z' " \
"AND [ x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"agent_version = '21.6.6.1200']START " \
"t'2022-01-01T00:00:00.030Z' STOP t'2022-02-28T00:00:00.030Z'"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
queries = ['{"query": "((tgtFileMd5 = \\"0bbd4d92d3a0178463ef6e0ad46c986a\\" '
'OR tgtFileOldMd5 = \\"0bbd4d92d3a0178463ef6e0ad46c986a\\" '
'OR srcProcImageMd5 = \\"0bbd4d92d3a0178463ef6e0ad46c986a\\" '
'OR tgtProcImageMd5 = \\"0bbd4d92d3a0178463ef6e0ad46c986a\\") '
'AND EventTime BETWEEN \\"2022-01-05T00:00:00.030Z\\" '
'AND \\"2022-02-20T00:00:00.030Z\\") '
'OR (agentVersion = \\"21.6.6.1200\\" '
'AND EventTime BETWEEN \\"2022-01-01T00:00:00.030Z\\" '
'AND \\"2022-02-28T00:00:00.030Z\\")", '
'"fromDate": "2022-01-01T00:00:00.030Z", '
'"toDate": "2022-02-28T00:00:00.030Z", "limit": 10000}']
self._test_query_assertions(query, queries)
def test_invalid_boolean_value(self):
"""test to check invalid boolean pattern"""
stix_pattern = "[user-account:is_privileged = '2']"
result = translation.translate('sentinelone', 'query', '{}', stix_pattern)
assert result['success'] is False
assert ErrorCode.TRANSLATION_NOTIMPLEMENTED_MODE.value == result['code']
assert 'wrong parameter : Invalid boolean type input' in result['error']
def test_or_operator_query(self):
"""test to check or pattern to query"""
stix_pattern = "[x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os = 'windows' " \
"OR network-traffic:extensions.'x-sentinelone-network-action'." \
"connection_status ='SUCCESS' " \
"AND network-traffic:src_port > 100 ]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "((srcPort > \\"100\\" AND netConnStatus = \\"SUCCESS\\") '
'OR endpointOs = \\"WINDOWS\\") AND EventTime '
'BETWEEN \\"2022-03-03T06:45:13.380Z\\" '
'AND \\"2022-03-03T06:50:13.380Z\\"", '
'"fromDate": "2022-03-03T06:45:13.380Z", '
'"toDate": "2022-03-03T06:50:13.380Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_multiple_pattern_with_or_query(self):
"""test to check multiple pattern with or to query"""
stix_pattern = "[network-traffic:extensions.'x-sentinelone-network-action'." \
"connection_status ='SUCCESS' " \
"AND network-traffic:src_port > 100] START " \
"t'2019-10-01T08:43:10.003Z' STOP t'2019-11-30T10:43:10.005Z' " \
"AND [x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os = 'windows' " \
"OR x-oca-event:action = 'File Rename']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver2(query['queries'])
queries = ['{"query": "((srcPort > \\"100\\" AND netConnStatus = \\"SUCCESS\\") '
'AND EventTime BETWEEN \\"2019-10-01T08:43:10.003Z\\" '
'AND \\"2019-11-30T10:43:10.005Z\\") '
'OR ((eventType = \\"FILE RENAME\\" OR endpointOs = \\"WINDOWS\\") '
'AND EventTime BETWEEN \\"2022-03-11T10:24:41.029Z\\" '
'AND \\"2022-03-11T10:29:41.029Z\\")", '
'"fromDate": "2019-10-01T08:43:10.003Z", '
'"toDate": "2022-03-11T10:29:41.029Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver2(queries)
self._test_query_assertions(query, queries)
def test_multiple_combination_or_pattern_query(self):
"""test to check multiple combination or operator to query"""
stix_pattern = "[network-traffic:extensions.'x-sentinelone-network-action'." \
"connection_status ='SUCCESS' " \
"OR network-traffic:src_port > 100 " \
"AND x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os = 'windows' " \
"OR process:extensions.'x-sentinelone-process'." \
"integrity_level = 'SYSTEM' ]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "((srcProcIntegrityLevel = \\"SYSTEM\\" '
'OR tgtProcIntegrityLevel = \\"SYSTEM\\") '
'OR ((endpointOs = \\"WINDOWS\\" AND srcPort > \\"100\\") '
'OR netConnStatus = \\"SUCCESS\\")) '
'AND EventTime BETWEEN \\"2022-03-11T09:51:06.719Z\\" '
'AND \\"2022-03-11T09:56:06.719Z\\"", '
'"fromDate": "2022-03-11T09:51:06.719Z", '
'"toDate": "2022-03-11T09:56:06.719Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_multiple_or_operator_query(self):
"""test to check multiple or operator to query"""
stix_pattern = "[network-traffic:extensions.'x-sentinelone-network-action'." \
"connection_status ='SUCCESS' " \
"OR network-traffic:src_port > 100 " \
"OR x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os = 'windows']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(endpointOs = \\"WINDOWS\\" OR (srcPort > \\"100\\" '
'OR netConnStatus = \\"SUCCESS\\")) AND EventTime '
'BETWEEN \\"2022-03-03T10:09:10.017Z\\" '
'AND \\"2022-03-03T10:14:10.017Z\\"", '
'"fromDate": "2022-03-03T10:09:10.017Z", '
'"toDate": "2022-03-03T10:14:10.017Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_pattern_with_enum_fields_query(self):
"""test to check enum fields to query"""
stix_pattern = "[file:extensions.'x-sentinelone-file'.file_type IN ('PE') " \
"OR windows-registry-key:extensions.'x-sentinelone-registry'." \
"full_size = 72]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "(registryValueFullSize = \\"72\\" '
'OR tgtFileType IN (\\"PE\\")) AND EventTime '
'BETWEEN \\"2022-03-11T10:30:30.099Z\\" '
'AND \\"2022-03-11T10:35:30.099Z\\"", '
'"fromDate": "2022-03-11T10:30:30.099Z", '
'"toDate": "2022-03-11T10:35:30.099Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_multiple_enum_fields_query(self):
"""test to check multiple enum fields to query"""
stix_pattern = "[network-traffic:extensions.'x-sentinelone-network-action'." \
"connection_status " \
"IN ('SUCCESS') OR " \
" file:extensions.'x-sentinelone-file'." \
"file_type IN ('PE') " \
"AND user-account:extensions.'x-sentinelone-login'." \
"login_type IN ('SYSTEM') ]"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "((loginType IN (\\"SYSTEM\\") '
'AND tgtFileType IN (\\"PE\\")) OR netConnStatus '
'IN (\\"SUCCESS\\")) AND EventTime '
'BETWEEN \\"2022-03-11T09:59:55.322Z\\" '
'AND \\"2022-03-11T10:04:55.322Z\\"", '
'"fromDate": "2022-03-11T09:59:55.322Z", '
'"toDate": "2022-03-11T10:04:55.322Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
def test_invalid_enum_fields_query(self):
"""test to check invalid enum field pattern """
stix_pattern = "[file:extensions.'x-sentinelone-file'.file_type = 'abc']"
result = translation.translate('sentinelone', 'query', '{}', stix_pattern)
assert result['success'] is False
assert ErrorCode.TRANSLATION_NOTIMPLEMENTED_MODE.value == result['code']
assert result['error'] == "sentinelone connector error => " \
"wrong parameter : Unsupported ENUM values provided. " \
"Possible supported enum values " \
"are['UNKNOWN', 'PE', 'ELF', 'MACH', 'VECT', 'PDF', " \
"'COM', 'OLE', 'OPENXML', 'PKZIP', 'RAR', " \
"'LZMA', 'BZIP2', 'TAR', 'CABINET', 'SFX', " \
"'DOTNET', 'EICAR', 'LNK']"
def test_invalid_enum_fields_with_in_operator(self):
"""test to check invalid enum fields """
stix_pattern = "[x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os IN ('mac')]"
result = translation.translate('sentinelone', 'query', '{}', stix_pattern)
assert result['success'] is False
assert ErrorCode.TRANSLATION_NOTIMPLEMENTED_MODE.value == result['code']
assert result['error'] == "sentinelone connector error => " \
"wrong parameter : Unsupported ENUM values provided. " \
"Possible supported enum values " \
"are['windows', 'osx', 'linux']"
def test_invalid_enum_fields_with_multiple_element(self):
"""test to check invalid enum fields with multiple element"""
stix_pattern = "[x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os IN ('mac') " \
"OR file:extensions.'x-sentinelone-file'.file_type IN ('PE')]"
result = translation.translate('sentinelone', 'query', '{}', stix_pattern)
assert result['success'] is False
assert ErrorCode.TRANSLATION_NOTIMPLEMENTED_MODE.value == result['code']
assert result['error'] == "sentinelone connector error => " \
"wrong parameter : Unsupported ENUM values provided. " \
"Possible supported enum values " \
"are['windows', 'osx', 'linux']"
def test_multiple_invalid_fields(self):
"""test to check multiple invalid fields"""
stix_pattern = "[x-oca-asset:extensions.'x-sentinelone-endpoint'." \
"endpoint_os IN ('mac') " \
"OR file:extensions.'x-sentinelone-file'.file_type IN ('abc')]"
result = translation.translate('sentinelone', 'query', '{}', stix_pattern)
assert result['success'] is False
assert ErrorCode.TRANSLATION_NOTIMPLEMENTED_MODE.value == result['code']
assert result['error'] == "sentinelone connector error => " \
"wrong parameter : Unsupported ENUM values provided. " \
"Possible supported enum values " \
"are['UNKNOWN', 'PE', 'ELF', 'MACH', 'VECT', 'PDF', " \
"'COM', 'OLE', 'OPENXML', 'PKZIP', 'RAR', " \
"'LZMA', 'BZIP2', 'TAR', 'CABINET', 'SFX', " \
"'DOTNET', 'EICAR', 'LNK']"
def test_indicator_field_query(self):
"""test to check indicator fields"""
stix_pattern = "[x-sentinelone-indicator:indicator_category = 'Malware']"
query = translation.translate('sentinelone', 'query', '{}', stix_pattern)
query['queries'] = _remove_timestamp_from_query_ver1(query['queries'])
queries = ['{"query": "indicatorCategory = \\"MALWARE\\" '
'AND EventTime BETWEEN \\"2022-03-30T06:17:37.577Z\\" '
'AND \\"2022-03-30T06:22:37.577Z\\"", '
'"fromDate": "2022-03-30T06:17:37.577Z", '
'"toDate": "2022-03-30T06:22:37.577Z", "limit": 10000}']
queries = _remove_timestamp_from_query_ver1(queries)
self._test_query_assertions(query, queries)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
def main():
result = "pass"
try:
        ver = subprocess.check_output(["./version.py"]).decode().strip()
except OSError:
result = "fail"
ver = 0
except subprocess.CalledProcessError:
result = "fail"
ver = 0
subprocess.Popen(["lava-test-case", "code-version-%s" % ver, "--result", result])
return 0
if __name__ == '__main__':
main()
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
##############export checkpoint file into mindir models#################
python export.py
"""
import numpy as np
import mindspore as ms
from mindspore import Tensor, context
from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
from src.models.APDrawingGAN_G import Generator
from src.option.options_test import TestOptions
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
if __name__ == '__main__':
print(ms.__version__)
opt = TestOptions().get_settings()
opt.isExport = True
net = Generator(opt)
param_dict = load_checkpoint(opt.model_path)
load_param_into_net(net, param_dict)
real_A = Tensor(np.ones([1, 3, 512, 512]) * 1.0, ms.float32)
real_A_bg = Tensor(np.ones([1, 3, 512, 512]) * 1.0, ms.float32)
real_A_eyel = Tensor(np.ones([1, 3, 80, 112]) * 1.0, ms.float32)
real_A_eyer = Tensor(np.ones([1, 3, 80, 112]) * 1.0, ms.float32)
real_A_nose = Tensor(np.ones([1, 3, 96, 96]) * 1.0, ms.float32)
real_A_mouth = Tensor(np.ones([1, 3, 80, 128]) * 1.0, ms.float32)
real_A_hair = Tensor(np.ones([1, 3, 512, 512]) * 1.0, ms.float32)
mask = Tensor(np.ones([1, 1, 512, 512]) * 1.0, ms.float32)
mask2 = Tensor(np.ones([1, 1, 512, 512]) * 1.0, ms.float32)
center = np.array([[199., 238.],
[313., 238.],
[254., 300.],
[256., 369.]])
net.set_pad(center)
input_arr = [real_A, real_A_bg, real_A_eyel, real_A_eyer, real_A_nose, real_A_mouth, real_A_hair, mask, mask2]
export(net, *input_arr, file_name=opt.mindir_filename, file_format="MINDIR")
if opt.isModelarts:
from src.utils.tools import modelarts_result2obs
modelarts_result2obs(opt)
|
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from django.core.management.base import BaseCommand, CommandError
import requests
from urllib.parse import urljoin
from slides_manager.models import Slide
from promort.settings import OME_SEADRAGON_BASE_URL
class Command(BaseCommand):
help = 'check if OMERO images related to ProMort slides still exist'
def _check_slide(self, slide_id, ome_image_id, image_type):
if image_type == 'MIRAX':
url = urljoin(OME_SEADRAGON_BASE_URL, 'mirax/deepzoom/get/%s_metadata.json' % slide_id)
else:
url = urljoin(OME_SEADRAGON_BASE_URL, 'deepzoom/get/%s_metadata.json' % ome_image_id)
response = requests.get(url)
if response.json()['tile_sources']:
return True
else:
return False
def handle(self, *args, **opts):
try:
slides = Slide.objects.filter(omero_id__isnull=False)
except Slide.DoesNotExist:
raise CommandError('There is no Slide related to an OMERO image')
for slide in slides.all():
self.stdout.write('CHECKING SLIDE %s' % slide.id)
ome_slide_exists = self._check_slide(slide.id, slide.omero_id, slide.image_type)
if not ome_slide_exists:
self.stdout.write('[SLIDE %s] There is no slide in OMERO with ID %s' % (slide.id, slide.omero_id))
slide.omero_id = None
slide.image_type = None
slide.save()
|
# Generated by Django 3.2.5 on 2021-07-11 15:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='book_detail',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Book_name', models.CharField(max_length=120)),
('Author', models.CharField(max_length=130)),
('genre', models.CharField(max_length=123)),
('language', models.CharField(max_length=123)),
],
),
]
|
"""p2 S3 URLs"""
from django.conf import settings
from django.urls import include, path, register_converter
from p2.s3.urls import EverythingConverter, S3BucketConverter
from p2.s3.views import buckets, get, objects
register_converter(S3BucketConverter, 's3')
register_converter(EverythingConverter, 'everything')
app_name = 'p2_s3'
# These patterns are only loaded when an X-AWS-* Header is detected
# as these paths can interfere with p2.serve
urlpatterns = [
path('<s3:bucket>', buckets.BucketView.as_view(), name='bucket'),
path('<s3:bucket>/', buckets.BucketView.as_view(), name='bucket'),
path('<s3:bucket>/<everything:path>', objects.ObjectView.as_view(), name='bucket-object'),
path('', get.ListView.as_view(), name='list'),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('_/debug/', include(debug_toolbar.urls)),
] + urlpatterns
|
# Copyright (c) initOS GmbH 2019
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
from ..commands import CommandError, parse_commands
from ..config import OCABOT_EXTRA_DOCUMENTATION, OCABOT_USAGE
from ..router import router
from ..tasks.add_pr_comment import add_pr_comment
@router.register("issue_comment", action="created")
async def on_command(event, gh, *args, **kwargs):
"""On pull request review, tag if approved or ready to merge."""
if not event.data["issue"].get("pull_request"):
# ignore issue comments
return
org, repo = event.data["repository"]["full_name"].split("/")
pr = event.data["issue"]["number"]
username = event.data["comment"]["user"]["login"]
text = event.data["comment"]["body"]
try:
for command in parse_commands(text):
command.delay(org, repo, pr, username)
except CommandError as e:
        # Add a comment on the current PR if
        # the command was misunderstood by the bot
add_pr_comment.delay(
org,
repo,
pr,
f"Hi @{username}. Your command failed:\n\n"
f"``{e}``.\n\n"
f"{OCABOT_USAGE}\n\n"
f"{OCABOT_EXTRA_DOCUMENTATION}",
)
|
# Note: The profiles/__init__.py bits are not required if you are already referring to your AppConfig in the INSTALLED_APPS settings.
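# A minimal illustration (the 'invoices.apps.InvoicesConfig' path below is just the example
# already used in this file, not a requirement): rather than setting default_app_config here,
# the app config can be referenced directly in settings.py, e.g.
#   INSTALLED_APPS = [
#       ...
#       'invoices.apps.InvoicesConfig',
#   ]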
# default_app_config = 'invoices.apps.InvoicesConfig' |
# Copyright (c) 2018 Phil Vachon <[email protected]>
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from .state import TruePositionState
from .uart import TruePositionUART
from .http import TruePositionHTTPApi
from .sat_writer import TruePositionSatWriter
from .nmea_writer import TruePositionNMEAWriter
from .shm_writer import TruePositionSHMWriter
|
# Dialog used to select cells for assigning protocols or types
import bisect
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QLabel, QGridLayout, QPushButton, \
QTextEdit, QDialog, QListWidget, QListWidgetItem, QVBoxLayout, \
QHBoxLayout
class SelectCellDialog(QDialog):
'''
Dialog used to select cells or trials for assigning protocols or types.
'''
selected = pyqtSignal(tuple)
def __init__(self, parent = None):
'''
Initialize and build the window.
Parameters
----------
parent: QWidget
Parent window.
Attributes
----------
items: list of QListWidgetItem
Items to display in QListWidget.
included: list of int
Included cell numbers.
excluded: list of int
Excluded cell numbers.
'''
super().__init__(parent)
self.incLW = QListWidget(self)
self.excLW = QListWidget(self)
self.incLb = QLabel("Included")
self.excLb = QLabel("Excluded")
incVB = QVBoxLayout()
excVB = QVBoxLayout()
incVB.addWidget(self.incLb)
incVB.addWidget(self.incLW)
excVB.addWidget(self.excLb)
excVB.addWidget(self.excLW)
excAllBtn = QPushButton(">>", self)
excBtn = QPushButton('>', self)
incBtn = QPushButton('<', self)
incAllBtn = QPushButton("<<", self)
btnVB = QVBoxLayout()
btnVB.addWidget(excAllBtn)
btnVB.addWidget(excBtn)
btnVB.addWidget(incBtn)
btnVB.addWidget(incAllBtn)
selectHB = QHBoxLayout()
selectHB.addLayout(incVB)
selectHB.addLayout(btnVB)
selectHB.addLayout(excVB)
acceptBtn = QPushButton("OK", self)
acceptBtn.setDefault(True)
cancelBtn = QPushButton("Cancel", self)
btnHB = QHBoxLayout()
btnHB.addWidget(acceptBtn)
btnHB.addWidget(cancelBtn)
topVB = QVBoxLayout(self)
topVB.addLayout(selectHB)
topVB.addLayout(btnHB)
self.items = []
self.included = []
self.excluded = []
# key binding
incAllBtn.clicked.connect(self.includeAll)
excAllBtn.clicked.connect(self.excludeAll)
incBtn.clicked.connect(self.include)
excBtn.clicked.connect(self.exclude)
acceptBtn.clicked.connect(self.finish)
cancelBtn.clicked.connect(self.reject)
def start(self, inc, exc):
'''
Overload dialog open function, initializing items before
showing the dialog.
Parameters
----------
inc: list of int
Predefined included cell numbers.
exc: list of int
Predefined excluded cell numbers.
'''
self.included = sorted(inc)
self.excluded = sorted(exc)
self.incLW.clear()
self.excLW.clear()
self.items = [None] * (max(self.included + self.excluded))
for c in self.included:
self.items[c - 1] = QListWidgetItem(str(c))
self.incLW.addItem(self.items[c - 1])
for c in self.excluded:
self.items[c - 1] = QListWidgetItem(str(c))
self.excLW.addItem(self.items[c - 1])
super().open()
def finish(self):
'''
        Finish selection, emit the selected signal with (included, excluded) as parameter.
Signals
-------
selected:
Signalling selection is finished and return selection results.
'''
self.accept()
self.selected.emit((self.included, self.excluded))
def include(self):
'''
Move the selected items from excluded list to included list.
'''
for item in self.excLW.selectedItems():
c = int(item.text())
i = bisect.bisect_left(self.excluded, c)
self.excLW.takeItem(i)
self.excluded.pop(i)
j = bisect.bisect_left(self.included, c)
self.included.insert(j, c)
self.incLW.insertItem(j, item)
self.incLW.update()
self.excLW.update()
def exclude(self):
'''
Move the selected items from included list to excluded list.
'''
for item in self.incLW.selectedItems():
c = int(item.text())
i = bisect.bisect_left(self.included, c)
self.incLW.takeItem(i)
self.included.pop(i)
j = bisect.bisect_left(self.excluded, c)
self.excluded.insert(j, c)
self.excLW.insertItem(j, item)
self.incLW.update()
self.excLW.update()
def includeAll(self):
'''
        Move all items from the excluded list to the included list.
'''
while len(self.excluded):
item = self.excLW.takeItem(0)
c = self.excluded.pop(0)
j = bisect.bisect_left(self.included, c)
self.included.insert(j, c)
self.incLW.insertItem(j, item)
self.excLW.update()
self.incLW.update()
def excludeAll(self):
'''
        Move all items from the included list to the excluded list.
'''
while len(self.included):
item = self.incLW.takeItem(0)
c = self.included.pop(0)
j = bisect.bisect_left(self.excluded, c)
self.excluded.insert(j, c)
self.excLW.insertItem(j, item)
self.incLW.update()
self.excLW.update()
def changeTarget(self, target):
'''
        Change the displayed subject in the included and excluded labels.
'''
self.incLb.setText("Included " + target)
self.excLb.setText("Excluded " + target)
|
# -*- coding: utf-8 -*-
"""Untitled0.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1_BEDlHOdtbAEWSvPgI5Lp00ZydRk48QX
"""
# ! git clone
import cv2
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pickle
from pathlib import Path
from datetime import datetime
class VIDEO :
_defaults = {
"id": 0,
"save": False,
"anotate": False,
"save_path" :'./images/from_video/',
"path" : "./videos/challenge_video.mp4",
"period" : 0.1
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.save : bool
self.period : float
self.save_path : str
self.anotate : bool
self.fps : int
self.path : str
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.video = None
self.fps : int
self.step : int
# self.extract_frames()
    # Convert the video file into a sequence of image frames.
def extract_frames(self):
self.video = cv2.VideoCapture(self.path)
self.fps = self.video.get(cv2.CAP_PROP_FPS)
# self.step = int(self.period* self.fps)
count = 0
success = 1
        while success:
            success, image = self.video.read()
            # skip writing once the stream is exhausted (image would be None)
            if not success:
                break
            fn = int(datetime.utcnow().timestamp()*10000)
            cv2.imwrite(self.save_path+str(fn)+".jpg",image)
count += 1
print(count, self.fps)
class CAMERA :
def __init__(self):
self.callibration_done = False
self.cam_matrix = None
self.dist_coeffs= None
self.img_size = None
self.rvecs = None
self.tvecs = None
# self.callibrate()
def callibrate(self , folder = 'camera_cal',n_x = 7, n_y = 7, verbose = False):
objp = np.zeros((n_y*n_x, 3), np.float32)
objp[:, :2] = np.mgrid[0:n_x, 0:n_y].T.reshape(-1, 2)
image_points = []
object_points = []
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
directory = Path(folder)
for image_file in directory.glob("*.jpg"):
img = cv2.imread(str( image_file))
img= cv2.resize(img, (400,300))
img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
found, corners = cv2.findChessboardCorners(img_gray, (n_x, n_y))
if found:
self.callibration_done = True
corners2 = cv2.cornerSubPix(img_gray, corners, (11, 11), (-1, -1), criteria)
image_points.append(corners2)
object_points.append(objp)
if verbose:
cv2.drawChessboardCorners(img, (n_x, n_y), corners, found)
plt.imshow(img)
plt.show()
        # perform the calibration
ret, self.cam_matrix, self.dist_coeffs, self.rvecs, self.tvecs = cv2.calibrateCamera(object_points, image_points, img_gray.shape[::-1], None, None)
self.img_size = img.shape
def undistort(self, image) :
if self.callibration_done :
image = cv2.undistort(image, self.cam_matrix, self.dist_coeffs)
return image
class EVENT :
def __init__(self):
# TAILGAITING , FCAS WARNING , LANE CHANGING, TRAFFIC LIGHT JUMP
self.time_stamp : int
self.image_path : str
self.type : int
self.speed : float
self.coordinates : [float, float]
if __name__ == "__main__":
video = VIDEO(path = "./videos/challenge_video.mp4", save=True,period = 1)
video.extract_frames()
|
import math
from miscellanies.torch.distributed import is_dist_available_and_initialized, is_main_process
import torch.distributed
from miscellanies.yaml_ops import load_yaml
import os
from .mixin_utils import apply_mixin_rules
def _update_sweep_config_(config: dict):
wandb_tuner_config = config['tune']
for parameter in wandb_tuner_config['parameters'].values():
if 'distribution' in parameter and parameter['distribution'] in ('log_uniform', 'q_log_uniform', 'log_normal', 'q_log_normal'):
if 'min_raw' in parameter:
parameter['min'] = math.log(parameter['min_raw'])
del parameter['min_raw']
if 'max_raw' in parameter:
parameter['max'] = math.log(parameter['max_raw'])
del parameter['max_raw']
return config
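# Illustrative example (comments only): a sweep parameter declared with raw bounds such as
#   {'distribution': 'log_uniform', 'min_raw': 1e-5, 'max_raw': 1e-2}
# is rewritten in place by _update_sweep_config_ into the log-space bounds wandb expects:
#   {'distribution': 'log_uniform', 'min': math.log(1e-5), 'max': math.log(1e-2)}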
def get_sweep_config(args):
if args.sweep_config is not None:
        if args.sweep_config.startswith(('/', '\\')):
config_path = os.path.join(args.config_path, args.sweep_config)
else:
config_path = os.path.join(args.config_path, args.method_name, args.config_name, 'sweep', args.sweep_config)
else:
config_path = os.path.join(args.config_path, args.method_name, args.config_name, 'sweep', 'sweep.yaml')
config = load_yaml(config_path)
_update_sweep_config_(config)
return config
def prepare_sweep(args, wandb_instance, config):
# get the config of this run from wandb server
if is_main_process():
this_run_config = wandb_instance.config.as_dict()
else:
this_run_config = None
if is_dist_available_and_initialized():
object_list = [this_run_config]
torch.distributed.broadcast_object_list(object_list, src=0)
this_run_config, = object_list
sweep_config = get_sweep_config(args)
apply_mixin_rules(sweep_config['mixin'], config, this_run_config)
if args.debug:
import pprint
pprint.pprint(config)
|
"""
This module loads trained models to predict properties of organic molecules
"""
from __future__ import print_function
import os
import pkg_resources
import numpy as np
import pandas as pd
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from rdkit import Chem
from rdkit.Chem.rdMolDescriptors import GetMorganFingerprintAsBitVect
from chemml.models.keras.trained.engine import check_array_input
class OrganicLorentzLorenz():
"""
A machine learning model for Lorentz-Lorenz (LL) estimates of refractive index.
The model predicts refractive index, polarizability, and density of an organic molecule using its
SMILES representation.
The model is trained on 100K small organic molecules with their polarizabilities from DFT calculations, densities from
molecular dynamics simulations, and refractive index by feeding calculated polarizabilities and densities into the
LL model.
The model is a fully connected artificial neural network with 3 hidden layers. The number of neurons per layers from
input layer to the output layer are as follow: 1024 --> 128 --> 64 --> 32 --> [1, 1, 1].
"""
def __init__(self):
self.path = pkg_resources.resource_filename('chemml', os.path.join('datasets', 'data','organic_lorentz_lorenz'))
# load x and y scalers
self.x_scaler = pd.read_csv(os.path.join(self.path, 'x_standard_scaler.csv'))
self.y_scaler = pd.read_csv(os.path.join(self.path, 'y_standard_scaler.csv'))
def load(self, summary=True):
"""
This function loads the Keras model. The model consists of 3 hidden layers and more than 140K parameters.
Parameters
----------
summary: bool
if True a summary of Keras model will be printed out.
"""
self.model = load_model(os.path.join(self.path, 'Morgan_100k.h5'))
if isinstance(summary, bool):
if summary:
self.model.summary()
def __represent(self, smiles):
# The descriptor must be a binary Morgan fingerprint with radius 2 and 1024 bits.
mol = Chem.MolFromSmiles(smiles.strip())
if mol is None:
msg = '%s is not a valid SMILES representation'%smiles
raise ValueError(msg)
else:
return np.array(GetMorganFingerprintAsBitVect(mol, radius=2, nBits=1024))
def predict(self, smiles, pprint=False):
"""
        After loading the model, this function predicts the refractive index, polarizability, and density of the entry.
Parameters
----------
smiles: str
            The SMILES representation of a molecule.
pprint: bool
If True, a short description of the predicted properties will be printed out.
Returns
-------
tuple
includes estimates of refractive index, polarizability, and density, respectively.
"""
# Todo: smiles can be a list or file path?!
# check smiles type
if isinstance(smiles, str):
# find descriptor
self.descriptor = self.__represent(smiles)
else:
msg = "smiles must has `str` type."
raise ValueError(msg)
# preprocess fingerprint: keep all of them for this model
xin = (self.descriptor - self.x_scaler['ss_mean'].values) / self.x_scaler['ss_scale'].values
xin = xin.reshape(1, 1024)
# y1: RI, y2: polarizability (Bohr^3), y3: density (Kg/m^3)
y1, y2, y3 = self.model.predict(xin)
ri = float(y1 * self.y_scaler['ss_scale'][0] + self.y_scaler['ss_mean'][0])
pol = float(y2 * self.y_scaler['ss_scale'][1] + self.y_scaler['ss_mean'][1])
den = float(y3 * self.y_scaler['ss_scale'][2] + self.y_scaler['ss_mean'][2])
# print out predictions
if pprint:
print ('\ndata-driven model estimates:')
print (' LL refractive index: ', '%.2f' % ri)
print (' polarizability (Bohr^3):', '%.2f' % pol)
print (' density (Kg/m^3): ', '%.2f' % den)
return (ri, pol, den)
def train(self, X, Y, scale=True, kwargs_for_compile={}, kwargs_for_fit={}):
"""
This function allows the user to retrain the model on a given data set for some further steps.
Thus, all the parameters you are able to pass to a keras model's compile or fit methods can be passed to this
function as well.
Parameters
----------
X: ndarray or dataframe
If 2D array, must be with 1024 dimension and numerical type. It is recommended to be Morgan fingerprint representation of the molecules.
If 1D array, must be an array of `str` type, each element represents a molecule in the SMILES format.
If dataframe, it can be a 2D frame with one column of SMILES or 1024 columns of features.
Y: list or dataframe
a list of three numpy arrays for refractive index, polarizability, and density, respectively.
The length of arrays must be same as the length of X.
If dataframe, it must be a 2D frame with 3 columns, each for one of the properties.
scale: bool, optional (default: True)
            If True the X and Y will be scaled in the same fashion as the original training process (recommended).
kwargs_for_compile: dict, optional (default: {})
This dictionary could contain all the parameters that the compile method of keras models can receive.
kwargs_for_fit: dict, optional (default: {})
This dictionary could contain all the parameters that the fit method of keras models can receive.
"""
# convert dataframe to ndarray
if isinstance(X, pd.DataFrame):
if X.ndim == 2 and X.shape[1] == 1:
X = X.iloc[:,0].values
elif X.ndim == 2 and X.shape[1] == 1024:
X = X.values
else:
msg = "This function doesn't support the format of the input X."
raise ValueError(msg)
if isinstance(Y, pd.DataFrame):
if Y.ndim == 2 and Y.shape[1] == 3:
Y = [Y.iloc[:,0].values, Y.iloc[:,1].values, Y.iloc[:,2].values]
else:
msg = "This function doesn't support the format of the input Y."
raise ValueError(msg)
# check dimension of X
itis, msg = check_array_input(X, 'X', 2, (None, 1024))
if not itis:
itis, msg = check_array_input(X, 'X', 1, (None,))
if itis:
X = np.array([self.__represent(i) for i in X])
else:
raise ValueError(msg)
# check dimension of Y
if isinstance(Y, list):
if len(Y) == 3:
if isinstance(Y[0], np.ndarray) and isinstance(Y[1], np.ndarray) and \
isinstance(Y[2], np.ndarray) and Y[0].ndim == Y[1].ndim == Y[2].ndim == 1:
if len(Y[0]) == len(Y[1]) == len(Y[2]) == len(X):
pass
else:
msg = "The length of all Y arrays and X must be same."
raise ValueError(msg)
else:
msg = "All the Y arrays must be numpy 1D array of properties."
raise ValueError(msg)
else:
msg = "Y must contain 3 arrays."
raise ValueError(msg)
else:
msg = "Y must be a list of arrays or a pandas dataframe."
raise ValueError(msg)
        # scale
        if not isinstance(scale, bool):
            msg = "The parameter scale must be boolean"
            raise ValueError(msg)
        if scale:
            # scale X
            xin = (X - self.x_scaler['ss_mean'].values) / self.x_scaler['ss_scale'].values
            xin = xin.reshape(X.shape[0], 1024)
            # scale Ys
            y1 = (Y[0] - self.y_scaler['ss_mean'][0]) / self.y_scaler['ss_scale'][0]
            y2 = (Y[1] - self.y_scaler['ss_mean'][1]) / self.y_scaler['ss_scale'][1]
            y3 = (Y[2] - self.y_scaler['ss_mean'][2]) / self.y_scaler['ss_scale'][2]
        else:
            # keep the inputs and targets unscaled
            xin, y1, y2, y3 = X, Y[0], Y[1], Y[2]
# the actual compile and training
from tensorflow.keras.optimizers import Adam
adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0.0)
default_kwargs = {'optimizer': adam,
'loss': 'mean_squared_error',
'metrics': ['mean_absolute_error']}
default_kwargs.update(kwargs_for_compile)
self.model.compile(**default_kwargs)
        self.model.fit(xin, [y1, y2, y3], **kwargs_for_fit)
def get_hidden_layer(self, X, id=1):
"""
        This function returns the output of one of the model's hidden layers.
Parameters
----------
X: ndarray
If 2D array, must be with 1024 dimension and numerical type. It is recommended to be Morgan fingerprint representation of the molecules.
If 1D array, must be an array of `str` type, each element represents a molecule in the SMILES format.
id: int
This is the id of hidden layers. It can be any of 1, 2, or 3 for the first, second,
or third hidden layer, respectively.
Returns
-------
ndarray
The array of shape (length_of_X, 128) as the outputs of the first hidden layer (id=1).
            The array of shape (length_of_X, 64) as the outputs of the second hidden layer (id=2).
            The array of shape (length_of_X, 32) as the outputs of the third hidden layer (id=3).
"""
# check dimension of X
itis, msg = check_array_input(X, 'X', 2, (None, 1024))
if not itis:
itis, msg = check_array_input(X, 'X', 1, (None,))
if itis:
X = np.array([self.__represent(i) for i in X])
else:
raise ValueError(msg)
get_layer_output = K.function([self.model.layers[0].input],
[self.model.layers[id].output])
return get_layer_output([X])[0]
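# A minimal usage sketch, not part of the original module: it assumes the bundled
# 'Morgan_100k.h5' model and scaler CSVs ship with the installed chemml package, loads the
# pretrained network, and predicts properties for benzene from its SMILES string.
if __name__ == '__main__':
    estimator = OrganicLorentzLorenz()
    estimator.load(summary=False)
    ri, pol, den = estimator.predict('c1ccccc1', pprint=True)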
|
def crash_add(a, b):
raise RuntimeError("I can't calculate anything :(")
|
# coding=utf-8
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^conta/$', views.index, name='index'),
url(r'^alterar-dados/$', views.alterar_dados_usuario, name='alterar-dados-usuario'),
url(r'^alterar-senha/$', views.update_password, name='update_password'),
url(r'^registro/$', views.register, name='register'),
]
|
# Time: O(nlogn)
# Space: O(n)
class BIT(object): # 0-indexed
def __init__(self, n):
self.__bit = [0]*(n+1)
def add(self, i, val):
i += 1
while i < len(self.__bit):
self.__bit[i] += val
i += (i & -i)
def query(self, i):
i += 1
ret = 0
while i > 0:
ret += self.__bit[i]
i -= (i & -i)
return ret
# greedy, bit, fenwick tree
class Solution(object):
def minMovesToMakePalindrome(self, s):
"""
:type s: str
:rtype: int
"""
idxs = [[] for _ in xrange(26)]
for i, c in enumerate(s):
idxs[ord(c)-ord('a')].append(i)
targets, pairs = [0]*len(s), []
for c, idx in enumerate(idxs):
for i in xrange(len(idx)//2):
pairs.append((idx[i], idx[~i]))
if len(idx)%2:
targets[idx[len(idx)//2]] = len(s)//2
pairs.sort()
for i, (l, r) in enumerate(pairs):
targets[l], targets[r] = i, (len(s)-1)-i
bit = BIT(len(s))
result = 0
for i in targets:
result += i-bit.query(i-1) # move from bit.query(i-1) to i
bit.add(i, 1)
return result
# Time: O(n^2)
# Space: O(n)
# greedy
class Solution2(object):
def minMovesToMakePalindrome(self, s):
"""
:type s: str
:rtype: int
"""
s = list(s)
result = 0
while s:
i = s.index(s[-1])
if i == len(s)-1:
result += i//2
else:
result += i
s.pop(i)
s.pop()
return result
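# Illustrative check, not part of the original solutions; Solution2 avoids xrange, so it also
# runs under Python 3: "aabb" needs two adjacent swaps ("aabb" -> "abab" -> "abba").
if __name__ == "__main__":
    assert Solution2().minMovesToMakePalindrome("aabb") == 2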
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the base class KerasCellClassifier for using deep learning methods,
trained on some labeled FileGroup (has existing Populations), to predict single cell classifications.
Copyright 2020 Ross Burton
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from ..build_models import build_keras_model
from .cell_classifier import CellClassifier, check_data_init, check_model_init
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import History
from matplotlib.pyplot import Axes
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
class KerasCellClassifier(CellClassifier):
"""
Use Keras deep learning models to predict the classification of single cell data.
Training data should be provided in the form of a FileGroup with existing Populations.
Supports multi-class and multi-label classification; if multi-label classification is chosen,
the tree structure of training data is NOT conserved - all resulting populations
will have the same parent population.
    Note, this class assumes you use the Keras Sequential API. Objects can be constructed using
    a pre-built model, or the model can be designed through the parameters 'optimizer', 'loss' and
    'metrics', and then constructed using the 'build_model' method.
Parameters
----------
model: Sequential, optional
Pre-compiled Keras Sequential model
optimizer: str, optional
Provide if you intend to compile a model with the 'build_model' method.
See https://keras.io/api/optimizers/ for optimizers
loss: str, optional
Provide if you intend to compile a model with the 'build_model' method.
See https://keras.io/api/losses/ for valid loss functions
metrics: list, optional
Provide if you intend to compile a model with the 'build_model' method.
See https://keras.io/api/metrics/ for valid metrics
features: list
List of channels/markers to use as features in prediction
target_populations: list
List of populations from training data to predict
multi_label: bool (default=False)
If True, single cells can belong to more than one population. The tree structure of training data is
NOT conserved - all resulting populations will have the same parent population.
logging_level: int (default=logging.INFO)
Level to log events at
log: str, optional
Path to log output to; if not given, will log to stdout
population_prefix: str (default="CellClassifier_")
Prefix applied to populations generated
Attributes
----------
scaler: Scaler
Scaler object
transformer: Transformer
Transformer object
class_weights: dict
Sample class weights; key is sample index, value is weight. Set by calling compute_class_weights.
x: Pandas.DataFrame
Training feature space
y: numpy.ndarray
Target labels
logger: logging.Logger
features: list
target_populations: list
"""
def __init__(self,
model: Sequential or None = None,
optimizer: str or None = None,
loss: str or None = None,
metrics: list or None = None,
**kwargs):
self.optimizer = optimizer
self.loss = loss
self.metrics = metrics
if model is not None:
self.model = model
        else:
            if any([x is None for x in [optimizer, loss, metrics]]):
                raise ValueError("If model is not provided, must provide optimizer, loss and metrics, and "
                                 "call 'build_model' prior to fit")
            # Ensure the attribute exists so 'build_model' can check and set it later
            self.model = None
super().__init__(**kwargs)
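    # Usage sketch (illustrative, not taken from the library docs): the classifier can be
    # given a pre-compiled Sequential model, or only compile settings to be used later by
    # 'build_model'. The feature and population names below are hypothetical.
    #
    #   clf = KerasCellClassifier(optimizer="adam",
    #                             loss="categorical_crossentropy",
    #                             metrics=["accuracy"],
    #                             features=["CD3", "CD4", "CD8"],
    #                             target_populations=["T cells", "B cells"])
    #   clf.build_model(layers=..., layer_params=...)  # see 'build_model' below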
def build_model(self,
layers: list,
layer_params: list,
input_shape: tuple or None = None,
**compile_kwargs):
"""
        If a Sequential model was not provided at object construction, this method can be used
        to specify and build one.
Parameters
----------
layers: list
List of keras layer class names (see https://keras.io/api/layers/)
layer_params: list
List of parameters to use when constructing layers (order must match layers)
input_shape: tuple, optional
            Shape of the input data for the first layer; if None, (N,) is used, where N is the
            number of features
compile_kwargs:
Additional keyword arguments passed when calling compile
Returns
-------
self
"""
if self.model is not None:
raise ValueError("Model already defined.")
input_shape = input_shape or (len(self.features),)
self.model = build_keras_model(layers=layers,
layer_params=layer_params,
optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics,
input_shape=input_shape,
**compile_kwargs)
return self
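    # Sketch of a 'build_model' call (assumes each entry of 'layer_params' is passed as keyword
    # arguments to the corresponding Keras layer constructor; 'Dense' with 'units'/'activation'
    # is standard Keras, and the layer sizes here are arbitrary):
    #
    #   clf.build_model(layers=["Dense", "Dense", "Dense"],
    #                   layer_params=[{"units": 64, "activation": "relu"},
    #                                 {"units": 32, "activation": "relu"},
    #                                 {"units": len(clf.target_populations), "activation": "softmax"}])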
@check_model_init
def _predict(self,
x: pd.DataFrame,
threshold: float = 0.5):
"""
        Overrides the parent _predict method to use the Keras predict methods. If multi_label is True,
        then threshold is used to assign labels from the predicted probabilities; a positive
        association is made where the probability exceeds the threshold.
Parameters
----------
x: Pandas.DataFrame
Feature space
threshold: float (default=0.5)
            Threshold for positivity when multi_label is True
Returns
-------
numpy.ndarray, numpy.ndarray
Predicted labels, prediction probabilities
"""
y_score = self.model.predict(x)
if self.multi_label:
y_pred = list(map(lambda yi: [int(i > threshold) for i in yi], y_score))
else:
            y_pred = np.argmax(y_score, axis=-1)
return y_pred, y_score
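    # Worked example of the multi-label thresholding above (illustrative values): a predicted
    # probability row of [0.2, 0.7, 0.9] with threshold=0.5 yields the label vector [0, 1, 1],
    # i.e. the cell is assigned to the second and third target populations.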
def _fit(self,
x: pd.DataFrame,
y: np.ndarray,
epochs: int = 100,
validation_x: pd.DataFrame or None = None,
validation_y: np.ndarray or None = None,
**kwargs):
"""
        Overrides the _fit method of CellClassifier to support the Keras classifier.
        If a validation feature space and labels are provided, these are passed to the
        'validation_data' argument of the Keras 'fit' method.
Parameters
----------
x: Pandas.DataFrame
Training feature space to fit
y: numpy.ndarray
Training labels
epochs: int (default=100)
Number of training rounds
validation_x: Pandas.DataFrame
Validation feature space
validation_y: numpy.ndarray
Validation labels
kwargs:
Additional keyword arguments passed to fit method of Keras sequential API
Returns
-------
Keras.callbacks.History
Keras History object
Raises
------
AssertionError
validation_y not provided but validation_x is
"""
if validation_x is not None:
assert validation_y is not None, "validation_y cannot be None if validation_x given"
            # Encode the validation labels the same way as the training labels
            return self.model.fit(x, to_categorical(y), epochs=epochs,
                                  validation_data=(validation_x, to_categorical(validation_y)), **kwargs)
return self.model.fit(x, to_categorical(y), epochs=epochs, **kwargs)
@check_model_init
@check_data_init
def fit(self,
validation_frac: float or None = 0.3,
train_test_split_kwargs: dict or None = None,
epochs: int = 100,
**kwargs):
"""
        Fit the Keras model to the associated training data. If 'validation_frac' is provided,
        that proportion of the training data is set aside and passed to the 'validation_data'
        parameter of the Keras Sequential API fit method. The validation split is created using
        the train_test_split function from Scikit-Learn; additional keyword arguments can be
        provided as a dictionary via 'train_test_split_kwargs'.
Parameters
----------
validation_frac: float (optional; default=0.3)
Proportion of training data to set aside for validation
train_test_split_kwargs: dict (optional)
Additional keyword arguments for train_test_split function from Scikit-Learn
epochs: int (default=100)
Number of training rounds
kwargs:
Additional keyword arguments passed to fit method of keras sequential API
Returns
-------
Keras.callbacks.History
Keras History object
"""
train_test_split_kwargs = train_test_split_kwargs or {}
validation_x, validation_y = None, None
x, y = self.x, self.y
if validation_frac is not None:
x, validation_x, y, validation_y = train_test_split(self.x,
self.y,
test_size=validation_frac,
**train_test_split_kwargs)
return self._fit(x=x, y=y, validation_x=validation_x, validation_y=validation_y, epochs=epochs, **kwargs)
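    # Example call (hypothetical): hold back 30% of the training data for validation and
    # stratify the split on the target labels via the train_test_split keyword arguments.
    #
    #   history = clf.fit(validation_frac=0.3,
    #                     train_test_split_kwargs={"stratify": clf.y, "random_state": 42},
    #                     epochs=50)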
@check_model_init
def plot_learning_curve(self,
history: History or None = None,
ax: Axes or None = None,
figsize: tuple = (10, 10),
plot_kwargs: dict or None = None,
**fit_kwargs):
"""
        Generate a learning curve from the History object returned by the fit method of the
        Keras Sequential API.
Parameters
----------
history: History (optional)
If not given, then the 'fit' method will be called and use the associated
training data.
ax: Matplotlib.Axes
figsize: tuple (default=(10,10))
plot_kwargs: dict (optional)
Keyword arguments passed to Pandas.DataFrame.plot method
fit_kwargs:
Keyword arguments passed to fit method if 'history' is not given
Returns
-------
Matplotlib.Axes
"""
history = history or self.fit(**fit_kwargs)
plot_kwargs = plot_kwargs or {}
ax = ax or plt.subplots(figsize=figsize)[1]
return pd.DataFrame(history.history).plot(ax=ax, **plot_kwargs)
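# Plotting sketch (hypothetical): reuse a History object from an earlier 'fit' call so that
# training is not repeated, and draw the learning curve onto an existing Matplotlib axis.
#
#   fig, ax = plt.subplots(figsize=(8, 6))
#   clf.plot_learning_curve(history=history, ax=ax, plot_kwargs={"grid": True})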
|