| max_stars_repo_path (string, lengths 3-269) | max_stars_repo_name (string, lengths 4-119) | max_stars_count (int64, 0-191k) | id (string, lengths 1-7) | content (string, lengths 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
aquitania/data_processing/indicator_pipeline.py | marcus-var/aquitania | 0 | 12794151 | <filename>aquitania/data_processing/indicator_pipeline.py
########################################################################################################################
# |||||||||||||||||||||||||||||||||||||||||||||||||| AQUITANIA ||||||||||||||||||||||||||||||||||||||||||||||||||||||| #
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| #
# |||| To be a thinker means to go by the factual evidence of a case, not by the judgment of others |||||||||||||||||| #
# |||| As there is no group stomach to digest collectively, there is no group mind to think collectively. |||||||||||| #
# |||| Each man must accept responsibility for his own life, each must be sovereign by his own judgment. ||||||||||||| #
# |||| If a man believes a claim to be true, then he must hold to this belief even though society opposes him. ||||||| #
# |||| Not only know what you want, but be willing to break all established conventions to accomplish it. |||||||||||| #
# |||| The merit of a design is the only credential that you require. |||||||||||||||||||||||||||||||||||||||||||||||| #
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| #
########################################################################################################################
"""
.. moduleauthor:: <NAME>
Studying Pipelines through 'Hands-On Machine Learning...' on 29/01/2018.
"""
class IndicatorPipeLine:
def __init__(self):
pass
def fit(self):
pass
def transform(self):
pass
def fit_transform(self, X):
X = aligned(X)
X = sup_res_align(X)
# X = process_dates(X)
return X
def aligned(df):
for column in df:
if 'direction' in column:
df[column] = df[column] == df['signal']
for column in df:
if 'tied' in column:
df[column] = df.apply(lambda x: x[column] if x['signal'] else -x[column], axis=1)
return df
def sup_res_align(df):
for column in df:
if 'sup' in column:
df[column + '_aligned'] = ((df[column] == df['signal']) & (df['signal']))
if 'res' in column:
df[column + '_aligned'] = ((df[column] == ~df['signal']) & (~df['signal']))
return df
def process_dates(df):
fld = df.index
pre = 'dt_'
attr_list = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Hour', 'Minute']
for attr in attr_list:
df[pre + attr] = getattr(fld.dt, attr.lower())
return df
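# Minimal usage sketch (added for illustration, not part of the original module).
# The column names 'close_direction' and 'close_tied' are hypothetical; they are
# chosen only to contain the 'direction'/'tied' substrings that fit_transform
# matches on, alongside the boolean 'signal' column it requires.
if __name__ == '__main__':
    import pandas as pd
    demo = pd.DataFrame({'signal': [True, False, True],
                         'close_direction': [True, True, False],
                         'close_tied': [0.5, 0.2, 0.1]})
    print(IndicatorPipeLine().fit_transform(demo))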
| 2.171875 | 2 |
nbmessages/utils.py | ucsd-ets/nbmessages | 0 | 12794152 | """One-off functions"""
import urllib.parse, random, string
def load_yaml(path):
import yaml
try:
with open(path, 'r') as yaml_file:
data = yaml.load(yaml_file, Loader=yaml.FullLoader)
return data
except FileNotFoundError:
        raise FileNotFoundError(f'could not load yaml at path: {path}')
except Exception as e:
raise e
def parse_body(body):
body = urllib.parse.parse_qs(body)
for k, v in body.items():
if len(v) == 1:
body.update({k: v[0]})
return body
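# Illustrative example (added for clarity): urllib's parse_qs returns every value
# as a list, so parse_body flattens single-item lists but keeps repeated keys:
#     parse_body('a=1&b=2&b=3')  ->  {'a': '1', 'b': ['2', '3']}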
def unquote_plus(text):
return urllib.parse.unquote_plus(text)
def parse_url_path(url_path):
reformat = url_path.replace('%2F', '/')
reformat = reformat.replace('+', ' ')
return reformat
def random_string(stringLength=8):
"""https://pynative.com/python-generate-random-string/
Args:
stringLength (int, optional): length of string. Defaults to 8.
Returns:
str: random string
"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength)) | 3.1875 | 3 |
controls.py | CamRoy008/AlouetteApp | 0 | 12794153 | # This file is depreciated. Controls had to be hard coded into app.py in the translate_static function. Babel could not translate from this file.
from flask_babel import Babel, _
# Controls for webapp
station_name_options = [
{'label': _('Resolute Bay, No. W. Territories'), 'value': 'Resolute Bay, No. W. Territories'},
{'label': _('Blossom Point, Maryland'), 'value': 'Blossom Point, Maryland'},
{'label': _('South Atlantic, Falkland Islands'), 'value': 'South Atlantic, Falkland Islands'},
{'label': _("St. John's, Newfoundland"), 'value': "St. John's, Newfoundland"},
{'label': _('Orroral Valley, Australia'), 'value': 'Orroral Valley, Australia'},
{'label': _('Prince Albert, Canada'), 'value': 'Prince Albert, Canada'},
{'label': _('Ottawa, Canada'), 'value': 'Ottawa, Canada'},
{'label': _('Byrd Station, Antartica'), 'value': 'Byrd Station, Antartica'},
{'label': _('Las Palmas, Canary Island'), 'value': 'Las Palmas, Canary Island'},
{'label': _('Winkfield, England'), 'value': 'Winkfield, England'},
{'label': _('Fort Myers, Florida'), 'value': 'Fort Myers, Florida'},
{'label': _('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'},
{'label': _('East Grand Forks, Minnesota'), 'value': 'East Grand Forks, Minnesota'},
{'label': _('Rosman, No. Carolina'), 'value': 'Rosman, No. Carolina'},
{'label': _('College, Fairbanks, Alaska'), 'value': 'College, Fairbanks, Alaska'},
{'label': _('Woomera, Australia'), 'value': 'Woomera, Australia'},
{'label': _('Gilmore Creek, Fairbanks, Alaska'), 'value': 'Gilmore Creek, Fairbanks, Alaska'},
{'label': _('Tromso, Norway'), 'value': 'Tromso, Norway'},
{'label': _('University of Alaska, Fairbanks, Alaska'), 'value': 'University of Alaska, Fairbanks, Alaska'},
{'label': _('Darwin, Australia'), 'value': 'Darwin, Australia'},
{'label': _('Quito, Ecuador'), 'value': 'Quito, Ecuador'},
{'label': _('South Point, Hawaiian Islands'), 'value': 'South Point, Hawaiian Islands'},
{'label': _('Lima, Peru'), 'value': 'Lima, Peru'},
{'label': _('Johannesburg, South Africa'), 'value': 'Johannesburg, South Africa'},
{'label': _('Kano, Nigeria'), 'value': 'Kano, Nigeria'},
{'label': _('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'},
{'label': _('Bretigny, France'), 'value': 'Bretigny, France'},
{'label': _('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'},
{'label': _('Boulder, Colorado'), 'value': 'Boulder, Colorado'},
{'label': _('Mojave, California'), 'value': 'Mojave, California'},
{'label': _('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'},
{'label': _('Kashima, Japan'), 'value': 'Kashima, Japan'}]
# Getting only the values of the station names
station_values = []
for station in station_name_options:
station_values.append(station['value'])
x_axis_options = [
{'label': _('Date'), 'value': ('timestamp')},
{'label': _('Latitude'), 'value': ('lat')},
{'label': _('Longitude'), 'value': ('lon')}]
y_axis_options = [
{'label': _('Minimum Frequency'), 'value': ('fmin')},
{'label': _('Maximum Depth'), 'value': ('max_depth')}]
year_dict = {}
for year in range(1962,1974):
year_dict[year] = str(year)
lat_dict = {}
for lat in range(-90, 90+1, 15):
lat_dict[lat] = str(lat)
lon_dict = {}
for lon in range(-180, 180+1, 30):
lon_dict[lon] = str(lon) | 1.78125 | 2 |
archiv/views.py | csae8092/djtranskribus | 0 | 12794154 | <reponame>csae8092/djtranskribus
# generated by appcreator
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.urls import reverse, reverse_lazy
from django.views.generic.detail import DetailView
from django.views.generic.edit import DeleteView
from . filters import *
from . forms import *
from . tables import *
from . models import (
TrpCollection,
TrpDocument,
TrpPage,
TrpTranscript
)
from browsing.browsing_utils import (
GenericListView, BaseCreateView, BaseUpdateView, BaseDetailView
)
class TrpCollectionListView(GenericListView):
model = TrpCollection
filter_class = TrpCollectionListFilter
formhelper_class = TrpCollectionFilterFormHelper
table_class = TrpCollectionTable
init_columns = [
'id', 'name',
]
enable_merge = True
class TrpCollectionDetailView(BaseDetailView):
model = TrpCollection
template_name = 'browsing/generic_detail.html'
class TrpCollectionCreate(BaseCreateView):
model = TrpCollection
form_class = TrpCollectionForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpCollectionCreate, self).dispatch(*args, **kwargs)
class TrpCollectionUpdate(BaseUpdateView):
model = TrpCollection
form_class = TrpCollectionForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpCollectionUpdate, self).dispatch(*args, **kwargs)
class TrpCollectionDelete(DeleteView):
model = TrpCollection
template_name = 'webpage/confirm_delete.html'
success_url = reverse_lazy('archiv:trpcollection_browse')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpCollectionDelete, self).dispatch(*args, **kwargs)
class TrpDocumentListView(GenericListView):
model = TrpDocument
filter_class = TrpDocumentListFilter
formhelper_class = TrpDocumentFilterFormHelper
table_class = TrpDocumentTable
init_columns = [
'id', 'title',
]
enable_merge = True
class TrpDocumentDetailView(BaseDetailView):
model = TrpDocument
template_name = 'browsing/generic_detail.html'
class TrpDocumentCreate(BaseCreateView):
model = TrpDocument
form_class = TrpDocumentForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpDocumentCreate, self).dispatch(*args, **kwargs)
class TrpDocumentUpdate(BaseUpdateView):
model = TrpDocument
form_class = TrpDocumentForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpDocumentUpdate, self).dispatch(*args, **kwargs)
class TrpDocumentDelete(DeleteView):
model = TrpDocument
template_name = 'webpage/confirm_delete.html'
success_url = reverse_lazy('archiv:trpdocument_browse')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpDocumentDelete, self).dispatch(*args, **kwargs)
class TrpPageListView(GenericListView):
model = TrpPage
filter_class = TrpPageListFilter
formhelper_class = TrpPageFilterFormHelper
table_class = TrpPageTable
init_columns = [
'id', 'part_of', 'page_nr',
]
enable_merge = True
class TrpPageDetailView(BaseDetailView):
model = TrpPage
template_name = 'browsing/generic_detail.html'
class TrpPageCreate(BaseCreateView):
model = TrpPage
form_class = TrpPageForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpPageCreate, self).dispatch(*args, **kwargs)
class TrpPageUpdate(BaseUpdateView):
model = TrpPage
form_class = TrpPageForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpPageUpdate, self).dispatch(*args, **kwargs)
class TrpPageDelete(DeleteView):
model = TrpPage
template_name = 'webpage/confirm_delete.html'
success_url = reverse_lazy('archiv:trppage_browse')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpPageDelete, self).dispatch(*args, **kwargs)
class TrpTranscriptListView(GenericListView):
model = TrpTranscript
filter_class = TrpTranscriptListFilter
formhelper_class = TrpTranscriptFilterFormHelper
table_class = TrpTranscriptTable
init_columns = [
'id', 'id',
]
enable_merge = True
class TrpTranscriptDetailView(BaseDetailView):
model = TrpTranscript
template_name = 'browsing/generic_detail.html'
class TrpTranscriptCreate(BaseCreateView):
model = TrpTranscript
form_class = TrpTranscriptForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpTranscriptCreate, self).dispatch(*args, **kwargs)
class TrpTranscriptUpdate(BaseUpdateView):
model = TrpTranscript
form_class = TrpTranscriptForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpTranscriptUpdate, self).dispatch(*args, **kwargs)
class TrpTranscriptDelete(DeleteView):
model = TrpTranscript
template_name = 'webpage/confirm_delete.html'
success_url = reverse_lazy('archiv:trptranscript_browse')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TrpTranscriptDelete, self).dispatch(*args, **kwargs)
| 1.90625 | 2 |
hero1/command_pb2.py | danielhwkim/Hero1 | 0 | 12794155 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: command.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rcommand.proto\x12\tcommander\"U\n\x03\x43md\x12\x1f\n\x03\x63md\x18\x01 \x01(\x0e\x32\x12.commander.CmdType\x12\x0c\n\x04ints\x18\x02 \x03(\x05\x12\x0e\n\x06\x66loats\x18\x03 \x03(\x02\x12\x0f\n\x07strings\x18\x04 \x03(\t*\xe3\x01\n\x07\x43mdType\x12\x0e\n\nRAWKEYDOWN\x10\x00\x12\x0c\n\x08RAWKEYUP\x10\x01\x12\x0c\n\x08\x43OMMAND2\x10\x02\x12\x0c\n\x08\x43OMMAND3\x10\x03\x12\x0c\n\x08\x43OMMAND4\x10\x04\x12\x0c\n\x08\x43OMMAND5\x10\x05\x12\x0c\n\x08\x43OMMAND6\x10\x06\x12\x0c\n\x08\x43OMMAND7\x10\x07\x12\x0c\n\x08\x43OMMAND8\x10\x08\x12\x0c\n\x08\x43OMMAND9\x10\t\x12\x0f\n\x0bMAPORIGINAL\x10\n\x12\x07\n\x03MAP\x10\x0b\x12\x07\n\x03\x41\x43K\x10\x0c\x12\x08\n\x04\x41\x43K2\x10\r\x12\x08\n\x04HERO\x10\x0e\x12\t\n\x05READY\x10\x0f\x12\x08\n\x04INIT\x10\x10\x62\x06proto3')
_CMDTYPE = DESCRIPTOR.enum_types_by_name['CmdType']
CmdType = enum_type_wrapper.EnumTypeWrapper(_CMDTYPE)
RAWKEYDOWN = 0
RAWKEYUP = 1
COMMAND2 = 2
COMMAND3 = 3
COMMAND4 = 4
COMMAND5 = 5
COMMAND6 = 6
COMMAND7 = 7
COMMAND8 = 8
COMMAND9 = 9
MAPORIGINAL = 10
MAP = 11
ACK = 12
ACK2 = 13
HERO = 14
READY = 15
INIT = 16
_CMD = DESCRIPTOR.message_types_by_name['Cmd']
Cmd = _reflection.GeneratedProtocolMessageType('Cmd', (_message.Message,), {
'DESCRIPTOR' : _CMD,
'__module__' : 'command_pb2'
# @@protoc_insertion_point(class_scope:commander.Cmd)
})
_sym_db.RegisterMessage(Cmd)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_CMDTYPE._serialized_start=116
_CMDTYPE._serialized_end=343
_CMD._serialized_start=28
_CMD._serialized_end=113
# @@protoc_insertion_point(module_scope)
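# Illustrative usage sketch (added for clarity; standard protobuf message API, not
# part of the generated code, and the field values below are arbitrary examples):
#     cmd = Cmd(cmd=RAWKEYDOWN, ints=[65], strings=['keyA'])
#     wire = cmd.SerializeToString()
#     decoded = Cmd.FromString(wire)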
| 1.15625 | 1 |
hoki/load.py | HeloiseS/hoki | 40 | 12794156 | <reponame>HeloiseS/hoki
"""
This module implements the tools to easily load BPASS data.
"""
import pandas as pd
import hoki.hrdiagrams as hr
from hoki.constants import *
import os
import yaml
import io
import pickle
import pkg_resources
import hoki.data_compilers
import warnings
from hoki.utils.exceptions import HokiDeprecationWarning, HokiKeyError, HokiTypeError
# TODO: Should I allow people to chose to load the data into a numpy arrays as well or is the
# data frame good enough?
__all__ = ['model_input', 'model_output', 'set_models_path', 'unpickle']
data_path = pkg_resources.resource_filename('hoki', 'data')
########################
# GENERAL LOAD HELPERS #
########################
def unpickle(path):
"""Extract pickle files"""
assert os.path.isfile(path), 'File not found.'
return pickle.load(open(path, 'rb'))
# TODO: Deprecation warning
def set_models_path(path):
"""
Changes the path to the stellar models in hoki's settings
Parameters
----------
path : str,
Absolute path to the top level of the stellar models this could be a directory named something like
bpass-v2.2-newmodels and the next level down should contain 'NEWBINMODS' and 'NEWSINMODS'.
Notes
-----
You are going to have to reload hoki for your new path to take effect.
"""
deprecation_msg = "set_models_path has been moved to the hoki.constants module -- In future versions of hoki" \
"calling set_models_path from hoki.load will fail"
warnings.warn(deprecation_msg, HokiDeprecationWarning)
assert os.path.isdir(path), 'HOKI ERROR: The path provided does not correspond to a valid directory'
path_to_settings = data_path+'/settings.yaml'
with open(path_to_settings, 'r') as stream:
settings = yaml.safe_load(stream)
settings['models_path'] = path
with io.open(path_to_settings, 'w', encoding='utf8') as outfile:
yaml.dump(settings, outfile, default_flow_style=False, allow_unicode=True)
print('Looks like everything went well! You can check the path was correctly updated by looking at this file:'
'\n'+path_to_settings)
########################
# LOAD DUMMY VARIABLE #
########################
def dummy_to_dataframe(filename, bpass_version=DEFAULT_BPASS_VERSION):
"""Reads in dummy to df from a filename"""
inv_dict ={v: k for k, v in dummy_dicts[bpass_version].items()}
cols = [inv_dict[key] if key in inv_dict.keys() else 'Nan'+str(key) for key in range(96)]
dummy = pd.read_csv(filename, names=cols, sep=r"\s+", engine='python')
return dummy
#########################
# MODEL INPUT FUNCTIONS #
#########################
def model_input(path):
"""
Loads inputs from one file and put them in a dataframe
Parameters
----------
path : str
Path to the file containing the input data.
Returns
-------
"""
assert isinstance(path, str), "The location of the file is expected to be a string."
assert os.path.isfile(path), f"File {path} does not exist, or its path is incorrect."
lines = open(path).read().split("\n")
# rows [a,b,c,d] in the BPASS manual
row = [1, 0, 0, 0]
# All potential input parameters and filename
# If there is no corresponding value for a particular model, will append a NaN
filenames = []
modelimfs = []
modeltypes = []
mixedimf = []
mixedage = []
initialBH = []
initialP = []
# This goes through the file line by line. Using .split is not possible/convenient
# The vector will tell use what we should do with this line.
for l in lines[1:]:
# This line contains the filename.
if row[0]:
filenames.append(l)
# The next line will contain the imf probability and the type - we reset the vector..
row = [0, 1, 0, 0]
# ... and we skip the rest to read in the next line.
continue
# This line contains the imf probability and the type
elif row[1]:
elements = l.split() # we split the line into the 2 values
modelimfs.append(elements[0]) # and append them
modeltypes.append(elements[1])
# The next step is decided according to the value of type
# To know what each value means, consult the BPASS manual
if int(elements[1]) < 2:
# In this case all the other potential inputs are NaNs and we go back to the
# beginning to read a new file name
row = [1, 0, 0, 0]
mixedimf.append(0.0)
mixedage.append(0.0)
initialBH.append(np.nan)
initialP.append(np.nan)
continue
elif int(elements[1]) != 4:
# If type is 2 or 3, we know the initial BH and initial P will be NaN
# but we also need to read the next line so we set the vector accordingly.
row = [0, 0, 1, 0]
initialBH.append(np.nan)
initialP.append(np.nan)
continue
elif int(elements[1]) == 4:
# If the type is 4 we need all the other outputs, so we need the next 2 lines.
# We set the vector accordingly.
row = [0, 0, 1, 1]
continue
elif row[2]:
# Splitting the two values and putting them in their respective lists
elements = l.split()
mixedimf.append(elements[0])
mixedage.append(elements[1])
# Then depending on whether we need the next line for more inputs
# we set the vector to either go back to reading a filename or to probe those inputs.
if not row[3]:
row = [1, 0, 0, 0]
if row[3]:
row[2] = 0
continue
elif row[3]:
# This reads the last possible pair of inputs and puts them in their rightful lists.
elements = l.split()
initialBH.append(elements[0])
            initialP.append(elements[1])
# We then reset the vector to be reading in filenames because no more inputs are coming
row = [1, 0, 0, 0]
    # Once we've gone through the whole file and filled our lists, we can put them in a dataframe
# with some named columns and set the datatypes to strings and numbers.
input_df = pd.DataFrame.from_dict({'filenames': filenames[:-1], 'model_imf': modelimfs,
'types': modeltypes, 'mixed_imf': mixedimf,
'mixed_age': mixedage, 'initial_BH': initialBH,
'initial_P': initialP}).astype({'filenames': str,
'model_imf': float,
'types': int,
'mixed_imf': float,
'mixed_age': float,
'initial_BH': float,
'initial_P': float})
return input_df
##########################
# MODEL OUTPUT FUNCTIONS #
##########################
def model_output(path, hr_type=None):
"""
Loads a BPASS output file
Parameters
----------
path : str
Path to the file containing the target data.
hr_type : str, optional
Type of HR diagram to load: 'TL', 'Tg' or 'TTG'.
Returns
-------
Output Data : pandas.DataFrame or hoki.hrdiagrams.HRDiagrams object
"""
assert isinstance(path, str), "HOKI ERROR: The location of the file is expected to be a string."
assert os.path.isfile(path), "HOKI ERROR: This file does not exist, or its path is incorrect."
assert hr_type in [None,'TL', 'Tg', 'TTG'], "HOKI ERROR: The HR diagram type is invalid. " \
"Available options are: 'TL', 'Tg', 'TTG'. "
if "supernova" in path:
return _sn_rates(path)
elif "numbers" in path:
return _stellar_numbers(path)
elif "yields" in path:
return _yields(path)
elif "starmass" in path:
return _stellar_masses(path)
elif "spectra" in path:
return _sed(path)
elif "ioniz" in path:
return _ionizing_flux(path)
elif "colour" in path:
return _colours(path)
elif "hrs" in path and hr_type == 'TL':
return _hrTL(path)
elif "hrs" in path and hr_type == 'Tg':
return _hrTg(path)
elif "hrs" in path and hr_type == 'TTG':
return _hrTTG(path)
else:
print("HOKI ERROR -- Could not load the Stellar Population output. "
"\nDEBUGGING ASSISTANT:\n1) Is the filename correct?"
"\n2) Trying to load an HR diagram? "
"Make sure hr_type is set! Available options are: 'TL', 'Tg', 'TTG'. ")
def _sn_rates(path):
"""
Loads One Supernova rate file into a dataframe
"""
return pd.read_csv(path, sep=r"\s+",
names=['log_age', 'Ia', 'IIP', 'II', 'Ib', 'Ic', 'LGRB', 'PISNe', 'low_mass',
'e_Ia', 'e_IIP', 'e_II', 'e_Ib', 'e_Ic', 'e_LGRB', 'e_PISNe', 'e_low_mass',
'age_yrs'], engine='python')
def _stellar_numbers(path):
"""
Load One stellar type number file into a dataframe
"""
return pd.read_csv(path, sep=r"\s+",
names=['log_age', 'O_hL', 'Of_hL', 'B_hL', 'A_hL', 'YSG_hL',
'K_hL', 'M_hL', 'WNH_hL', 'WN_hL', 'WC_hL',
'O_lL', 'Of_lL', 'B_lL', 'A_lL', 'YSG_lL',
'K_lL', 'M_lL', 'WNH_lL', 'WN_lL', 'WC_lL'], engine='python')
def _yields(path):
"""
Load One yields file into a dataframe
"""
return pd.read_csv(path, sep=r"\s+",
names=['log_age', 'H_wind', 'He_wind', 'Z_wind', 'E_wind',
'E_sn', 'H_sn', 'He_sn', 'Z_sn'], engine='python')
def _stellar_masses(path):
"""
Load One stellar masses file into a dataframe
"""
return pd.read_csv(path, sep=r"\s+",
names=['log_age', 'stellar_mass', 'remnant_mass'], engine='python')
def _hrTL(path):
"""
Load HR diagrams (TL type)
"""
# 'a' is just a place order which contains the whole file in an array of shape (45900,100)
a = np.loadtxt(path)
return hr.HRDiagram(a[0:5100,:].reshape(51,100,100),
a[5100:10200,:].reshape(51,100,100),
a[10200:15300,:].reshape(51,100,100), hr_type='TL')
def _hrTg(path):
"""
Load One HR diagrams (Tg type)
"""
a = np.loadtxt(path)
return hr.HRDiagram(a[15300:20400,:].reshape(51,100,100),
a[20400:25500,:].reshape(51,100,100),
a[25500:30600,:].reshape(51,100,100), hr_type='Tg')
def _hrTTG(path):
"""
Load One HR diagrams (T/TG type)
"""
a = np.loadtxt(path)
return hr.HRDiagram(a[30600:35700,:].reshape(51,100,100),
a[35700:40800,:].reshape(51,100,100),
a[40800:,:].reshape(51,100,100), hr_type='TTG')
def _sed(path):
"""
Load One SED file
"""
return pd.read_csv(path, sep=r"\s+", engine='python',
names=['WL', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8',
'6.9', '7.0', '7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7', '7.8',
'7.9', '8.0', '8.1', '8.2', '8.3', '8.4', '8.5', '8.6', '8.7', '8.8',
'8.9', '9.0', '9.1', '9.2', '9.3', '9.4', '9.5', '9.6', '9.7', '9.8',
'9.9', '10.0', '10.1', '10.2', '10.3', '10.4', '10.5', '10.6', '10.7',
'10.8', '10.9', '11.0'])
def _ionizing_flux(path):
"""
Load One ionizing flux file
"""
return pd.read_csv(path, sep=r'\s+', engine='python',
names=['log_age', 'prod_rate', 'halpha', 'FUV', 'NUV'])
def _colours(path):
"""
Load One colour file
"""
return pd.read_csv(path, sep=r'\s+', engine='python',
names=['log_age', 'V-I', 'U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'u',
'g', 'r', 'i', 'z', 'f300w', 'f336w', 'f435w', 'f450w', 'f555w',
'f606w', 'f814w', 'prod_rate', 'halpha', 'FUV', 'NUV'])
##########################
# NEBULAR EMISSION LINES #
##########################
def nebular_lines(path):
"""
Load the nebular line output information
Parameters
----------
path
Returns
-------
"""
assert isinstance(path, str), "HOKI ERROR: The location of the file is expected to be a string."
assert os.path.isfile(path), "HOKI: ERROR This file does not exist, or its path is incorrect."
if 'UV' in path:
return _UV_nebular_lines(path)
elif 'Optical' in path:
return _optical_nebular_lines(path)
def _optical_nebular_lines(path):
column_opt_em_lines=['model_num', 'logU', 'log_nH', 'log_age',
'NII6548_F', 'NII6548_EW', 'NII6584_F', 'NII6584_EW',
'SiII6716_F', 'SiII6716_EW', 'SiII6731_F', 'SiII6731_EW',
'OI6300_F', 'OI6300_EW',
'OIII4959_F','OIII4959_EW','OIII5007_F','OIII5007_EW',
'Halpha_F', 'Halpha_EW', 'Hbeta_F', 'Hbeta_EW',
'HeI4686_F', 'HeI4686_EW']
return pd.read_csv(path, skiprows=1, sep=r'\s+', engine='python', names=column_opt_em_lines)
def _UV_nebular_lines(path):
column_UV_em_lines = ['model_num', 'logU', 'log_nH', 'log_age',
'HeII1640_F', 'HeII1640_EW',
'CIII1907_F', 'CIII1907_EW', 'CIII1910_F', 'CIII1910_EW',
'CIV1548_F', 'CIV1548_EW', 'CIV1551_F', 'CIV1551_EW',
'OI1357_F', 'OI1357_EW',
'OIII1661_F', 'OIII1661_EW', 'OIII1666_F', 'OIII1666_EW',
'SiII1263_F', 'SiII1263_EW', 'SiIII1308_F', 'SiIII1308_EW', 'SiII1531_F', 'SiII1531_EW']
return pd.read_csv(path, skiprows=1, sep=r'\s+', engine='python', names=column_UV_em_lines)
#####################################
# BPASS Load over all metallicity #
#####################################
def rates_all_z(data_path, imf, binary=True):
"""
Loads the BPASS supernova event files.
Notes
-----
The rates are just read from file and not normalised.
Input
-----
data_path : `str`
The filepath to the folder containing the BPASS data
binary : `bool`
Use the binary files or just the single stars. Default=True
imf : `str`
BPASS Identifier of the IMF to be used.
The accepted IMF identifiers are:
- `"imf_chab100"`
- `"imf_chab300"`
- `"imf100_100"`
- `"imf100_300"`
- `"imf135_100"`
- `"imf135_300"`
- `"imfall_300"`
- `"imf170_100"`
- `"imf170_300"`
Returns
-------
`pandas.DataFrame` (51, (8,13)) (log_age, (event_types, metallicity)
A pandas MultiIndex dataframe containing the BPASS number of events
per metallicity per type.
Usage: rates.loc[log_age, (type, metallicity)]
Example: rates.loc[6.5, ("Ia", 0.02)]
Notes
-----
This dataframe has the following structure.
The index is the log_age as a float.
The column is a `pandas.MultiIndex` with the event types
(level=0, `float`) and the metallicity (level=1, `float`)
|Event Type | Ia | IIP | ... | PISNe | low_mass |
|Metallicity| 0.00001 | 0.00001 | ... | 0.04 | 0.04 |
| log_age |---------|----------|------|-------|----------|
| 6.0 |
| ... | Event Rate values
| 11.0 |
"""
# Check population type
star = "bin" if binary else "sin"
# Check if the given IMF is in the accepted IMFs
if imf not in BPASS_IMFS:
raise HokiKeyError(
f"{imf} is not a BPASS IMF. Please select a correct IMF.\n"\
"These can be found in the documentation of this function.")
# Create the output DataFrame
arrays = [BPASS_NUM_METALLICITIES, BPASS_EVENT_TYPES]
columns = pd.MultiIndex.from_product(
arrays, names=["Metallicicty", "Event Type"])
rates = pd.DataFrame(index=BPASS_TIME_BINS,
columns=columns,
dtype=np.float64)
rates.index.name = "log_age"
# load supernova count files
for num, metallicity in enumerate(BPASS_METALLICITIES):
data = model_output(
f"{data_path}/supernova-{star}-{imf}.{metallicity}.dat"
)
data = data.loc[:, slice(BPASS_EVENT_TYPES[0], BPASS_EVENT_TYPES[-1])]
rates.loc[:, (BPASS_NUM_METALLICITIES[num], slice(None))] = data.to_numpy()
# swap metallicity and event type
return rates.swaplevel(0, 1, axis=1)
def spectra_all_z(data_path, imf, binary=True):
"""
Load all BPASS spectra from files.
Notes
-----
The first time this function is ran on a folder it will generate a pickle
file containing all the BPASS spectra per metallicity for faster loading
in the future. It stores the file in the same folder with the name:
`all_spectra-[bin/sin]-[imf].pkl`
The spectra are just read from file and not normalised.
Input
-----
data_path : `str`
The path to the folder containing the BPASS spectra.
binary : `bool`
Use the binary files or just the single stars. Default=True
imf : `str`
BPASS Identifier of the IMF to be used.
The accepted IMF identifiers are:
- `"imf_chab100"`
- `"imf_chab300"`
- `"imf100_100"`
- `"imf100_300"`
- `"imf135_100"`
- `"imf135_300"`
- `"imfall_300"`
- `"imf170_100"`
- `"imf170_300"`
Returns
-------
spectra : `numpy.ndarray` (13, 51, 100000) [(metallicity, log_age, wavelength)]
A 3D numpy array containing all the BPASS spectra for a specific imf
and binary or single star population.
Usage: spectra[1][2][1000]
(gives L_\\odot for Z=0.0001 and log_age=6.2 at 999 Angstrom)
"""
# Check population type
star = "bin" if binary else "sin"
# check IMF key
if imf not in BPASS_IMFS:
raise HokiKeyError(
f"{imf} is not a BPASS IMF. Please select a correct IMF.")
# check if data_path is a string
if not isinstance(data_path, str):
raise HokiTypeError("The folder location is expected to be a string.")
# check if compiled file exists
if os.path.isfile(f"{data_path}/all_spectra-{star}-{imf}.npy"):
print("Loading precompiled file.")
spectra = np.load(f"{data_path}/all_spectra-{star}-{imf}.npy")
print("Done Loading.")
# Otherwise compile
else:
print("Compiled file not found. Data will be compiled")
spec = hoki.data_compilers.SpectraCompiler(
data_path, data_path, imf, binary=binary
)
spectra = spec.output
return spectra
def emissivities_all_z(data_path, imf, binary=True):
"""
Load all BPASS emissivities from files.
Notes
-----
The first time this function is run on a folder it will generate an npy
file containing all the BPASS emissivities for faster loading in the
future. It stores the file in the same folder with the name:
`all_ionizing-[bin/sin]-[imf].npy`.
The emissivities are just read from file and not normalised.
Input
-----
data_path : `str`
The path to the folder containing the BPASS files.
binary : `bool`
Use the binary files or just the single stars. Default=True
imf : `str`
BPASS Identifier of the IMF to be used.
The accepted IMF identifiers are:
- `"imf_chab100"`
- `"imf_chab300"`
- `"imf100_100"`
- `"imf100_300"`
- `"imf135_100"`
- `"imf135_300"`
- `"imfall_300"`
- `"imf170_100"`
- `"imf170_300"`
Returns
-------
emissivities : `numpy.ndarray` (N_Z, N_age, 4) [(metallicity, log_age, band)]
A 3D numpy array containing all the BPASS emissivities (Nion [1/s],
L_Halpha [ergs/s], L_FUV [ergs/s/A], L_NUV [ergs/s/A]) for a specific
imf and binary or single star population.
        Usage: emissivities[1][2][0]
(gives Nion for Z=0.0001 and log_age=6.2)
"""
# Check population type
star = "bin" if binary else "sin"
# check IMF key
if imf not in BPASS_IMFS:
raise HokiKeyError(
f"{imf} is not a BPASS IMF. Please select a correct IMF.")
# check if data_path is a string
if not isinstance(data_path, str):
raise HokiTypeError("The folder location is expected to be a string.")
# Check if compiled spectra are already present in data folder
if os.path.isfile(f"{data_path}/all_ionizing-{star}-{imf}.npy"):
print("Load precompiled file.")
emissivities = np.load(f"{data_path}/all_ionizing-{star}-{imf}.npy")
print("Done Loading.")
# Compile the spectra for faster reading next time otherwise
else:
print("Compiled file not found. Data will be compiled.")
res = hoki.data_compilers.EmissivityCompiler(
data_path, data_path, imf, binary=binary
)
emissivities = res.output
return emissivities
#################
# #
#################
def _do_not_use():
import webbrowser
url = "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
webbrowser.open_new_tab(url)
| 2.078125 | 2 |
examples/go_to_joints_with_joint_impedances.py | iamlab-cmu/frankapy | 18 | 12794157 | import argparse
import sys
from frankapy import FrankaArm
from frankapy import FrankaConstants as FC
def wait_for_enter():
if sys.version_info[0] < 3:
raw_input('Press Enter to continue:')
else:
input('Press Enter to continue:')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--time', '-t', type=float, default=10)
parser.add_argument('--open_gripper', '-o', action='store_true')
args = parser.parse_args()
print('Starting robot')
fa = FrankaArm()
if args.open_gripper:
fa.open_gripper()
print('Be very careful!! Make sure the robot can safely move to HOME JOINTS Position.')
wait_for_enter()
fa.reset_joints()
print('Using default joint impedances to move back and forth.')
wait_for_enter()
fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES)
fa.goto_joints(FC.HOME_JOINTS)
print('Now using different joint impedances to move back and forth.')
wait_for_enter()
fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000, 1000])
fa.goto_joints(FC.HOME_JOINTS)
print('Remember to reset the joint_impedances to defaults.')
fa.goto_joints(FC.HOME_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES)
| 2.9375 | 3 |
ftp.py | benny-ojeda/python-ftp-client | 1 | 12794158 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from ftplib import FTP
import ftplib
import argparse
import getpass
parser = argparse.ArgumentParser(description='FTP Client.')
parser.add_argument('ftp_server')
args = parser.parse_args()
def cli(prompt, reminder='Please type a valid command'):
command = [
'help',
'ls',
'pwd',
'cd',
'get',
'send',
'mkdir',
'rmdir',
'delete',
'size',
'debug',
'clear'
]
shell = True
while shell:
cmd = input(prompt)
try:
if cmd == command[0]:
all_commands = command[:]
all_commands.sort()
print(*all_commands[0:6], sep='\t')
print(*all_commands[6:12], sep='\t')
if cmd == command[1]:
ftp.dir()
if cmd == command[2]:
print(ftp.pwd())
if cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 2:
dirname = cmd.split(' ', 1)[1]
print(ftp.cwd(dirname))
elif cmd.split(' ', 1)[0] == command[3] and len(cmd.split()) == 1:
print("E: Missing argument.")
if cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 2:
filename = cmd.split(' ', 1)[1]
print(ftp.retrbinary("RETR " + filename, open(str(filename), 'wb').write))
elif cmd.split(' ', 1)[0] == command[4] and len(cmd.split()) == 1:
print("E: Missing argument.")
if cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 2:
filename = cmd.split(' ', 1)[1]
print(ftp.storbinary("STOR " + filename, open(filename, 'rb'), callback=None))
elif cmd.split(' ', 1)[0] == command[5] and len(cmd.split()) == 1:
print("E: Missing argument.")
if cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 2:
dirname = cmd.split(' ', 1)[1]
print(ftp.mkd(dirname))
elif cmd.split(' ', 1)[0] == command[6] and len(cmd.split()) == 1:
print("E: Missing argument.")
if cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 2:
dirname = cmd.split(' ', 1)[1]
print(ftp.rmd(dirname))
elif cmd.split(' ', 1)[0] == command[7] and len(cmd.split()) == 1:
print("E: Missing argument.")
if cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 2:
filename = cmd.split(' ', 1)[1]
print(ftp.delete(filename))
elif cmd.split(' ', 1)[0] == command[8] and len(cmd.split()) == 1:
print("E: Missing argument.")
if cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 2:
filename = cmd.split(' ', 1)[1]
print(ftp.size(filename))
elif cmd.split(' ', 1)[0] == command[9] and len(cmd.split()) == 1:
print("E: Missing argument.")
if cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 2:
level = cmd.split(' ', 1)[1]
print(ftp.set_debuglevel(int(level)))
elif cmd.split(' ', 1)[0] == command[10] and len(cmd.split()) == 1:
print("E: Missing argument.")
if cmd == command[11]:
os.system("clear")
if cmd in ('q', 'quit', 'exit', 'bye'):
print(ftp.quit())
shell = False
if len(cmd.split()) and cmd.split(' ', 1)[0] not in command and cmd.split(' ', 1)[0] not in ('q', 'quit', 'exit', 'bye'):
print(reminder)
except ftplib.all_errors as error:
print('FTP error: ', error)
if args.ftp_server:
try:
with FTP(args.ftp_server) as ftp:
username = input('Username (%s): ' % args.ftp_server )
password = <PASSWORD>(prompt='Password: ', stream=None)
ftp.login(user=username, passwd=password)
print(ftp.getwelcome())
cli('ftp> ')
except ftplib.all_errors as error:
print('FTP error: ', error) | 3.171875 | 3 |
synapse/tests/test_lib_filepath.py | larrycameron80/synapse | 0 | 12794159 | <reponame>larrycameron80/synapse
import io
import shutil
import tarfile
import zipfile
import tempfile
from synapse.tests.common import *
import synapse.exc as s_exc
import synapse.lib.filepath as s_filepath
class TestFilePath(SynTest):
def test_filepath_glob(self):
temp_dir = tempfile.mkdtemp()
os.mkdir(os.path.join(temp_dir, 'dir0'))
os.mkdir(os.path.join(temp_dir, 'dir1'))
os.mkdir(os.path.join(temp_dir, 'dir2'))
os.mkdir(os.path.join(temp_dir, 'fooD'))
os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir1'))
os.makedirs(os.path.join(temp_dir, 'bazer', 'eir0', 'dir2'))
f0 = b'A' * 20
f0_path = os.path.join(temp_dir, 'foo0')
with open(f0_path, 'wb') as fd:
fd.write(f0)
f1 = b'B' * 20
f1_path = os.path.join(temp_dir, 'foo1')
with open(f1_path, 'wb') as fd:
fd.write(f1)
f2 = b'C' * 20
f2_path = os.path.join(temp_dir, 'foo2')
with open(f2_path, 'wb') as fd:
fd.write(f2)
f3 = b'Z' * 20
f3_path = os.path.join(temp_dir, 'junk')
with open(f3_path, 'wb') as fd:
fd.write(f3)
# same files alpha/bravo
t1 = b'a' * 20
t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'alpha')
with open(t_path, 'wb') as fd:
fd.write(t1)
t_path = os.path.join(temp_dir, 'bazer', 'eir0', 'dir1', 'bravo')
with open(t_path, 'wb') as fd:
fd.write(t1)
t_path = os.path.join(temp_dir, 'bazr')
with open(t_path, 'wb') as fd:
fd.write(t1)
# files that exists
path = os.path.join(temp_dir, 'foo*')
self.false(s_filepath.exists(path))
self.false(s_filepath.isfile(path))
self.false(s_filepath.isdir(path))
# dirs that exist
path = os.path.join(temp_dir, 'dir*')
self.false(s_filepath.exists(path))
self.false(s_filepath.isfile(path))
self.false(s_filepath.isdir(path))
# open a dir
fp = s_filepath.parsePath(temp_dir, 'dir0')
self.none(fp.open())
# multiple open regular files
path = os.path.join(temp_dir, 'foo*')
fd_ct = 0
f = [fd for fd in s_filepath.openfiles(path, mode='rb', req=False)]
for fd in f:
buf = fd.read()
self.eq(len(buf), 20)
self.isin(buf, [f0, f1, f2])
fd.close()
fd_ct += 1
self.eq(fd_ct, 3)
# multiple open on dir
path = os.path.join(temp_dir, 'dir*')
def diropen(path):
[f for f in s_filepath.openfiles(path, mode='rb', req=True)]
self.raises(s_exc.NoSuchPath, diropen, path)
path = os.path.join(temp_dir, 'dir*')
def diropen(path):
return [f for f in s_filepath.openfiles(path, mode='rb', req=False)]
self.eq([], diropen(path))
# multiple open on dne
path = os.path.join(temp_dir, 'dne*')
def diropen(path):
return [f for f in s_filepath.openfiles(path, mode='rb', req=True)]
self.raises(s_exc.NoSuchPath, diropen, path)
ret = [a for a in s_filepath.openfiles(None)]
self.eq([], ret)
# multiple open zip files
tzfd0 = open(os.path.join(temp_dir, 'baz.zip'), 'w')
tzfd1 = tempfile.NamedTemporaryFile()
ttfd0 = open(os.path.join(temp_dir, 'baz.tar'), 'w')
zfd0 = zipfile.ZipFile(tzfd0.name, 'w')
zfd0.writestr('dir0/dir1/dir2/foo0', f0)
zfd0.writestr('dir0/dir1/dir2/foo1', f1)
zfd0.writestr('dir0/dir1/dir2/foo2', f2)
zfd0.writestr('dir0/dir1/dir2/junk', 'Z' * 20)
zfd0.writestr('eir0/dir3/z1', t1)
zfd0.writestr('eir0/dir4/z2', t1)
zfd0.close()
tfd0 = tarfile.TarFile(ttfd0.name, 'w')
tfd0.add(f0_path, arcname='dir0/dir1/dir2/foo0')
tfd0.add(f1_path, arcname='dir0/dir1/dir2/foo1')
tfd0.add(f2_path, arcname='dir0/dir1/dir2/foo2')
tfd0.add(f3_path, arcname='dir0/dir1/dir2/junk')
tfd0.add(t_path, arcname='eir0/dir5/t1')
tfd0.add(t_path, arcname='eir0/dir6/t2')
tfd0.close()
zfd1 = zipfile.ZipFile(tzfd1.name, 'w')
zfd1.writestr('dir0/dir1/dir2/bar0', f0)
zfd1.writestr('dir0/dir1/dir2/bar1', f1)
zfd1.writestr('dir0/dir1/dir2/bar2', f2)
zfd1.writestr('dir0/dir1/dir2/junk', 'Z' * 20)
zfd1.write(tzfd0.name, arcname='ndir0/nested.zip')
zfd1.write(ttfd0.name, arcname='ndir0/nested.tar')
zfd1.close()
path = os.path.join(tzfd1.name, 'dir0/dir1/dir2/bar*')
count = 0
for fd in s_filepath.openfiles(path, mode='rb'):
buf = fd.read()
fd.seek(0)
self.eq(len(buf), 20)
self.eq(buf, fd.read())
self.isin(buf, [f0, f1, f2])
fd.close()
count += 1
self.eq(count, 3)
path = os.path.join(temp_dir, 'baz*', 'eir0', 'dir*', '*')
count = 0
for fd in s_filepath.openfiles(path, mode='rb'):
buf = fd.read()
self.eq(len(buf), 20)
self.eq(buf, t1)
fd.close()
count += 1
self.eq(count, 6)
tzfd0.close()
tzfd1.close()
ttfd0.close()
shutil.rmtree(temp_dir)
def test_filepath_regular(self):
temp_fd = tempfile.NamedTemporaryFile()
temp_dir = tempfile.mkdtemp()
fbuf = b'A' * 20
temp_fd.write(fbuf)
temp_fd.flush()
# file and dir that exist
self.true(s_filepath.exists(temp_fd.name))
self.true(s_filepath.exists(temp_dir))
self.true(s_filepath.exists('/'))
self.false(s_filepath.isfile(temp_dir))
self.false(s_filepath.isdir(temp_fd.name))
# DNE in a real directory
path = os.path.join(temp_dir, 'dne')
self.false(s_filepath.exists(path))
self.raises(s_exc.NoSuchPath, s_filepath._pathClass, path)
# open regular file
fd = s_filepath.openfile(temp_fd.name, mode='rb')
self.eq(fd.read(), fbuf)
fd.close()
# dne path
self.raises(s_exc.NoSuchPath, s_filepath.openfile, '%s%s' % (temp_fd.name, '_DNE'), mode='rb')
self.raises(s_exc.NoSuchPath, s_filepath.openfile, None)
self.raises(s_exc.NoSuchPath, s_filepath.openfile, '')
self.none(s_filepath.openfile(None, req=False))
# open a directory
self.none(s_filepath.openfile('/tmp', mode='rb', req=False))
self.none(s_filepath.openfile('/', req=False))
temp_fd.close()
shutil.rmtree(temp_dir)
def test_filepath_zip(self):
temp_fd = tempfile.NamedTemporaryFile()
nested_temp_fd = tempfile.NamedTemporaryFile()
zip_fd = zipfile.ZipFile(temp_fd.name, 'w')
zip_fd.writestr('foo', 'A' * 20)
zip_fd.writestr('dir0/bar', 'A' * 20)
zip_fd.writestr('dir0/dir1/dir2/baz', 'C' * 20)
zip_fd.close()
zip_fd = zipfile.ZipFile(nested_temp_fd.name, 'w')
zip_fd.writestr('aaa', 'A' * 20)
zip_fd.writestr('ndir0/bbb', 'A' * 20)
zip_fd.writestr('ndir0/ndir1/ndir2/ccc', 'C' * 20)
zip_fd.write(temp_fd.name, arcname='ndir0/nested.zip')
zip_fd.close()
# container is path
path = nested_temp_fd.name
self.true(s_filepath.exists(path))
self.true(s_filepath.isfile(path))
# base directory that exists
path = os.path.join(temp_fd.name, 'dir0')
self.true(s_filepath.exists(path))
self.true(s_filepath.isdir(path))
# container nested dir that exists
path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0')
self.true(s_filepath.exists(path))
self.true(s_filepath.isdir(path))
# container nested file that exists
path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'bar')
self.true(s_filepath.exists(path))
self.true(s_filepath.isfile(path))
# container nested DNE path
path = os.path.join(nested_temp_fd.name, 'ndir0', 'nested.zip', 'dir0', 'dne')
self.false(s_filepath.exists(path))
self.false(s_filepath.isfile(path))
self.false(s_filepath.isdir(path))
# base file that exists
path = os.path.join(temp_fd.name, 'foo')
self.true(s_filepath.exists(path))
self.true(s_filepath.isfile(path))
# file that exists in a directory
path = os.path.join(temp_fd.name, 'dir0', 'bar')
self.true(s_filepath.exists(path))
self.true(s_filepath.isfile(path))
# nested dir that exists
path = os.path.join(temp_fd.name, 'dir0', 'dir1', 'dir2')
self.true(s_filepath.isdir(path))
# DNE in a real directory
path = os.path.join(temp_fd.name, 'dir0', 'dne')
self.false(s_filepath.exists(path))
self.false(s_filepath.isfile(path))
self.false(s_filepath.isdir(path))
# DNE base
path = os.path.join(temp_fd.name, 'dne')
self.false(s_filepath.exists(path))
self.false(s_filepath.isfile(path))
self.false(s_filepath.isdir(path))
temp_fd.close()
nested_temp_fd.close()
def test_filepath_zip_open(self):
temp_fd = tempfile.NamedTemporaryFile()
zip_fd = zipfile.ZipFile(temp_fd.name, 'w')
abuf = 'A' * 20
bbuf = b'A' * 20
zip_fd.writestr('dir0/foo', abuf)
fbuf2 = 'B' * 20
zip_fd.writestr('bar', fbuf2)
zip_fd.close()
# file that exists in a directory
path = os.path.join(temp_fd.name, 'dir0', 'foo')
self.true(s_filepath.isfile(path))
# open zip file
path = temp_fd.name
fd0 = s_filepath.openfile(path, mode='rb')
fd1 = open(path, mode='rb')
self.eq(fd0.read(), fd1.read())
fd0.close()
fd1.close()
# open inner zip file
path = os.path.join(temp_fd.name, 'dir0', 'foo')
fd = s_filepath.openfile(path, mode='r')
self.eq(fd.read(), bbuf)
fd.close()
temp_fd.close()
temp_fd.close()
def test_filepath_tar(self):
# container is path
path = getTestPath('nest2.tar')
self.true(s_filepath.exists(path))
self.true(s_filepath.isfile(path))
# base directory that exists
path = getTestPath('nest2.tar', 'nndir0')
self.true(s_filepath.exists(path))
self.true(s_filepath.isdir(path))
# file that exists in a tar
path = getTestPath('nest2.tar', 'nndir0', 'nnbar')
self.true(s_filepath.exists(path))
self.true(s_filepath.isfile(path))
# container nested base directory that exists
path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0')
self.true(s_filepath.exists(path))
self.true(s_filepath.isdir(path))
# container nested file that exists
path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'nfoo')
self.true(s_filepath.exists(path))
self.true(s_filepath.isfile(path))
# container nested file that exists
path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'ndir1', 'nest0.zip', 'foo')
self.true(s_filepath.exists(path))
self.true(s_filepath.isfile(path))
# container nested path that DNE
path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar', 'ndir0', 'dne')
self.false(s_filepath.exists(path))
self.false(s_filepath.isdir(path))
self.false(s_filepath.isfile(path))
# DNE file in a real directory
path = getTestPath('nest2.tar', 'nndir0', 'dne')
self.false(s_filepath.exists(path))
self.false(s_filepath.isfile(path))
self.false(s_filepath.isdir(path))
# DNE base
path = getTestPath('nest2.tar', 'dne')
self.false(s_filepath.exists(path))
self.false(s_filepath.isfile(path))
self.false(s_filepath.isdir(path))
def test_filepath_tar_open(self):
# open tar file
path = getTestPath('nest2.tar')
fd = s_filepath.openfile(path, mode='rb')
fs_fd = open(getTestPath('nest2.tar'), 'rb')
self.eq(fd.read(), fs_fd.read())
fs_fd.close()
fd.close()
# open inner tar file
path = getTestPath('nest2.tar', 'nndir0', 'nndir1', 'nest1.tar')
fd = s_filepath.openfile(path, mode='rb')
fs_fd = open(getTestPath('nest1.tar'), 'rb')
self.eq(fd.read(), fs_fd.read())
fs_fd.close()
fd.close()
# open inner file
path = getTestPath('nest2.tar', 'nnfoo')
fd = s_filepath.openfile(path, mode='rb')
buf = b'A' * 20
buf += b'\n'
self.eq(fd.read(), buf)
fd.close()
| 2.421875 | 2 |
award/forms.py | Ras-Kwesi/ranker | 0 | 12794160 | from django import forms
from .models import *
class NewProject(forms.ModelForm):
class Meta:
model = Project
exclude = ['likes', 'profile',]
class EditProfile(forms.ModelForm):
class Meta:
model = Profile
exclude = []
fields = ['profilepic','bio','contact']
class EditUser(forms.ModelForm):
class Meta:
model = User
exclude = []
fields = ['first_name','last_name', 'email']
class NewVote(forms.ModelForm):
class Meta:
model = Vote
exclude = ['voter','project'] | 2.1875 | 2 |
gae/api/__init__.py | trevorhreed/gae-boilerplate | 0 | 12794161 | import core
class Test(core.Endpoint):
def get(self):
self.json({'what': 'now'}) | 1.90625 | 2 |
src/rpdk/core/jsonutils/utils.py | zjinmei/cloudformation-cli | 200 | 12794162 | import hashlib
import json
import logging
from collections.abc import Mapping, Sequence
from typing import Any, List, Tuple
from nested_lookup import nested_lookup
from ordered_set import OrderedSet
from .pointer import fragment_decode, fragment_encode
LOG = logging.getLogger(__name__)
NON_MERGABLE_KEYS = ("uniqueItems", "insertionOrder")
TYPE = "type"
REF = "$ref"
UNPACK_SEQUENCE_IDENTIFIER = "*"
class FlatteningError(Exception):
pass
def item_hash(
item,
): # assumption -> input is only json comparable type (dict/list/scalar)
"""MD5 hash for an item (Dictionary/Iterable/Scalar)"""
dhash = hashlib.md5() # nosec
if isinstance(item, dict):
item = {k: item_hash(v) for k, v in item.items()}
if isinstance(item, list):
        item = sorted(item_hash(i) for i in item)
encoded = json.dumps(item, sort_keys=True).encode()
dhash.update(encoded)
return dhash.hexdigest()
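# Illustrative check (added for clarity): dictionary key order does not change the
# digest, because items are serialized with json.dumps(..., sort_keys=True).
#     item_hash({"a": 1, "b": 2}) == item_hash({"b": 2, "a": 1})  # True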
def to_set(value: Any) -> OrderedSet:
return (
OrderedSet(value)
if isinstance(value, (list, OrderedSet))
else OrderedSet([value])
)
class ConstraintError(FlatteningError, ValueError):
def __init__(self, message, path, *args):
self.path = fragment_encode(path)
message = message.format(*args, path=self.path)
super().__init__(message)
class BaseRefPlaceholder:
"""A sentinel object representing a reference inside the base document."""
def __repr__(self):
"""Readable representation for debugging.
>>> repr(BaseRefPlaceholder())
'<BASE>'
"""
return "<BASE>"
#: The sentinel instance representing a reference inside the base document.
BASE = BaseRefPlaceholder()
def rewrite_ref(ref):
"""Rewrite a reference to be inside of the base document. A relative JSON
pointer is returned (in URI fragment identifier representation).
If the reference is already inside the base document (:const:`BASE`), the parts
are simply encoded into a pointer.
If the reference is outside of the base document, a unique pointer inside
the base document is made by namespacing the reference under the remote base
name inside the remote section.
>>> rewrite_ref((BASE, "foo", "bar"))
'#/foo/bar'
>>> rewrite_ref((BASE,))
'#'
>>> rewrite_ref(("remote", "foo", "bar"))
'#/remote/remote/foo/bar'
>>> rewrite_ref(("remote",))
'#/remote/remote'
"""
base, *parts = ref
if base is not BASE:
parts = ["remote", base] + parts
return fragment_encode(parts)
def traverse(document, path_parts):
"""Traverse the document according to the reference.
Since the document is presumed to be the reference's base, the base is
discarded. There is no validation that the reference is valid.
:raises ValueError, LookupError: the reference is invalid for this document
>>> traverse({"foo": {"bar": [42]}}, tuple())
({'foo': {'bar': [42]}}, (), None)
>>> traverse({"foo": {"bar": [42]}}, ["foo"])
({'bar': [42]}, ('foo',), {'foo': {'bar': [42]}})
>>> traverse({"foo": {"bar": [42]}}, ("foo", "bar"))
([42], ('foo', 'bar'), {'bar': [42]})
>>> traverse({"foo": {"bar": [42]}}, ("foo", "bar", "0"))
(42, ('foo', 'bar', 0), [42])
>>> traverse({}, ["foo"])
Traceback (most recent call last):
...
KeyError: 'foo'
>>> traverse([], ["foo"])
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'foo'
>>> traverse([], [0])
Traceback (most recent call last):
...
IndexError: list index out of range
"""
parent = None
path = []
for part in path_parts:
if isinstance(document, Sequence):
part = int(part)
parent = document
document = document[part]
path.append(part)
return document, tuple(path), parent
def _resolve_ref(sub_schema: dict, definitions: dict, last_step: bool = False):
# resolve $ref
ref = nested_lookup(REF, sub_schema) # should be safe (always single value)
    # because sub_schema is always per parent property
    # (taken from definitions)
    if last_step and REF not in sub_schema:  # don't traverse deeper than requested
# check if $ref is used directly ->
# means that we need to check definition
# otherwise it's an array and return subschema
return sub_schema
if ref:
# [0] should be a single $ref in subschema on the top level
# [-1] $ref must follow #/definitions/object
sub_schema = definitions[fragment_decode(ref[0])[-1]]
# resolve properties
properties = nested_lookup("properties", sub_schema)
if properties:
sub_schema = properties[0]
return sub_schema
# pylint: disable=C0301
def traverse_raw_schema(schema: dict, path: tuple):
"""Traverse the raw json schema resolving $ref
:raises TypeError: either schema is not of type dict
:raises ConstraintError: the schema tries to override "type" or "$ref"
>>> traverse_raw_schema({"properties": {"bar": [42]}}, tuple())
{'bar': [42]}
>>> traverse_raw_schema({"properties": {"bar": [42]}}, ("bar",))
[42]
>>> traverse_raw_schema({"definitions": {"bar": {"type": "boolean"}},"properties": {"bar": {"$ref": "#/definitions/bar"}}}, ("bar",))
{'type': 'boolean'}
>>> traverse_raw_schema({"definitions":{"b":[1],"f":{"properties":{"b":{"$ref":"#/definitions/b"}}}},"properties":{"f":{"$ref":"#/definitions/f"}}},("f", "b")) # noqa: B950
[1]
>>> traverse_raw_schema({}, ("foo"))
{}
>>> traverse_raw_schema([], ["foo"])
Traceback (most recent call last):
...
TypeError: Schema must be a dictionary
"""
if not isinstance(schema, Mapping):
raise TypeError("Schema must be a dictionary")
try:
properties = schema["properties"]
definitions = schema.get("definitions", {})
sub_properties = properties
last_step = (
len(path) - 1
) # get amount of steps to prevent deeper traversal than requested
for step in path:
sub_properties = _resolve_ref(
sub_properties[step],
definitions,
last_step=path.index(step) == last_step,
)
return sub_properties
except KeyError as e:
LOG.debug("Malformed Schema or incorrect path provided\n%s\n%s", path, e)
return {}
def traverse_path_for_sequence_members(
document: dict, path_parts: Sequence, path: list = None
) -> Tuple[List[object], List[tuple]]:
"""Traverse the paths for all sequence members in the document according to the reference.
Since the document is presumed to be the reference's base, the base is
discarded. There is no validation that the reference is valid.
Differing from traverse, this returns a list of documents and a list of resolved paths.
:parameter document: document to traverse (dict or list)
:parameter path_parts: document paths to traverse
:parameter path: traversed path so far
:raises ValueError, LookupError: the reference is invalid for this document
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, tuple())
([{'foo': {'bar': [42, 43, 44]}}], [()])
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ["foo"])
([{'bar': [42, 43, 44]}], [('foo',)])
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar"))
([[42, 43, 44]], [('foo', 'bar')])
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar", "*"))
([42, 43, 44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)])
>>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*"))
([{'baz': 1, 'bin': 1}, {'baz': 2, 'bin': 2}], [('foo', 'bar', 0), ('foo', 'bar', 1)])
>>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*", "baz"))
([1, 2], [('foo', 'bar', 0, 'baz'), ('foo', 'bar', 1, 'baz')])
>>> traverse_path_for_sequence_members({}, ["foo"])
Traceback (most recent call last):
...
KeyError: 'foo'
>>> traverse_path_for_sequence_members([], ["foo"])
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'foo'
>>> traverse_path_for_sequence_members([], [0])
Traceback (most recent call last):
...
IndexError: list index out of range
"""
if path is None:
path = []
if not path_parts:
return [document], [tuple(path)]
path_parts = list(path_parts)
if not isinstance(document, Sequence):
return _handle_non_sequence_for_traverse(document, path_parts, path)
return _handle_sequence_for_traverse(document, path_parts, path)
def _handle_non_sequence_for_traverse(
current_document: dict, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
"""
Handling a non-sequence member for `traverse_path_for_sequence_members` is like the loop block in `traverse`:
The next path part is the first part in the list of path parts.
The new document is obtained from the current document using the new path part as the key.
The next path part is added to the traversed path.
The traversal continues by recursively calling `traverse_path_for_sequence_members`
"""
part_to_handle = current_path_parts.pop(0)
current_document = current_document[part_to_handle]
current_path.append(part_to_handle)
return traverse_path_for_sequence_members(
current_document, current_path_parts, current_path
)
def _handle_sequence_for_traverse(
current_document: Sequence, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
"""
Check the new path part for the unpack sequence identifier (e.g. '*'), otherwise traverse index and continue:
The new document is obtained from the current document (a sequence) using the new path part as the index.
The next path part is added to the traversed path
"""
sequence_part = current_path_parts.pop(0)
if sequence_part == UNPACK_SEQUENCE_IDENTIFIER:
return _handle_unpack_sequence_for_traverse(
current_document, current_path_parts, current_path
)
# otherwise, sequence part should be a valid index
current_sequence_part = int(sequence_part)
current_document = current_document[current_sequence_part]
current_path.append(current_sequence_part)
return [current_document], [tuple(current_path)]
def _handle_unpack_sequence_for_traverse(
current_document: Sequence, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
"""
When unpacking a sequence, we need to include multiple paths and multiple documents, one for each sequence member.
For each sequence member:
Append the traversed paths w/ the sequence index, and get the new document.
The new document is obtained by traversing the current document using the sequence index.
The new document is appended to the list of new documents.
For each new document:
The remaining document is traversed using the remaining path parts.
The list of traversed documents and traversed paths are returned.
"""
documents = []
resolved_paths = []
new_documents = []
new_paths = []
for sequence_index in range(len(current_document)):
new_paths.append(current_path.copy() + [sequence_index])
new_document = traverse_path_for_sequence_members(
current_document, [sequence_index] + current_path_parts, current_path.copy()
)[0]
new_documents.extend(new_document)
for i in range(len(new_documents)): # pylint: disable=consider-using-enumerate
new_document = new_documents[i]
newer_documents, newer_paths = traverse_path_for_sequence_members(
new_document, current_path_parts, new_paths[i]
)
documents.extend(newer_documents)
resolved_paths.extend(newer_paths)
return documents, resolved_paths
def schema_merge(target, src, path): # noqa: C901 # pylint: disable=R0912
"""Merges the src schema into the target schema in place.
If there are duplicate keys, src will overwrite target.
:raises TypeError: either schema is not of type dict
:raises ConstraintError: the schema tries to override "type" or "$ref"
>>> schema_merge({}, {}, ())
{}
>>> schema_merge({'foo': 'a'}, {}, ())
{'foo': 'a'}
>>> schema_merge({}, {'foo': 'a'}, ())
{'foo': 'a'}
>>> schema_merge({'foo': 'a'}, {'foo': 'b'}, ())
{'foo': 'b'}
>>> schema_merge({'required': 'a'}, {'required': 'b'}, ())
{'required': ['a', 'b']}
>>> a, b = {'$ref': 'a'}, {'foo': 'b'}
>>> schema_merge(a, b, ('foo',))
{'$ref': 'a', 'foo': 'b'}
>>> a, b = {'$ref': 'a'}, {'type': 'b'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b'])}
>>> a, b = {'$ref': 'a'}, {'$ref': 'b'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b'])}
>>> a, b = {'$ref': 'a'}, {'type': ['b', 'c']}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'$ref': 'a'}, {'type': OrderedSet(['b', 'c'])}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'type': ['a', 'b']}, {'$ref': 'c'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'type': OrderedSet(['a', 'b'])}, {'$ref': 'c'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'Foo': {'$ref': 'a'}}, {'Foo': {'type': 'b'}}
>>> schema_merge(a, b, ('foo',))
{'Foo': {'type': OrderedSet(['a', 'b'])}}
>>> schema_merge({'type': 'a'}, {'type': 'b'}, ()) # doctest: +NORMALIZE_WHITESPACE
{'type': OrderedSet(['a', 'b'])}
>>> schema_merge({'type': 'string'}, {'type': 'integer'}, ())
{'type': OrderedSet(['string', 'integer'])}
"""
if not (isinstance(target, Mapping) and isinstance(src, Mapping)):
raise TypeError("Both schemas must be dictionaries")
for key, src_schema in src.items():
try:
if key in (
REF,
TYPE,
): # $ref and type are treated similarly and unified
target_schema = target.get(key) or target.get(TYPE) or target[REF]
else:
target_schema = target[key] # carry over existing properties
except KeyError:
target[key] = src_schema
else:
next_path = path + (key,)
try:
target[key] = schema_merge(target_schema, src_schema, next_path)
except TypeError:
if key in (TYPE, REF): # combining multiple $ref and types
src_set = to_set(src_schema)
try:
target[TYPE] = to_set(
target[TYPE]
) # casting to ordered set as lib
# implicitly converts strings to sets
target[TYPE] |= src_set
except (TypeError, KeyError):
target_set = to_set(target_schema)
target[TYPE] = target_set | src_set
try:
# check if there are conflicting $ref and type
# at the same sub schema. Conflicting $ref could only
# happen on combiners because method merges two json
# objects without losing any previous info:
# e.g. "oneOf": [{"$ref": "..#1.."},{"$ref": "..#2.."}] ->
# { "ref": "..#1..", "type": [{},{}] }
target.pop(REF)
except KeyError:
pass
elif key == "required":
target[key] = sorted(set(target_schema) | set(src_schema))
else:
if key in NON_MERGABLE_KEYS and target_schema != src_schema:
msg = (
"Object at path '{path}' declared multiple values "
"for '{}': found '{}' and '{}'"
)
# pylint: disable=W0707
raise ConstraintError(msg, path, key, target_schema, src_schema)
target[key] = src_schema
return target
| 2.328125 | 2 |
tests/test_cli.py | jacebrowning/slackoff | 0 | 12794163 | # pylint: disable=redefined-outer-name,unused-variable,expression-not-assigned
import pytest
from click.testing import CliRunner
from expecter import expect
from slackoff.cli import main
@pytest.fixture
def runner():
return CliRunner()
def describe_cli():
def describe_signout():
def it_can_force_signin(runner):
result = runner.invoke(main, ["Foobar", "--signout"])
expect(result.exit_code) == 0
expect(result.output) == "Currently signed out of Foobar\n"
| 1.953125 | 2 |
main.py | albgus/fikabotten | 1 | 12794164 | <reponame>albgus/fikabotten<gh_stars>1-10
#!/usr/bin/env python3
import discord
import asyncio
import re
import time
import yaml
import os
import sys
from sqlalchemy import func, create_engine
from sqlalchemy.orm import sessionmaker
from db import Base,User,Server,Trigger
def load_config(configfile):
"""Return a dict with configuration from the supplied yaml file."""
try:
with open(configfile, 'r') as ymlfile:
try:
                config = yaml.safe_load(ymlfile)
except yaml.parser.ParserError:
print('Could not parse config file: %s' % configfile)
sys.exit(1)
except IOError:
print('Could not open config file: %s' % configfile)
sys.exit(1)
return config
configfile = 'config.yaml'
if os.getenv('FIKABOTTEN_CONFIG'):
configfile = os.getenv('FIKABOTTEN_CONFIG')
config = load_config(configfile)
# Setup database
engine = create_engine(config.get('database'), echo=True)
Session = sessionmaker(bind=engine)
session = Session()
Base.metadata.create_all(engine)
# Create client
client = discord.Client()
@client.event
@asyncio.coroutine
def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.event
@asyncio.coroutine
def on_message(message):
# Stop handling messages from this bot immediately.
if message.author == client.user:
return
import json
print(json.dumps(message.raw_mentions))
print('Recived message')
user_id = message.author.id
server_id = message.server.id
print('UserID: {0}'.format(message.author.id))
if client.user in message.mentions and 'help' in message.content:
yield from client.send_typing(message.channel)
yield from asyncio.sleep(4)
yield from client.send_message(message.channel,
"""
Jag kommer att pinga folk på fikalistan när någon pingar mig med "fika".
`@fikabotten register` - registrera dig på fikalistan.
`@fikabotten unregister` - bli borttagen ifrån fikalistan.
`@fikabotten fika` - Trigga meddelandet till alla på fikalistan.
"""
)
elif client.user in message.mentions and 'unregister' in message.content:
print('[on_message] Removing client ID:{0} from ID:{1}'.format(user_id,
server_id))
u = session.query(User).filter(User.id==user_id).one_or_none()
if u is not None:
s = session.query(Server).filter(Server.id==server_id).one_or_none()
if s is not None:
s.users.remove(u)
session.commit()
yield from client.send_message(message.channel,
"Du är nu borttagen")
else:
print('[on_message] unregister - server dont exist. waaat')
else:
print('[on_message] unregister - user dont exist')
elif client.user in message.mentions and 'GTFO' in message.content:
        print('[on_message] Removing client ID:{0} from everywhere'.format(user_id))
u = session.query(User).filter(User.id==user_id).one_or_none()
if u is not None:
session.delete(u)
session.commit()
elif client.user in message.mentions and 'register' in message.content:
print('[on_message] Adding client ID:{0} on ID:{1}'.format(user_id, server_id))
u = session.query(User).filter(User.id==user_id).one_or_none()
if u is None:
u = User(id=user_id)
session.add(u)
print('Added user to database')
s = session.query(Server).filter(Server.id==server_id).one_or_none()
if s is None:
s = Server(id=server_id)
session.add(s)
print('Added server to database')
if not s in u.servers:
u.servers.append(s)
session.commit()
print('Added client to server')
yield from client.send_message(message.channel, 'Du kommer att bli'
+ ' pingad när det är fika på G.')
else:
print('But, your already registered in this server :o')
yield from client.send_message(message.channel, 'Du är redan tillagd ')
yield from asyncio.sleep(3)
yield from client.send_typing(message.channel)
yield from asyncio.sleep(1)
yield from client.send_message(message.channel, message.author.mention + ' n00b')
elif (client.user in message.mentions and
len(re.findall('fika', message.content,flags=re.I)) > 0):
print('[on_message] TIME TO GET SOME FIKA')
h = time.localtime().tm_hour
# Get server and user objects as u and s.
u = session.query(User).filter(User.id==user_id).one_or_none()
s = session.query(Server).filter(Server.id==server_id).one_or_none()
        # If the author isn't registered, bail out
if u is None:
return
# Do a check for odd hours of the day.
if h < 8 or h > 23:
yield from client.send_message(message.channel, message.author.mention + ' :middle_finger:')
return
elif h > 18:
yield from client.send_message(message.channel, message.author.mention + ' Lite sent för fika nu....')
return
#elif h == 10 or h == 11 or h == 12:
# yield from client.send_message(message.channel, message.author.mention + ' Fika? Det är ju lunch...')
# return
# BEGIN Anti-spam section
# Because people are generally assholes and will probably attempt to misuse the bot.
#
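        # Summary of the checks below: rate_1m and rate_5m count this author's
        # recent triggers on this server, while rate_g_5m counts triggers from
        # anyone on the server within the last five minutes. RTL-1 bails out
        # silently if anyone triggered in the last five minutes, RTL-2 complains
        # after four earlier personal triggers within five minutes, and RTL-3
        # answers with a rude emoji after seven triggers within one minute.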
rate_1m = (
session.query(func.count('*'))
.select_from(Trigger)
.filter(Trigger.user_id==user_id)
.filter(Trigger.server_id==server_id)
.filter(Trigger.timestamp > time.time()-(60)) # 60s
.scalar()
)
rate_g_5m = (
session.query(func.count('*'))
.select_from(Trigger)
.filter(Trigger.server_id==server_id)
.filter(Trigger.timestamp > time.time()-(5*60)) # 5*60s
.scalar()
)
rate_5m = (
session.query(func.count('*'))
.select_from(Trigger)
.filter(Trigger.user_id==user_id)
.filter(Trigger.server_id==server_id)
.filter(Trigger.timestamp > time.time()-(5*60)) # 5*60 s
.scalar()
)
#rate_30m = (
# session.query(func.count('*'))
# .select_from(Trigger)
# .filter(Trigger.user_id==user_id)
# .filter(Trigger.server_id==server_id)
# .filter(Trigger.time > ()time.time()-(30*60)) # 5*60 s
# .scalar()
#)
rate_limit_bail = False
# RTL-1
if rate_g_5m >= 1:
print('RTL-1 - silent')
rate_limit_bail = True
# RTL-2
if rate_5m == 4:
print('RTL-2 - verbose')
rate_limit_bail = True
yield from client.send_message(message.channel,
message.author.mention +
' Förhelvete...!')
if rate_1m == 7:
print('RTL-3 - GTFO')
rate_limit_bail = True
yield from client.send_message(message.channel,
message.author.mention +
' :middle_finger:')
session.add(Trigger(
user_id=message.author.id,
server_id=server_id,
timestamp=int(time.time())
))
# Gotta commit those changes to the DB.
session.commit()
if rate_limit_bail:
return
# END Anti-spam section
#
        # Okay, ready for action. We have the server object.
if s is not None:
            fikare_db = s.users  # A list of all users related to the server.
fikare_mentions = ""
            for fikare in fikare_db:  # loop over the list
                fikare_mentions += '<@{0.id}> '.format(fikare)  # Append each mention to one long string.
            yield from client.send_message(message.channel, fikare_mentions +
                                           "FIKA!!")  # Shout.
(
session.query(Trigger)
.filter(Trigger.timestamp < int(time.time()-30*60))
.delete()
)
elif len(re.findall('fika', message.content, flags=re.I)) > 0:
print('[on_message] DEBUG: fika matched, but no trigger.')
print('------')
client.run(config.get('token'))
| 2.125 | 2 |
buffer/shard-cpp-test/common/inner_reuse/out_parser.py | zaqwes8811/coordinator-tasks | 0 | 12794165 | # coding: utf-8
import re
from inner_reuse import chunks
class Ptr(object):
"""
Contain operation data
"""
def __init__(self, pos, type_key):
self.position = pos
self.type_response = type_key
self.name_test = None
self.out = ""
def __repr__(self):
return str(dict((k, v) for k, v in self.__dict__.items() if k != 'position'))
def get_data(self):
"""
Returns:
key, value
"""
return self.type_response, (self.name_test, self.out)
def split_test_response(self, content_value):
"""
Returns:
(name, out)
"""
end_name_pos = content_value.find('\n')
if end_name_pos != -1:
self.name_test = content_value[:end_name_pos]
            # For some reason, doing the replace on the line above affects the whole string, not just the slice
self.name_test = self.name_test.replace('\r', '')
self.out = content_value[end_name_pos:].strip()
else:
self.name_test = content_value
def parser_out(out):
"""
Returns:
[(name, ok/fail/deadlock, out_one), ..., ]
"""
OK = "\[ OK \]"
RUN = "\[ RUN \]"
FAILED = "\[ FAILED \]"
template = []
template.extend([Ptr(a.start(), 'ok') for a in list(re.finditer(OK, out))])
template.extend([Ptr(a.start(), 'fail') for a in list(re.finditer(FAILED, out))])
template.extend([Ptr(a.end(), 'run') for a in list(re.finditer(RUN, out))])
    how_split = 2  # split into pairs
response_pairs = chunks(sorted(template, key=lambda record: record.position), how_split)
result = []
for pair in response_pairs:
        head = pair[0]  # the entry with the 'run' key comes first
if head.type_response == 'run':
if len(pair) == 1:
pair.append(Ptr(out.__len__(), 'deadlock'))
bottom = pair[1]
content = out[head.position:bottom.position].strip()
bottom.split_test_response(content)
yield bottom.get_data()
def parse_out_not_gen(out):
gen = parser_out(out)
report = []
for i in gen:
report.append(i)
return report
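# Illustrative usage sketch (not part of the original module). The sample log
# below is made up and assumes the usual Google Test column widths that the
# regexes in parser_out() encode; a mismatch simply yields no results.
if __name__ == '__main__':
    sample_out = (
        "[ RUN      ] FooSuite.Bar\n"
        "some test output\n"
        "[       OK ] FooSuite.Bar (0 ms)\n"
    )
    for status, (name, test_out) in parse_out_not_gen(sample_out):
        print(status, name, test_out)  # -> ok FooSuite.Bar some test output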
| 2.703125 | 3 |
ml/scratch.py | Shivams9/pythoncodecamp | 6 | 12794166 | import numpy
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
x = [1,2,3,4]
x=numpy.array(x)
print(x.shape)
x=x.reshape(2,-1)
print(x.shape)
print(x)
x=x.reshape(-1)
print(x.shape)
print(x)
y = [2,4,6,8]
#x*2=[2,4,6,8]
#x*x=[1,4,9,16]
#sum(x) = 10
pf=numpy.polyfit(x,y,3)
print(pf)
print(type(pf))
model = numpy.poly1d(pf)
drv=model.deriv()
print(model([1,2,3,4]))
print(type(drv))
print(model)
print(drv)
coeff=r2_score(y, model(x))
print(coeff)
| 3.1875 | 3 |
Day 52/PolygonPossibilty.py | sandeep-krishna/100DaysOfCode | 0 | 12794167 | '''
You are given length of n sides, you need to answer whether it is possible to make n sided convex polygon with it.
Input : -
First line contains ,no .of testcases.
For each test case , first line consist of single integer ,second line consist of spaced integers, size of each side.
Output : -
For each test case print "Yes" if it is possible to make polygon or else "No" if it is not possible.
SAMPLE INPUT
2
3
4 3 2
4
1 2 1 4
SAMPLE OUTPUT
Yes
No
'''
for _ in range(int(input())):
n=int(input())
arr=list(map(int,input().split()))
total=sum(arr)
flag=1
for i in arr:
if total-i <= i:
flag=0
break
print ("Yes" if flag==1 else "No") | 4.09375 | 4 |
util/ConfigUtils.py | cclauss/ph0neutria | 0 | 12794168 | #!/usr/bin/python
from ConfigParser import SafeConfigParser
import os
import string
class baseObj:
def __init__(self, multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken):
self.multiProcess = multiProcess
self.userAgent = userAgent
self.outputFolderName = outputFolderName
self.outputFolder = outputFolder
self.deleteOutput = deleteOutput
self.dateFormat = dateFormat
self.useTor = useTor
self.torIP = torIP
self.torPort = torPort
self.redirectLimit = redirectLimit
self.hashCountLimit = hashCountLimit
self.urlCharLimit = urlCharLimit
self.osintDays = osintDays
self.malShareApiKey = malShareApiKey
self.disableMalShare = disableMalShare
self.disableOsint = disableOsint
self.otxKey = otxKey
self.shodanKey = shodanKey
self.vtKey = vtKey
self.vtUser = vtUser
self.vtReqPerMin = vtReqPerMin
self.disableVT = disableVT
self.viperUrlAdd = viperUrlAdd
self.viperApiToken = 'Token {0}'.format(viperApiToken)
def getBaseConfig(rootDir):
parser = SafeConfigParser()
parser.read(os.path.join(rootDir, 'config', 'settings.conf'))
multiProcess = parser.get('Core', 'multiprocess')
userAgent = parser.get('Core', 'useragent')
outputFolderName = parser.get('Core', 'outputfolder')
outputFolder = os.path.join(rootDir, outputFolderName)
deleteOutput = parser.get('Core', 'deleteoutput')
dateFormat = parser.get('Core', 'dateformat')
useTor = parser.get('Core', 'usetor')
torIP = parser.get('Core', 'torip')
torPort = parser.get('Core', 'torport')
redirectLimit = parser.get('Core', 'redirectlimit')
hashCountLimit = parser.get('Core', 'hashcountlimit')
urlCharLimit = parser.get('Core', 'urlcharlimit')
osintDays = parser.get('Core', 'osintdays')
malShareApiKey = parser.get('MalShare', 'apikey')
disableMalShare = parser.get('MalShare', 'disable')
disableOsint = parser.get('OSINT', 'disable')
otxKey = parser.get('OTX', 'apikey')
shodanKey = parser.get('Shodan', 'apikey')
vtKey = parser.get('VirusTotal', 'apikey')
vtUser = parser.get('VirusTotal', 'username')
vtReqPerMin = parser.get('VirusTotal', 'requestsperminute')
disableVT = parser.get('VirusTotal', 'disable')
viperUrlAdd = parser.get('Viper', 'addurl')
viperApiToken = parser.get('Viper', 'apitoken')
return baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken)
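# For reference, getBaseConfig() above expects config/settings.conf to provide
# the sections and keys read by the parser.get() calls. Illustrative sketch
# only -- the key names come from the code above, the values are placeholders:
#
#   [Core]
#   multiprocess = yes
#   useragent = <user agent string>
#   outputfolder = output
#   deleteoutput = no
#   dateformat = <date format>
#   usetor = yes
#   torip = 127.0.0.1
#   torport = 9050
#   redirectlimit = 5
#   hashcountlimit = 10
#   urlcharlimit = 2000
#   osintdays = 7
#
#   [MalShare]
#   apikey = <key>
#   disable = no
#
#   [OSINT]
#   disable = no
#
#   [OTX]
#   apikey = <key>
#
#   [Shodan]
#   apikey = <key>
#
#   [VirusTotal]
#   apikey = <key>
#   username = <username>
#   requestsperminute = 4
#   disable = no
#
#   [Viper]
#   addurl = <viper add-url endpoint>
#   apitoken = <token>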
| 1.945313 | 2 |
dragonfire/conversational/corpus/ubuntudata.py | kameranis/Dragonfire | 1 | 12794169 | <gh_stars>1-10
import os
from tqdm import tqdm
"""
Ubuntu Dialogue Corpus
http://arxiv.org/abs/1506.08909
"""
class UbuntuData:
"""
"""
def __init__(self, dirName):
"""
Args:
dirName (string): directory where to load the corpus
"""
self.MAX_NUMBER_SUBDIR = 10
self.conversations = []
__dir = os.path.join(dirName, "dialogs")
number_subdir = 0
for sub in tqdm(os.scandir(__dir), desc="Ubuntu dialogs subfolders", total=len(os.listdir(__dir))):
if number_subdir == self.MAX_NUMBER_SUBDIR:
print("WARNING: Early stoping, only extracting {} directories".format(self.MAX_NUMBER_SUBDIR))
return
if sub.is_dir():
number_subdir += 1
for f in os.scandir(sub.path):
if f.name.endswith(".tsv"):
self.conversations.append({"lines": self.loadLines(f.path)})
def loadLines(self, fileName):
"""
Args:
fileName (str): file to load
Return:
list<dict<str>>: the extracted fields for each line
"""
lines = []
with open(fileName, 'r') as f:
for line in f:
l = line[line.rindex("\t")+1:].strip() # Strip metadata (timestamps, speaker names)
lines.append({"text": l})
return lines
def getConversations(self):
return self.conversations
| 2.6875 | 3 |
yacluster.py | KrzysiekJ/yacluster | 4 | 12794170 | <filename>yacluster.py
from functools import partial, reduce
from itertools import chain, product
from math import sqrt
def distance(coords_1, coords_2):
return sqrt(pow(coords_1[0] - coords_2[0], 2) + pow(coords_1[1] - coords_2[1], 2))
def get_grid_cell(x, y, threshold):
return (int(x // threshold), int(y // threshold))
def get_nearby_grid_cells(grid_cell):
grid_x, grid_y = grid_cell
return [(i, j) for i, j in product(range(grid_x - 1, grid_x + 2), range(grid_y - 1, grid_y + 2))]
def cluster_iter(clustered, point, threshold):
"""Add a point to a grid-like cluster structure.
This allows comparing point distances only to clusters from nearby grids, not to all clusters. Useful when there are many clusters expected."""
coords, object_ = point
point_grid_cell = get_grid_cell(*coords, threshold=threshold)
nearby_grid_cells = get_nearby_grid_cells(point_grid_cell)
possible_nearby_cluster_locations = chain(
*[(location for location in clustered.get(grid_cell, {})) for grid_cell in nearby_grid_cells]
)
def nearest_location(champion_with_distance, pretender, coords=coords, threshold=threshold):
pretender_distance = distance(coords, pretender)
if pretender_distance < threshold:
if champion_with_distance and champion_with_distance[1] <= pretender_distance:
return champion_with_distance
else:
return (pretender, pretender_distance)
else:
return champion_with_distance
nearest_cluster_with_distance = reduce(nearest_location, possible_nearby_cluster_locations, None)
if nearest_cluster_with_distance:
nearest_cluster_location, _nearest_cluster_distance = nearest_cluster_with_distance
else:
nearest_cluster_location = None
if nearest_cluster_location:
cluster_grid_cell = get_grid_cell(*nearest_cluster_location, threshold=threshold)
cluster = clustered[cluster_grid_cell].pop(nearest_cluster_location)
cluster_object_count = len(cluster)
new_cluster_location = (
(nearest_cluster_location[0] * cluster_object_count + coords[0]) / (cluster_object_count + 1),
(nearest_cluster_location[1] * cluster_object_count + coords[1]) / (cluster_object_count + 1),
)
else:
cluster = []
new_cluster_location = coords
cluster.append(point)
new_cluster_grid_cell = get_grid_cell(*new_cluster_location, threshold=threshold)
clustered.setdefault(new_cluster_grid_cell, {})
clustered[new_cluster_grid_cell][new_cluster_location] = cluster
return clustered
def cluster(points, threshold):
"""Cluster points using distance-based clustering algorithm.
Arguments:
points — an iterable of two-element point tuples, each containing:
• a two-element tuple with X and Y coordinates,
• the actual object being clustered;
threshold — if a point is included into a cluster, it must be closer to its centroid than this value.
Return value:
an iterable of two-element cluster tuples, each containing:
• a two-element tuple with X and Y coordinates of the cluster centroid;
• a list of objects belonging to the cluster.
Cluster’s centroid is defined as average coordinates of the cluster’s members.
"""
cluster_iter_for_threshold = partial(cluster_iter, threshold=threshold)
clustered = reduce(cluster_iter_for_threshold, points, {})
return chain(
*[((location, [object_ for coords, object_ in points]) for location, points in grid_clusters.items())
for grid_clusters in clustered.values()]
)
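# Minimal usage sketch (not part of the original module): two of the three
# points fall within the threshold of each other, so they share a cluster.
if __name__ == "__main__":
    sample_points = [((0.0, 0.0), "a"), ((0.5, 0.5), "b"), ((10.0, 10.0), "c")]
    for centroid, members in cluster(sample_points, threshold=2.0):
        print(centroid, members)
    # Expected output (cluster order may vary):
    #   (0.25, 0.25) ['a', 'b']
    #   (10.0, 10.0) ['c']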
| 3.265625 | 3 |
mock_interview/spring_mock_at_make_school.py | Sukhrobjon/leetcode | 0 | 12794171 | <filename>mock_interview/spring_mock_at_make_school.py<gh_stars>0
"""Problem 2: Given an array of integers, replace each element
of the array with product of every other element in the array
without using the division operator.
Example input 1:
[1, 2, 3, 4, 5] = > should return output: [120, 60, 40, 30, 24]
Example input 2:
[5, 3, 4, 2, 6, 8] = > should return output: [1152, 1920, 1440, 2880, 960, 720]
"""
# slicing the array arr[0:self-1]+arr[self:]
# start two pointers at the current num we want to ignore and expand the pointer to the sides
def product_no_self(numbers):
start = 0
end = 0
index = 0
pro = 1
output = []
# bound check
while index < len(numbers): # [1, 2, 3, 4, 5] O(n)
curr = numbers[index]
start = index - 1 # 2, 1
end = index + 1 # 4, 5
pro = 1
while start > -1 or end < len(numbers): # O(n)
if start > -1:
pro *= numbers[start]
start -= 1
if end < len(numbers):
pro *= numbers[end]
end += 1
output.append(pro)
index += 1
return output
numbers = [1, 2, 3, 4, 5]
# space is O(n)
# run time is O(n^2)
print(product_no_self(numbers))
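# Alternative sketch (not the solution written above): the same output in O(n)
# time with prefix/suffix products, still without using division.
def product_no_self_linear(nums):
    n = len(nums)
    output = [1] * n
    left = 1
    for i in range(n):  # output[i] <- product of everything left of i
        output[i] = left
        left *= nums[i]
    right = 1
    for i in range(n - 1, -1, -1):  # multiply in product of everything right of i
        output[i] *= right
        right *= nums[i]
    return output
print(product_no_self_linear(numbers))  # [120, 60, 40, 30, 24]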
| 3.84375 | 4 |
tests/test_cli_config.py | Dallinger/Dallinger | 100 | 12794172 | <reponame>Dallinger/Dallinger
from pathlib import Path
import json
import mock
import pytest
import shutil
import tempfile
def test_list_hosts_empty():
from dallinger.command_line.config import get_configured_hosts
assert get_configured_hosts() == {}
def test_list_hosts_results(tmp_appdir):
from dallinger.command_line.config import get_configured_hosts
(Path(tmp_appdir) / "hosts").mkdir()
host1 = dict(user="test_user_1", host="test_host_1")
host2 = dict(user="test_user_2", host="test_host_2")
(Path(tmp_appdir) / "hosts" / "test_host_1").write_text(json.dumps(host1))
(Path(tmp_appdir) / "hosts" / "test_host_2").write_text(json.dumps(host2))
assert get_configured_hosts() == {"test_host_1": host1, "test_host_2": host2}
def test_store_host():
from dallinger.command_line.config import get_configured_hosts
from dallinger.command_line.config import store_host
host1 = dict(user="test_user_1", host="test_host_1")
host2 = dict(user="test_user_2", host="test_host_2")
store_host(host1), store_host(host2)
assert get_configured_hosts() == {"test_host_1": host1, "test_host_2": host2}
@pytest.fixture(autouse=True)
def tmp_appdir():
"""Monkey patch appdirs to provede a pristine dirctory to each test"""
tmp_dir = tempfile.mkdtemp()
with mock.patch("dallinger.command_line.config.APPDIRS") as mock_appdirs:
mock_appdirs.user_data_dir = tmp_dir
yield tmp_dir
shutil.rmtree(tmp_dir)
| 2.1875 | 2 |
energy/evaluation.py | pminervini/DeepKGC | 5 | 12794173 | <reponame>pminervini/DeepKGC
# -*- coding: utf-8 -*-
import numpy as np
import theano
import theano.tensor as T
import logging
from sklearn import metrics
from sparse.learning import parse_embeddings
def auc_pr(predictions=[], labels=[]):
'''Computes the Area Under the Precision-Recall Curve (AUC-PR)'''
predictions, labels = np.asarray(predictions), np.asarray(labels)
precision, recall, threshold = metrics.precision_recall_curve(labels, predictions)
auc = metrics.auc(recall, precision)
return auc
def auc_roc(predictions=[], labels=[]):
'''Computes the Area Under the Receiver Operating Characteristic Curve (AUC-ROC)'''
predictions, labels = np.asarray(predictions), np.asarray(labels)
    fpr, tpr, threshold = metrics.roc_curve(labels, predictions)
    auc = metrics.auc(fpr, tpr)
return auc
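# Quick illustration with made-up scores: a perfectly ranked prediction list
# gives an area of 1.0 from both helpers above, e.g.
#   auc_pr([0.9, 0.8, 0.2, 0.1], [1, 1, 0, 0])   -> 1.0
#   auc_roc([0.9, 0.8, 0.2, 0.1], [1, 1, 0, 0])  -> 1.0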
#
# COMPUTING PERFORMANCE METRICS ON RANKINGS
#
#
# Evaluation summary (as in FB15k):
#
def ranking_summary(res, idxo=None, n=10, tag='raw'):
resg = res[0] + res[1]
dres = {}
dres.update({'microlmean': np.mean(res[0])})
dres.update({'microlmedian': np.median(res[0])})
dres.update({'microlhits@n': np.mean(np.asarray(res[0]) <= n) * 100})
dres.update({'micrormean': np.mean(res[1])})
dres.update({'micrormedian': np.median(res[1])})
dres.update({'microrhits@n': np.mean(np.asarray(res[1]) <= n) * 100})
resg = res[0] + res[1]
dres.update({'microgmean': np.mean(resg)})
dres.update({'microgmedian': np.median(resg)})
dres.update({'microghits@n': np.mean(np.asarray(resg) <= n) * 100})
logging.info('### MICRO (%s):' % (tag))
logging.info('\t-- left >> mean: %s, median: %s, hits@%s: %s%%' % (
round(dres['microlmean'], 5), round(dres['microlmedian'], 5),
n, round(dres['microlhits@n'], 3)))
logging.info('\t-- right >> mean: %s, median: %s, hits@%s: %s%%' % (
round(dres['micrormean'], 5), round(dres['micrormedian'], 5),
n, round(dres['microrhits@n'], 3)))
logging.info('\t-- global >> mean: %s, median: %s, hits@%s: %s%%' % (
round(dres['microgmean'], 5), round(dres['microgmedian'], 5),
n, round(dres['microghits@n'], 3)))
if idxo is not None:
listrel = set(idxo)
dictrelres = {}
dictrellmean = {}
dictrelrmean = {}
dictrelgmean = {}
dictrellmedian = {}
dictrelrmedian = {}
dictrelgmedian = {}
dictrellrn = {}
dictrelrrn = {}
dictrelgrn = {}
for i in listrel:
dictrelres.update({i: [[], []]})
for i, j in enumerate(res[0]):
dictrelres[idxo[i]][0] += [j]
for i, j in enumerate(res[1]):
dictrelres[idxo[i]][1] += [j]
for i in listrel:
dictrellmean[i] = np.mean(dictrelres[i][0])
dictrelrmean[i] = np.mean(dictrelres[i][1])
dictrelgmean[i] = np.mean(dictrelres[i][0] + dictrelres[i][1])
dictrellmedian[i] = np.median(dictrelres[i][0])
dictrelrmedian[i] = np.median(dictrelres[i][1])
dictrelgmedian[i] = np.median(dictrelres[i][0] + dictrelres[i][1])
dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0]) <= n) * 100
dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <= n) * 100
dictrelgrn[i] = np.mean(np.asarray(dictrelres[i][0] + dictrelres[i][1]) <= n) * 100
dres.update({'dictrelres': dictrelres})
dres.update({'dictrellmean': dictrellmean})
dres.update({'dictrelrmean': dictrelrmean})
dres.update({'dictrelgmean': dictrelgmean})
dres.update({'dictrellmedian': dictrellmedian})
dres.update({'dictrelrmedian': dictrelrmedian})
dres.update({'dictrelgmedian': dictrelgmedian})
dres.update({'dictrellrn': dictrellrn})
dres.update({'dictrelrrn': dictrelrrn})
dres.update({'dictrelgrn': dictrelgrn})
dres.update({'macrolmean': np.mean(dictrellmean.values())})
dres.update({'macrolmedian': np.mean(dictrellmedian.values())})
dres.update({'macrolhits@n': np.mean(dictrellrn.values())})
dres.update({'macrormean': np.mean(dictrelrmean.values())})
dres.update({'macrormedian': np.mean(dictrelrmedian.values())})
dres.update({'macrorhits@n': np.mean(dictrelrrn.values())})
dres.update({'macrogmean': np.mean(dictrelgmean.values())})
dres.update({'macrogmedian': np.mean(dictrelgmedian.values())})
dres.update({'macroghits@n': np.mean(dictrelgrn.values())})
logging.info('### MACRO (%s):' % (tag))
logging.info('\t-- left >> mean: %s, median: %s, hits@%s: %s%%' % (
round(dres['macrolmean'], 5), round(dres['macrolmedian'], 5),
n, round(dres['macrolhits@n'], 3)))
logging.info('\t-- right >> mean: %s, median: %s, hits@%s: %s%%' % (
round(dres['macrormean'], 5), round(dres['macrormedian'], 5),
n, round(dres['macrorhits@n'], 3)))
logging.info('\t-- global >> mean: %s, median: %s, hits@%s: %s%%' % (
round(dres['macrogmean'], 5), round(dres['macrogmedian'], 5),
n, round(dres['macroghits@n'], 3)))
return dres
#
# RANKING FUNCTIONS
#
def RankRightFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None):
"""
This function returns a Theano function to measure the similarity score of
all 'right' entities given couples of relation and 'left' entities (as
index values).
:param fnsim: similarity function (on Theano variables).
:param embeddings: an Embeddings instance.
:param leftop: class for the 'left' operator.
:param rightop: class for the 'right' operator.
:param subtensorspec: only measure the similarity score for the entities
corresponding to the first subtensorspec (int)
entities of the embedding matrix (default None: all
entities).
"""
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')
# Graph
lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD vector containing the embedding of idxl
if subtensorspec is not None:
# We compute the score only for a subset of entities
rhs = (embedding.E[:, :subtensorspec]).T
else:
rhs = embedding.E.T # rhs: NxD embedding matrix
rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) # rell: 1xD vector containing the embedding of idxo (relationl)
relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) # relr: 1xD vector containing the embedding of idxo (relationr)
tmp = leftop(lhs, rell) # a = rell(lhs)
# b = relr(rhs)
simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a, b)
"""
Theano function inputs.
:input idxl: index value of the 'left' member.
:input idxo: index value of the relation member.
Theano function output.
:output simi: vector of score values.
"""
return theano.function([idxl, idxo], [simi], on_unused_input='ignore')
def RankLeftFnIdx(fnsim, embeddings, leftop, rightop, subtensorspec=None):
"""
This function returns a Theano function to measure the similarity score of
all 'left' entities given couples of relation and 'right' entities (as
index values).
:param fnsim: similarity function (on Theano variables).
:param embeddings: an Embeddings instance.
:param leftop: class for the 'left' operator.
:param rightop: class for the 'right' operator.
:param subtensorspec: only measure the similarity score for the entities
corresponding to the first subtensorspec (int)
entities of the embedding matrix (default None: all
entities).
"""
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo')
# Graph
if subtensorspec is not None:
# We compute the score only for a subset of entities
lhs = (embedding.E[:, :subtensorspec]).T
else:
lhs = embedding.E.T
rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))
rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
tmp = rightop(rhs, relr)
simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1])))
"""
Theano function inputs.
:input idxr: index value of the 'right' member.
:input idxo: index value of the relation member.
Theano function output.
:output simi: vector of score values.
"""
return theano.function([idxr, idxo], [simi], on_unused_input='ignore')
def RankingScoreIdx(sl, sr, idxl, idxr, idxo):
"""
This function computes the rank list of the lhs and rhs, over a list of
lhs, rhs and rel indexes.
:param sl: Theano function created with RankLeftFnIdx().
:param sr: Theano function created with RankRightFnIdx().
:param idxl: list of 'left' indices.
:param idxr: list of 'right' indices.
:param idxo: list of relation indices.
"""
errl = []
errr = []
for l, o, r in zip(idxl, idxo, idxr):
errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1]
errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]
return errl, errr
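# Note on the double-argsort trick used above (and in the other ranking
# helpers below), shown with made-up scores: it converts similarity scores
# into 1-based ranks where the highest score gets rank 1, e.g.
#   scores = np.array([0.1, 0.9, 0.5])
#   np.argsort(np.argsort(scores)[::-1]) + 1  # -> array([3, 1, 2])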
def FilteredRankingScoreIdx(sl, sr, idxl, idxr, idxo, true_triples):
"""
This function computes the rank list of the lhs and rhs, over a list of
lhs, rhs and rel indexes.
:param sl: Theano function created with RankLeftFnIdx().
:param sr: Theano function created with RankRightFnIdx().
:param idxl: list of 'left' indices.
:param idxr: list of 'right' indices.
:param idxo: list of relation indices.
"""
errl = []
errr = []
for l, o, r in zip(idxl, idxo, idxr):
il=np.argwhere(true_triples[:,0]==l).reshape(-1,)
io=np.argwhere(true_triples[:,1]==o).reshape(-1,)
ir=np.argwhere(true_triples[:,2]==r).reshape(-1,)
inter_l = [i for i in ir if i in io]
rmv_idx_l = [true_triples[i,0] for i in inter_l if true_triples[i,0] != l]
scores_l = (sl(r, o)[0]).flatten()
scores_l[rmv_idx_l] = -np.inf
errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1]
inter_r = [i for i in il if i in io]
rmv_idx_r = [true_triples[i,2] for i in inter_r if true_triples[i,2] != r]
scores_r = (sr(l, o)[0]).flatten()
scores_r[rmv_idx_r] = -np.inf
errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1]
return errl, errr
def RankingScoreIdx_sub(sl, sr, idxl, idxr, idxo, selection=[]):
"""
Similar to RankingScoreIdx, but works on a subset of examples, defined in
the 'selection' parameter.
"""
errl, errr = [], []
for l, o, r in [(idxl[i], idxo[i], idxr[i]) for i in selection]:
errl += [np.argsort(np.argsort((sl(r, o)[0]).flatten())[::-1]).flatten()[l] + 1]
errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]
return errl, errr
def RankingScoreRightIdx(sr, idxl, idxr, idxo):
"""
This function computes the rank list of the rhs, over a list of lhs, rhs
and rel indexes.
:param sr: Theano function created with RankRightFnIdx().
:param idxl: list of 'left' indices.
:param idxr: list of 'right' indices.
:param idxo: list of relation indices.
"""
errr = []
for l, o, r in zip(idxl, idxo, idxr):
errr += [np.argsort(np.argsort((sr(l, o)[0]).flatten())[::-1]).flatten()[r] + 1]
return errr
#
# COMPUTING PERFORMANCE METRICS ON CLASSIFICATIONS
#
def classification_summary(energyfn,
validlidx, validridx, validoidx, valid_targets,
testlidx, testridx, testoidx, test_targets):
# Find unique relation indexes
relidxs = np.unique(validoidx)
valid_matches, test_matches = [], []
# Iterate over unique relation indexes
for relidx in relidxs:
# Select the validation triples containing the 'relidx' predicate, and the corresponding target values
valid_idxs = np.where(validoidx == relidx)
r_validlidx, r_validridx, r_validoidx = validlidx[valid_idxs], validridx[valid_idxs], validoidx[valid_idxs]
r_valid_targets = valid_targets[valid_idxs]
# Evaluate the energies of those triples
r_valid_energies = energyfn(r_validlidx, r_validridx, r_validoidx)[0]
r_valid_cutpoint = find_classification_threshold(r_valid_energies, r_valid_targets)
valid_matches += classification_matches(r_valid_energies, r_valid_targets, r_valid_cutpoint)
# Select the test triples containing the 'relidx' predicate, and the corresponding target values
test_idxs = np.where(testoidx == relidx)
r_testlidx, r_testridx, r_testoidx = testlidx[test_idxs], testridx[test_idxs], testoidx[test_idxs]
r_test_targets = test_targets[test_idxs]
r_test_energies = energyfn(r_testlidx, r_testridx, r_testoidx)[0]
test_matches += classification_matches(r_test_energies, r_test_targets, r_valid_cutpoint)
logging.info('Validation Accuracy: %s -- Test Accuracy: %s' %
((np.mean(valid_matches) * 100.0), (np.mean(test_matches) * 100.0)))
def find_classification_threshold(energies, targets):
x = np.unique(np.sort(energies))
cutpoints = np.concatenate(([x[0]], (x[1:] + x[:-1]) / 2., [x[-1]]))
accuracies = [np.mean(classification_matches(energies, targets, cutpoint)) * 100.0 for cutpoint in cutpoints]
best_cutpoint = cutpoints[np.argmax(np.asarray(accuracies))]
return best_cutpoint
def classification_matches(energies, targets, threshold):
classifications = classify(energies, threshold)
comparisons = (targets == classifications)
ret = [1. if comparison == True else 0. for comparison in comparisons]
return ret
def classify(energies, threshold):
classifications = np.asarray([1 if energy < threshold else 0 for energy in energies])
return classifications
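# Small sketch with made-up energies showing how the three helpers above fit
# together; low energies count as positives (label 1). Not called anywhere.
def _threshold_demo():
    energies = np.asarray([0.1, 0.2, 0.8, 0.9])
    targets = np.asarray([1, 1, 0, 0])
    cutpoint = find_classification_threshold(energies, targets)  # 0.5 on this toy data
    accuracy = np.mean(classification_matches(energies, targets, cutpoint)) * 100.0  # 100.0
    return cutpoint, accuracy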
#
# CLASSIFICATION FUNCTIONS
#
def EnergyFn(fnsim, embeddings, leftop, rightop):
embedding, relationl, relationr = parse_embeddings(embeddings)
idxl, idxo, idxr = T.iscalar('idxl'), T.iscalar('idxo'), T.iscalar('idxr')
lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))
rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))
rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
energy = - fnsim(leftop(lhs, rell), rightop(rhs, relr))
return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore')
def EnergyVecFn(fnsim, embeddings, leftop, rightop):
embedding, relationl, relationr = parse_embeddings(embeddings)
idxl, idxo, idxr = T.ivector('idxl'), T.ivector('idxo'), T.ivector('idxr')
lhs, rhs = embedding.E[:, idxl].T, embedding.E[:, idxr].T
rell, relr = relationl.E[:, idxo].T, relationr.E[:, idxo].T
energy = - fnsim(leftop(lhs, rell), rightop(rhs, relr))
return theano.function([idxl, idxr, idxo], [energy], on_unused_input='ignore')
#
# LEVERAGING RANGE AND DOMAIN RELATIONS DURING LEARNING
#
def FilteredRankingScoreIdx_DR(sl, sr, idxl, idxr, idxo, rel2domain, rel2range, illegal_dr_penalty=1e6, true_triples=None):
errl, errr = [], []
relidxs = np.unique(idxo)
for relidx in relidxs:
dr_domain, dr_range = rel2domain[relidx], rel2range[relidx]
dr_domain = set(dr_domain)
dr_range = set(dr_range)
test_triples = [(l, o, r) for (l, o, r) in zip(idxl, idxo, idxr) if o == relidx]
for l, o, r in test_triples:
rmv_idx_l, rmv_idx_r = [], []
# Remove triples from true_triples from ranking results
if true_triples is not None:
il = np.argwhere(true_triples[:, 0] == l).reshape(-1,)
io = np.argwhere(true_triples[:, 1] == o).reshape(-1,)
ir = np.argwhere(true_triples[:, 2] == r).reshape(-1,)
inter_l = [i for i in ir if i in io]
rmv_idx_l += [true_triples[i, 0] for i in inter_l if true_triples[i, 0] != l]
inter_r = [i for i in il if i in io]
rmv_idx_r += [true_triples[i, 2] for i in inter_r if true_triples[i, 2] != r]
scores_l = (sl(r, o)[0]).flatten()
scores_r = (sr(l, o)[0]).flatten()
# Remove triples not in domain and range from ranking results
pen_idx_l = [cl for cl in range(len(scores_l)) if cl not in dr_domain]
pen_idx_r = [cr for cr in range(len(scores_r)) if cr not in dr_range]
scores_l[rmv_idx_l] = -np.inf
scores_r[rmv_idx_r] = -np.inf
scores_l[pen_idx_l] -= illegal_dr_penalty
scores_r[pen_idx_r] -= illegal_dr_penalty
errl += [np.argsort(np.argsort(- scores_l)).flatten()[l] + 1]
errr += [np.argsort(np.argsort(- scores_r)).flatten()[r] + 1]
return errl, errr
#
# SCHEMA-AWARE RANKING FUNCTIONS
#
#
# RANKING FUNCTIONS
#
def RankRightFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None):
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')
g = T.matrix('g')
# Graph
lhs = (embedding.E[:, idxl]).reshape((1, embedding.D)) # lhs: 1xD vector containing the embedding of idxl
if subtensorspec is not None:
# We compute the score only for a subset of entities
rhs = (embedding.E[:, :subtensorspec]).T
else:
rhs = embedding.E.T # rhs: NxD embedding matrix
rell = (relationl.E[:, idxo]).reshape((1, relationl.D)) # rell: 1xD vector containing the embedding of idxo (relationl)
relr = (relationr.E[:, idxo]).reshape((1, relationr.D)) # relr: 1xD vector containing the embedding of idxo (relationr)
tmp = leftop(lhs, rell) # a = rell(lhs)
# b = relr(rhs)
# Negative Energy
simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr)) # simi = fnsim(a, b)
pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1, :].T * prior.P[idxo, 1].T
simi = simi - pen_simi
return theano.function([idxl, idxo, g], [simi], on_unused_input='ignore')
def RankLeftFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None):
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
idxr, idxo = T.iscalar('idxr'), T.iscalar('idxo')
g = T.matrix('g')
# Graph
if subtensorspec is not None:
# We compute the score only for a subset of entities
lhs = (embedding.E[:, :subtensorspec]).T
else:
lhs = embedding.E.T
rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))
rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
tmp = rightop(rhs, relr)
simi = fnsim(leftop(lhs, rell), tmp.reshape((1, tmp.shape[1])))
pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1, :].T * prior.P[idxo, 1].T
simi = simi - pen_simi
return theano.function([idxr, idxo, g], [simi], on_unused_input='ignore')
#@profile
def RankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo,
relation2domainSet, relation2rangeSet,
schemaPenalty, l_subtensorspec=None, r_subtensorspec=None):
errl = []
errr = []
for l, o, r in zip(idxl, idxo, idxr):
gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec, [o] * l_subtensorspec)
gr = schemaPenalty.schema_penalties_lr_fast([l] * r_subtensorspec, range(r_subtensorspec), [o] * r_subtensorspec)
slro = sl(r, o, gl)[0]
srlo = sr(l, o, gr)[0]
errl += [np.argsort(np.argsort((slro).flatten())[::-1]).flatten()[l] + 1]
errr += [np.argsort(np.argsort((srlo).flatten())[::-1]).flatten()[r] + 1]
return errl, errr
def FilteredRankingScoreIdx_Schema(sl, sr, idxl, idxr, idxo, true_triples,
relation2domainSet, relation2rangeSet,
schemaPenalty, l_subtensorspec=None, r_subtensorspec=None):
errl = []
errr = []
for l, o, r in zip(idxl, idxo, idxr):
il=np.argwhere(true_triples[:,0]==l).reshape(-1,)
io=np.argwhere(true_triples[:,1]==o).reshape(-1,)
ir=np.argwhere(true_triples[:,2]==r).reshape(-1,)
gl = schemaPenalty.schema_penalties_lr_fast(range(l_subtensorspec), [r] * l_subtensorspec, [o] * l_subtensorspec)
gr = schemaPenalty.schema_penalties_lr_fast([l] * r_subtensorspec, range(r_subtensorspec), [o] * r_subtensorspec)
slro = sl(r, o, gl)[0]
srlo = sr(l, o, gr)[0]
inter_l = [i for i in ir if i in io]
rmv_idx_l = [true_triples[i, 0] for i in inter_l if true_triples[i, 0] != l]
scores_l = (slro).flatten()
scores_l[rmv_idx_l] = -np.inf
errl += [np.argsort(np.argsort(-scores_l)).flatten()[l] + 1]
inter_r = [i for i in il if i in io]
rmv_idx_r = [true_triples[i, 2] for i in inter_r if true_triples[i, 2] != r]
scores_r = (srlo).flatten()
scores_r[rmv_idx_r] = -np.inf
errr += [np.argsort(np.argsort(-scores_r)).flatten()[r] + 1]
return errl, errr
| 2.046875 | 2 |
tests/test_markup.py | Kiran-Raj-Dev/ManimPango | 0 | 12794174 | # -*- coding: utf-8 -*-
from pathlib import Path
import pytest
import manimpango
from . import CASES_DIR
from ._manim import MarkupText
from .svg_tester import SVGStyleTester
ipsum_text = (
"<b>Lorem ipsum dolor</b> sit amet, <i>consectetur</i> adipiscing elit,"
"sed do eiusmod tempor incididunt ut labore et dolore"
"magna aliqua. Ut enim <b>ad</b> minim veniam, quis nostrud"
"exercitation ullamco laboris nisi ut aliquip"
"ex ea commodo consequat. Duis aute irure dolor"
"in reprehenderit in voluptate velit esse cillum"
"dolore eu fugiat nulla pariatur. Excepteur sint"
"occaecat cupidatat non proident, sunt in culpa qui"
"officia deserunt mollit anim id est laborum."
)
@pytest.mark.parametrize("text", ["foo", "<b>bar</b>", "வணக்கம்"])
def test_good_markup(text):
assert not manimpango.MarkupUtils.validate(
text,
), f"{text} should not fail validation"
@pytest.mark.parametrize("text", ["<b>foo", "<xyz>foo</xyz>"])
def test_bad_markup(text):
assert manimpango.MarkupUtils.validate(
text
), f"{text} should fail validation (unbalanced tags)"
@pytest.mark.parametrize(
"text,error",
[
(
"<b>foo",
"Error on line 1 char 23: Element “markup” was closed, "
"but the currently open element is “b”",
),
(
"<xyz>foo</xyz>",
"Unknown tag 'xyz' on line 1 char 14",
),
],
)
def test_bad_markup_error_message(text, error):
assert manimpango.MarkupUtils.validate(text) == error
def test_markup_text(tmpdir):
loc = Path(tmpdir, "test.svg")
assert not loc.exists()
MarkupText(
'<span underline="error"><b><i>Hello Manim</i></b></span>', filename=str(loc)
)
assert loc.exists()
def test_markup_justify(tmpdir):
    # don't know how to verify this correctly:
    # it varies across different systems, so we
    # just check whether it runs
loc = Path(tmpdir, "test.svg")
assert not loc.exists()
MarkupText(ipsum_text, justify=True, filename=str(loc))
assert loc.exists()
def test_markup_indent(tmpdir):
    # don't know how to verify this correctly:
    # it varies across different systems, so we
    # just check whether it runs
loc = Path(tmpdir, "test.svg")
assert not loc.exists()
MarkupText(ipsum_text, indent=10, filename=str(loc))
assert loc.exists()
def test_markup_alignment(tmpdir):
    # don't know how to verify this correctly:
    # it varies across different systems, so we
    # just check whether it runs
loc = Path(tmpdir, "test.svg")
assert not loc.exists()
MarkupText(
ipsum_text,
alignment=manimpango.Alignment.CENTER,
filename=str(loc),
)
assert loc.exists()
def test_markup_style(tmpdir):
test_case = CASES_DIR / "hello_blue_world_green.svg"
expected = tmpdir / "expected.svg"
text = "<span foreground='BLUE'>Hello</span>\n<span foreground='GREEN'>World</span>"
MarkupText(
text,
filename=str(expected),
)
s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case)
assert len(s.got_svg_style) == len(s.expected_svg_style)
assert s.got_svg_style == s.expected_svg_style
def test_wrap_text(tmpdir):
tmpdir = Path(tmpdir)
wrapped = tmpdir / "wrap.svg"
nowrap = tmpdir / "nowarap.svg"
MarkupText(ipsum_text, wrap_text=False, filename=str(nowrap))
MarkupText(ipsum_text, filename=str(wrapped))
assert wrapped.read_text() != nowrap.read_text()
| 2.34375 | 2 |
orb_simulator/orbsim_language/orbsim_ast/bitwise_shift_right_node.py | dmguezjaviersnet/IA-Sim-Comp-Project | 1 | 12794175 | <filename>orb_simulator/orbsim_language/orbsim_ast/bitwise_shift_right_node.py
from orbsim_language.orbsim_ast.binary_expr_node import BinaryExprNode
# >>
class BitwiseShiftRightNode(BinaryExprNode):
pass
| 1.695313 | 2 |
pylabnet/network/client_server/dio_breakout.py | wi11dey/pylabnet | 10 | 12794176 | <gh_stars>1-10
from pylabnet.network.core.service_base import ServiceBase
from pylabnet.network.core.client_base import ClientBase
class Service(ServiceBase):
def exposed_measure_voltage(self, board, channel):
return self._module.measure_voltage(board, channel)
def exposed_set_high_voltage(self, board, channel, voltage):
return self._module.set_high_voltage(board, channel, voltage)
def exposed_set_low_voltage(self, board, channel, voltage):
return self._module.set_low_voltage(board, channel, voltage)
def exposed_get_high_voltage(self, board, channel):
return self._module.get_high_voltage(board, channel)
def exposed_get_low_voltage(self, board, channel):
return self._module.get_low_voltage(board, channel)
def exposed_save(self):
return self._module.save()
def exposed_override(self, board, channel, state=True):
return self._module.override(board, channel, state)
def exposed_disable_override(self, board, channel):
return self._module.disable_override(board, channel)
def close_server(self):
""" Closes the server for which the service is running
Overwrites parent class method
"""
self._module.close()
super().close_server()
class Client(ClientBase):
def measure_voltage(self, board, channel):
return self._service.exposed_measure_voltage(board, channel)
def set_high_voltage(self, board, channel, voltage):
return self._service.exposed_set_high_voltage(board, channel, voltage)
def set_low_voltage(self, board, channel, voltage):
return self._service.exposed_set_low_voltage(board, channel, voltage)
def get_high_voltage(self, board, channel):
return self._service.exposed_get_high_voltage(board, channel)
def get_low_voltage(self, board, channel):
return self._service.exposed_get_low_voltage(board, channel)
def save(self):
return self._service.exposed_save()
def override(self, board, channel, state=True):
return self._service.exposed_override(board, channel, state)
def disable_override(self, board, channel):
return self._service.exposed_disable_override(board, channel)
| 2.46875 | 2 |
api/__init__.py | adenilsonElias/Gerenciador_Casa_Aluguel | 0 | 12794177 | """
API of the Rental House Manager (Gerenciador de Casas de Aluguel)
=================================================================
"""
# https://www.pythoncentral.io/introduction-to-sqlite-in-python/
import sqlite3
import config
def make_connection():
return sqlite3.connect(config.DATABASE_URL)
class InquilinoException(Exception):
...
class CasaException(Exception):
...
class DAO():
def __init__(self, conn):
self.conn = conn
class Casa_DAO(DAO):
def adiciona_casa(self, nome=None, valor_aluguel=None, agua=None,
instalacao_eletrica=None, commit=False, rollback=False):
if nome is None:
raise Exception("Necessário prover nome.")
if valor_aluguel is None:
raise Exception("Necessário prover um valor para o aluguel.")
try:
cursor = self.conn.cursor()
cursor.execute("""
INSERT INTO
casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao)
VALUES
(?,?,?,?)
""", (nome, valor_aluguel, agua, instalacao_eletrica))
if commit:
self.conn.commit()
return {
'id_casa': cursor.lastrowid,
'nome_casa': nome,
'valor_aluguel': valor_aluguel,
'agua_casa': agua,
'num_instalacao_eletrica': instalacao_eletrica
}
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
return None
def todas_casas(self, vazias=False):
cursor = self.conn.cursor()
if vazias:
cursor.execute("""
SELECT c.id_casa, nome_casa, valor_aluguel_casa,
agua_casa, i.num_instalacao, cpf_titular
FROM casa c
LEFT JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao
WHERE c.id_casa NOT IN (
SELECT casa.id_casa from casa
JOIN contrato ON contrato.id_casa= casa.id_casa
WHERE ativo )
GROUP BY c.id_casa;
""")
else:
cursor.execute("""
SELECT c.id_casa, nome_casa, valor_aluguel_casa,
agua_casa, i.num_instalacao, cpf_titular
FROM casa c
LEFT JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao;
""")
casas = cursor.fetchall()
return [{
'id_casa': x[0],
'nome_casa': x[1],
'valor_aluguel': x[2],
'agua_casa': x[3],
'num_instalacao_eletrica': x[4],
'cpf': x[5]
} for x in casas]
def altera_casa(self, id=None, commit=False, rollback=False,
**kwargs):
if id is None:
raise Exception("Necessário prover um ID")
if not len(kwargs):
raise Exception("Necessário prover novas informações para o Inquilino")
query = f'''UPDATE casa
SET {', '.join([f"{key}{'_casa' if key != 'num_instalacao' else '' } = ?" for key in kwargs.keys()])}
WHERE id_casa = ?'''
# return None
try:
cursor = self.conn.cursor()
cursor.execute(query, tuple((kwargs[k] for k in kwargs.keys())) + tuple([id]))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
class Instalacao_Eletrica_DAO(DAO):
def adiciona_instalacao_eletrica(self, num_instalacao=None, cpf=None, commit=False, rollback=False):
if num_instalacao is None:
raise Exception("Necessário prover um número de instalação")
if cpf is None:
raise Exception("Necessário prover um número de CPF")
try:
cursor = self.conn.cursor()
cursor.execute("""
INSERT INTO
instalacao_eletrica
VALUES
(?, ?)
""", (num_instalacao, cpf))
if commit:
self.conn.commit()
return {
'num_instalacao': num_instalacao,
'cpf_titular': cpf
}
except sqlite3.Error as e:
# e
if rollback:
self.conn.rollback()
return None
def altera_instalacao(self, num_instalacao, cpf, commit=False, rollback=False):
query = f'''UPDATE instalacao_eletrica
SET cpf_titular = ?
WHERE num_instalacao = ? '''
# return None
try:
cursor = self.conn.cursor()
cursor.execute(query, (cpf, num_instalacao))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
def todas_instalacoes(self):
cursor = self.conn.cursor()
cursor.execute("""
SELECT * FROM instalacao_eletrica;
""")
        instalacoes = cursor.fetchall()
return [{
'num_instalacao': x[0],
'cpf_titular': x[1]
} for x in instalacoes]
class Inquilino_DAO(DAO):
def adiciona_inquilino(self, cpf=None, nome=None,
rg=None, commit=False, rollback=False):
if cpf is None:
raise Exception("Necessário prover um número de CPF")
if nome is None:
raise Exception("Necessário prover um Nome")
if rg is None:
raise Exception("Necessário prover um RG")
try:
cursor = self.conn.cursor()
cursor.execute("""
INSERT INTO
inquilino(cpf_inq, nome_inq, rg_inq)
VALUES
(?, ?, ?)
""", (cpf, nome, rg))
if commit:
self.conn.commit()
return {
'id_inq': cursor.lastrowid,
'cpf_inq': cpf,
'nome_inq': nome,
'rg_inq': rg
}
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
return None
def todos_inquilinos(self, ativos=False, inativos=False):
cursor = self.conn.cursor()
if ativos and inativos:
raise Exception("Conflito")
elif ativos:
cursor.execute("""
select * from inquilino
where id_inq in (select DISTINCT id_inq from contrato where ativo);
""")
elif inativos:
cursor.execute("""
select * from inquilino
where id_inq not in (select DISTINCT id_inq from contrato where ativo);
""")
else:
cursor.execute("""
SELECT * from inquilino;
""")
inquilinos = cursor.fetchall()
return [{
'id_inq': x[0],
'cpf_inq': x[1],
'nome_inq': x[2],
'rg_inq': x[3]
} for x in inquilinos]
def altera_inquilino(self, id=None, commit=False, rollback=False,
**kwargs):
if id is None:
raise Exception("Necessário prover um ID")
if not len(kwargs):
raise Exception("Necessário prover novas informações para o Inquilino")
query = f'''UPDATE inquilino
SET {', '.join([f'{key}_inq = ?' for key in kwargs.keys()])}
WHERE id_inq = ?'''
try:
cursor = self.conn.cursor()
cursor.execute(query, tuple((kwargs[k] for k in kwargs.keys())) + tuple([id]))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
class Contrato_DAO(DAO):
def adiciona_contrato(self, valor=None, ativo=True, dia_vencimento=None,
fim_contrato=None, casa=None, inq=None,
commit=False, rollback=False):
if valor is None:
raise Exception("Necessário prover um valor de aluguel para o contrato")
if dia_vencimento is None:
raise Exception("Necessário prover uma data de vencimento")
if casa is None:
raise Exception("Necessário escolher uma casa")
if inq is None:
raise Exception("Necessário escolher um inquilino")
try:
cursor = self.conn.cursor()
self._valida(inq, casa)
cursor.execute("""
INSERT INTO
contrato(valor, ativo, dt_fim_contrato, dia_venc_aluguel, id_casa, id_inq)
VALUES
(?,?,?,?,?,?)
""", (valor, ativo,fim_contrato, dia_vencimento, casa, inq))
if commit:
self.conn.commit()
return {
'id_contrato': cursor.lastrowid,
'valor': valor,
'ativo': ativo,
'dt_fim_contrato': fim_contrato,
'dia_venc_aluguel': dia_vencimento,
'id_casa': casa,
'id_inq': inq
}
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
return None
def _valida(self, id_inq=None, id_casa=None):
c = Contrato_DAO(make_connection())
if id_inq and id_inq in [x['id_inq'] for x in c.todos_contratos() if x['ativo']]:
raise InquilinoException()
if id_casa and id_casa in [x['id_casa'] for x in c.todos_contratos() if x['ativo']]:
raise CasaException()
def todos_contratos(self):
cursor = self.conn.cursor()
cursor.execute("""
SELECT * FROM contrato;
""")
contratos = cursor.fetchall()
return [{
'id_contrato': x[0],
'valor': x[1],
'ativo': x[2],
'dt_fim_contrato': x[3],
'dia_venc_aluguel': x[4],
'id_casa': x[5],
'id_inq': x[6]
} for x in contratos]
def altera_valor_contrato(self, id=None, valor=None, commit=False, rollback=False):
if id is None:
raise Exception("Necessário prover um ID")
if valor is None:
raise Exception("Necessário prover um valor")
query = f'''UPDATE contrato
SET valor = ?
WHERE id_contrato = ?'''
print(query)
try:
cursor = self.conn.cursor()
cursor.execute(query, (valor, id))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
def inativa_contrato(self, id=None, commit=False, rollback=False):
if id is None:
raise Exception("Necessário prover um ID")
query = '''UPDATE contrato
SET ativo = 0
WHERE id_contrato = ?'''
try:
cursor = self.conn.cursor()
cursor.execute(query, (id, ))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
def ativa_contrato(self, id=None, commit=False, rollback=False):
if id is None:
raise Exception("Necessário prover um ID")
C = self.get_contrato(id)
self._valida(C['id_inq'], C['id_casa'] )
query = '''UPDATE contrato
SET ativo = 1
WHERE id_contrato = ?'''
try:
cursor = self.conn.cursor()
cursor.execute(query, (id,))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
def get_contrato(self, id):
cursor = self.conn.cursor()
cursor.execute("""
SELECT * FROM contrato
WHERE id_contrato = ?;
""", tuple([id]))
contratos = cursor.fetchall()
return [{
'id_contrato': x[0],
'valor': x[1],
'ativo': x[2],
'dt_fim_contrato': x[3],
'dia_venc_aluguel': x[4],
'id_casa': x[5],
'id_inq': x[6]
} for x in contratos][0]
class PagamentoDAO(DAO):
def realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False, commit=False, rollback=False):
if id_contrato is None:
raise Exception("Necessário prover um contrato")
if dt_venc is None:
raise Exception("Necessário prover uma data de vencimento")
if dt_pag is None:
raise Exception("Necessário prover uma data de pagamento")
try:
cursor = self.conn.cursor()
cursor.execute("""
INSERT INTO
pagamento(dt_venc, dt_pag, deposito, id_contrato)
VALUES
(?, ?, ?, ?)
""", (dt_venc, dt_pag, deposito, id_contrato))
if commit:
self.conn.commit()
return {
'id_pag': cursor.lastrowid ,
'dt_venc': dt_venc ,
'dt_pag': dt_pag ,
'deposito': deposito ,
'id_contrato': id_contrato
}
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
return None
def todos_pagamentos(self):
cursor = self.conn.cursor()
cursor.execute("""
SELECT * FROM pagamento;
""")
pagamentos = cursor.fetchall()
return [{
'id_pag': x[0] ,
'dt_venc': x[1] ,
'dt_pag': x[2] ,
'deposito': x[3] ,
'id_contrato': x[4]
} for x in pagamentos]
def todos_pagamentos_contrato(self, id_contrato):
cursor = self.conn.cursor()
cursor.execute("""
SELECT * FROM pagamento
WHERE pagamento.id_contrato = ?;
""", (id_contrato))
pagamentos = cursor.fetchall()
return [{
'id_pag': x[0] ,
'dt_venc': x[1] ,
'dt_pag': x[2] ,
'deposito': x[3] ,
'id_contrato': x[4]
} for x in pagamentos]
def start_db(conn):
cursor = conn.cursor()
cursor.executescript("""
CREATE TABLE IF NOT EXISTS instalacao_eletrica (
num_instalacao VARCHAR(20) NOT NULL PRIMARY KEY,
cpf_titular VARCHAR(11) NOT NULL UNIQUE
);
CREATE TABLE IF NOT EXISTS casa(
id_casa INTEGER NOT NULL PRIMARY KEY,
nome_casa INTEGER NOT NULL,
valor_aluguel_casa INTEGER NOT NULL,
agua_casa VARCHAR(10),
num_instalacao VARCHAR(11) UNIQUE,
FOREIGN KEY (num_instalacao) REFERENCES instalacao_eletrica(num_instalacao)
);
CREATE TABLE IF NOT EXISTS inquilino(
id_inq INTEGER NOT NULL PRIMARY KEY,
cpf_inq VARCHAR(11) NOT NULL UNIQUE,
nome_inq VARCHAR(40) NOT NULL,
rg_inq VARCHAR(10) NOT NULL
);
CREATE TABLE IF NOT EXISTS contrato(
id_contrato INTEGER NOT NULL PRIMARY KEY,
valor REAL NOT NULL,
ativo INTEGER NOT NULL,
dt_fim_contrato DATE NOT NULL,
dia_venc_aluguel INTEGER NOT NULL,
id_casa INTEGER NOT NULL,
id_inq INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS pagamento(
id_pag INTEGER NOT NULL PRIMARY KEY,
dt_venc VARCHAR(23) NOT NULL,
dt_pag VARCHAR(23),
deposito INTEGER NOT NULL,
id_contrato INTEGER ,
FOREIGN KEY (id_contrato) REFERENCES contrato(id_contrato)
);
""")
| 4.09375 | 4 |
wisdem/orbit/phases/install/cable_install/__init__.py | ptrbortolotti/WISDEM | 81 | 12794178 | <reponame>ptrbortolotti/WISDEM
"""Initialize cable installation functionality"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from .array import ArrayCableInstallation
from .common import SimpleCable
from .export import ExportCableInstallation
| 0.984375 | 1 |
src/nsupdate/utils/_tests/test_mail.py | mirzazulfan/nsupdate.info | 774 | 12794179 | <reponame>mirzazulfan/nsupdate.info
"""
Tests for mail module.
"""
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from ..mail import translate_for_user
class TestTransUser(object):
def test(self):
User = get_user_model()
user = User.objects.get(username='test')
user.profile.language = 'de'
msgs = [_('German'), _('English')]
msgs = translate_for_user(user, *msgs)
assert msgs == ['Deutsch', 'Englisch']
| 2.1875 | 2 |
tool_sdk/api/basic/list_tool_pb2.py | easyopsapis/easyops-api-python | 5 | 12794180 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: list_tool.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tool_sdk.model.tool import tool_pb2 as tool__sdk_dot_model_dot_tool_dot_tool__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='list_tool.proto',
package='basic',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0flist_tool.proto\x12\x05\x62\x61sic\x1a\x1etool_sdk/model/tool/tool.proto\"\x95\x01\n\x0fListToolRequest\x12\x0e\n\x06\x64\x65tail\x18\x01 \x01(\x08\x12\x0e\n\x06plugin\x18\x02 \x01(\x08\x12\x10\n\x08\x63\x61tegory\x18\x03 \x01(\t\x12\x13\n\x0bpermissions\x18\x04 \x01(\t\x12\x16\n\x0eonlyProduction\x18\x05 \x01(\x08\x12\x15\n\rshowInvisible\x18\x06 \x01(\x08\x12\x0c\n\x04tags\x18\x07 \x01(\t\"\\\n\x10ListToolResponse\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\r\n\x05total\x18\x03 \x01(\x05\x12\x18\n\x04list\x18\x04 \x03(\x0b\x32\n.tool.Tool\"r\n\x17ListToolResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12%\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x17.basic.ListToolResponseb\x06proto3')
,
dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,])
_LISTTOOLREQUEST = _descriptor.Descriptor(
name='ListToolRequest',
full_name='basic.ListToolRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='detail', full_name='basic.ListToolRequest.detail', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='plugin', full_name='basic.ListToolRequest.plugin', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='basic.ListToolRequest.category', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='permissions', full_name='basic.ListToolRequest.permissions', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='onlyProduction', full_name='basic.ListToolRequest.onlyProduction', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='showInvisible', full_name='basic.ListToolRequest.showInvisible', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='basic.ListToolRequest.tags', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=208,
)
_LISTTOOLRESPONSE = _descriptor.Descriptor(
name='ListToolResponse',
full_name='basic.ListToolResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='basic.ListToolResponse.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='basic.ListToolResponse.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total', full_name='basic.ListToolResponse.total', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list', full_name='basic.ListToolResponse.list', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=210,
serialized_end=302,
)
_LISTTOOLRESPONSEWRAPPER = _descriptor.Descriptor(
name='ListToolResponseWrapper',
full_name='basic.ListToolResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='basic.ListToolResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='basic.ListToolResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='basic.ListToolResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='basic.ListToolResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=304,
serialized_end=418,
)
_LISTTOOLRESPONSE.fields_by_name['list'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL
_LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE
DESCRIPTOR.message_types_by_name['ListToolRequest'] = _LISTTOOLREQUEST
DESCRIPTOR.message_types_by_name['ListToolResponse'] = _LISTTOOLRESPONSE
DESCRIPTOR.message_types_by_name['ListToolResponseWrapper'] = _LISTTOOLRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListToolRequest = _reflection.GeneratedProtocolMessageType('ListToolRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTTOOLREQUEST,
'__module__' : 'list_tool_pb2'
# @@protoc_insertion_point(class_scope:basic.ListToolRequest)
})
_sym_db.RegisterMessage(ListToolRequest)
ListToolResponse = _reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTTOOLRESPONSE,
'__module__' : 'list_tool_pb2'
# @@protoc_insertion_point(class_scope:basic.ListToolResponse)
})
_sym_db.RegisterMessage(ListToolResponse)
ListToolResponseWrapper = _reflection.GeneratedProtocolMessageType('ListToolResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _LISTTOOLRESPONSEWRAPPER,
'__module__' : 'list_tool_pb2'
# @@protoc_insertion_point(class_scope:basic.ListToolResponseWrapper)
})
_sym_db.RegisterMessage(ListToolResponseWrapper)
# @@protoc_insertion_point(module_scope)
| 1.179688 | 1 |
hrdf-tools/hrdf_db_reporter_cli.py | vasile/OJP-Showcase | 0 | 12794181 | import argparse
import os
import re
import sys
from inc.HRDF.Stops_Reporter.stops_reporter import HRDF_Stops_Reporter
from inc.HRDF.HRDF_Parser.hrdf_helpers import compute_formatted_date_from_hrdf_db_path
from inc.HRDF.db_helpers import compute_db_tables_report
parser = argparse.ArgumentParser(description = 'Generate stops report from HRDF DB')
parser.add_argument('-p', '--path', help='Path to HRDF DB')
args = parser.parse_args()
db_path = args.path
if db_path is None:
print("ERROR, use with --path")
sys.exit(1)
compute_db_tables_report(db_path=db_path) | 2.578125 | 3 |
AnnotationTools/ui_mainwindow.py | upzheng/Electrocardio-Panorama | 33 | 12794182 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.buttonLayout = QtWidgets.QGridLayout()
self.gridLayout.addLayout(self.buttonLayout, 0, 0)
self.openfileButton = QtWidgets.QPushButton(self.centralwidget)
self.openfileButton.setObjectName("openfileButton")
self.buttonLayout.addWidget(self.openfileButton, 0, 0, 1, 1)
self.saveButton = QtWidgets.QPushButton(self.centralwidget)
self.saveButton.setObjectName('saveButton')
self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1)
self.nextButton = QtWidgets.QPushButton(self.centralwidget)
self.nextButton.setObjectName('nextButton')
self.buttonLayout.addWidget(self.nextButton, 2, 0, 1, 1)
self.clearButton = QtWidgets.QPushButton(self.centralwidget)
self.clearButton.setObjectName('clearButton')
self.buttonLayout.addWidget(self.clearButton, 4, 0, 1, 1)
self.lastButton = QtWidgets.QPushButton(self.centralwidget)
self.lastButton.setObjectName('lastButton')
self.buttonLayout.addWidget(self.lastButton, 3, 0, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.label_pos = QtWidgets.QLabel()
self.buttonLayout.addWidget(self.label_pos, 0, 1)
self.label_state = QtWidgets.QLabel()
self.buttonLayout.addWidget(self.label_state, 1, 1)
self.label_file_name = QtWidgets.QLabel()
self.buttonLayout.addWidget(self.label_file_name, 2, 1)
self.label_annotation_points = QtWidgets.QLabel()
self.buttonLayout.addWidget(self.label_annotation_points, 4, 1)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.openfileButton.setText(_translate("MainWindow", "open"))
self.saveButton.setText('save')
self.nextButton.setText('next')
self.clearButton.setText('clear')
self.lastButton.setText('previous')
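# Illustrative launcher sketch (not part of the pyuic5-generated code, which is
# normally left untouched): the usual pattern is to instantiate the generated
# class from a separate script and call setupUi() on a QMainWindow.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())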
| 2.109375 | 2 |
typeidea/blog/admin.py | IchLiebeDeutsch/typeidea | 0 | 12794183 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Post, Category, Tag
from typeidea.custom_site import custom_site
from django.utils.html import format_html
from django.core.urlresolvers import reverse
from .adminforms import PostAdminForm
from typeidea.custom_admin import BaseOwnerAdmin
# Register your models here.
@admin.register(Post, site=custom_site)
class PostAdmin(BaseOwnerAdmin):
form = PostAdminForm
list_display = ['title', 'category', 'status', 'owner', 'created_time', 'operator']
list_display_links = ['category', 'status']
search_fields = ['title', 'category__name', 'owner__first_name']
save_on_top = False
    show_full_result_count = False # optimize the result display
list_filter = ['title']
actions_on_top = True
date_hierarchy = 'created_time'
list_editable = ['title', ]
    # edit-page layout
    fieldsets = ( # mutually exclusive with 'fields'
('基础配置', {
'fields': (('category', 'title'),
'desc',
'status',
'content')
}),
('高级配置', {
'classes': ('collapse', 'addon'),
'fields': ('tag',),
}),
    ) # controls the form layout
filter_horizontal = ('tag',)
def operator(self, obj):
return format_html(
'<a href="{}">编辑</a>',
reverse('cus_site:blog_post_change', args=(obj.id,))
)
    operator.short_description = '操作'
operator.empty_value_display = '???'
class PostInlineAdmin(admin.TabularInline):
fields = ('title', 'status')
extra = 1
model = Post
@admin.register(Category, site=custom_site)
class CategoryAdmin(BaseOwnerAdmin):
list_display = ['name', 'status','is_nav', 'created_time']
inlines = [PostInlineAdmin,]
fields = ('name', 'status', 'is_nav',)
@admin.register(Tag, site=custom_site)
class TagAdmin(BaseOwnerAdmin):
list_display = ['name', 'status', 'owner', 'created_time']
| 1.90625 | 2 |
src/optimizer.py | Chizuchizu/riadd | 1 | 12794184 | from adabelief_pytorch import AdaBelief
import torch_optimizer
from torch import optim
from src.sam import SAM
__OPTIMIZERS__ = {
"AdaBelief": AdaBelief,
"RAdam": torch_optimizer.RAdam,
"SAM": SAM
}
def get_optimizer(cfg, model):
optimizer_name = cfg.optimizer.name
if optimizer_name == "SAM":
base_optimizer_name = cfg.optimizer.base
if __OPTIMIZERS__.get(base_optimizer_name) is not None:
base_optimizer = __OPTIMIZERS__[base_optimizer_name]
else:
base_optimizer = optim.__getattribute__(base_optimizer_name)
return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param)
if __OPTIMIZERS__.get(optimizer_name) is not None:
return __OPTIMIZERS__[optimizer_name](model.parameters(), **cfg.optimizer.param)
else:
return optim.__getattribute__(optimizer_name)(model.parameters(), **cfg.optimizer.param)
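# Illustrative usage sketch (not part of the original module). The project
# presumably builds `cfg` with Hydra/OmegaConf; types.SimpleNamespace is used
# here only to mimic the attributes that get_optimizer reads
# (cfg.optimizer.name, cfg.optimizer.param and, for SAM, cfg.optimizer.base).
if __name__ == "__main__":
    from types import SimpleNamespace
    from torch import nn

    model = nn.Linear(10, 2)
    cfg = SimpleNamespace(
        optimizer=SimpleNamespace(
            name="Adam",          # resolved through torch.optim in the else branch
            param={"lr": 1e-3},
        )
    )
    optimizer = get_optimizer(cfg, model)  # -> torch.optim.Adam instance
    # name="SAM" together with base="Adam" would instead go through the SAM branch.
    print(type(optimizer).__name__)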
| 2.375 | 2 |
eval/keras_v1/utils.py | zifanw/smoothed_geometry | 5 | 12794185 | import numpy as np
import tensorflow as tf
import random
import _pickle as pkl
import matplotlib.pyplot as plt
from pylab import rcParams
import scipy
import scipy.stats as stats
from tensorflow.python.ops import gen_nn_ops
config_gpu = tf.ConfigProto()
config_gpu.gpu_options.allow_growth = True
MEAN_IMAGE = np.zeros((1, 227, 227, 3)).astype(np.float32)
MEAN_IMAGE[:, :, :, 0] = 103.939
MEAN_IMAGE[:, :, :, 1] = 116.779
MEAN_IMAGE[:, :, :, 2] = 123.68
EPSILON = 1e-12
MIN_INPUT = -MEAN_IMAGE
MAX_INPUT = 255 * np.ones_like(MEAN_IMAGE).astype(np.float32) - MEAN_IMAGE
def dataReader():
X = np.zeros((100, 227, 227, 3))
y = np.zeros(100)
for num in range(4):
with open(
"./ImagenetValidationSamples/imagenet_sample_{}.pkl".format(
num), "rb") as inputs:
dic_temp = pkl.load(inputs)
X[num * 20:num * 20 + 20] = dic_temp["X"]
y[num * 20:num * 20 + 20] = dic_temp["y"]
labels = dic_temp["labels"]
return X, y.astype(int), labels
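# Note on the expected pickle layout (inferred from the loop above): each
# imagenet_sample_<n>.pkl holds a dict with "X" (20, 227, 227, 3) mean-subtracted
# images, "y" (20,) integer labels and "labels", the class-name lookup shared by
# all files. Only files 0-3 are read here, so the last 20 rows of X stay zero.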
class SimpleGradientAttack(object):
def __init__(self,
mean_image,
sess,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
target_map=None,
pixel_max=255.):
"""
Args:
mean_image: The mean image of the data set(The assumption is that the images are mean subtracted)
sess: Session containing model(and surrogate model's) graphs
test_image: Mean subtracted test image
original_label: True label of the image
NET: Original neural network. It's assumed that NET.saliency is the saliency map tensor and
NET.saliency_flatten is its flatten version.
NET2: Surrogate neural network with the same structure and weights of the orignal network but
with activations replaced by softplus function
(necessary only when the activation function of the original function
does not have second order gradients, ex: ReLU). It's assumed that NET.saliency is the
saliency map tensor and NET2.saliency_flatten is its flatten version.
k_top: the topK parameter of the attack (refer to the original paper)
pixel_max: the maximum pixel value in the image.
"""
self.pixel_max = pixel_max
if len(test_image.shape) != 3:
raise ValueError("Invalid Test Image Dimensions")
if NET.input.get_shape()[-3]!=test_image.shape[-3] or NET.input.get_shape()[-2]!=test_image.shape[-2] or\
NET.input.get_shape()[-1]!=test_image.shape[-1]:
raise ValueError(
"Model's input dimensions is not Compatible with the provided test image!"
)
if self.check_prediction(sess, original_label, test_image, NET):
return
self.sess = sess
self.target_map = target_map
self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2],
k_top)
if NET2 is None:
NET2 = NET
else:
self.create_extra_ops(NET2, test_image.shape[-3],
test_image.shape[-2], k_top)
if NET2.input.get_shape()[-3]!=test_image.shape[-3] or NET2.input.get_shape()[-2]!=test_image.shape[-2] or\
NET2.input.get_shape()[-1]!=test_image.shape[-1]:
raise ValueError(
"Surrogate model's input dimensions is not Compatible with the provided test image!"
)
self.NET = NET
self.NET2 = NET2
self.test_image = test_image
self.original_label = original_label
self.mean_image = mean_image
self.k_top = k_top
w, h, c = self.mean_image.shape
self.topk_ph = tf.placeholder(tf.float32,
shape=[w * h],
name='topk_ph')
self.mass_center_ph = tf.placeholder(tf.float32,
shape=[2],
name='mass_center_ph')
self.target_map_ph = tf.placeholder(tf.float32,
shape=[w, h],
name='target_map_ph')
self.original_output = self.NET.predict(test_image[None, :])
_, num_class = self.original_output.shape
self.original_output_ph = tf.placeholder(
tf.float32, shape=[None, num_class],
name='original_output_ph') # only for the manipulation attack
self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
self.create_attack_ops(NET2, test_image.shape[-3],
test_image.shape[-2])
self.update_new_image(test_image, original_label)
def update_new_image(self, test_image, original_label, target_map=None):
w, h, c = test_image.shape
self.test_image = test_image
self.original_label = original_label
assert self.check_prediction(self.sess, original_label, test_image,
self.NET) == False
if target_map is not None:
self.target_map = target_map
self.original_output = self.NET2.predict(test_image[None, :])
self.saliency1, self.topK = self.run_model(
self.sess, [self.NET.saliency, self.NET.top_idx], self.test_image,
self.NET)
self.saliency1_flatten = np.reshape(
self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])
elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
self.elements1 = np.zeros(w * h)
self.elements1[elem1] = 1
self.original_topk = self.elements1
self.mass_center1 = self.run_model(self.sess, self.NET.mass_center,
self.test_image,
self.NET).astype(int)
self.original_mass_center = self.mass_center1
def check_prediction(self, sess, original_label, image, NET):
""" If the network's prediction is incorrect in the first place, attacking has no meaning."""
predicted_scores = sess.run(
NET.output,
feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
if np.argmax(predicted_scores, 1) != original_label:
print("Network's Prediction is Already Incorrect!")
return True
else:
self.original_confidence = np.max(predicted_scores)
return False
def create_extra_ops(self, NET, w, h, k_top):
top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
NET.mass_center = tf.stack([
tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
tf.reduce_sum(NET.saliency * y_mesh) / (w * h)
])
def create_attack_ops(self, NET, w, h):
topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
self.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
mass_center_loss = -tf.reduce_sum(
(NET.mass_center - self.mass_center_ph)**2)
self.mass_center_direction = -tf.gradients(mass_center_loss,
NET.input)[0]
if self.target_map is not None:
target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
output_dis = tf.keras.losses.MSE(self.original_output_ph,
NET.output)
target_loss = tf.reduce_mean(
target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(
output_dis)
self.debug = target_loss
self.target_direction = -tf.gradients(target_loss, NET.input)[0]
def run_model(self, sess, operation, feed, NET):
if len(feed.shape) == 3:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph:
self.original_label,
self.topk_ph:
self.original_topk,
self.mass_center_ph:
self.original_mass_center
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
})
elif len(feed.shape) == 4:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input:
feed,
NET.label_ph:
self.original_label,
self.topk_ph:
self.original_topk,
self.mass_center_ph:
self.original_mass_center
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
})
else:
raise RuntimeError("Input image shape invalid!")
def give_simple_perturbation(self, attack_method, in_image):
w, h, c = self.test_image.shape
if attack_method == "random":
perturbation = np.random.normal(size=(w, h, c))
elif attack_method == "topK":
perturbation = self.run_model(self.sess, self.topK_direction,
in_image, self.NET2)
perturbation = np.reshape(perturbation, [w, h, c])
elif attack_method == "mass_center":
perturbation = self.run_model(self.sess,
self.mass_center_direction, in_image,
self.NET2)
perturbation = np.reshape(perturbation, [w, h, c])
elif attack_method == "target":
self.use_target = True
if self.target_map is None:
raise ValueError("No target region determined!")
else:
perturbation = self.run_model(self.sess, self.target_direction,
in_image, self.NET2)
debug = self.run_model(self.sess, self.debug, in_image,
self.NET2)
print("MSE: ", debug)
perturbation = np.reshape(perturbation, [w, h, c])
return np.sign(perturbation)
def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
if self.mean_image is None:
self.mean_image = np.zeros_like(in_image)
# out_image = self.test_image + np.clip(
# in_image + alpha * np.sign(pert) - self.test_image, -bound, bound)
d = in_image + alpha * np.sign(pert) - self.test_image
d_norm = np.linalg.norm(d.flatten(), ord=ord)
if d_norm > bound:
proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord)
else:
proj_ratio = 1
out_image = self.test_image + d * proj_ratio
out_image = np.clip(out_image, -self.mean_image,
self.pixel_max - self.mean_image)
return out_image
def check_measure(self, test_image_pert, measure):
prob = self.run_model(self.sess, self.NET.output, test_image_pert,
self.NET)
if np.argmax(prob, 1) == self.original_label:
if measure == "intersection":
top2 = self.run_model(self.sess, self.NET.top_idx,
test_image_pert, self.NET)
criterion = float(len(np.intersect1d(self.topK,
top2))) / self.k_top
elif measure == "correlation":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.stats.spearmanr(self.saliency1_flatten,
saliency2_flatten)[0]
elif measure == "mass_center":
center2 = self.run_model(self.sess, self.NET.mass_center,
test_image_pert, self.NET).astype(int)
criterion = -np.linalg.norm(self.mass_center1 - center2)
elif measure == "cosine":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.spatial.distance.cosine(
self.saliency1_flatten, saliency2_flatten)
else:
raise ValueError("Invalid measure!")
return criterion
else:
return 1.
def iterative_attack(self,
attack_method,
epsilon,
iters=100,
alpha=1,
beta_0=1e11,
beta_1=1e6,
measure="intersection",
target=None):
"""
Args:
attack_method: One of "mass_center", "topK" or "random"
            epsilon: allowed maximum L-infinity norm of the perturbation, e.g. 8
iters: number of maximum allowed attack iterations
alpha: perturbation size in each iteration of the attack
measure: measure for success of the attack (one of "correlation", "mass_center" or "intersection")
beta_0: parameter for manipulate (target) attack
beta_1: parameter for manipulate (target) attack
Returns:
intersection: The portion of the top K salient pixels in the original picture that are in the
            top K salient pixels of the perturbed image, divided by K
correlation: The rank correlation between saliency maps of original and perturbed image
center_dislocation: The L2 distance between saliency map mass centers in original and perturbed images
confidence: The prediction confidence of the perturbed image
"""
self.beta_0 = beta_0
self.beta_1 = beta_1
w, h, c = self.test_image.shape
test_image_pert = self.test_image.copy()
min_criterion = 1.
perturb_size = 0.
last_image = None
for counter in range(iters):
pert = self.give_simple_perturbation(attack_method,
test_image_pert)
test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,
epsilon)
criterion = self.check_measure(test_image_pert, measure)
if criterion < min_criterion:
min_criterion = criterion
self.perturbed_image = test_image_pert.copy()
perturb_size = np.max(
np.abs(self.test_image - self.perturbed_image))
else:
pass
if criterion == 1.:
return None
predicted_scores = self.run_model(self.sess, self.NET.output,
self.perturbed_image, self.NET)
confidence = np.max(predicted_scores)
self.saliency2, self.top2, self.mass_center2= self.run_model\
(self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET)
correlation = scipy.stats.spearmanr(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
intersection = float(len(np.intersect1d(self.topK,
self.top2))) / self.k_top
center_dislocation = np.linalg.norm(self.mass_center1 -
self.mass_center2.astype(int))
cos_distance = scipy.spatial.distance.cosine(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance
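# Illustrative call sequence (not part of the original module). `NET` is assumed
# to be the project's model wrapper, i.e. an object exposing .input, .output,
# .label_ph, .predict(), .saliency and .saliency_flatten as required by the
# constructor above; `sess` is the tf.Session holding its graph and NET_softplus
# is the softplus surrogate. Shapes follow the ImageNet constants at the top of
# this file.
#
#   X, y, labels = dataReader()
#   attack = SimpleGradientAttack(MEAN_IMAGE[0], sess, X[0], y[0], NET,
#                                 NET2=NET_softplus, k_top=1000)
#   result = attack.iterative_attack("topK", epsilon=8, iters=100, alpha=1,
#                                    measure="intersection")
#   if result is not None:
#       intersection, correlation, center_shift, confidence, pert_size, cos_d = result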
class IntegratedGradientsAttack(object):
def __init__(self,
sess,
mean_image,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
num_steps=100,
reference_image=None,
target_map=None,
pixel_max=255.):
"""
Args:
mean_image: The mean image of the data set(The assumption is that the images are mean subtracted)
sess: Session containing model(and surrogate model's) graphs
test_image: Mean subtracted test image
original_label: True label of the image
NET: Original neural network. It's assumed that NET.saliency is the saliency map tensor and
NET.saliency_flatten is its flatten version.
NET2: Surrogate neural network with the same structure and weights of the orignal network but
with activations replaced by softplus function
(necessary only when the activation function of the original function
does not have second order gradients, ex: ReLU). It's assumed that NET.saliency is the
saliency map tensor and NET2.saliency_flatten is its flatten version.
k_top: the topK parameter of the attack (refer to the original paper)
num_steps: Number of steps in Integrated Gradients Algorithm
reference_image: Mean subtracted reference image of Integrated Gradients Algorithm
pixel_max: the maximum pixel value in the image.
"""
self.pixel_max = pixel_max
if len(test_image.shape) != 3:
raise ValueError("Invalid Test Image Dimensions")
if sum([
NET.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
raise ValueError(
"Model's input dimensions is not Compatible with the provided test image!"
)
if self.check_prediction(sess, original_label, test_image, NET):
return
self.sess = sess
self.target_map = target_map
self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2],
k_top)
if NET2 is None:
NET2 = NET
else:
self.create_extra_ops(NET2, test_image.shape[-3],
test_image.shape[-2], k_top)
if sum([
NET2.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
raise ValueError(
"Surrogate model's input dimensions is not Compatible with the provided test image!"
)
self.NET = NET
self.NET2 = NET2
self.test_image = test_image
self.original_label = original_label
self.mean_image = mean_image
self.k_top = k_top
self.num_steps = num_steps
self.reference_image = np.zeros_like(
test_image) if reference_image is None else reference_image
w, h, c = self.mean_image.shape
self.topk_ph = tf.placeholder(tf.float32,
shape=[w * h],
name='topk_ph')
self.mass_center_ph = tf.placeholder(tf.float32,
shape=[2],
name='mass_center_ph')
self.target_map_ph = tf.placeholder(tf.float32,
shape=[w, h],
name='target_map_ph')
self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
self.original_output = self.NET.predict(test_image[None, :])
_, num_class = self.original_output.shape
self.original_output_ph = tf.placeholder(
tf.float32, shape=[None, num_class],
name='original_output_ph') # only for the manipulation attack
self.create_attack_ops(self.NET2, test_image.shape[-3],
test_image.shape[-2])
def check_prediction(self, sess, original_label, image, NET):
""" If the network's prediction is incorrect in the first place, attacking has no meaning."""
predicted_scores = sess.run(
NET.output,
feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
if np.argmax(predicted_scores, 1) != original_label:
print("Network's Prediction is Already Incorrect!")
return True
else:
self.original_confidence = np.max(predicted_scores)
return False
def update_new_image(self, test_image, original_label, target_map=None):
w, h, c = test_image.shape
self.test_image = test_image
self.original_label = original_label
assert self.check_prediction(self.sess, original_label, test_image,
self.NET) == False
if target_map is not None:
self.target_map = target_map
self.original_output = self.NET2.predict(test_image[None, :])
counterfactuals = self.create_counterfactuals(test_image)
self.saliency1, self.topK = self.run_model(
self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals,
self.NET)
self.saliency1_flatten = np.reshape(
self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])
elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
self.elements1 = np.zeros(w * h)
self.elements1[elem1] = 1
self.original_topk = self.elements1
self.mass_center1 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals,
self.NET).astype(int)
self.original_mass_center = self.mass_center1
def create_extra_ops(self, NET, w, h, k_top):
top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
NET.mass_center = tf.stack([
tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
tf.reduce_sum(NET.saliency * y_mesh) / (w * h)
])
def create_attack_ops(self, NET, w, h):
topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
self.debug = topK_loss
NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
mass_center_loss = -tf.reduce_sum(
(NET.mass_center - self.mass_center_ph)**2)
NET.mass_center_direction = -tf.gradients(mass_center_loss,
NET.input)[0]
if self.target_map is not None:
target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
output_dis = tf.keras.losses.MSE(self.original_output_ph,
NET.output)
target_loss = tf.reduce_mean(
target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(
output_dis)
self.debug = target_loss
self.target_direction = -tf.gradients(target_loss, NET.input)[0]
def create_counterfactuals(self, in_image):
ref_subtracted = in_image - self.reference_image
counterfactuals = np.array([(float(i+1)/self.num_steps) * ref_subtracted + self.reference_image\
for i in range(self.num_steps)])
return np.array(counterfactuals)
def run_model(self, sess, operation, feed, NET):
if len(feed.shape) == 3:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
NET.reference_image:
self.reference_image,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
NET.reference_image:
self.reference_image,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.reference_image: self.reference_image,
NET.label_ph: self.original_label,
self.target_map_ph: self.target_map
})
elif len(feed.shape) == 4:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
NET.reference_image:
self.reference_image,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
NET.reference_image:
self.reference_image,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.reference_image: self.reference_image,
NET.label_ph: self.original_label,
})
else:
raise RuntimeError("Input image shape invalid!")
def give_simple_perturbation(self, attack_method, in_image):
counterfactuals = self.create_counterfactuals(in_image)
w, h, c = self.test_image.shape
if attack_method == "random":
perturbation = np.random.normal(size=(self.num_steps, w, h, c))
elif attack_method == "topK":
perturbation = self.run_model(self.sess, self.NET2.topK_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "mass_center":
perturbation = self.run_model(self.sess,
self.NET2.mass_center_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "target":
self.use_target = True
if self.target_map is None:
raise ValueError("No target region determined!")
else:
perturbation = self.run_model(self.sess, self.target_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation,
[self.num_steps, w, h, c])
perturbation_summed = np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\
for i in range(self.num_steps)]),0)
return np.sign(perturbation_summed)
def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
if self.mean_image is None:
self.mean_image = np.zeros_like(in_image)
# out_image = self.test_image + np.clip(
# in_image + alpha * np.sign(pert) - self.test_image, -bound, bound)
d = in_image + alpha * np.sign(pert) - self.test_image
d_norm = np.linalg.norm(d.flatten(), ord=ord)
if d_norm > bound:
proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord)
else:
proj_ratio = 1
out_image = self.test_image + d * proj_ratio
out_image = np.clip(out_image, -self.mean_image,
self.pixel_max - self.mean_image)
return out_image
def check_measure(self, test_image_pert, measure):
prob = self.run_model(self.sess, self.NET.output, test_image_pert,
self.NET)
if np.argmax(prob, 1) == self.original_label:
counterfactuals = self.create_counterfactuals(test_image_pert)
if measure == "intersection":
top2 = self.run_model(self.sess, self.NET.top_idx,
counterfactuals, self.NET)
criterion = float(len(np.intersect1d(self.topK,
top2))) / self.k_top
elif measure == "correlation":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
counterfactuals, self.NET)
criterion = scipy.stats.spearmanr(self.saliency1_flatten,
saliency2_flatten)[0]
elif measure == "mass_center":
center2 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals, self.NET).astype(int)
criterion = -np.linalg.norm(self.mass_center1 - center2)
elif measure == "cosine":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.spatial.distance.cosine(
self.saliency1_flatten, saliency2_flatten)
else:
raise ValueError("Invalid measure!")
return criterion
else:
return 1
def iterative_attack(self,
attack_method,
epsilon,
iters=100,
alpha=1,
beta_0=1e11,
beta_1=1e6,
measure="intersection"):
"""
Args:
attack_method: One of "mass_center", "topK" or "random"
            epsilon: allowed maximum L-infinity norm of the perturbation, e.g. 8
iters: number of maximum allowed attack iterations
alpha: perturbation size in each iteration of the attack
measure: measure for success of the attack (one of "correlation", "mass_center" or "intersection")
Returns:
intersection: The portion of the top K salient pixels in the original picture that are in the
            top K salient pixels of the perturbed image, divided by K
correlation: The rank correlation between saliency maps of original and perturbed image
center_dislocation: The L2 distance between saliency map mass centers in original and perturbed images
confidence: The prediction confidence of the perturbed image
"""
self.beta_0 = beta_0
self.beta_1 = beta_1
w, h, c = self.test_image.shape
test_image_pert = self.test_image.copy()
min_criterion = 1.
for counter in range(iters):
# if counter % int(iters / 5) == 0:
# print("Iteration : {}".format(counter))
pert = self.give_simple_perturbation(attack_method,
test_image_pert)
# print(pert.sum())
test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,
epsilon)
criterion = self.check_measure(test_image_pert, measure)
if criterion < min_criterion:
# print("attack")
min_criterion = criterion
self.perturbed_image = test_image_pert.copy()
perturb_size = np.max(
np.abs(self.test_image - self.perturbed_image))
else:
# print("labels is changed")
pass
if min_criterion == 1.:
# print(
# "The attack was not successfull for maximum allowed perturbation size equal to {}"
# .format(epsilon))
# return 1., 1., self.original_confidence, 0.
return None
# print(
# '''For maximum allowed perturbation size equal to {}, the resulting perturbation size was equal to {}
# '''.format(epsilon,
# np.max(np.abs(self.test_image - self.perturbed_image))))
predicted_scores = self.run_model(self.sess, self.NET.output,
self.perturbed_image, self.NET)
confidence = np.max(predicted_scores)
counterfactuals = self.create_counterfactuals(self.perturbed_image)
self.saliency2, self.top2, self.mass_center2= self.run_model\
(self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET)
correlation = scipy.stats.spearmanr(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
intersection = float(len(np.intersect1d(self.topK,
self.top2))) / self.k_top
center_dislocation = np.linalg.norm(self.mass_center1 -
self.mass_center2.astype(int))
cos_distance = scipy.spatial.distance.cosine(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance
class SmoothGradientsAttack(object):
def __init__(self,
sess,
mean_image,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
num_steps=100,
reference_image=None,
target_map=None,
pixel_max=255.):
"""
Args:
mean_image: The mean image of the data set(The assumption is that the images are mean subtracted)
sess: Session containing model(and surrogate model's) graphs
test_image: Mean subtracted test image
original_label: True label of the image
NET: Original neural network. It's assumed that NET.saliency is the saliency map tensor and
NET.saliency_flatten is its flatten version.
NET2: Surrogate neural network with the same structure and weights of the orignal network but
with activations replaced by softplus function
(necessary only when the activation function of the original function
does not have second order gradients, ex: ReLU). It's assumed that NET.saliency is the
saliency map tensor and NET2.saliency_flatten is its flatten version.
k_top: the topK parameter of the attack (refer to the original paper)
num_steps: Number of steps in Integrated Gradients Algorithm
reference_image: not used
pixel_max: maximum pixel value in the input image
"""
self.pixel_max = pixel_max
if len(test_image.shape) != 3:
raise ValueError("Invalid Test Image Dimensions")
if sum([
NET.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
raise ValueError(
"Model's input dimensions is not Compatible with the provided test image!"
)
if self.check_prediction(sess, original_label, test_image, NET):
return
self.sess = sess
self.target_map = target_map
self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2],
k_top)
if NET2 is None:
NET2 = NET
else:
self.create_extra_ops(NET2, test_image.shape[-3],
test_image.shape[-2], k_top)
if sum([
NET2.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
raise ValueError(
"Surrogate model's input dimensions is not Compatible with the provided test image!"
)
self.NET = NET
self.NET2 = NET2
self.test_image = test_image
self.original_label = original_label
self.mean_image = mean_image
self.k_top = k_top
self.num_steps = num_steps
self.reference_image = np.zeros_like(
test_image) if reference_image is None else reference_image
w, h, c = self.mean_image.shape
self.topk_ph = tf.placeholder(tf.float32,
shape=[w * h],
name='topk_ph')
self.mass_center_ph = tf.placeholder(tf.float32,
shape=[2],
name='mass_center_ph')
self.target_map_ph = tf.placeholder(tf.float32,
shape=[w, h],
name='target_map_ph')
self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
self.original_output = self.NET.predict(test_image[None, :])
_, num_class = self.original_output.shape
self.original_output_ph = tf.placeholder(
tf.float32, shape=[None, num_class],
name='original_output_ph') # only for the manipulation attack
self.create_attack_ops(self.NET2, test_image.shape[-3],
test_image.shape[-2])
self.update_new_image(test_image, original_label)
def update_new_image(self, test_image, original_label, target_map=None):
w, h, c = test_image.shape
self.test_image = test_image
self.original_label = original_label
assert self.check_prediction(self.sess, original_label, test_image,
self.NET) == False
if target_map is not None:
self.target_map = target_map
self.original_output = self.NET2.predict(test_image[None, :])
counterfactuals = self.create_counterfactuals(test_image)
self.saliency1, self.topK = self.run_model(
self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals,
self.NET)
self.saliency1_flatten = np.reshape(
self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])
elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
self.elements1 = np.zeros(w * h)
self.elements1[elem1] = 1
self.original_topk = self.elements1
self.mass_center1 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals,
self.NET).astype(int)
self.original_mass_center = self.mass_center1
def check_prediction(self, sess, original_label, image, NET):
""" If the network's prediction is incorrect in the first place, attacking has no meaning."""
predicted_scores = sess.run(
NET.output,
feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
if np.argmax(predicted_scores, 1) != original_label:
print("Network's Prediction is Already Incorrect!")
print("Pred: ", np.argmax(predicted_scores, 1))
print("Label: ", original_label)
return True
else:
self.original_confidence = np.max(predicted_scores)
return False
def create_extra_ops(self, NET, w, h, k_top):
top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
NET.mass_center = tf.stack([
tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
tf.reduce_sum(NET.saliency * y_mesh) / (w * h)
])
def create_attack_ops(self, NET, w, h):
topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
self.debug = topK_loss
NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
mass_center_loss = -tf.reduce_sum(
(NET.mass_center - self.mass_center_ph)**2)
NET.mass_center_direction = -tf.gradients(mass_center_loss,
NET.input)[0]
if self.target_map is not None:
target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
output_dis = tf.keras.losses.MSE(self.original_output_ph,
NET.output)
target_loss = tf.reduce_mean(
target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(
output_dis)
self.debug = target_loss
self.target_direction = -tf.gradients(target_loss, NET.input)[0]
def create_counterfactuals(self, in_image, noise_ratio=0.1):
counterfactuals = np.array([
            in_image + np.random.normal(scale=noise_ratio *
(in_image.max() - in_image.min()),
size=in_image.shape)
for _ in range(self.num_steps)
])
return np.array(counterfactuals)
def run_model(self, sess, operation, feed, NET):
if len(feed.shape) == 3:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.target_map_ph: self.target_map
})
elif len(feed.shape) == 4:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
})
else:
raise RuntimeError("Input image shape invalid!")
def give_simple_perturbation(self, attack_method, in_image):
counterfactuals = self.create_counterfactuals(in_image)
w, h, c = self.test_image.shape
if attack_method == "random":
perturbation = np.random.normal(size=(self.num_steps, w, h, c))
elif attack_method == "topK":
perturbation = self.run_model(self.sess, self.NET2.topK_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "mass_center":
perturbation = self.run_model(self.sess,
self.NET2.mass_center_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "target":
if self.target_map is None:
raise ValueError("No target region determined!")
else:
perturbation = self.run_model(self.sess, self.target_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation,
[self.num_steps, w, h, c])
perturbation_summed = np.mean(perturbation, 0)
return np.sign(perturbation_summed)
def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
if self.mean_image is None:
self.mean_image = np.zeros_like(in_image)
# out_image = self.test_image + np.clip(
# in_image + alpha * np.sign(pert) - self.test_image, -bound, bound)
d = in_image + alpha * pert - self.test_image
d_norm = np.linalg.norm(d.flatten(), ord=ord)
if d_norm > bound:
proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord)
else:
proj_ratio = 1
out_image = self.test_image + d * proj_ratio
out_image = np.clip(out_image, -self.mean_image,
self.pixel_max - self.mean_image)
return out_image
def check_measure(self, test_image_pert, measure):
prob = self.run_model(self.sess, self.NET.output, test_image_pert,
self.NET)
if np.argmax(prob, 1) == self.original_label:
counterfactuals = self.create_counterfactuals(test_image_pert)
if measure == "intersection":
top2 = self.run_model(self.sess, self.NET.top_idx,
counterfactuals, self.NET)
criterion = float(len(np.intersect1d(self.topK,
top2))) / self.k_top
elif measure == "correlation":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
counterfactuals, self.NET)
criterion = scipy.stats.spearmanr(self.saliency1_flatten,
saliency2_flatten)[0]
elif measure == "mass_center":
center2 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals, self.NET).astype(int)
criterion = -np.linalg.norm(self.mass_center1 - center2)
elif measure == "cosine":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.spatial.distance.cosine(
self.saliency1_flatten, saliency2_flatten)
else:
raise ValueError("Invalid measure!")
return criterion
else:
return 1.
def iterative_attack(self,
attack_method,
epsilon,
iters=100,
alpha=1,
beta_0=1e11,
beta_1=1e6,
measure="intersection"):
"""
Args:
attack_method: One of "mass_center", "topK" or "random"
            epsilon: maximum allowed $\ell_\infty$ norm of the perturbation (e.g. 8/255)
iters: number of maximum allowed attack iterations
alpha: perturbation size in each iteration of the attack
measure: measure for success of the attack (one of "correlation", "mass_center" or "intersection")
Returns:
            intersection: The portion of the top K salient pixels of the original picture that are also among the
            top K salient pixels of the perturbed image, divided by K
correlation: The rank correlation between saliency maps of original and perturbed image
center_dislocation: The L2 distance between saliency map mass centers in original and perturbed images
confidence: The prediction confidence of the perturbed image
"""
w, h, c = self.test_image.shape
test_image_pert = self.test_image.copy()
self.original = self.test_image.copy()
if attack_method == 'target':
self.use_target = True
else:
self.use_target = False
self.beta_0 = beta_0
self.beta_1 = beta_1
min_criterion = 1.
last_image = None
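        # Iteratively perturb the image and keep the perturbed image whose saliency
        # map is least similar to the original (lowest criterion) while the predicted
        # label stays unchanged (check_measure returns 1. when the label flips).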
for counter in range(iters):
pert = self.give_simple_perturbation(attack_method,
test_image_pert)
test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,
epsilon)
criterion = self.check_measure(test_image_pert, measure)
if criterion < min_criterion:
min_criterion = criterion
self.perturbed_image = test_image_pert.copy()
perturb_size = np.max(
np.abs(self.test_image - self.perturbed_image))
else:
pass
if criterion == 1.:
return None
predicted_scores = self.run_model(self.sess, self.NET.output,
self.perturbed_image, self.NET)
confidence = np.max(predicted_scores)
counterfactuals = self.create_counterfactuals(self.perturbed_image)
        self.saliency2, self.top2, self.mass_center2 = self.run_model(
            self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center],
            counterfactuals, self.NET)
correlation = scipy.stats.spearmanr(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
intersection = float(len(np.intersect1d(self.topK,
self.top2))) / self.k_top
center_dislocation = np.linalg.norm(self.mass_center1 -
self.mass_center2.astype(int))
cos_distance = scipy.spatial.distance.cosine(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance
class UniGradientsAttack(SmoothGradientsAttack):
def __init__(self,
sess,
mean_image,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
num_steps=100,
radii=4,
reference_image=None,
target_map=None,
pixel_max=255.):
self.radii = radii / (255. / pixel_max)
super(UniGradientsAttack,
self).__init__(sess,
mean_image,
test_image,
original_label,
NET,
NET2=NET2,
                             k_top=k_top,
                             num_steps=num_steps,
                             reference_image=reference_image,
                             target_map=target_map,
                             pixel_max=pixel_max)
def create_counterfactuals(self, in_image):
counterfactuals = np.array([
in_image +
np.random.uniform(-1, 1, size=in_image.shape) * self.radii
for _ in range(self.num_steps)
])
return np.array(counterfactuals)
| 2.4375 | 2 |
pulse/uix/menu/widgets/plotWidget.py | open-pulse/OpenPulse | 23 | 12794186 | <reponame>open-pulse/OpenPulse
from os.path import isfile
from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QPushButton, QFormLayout, QMessageBox, QCheckBox
class PlotWidget(QWidget):
"""MenuInfo Widget
    This class is responsible for building a small area below the item menu
    when an item is clicked. It has been replaced by QDialog windows and is currently unused.
"""
def __init__(self, main_window):
super().__init__()
self.main_window = main_window
self._create_widgets()
self._add_widget_to_layout()
def _create_widgets(self):
self.plot_button = QPushButton('Plot')
self.wireframe = QCheckBox('Wireframe')
self.deformation = QCheckBox('Deformation')
self.animate = QCheckBox('Animate')
self.plot_button.clicked.connect(self._plot_function)
def _add_widget_to_layout(self):
self.layout = QFormLayout()
self.setLayout(self.layout)
self.layout.addRow(self.wireframe)
self.layout.addRow(self.deformation)
self.layout.addRow(self.animate)
self.layout.addRow(self.plot_button)
def _plot_function(self):
self.main_window.draw() | 2.78125 | 3 |
setuptools_pyecore/command.py | pyecore/setuptools-pyecore | 4 | 12794187 | """Implementation of the setuptools command 'pyecore'."""
import collections
import contextlib
import distutils.log as logger
import logging
import pathlib
import shlex
import pyecore.resources
import pyecoregen.ecore
import setuptools
class PyEcoreCommand(setuptools.Command):
"""A setuptools command for generating Python code from Ecore models.
An extra command for setuptools to generate static Python classes from Ecore models. The pyecore
command wraps pyecoregen - the real Python code generator for Ecore models. It searches for
Ecore models starting from the base directory and generates a Python package for each found
Ecore model.
:cvar _ECORE_FILE_EXT: File extension of Ecore XMI file
:cvar description: Description of ecore command
:cvar user_options: Options which can be passed by the user
:cvar boolean_options: Subset of user options which are binary
"""
_ECORE_FILE_EXT = 'ecore'
description = 'generate Python code from Ecore models'
user_options = [
('ecore-models=', 'e', 'specify Ecore models to generate code for'),
('output=', 'o', 'specify directories where output is generated'),
('user-modules=', None, 'dotted names of modules with user-provided mixins to import from '
'generated classes'),
('auto-register-package', None, 'Generate package auto-registration for the PyEcore '
'\'global_registry\''),
]
boolean_options = ['auto-register-package']
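    # Hypothetical setup.cfg snippet showing how these options could be set
    # (the values follow the parsing done in finalize_options below):
    #
    #   [pyecore]
    #   ecore-models = library
    #   output = default=gen library=library/gen
    #   user-modules = library=library.mixins
    #   auto-register-package = 1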
def initialize_options(self):
"""Set default values for all the options that this command supports. Note that these
defaults may be overridden by other commands, by the setup script, by config files, or by
the command-line.
"""
self.ecore_models = None
self.output = ''
self.user_modules = ''
self.auto_register_package = 0
def finalize_options(self):
"""Set final values for all the options that this command supports. This is always called
as late as possible, ie. after any option assignments from the command-line or from other
commands have been done.
"""
# parse ecore-models option
if self.ecore_models:
self.ecore_models = shlex.split(self.ecore_models, comments=True)
# parse output option
tokens = shlex.split(self.output, comments=True)
self.output = collections.defaultdict(lambda: None)
for token in tokens:
model, output = token.split('=', 1)
# check if model and output are specified
if model and output:
# add relative output path to dictionary
output_path = pathlib.Path(output).relative_to('.')
if model == 'default':
self.output.default_factory = lambda: output_path
else:
self.output[model] = output_path
else:
                logger.warn('Ignoring invalid output specifier %r.', token)
# parse user-modules option
tokens = shlex.split(self.user_modules, comments=True)
self.user_modules = {}
for token in tokens:
model, user_module = token.split('=', 1)
# check if model and user module are specified
if model and user_module:
self.user_modules[model] = user_module
else:
                logger.warn('Ignoring invalid user module specifier %r.', token)
def _configure_logging(self):
"""Configure logging using global verbosity level of distutils."""
loglevel_map = collections.defaultdict(lambda: logging.WARNING)
loglevel_map.update({
0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG
})
logging.basicConfig(
format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
level=loglevel_map[self.distribution.verbose]
)
def _find_ecore_xmi_files(self, base_path=pathlib.Path('.')):
"""Search for all Ecore XMI files starting from base directory and returns a list of them.
:param base_path: base path to search for Ecore XMI files
:return: a list of all found Ecore XMI files
"""
pattern = '*.{}'.format(self._ECORE_FILE_EXT)
logger.debug('searching for Ecore XMI files in \'{!s}\''.format(str(base_path)))
return sorted(base_path.rglob(pattern))
@staticmethod
@contextlib.contextmanager
def _load_ecore_model(ecore_model_path):
"""Load a single Ecore model from a Ecore XMI file and return the root package.
:param ecore_model_path: path to Ecore XMI file
:return: root package of the Ecore model
"""
rset = pyecore.resources.ResourceSet()
try:
logger.debug('loading \'{!s}\''.format(str(ecore_model_path)))
resource = rset.get_resource(ecore_model_path.as_posix())
yield resource.contents[0]
except Exception:
raise
else:
rset.remove_resource(resource)
def run(self):
"""Perform all tasks necessary to generate Python packages representing the classes from
Ecore models. This process is controlled by the user options passed on the command line or
set internally to default values.
"""
self._configure_logging()
# find Ecore XMI files
ecore_xmi_files = self._find_ecore_xmi_files()
# load each Ecore model
for ecore_xmi_file in ecore_xmi_files:
with self._load_ecore_model(ecore_xmi_file) as resource:
if self.ecore_models is None or resource.name in self.ecore_models:
# configure EcoreGenerator
kwargs = {}
if self.auto_register_package:
kwargs['auto_register_package'] = True
if resource.name in self.user_modules:
kwargs['user_module'] = self.user_modules[resource.name]
if self.output[resource.name]:
output_dir = self.output[resource.name]
else:
output_dir = ecore_xmi_file.parent
# generate Python classes
logger.info(
'running pyecoregen to generate code for {!r} metamodel'.format(resource.name)
)
pyecoregen.ecore.EcoreGenerator(**kwargs).generate(
resource,
output_dir.as_posix()
)
else:
logger.debug('skipping {!r} metamodel'.format(resource.name))
| 2.34375 | 2 |
terseparse/root_parser.py | jthacker/terseparse | 1 | 12794188 | <reponame>jthacker/terseparse
import sys
import logging
from argparse import ArgumentParser, RawTextHelpFormatter, _SubParsersAction, SUPPRESS
from collections import namedtuple, OrderedDict
def is_subparser(action_group):
for a in action_group._group_actions:
if isinstance(a, _SubParsersAction):
return True
return False
class CustomHelpFormatter(RawTextHelpFormatter):
def __init__(self, *args, **kwargs):
super(CustomHelpFormatter, self).__init__(*args, **kwargs)
self._action_max_length = 10
class Lazy(object):
"""Lazily load a default argument after the args have been parsed"""
def __init__(self, val):
"""Initialize a lazy object.
Args:
val -- if object is callable, it should take one parameter.
The arguments namespace object is passed if callable.
"""
self.val = val
def __call__(self, parsed_args_namespace):
if callable(self.val):
return self.val(parsed_args_namespace)
return self.val
class ParsedArgsNamespace(object):
def __init__(self, keywords, defaults):
self._keywords = keywords
self._defaults = defaults or {}
self._fields = set(list(self._keywords.keys()) + list(self._defaults.keys()))
def __getattr__(self, key):
if key not in self._fields:
            raise AttributeError('%r object has no attribute %r' % (type(self), key))
val = self._keywords.get(key)
if val is not None:
if isinstance(val, Lazy):
val = val(self)
self._keywords[key] = val
return val
val = self._defaults.get(key)
if val is not None:
if callable(val):
val = val(self)
self._keywords[key] = val
return val
def __contains__(self, key):
return key in self._fields
def __getitem__(self, key):
return self.__getattr__(key)
def __iter__(self):
return iter((k, self[k]) for k in self._fields)
def __dir__(self):
return sorted(set(dir(type(self)) + self._fields))
def __repr__(self):
return 'ParsedArgsNamespace(%r, %r)' % (self._keywords, self._defaults)
class ParsedArgs(object):
def __init__(self, keywords, defaults):
self.ns = ParsedArgsNamespace(keywords, defaults)
def names(self):
return self.ns._fields
def pprint(self):
spacer = ' ' * 4
names = self.names()
arg_len = max(3, max(map(len, names)))
hfmt = '{:{}}'+spacer+'{}'
lfmt = '{:{}}'+spacer+'{!r}'
msg = '\n'.join(lfmt.format(name, arg_len, self.ns[name]) for name in names)
title = 'Parsed Arguments:'
header = hfmt.format('arg', arg_len, 'value') + '\n'
header += hfmt.format('---', arg_len, '-----')
print(title)
print('=' * len(title))
print(header)
print(msg)
class RootParser(ArgumentParser):
"""Private Class."""
@staticmethod
def add_parser(*args, **kwargs):
return RootParser(*args, **kwargs)
def __init__(self, *args, **kwargs):
kwargs['formatter_class'] = CustomHelpFormatter
super(RootParser, self).__init__(*args, **kwargs)
self._debug = False
def error(self, message):
"""Overrides error to control printing output"""
if self._debug:
import pdb
_, _, tb = sys.exc_info()
if tb:
pdb.post_mortem(tb)
else:
pdb.set_trace()
self.print_usage(sys.stderr)
self.exit(2, ('\nERROR: {}\n').format(message))
def format_help(self):
"""Overrides format_help to not print subparsers"""
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups, except SubParsers
for action_group in self._action_groups:
if is_subparser(action_group):
continue
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def parse_args(self, args=None, namespace=None, defaults=None):
if not args:
args = sys.argv[1:]
if len(args) > 0 and args[0] == '--terseparse-debug':
self._debug = True
logging.getLogger().setLevel(logging.DEBUG)
logging.basicConfig()
args = args[1:]
parser = super(RootParser, self)
ns = parser.parse_args(args, namespace)
parsed_args = ParsedArgs(OrderedDict(ns._get_kwargs()), defaults)
if self._debug:
parsed_args.pprint()
return parser, parsed_args
| 2.46875 | 2 |
pytautulli/models/home_stats.py | bdraco/pytautulli | 2 | 12794189 | """PyTautulliApiHomeStats."""
from __future__ import annotations
from pytautulli.models.user import PyTautulliApiUser
from .base import APIResponseType, PyTautulliApiBaseModel
class PyTautulliApiHomeStatsRow(PyTautulliApiUser, PyTautulliApiBaseModel):
"""PyTautulliApiHomeStatsRow"""
_responsetype = APIResponseType.DICT
art: str | None = None
content_rating: str | None = None
count: int | None = None
friendly_name: str | None = None
grandparent_rating_key: str | None = None
grandparent_thumb: str | None = None
guid: str | None = None
labels: list[str] | None = None
last_play: str | None = None
live: bool | None = None
media_type: str | None = None
platform: str | None = None
platform_name: str | None = None
rating_key: int | None = None
row_id: int | None = None
section_id: int | None = None
started: str | None = None
stopped: str | None = None
thumb: str | None = None
title: str | None = None
total_duration: int | None = None
total_plays: int | None = None
user: str | None = None
users_watched: str | None = None
year: int | None = None
class PyTautulliApiHomeStats(PyTautulliApiBaseModel):
"""PyTautulliApiHomeStats."""
_responsetype = APIResponseType.LIST
stat_id: str | None = None
stat_type: str | None = None
stat_title: str | None = None
    rows: list[PyTautulliApiHomeStatsRow] | None = None
def __post_init__(self):
super().__post_init__()
self.rows = [PyTautulliApiHomeStatsRow(row) for row in self.rows or []]
| 2.359375 | 2 |
TextCNN_Siamese/cnn_loaddata_v2.py | Wang-Yikai/Final-Project-for-Natural-Language-Processing-FDU | 6 | 12794190 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 21 15:05:24 2018
@author: Hendry
"""
from read_data import *
from TokenizeSentences import *
import numpy as np
def onehot(data,nClass):
data2 = np.zeros([len(data),nClass])
for i in range(nClass):
data2[np.where(data==i),i]= 1
return data2
def get_text_idx(text,vocab,max_document_length):
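    # Map each token to its vocabulary index (positions beyond the sentence stay 0);
    # out-of-vocabulary tokens fall back to the index of the word 'the'.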
text_array = np.zeros([len(text), max_document_length],dtype=np.int32)
for i,x in enumerate(text):
words = x
for j, w in enumerate(words):
if w in vocab:
text_array[i, j] = vocab[w]
else :
text_array[i, j] = vocab['the']
return text_array
def loaddata(w2v_model,typeOfClassify = 0,useTextsum= 1):
train_bodies = readRawData('train_bodies.csv')
if useTextsum == 0:
trainDocs = TokenizeSentences(splitData(train_bodies,1))
else:
f = open('./fnc_data/train_1.txt','r')
data = f.readlines()
f.close()
trainDocs = TokenizeSentences(data)
trainDocsIdx = np.array(splitData(train_bodies,0)).astype('int')
train_stances = readRawData('train_stances.csv')
trainTitle = TokenizeSentences(splitData(train_stances,0))
trainTitleIdx = np.array(splitData(train_stances,1)).astype('int')
trainRes = np.array(splitData(train_stances,2))
trainRes[np.where(trainRes=='unrelated')]='0'
trainRes[np.where(trainRes=='agree')]='1'
trainRes[np.where(trainRes=='disagree')]='2'
trainRes[np.where(trainRes=='discuss')]='3'
trainRes =trainRes.astype('int')
maxDocLength = 0
for i in range(len(trainDocs)):
maxDocLength = max(maxDocLength,len(trainDocs[i]))
maxTitleLength = 0
for i in range(len(trainTitle)):
maxTitleLength = max(maxTitleLength,len(trainTitle[i]))
trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength)
trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength)
trainTitleDocs = [[] for i in range(len(trainTitle))]
for i in range(len(trainTitle)):
idx = np.where(trainDocsIdx==trainTitleIdx[i])
trainTitleDocs[i]=trainDocs[int(idx[0])]
trainTitleDocs = np.array(trainTitleDocs)
trainDocs = np.array(trainDocs)
trainTitle = np.array(trainTitle)
uniIdx = np.unique(trainTitleIdx)
uniIdxTest = uniIdx[round(0.95*len(uniIdx)):]
validIdx = np.argwhere(trainTitleIdx == uniIdxTest[0])
for i in range(len(uniIdxTest)-1):
validIdx = np.append(validIdx,np.argwhere(trainTitleIdx == uniIdxTest[i+1]))
validIdx = sorted(validIdx)
fullIdx = list(range(len(trainTitleIdx)))
trainIdx = list(set(fullIdx).difference(set(validIdx)))
x1Train = trainTitleDocs[trainIdx]
x2Train = trainTitle[trainIdx]
trainRes = np.array(trainRes)
y0Train = trainRes[trainIdx]
x1Valid = trainTitleDocs[validIdx]
x2Valid = trainTitle[validIdx]
y0Valid = trainRes[validIdx]
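    # typeOfClassify selects the label space: 0 = 4-way stance,
    # 1 = binary related/unrelated, 2 = 3-way stance over related pairs only.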
if typeOfClassify==0:
yValid = onehot(y0Valid,4)
yTrain = onehot(y0Train,4)
elif typeOfClassify==1:
y0Train[y0Train>0]=1
y0Valid[y0Valid>0]=1
yValid = onehot(y0Valid,2)
yTrain = onehot(y0Train,2)
elif typeOfClassify==2:
x1Train = x1Train[y0Train>0]
x2Train = x2Train[y0Train>0]
y0Train = y0Train[y0Train>0]-1
x1Valid = x1Valid[y0Valid>0]
x2Valid = x2Valid[y0Valid>0]
y0Valid = y0Valid[y0Valid>0]-1
yValid = onehot(y0Valid,3)
yTrain = onehot(y0Train,3)
vocab_size = len(w2v_model.vocab_hash)
return x1Train, x1Valid, x2Train, x2Valid, yTrain, yValid, vocab_size
| 2.5625 | 3 |
tests/context.py | mrcagney/googlemaps_helpers | 1 | 12794191 | import os
import sys
from pathlib import Path
sys.path.insert(0, os.path.abspath('..'))
import googlemaps_helpers
ROOT = Path('.')
DATA_DIR = Path('tests/data') | 1.789063 | 2 |
HW2/dataset_setup.py | yusufdalva/CS550_Assignments | 1 | 12794192 | <filename>HW2/dataset_setup.py
import os
import shutil
import wget
import numpy as np
DATA_FILE_NAMES = ["train1", "test1", "train2", "test2"]
BASE_URL = "http://www.cs.bilkent.edu.tr/~gunduz/teaching/cs550/documents"
FILE_URLS = [os.path.join(BASE_URL, file_name) for file_name in DATA_FILE_NAMES]
def download_data(file_names, file_urls):
cwd = os.getcwd()
if os.path.isdir(os.path.join(cwd, "data")):
shutil.rmtree(os.path.join(cwd, "data"))
folder_path = os.path.join(cwd, "data")
os.mkdir(folder_path)
file_paths = []
for file_idx in range(len(file_names)):
file_path = os.path.join(folder_path, file_names[file_idx] + ".txt")
file_paths.append(file_path)
wget.download(file_urls[file_idx], file_path)
print("\nDOWNLOAD COMPLETED")
return file_paths
class Dataset:
def __init__(self, train_data_path, test_data_path):
self.train_data = self.read_data(train_data_path)
self.test_data = self.read_data(test_data_path)
self.train_mean = np.mean(self.train_data, axis=0)
self.train_std = np.std(self.train_data, axis=0)
@staticmethod
def read_data(data_path):
train_file = open(data_path, "r")
samples = train_file.readlines()
sample_data = [list(map(float, sample.split())) for sample in samples]
return np.array(sample_data)
def normalize_data(self):
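        # Standardise both splits with the training-set mean and std so that
        # no statistics leak from the test data.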
train_data = (self.train_data - self.train_mean) / self.train_std
test_data = (self.test_data - self.train_mean) / self.train_std
return train_data, test_data
def denormalize_samples(self, samples):
denormalized = samples * self.train_std + self.train_mean
return denormalized
def denormalize_labels(self, labels):
denormalized = labels * self.train_std[1] + self.train_mean[1]
return denormalized
| 2.90625 | 3 |
pfp/fuzz/__init__.py | bannsec/pfp | 1 | 12794193 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the base classes used
when defining mutation strategies for pfp
"""
import glob
import os
import six
get_strategy = None
StratGroup = None
FieldStrat = None
def init():
global get_strategy
global StratGroup
global FieldStrat
import pfp.fuzz.strats
get_strategy = pfp.fuzz.strats.get_strategy
StratGroup = pfp.fuzz.strats.StratGroup
FieldStrat = pfp.fuzz.strats.FieldStrat
# load all of the built-in strategies
for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), "*.py")):
filename = os.path.basename(strat_file)
if filename in ["__init__.py", "base.py"]:
continue
mod_name = filename.replace(".py", "").replace(".pyc", "")
__import__("pfp.fuzz." + mod_name)
def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False):
"""Mutate the provided field (probably a Dom or struct instance) using the
strategy specified with ``strat_name_or_class``, yielding ``num`` mutations
that affect up to ``at_once`` fields at once.
This function will yield back the field after each mutation, optionally
also yielding a ``set`` of fields that were mutated in that iteration (if ``yield_changed`` is
``True``). It should also be noted that the yielded set of changed fields *can*
be modified and is no longer needed by the mutate() function.
:param pfp.fields.Field field: The field to mutate (can be anything, not just Dom/Structs)
:param strat_name_or_class: Can be the name of a strategy, or the actual strategy class (not an instance)
:param int num: The number of mutations to yield
:param int at_once: The number of fields to mutate at once
:param bool yield_changed: Yield a list of fields changed along with the mutated dom
:returns: generator
"""
import pfp.fuzz.rand as rand
init()
strat = get_strategy(strat_name_or_cls)
to_mutate = strat.which(field)
with_strats = []
for to_mutate_field in to_mutate:
field_strat = strat.get_field_strat(to_mutate_field)
if field_strat is not None:
with_strats.append((to_mutate_field, field_strat))
# we don't need these ones anymore
del to_mutate
# save the current value of all subfields without
# triggering events
field._pfp__snapshot(recurse=True)
count = 0
for x in six.moves.range(num):
chosen_fields = set()
idx_pool = set([x for x in six.moves.xrange(len(with_strats))])
# modify `at_once` number of fields OR len(with_strats) number of fields,
# whichever is lower
for at_onces in six.moves.xrange(min(len(with_strats), at_once)):
# we'll never pull the same idx from idx_pool more than once
# since we're removing the idx after choosing it
rand_idx = rand.sample(idx_pool, 1)[0]
idx_pool.remove(rand_idx)
rand_field,field_strat = with_strats[rand_idx]
chosen_fields.add(rand_field)
field_strat.mutate(rand_field)
if yield_changed:
yield field, chosen_fields
else:
# yield back the original field
yield field
# restore the saved value of all subfields without
# triggering events
field._pfp__restore_snapshot(recurse=True)
| 2.484375 | 2 |
library/source1/mdl/v44/__init__.py | anderlli0053/SourceIO | 0 | 12794194 | from .mdl_file import MdlV44
| 1.09375 | 1 |
Course/functions/example_19.py | zevgenia/Python_shultais | 0 | 12794195 | <filename>Course/functions/example_19.py
# -*- coding: utf-8 -*-
def func(b):
b[0] = 1
def func2(b):
b = b[:]
b[0] = 2
def func3(b):
b = b.copy()
b[0] = 3
l = ['one', 'two', 'three']
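# func receives a slice copy, and func2/func3 copy the list internally,
# so the original list l is printed unchanged all three times.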
func(l[:])
print("l1:", l)
func2(l)
print("l2:", l)
func3(l)
print("l3:", l) | 3.5625 | 4 |
eeyore/linalg/is_pos_def.py | papamarkou/eeyore | 6 | 12794196 | import torch
def is_pos_def(x):
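    # A real matrix is positive definite iff it is symmetric and its Cholesky
    # factorisation succeeds, so attempt the factorisation and catch the failure.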
if torch.equal(x, x.t()):
try:
torch.linalg.cholesky(x)
return True
except RuntimeError:
return False
else:
return False
| 2.671875 | 3 |
QSQuantifier/DataConnection.py | lizhuangzi/QSQuantificationCode | 2 | 12794197 | <gh_stars>1-10
#!/usr/bin/env python
# coding=utf-8
from pymongo import MongoClient
def startConnection(ip = 'localhost',port = 27017,dbname = 'StockDatas'):
client = MongoClient(ip,port)
# try:
# client.admin.command('ismaster')
# except Exception as e:
# print('Server not available')
db = client[dbname]
# print(db.collection_names(False))
if db == None:
print("db not exist")
else:
print("connect success")
return db
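# Example usage (assumes a MongoDB server on localhost:27017; collection name is illustrative):
#   db = startConnection()
#   collection = db['SomeCollection']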
| 2.703125 | 3 |
barbeque/shortcuts.py | moccu/barbeque | 5 | 12794198 | <reponame>moccu/barbeque
from django.shortcuts import _get_queryset
def get_object_or_none(klass, *args, **kwargs):
"""Return an object or ``None`` if the object doesn't exist."""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
return None
| 2.140625 | 2 |
problems/DivisionTwo_Practice/solution.py | Pactionly/SpringComp2019 | 1 | 12794199 | print("Hello World from Division Two!")
| 1.328125 | 1 |
story/serializers.py | mshirdel/socialnews | 0 | 12794200 | from rest_framework import serializers
from accounts.serializers import UserSerializer
from .models import Story
class StorySerializer(serializers.ModelSerializer):
user = UserSerializer(read_only=True)
class Meta:
model = Story
fields = [
"id",
"title",
"slug",
"story_url",
"story_body_text",
"number_of_comments",
"number_of_votes",
"url_domain_name",
"rank",
"user",
]
read_only_fields = [
"number_of_comments",
"number_of_votes",
"url_domain_name",
"rank",
"slug",
]
def validate(self, data):
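        # Reject submissions that provide neither a link nor a text body.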
story_url = data.get("story_url", None)
story_body_text = data.get("story_body_text", None)
if story_url is None and story_body_text is None:
raise serializers.ValidationError(
"One of story_url or story_body_text is required."
)
return data
def create(self, validated_data):
user = self.context.get("user")
story = Story.objects.create(user=user, **validated_data)
return story
| 2.671875 | 3 |
development/server/algorithm/tf_faster_rcnn/data_processing/small_tools/get_you_need_label_img.py | FMsunyh/re_com | 0 | 12794201 | <filename>development/server/algorithm/tf_faster_rcnn/data_processing/small_tools/get_you_need_label_img.py<gh_stars>0
# -*- coding: utf-8 -*-
# @Time : 5/24/2018
# @Author : CarrieChen
# @File : get_you_need_label_img.py
# @Software: ZJ_AI
# This script reads labels from an Excel sheet, finds the corresponding images and inserts them into a Word document.
import xlrd
import docx
from PIL import Image
import os
import xlwt
import io_utils
from PIL import ImageDraw
from PIL import ImageFont
#some paths
parent_path="C:\\Users\\Administrator\\Desktop\\data_processing_carriechen\\count_all_annotations"
excel_path=parent_path+"\\本批商品列表.xls"
img_path=parent_path+"\\pic+lab166"
refer_166classes=parent_path+"\\166_classes_list.xls"
this_batch_imgs_path=parent_path+"\\本批商品图例"
def get_labels(input_path):
data=xlrd.open_workbook(input_path)
table=data.sheets()[0]
labels=table.col_values(0)
return labels
def get_chinese(input_path,pointlabel): #excel
data=xlrd.open_workbook(input_path)
table=data.sheets()[0]
labels=table.col_values(4)
if pointlabel in labels: #else
row=labels.index(pointlabel)
product=table.cell(row,0).value
taste = table.cell(row, 1).value
weight=table.cell(row,2).value
package=table.cell(row,3).value
return product,taste,weight,package
def find_imgs_and_write_word(labels,parent_path):
file=docx.Document()
for i in range(len(labels)):
img=img_path+"\\"+labels[i]+".jpg"
product,taste,weight,package=get_chinese(refer_166classes,labels[i])
file.add_picture(img)
file.add_paragraph(product+taste+weight+package+" "+labels[i])
file.add_paragraph("\n")
    file.save(parent_path+"\\"+"本批商品图例.doc") # name of the generated Word document
def find_imgs_and_save_as_imgs(labels, parent_path):
io_utils.mkdir(this_batch_imgs_path)
for i in range(len(labels)):
background = Image.open("C:\\Users\\Administrator\\Desktop\\data_processing_carriechen\\count_all_annotations\\pure_white_background.jpg")
img = img_path + "\\" + labels[i] + ".jpg"
product, taste, weight, package = get_chinese(refer_166classes, labels[i])
img=Image.open(img)
background.paste(img,[100,50])
draw = ImageDraw.Draw(background)
width, height = background.size
setFont = ImageFont.truetype('C:\Windows\Fonts\\simfang.ttf', 30)
fillColor = "black"
draw.text((10, height - 100), u"\""+labels[i]+"\"", font=setFont, fill=fillColor)
draw.text((10, height - 50), u"\"" + product+taste+weight+package + "\"", font=setFont, fill=fillColor)
background.save(this_batch_imgs_path+"\\"+labels[i]+".jpg")
if __name__ =="__main__":
labels=get_labels(excel_path)
#find_imgs_and_write_word(labels,parent_path)
find_imgs_and_save_as_imgs(labels,parent_path)
| 2.5625 | 3 |
xy_python_utils/image_utils.py | yxiong/xy_python_utils | 0 | 12794202 | #!/usr/bin/env python
#
# Author: <NAME>.
# Created: Dec 11, 2014.
"""Some utility functions to handle images."""
import math
import numpy as np
import PIL.Image
from PIL.Image import ROTATE_180, ROTATE_90, ROTATE_270, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT
import skimage.transform
def imcast(img, dtype, color_space="default"):
"""Cast the input image to a given data type.
Parameters
----------
img: ndarray
The input image.
dtype: np.dtype
The type that output image to be cast into.
color_space: string, optional
The color space of the input image, which affects the casting operation.
Returns
-------
The output image that is cast into `dtype`.
Notes
-----
* For `color_space=="default"`, we perform a linear scaling with following
range conventions:
* `np.uint8`: `[0, 255]`;
* `np.uint16`: `[0, 65535]`;
* `np.float32` and `np.float64`: `[0.0, 1.0]`.
For example, if the input `img` is of `np.uint8` type and the expected
`dtype` is `np.float32`, then the output will be
`np.asarray(img / 255., np.float32)`.
* For `color_space=="CIE-L*a*b*"`, the "normal" value ranges are
`0 <= L <= 100, -127 <= a, b <= 127`, and we perform the following cast:
* `np.uint8`: `L <- L * 255 / 100, a <- a + 128, b <- b + 128`;
* `np.uint16`: currently not supported;
* `np.float32` and `np.float64`: left as is.
"""
if img.dtype == dtype:
return img
if color_space == "default":
if dtype == np.uint8:
if img.dtype == np.uint16:
return np.asarray(img / 257, np.uint8)
elif img.dtype == np.float32 or img.dtype == np.float64:
return np.asarray(img * 255., np.uint8)
elif dtype == np.uint16:
if img.dtype == np.uint8:
return np.asarray(img, np.uint16) * 257
elif img.dtype == np.float32 or img.dtype == np.float64:
return np.asarray(img * 65535., np.uint16)
elif dtype == np.float32 or dtype == np.float64:
if img.dtype == np.uint8:
return np.asarray(img, dtype) / 255.
elif img.dtype == np.uint16:
return np.asarray(img, dtype) / 65535.
elif img.dtype == np.float32 or img.dtype == np.float64:
return np.asarray(img, dtype)
elif color_space == "CIE-L*a*b*":
if dtype == np.uint8:
if img.dtype == np.float32 or img.dtype == np.float64:
dst = np.empty(img.shape, np.uint8)
dst[:,:,0] = img[:,:,0] * 255. / 100.
dst[:,:,1] = img[:,:,1] + 128.
dst[:,:,2] = img[:,:,2] + 128.
return dst
elif dtype == np.float32 or dtype == np.float64:
if img.dtype == np.uint8:
dst = np.empty(img.shape, dtype)
dst[:,:,0] = np.asarray(img[:,:,0], dtype) / 255. * 100.
dst[:,:,1] = np.asarray(img[:,:,1], dtype) - 128.
dst[:,:,2] = np.asarray(img[:,:,2], dtype) - 128.
return dst
raise Exception(
"Unexpected conversion from '%s' to '%s' with '%s' color space" % \
(img.dtype, dtype, color_space))
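# Example (sketch): convert a uint8 image to float32 in [0.0, 1.0]:
#   img_f = imcast(img_u8, np.float32)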
def imread(filename, dtype=np.uint8, color_space="default"):
"""Read the image followed by an :py:func:`imcast`."""
img = PIL.Image.open(filename)
if img.mode != "RGB":
img = img.convert("RGB")
if hasattr(img, "_getexif"):
try:
exif = img._getexif() or {}
except IOError:
exif = {}
orientation = exif.get(0x0112)
if orientation:
# see http://park2.wakwak.com/~tsuruzoh/Computer/Digicams/exif-e.html
# for explanation of the magical constants
# or see http://jpegclub.org/exif_orientation.html for a nice visual explanation
# also, rotations are counter-clockwise in PIL
orientation = int(orientation)
rotation = [None, None, ROTATE_180, None, ROTATE_270, ROTATE_270, ROTATE_90, ROTATE_90]
flip = [None, FLIP_LEFT_RIGHT, None, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT, None,
FLIP_LEFT_RIGHT, None]
orientation0 = orientation - 1 # it's 1-indexed per the EXIF spec
if 0 <= orientation0 < len(rotation):
if rotation[orientation0] is not None:
img = img.transpose(rotation[orientation0])
if flip[orientation0] is not None:
img = img.transpose(flip[orientation0])
return imcast(np.array(img), dtype, color_space)
def imwrite(filename, img, dtype=np.uint8, color_space="default"):
"""Perform an :py:func:`imcast` before writing to the output file."""
import scipy.misc
return scipy.misc.imsave(filename, imcast(img, dtype, color_space))
def imresize(img, size):
"""Resize the input image.
Parameters
----------
img: ndarray
The input image to be resized.
size: a scalar for `scale` or a 2-tuple for `(num_rows, num_cols)`
One of the `num_rows` or `num_cols` can be -1, which will be inferred
such that the output image has the same aspect ratio as the input.
Returns
-------
The resized image.
"""
if hasattr(size, "__len__"):
num_rows, num_cols = size
assert (num_rows > 0) or (num_cols > 0)
if num_rows < 0:
num_rows = num_cols * img.shape[0] / img.shape[1]
if num_cols < 0:
num_cols = num_rows * img.shape[1] / img.shape[0]
else:
num_rows = int(round(img.shape[0] * size))
num_cols = int(round(img.shape[1] * size))
return skimage.transform.resize(img, (num_rows, num_cols))
def create_icon_mosaic(icons, icon_shape=None,
border_size=1, border_color=None, empty_color=None,
mosaic_shape=None, mosaic_dtype=np.float):
"""Create a mosaic of image icons.
Parameters
----------
icons: a list of `ndarray`s
A list of icons to be put together for mosaic. Currently we require all
icons to be multi-channel images of the same size.
icon_shape: 3-tuple, optional
The shape of icons in the output mosaic as `(num_rows, num_cols, num_channels)`.
If not specified, use the shape of first image in `icons`.
border_size: int, optional
The size of border.
border_color: 3-tuple, optional
The color of border, black if not specified.
empty_color: 3-tuple, optional
The color for empty cells, black if not specified.
mosaic_shape: 2-tuple, optional
The shape of output mosaic as `(num_icons_per_row,
num_icons_per_col)`. If not specified, try to make a square mosaic
according to number of icons.
mosaic_dtype: dtype
The data type of output mosaic.
Returns
-------
The created mosaic image.
"""
# Set default parameters.
num_icons = len(icons)
assert num_icons > 0
if icon_shape is None:
icon_shape = icons[0].shape
assert len(icon_shape) == 3
num_channels = icon_shape[2]
if border_color is None:
border_color = np.zeros(num_channels)
if empty_color is None:
empty_color = np.zeros(num_channels)
if mosaic_shape is None:
num_cols = int(math.ceil(math.sqrt(num_icons)))
num_rows = int(math.ceil(float(num_icons) / num_cols))
mosaic_shape = (num_rows, num_cols)
mosaic_image_shape = (
mosaic_shape[0] * icon_shape[0] + (mosaic_shape[0]-1) * border_size,
mosaic_shape[1] * icon_shape[1] + (mosaic_shape[1]-1) * border_size,
icon_shape[2])
# Create mosaic image and fill with border color.
mosaic_image = np.empty(mosaic_image_shape, dtype=mosaic_dtype)
    for c in range(mosaic_image.shape[2]):
mosaic_image[:,:,c] = border_color[c]
# Fill in the input icons.
    for idx in range(num_icons):
        i = idx // mosaic_shape[1]
        j = idx % mosaic_shape[1]
iStart = i * (icon_shape[0] + border_size)
jStart = j * (icon_shape[1] + border_size)
mosaic_image[iStart:iStart+icon_shape[0],
jStart:jStart+icon_shape[1],:] = icons[idx]
# Fill the empty icons with empty colors.
    for idx in range(num_icons, mosaic_shape[0]*mosaic_shape[1]):
        i = idx // mosaic_shape[1]
j = idx % mosaic_shape[1]
iStart = i * (icon_shape[0] + border_size)
jStart = j * (icon_shape[1] + border_size)
        for c in range(mosaic_image.shape[2]):
mosaic_image[iStart:iStart+icon_shape[0],
jStart:jStart+icon_shape[1],c] = empty_color[c]
return mosaic_image
def image_size_from_file(filename):
"""Read the image size from a file.
This function only loads but the image header (rather than the whole
rasterized data) in order to determine its dimension.
Parameters
----------
filename: string
The input image file.
Returns
-------
The 2-tuple for image size `(num_rows, num_cols)`.
"""
with PIL.Image.open(filename) as img:
width, height = img.size
return height, width
| 3.390625 | 3 |
main.py | takat0m0/test_solve_easy_maze_with_Q | 0 | 12794203 | # -*- coding:utf-8 -*-
import os
import sys
import numpy as np
from simulater import Simulater
from play_back import PlayBack, PlayBacks
COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT']
def get_max_command(target_dict):
return max([(v,k) for k,v in target_dict.items()])[1]
def simplify(command):
return command[0]
def print_Q(Q, x, y):
ret = []
for i in range(y):
ret.append(['0' for _ in range(x)])
for k in Q:
ret[k[1]][k[0]] = simplify(get_max_command(Q[k]))
for this_line in ret:
print(''.join(this_line))
if __name__ == '__main__':
# parameters
file_name = 'default.txt'
epoch_num = 1000
max_trial = 5000
gamma = 0.1
alpha = 0.1
epsilon = 0.5
# make simulater
sim = Simulater(file_name)
# initialize Q value
x, y = sim.map_size()
Q = {}
for i in range(x):
for j in range(y):
Q[(i, j)] = {_:np.random.normal() for _ in COMMAND}
#Q[(i, j)] = {_:0.0 for _ in COMMAND}
# main
minimum_pbs = None
for epoch in range(epoch_num):
sim.reset()
this_pbs = PlayBacks()
for i in range(max_trial):
# get current
current_x, current_y = sim.get_current()
# select_command
tmp_Q = Q[(current_x, current_y)]
command = get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND)
current_value = tmp_Q[command]
# reward
reward = sim(command)
# update
next_x, next_y = sim.get_current()
next_max_command = get_max_command(Q[(next_x, next_y)])
next_value = Q[(next_x, next_y)][next_max_command]
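            # Standard Q-learning TD update:
            # Q(s, a) += alpha * (r + gamma * max_a' Q(s', a') - Q(s, a))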
tmp_Q[command] += alpha * (reward + gamma * next_value - current_value)
# play back
this_pbs.append(PlayBack((current_x, current_y),
command,
(next_x, next_y),
reward))
# end check
if sim.end_episode():
print('find goal')
epsilon *= 0.95
if epsilon < 0.05:
epsilon = 0.05
if minimum_pbs is None:
minimum_pbs = this_pbs
elif len(minimum_pbs) > len(this_pbs):
minimum_pbs = this_pbs
print(epsilon)
break
# update with minimum_pbs
if minimum_pbs is not None:
for pb in minimum_pbs:
tmp_Q = Q[pb.state]
current_value = tmp_Q[pb.action]
next_Q = Q[pb.next_state]
next_max_command = get_max_command(next_Q)
next_value = next_Q[next_max_command]
tmp_Q[pb.action] += alpha * (pb.reward + gamma * next_value - current_value)
sim.printing()
print('---')
print_Q(Q, x, y)
print('---')
| 2.71875 | 3 |
pyvac/helpers/holiday.py | sayoun/pyvac | 21 | 12794204 | <reponame>sayoun/pyvac
# import time
import logging
import calendar
from datetime import datetime
from workalendar.europe import France, Luxembourg
from workalendar.usa import California
from workalendar.asia import Taiwan
log = logging.getLogger(__file__)
conv_table = {
'fr': France,
'us': California,
'zh': Taiwan,
'lu': Luxembourg,
}
override = {}
def init_override(content):
"""Load a yaml file for holidays override.
You can override holidays for a country and a year through
usage of a configuration setting:
pyvac.override_holidays_file = %(here)s/conf/holidays.yaml
here is a sample:
zh:
2016:
'2016-01-01': 'New Years Day'
'2016-02-07': 'Chinese New Years Eve'
"""
if not content:
return
override.update(content)
def utcify(date):
""" return an UTC datetime from a Date object """
return calendar.timegm(date.timetuple()) * 1000
def get_holiday(user, year=None, use_datetime=False):
""" return holidays for user country
format is unixtime for javascript
"""
klass = conv_table[user.country]
cal = klass()
current_year = year or datetime.now().year
next_year = current_year + 1
# retrieve Dates from workalendar
holiday_current_raw = [dt for dt, _ in cal.holidays(current_year)]
holiday_next_raw = [dt for dt, _ in cal.holidays(next_year)]
if user.country in override and current_year in override[user.country]:
holiday_current_raw = [datetime.strptime(dt, '%Y-%m-%d')
for dt in
override[user.country][current_year]]
if user.country in override and next_year in override[user.country]:
holiday_next_raw = [datetime.strptime(dt, '%Y-%m-%d')
for dt in
override[user.country][next_year]]
if not use_datetime:
# must cast to javascript timestamp
holiday_current = [utcify(dt) for dt in holiday_current_raw]
holiday_next = [utcify(dt) for dt in holiday_next_raw]
else:
# must cast to datetime as workalendar returns only Date objects
holiday_current = [datetime(dt.year, dt.month, dt.day)
for dt in holiday_current_raw]
holiday_next = [datetime(dt.year, dt.month, dt.day)
for dt in holiday_next_raw]
return holiday_current + holiday_next
| 2.5625 | 3 |
utils.py | wuch15/Sentiment-debiasing | 0 | 12794205 | from numpy.linalg import cholesky
import numpy as np
def senti2cate(x):
if x<=-0.6:
return 0
elif x>-0.6 and x<=-0.2:
return 1
elif x>-0.2 and x<0.2:
return 2
elif x>=0.2 and x<0.6:
return 3
elif x>=0.6:
return 4
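# DCG@k over the top-k ranked items: sum_i (2^rel_i - 1) / log2(i + 2),
# with NDCG normalising by the ideal (perfectly sorted) DCG.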
def dcg_score(y_true, y_score, k=10):
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
gains = 2 ** y_true - 1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10):
best = dcg_score(y_true, y_true, k)
actual = dcg_score(y_true, y_score, k)
return actual / best
def mrr_score(y_true, y_score):
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order)
rr_score = y_true / (np.arange(len(y_true)) + 1)
return np.sum(rr_score) / np.sum(y_true)
def auc(label,score):
label=np.array(label)
score=np.array(score)
false_score = score[label==0]
positive_score = score[label==1]
num_positive = (label==1).sum()
num_negative = (label==0).sum()
positive_score = positive_score.reshape((num_positive,1))
positive_score = np.repeat(positive_score,num_negative,axis=1)
false_score = false_score.reshape((1,num_negative))
false_score = np.repeat(false_score,num_positive,axis=0)
return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean())
def embedding(embfile,word_dict):
emb_dict = {}
with open(embfile,'rb')as f:
while True:
line = f.readline()
if len(line) == 0:
break
data = line.split()
word = data[0].decode()
if len(word) != 0:
vec = [float(x) for x in data[1:]]
if word in word_dict:
emb_dict[word] = vec
emb_table = [0]*len(word_dict)
dummy = np.zeros(300,dtype='float32')
all_emb = []
for i in emb_dict:
emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32')
all_emb.append(emb_table[word_dict[i][0]])
all_emb = np.array(all_emb,dtype='float32')
mu = np.mean(all_emb, axis=0)
Sigma = np.cov(all_emb.T)
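    # Words without a pretrained vector get an embedding drawn from a Gaussian
    # fitted to the mean and covariance of the known embeddings.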
norm = np.random.multivariate_normal(mu, Sigma, 1)
for i in range(len(emb_table)):
if type(emb_table[i]) == int:
emb_table[i] = np.reshape(norm, 300)
emb_table[0] = np.random.uniform(-0.03,0.03,size=(300,))
emb_table = np.array(emb_table,dtype='float32')
return emb_table | 2.703125 | 3 |
sjb/hack/determine_install_upgrade_version_test.py | brenton/aos-cd-jobs | 45 | 12794206 | <reponame>brenton/aos-cd-jobs
import unittest
from determine_install_upgrade_version import *
class TestPackage(object):
def __init__(self, name, version, release, epoch, vra, pkgtup):
self.name = name
self.version = version
self.release = release
self.epoch = epoch
self.vra = vra
self.pkgtup = pkgtup
def __eq__(self, other):
return self.__dict__ == other.__dict__
@classmethod
def create_test_packages(self, test_pkgs):
test_pkgs_objs = []
for pkg in test_pkgs:
pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg)
pkg_vra = pkg_version + "-" + pkg_release + "." + pkg_arch
pkg_tup = (pkg_name , pkg_arch, pkg_epoch, pkg_version, pkg_release)
test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup))
return test_pkgs_objs
class RemoveDuplicatePackages(unittest.TestCase):
"Test for `determine_install_upgrade_version.py`"
def test_removing_single_duplicate_package(self):
""" when is multiple duplicate packages, return only one """
test_pkgs = ["origin-1.4.1-1.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64"]
test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
result_pkgs_objs = test_pkgs_objs[:2]
self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs)
def test_removing_no_duplicate_package(self):
""" when there is no duplicate package, return the single one """
test_pkgs = ["origin-1.4.1-1.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64"]
test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
result_pkgs_objs = test_pkgs_objs[:2]
self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs)
class GetMatchingVersionTestCase(unittest.TestCase):
"Test for `determine_install_upgrade_version.py`"
def test_get_matching_versions(self):
""" when only one matching version exist and its pre-release, it is returned """
test_pkgs = ["origin-1.4.1-1.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64"]
test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7'])
def test_with_single_pre_release(self):
""" when only one pre-release version exist, it is returned """
test_pkgs = ["origin-1.5.0-0.4.el7.x86_64"]
test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7'])
def test_with_multiple_pre_release(self):
""" when only one pre-release version exist, it is returned """
test_pkgs = ["origin-1.5.0-0.4.el7.x86_64", "origin-1.5.2-0.1.el7.x86_64"]
test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7'])
def test_with_single_release(self):
""" when both release and pre-release versions exist, only release versions are returned """
test_pkgs = ["origin-1.5.0-0.4.el7.x86_64", "origin-1.5.0-1.1.el7.x86_64"]
test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ["1.5.0-1.1.el7"])
    def test_with_multiple_release(self):
        """ when both release and pre-release versions exist, only release versions are returned """
test_pkgs = ["origin-1.5.0-0.4.el7.x86_64", "origin-1.5.0-1.1.el7.x86_64", "origin-1.5.2-1.1.el7.x86_64"]
test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ["1.5.0-1.1.el7", "1.5.2-1.1.el7"])
def test_with_no_matches(self):
test_pkgs = ["origin-1.2.0-0.4.el7.x86_64", "origin-1.3.0-1.1.el7.x86_64", "origin-1.4.2-1.1.el7.x86_64"]
test_pkgs_objs = TestPackage.create_test_packages(test_pkgs)
self.assertRaises(SystemExit, get_matching_versions, 'origin', test_pkgs_objs, '1.5')
class DetermineSearchVersionTestCase(unittest.TestCase):
"Test for `determine_install_upgrade_version.py`"
def test_origin_with_standard_versioning_schema(self):
""" when the origin version is higher then the first version of the new origin versioning schema - origin-3.6 """
self.assertEqual(determine_search_versions("origin", "3.7.0"), ("3.6", "3.7"))
def test_origin_with_short_standard_versioning_schema(self):
""" when the origin version is in short format and higher then the first version of the new origin versioning schema - origin-3.6 """
self.assertEqual(determine_search_versions("origin", "3.7"), ("3.6", "3.7"))
def test_origin_with_standard_to_legacy_versioning_schema(self):
""" when the origin version is the first from the new origin versioning schema - origin-3.6 """
self.assertEqual(determine_search_versions("origin", "3.6.0"), ("1.5", "3.6"))
def test_origin_with_short_standard_to_legacy_versioning_schema(self):
""" when the origin version is in short format and first from the new origin versioning schema - origin-3.6 """
self.assertEqual(determine_search_versions("origin", "3.6"), ("1.5", "3.6"))
def test_origin_with_legacy_schema(self):
""" when the origin version is in the old versioning schema """
self.assertEqual(determine_search_versions("origin", "1.5.0"), ("1.4", "1.5"))
def test_origin_with_short_legacy_schema(self):
""" when the origin version is in short and old versioning schema """
self.assertEqual(determine_search_versions("origin", "1.5"), ("1.4", "1.5"))
def test_openshift_ansible_with_standard_versioning_schema(self):
""" when openshift-ansible, which doesnt have different versioning schema, is in 3.7 version """
self.assertEqual(determine_search_versions("openshift-ansible", "3.7.0"), ("3.6", "3.7"))
def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self):
""" when openshift-ansible, which doesnt have different versioning schema is in 3.6 version """
self.assertEqual(determine_search_versions("openshift-ansible", "3.6.0"), ("3.5", "3.6"))
def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self):
""" when openshift-ansible, which doesnt have different versioning schema, is in short format and in 3.6 version """
self.assertEqual(determine_search_versions("openshift-ansible", "3.6"), ("3.5", "3.6"))
def test_openshift_ansible_with_legacy_versioning_schema(self):
""" when openshift-ansible, which doesnt have different versioning schema is in 3.4 version """
self.assertEqual(determine_search_versions("openshift-ansible", "3.5.0"), ("3.4", "3.5"))
class SchemaChangeCheckTestCase(unittest.TestCase):
"Test for `determine_install_upgrade_version.py`"
def test_origin_package_with_new_schema(self):
""" when origin package is in 3.6 version """
self.assertEqual(schema_change_check("origin", "3", "6"), "3.6")
def test_origin_package_with_old_schema(self):
""" when origin package is in 1.5 version """
self.assertEqual(schema_change_check("origin", "3", "5"), "1.5")
def test_non_origin_package_with_new_schema(self):
""" when origin package is in 3.6 version """
self.assertEqual(schema_change_check("openshift-ansible", "3", "6"), "3.6")
def test_non_origin_package_with_old_schema(self):
""" when origin package is in 3.5 version """
self.assertEqual(schema_change_check("openshift-ansible", "3", "5"), "3.5")
class GetLastVersionTestCase(unittest.TestCase):
"Test for `determine_install_upgrade_version.py`"
def test_with_multiple_matching_release_versions(self):
""" when multiple matching version are present in released versions """
matching_versions = ["1.2.0-1.el7", "1.2.2-1.el7", "1.2.5-1.el7"]
install_version = "1.2.5-1.el7"
self.assertEqual(get_last_version(matching_versions), install_version)
def test_with_single_matching_release_version(self):
""" when only a single matching version is present in released versions """
matching_versions = ["1.5.0-1.4.el7"]
install_version = "1.5.0-1.4.el7"
self.assertEqual(get_last_version(matching_versions), install_version)
def test_with_multiple_matching_pre_release_versions(self):
""" when multiple matching pre-release version are present in pre-released versions """
matching_versions = ["1.2.0-0.el7", "1.2.2-0.el7", "1.2.5-0.el7"]
install_version = "1.2.5-0.el7"
self.assertEqual(get_last_version(matching_versions), install_version)
def test_with_single_matching_pre_release_version(self):
""" when only single matching pre-release version is present in pre-released versions """
matching_versions = ["1.5.0-0.4.el7"]
install_version = "1.5.0-0.4.el7"
self.assertEqual(get_last_version(matching_versions), install_version)
class SortPackagesTestCase(unittest.TestCase):
"Test for `determine_install_upgrade_version.py`"
def test_sort_packages_with_exceptional_origin_pkg(self):
""" when sorting origin packages with exceptional origin-3.6.0-0.0.alpha.0.1 package """
test_pkgs = ["origin-3.6.0-0.0.alpha.0.1.el7", "origin-3.6.0-0.alpha.0.2.el7"]
properly_sorted_pkgs = ["origin-3.6.0-0.alpha.0.2.el7"]
test_pkgs_obj = TestPackage.create_test_packages(test_pkgs)
properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs)
sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj)
self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj)
def test_sort_packages_with_same_minor_version(self):
""" when sorting origin packages within the same minor version """
test_pkgs = ["origin-1.5.1-1.el7", "origin-1.5.0-1.el7"]
properly_sorted_pkgs = ["origin-1.5.0-1.el7", "origin-1.5.1-1.el7"]
test_pkgs_obj = TestPackage.create_test_packages(test_pkgs)
properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs)
sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj)
self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj)
def test_sort_packages_with_different_minor_version(self):
""" when sorting origin packages with different minor version """
test_pkgs = ["origin-1.5.1-1.el7", "origin-1.4.0-1.el7"]
properly_sorted_pkgs = ["origin-1.4.0-1.el7", "origin-1.5.1-1.el7"]
test_pkgs_obj = TestPackage.create_test_packages(test_pkgs)
properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs)
sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj)
self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj)
if __name__ == '__main__':
unittest.main() | 2.390625 | 2 |
radionica-petak/hamster.py | Levara/pygame-hamster | 0 | 12794207 | <reponame>Levara/pygame-hamster
# include the pygame library
import pygame
import random
# initialize pygame
pygame.init()
# initialize fonts
pygame.font.init()
# define constants for the window size
WIDTH = 1024
HEIGHT = 600
# window size tuple
size = (WIDTH, HEIGHT)
# define colors - google "color picker"
WHITE= ( 255, 255, 255)
BLACK= ( 0, 0, 0 )
BLUE = (0, 0, 255)
# render the welcome text
myfont = pygame.font.SysFont('Arial', 30)
welcome_text = myfont.render("Dobrodosli!", \
False, BLUE)
# get the size of the welcome text
welcome_text_size = welcome_text.get_rect()
welcome_image = pygame.image.load( \
"shark.jpg")
hamster = pygame.image.load("hamster.png")
hamster = pygame.transform.scale(hamster, \
(100, 100) )
# create a new screen for the game
screen = pygame.display.set_mode(size)
# set the window title
pygame.display.set_caption("Nasa kul igra")
clock = pygame.time.Clock()
game_state = "welcome"
done = False
hit = False
hamster_time = 3000
hamster_x, hamster_y = 100, 100
score = 0
while not done:
    # event loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
game_state = "game"
elif event.type == pygame.MOUSEBUTTONDOWN:
if game_state == "game":
pos = event.pos
if hamster_pos.collidepoint(pos):
hit = True
if game_state == "welcome":
screen.fill(WHITE)
screen.blit(welcome_image, (0,0) )
screen.blit(welcome_text, ( 100, 100) )
elif game_state == "game":
if hit:
hamster_time = 3000
hamster_x = random.randint(20, WIDTH)
hamster_y = random.randint(20, HEIGHT)
hit = False
score += 1
if hamster_time < 0 :
game_state = "game_over"
screen.fill(WHITE)
hamster_pos = screen.blit(hamster, \
(hamster_x, hamster_y))
score_text = \
myfont.render("Score: %d!"%score, \
False, BLUE)
screen.blit(score_text, (10, 10) )
hamster_time = hamster_time - clock.get_time()
elif game_state == "game_over":
screen.fill(BLACK)
pygame.display.flip()
    # if necessary, wait before drawing the
    # next frame so the game runs at 60 fps
clock.tick(60)
| 3.109375 | 3 |
mizarlabs/tests/transformers/test_average_uniqueness.py | MizarAI/mizar-labs | 18 | 12794208 | <gh_stars>10-100
import pandas as pd
import pytest
from mizarlabs.static import CLOSE
from mizarlabs.transformers.sampling.average_uniqueness import AverageUniqueness
from mizarlabs.transformers.targets.labeling import TripleBarrierMethodLabeling
@pytest.mark.usefixtures("dollar_bar_dataframe")
def test_average_uniqueness(dollar_bar_dataframe: pd.DataFrame):
"""
Check whether average uniqueness is the same as the manual calculation.
"""
triple_barrier = TripleBarrierMethodLabeling(
num_expiration_bars=3, profit_taking_factor=0.1, stop_loss_factor=0.1
)
target_labels = triple_barrier.fit_transform(dollar_bar_dataframe[[CLOSE]])
target_labels = target_labels.dropna()
avg_uniqueness_transformer = AverageUniqueness()
avg_uniqueness = avg_uniqueness_transformer.transform(target_labels)
assert avg_uniqueness.iloc[22] == 13 / 36
if __name__ == "__main__":
pytest.main([__file__])
| 2.34375 | 2 |
ingestion/8.ingest_qualifying_file.py | iamrahulsen/formula-1-data-analysis | 0 | 12794209 | # Databricks notebook source
# MAGIC %md
# MAGIC ### Ingest qualifying json files
# COMMAND ----------
dbutils.widgets.text("p_data_source", "")
v_data_source = dbutils.widgets.get("p_data_source")
# COMMAND ----------
dbutils.widgets.text("p_file_date", "2021-03-21")
v_file_date = dbutils.widgets.get("p_file_date")
# COMMAND ----------
# MAGIC %run "../includes/configuration"
# COMMAND ----------
# MAGIC %run "../includes/common_functions"
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Step 1 - Read the JSON file using the spark dataframe reader API
# COMMAND ----------
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
# COMMAND ----------
qualifying_schema = StructType(fields=[StructField("qualifyId", IntegerType(), False),
StructField("raceId", IntegerType(), True),
StructField("driverId", IntegerType(), True),
StructField("constructorId", IntegerType(), True),
StructField("number", IntegerType(), True),
StructField("position", IntegerType(), True),
StructField("q1", StringType(), True),
StructField("q2", StringType(), True),
StructField("q3", StringType(), True),
])
# COMMAND ----------
qualifying_df = spark.read \
.schema(qualifying_schema) \
.option("multiLine", True) \
.json(f"{raw_folder_path}/{v_file_date}/qualifying")
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Step 2 - Rename columns and add new columns
# MAGIC 1. Rename qualifyingId, driverId, constructorId and raceId
# MAGIC 1. Add ingestion_date with current timestamp
# COMMAND ----------
qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df)
# COMMAND ----------
from pyspark.sql.functions import lit, current_timestamp
# COMMAND ----------
final_df = qualifying_with_ingestion_date_df.withColumnRenamed("qualifyId", "qualify_id") \
.withColumnRenamed("driverId", "driver_id") \
.withColumnRenamed("raceId", "race_id") \
.withColumnRenamed("constructorId", "constructor_id") \
.withColumn("ingestion_date", current_timestamp()) \
.withColumn("data_source", lit(v_data_source)) \
.withColumn("file_date", lit(v_file_date))
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Step 3 - Write to output to processed container in parquet format
# COMMAND ----------
#overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id')
# COMMAND ----------
merge_condition = "tgt.qualify_id = src.qualify_id AND tgt.race_id = src.race_id"
merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition, 'race_id')
# COMMAND ----------
dbutils.notebook.exit("Success")
# COMMAND ----------
| 2.8125 | 3 |
pool/cmd.py | jan-g/pool-balls | 0 | 12794210 | <filename>pool/cmd.py<gh_stars>0
from __future__ import print_function
import argparse
import os
import sys
import pool.demo
def main():
print(os.getcwd())
print(os.listdir("."))
print(os.listdir("tools"))
parser = argparse.ArgumentParser(
description='ball locator')
parser.add_argument('input', type=argparse.FileType('r'))
parser.add_argument('output', nargs='?', type=argparse.FileType('w'),
default=sys.stdout)
args = parser.parse_args()
assert args.input is not None
assert args.output is not None
pool.demo.demo(args.input)
return 0
if __name__ == '__main__':
main() | 2.515625 | 3 |
src/ctc/protocols/curve_utils/pool_lists.py | fei-protocol/checkthechain | 94 | 12794211 | from __future__ import annotations
import typing
from typing_extensions import TypedDict
from ctc import evm
from ctc import rpc
from ctc import spec
old_pool_factory = '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae'
pool_factory = '0xb9fc157394af804a3578134a6585c0dc9cc990d4'
eth_address = '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee'
creation_blocks = {
'0x0959158b6040d32d04c301a72cbfd6b39e21c9ae': 11942404,
'0xb9fc157394af804a3578134a6585c0dc9cc990d4': 12903979,
}
#
# # call based
#
async def async_get_factory_pool_data(
factory: spec.Address,
include_balances: bool = False,
) -> list[CurvePoolData]:
import asyncio
n_pools = await rpc.async_eth_call(
to_address=factory,
function_name='pool_count',
)
coroutines = [
_async_get_pool_data(p, factory, include_balances=include_balances)
for p in range(n_pools)
]
return await asyncio.gather(*coroutines)
class CurvePoolData(TypedDict):
address: spec.Address
tokens: typing.Sequence[spec.Address]
symbols: typing.Sequence[str]
balances: typing.Sequence[int | float | None]
async def _async_get_pool_data(
p: int,
factory: spec.Address,
include_balances: bool = False,
) -> CurvePoolData:
pool = await rpc.async_eth_call(
to_address=factory,
function_name='pool_list',
function_parameters=[p],
)
coins = await rpc.async_eth_call(
to_address=factory,
function_name='get_coins',
function_parameters=[pool],
)
coins = [coin for coin in coins if coin not in [eth_address]]
valid_coins = [
coin
for coin in coins
if coin
not in ['0x0000000000000000000000000000000000000000', eth_address]
]
symbols = await evm.async_get_erc20s_symbols(
valid_coins,
)
if eth_address in coins:
index = coins.index(eth_address)
symbols.insert(index, 'ETH')
if include_balances:
balances: typing.MutableSequence[int | float | None] = (
await evm.async_get_erc20s_balance_of( # type: ignore
tokens=valid_coins,
address=pool,
)
)
if eth_address in coins:
eth_balance = await evm.async_get_eth_balance(pool)
balances.insert(index, eth_balance)
else:
balances = [None for coin in coins]
return {
'address': pool,
'tokens': coins,
'symbols': symbols,
'balances': balances,
}
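# Usage sketch (illustrative only, not part of the original module): fetching pool
# data for the factory defined above from a plain script, assuming ctc's usual
# provider/network configuration is already set up:
#
#     import asyncio
#     pools = asyncio.run(async_get_factory_pool_data(pool_factory, include_balances=True))
#     for pool in pools:
#         print(pool['address'], list(zip(pool['symbols'], pool['balances'])))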
#
# # event based
#
async def async_get_base_pools(
start_block: typing.Optional[spec.BlockNumberReference] = None,
end_block: typing.Optional[spec.BlockNumberReference] = None,
provider: spec.ProviderSpec = None,
verbose: bool = False,
) -> spec.DataFrame:
import asyncio
import pandas as pd
if start_block is None:
start_block = 12903979
# gather data
coroutines = []
for factory in [old_pool_factory, pool_factory]:
if start_block is None:
factory_start_block = creation_blocks[factory]
else:
factory_start_block = start_block
coroutine = evm.async_get_events(
contract_address=factory,
event_name='BasePoolAdded',
start_block=factory_start_block,
end_block=end_block,
provider=provider,
verbose=verbose,
)
coroutines.append(coroutine)
dfs = await asyncio.gather(*coroutines)
events = pd.concat(dfs)
# format data
events = events.sort_index()
events = events[['contract_address', 'transaction_hash', 'arg__base_pool']]
events = events.rename(
columns={
'contract_address': 'factory',
'arg__base_pool': 'pool',
}
)
return events
async def async_get_plain_pools(
start_block: typing.Optional[spec.BlockNumberReference] = None,
end_block: typing.Optional[spec.BlockNumberReference] = None,
provider: spec.ProviderSpec = None,
verbose: bool = False,
) -> spec.DataFrame:
if start_block is None:
start_block = 12903979
events = await evm.async_get_events(
contract_address=pool_factory,
event_name='PlainPoolDeployed',
start_block=start_block,
end_block=end_block,
provider=provider,
verbose=verbose,
)
events = events[
[
'transaction_hash',
'contract_address',
'arg__coins',
'arg__A',
'arg__fee',
'arg__deployer',
]
]
events = events.rename(
columns={
'contract_address': 'factory',
'arg__coins': 'coins',
'arg__A': 'A',
'arg__fee': 'fee',
'arg__deployer': 'deployer',
}
)
return events
async def async_get_meta_pools(
start_block: typing.Optional[spec.BlockNumberReference] = None,
end_block: typing.Optional[spec.BlockNumberReference] = None,
provider: spec.ProviderSpec = None,
verbose: bool = False,
) -> spec.DataFrame:
import asyncio
import pandas as pd
# gather data
coroutines = []
for factory in [old_pool_factory, pool_factory]:
if start_block is None:
factory_start_block: spec.BlockNumberReference = creation_blocks[
factory
]
else:
factory_start_block = start_block
coroutine = evm.async_get_events(
contract_address=factory,
event_name='MetaPoolDeployed',
start_block=factory_start_block,
end_block=end_block,
provider=provider,
verbose=verbose,
)
coroutines.append(coroutine)
dfs = await asyncio.gather(*coroutines)
events = pd.concat(dfs)
# format data
events = events.sort_index()
events = events[
[
'transaction_hash',
'contract_address',
'arg__coin',
'arg__base_pool',
'arg__A',
'arg__fee',
'arg__deployer',
]
]
events = events.rename(
columns={
'contract_address': 'factory',
'arg__coin': 'coin',
'arg__base_pool': 'base_pool',
'arg__A': 'A',
'arg__fee': 'fee',
'arg__deployer': 'deployer',
}
)
return events
| 2.125 | 2 |
Advent2015/6.py | SSteve/AdventOfCode | 0 | 12794212 | import numpy as np
import re
lineRegex = re.compile(r"(turn on|turn off|toggle) (\d+),(\d+) through (\d+),(\d+)")
def day6(fileName):
lights = np.zeros((1000, 1000), dtype=bool)
with open(fileName) as infile:
for line in infile:
match = lineRegex.match(line)
if match:
for x in range(int(match[2]), int(match[4]) + 1):
for y in range(int(match[3]), int(match[5]) + 1):
if match[1] == "turn on":
lights[y, x] = True
elif match[1] == "turn off":
lights[y, x] = False
elif match[1] == "toggle":
lights[y, x] = not lights[y, x]
else:
raise ValueError(f"Unknown directive: {match[1]}")
print(f"There are {lights.sum()} lights!")
def day6b(fileName):
lights = np.zeros((1000, 1000), dtype=int)
with open(fileName) as infile:
for line in infile:
match = lineRegex.match(line)
if match:
x1 = int(match[2])
x2 = int(match[4])
y1 = int(match[3])
y2 = int(match[5])
if match[1] == "turn on":
lights[y1:y2 + 1, x1:x2 + 1] += 1
elif match[1] == "turn off":
for x in range(x1, x2 + 1):
for y in range(y1, y2 + 1):
lights[y, x] = max(lights[y, x] - 1, 0)
elif match[1] == "toggle":
lights[y1:y2 + 1, x1:x2 + 1] += 2
else:
raise ValueError(f"Unknown directive: {match[1]}")
print(f"Brightness: {lights.sum()}")
#day6("6test.txt")
#day6("6.txt")
day6b("6btest.txt")
day6b("6.txt") #15343601
| 3.0625 | 3 |
client/configuration_monitor.py | fabiomassimo/pyre-check | 1 | 12794213 | <gh_stars>1-10
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import logging
import os
from logging import Logger
from typing import Any, Dict, List
from .analysis_directory import AnalysisDirectory
from .commands import stop
from .configuration import CONFIGURATION_FILE
from .filesystem import is_parent
from .watchman_subscriber import Subscription, WatchmanSubscriber
LOG: Logger = logging.getLogger(__name__)
class ConfigurationMonitor(WatchmanSubscriber):
"""
The ConfigurationMonitor watches only for .pyre_configuration(.local)
files, and will kill the corresponding server and ProjectFileMonitor
when a configuration changes.
Logs are found in .pyre/configuration_monitor/configuration_monitor.log
To kill a monitor, get pid from
.pyre/configuration_monitor/configuration_monitor.pid ; kill <pid>.
"""
def __init__(
self, arguments, configuration, analysis_directory: AnalysisDirectory
) -> None:
super(ConfigurationMonitor, self).__init__(configuration, analysis_directory)
self.arguments = arguments
self.configuration = configuration
self.analysis_directory = analysis_directory
@property
def _name(self) -> str:
return "configuration_monitor"
@property
def _subscriptions(self) -> List[Subscription]:
roots = self._watchman_client.query("watch-list")["roots"]
names = ["pyre_monitor_{}".format(os.path.basename(root)) for root in roots]
subscription = {
"expression": [
"allof",
["type", "f"],
["not", "empty"],
[
"anyof",
["suffix", "pyre_configuration.local"],
["suffix", "pyre_configuration"],
],
],
"fields": ["name"],
}
return [
Subscription(root, name, subscription) for (root, name) in zip(roots, names)
]
def _handle_response(self, response: Dict[str, Any]) -> None:
LOG.info(
"Update to configuration at %s",
os.path.join(response["root"], ",".join(response["files"])),
)
absolute_path = [
os.path.join(response["root"], file) for file in response["files"]
]
# Find the path to the project configuration file and compare it with the
# list of changed configuration files.
project_configuration = os.path.join(response["root"], CONFIGURATION_FILE)
if any((project_configuration == file) for file in absolute_path):
LOG.info("Pyre configuration changed. Stopping pyre server.")
stop.Stop(self.arguments, self.configuration, self.analysis_directory).run()
# TODO(T54088045): Find all local pyre servers running underneath
# and stop them.
else:
LOG.info("None of the changed paths correspond to pyre configuration.")
if self.arguments.local_configuration:
if any(
is_parent(self.arguments.local_configuration, file)
for file in absolute_path
):
LOG.info("Local configuration changed. Stopping pyre server.")
stop.Stop(
self.arguments, self.configuration, self.analysis_directory
).run()
else:
LOG.info(
"None of the changed paths correspond to the current local"
"configuration."
)
| 1.96875 | 2 |
Data.py | happylittlecat2333/travel_simulation | 0 | 12794214 | <filename>Data.py
city_dangers = {'南京': 0.5, '北京': 0.9, '成都': 0.5, '杭州': 0.2, '广州': 0.5, '武汉': 0.9, '上海': 0.9, '重庆': 0.2, '青岛': 0.9,
                '深圳': 0.2, '郑州': 0.5, '西安': 0.2} # risk value of staying in each city
trans_dangers = {"汽车": 2, "火车": 5, "飞机": 9} # risk value of each mode of transport
time_table_values = [] # timetable entries for all scheduled trips
map_values = [] # longitude/latitude info for all cities
map_geo = {} # longitude/latitude of every city, as a dict
all_place = [] # cities that appear in any schedule
def load_data(cursor): # load data from the database
    global time_table_values, map_values, map_geo, all_place # declare as globals so they can be modified inside the function
    cursor.execute("select * from time_table where Tran=? ORDER BY RANDOM() limit 30", ('火车',)) # 30 randomly selected train schedules
    time_table_values = cursor.fetchall()
    cursor.execute("select * from time_table where Tran=? ORDER BY RANDOM() limit 30", ('飞机',)) # randomly selected flight schedules
tmp = cursor.fetchall()
for i in tmp:
time_table_values.append(i)
cursor.execute("select * from time_table where Tran!=? and Tran!=?", ('飞机', '火车')) # 所有汽车班次
tmp = cursor.fetchall()
for i in tmp:
time_table_values.append(i)
for i in time_table_values:
if i[0] not in all_place:
            all_place.append(i[0]) # collect the set of cities across all schedules
cursor.execute("select * from map") # 城市位置:经纬坐标
map_values = cursor.fetchall()
for i in map_values:
if i[0] in all_place:
map_geo[i[0]] = [i[1], i[2]]
print(map_geo)
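# Usage sketch (assumption: a sqlite3 database file, e.g. 'travel.db', containing the
# time_table and map tables queried above):
#     import sqlite3
#     conn = sqlite3.connect('travel.db')
#     load_data(conn.cursor())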
| 2.703125 | 3 |
readthedocs/config/utils.py | tkoyama010/readthedocs.org | 4,054 | 12794215 | """Shared functions for the config module."""
def to_dict(value):
"""Recursively transform a class from `config.models` to a dict."""
if hasattr(value, 'as_dict'):
return value.as_dict()
if isinstance(value, list):
return [
to_dict(v)
for v in value
]
if isinstance(value, dict):
return {
k: to_dict(v)
for k, v in value.items()
}
return value
def list_to_dict(list_):
"""Transform a list to a dictionary with its indices as keys."""
dict_ = {
str(i): element
for i, element in enumerate(list_)
}
return dict_
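# Illustrative examples (not part of the original module):
#     list_to_dict(['build', 'docs'])  ->  {'0': 'build', '1': 'docs'}
#     to_dict([{'image': 'latest'}])   ->  [{'image': 'latest'}]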
| 2.984375 | 3 |
qiandao.py | gyje/tieba_qiandao | 7 | 12794216 | import requests,time,gevent,gevent.monkey,re,os
from threading import Thread
import schedule
from pyquery import PyQuery as pq
gevent.monkey.patch_socket()
url="http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw="
ba_cookie='paste your cookie here'
headers={
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.8',
'Cookie':ba_cookie,
'Host':'tieba.baidu.com',
'Proxy-Connection':'keep-alive',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
}
s=requests.Session()
def run(ba_url,ba_name):
qian_url=ba_url+ba_name
s.get(qian_url,headers=headers)
def go():
taske=[gevent.spawn(run,url,i) for i in ba_name_tuple]
gevent.joinall(taske)
rebuild=re.compile(r"已签到")
def check(ba_name):
content=s.get(url+ba_name,headers=headers).text
return_list=rebuild.findall(content)
if str(return_list)=="['已签到']":
pass
else:
print (ba_name+"-->Error")
def checkth():
for g in ba_name_tuple:
m=Thread(target=check,args=(g,))
m.start()
def writeconfig():
temp=pq(requests.get("http://wapp.baidu.com/",headers={'Cookie':ba_cookie}).content)
ba_all_url="http://"+str([i.attr("href") for i in temp(".my_love_bar").children().items()][-1])[2:]
retemp=re.compile(r">\w*</a>")
ba_name_list=[]
for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]:
ba_name_list.append(i[1:-4])
with open("qd_config.ini","w+",encoding="utf-8") as fob:
fob.write(str(tuple(ba_name_list)))
def checkconfig():
if "qd_config.ini" in os.listdir(os.getcwd()):
pass
else:
writeconfig()
def readconfig():
global ba_name_tuple
with open("qd_config.ini","r",encoding="utf-8") as fob:
ba_name_tuple=eval(fob.read())
def serun():
checkconfig()
readconfig()
go()
if __name__=="__main__":
    schedule.every().day.at("00:10").do(serun) # daily sign-in time
while 1:
schedule.run_pending()
time.sleep(1)
| 2.375 | 2 |
setup.py | magamba/dreamerv2 | 97 | 12794217 | <reponame>magamba/dreamerv2
import setuptools
setuptools.setup(
name="dreamerv2",
version="1.0.0",
description=(
"Mastering Atari with Discrete World Models"
),
license="MIT License",
url="https://github.com/RajGhugare19/dreamerv2",
packages=setuptools.find_packages(),
) | 1.101563 | 1 |
lib/spack/spack/util/web.py | koning/spack | 0 | 12794218 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import codecs
import errno
import re
import os
import os.path
import shutil
import ssl
import sys
import traceback
from itertools import product
import six
from six.moves.urllib.request import urlopen, Request
from six.moves.urllib.error import URLError
import multiprocessing.pool
try:
# Python 2 had these in the HTMLParser package.
from HTMLParser import HTMLParser, HTMLParseError
except ImportError:
# In Python 3, things moved to html.parser
from html.parser import HTMLParser
# Also, HTMLParseError is deprecated and never raised.
class HTMLParseError(Exception):
pass
from llnl.util.filesystem import mkdirp
import llnl.util.tty as tty
import spack.cmd
import spack.config
import spack.error
import spack.url
import spack.util.crypto
import spack.util.s3 as s3_util
import spack.util.url as url_util
from spack.util.compression import ALLOWED_ARCHIVE_TYPES
# Timeout in seconds for web requests
_timeout = 10
# See docstring for standardize_header_names()
_separators = ('', ' ', '_', '-')
HTTP_HEADER_NAME_ALIASES = {
"Accept-ranges": set(
''.join((A, 'ccept', sep, R, 'anges'))
for A, sep, R in product('Aa', _separators, 'Rr')),
"Content-length": set(
''.join((C, 'ontent', sep, L, 'ength'))
for C, sep, L in product('Cc', _separators, 'Ll')),
"Content-type": set(
''.join((C, 'ontent', sep, T, 'ype'))
for C, sep, T in product('Cc', _separators, 'Tt')),
"Date": set(('Date', 'date')),
"Last-modified": set(
''.join((L, 'ast', sep, M, 'odified'))
for L, sep, M in product('Ll', _separators, 'Mm')),
"Server": set(('Server', 'server'))
}
class LinkParser(HTMLParser):
"""This parser just takes an HTML page and strips out the hrefs on the
links. Good enough for a really simple spider. """
def __init__(self):
HTMLParser.__init__(self)
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr, val in attrs:
if attr == 'href':
self.links.append(val)
class NonDaemonProcess(multiprocessing.Process):
"""Process that allows sub-processes, so pools can have sub-pools."""
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, value):
pass
if sys.version_info[0] < 3:
class NonDaemonPool(multiprocessing.pool.Pool):
"""Pool that uses non-daemon processes"""
Process = NonDaemonProcess
else:
class NonDaemonContext(type(multiprocessing.get_context())):
Process = NonDaemonProcess
class NonDaemonPool(multiprocessing.pool.Pool):
"""Pool that uses non-daemon processes"""
def __init__(self, *args, **kwargs):
kwargs['context'] = NonDaemonContext()
super(NonDaemonPool, self).__init__(*args, **kwargs)
def uses_ssl(parsed_url):
if parsed_url.scheme == 'https':
return True
if parsed_url.scheme == 's3':
endpoint_url = os.environ.get('S3_ENDPOINT_URL')
if not endpoint_url:
return True
if url_util.parse(endpoint_url, scheme='https').scheme == 'https':
return True
return False
__UNABLE_TO_VERIFY_SSL = (
lambda pyver: (
(pyver < (2, 7, 9)) or
((3,) < pyver < (3, 4, 3))
))(sys.version_info)
def read_from_url(url, accept_content_type=None):
url = url_util.parse(url)
context = None
verify_ssl = spack.config.get('config:verify_ssl')
# Don't even bother with a context unless the URL scheme is one that uses
# SSL certs.
if uses_ssl(url):
if verify_ssl:
if __UNABLE_TO_VERIFY_SSL:
# User wants SSL verification, but it cannot be provided.
warn_no_ssl_cert_checking()
else:
# User wants SSL verification, and it *can* be provided.
context = ssl.create_default_context()
else:
# User has explicitly indicated that they do not want SSL
# verification.
context = ssl._create_unverified_context()
req = Request(url_util.format(url))
content_type = None
is_web_url = url.scheme in ('http', 'https')
if accept_content_type and is_web_url:
# Make a HEAD request first to check the content type. This lets
# us ignore tarballs and gigantic files.
# It would be nice to do this with the HTTP Accept header to avoid
# one round-trip. However, most servers seem to ignore the header
# if you ask for a tarball with Accept: text/html.
req.get_method = lambda: "HEAD"
resp = _urlopen(req, timeout=_timeout, context=context)
content_type = resp.headers.get('Content-type')
# Do the real GET request when we know it's just HTML.
req.get_method = lambda: "GET"
response = _urlopen(req, timeout=_timeout, context=context)
if accept_content_type and not is_web_url:
content_type = response.headers.get('Content-type')
reject_content_type = (
accept_content_type and (
content_type is None or
not content_type.startswith(accept_content_type)))
if reject_content_type:
tty.debug("ignoring page {0}{1}{2}".format(
url_util.format(url),
" with content type " if content_type is not None else "",
content_type or ""))
return None, None, None
return response.geturl(), response.headers, response
def warn_no_ssl_cert_checking():
tty.warn("Spack will not check SSL certificates. You need to update "
"your Python to enable certificate verification.")
def push_to_url(local_file_path, remote_path, **kwargs):
keep_original = kwargs.get('keep_original', True)
remote_url = url_util.parse(remote_path)
verify_ssl = spack.config.get('config:verify_ssl')
if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url):
warn_no_ssl_cert_checking()
remote_file_path = url_util.local_file_path(remote_url)
if remote_file_path is not None:
mkdirp(os.path.dirname(remote_file_path))
if keep_original:
shutil.copy(local_file_path, remote_file_path)
else:
try:
os.rename(local_file_path, remote_file_path)
except OSError as e:
if e.errno == errno.EXDEV:
# NOTE(opadron): The above move failed because it crosses
# filesystem boundaries. Copy the file (plus original
# metadata), and then delete the original. This operation
# needs to be done in separate steps.
shutil.copy2(local_file_path, remote_file_path)
os.remove(local_file_path)
elif remote_url.scheme == 's3':
extra_args = kwargs.get('extra_args', {})
remote_path = remote_url.path
while remote_path.startswith('/'):
remote_path = remote_path[1:]
s3 = s3_util.create_s3_session(remote_url)
s3.upload_file(local_file_path, remote_url.netloc,
remote_path, ExtraArgs=extra_args)
if not keep_original:
os.remove(local_file_path)
else:
raise NotImplementedError(
'Unrecognized URL scheme: {SCHEME}'.format(
SCHEME=remote_url.scheme))
def url_exists(url):
url = url_util.parse(url)
local_path = url_util.local_file_path(url)
if local_path:
return os.path.exists(local_path)
if url.scheme == 's3':
s3 = s3_util.create_s3_session(url)
from botocore.exceptions import ClientError
try:
s3.get_object(Bucket=url.netloc, Key=url.path)
return True
except ClientError as err:
if err.response['Error']['Code'] == 'NoSuchKey':
return False
raise err
# otherwise, just try to "read" from the URL, and assume that *any*
# non-throwing response contains the resource represented by the URL
try:
read_from_url(url)
return True
except URLError:
return False
def remove_url(url):
url = url_util.parse(url)
local_path = url_util.local_file_path(url)
if local_path:
os.remove(local_path)
return
if url.scheme == 's3':
s3 = s3_util.create_s3_session(url)
s3.delete_object(Bucket=url.s3_bucket, Key=url.path)
return
# Don't even try for other URL schemes.
def _list_s3_objects(client, url, num_entries, start_after=None):
list_args = dict(
Bucket=url.netloc,
Prefix=url.path,
MaxKeys=num_entries)
if start_after is not None:
list_args['StartAfter'] = start_after
result = client.list_objects_v2(**list_args)
last_key = None
if result['IsTruncated']:
last_key = result['Contents'][-1]['Key']
iter = (key for key in
(
os.path.relpath(entry['Key'], url.path)
for entry in result['Contents']
)
if key != '.')
return iter, last_key
def _iter_s3_prefix(client, url, num_entries=1024):
key = None
while True:
contents, key = _list_s3_objects(
client, url, num_entries, start_after=key)
for x in contents:
yield x
if not key:
break
def list_url(url):
url = url_util.parse(url)
local_path = url_util.local_file_path(url)
if local_path:
return os.listdir(local_path)
if url.scheme == 's3':
s3 = s3_util.create_s3_session(url)
return list(set(
key.split('/', 1)[0]
for key in _iter_s3_prefix(s3, url)))
def _spider(url, visited, root, depth, max_depth, raise_on_error):
"""Fetches URL and any pages it links to up to max_depth.
depth should initially be zero, and max_depth is the max depth of
links to follow from the root.
Prints out a warning only if the root can't be fetched; it ignores
errors with pages that the root links to.
Returns a tuple of:
- pages: dict of pages visited (URL) mapped to their full text.
- links: set of links encountered while visiting the pages.
"""
pages = {} # dict from page URL -> text content.
links = set() # set of all links seen on visited pages.
try:
response_url, _, response = read_from_url(url, 'text/html')
if not response_url or not response:
return pages, links
page = codecs.getreader('utf-8')(response).read()
pages[response_url] = page
# Parse out the links in the page
link_parser = LinkParser()
subcalls = []
link_parser.feed(page)
while link_parser.links:
raw_link = link_parser.links.pop()
abs_link = url_util.join(
response_url,
raw_link.strip(),
resolve_href=True)
links.add(abs_link)
# Skip stuff that looks like an archive
if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES):
continue
# Skip things outside the root directory
if not abs_link.startswith(root):
continue
# Skip already-visited links
if abs_link in visited:
continue
# If we're not at max depth, follow links.
if depth < max_depth:
subcalls.append((abs_link, visited, root,
depth + 1, max_depth, raise_on_error))
visited.add(abs_link)
if subcalls:
pool = NonDaemonPool(processes=len(subcalls))
try:
results = pool.map(_spider_wrapper, subcalls)
for sub_pages, sub_links in results:
pages.update(sub_pages)
links.update(sub_links)
finally:
pool.terminate()
pool.join()
except URLError as e:
tty.debug(e)
if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
tty.warn("Spack was unable to fetch url list due to a certificate "
"verification problem. You can try running spack -k, "
"which will not check SSL certificates. Use this at your "
"own risk.")
if raise_on_error:
raise NoNetworkConnectionError(str(e), url)
except HTMLParseError as e:
# This error indicates that Python's HTML parser sucks.
msg = "Got an error parsing HTML."
# Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
if sys.version_info[:3] < (2, 7, 3):
msg += " Use Python 2.7.3 or newer for better HTML parsing."
tty.warn(msg, url, "HTMLParseError: " + str(e))
except Exception as e:
# Other types of errors are completely ignored, except in debug mode.
tty.debug("Error in _spider: %s:%s" % (type(e), e),
traceback.format_exc())
return pages, links
def _spider_wrapper(args):
"""Wrapper for using spider with multiprocessing."""
return _spider(*args)
def _urlopen(req, *args, **kwargs):
"""Wrapper for compatibility with old versions of Python."""
url = req
try:
url = url.get_full_url()
except AttributeError:
pass
# We don't pass 'context' parameter because it was only introduced starting
# with versions 2.7.9 and 3.4.3 of Python.
if 'context' in kwargs:
del kwargs['context']
opener = urlopen
if url_util.parse(url).scheme == 's3':
import spack.s3_handler
opener = spack.s3_handler.open
return opener(req, *args, **kwargs)
def spider(root, depth=0):
"""Gets web pages from a root URL.
If depth is specified (e.g., depth=2), then this will also follow
up to <depth> levels of links from the root.
This will spawn processes to fetch the children, for much improved
performance over a sequential fetch.
"""
root = url_util.parse(root)
pages, links = _spider(root, set(), root, 0, depth, False)
return pages, links
def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):
"""Scrape web pages for new versions of a tarball.
Arguments:
archive_urls (str or list or tuple): URL or sequence of URLs for
different versions of a package. Typically these are just the
tarballs from the package file itself. By default, this searches
the parent directories of archives.
Keyword Arguments:
list_url (str or None): URL for a listing of archives.
Spack will scrape these pages for download links that look
like the archive URL.
list_depth (int): Max depth to follow links on list_url pages.
Defaults to 0.
"""
if not isinstance(archive_urls, (list, tuple)):
archive_urls = [archive_urls]
# Generate a list of list_urls based on archive urls and any
# explicitly listed list_url in the package
list_urls = set()
if list_url is not None:
list_urls.add(list_url)
for aurl in archive_urls:
list_urls |= spack.url.find_list_urls(aurl)
# Add '/' to the end of the URL. Some web servers require this.
additional_list_urls = set()
for lurl in list_urls:
if not lurl.endswith('/'):
additional_list_urls.add(lurl + '/')
list_urls |= additional_list_urls
# Grab some web pages to scrape.
pages = {}
links = set()
for lurl in list_urls:
pg, lnk = spider(lurl, depth=list_depth)
pages.update(pg)
links.update(lnk)
# Scrape them for archive URLs
regexes = []
for aurl in archive_urls:
# This creates a regex from the URL with a capture group for
# the version part of the URL. The capture group is converted
# to a generic wildcard, so we can use this to extract things
# on a page that look like archive URLs.
url_regex = spack.url.wildcard_version(aurl)
# We'll be a bit more liberal and just look for the archive
# part, not the full path.
url_regex = os.path.basename(url_regex)
# We need to add a / to the beginning of the regex to prevent
# Spack from picking up similarly named packages like:
# https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz
# https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz
# https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz
# https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz
url_regex = '/' + url_regex
# We need to add a $ anchor to the end of the regex to prevent
# Spack from picking up signature files like:
# .asc
# .md5
# .sha256
# .sig
# However, SourceForge downloads still need to end in '/download'.
url_regex += r'(\/download)?$'
regexes.append(url_regex)
# Build a dict version -> URL from any links that match the wildcards.
# Walk through archive_url links first.
# Any conflicting versions will be overwritten by the list_url links.
versions = {}
for url in archive_urls + sorted(links):
if any(re.search(r, url) for r in regexes):
try:
ver = spack.url.parse_version(url)
versions[ver] = url
except spack.url.UndetectableVersionError:
continue
return versions
def standardize_header_names(headers):
"""Replace certain header names with standardized spellings.
Standardizes the spellings of the following header names:
- Accept-ranges
- Content-length
- Content-type
- Date
- Last-modified
- Server
Every name considered is translated to one of the above names if the only
difference between the two is how the first letters of each word are
capitalized; whether words are separated; or, if separated, whether they
are so by a dash (-), underscore (_), or space ( ). Header names that
cannot be mapped as described above are returned unaltered.
For example: The standard spelling of "Content-length" would be substituted
for any of the following names:
- Content-length
- content_length
- contentlength
- content_Length
- contentLength
- content Length
... and any other header name, such as "Content-encoding", would not be
altered, regardless of spelling.
If headers is a string, then it (or an appropriate substitute) is returned.
If headers is a non-empty tuple, headers[0] is a string, and there exists a
standardized spelling for header[0] that differs from it, then a new tuple
is returned. This tuple has the same elements as headers, except the first
element is the standardized spelling for headers[0].
If headers is a sequence, then a new list is considered, where each element
is its corresponding element in headers, but mapped as above if a string or
    tuple. This new list is returned if at least one of its elements differs
    from its corresponding element in headers.
If headers is a mapping, then a new dict is considered, where the key in
each item is the key of its corresponding item in headers, mapped as above
if a string or tuple. The value is taken from the corresponding item. If
the keys of multiple items in headers map to the same key after being
standardized, then the value for the resulting item is undefined. The new
dict is returned if at least one of its items has a key that differs from
that of their corresponding item in headers, or if the keys of multiple
items in headers map to the same key after being standardized.
In all other cases headers is returned unaltered.
"""
if isinstance(headers, six.string_types):
for standardized_spelling, other_spellings in (
HTTP_HEADER_NAME_ALIASES.items()):
if headers in other_spellings:
if headers == standardized_spelling:
return headers
return standardized_spelling
return headers
if isinstance(headers, tuple):
if not headers:
return headers
old = headers[0]
if isinstance(old, six.string_types):
new = standardize_header_names(old)
if old is not new:
return (new,) + headers[1:]
return headers
try:
changed = False
new_dict = {}
for key, value in headers.items():
if isinstance(key, (tuple, six.string_types)):
old_key, key = key, standardize_header_names(key)
changed = changed or key is not old_key
new_dict[key] = value
return new_dict if changed else headers
except (AttributeError, TypeError, ValueError):
pass
try:
changed = False
new_list = []
for item in headers:
if isinstance(item, (tuple, six.string_types)):
old_item, item = item, standardize_header_names(item)
changed = changed or item is not old_item
new_list.append(item)
return new_list if changed else headers
except TypeError:
pass
return headers
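# Illustrative examples of the mapping above (derived from HTTP_HEADER_NAME_ALIASES;
# not part of the original module):
#     standardize_header_names('content_length')             -> 'Content-length'
#     standardize_header_names(('contentType', 'text/html'))  -> ('Content-type', 'text/html')
#     standardize_header_names('Content-encoding')            -> 'Content-encoding'  (unchanged)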
class SpackWebError(spack.error.SpackError):
"""Superclass for Spack web spidering errors."""
class NoNetworkConnectionError(SpackWebError):
"""Raised when an operation can't get an internet connection."""
def __init__(self, message, url):
super(NoNetworkConnectionError, self).__init__(
"No network connection: " + str(message),
"URL was: " + str(url))
self.url = url
| 2.015625 | 2 |
hydrocarbon_problem/api/aspen_api.py | ADChristos/Aspen-RL | 0 | 12794219 | from typing import Tuple
from Simulation import Simulation
from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI
from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, \
ColumnOutputSpecification, ProductSpecification, PerCompoundProperty
PATH = 'C:/Users/s2199718/Desktop/RL_PD/AspenSimulation/HydrocarbonMixture.bkp'
class AspenAPI(BaseAspenDistillationAPI):
def __init__(self):
self._flowsheet: Simulation = Simulation(PATH=PATH, VISIBILITY=False)
self._feed_name: str = "S1"
self._tops_name: str = "S2"
self._bottoms_name: str = "S3"
self._name_to_aspen_name = PerCompoundProperty(ethane="ETHANE",
propane="PROPANE", isobutane="I-BUTANE",
n_butane="N-BUTANE", isopentane="I-PENTAN", n_pentane="N-PENTAN")
def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None:
"""Sets the input stream to a column to fit the stream specification"""
# Defining the Thermodynamic Properties
self._flowsheet.STRM_Temperature(self._feed_name, stream_specification.temperature)
self._flowsheet.STRM_Pressure(self._feed_name, stream_specification.pressure)
# Defining the Stream Composition for the Feed
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.ethane,
stream_specification.molar_flows.ethane)
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane,
stream_specification.molar_flows.propane)
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isobutane,
stream_specification.molar_flows.isobutane)
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_butane,
stream_specification.molar_flows.n_butane)
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isopentane,
stream_specification.molar_flows.isopentane)
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_pentane,
stream_specification.molar_flows.n_pentane)
def get_output_stream_specifications(self) -> Tuple[StreamSpecification, StreamSpecification]:
        # Getting the physical values of the top stream
tops_temperature = self._flowsheet.STRM_Get_Temperature(self._tops_name)
tops_pressure = self._flowsheet.STRM_Get_Pressure(self._tops_name)
        # Acquiring the outputs of the distillate (top stream)
tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane)
tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane)
tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane)
tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane)
tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane)
tops_n_pentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane)
# Passing all the variables to their respective "slot"
tops_specifications = StreamSpecification(temperature=tops_temperature, pressure=tops_pressure,
molar_flows=PerCompoundProperty(ethane=tops_ethane,
propane=tops_propane,
isobutane=tops_isobutane,
n_butane=tops_n_butane,
isopentane=tops_isopentane,
n_pentane=tops_n_pentane))
        # Getting the physical values of the bottom stream
bots_temperature = self._flowsheet.STRM_Get_Temperature(self._bottoms_name)
bots_pressure = self._flowsheet.STRM_Get_Pressure(self._bottoms_name)
        # Acquiring the outputs of the bottoms (bottom stream)
bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane)
bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.propane)
bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane)
bots_n_butane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_butane)
bots_isopentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isopentane)
bots_n_pentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_pentane)
        # Tabulating the results of the bottom stream
bots_specifications = StreamSpecification(temperature=bots_temperature, pressure=bots_pressure,
molar_flows=PerCompoundProperty(ethane=bots_ethane,
propane=bots_propane,
isobutane=bots_isobutane,
n_butane=bots_n_butane,
isopentane=bots_isopentane,
n_pentane=bots_n_pentane))
return tops_specifications, bots_specifications
def get_simulated_column_properties(self) -> ColumnOutputSpecification:
# D_F_Location = self._flowsheet.BLK_Get_FeedLocation()
# D_Pressure = self._flowsheet.BLK_Get_Pressure()
# D_Reflux_Ratio = self._flowsheet.BLK_Get_RefluxRatio()
# D_Reboiler_Ratio = self._flowsheet.BLK_Get_ReboilerRatio()
D_Cond_Duty = self._flowsheet.BLK_Get_Condenser_Duty()
D_Reb_Duty = self._flowsheet.BLK_Get_Reboiler_Duty()
D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter()
D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty,
diameter=D_Col_Diameter)
return D_Specifications
def set_column_specification(self, column_specification: ColumnInputSpecification) -> None:
self._flowsheet.BLK_NumberOfStages(column_specification.n_stages)
self._flowsheet.BLK_FeedLocation(column_specification.feed_stage_location)
self._flowsheet.BLK_Pressure(column_specification.condensor_pressure)
self._flowsheet.BLK_RefluxRatio(column_specification.reflux_ratio)
self._flowsheet.BLK_ReboilerRatio(column_specification.reboil_ratio)
def solve_flowsheet(self) -> bool:
self._flowsheet.Run()
def get_column_cost(self, column_specification: ColumnOutputSpecification) -> float:
"""Calculates the TAC of the column."""
raise NotImplementedError
def get_stream_value(self, stream_specification: StreamSpecification) -> float:
"""Calculates the value (per year) of a stream."""
raise NotImplementedError
def stream_is_product(self, stream_specification: StreamSpecification, product_specification:
ProductSpecification) -> bool:
"""Checks whether a stream meets the product specification."""
raise NotImplementedError
if __name__ == '__main__':
from hydrocarbon_problem.api.api_tests import test_api
aspen_api = AspenAPI()
test_api(aspen_api) | 2.53125 | 3 |
rssparser/tests/test_convert_module.py | disp1air/rss_parser | 0 | 12794220 | <filename>rssparser/tests/test_convert_module.py
import unittest
from rssparser.convert_module import rss_items_to_list, rss_to_dict
class TestConvertModule(unittest.TestCase):
def test_rss_items_to_list(self):
self.assertEqual(rss_items_to_list([{}]), [{}])
def test_rss_to_dict(self):
self.assertEqual(rss_to_dict([{}]), {'item1': {}})
if __name__ == '__main__':
unittest.main()
| 2.6875 | 3 |
SciGen/Network/utilities.py | SamuelSchmidgall/SciGen | 1 | 12794221 | import random
def ReLU(x, derivative=False):
""" ReLU function with corresponding derivative """
if derivative:
x[x <= 0] = 0
x[x > 0] = 1
return x
x[x < 0] = 0
return x
def ReLU_uniform_random():
""" Ideal weight starting values for ReLU """
return random.uniform(0.005, 0.2)
def uniform_random():
""" Generic uniform random from -n to n given output is multiplied by n """
return random.uniform(-1, 1)
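# Usage sketch (assumes numpy arrays, since the boolean-mask assignments above need
# array inputs; note that ReLU modifies its argument in place and also returns it):
#     import numpy as np
#     ReLU(np.array([-2.0, 0.5]))                   # -> array([0. , 0.5])
#     ReLU(np.array([-2.0, 0.5]), derivative=True)  # -> array([0., 1.])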
| 3.875 | 4 |
ipypdf/widgets/canvas.py | JoelStansbury/ipypdf | 0 | 12794222 | from ipycanvas import MultiCanvas
CANVAS_TYPE_KWARGS = {
"section": {"color": "blue"},
"text": {"color": "black"},
"image": {"color": "red"},
"pdf": {"color": "black"}, # Unused
"folder": {"color": "black"}, # Unused
"table": {"color": "green"},
}
class PdfCanvas(MultiCanvas):
def __init__(self, **kwargs):
super().__init__(3, **kwargs)
self.add_class("ipypdf-pdf-canvas")
self.bboxes = []
self.bg_layer = self[0]
self.fixed_layer = self[1]
self.animated_layer = self[2]
self.animated_layer.on_mouse_down(self.mouse_down)
self.animated_layer.on_mouse_move(self.mouse_move)
self.animated_layer.on_mouse_up(self.mouse_up)
self.rect = None
self.mouse_is_down = False
def update(self):
"""
I don't know why, but this is needed in order to allow animated_layer
to update correctly after making a change to any other layer
"""
self._canvases = [self.bg_layer, self.fixed_layer, self.animated_layer]
def xywh(self, coords=None):
"""
ipycanvas requires xywh coords, but ipyevents (and PIL) uses xyxy,
so conversion is needed to draw the box on the canvas.
"""
x1, y1, x2, y2 = self.rect if not coords else coords
x = min(x1, x2)
y = min(y1, y2)
w = abs(x2 - x1)
h = abs(y2 - y1)
return x, y, w, h
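        # e.g. self.rect = (10, 10, 4, 2) -> (4, 2, 6, 8): top-left corner plus
        # width/height, regardless of which corner the drag started from.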
def draw_rect(self):
self.animated_layer.clear_rect(0, 0, self.width, self.height)
self.animated_layer.stroke_rect(*self.xywh())
self.add_class("ipypdf-pdf-canvas")
def clear(self):
self.fixed_layer.clear_rect(0, 0, self.width, self.height)
def mouse_down(self, x, y):
self.mouse_is_down = True
self.rect = [x, y, x + 1, y + 1]
self.draw_rect()
def mouse_move(self, x, y):
if self.mouse_is_down:
self.rect[2] = x
self.rect[3] = y
self.draw_rect()
def mouse_up(self, x, y):
self.mouse_is_down = False
self.animated_layer.clear_rect(0, 0, self.width, self.height)
self.fixed_layer.stroke_rect(*self.xywh())
self.update()
def add_image(self, img):
":param img: raw byte data of image"
self.bg_layer.draw_image(img)
self.update()
def draw_many(self, rects):
self.clear()
for coords, _type in rects:
self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type]["color"]
self.fixed_layer.stroke_rect(*self.xywh(coords))
self.update()
def set_type(self, _type: str):
self._type = _type
self.fixed_layer.stroke_style = CANVAS_TYPE_KWARGS[_type]["color"]
self.animated_layer.stroke_style = CANVAS_TYPE_KWARGS[_type]["color"]
| 2.921875 | 3 |
[Kaleido-subs]/Dropped/FGO - Absolute Demonic Front - Babylonia/ac_Babylonia_03.py | tuilakhanh/Encoding-Projects | 57 | 12794223 | <filename>[Kaleido-subs]/Dropped/FGO - Absolute Demonic Front - Babylonia/ac_Babylonia_03.py
#!/usr/bin/env python3
import vapoursynth as vs
import audiocutter
import lvsfunc as lvf
from subprocess import call
core = vs.core
ts_in = r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX).d2v'
src = lvf.src(ts_in)
if ts_in.endswith('d2v'):
src = core.vivtc.VDecimate(src)
ac = audiocutter.AudioCutter()
vid = ac.split(src, [(809,3542),(5223,18815),(20256,37971)])
ac.ready_qp_and_chapters(vid)
vid.set_output(0)
if __name__ == "__main__":
ac.cut_audio(r'Babylonia_03_cut.m4a', audio_source=r'03/Fate Grand Order_ Zettai Majuu Sensen Babylonia - 03 (MX) T112 stereo 244 kbps DELAY -356 ms.aac')
| 1.820313 | 2 |
test/message_test.py | gavincyi/Telex | 0 | 12794224 | #!/bin/python
import unittest
import logging
import os
import sys
from src.message import message
class message_test(unittest.TestCase):
def test_from_message_record(self):
message_record = message(
msg_id=185,
channel_id=82,
source_id=50,
source_chat_id='111111',
msg='Hello world')
row = message_record.str().split(',')
row = [e.replace("'", "") if e.find("'") > -1 else int(e) for e in row]
message_record_from_row = message.from_message_record(row, False)
## Positive test
self.assertEqual(message_record.date, message_record_from_row.date)
self.assertEqual(message_record.time, message_record_from_row.time)
self.assertEqual(message_record.msg_id, message_record_from_row.msg_id)
self.assertEqual(message_record.channel_id, message_record_from_row.channel_id)
self.assertEqual(message_record.source_id, message_record_from_row.source_id)
self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id)
self.assertEqual(message_record.msg, message_record_from_row.msg)
## Negative test
message_record_from_row = message.from_message_record(None)
self.assertEqual(0, message_record_from_row.msg_id)
self.assertEqual(0, message_record_from_row.channel_id)
self.assertEqual(0, message_record_from_row.source_id)
self.assertEqual('', message_record_from_row.source_chat_id)
self.assertEqual('', message_record_from_row.msg)
if __name__ == '__main__':
unittest.main() | 3.359375 | 3 |
djsubject/__init__.py | ttngu207/canonical-colony-management | 0 | 12794225 | from .subject import schema as subject
| 1.0625 | 1 |
backend/app/models/episode.py | flsworld/comment-rick-n-morty | 0 | 12794226 | from sqlalchemy import Column, Integer, String, Date
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relationship
from app.db.session import Base
from app.models import CharacterEpisode
class Episode(Base):
__tablename__ = "episode"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, unique=True)
air_date = Column(Date)
segment = Column(String, unique=True)
characters = relationship("CharacterEpisode", back_populates="episode")
comments = relationship("Comment", back_populates="episode")
association_ids = association_proxy(
"characters",
"character_id",
creator=lambda cid: CharacterEpisode(character_id=cid),
)
| 2.625 | 3 |
BG-NBD_model/3_training_prediction_evaluation.py | les1smore/Stat-Notes | 0 | 12794227 | <filename>BG-NBD_model/3_training_prediction_evaluation.py
# Fitting
from lifetimes import BetaGeoFitter
# instantiation of BG-NBD model
bgf = BetaGeoFitter(penalizer_coef = 0.0)
# fitting of BG-NBD model
bgf.fit(frequency = rfm_cal_holdout['frequency_cal'],
recency = rfm_cal_holdout['recency_cal'],
T = rfm_cal_holdout['T_cal'])
bgf.summary
# Assessment of model fit
# First is to compare the frequencies between our real calibration data and artificial data sampled from the distributions generated by the BG-NBD model
from lifetimes.plotting import plot_period_transactions
_ = plot_period_transactions(bgf)
# Prediction
# First we choose a sample customer
sample_customer = rfm_cal_holdout.iloc[20]
# Inspect this customer's frequency, recency and T both for the calibration and observation periods
sample_customer
# Calculate the conditional expected number of transactions in the given period
n_transactions_pred = bgf.predict(t = 26, # set it to 26 weeks (the length of the observation period)
frequency = sample_customer['frequency_cal'],
recency = sample_customer['recency_cal'],
T = sample_customer['T_cal'])
n_transactions_pred
# Calculate the probability of alive customers
alive_prob = bgf.conditional_probability_alive(frequency = sample_customer['frequency_cal'],
recency = sample_customer['recency_cal'],
T = sample_customer['T_cal'])
alive_prob
# Compare the real and predicted number of transactions
# Get the real number of transactions in the observation period, which equals frequency_holdout + 1
rfm_cal_holdout['n_transactions_holdout_real'] = rfm_cal_holdout['frequency_holdout'] + 1
# Get the predicted number of transactions in the next 26 weeks (length of the observation period)
rfm_cal_holdout['n_transactions_holdout_pred'] = bgf.predict(t=26,
frequency=rfm_cal_holdout['frequency_cal'],
recency = rfm_cal_holdout['recency_cal'],
T = rfm_cal_holdout['T_cal'])
# Compare the real and predicted transactions
rfm_cal_holdout[['n_transactions_holdout_real', 'n_transactions_holdout_pred']].head()
#RMSE
from sklearn.metrics import mean_squared_error
RMSE = mean_squared_error(y_true = rfm_cal_holdout['n_transactions_holdout_real'],
y_pred = rfm_cal_holdout['n_transactions_holdout_pred'],
squared = False)
RMSE
| 2.71875 | 3 |
blkdiscovery/lsblk.py | jaredeh/blkdiscovery | 0 | 12794228 | import json
#hack for python2 support
try:
from .blkdiscoveryutil import *
except:
from blkdiscoveryutil import *
class LsBlk(BlkDiscoveryUtil):
def disks(self):
retval = []
parent = self.details()
for path, diskdetails in parent.items():
if not diskdetails.get('type') == "disk":
continue
retval.append(path)
return retval
def label_children(self,retval):
if not retval.get('children'):
return
children = {}
for child in retval['children']:
self.label_children(child)
if child.get('name'):
name = child['name']
else:
name = "UNKNOWN"
children[name] = child
retval['children'] = children
def details(self):
retval = {}
rawoutput = self.subprocess_check_output(["lsblk", '--json', '-O', '-p'])
parent = json.loads(rawoutput)
for child in parent.get('blockdevices',[]):
#print child['id'] + child['class']
path = child.get('name')
retval[path] = child
for disk, details in retval.items():
self.label_children(details)
return self.stringify(retval)
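    # Shape of the value returned by details() (illustrative, based on the keys used
    # above; the actual fields come from `lsblk -O`):
    #     {'/dev/sda': {'name': '/dev/sda', 'type': 'disk', ...,
    #                   'children': {'/dev/sda1': {...}, '/dev/sda2': {...}}}}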
if __name__ == '__main__':
import pprint
pp = pprint.PrettyPrinter(indent=4)
l = LsBlk()
devdata = l.details()
pp.pprint(devdata)
disks = l.disks()
pp.pprint(disks)
| 2.4375 | 2 |
Env/discretize.py | mikema2019/Deep-reinforcement-learning | 1 | 12794229 | import numpy as np
import tensorflow as tf
def discretize(value,action_dim,n_outputs):
discretization = tf.round(value)
discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]),
tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization)))
return tf.to_int32(discretization)
if __name__=='__main__':
value=np.array((0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9))
a=discretize(value,value.shape[0],2)
with tf.Session() as sess:
print(a.eval()) | 2.9375 | 3 |
developmentHub/posts/tests/test_models.py | MariiaBel/developmentHub | 0 | 12794230 | <reponame>MariiaBel/developmentHub
from django.test import TestCase
from django.contrib.auth import get_user_model
from ..models import Post, Group
User = get_user_model()
class PostModelTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create(username = "Юзер")
cls.post = Post.objects.create(
title="Заголовок тестовой статьи",
text="Текст тестовой статьи",
author = cls.user,
)
cls.group = Group.objects.create(
title = "Название тестовой группы",
slug = "test-slug",
description = "Описание тестовой группы",
)
def setUp(self):
self.post = PostModelTest.post
self.group = PostModelTest.group
def test_verbose_name_post(self):
"""Checks verbose names for post"""
field_verboses = {
"title": "Название статьи",
"text": "Текст статьи",
"pub_date": "Дата публикации",
"group": "Название группы",
"author": "Автор статьи",
}
for value, expected in field_verboses.items():
with self.subTest(value=value):
self.assertEqual(self.post._meta.get_field(value).verbose_name, expected)
def test_verbose_name_group(self):
"""Checks verbose names for group"""
field_verboses = {
"title": "Название группы",
"slug": "Слаг",
"description": "Описание группы",
}
for value, expected in field_verboses.items():
with self.subTest(value=value):
self.assertEqual(self.group._meta.get_field(value).verbose_name, expected)
def test_help_text_post(self):
"""Checks help text for post"""
field_help_text = {
"title": "Дайте название статье",
"group": "Укажите группу для статьи",
}
for value, expected in field_help_text.items():
with self.subTest(value=value):
self.assertEqual(self.post._meta.get_field(value).help_text, expected)
def test_help_text_group(self):
"""Checks help text for group"""
field_help_text = {
"title": "Дайте назание группе",
"slug": ('Укажите адрес для группы. Используйте '
'только латиницу, цифры, дефисы и знаки '
'подчёркивания'),
}
for value, expected in field_help_text.items():
with self.subTest(value=value):
self.assertEqual(self.group._meta.get_field(value).help_text, expected)
def test_str_post(self):
"""Checks __str__ for post"""
expected_str = self.post.text[:15]
self.assertEqual(expected_str, str(self.post))
def test_str_group(self):
"""Checks __str__ for group"""
expected_str = self.group.title
self.assertEqual(expected_str, str(self.group)) | 2.578125 | 3 |
ChessAI/GameController/game_board.py | PavelLebed20/chess_classic | 1 | 12794231 | import copy
import sys
import ChessAI.GameController.game_figures as Figures
from ChessBoard.chess_board import Board
from ChessBoard.chess_figure import FigureType, Side
from Vector2d.Vector2d import Vector2d, Move
class GameBoard:
default_white_king_pos = Vector2d(4, 7)
default_black_king_pos = Vector2d(4, 0)
default_white_pawn_row = 6
default_black_pawn_row = 1
default_white_rook_right_pos = Vector2d(7, 7)
default_white_rook_left_pos = Vector2d(0, 7)
default_black_rook_right_pos = Vector2d(7, 0)
default_black_rook_left_pos = Vector2d(0, 0)
def __init__(self, chess_board):
self.board = [[None for j in range(0, Board.ROW_SIZE)]
for i in range(0, Board.COLUMN_SIZE)]
for i in range(0, Board.COLUMN_SIZE):
for j in range(0, Board.ROW_SIZE):
if chess_board.board[j][i] is None:
continue
figure_type = chess_board.board[j][i].figure_type
side = chess_board.board[j][i].side
cur_pos = Vector2d(j, i)
if figure_type == FigureType.KING:
was_moved = True
if side == Side.WHITE:
if cur_pos == GameBoard.default_white_king_pos:
was_moved = False
elif side == Side.BLACK:
if cur_pos == GameBoard.default_black_king_pos:
was_moved = False
self.board[j][i] = Figures.King(side, cur_pos, was_moved)
elif figure_type == FigureType.QUEEN:
self.board[j][i] = Figures.Queen(side, cur_pos)
elif figure_type == FigureType.ROOK:
was_moved = True
if side == Side.WHITE:
if cur_pos == GameBoard.default_white_rook_left_pos or cur_pos == GameBoard.default_white_rook_right_pos:
was_moved = False
elif side == Side.BLACK:
if cur_pos == GameBoard.default_black_rook_left_pos or cur_pos == GameBoard.default_black_rook_right_pos:
was_moved = False
self.board[j][i] = Figures.Rook(side, cur_pos, was_moved)
elif figure_type == FigureType.KNIGHT:
self.board[j][i] = Figures.Knight(side, cur_pos)
elif figure_type == FigureType.BISHOP:
self.board[j][i] = Figures.Bishop(side, cur_pos)
elif figure_type == FigureType.PAWN:
was_moved = True
if side == Side.WHITE:
if i == GameBoard.default_white_pawn_row:
was_moved = False
elif side == Side.BLACK:
if i == GameBoard.default_black_pawn_row:
was_moved = False
self.board[j][i] = Figures.Pawn(side, cur_pos, was_moved)
else:
continue
def serialize_to_str(self):
str_board = ['.' for j in range(0, Board.ROW_SIZE)
for i in range(0, Board.COLUMN_SIZE)]
for i in range(0, Board.COLUMN_SIZE):
for j in range(0, Board.ROW_SIZE):
if self.board[j][i] is None:
continue
str_board[i * Board.ROW_SIZE + j] = self.board[j][i].serialized_letter()
res = ""
for i in range(0, Board.COLUMN_SIZE * Board.ROW_SIZE):
res += str_board[i]
return res
def deserialize_from_str(self, board_as_str):
self.board = [[None for j in range(0, Board.ROW_SIZE)]
for i in range(0, Board.COLUMN_SIZE)]
str_board = ['.' for j in range(0, Board.ROW_SIZE)
for i in range(0, Board.COLUMN_SIZE)]
for i in range(0, Board.COLUMN_SIZE):
for j in range(0, Board.ROW_SIZE):
str_board[i * Board.ROW_SIZE + j] = str(board_as_str).__getitem__(i * Board.ROW_SIZE + j)
for i in range(0, Board.COLUMN_SIZE):
for j in range(0, Board.ROW_SIZE):
letter = str_board[i * Board.ROW_SIZE + j]
if letter.isupper():
side = Side.WHITE
else:
side = Side.BLACK
letter = letter.lower()
cur_pos = Vector2d(j, i)
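                # Letter codes used by serialize/deserialize (uppercase = white, lowercase = black):
                # k/i = king (unmoved/moved), r/o = rook (unmoved/moved), q = queen, n = knight,
                # b = bishop, p/a = pawn (unmoved/moved), w = pawn with the double-move flag set
                # (likely the en-passant marker, judging from delete_double_move below).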
if letter == 'k':
self.board[j][i] = Figures.King(side, cur_pos, False)
elif letter == 'i':
self.board[j][i] = Figures.King(side, cur_pos, True)
elif letter == 'b':
self.board[j][i] = Figures.Bishop(side, cur_pos)
elif letter == 'r':
self.board[j][i] = Figures.Rook(side, cur_pos, False)
elif letter == 'o':
self.board[j][i] = Figures.Rook(side, cur_pos, True)
elif letter == 'n':
self.board[j][i] = Figures.Knight(side, cur_pos)
elif letter == 'q':
self.board[j][i] = Figures.Queen(side, cur_pos)
elif letter == 'p':
self.board[j][i] = Figures.Pawn(side, cur_pos)
elif letter == 'a':
self.board[j][i] = Figures.Pawn(side, cur_pos, True)
elif letter == 'w':
self.board[j][i] = Figures.Pawn(side, cur_pos, False, True)
def export_chess_board(self):
export_board = ['.' for j in range(0, Board.ROW_SIZE)
for i in range(0, Board.COLUMN_SIZE)]
for i in range(0, Board.COLUMN_SIZE):
for j in range(0, Board.ROW_SIZE):
if self.board[j][i] is None:
continue
figure_type = self.board[j][i].figure_type
side = self.board[j][i].side
if figure_type == FigureType.KING:
                    letter = 'k'
                elif figure_type == FigureType.QUEEN:
                    letter = 'q'
                elif figure_type == FigureType.ROOK:
                    letter = 'r'
                elif figure_type == FigureType.KNIGHT:
                    letter = 'n'
                elif figure_type == FigureType.BISHOP:
                    letter = 'b'
                elif figure_type == FigureType.PAWN:
                    letter = 'p'
                if side == Side.WHITE:
                    letter = letter.upper()
                export_board[i * Board.ROW_SIZE + j] = letter
return export_board
def print(self):
sys.stdout.write(" ")
sys.stdout.write(" ")
sys.stdout.write(" ")
for i in range(0, Board.ROW_SIZE):
sys.stdout.write(i.__str__())
sys.stdout.write(" ")
print()
print()
for i in range(0, Board.COLUMN_SIZE):
sys.stdout.write(i.__str__())
sys.stdout.write(" ")
sys.stdout.write(" ")
for j in range(0, Board.ROW_SIZE):
if self.board[j][i] is not None:
self.board[j][i].print()
sys.stdout.write(" ")
else:
sys.stdout.write("*")
sys.stdout.write(" ")
print()
def print_attacked_cells(self):
for i in range(0, Board.COLUMN_SIZE):
for j in range(0, Board.ROW_SIZE):
if self.board[j][i] is not None:
attack_cells = self.board[j][i].generate_moves(self)
self.board[j][i].print()
sys.stdout.write(": ")
for k in range(len(attack_cells)):
sys.stdout.write(attack_cells[k].x.__str__())
sys.stdout.write(" ")
sys.stdout.write(attack_cells[k].y.__str__())
sys.stdout.write("; ")
print()
def get_by_pos(self, x, y):
return self.board[x][y]
def get(self, position):
return self.board[position.x][position.y]
def set(self, position, game_object):
self.board[position.x][position.y] = game_object
def get_king_cell(self, side):
for i in range(0, Board.COLUMN_SIZE):
for j in range(0, Board.ROW_SIZE):
if self.board[j][i] is not None:
if isinstance(self.board[j][i], Figures.King) and self.board[j][i].side == side:
return Vector2d(j, i)
def get_figures_list(self, side):
figures = []
for i in range(0, Board.COLUMN_SIZE):
for j in range(0, Board.ROW_SIZE):
if self.board[j][i] is not None:
if self.board[j][i].side == side:
figures.append(self.board[j][i])
return figures
def make_move(self, move):
self.get(move.point_from).make_move(self, move.point_to)
def summary_attacked_cells(self, side):
attacked_cells = []
for j in range(Board.ROW_SIZE):
for i in range(Board.COLUMN_SIZE):
figure = self.get_by_pos(j, i)
if figure is not None and figure.side == side:
if isinstance(figure, Figures.King):
attacked_cells = attacked_cells + figure.generate_moves(self, False)
elif isinstance(figure, Figures.Pawn):
attacked_cells = attacked_cells + figure.generate_moves(self, True)
else:
attacked_cells = attacked_cells + figure.generate_moves(self)
return attacked_cells
def summary_moves(self, side, my_turn=True):
summary_moves = []
attacked_cells = []
for j in range(Board.ROW_SIZE):
for i in range(Board.COLUMN_SIZE):
attacked_cells.clear()
figure = self.get_by_pos(j, i)
if figure is not None and figure.side == side:
if isinstance(figure, Figures.King):
attacked_cells = attacked_cells + figure.generate_moves(self, my_turn)
else:
attacked_cells = attacked_cells + figure.generate_moves(self)
for k in range(len(attacked_cells)):
summary_moves.append(Move(Vector2d(j, i), attacked_cells[k]))
return summary_moves
def is_that_check(self, my_side):
attacked_cells = self.summary_attacked_cells(my_side)
enemy_king_cell = self.get_king_cell(Side.get_oposite(my_side))
return enemy_king_cell in attacked_cells
def is_that_mate(self, my_side):
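        # Mate check: the opponent of my_side is mated if every move of every enemy
        # figure, tried on a deep copy of the board, still leaves their king in check.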
enemy_figures = self.get_figures_list(Side.get_oposite(my_side))
for i in range(len(enemy_figures)):
cur_figure = enemy_figures[i]
available_moves = cur_figure.generate_moves(self)
for j in range(len(available_moves)):
new_chess_board = copy.deepcopy(self)
if new_chess_board.get(cur_figure.position) is None:
print(cur_figure.position.x)
print(cur_figure.position.y)
new_chess_board.make_move(Move(cur_figure.position, available_moves[j]))
if new_chess_board.is_that_check(my_side) is False:
return False
return True
def is_that_stalemate(self, my_side):
enemy_figures = self.get_figures_list(Side.get_oposite(my_side))
for i in range(len(enemy_figures)):
cur_figure = enemy_figures[i]
if isinstance(cur_figure, Figures.King) is not True:
available_moves = cur_figure.generate_moves(self)
if len(available_moves) != 0:
return False
else:
available_moves = cur_figure.generate_moves(self)
for j in range(len(available_moves)):
new_chess_board = copy.deepcopy(self)
if new_chess_board.get(cur_figure.position) is None:
print(cur_figure.position.x)
print(cur_figure.position.y)
new_chess_board.make_move(Move(cur_figure.position, available_moves[j]))
if new_chess_board.is_that_check(my_side) is False:
return False
return True
def evaluate(self, side):
total = 0
for j in range(Board.ROW_SIZE):
for i in range(Board.COLUMN_SIZE):
pos = Vector2d(j, i)
figure = self.get(pos)
if figure is not None:
if figure.side is side:
sign = 1
else:
sign = -1
total = total + (figure.evaluate(j, i) * sign)
return total
def delete_double_move(self, side_to_del):
for j in range(Board.ROW_SIZE):
for i in range(Board.COLUMN_SIZE):
figure = self.get_by_pos(j, i)
if figure is not None and figure.side == side_to_del:
if isinstance(figure, Figures.Pawn):
figure.double_move = False
def swap_pawn(self, position, figure_lat):
side = self.board[position.x][position.y].side
lower = figure_lat.lower()
if lower == 'q':
self.board[position.x][position.y] = Figures.Queen(side, position)
if lower == 'b':
self.board[position.x][position.y] = Figures.Bishop(side, position)
if lower == 'n':
self.board[position.x][position.y] = Figures.Knight(side, position)
if lower == 'r':
self.board[position.x][position.y] = Figures.Rook(side, position, True)
| 2.96875 | 3 |
scripts/map_price.py | allinbits/gravity-dex-stats | 2 | 12794232 | <gh_stars>1-10
import csv
import sys
import requests
def get_pools():
url = "https://staging.demeris.io/v1/liquidity/cosmos/liquidity/v1beta1/pools"
r = requests.get(url)
return r.json()["pools"]
def get_balance(addr):
url = "https://staging.demeris.io/v1/liquidity/cosmos/bank/v1beta1/balances/" + addr
r = requests.get(url)
return r.json()["balances"]
def get_verified_denoms():
url = "https://staging.demeris.io/v1/verified_denoms"
r = requests.get(url)
return r.json()["verified_denoms"]
def get_prices():
url = "https://staging.demeris.io/v1/oracle/prices"
r = requests.get(url)
return r.json()["data"]
def get_ibc_denom_info(denom):
denom = denom.removeprefix("ibc/")
url = "https://staging.demeris.io/v1/chain/cosmos-hub/denom/verify_trace/" + denom
r = requests.get(url)
if r.status_code != 200:
return None
return r.json()["verify_trace"]
def make_denom_price_map():
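    # Build a map from pool reserve denom to USD(T) price per base unit: IBC denoms are
    # resolved to their base denom, denoms without a price feed are skipped, and the
    # oracle price is scaled down by the denom's precision (10 ** precision).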
denom_set = set()
for pool in get_pools():
denom_set |= set(pool["reserve_coin_denoms"])
verified_denoms = get_verified_denoms()
verified_denom_map = {x["name"]: x for x in verified_denoms}
prices = get_prices()
price_map = {x["Symbol"]: x["Price"] for x in prices["Tokens"]}
denom_price_map = {}
for denom in denom_set:
if denom.startswith("ibc/"):
info = get_ibc_denom_info(denom)
if not info:
continue
base_denom = info["base_denom"]
else:
base_denom = denom
denom_data = verified_denom_map[base_denom]
if not denom_data["fetch_price"]:
continue
precision = denom_data["precision"]
ticker = denom_data["ticker"]
price = price_map[ticker + "USDT"]
denom_price_map[denom] = price / pow(10, precision)
return denom_price_map
if __name__ == "__main__":
if len(sys.argv) < 2:
print(f"usage: python3 {sys.argv[0]} [pools file]")
sys.exit(0)
denom_price_map = make_denom_price_map()
tvl = 0.0
for pool in get_pools():
vl = 0.0
try:
for x in get_balance(pool["reserve_account_address"]):
if x["denom"] in pool["reserve_coin_denoms"]:
vl += int(x["amount"]) * denom_price_map[x["denom"]]
except KeyError:
continue
tvl += vl
print(f"total value locked: {tvl}")
swap_amount = 0.0
fee_amount = 0.0
with open(sys.argv[1], newline="") as f:
reader = csv.DictReader(f)
for row in reader:
try:
x_price = denom_price_map[row["x_denom"]]
y_price = denom_price_map[row["y_denom"]]
except KeyError:
continue
swap_amount += int(row["offer_x"]) * x_price
swap_amount += int(row["offer_y"]) * y_price
fee_amount += int(row["offer_x_fee"]) * x_price
fee_amount += int(row["demand_y_fee"]) * y_price
fee_amount += int(row["offer_y_fee"]) * y_price
fee_amount += int(row["demand_x_fee"]) * x_price
print(f"total swapped amount: {swap_amount}")
print(f"total fees paid: {fee_amount}")
| 2.5 | 2 |
publicationData/paperResultsScripts/Fig2_Extended-model.py | pranasag/extendedEcoliGEM | 1 | 12794233 | import cbmpy
import numpy as np
import os
import sys
import pandas as pd
import re
modelLoc = sys.argv[1]
growthMediumLoc = sys.argv[2]
scriptLoc = sys.argv[3]
proteomicsLoc = sys.argv[4]
resultsFolder = sys.argv[5]
model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False)
growthData = pd.read_csv(growthMediumLoc)
proteomicsData = pd.read_csv(proteomicsLoc)
resultsPath = '%s/%s' %(scriptLoc, resultsFolder)
if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath)
os.chdir(resultsPath)
"""
Total protein volume constraint for <NAME>i
See the supplementary material of the paper for the derivation of the constraint
"""
protSum=float(0.62/0.34)
pID = 'UP000000625'
constraint = []
UniProtIDs = pd.read_csv('proteinMasses.txt', sep = '\t')
for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',',''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])])
model.addUserConstraint(pid = None, fluxes = constraint, operator = '<=', rhs = protSum)
os.chdir(resultsPath)
"""
Here, we define the multiplier for the concentrations of nutrients in the growth medium. We will use this to perform glucose (and amino acid, for the supplemented MOPS variants) limitation simulations.
"""
multiplier = 1.0 #No changes in Glc abundance
for i in growthData['Reaction ID']:
model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0])
fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model)
fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model, pre_opt=True)
cbmpy.CBWrite.writeFVAdata(fva[0], fva[1], 'glcTitration_%s_%.2f.csv' %(os.path.split(growthMediumLoc)[1].replace('.csv', ''), multiplier))
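# A minimal sketch (not part of the original run) of the glucose-limitation titration
# described in the comment above; the multiplier values are only illustrative and the
# block is disabled by default so the single-run behaviour is unchanged.
RUN_TITRATION = False
if RUN_TITRATION:
    for multiplier in [1.0, 0.8, 0.6, 0.4, 0.2]:
        for i in growthData['Reaction ID']:
            model.setReactionLowerBound(i, multiplier * growthData['Lower Bound'].loc[growthData['Reaction ID'] == i].values[0])
        fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model, pre_opt=True)
        cbmpy.CBWrite.writeFVAdata(fva[0], fva[1], 'glcTitration_%s_%.2f.csv' % (os.path.split(growthMediumLoc)[1].replace('.csv', ''), multiplier))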
| 2.53125 | 3 |
HSPpipe/callall.py | fedhere/getlucky | 0 | 12794234 | import sys,os,time
import pyfits as PF
sys.path.append("../LIHSPcommon")
from myutils import mygetenv,readconfig, mymkdir, mjd
speedyout=mygetenv('SPEEDYOUT')
def readconfig(configfile):
f = open(configfile, 'r')
config_string = f.read()
parameters = eval(config_string)
return parameters
#unspool
def unspool(par,outpath):
from myutils import mymkdir,mygetenv
from unspool import unspoolit
if mymkdir(outpath)!= 0:
sys.exit()
if mymkdir(outpath+'/unspooled')!= 0:
sys.exit()
# strg = 'mkdir %s'%outpath
# os.system(strg)
heredir=mygetenv('SPEEDYOUT')+'/darks/'
if mymkdir(heredir)!= 0:
sys.exit()
dodarks = [par['dodark']]
if (isinstance(par['spool'],types.StringTypes)):
print "only 1 file to unspool"
else:
for i in range(1,len(par['spool'])):
if dodarks[0] == 3 or dodarks[0] == 0:
dodarks.append(dodarks[0])
else:
dodarks.append(2)
inpath=par['impath']+'/'+par['imdir']+'/'
print inpath
print par['spool'], dodarks
heredir=speedyout+'/darks/'
if mymkdir(heredir)!= 0:
sys.exit()
if (isinstance(par['spool'],types.StringTypes)):
nameroot=par['spool']
if nameroot.endswith('.fits'):
nameroot = nameroot[:-5]
fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot)
if os.path.isfile(fname):
ret = unspoolit(inpath, nameroot+'.fits', inpath,par['dark'],'avg', dodarks[0], heredir,outpath,0)
if ret !=1:
print "\n\n\n!!!!!!!!!!!!!!!PANIC: unspooling failed. exiting!!!!!!!!!!!\n\n\n"
sys.exit(0)
else:
print 'no spool %s to be found'%fname
sys.exit(0)
else :
for i,img in enumerate(par['spool']):
if img.endswith('.fits'):
img = img[:-5]
fname = '%s/%s/%s.fits'%(par['impath'],par['imdir'], img)
if os.path.isfile(fname):
ret = unspoolit(inpath, img+'.fits', inpath,par['dark'],'avg', dodarks[i], heredir,outpath)
if ret !=1:
print "\n\n\n!!!!!!!!!!!!!!!PANIC: unspooling failed. exiting!!!!!!!!!!!\n\n\n"
sys.exit(0)
else:
print 'no spool %s to be found'%fname
sys.exit(0)
################### aperture photometry
def myapphot(par, cosmic):
from myutils import mymkdir,mygetenv
from myapphot import *
SPEEDYOUT=mygetenv('SPEEDYOUT')
if (cosmic):
if (isinstance(par['spool'],types.StringTypes)):
nameroot=par['spool']
if nameroot.endswith('.fits'):
nameroot = nameroot[:-5]
inpath = '%s/%s//clean/'%(SPEEDYOUT,nameroot)
else:
nameroot=par['spool'][0]
if nameroot.endswith('.fits'):
nameroot = nameroot[:-5]
inpath = '%s/%s_all/clean/'%(SPEEDYOUT,nameroot)
else:
if (isinstance(par['spool'],types.StringTypes)):
nameroot=par['spool']
if nameroot.endswith('.fits'):
nameroot = nameroot[:-5]
inpath = '%s/%s/unspooled/'%(SPEEDYOUT,nameroot)
else:
nameroot=par['spool'][0]
if nameroot.endswith('.fits'):
nameroot = nameroot[:-5]
inpath = '%s/%s_all/unspooled/'%(SPEEDYOUT,nameroot)
ret =myphot(inpath, par['coords'], int(par['last']), par['ap'], par['centroid'], nameroot, par['target'])
if ret !=1:
print "\n\n\n!!!!!!!!!!!!!!!PANIC: photometry failed. exiting!!!!!!!!!!!\n\n\n"
sys.exit(0)
######################################################################################
################### cosmics and sextractor
def runsex(par,cosmic):
if (cosmic == 1):
strg = 'python runcosmic.py /science/fbianco/HSPdata/unspooled/%s %s_ 0 %d'%(par['spool'],par['spool'],par['last'])
os.system(strg)
strg='for i in /science/fbianco/HSPdata/%s/clean/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool']
os.system(strg)
if (cosmic == 0):
strg='for i in /science/fbianco/HSPdata/%s/unspooled/*fits ; do python runsextractor.py $i; done'%par['spool']
print strg
os.system(strg)
#photometry
def photometry(par, cosmic):
if (cosmic):
strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool'])
os.system(strg)
else:
strg = 'python phot.py /science/fbianco/HSPdata/%s/unspooled/ %s %d %s'%(par['spool'],par['coordat'], par['last'], par['spool'])
os.system(strg)
######################################################################################
from myutils import mymkdir,mygetenv
import types
par = readconfig(sys.argv[1])
print par
SPEEDYOUT=mygetenv('SPEEDYOUT')
if (isinstance(par['spool'],types.StringTypes)):
nameroot=par['spool']
if nameroot.endswith('.fits'):
nameroot = nameroot[:-5]
outpath = '%s/%s/'%(SPEEDYOUT,nameroot)
else :
nameroot=par['spool'][0]
if nameroot.endswith('.fits'):
nameroot = nameroot[:-5]
outpath = '%s/%s_all/'%(SPEEDYOUT,nameroot)
tmp = '%s/%s/%s.fits'%(par['impath'],par['imdir'], nameroot)
image=PF.open(tmp)
header=image[0].header
print "last image in spool: ",par['last']
if par['last'] == 0 or par['last'] >image[0].header['NAXIS3']:
par['last']=int(image[0].header['NAXIS3'])
print "last image in spool: ",par['last']
if par['unspool'].startswith('y'):
print "\n\n\nUNSPOOLING\n\n\n"
unspool(par, outpath)
cosmic = 0
if par['cosmic'].startswith('y'):
print "\n\n\nremoving cosmics...\n\n\n"
cosmic = 1
if par['sextract'].startswith('y'):
print "\n\n\nextracting (sex)...\n\n\n"
runsex(par, cosmic)
if par['phot'].startswith('y'):
print "\n\n\nrunning iraf photometry...\n\n\n"
photometry(par, cosmic)
if par['createlc'].startswith('y'):
print "\n\n\ncreating lcvs...\n\n\n"
if(cosmic):
strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled/clean/ %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap'])
os.system(strg)
else:
strg = 'python extractlc.py /science/fbianco/HSPdata/%s/unspooled %s %s %d %d'%(nameroot,par['coordat'],nameroot,par['last'],par['ap'])
os.system(strg)
if par['myapphot'].startswith('y'):
print "\n\n\nrunning my aperture photometry...\n\n\n"
myapphot(par, cosmic)
| 2.109375 | 2 |
maskrcnn/tflite_convert.py | hqbao/dlp_tf | 0 | 12794235 | <filename>maskrcnn/tflite_convert.py
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import tensorflow as tf
from pycocotools.coco import COCO
from matplotlib.patches import Rectangle
from models import build_inference_maskrcnn_non_fpn
from utils import genanchors, box2frame
from settings import settings
params = settings('non-fpn-inference')
asizes = params['asizes']
ishape = params['ishape']
ssize = params['ssize']
max_num_of_rois = params['max_num_of_rois']
unified_roi_size = params['unified_roi_size']
rpn_head_dim = params['rpn_head_dim']
fc_denses = params['fc_denses']
block_settings = params['resnet']
nsm_iou_threshold = params['nsm_iou_threshold']
nsm_score_threshold = params['nsm_score_threshold']
classes = params['classes']
output_path = params['output_path']
abox4d = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes)
anchor_4dtensor = tf.constant(value=abox4d, dtype='float32')
rpn_model, detection_model = build_inference_maskrcnn_non_fpn(
ishape=ishape,
anchor_4dtensor=anchor_4dtensor,
classes=classes,
max_num_of_rois=max_num_of_rois,
nsm_iou_threshold=nsm_iou_threshold,
nsm_score_threshold=nsm_score_threshold,
unified_roi_size=unified_roi_size,
rpn_head_dim=rpn_head_dim,
fc_denses=fc_denses,
block_settings=block_settings,
base_block_trainable=False)
rpn_model.load_weights('{}/rpn_weights.h5'.format(output_path), by_name=True)
# detection_model.load_weights('{}/detection_weights.h5'.format(output_path), by_name=True)
rpn_model.save('{}/rpn_model'.format(output_path))
# detection_model.save('{}/detection_model'.format(output_path))
converter = tf.lite.TFLiteConverter.from_saved_model('{}/rpn_model'.format(output_path))
converter.experimental_new_converter = True
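# Allow ops without a TFLite builtin kernel to fall back to TensorFlow (Select TF) ops.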
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
open('{}/rpn_model.tflite'.format(output_path), 'wb').write(tflite_model)
# converter = tf.lite.TFLiteConverter.from_saved_model('{}/detection_model'.format(output_path))
# converter.experimental_new_converter = True
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
# tflite_model = converter.convert()
# open('{}/detection_model.tflite'.format(output_path), 'wb').write(tflite_model)
| 1.953125 | 2 |
lib/augment3D/elastic_deform.py | utayao/MedicalZooPytorch | 1 | 12794236 | import numpy as np
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage.filters import gaussian_filter
"""
Elastic deformation of images as described in
<NAME>, "Best Practices for
Convolutional Neural Networks applied to Visual
Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
Modified from:
https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62
Modified to take 3D inputs
Deforms both the image and corresponding label file
Label volumes are interpolated via nearest neighbour
"""
def elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20, c_val=0.0, method="linear"):
"""
:param img_numpy: 3D medical image modality
:param labels: 3D medical image labels
:param alpha: scaling factor of gaussian filter
:param sigma: standard deviation of random gaussian filter
:param c_val: fill value
:param method: interpolation method. supported methods : ("linear", "nearest")
:return: deformed image and/or label
"""
    assert img_numpy.ndim == 3, 'Wrong img shape, provide 3D img'
    if labels is not None:
        assert img_numpy.shape == labels.shape, "Shapes of img and label do not match!"
shape = img_numpy.shape
# Define 3D coordinate system
coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2])
# Interpolated img
im_intrps = RegularGridInterpolator(coords, img_numpy,
method=method,
bounds_error=False,
fill_value=c_val)
# Get random elastic deformations
dx = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
dy = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
dz = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
# Define sample points
x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]]
indices = np.reshape(x + dx, (-1, 1)), \
np.reshape(y + dy, (-1, 1)), \
np.reshape(z + dz, (-1, 1))
    # Interpolate the 3D image
img_numpy = im_intrps(indices).reshape(shape)
# Interpolate labels
if labels is not None:
lab_intrp = RegularGridInterpolator(coords, labels,
method="nearest",
bounds_error=False,
fill_value=0)
labels = lab_intrp(indices).reshape(shape).astype(labels.dtype)
return img_numpy, labels
return img_numpy
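# Minimal usage sketch on synthetic data; the alpha/sigma values below are only illustrative.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    vol = rng.rand(32, 32, 32).astype(np.float32)   # fake 3D image volume
    seg = (vol > 0.5).astype(np.uint8)              # fake label volume
    vol_def, seg_def = elastic_transform_3d(vol, seg, alpha=2, sigma=5)
    print(vol_def.shape, seg_def.shape)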
| 2.8125 | 3 |
tensorflow_graphics/projects/nasa/lib/utils.py | Liang813/graphics | 2,759 | 12794237 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General helper functions."""
from os import path
import numpy as np
from skimage import measure
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib.libmise import mise
from tensorflow_graphics.projects.nasa.lib import datasets
from tensorflow_graphics.projects.nasa.lib import models
import tensorflow_probability as tfp
from tqdm import trange
import trimesh
tf.disable_eager_execution()
tfd = tfp.distributions
def define_flags():
"""Define command line flags."""
flags = tf.app.flags
# Dataset Parameters
flags.DEFINE_enum("dataset", "amass",
list(k for k in datasets.dataset_dict.keys()),
"Name of the dataset.")
flags.DEFINE_string("data_dir", None, "Directory to load data from.")
flags.mark_flag_as_required("data_dir")
flags.DEFINE_integer("sample_bbox", 1024, "Number of bbox samples.")
flags.DEFINE_integer("sample_surf", 1024, "Number of surface samples.")
flags.DEFINE_integer("batch_size", 12, "Batch size.")
flags.DEFINE_integer("motion", 0, "Index of the motion for evaluation.")
flags.DEFINE_integer("subject", 0, "Index of the subject for training.")
# Model Parameters
flags.DEFINE_enum("model", "nasa", list(k for k in models.model_dict.keys()),
"Name of the model.")
flags.DEFINE_integer("n_parts", 24, "Number of parts.")
flags.DEFINE_integer("total_dim", 960,
"Dimension of the latent vector (in total).")
flags.DEFINE_bool("shared_decoder", False, "Whether to use shared decoder.")
flags.DEFINE_float("soft_blend", 5., "The constant to blend parts.")
flags.DEFINE_bool("projection", True,
"Whether to use projected shape features.")
flags.DEFINE_float("level_set", 0.5, "The value of the level_set.")
flags.DEFINE_integer("n_dims", 3, "The dimension of the query points.")
# Training Parameters
flags.DEFINE_float("lr", 1e-4, "Learning rate")
flags.DEFINE_string("train_dir", None, "Training directory.")
flags.mark_flag_as_required("train_dir")
flags.DEFINE_integer("max_steps", 200000, "Number of optimization steps.")
flags.DEFINE_integer("save_every", 5000,
"Number of steps to save checkpoint.")
flags.DEFINE_integer("summary_every", 500,
"Number of steps to save checkpoint.")
flags.DEFINE_float("label_w", 0.5, "Weight of labed vertices loss.")
flags.DEFINE_float("minimal_w", 0.05, "Weight of minimal loss.")
flags.DEFINE_bool("use_vert", True,
"Whether to use vertices on the mesh for training.")
flags.DEFINE_bool("use_joint", True,
"Whether to use joint-based transformation.")
flags.DEFINE_integer("sample_vert", 2048, "Number of vertex samples.")
# Evalulation Parameters
flags.DEFINE_bool("gen_mesh_only", False, "Whether to generate meshes only.")
# Tracking Parameters
flags.DEFINE_float("theta_lr", 5e-4, "Learning rate")
flags.DEFINE_integer("max_steps_per_frame", 1792,
"Number of optimization steps for tracking each frame.")
flags.DEFINE_enum("gradient_type", "reparam", ["vanilla", "reparam"],
"Type of gradient to use in theta optimization.")
flags.DEFINE_integer("sample_track_vert", 1024,
"Number of vertex samples for tracking each frame.")
flags.DEFINE_integer("n_noisy_samples", 8,
"Number of noisy samples per vertex")
flags.DEFINE_float("bandwidth", 1e-2, "Bandwidth of the gaussian noises.")
flags.DEFINE_bool(
"left_trans", False,
"Whether to use left side transformation (True) or right side (False).")
flags.DEFINE_string("joint_data", None, "Path to load joint data.")
flags.DEFINE_float("glue_w", 20., "Weight of length constraint loss.")
flags.DEFINE_float("trans_range", 1., "The range of allowed translations.")
def gen_mesh(sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
idx=0):
"""Generating meshes given a trained NASA model."""
scale = 1.1 # Scale of the padded bbox regarding the tight one.
level_set = hparams.level_set
latent_val = sess.run(latent, feed_dict)
mesh_extractor = mise.MISE(32, 3, level_set)
points = mesh_extractor.query()
gt_verts = batch_val["vert"].reshape([-1, 3])
gt_bbox = np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0)
gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5
gt_scale = (gt_bbox[1] - gt_bbox[0]).max()
while points.shape[0] != 0:
orig_points = points
points = points.astype(np.float32)
points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution -
0.5) * scale
points = points * gt_scale + gt_center
n_points = points.shape[1]
values = []
for i in range(0, n_points,
100000): # Add this to prevent OOM due to points overload.
feed_dict[latent_holder] = latent_val
feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000], axis=1)
value = sess.run(occ[:, idx], feed_dict)
values.append(value)
values = np.concatenate(values, axis=1)
values = values[0, :, 0].astype(np.float64)
mesh_extractor.update(orig_points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
try:
value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6)
verts, faces, normals, unused_var = measure.marching_cubes_lewiner(
value_grid, min(level_set, value_grid.max()))
del normals
verts -= 1
verts /= np.array([
value_grid.shape[0] - 3, value_grid.shape[1] - 3,
value_grid.shape[2] - 3
],
dtype=np.float32)
verts = scale * (verts - 0.5)
verts = verts * gt_scale + gt_center
faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)
mesh = trimesh.Trimesh(vertices=verts, faces=faces)
return mesh
except: # pylint: disable=bare-except
return None
def save_mesh(sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
pth="meshes"):
"""Generate and save meshes to disk given a trained NASA model."""
name = batch_val["name"][0].decode("utf-8")
subject, motion, frame = amass_name_helper(name)
pth = path.join(hparams.train_dir, pth, frame)
if not tf.io.gfile.isdir(pth):
tf.io.gfile.makedirs(pth)
start = hparams.n_parts
for i in range(start, hparams.n_parts + 1):
mesh_model = gen_mesh(
sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
idx=i)
mesh_name = "full_pred.obj"
if mesh_model is not None:
with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
mesh_model.export(fout, file_type="obj")
return subject, motion, frame, mesh_model
def save_pointcloud(data, hparams, pth="pointcloud"):
"""Save pointcloud to disk."""
name = data["name"][0].decode("utf-8")
unused_subject, unused_motion, frame = amass_name_helper(name)
pth = path.join(hparams.train_dir, pth, frame)
if not tf.io.gfile.isdir(pth):
tf.io.gfile.makedirs(pth)
mesh_name = "pointcloud.obj"
with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
pointcloud = data["vert"].reshape([-1, 3])
for v in pointcloud:
fout.write("v {0} {1} {2}\n".format(*v.tolist()))
def amass_name_helper(name):
name, frame = name.split("-")
subject = name[:5]
motion = name[6:]
return subject, motion, frame
def make_summary_feed_dict(
iou_hook,
iou,
best_hook,
best_iou,
):
feed_dict = {}
feed_dict[iou_hook] = iou
feed_dict[best_hook] = best_iou
return feed_dict
def parse_global_step(ckpt):
basename = path.basename(ckpt)
return int(basename.split("-")[-1])
def compute_iou(sess, feed_dict, latent_holder, point_holder, latent, occ,
point, label, hparams):
"""Compute IoU."""
iou = 0.
eps = 1e-9
latent_val = sess.run(latent, feed_dict)
n_points = point.shape[2]
preds = []
for start in range(0, n_points, 100000):
feed_dict[point_holder] = point[:, :, start:start + 100000]
feed_dict[latent_holder] = latent_val
pred = sess.run(occ, feed_dict)
preds.append(pred)
pred = np.concatenate(preds, axis=2)
pred = (pred >= hparams.level_set).astype(np.float32)
label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1)
iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps)
return iou
def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans,
joints, hparams):
"""Compute the prior term as a glue loss."""
n_dims = hparams.n_dims
# Invert the transformation
r_inv = inv_transforms[..., :n_dims, :n_dims]
t_inv = inv_transforms[..., :n_dims, -1:]
r = tf.transpose(r_inv, [0, 2, 1])
t = -tf.matmul(r, t_inv)
transforms = tf.concat(
[tf.concat([r, t], axis=-1), inv_transforms[..., -1:, :]], axis=-2)
transforms = tf.matmul(transforms, inv_first_frame_trans)
# Compute transformations of father joints and apply it to vectors from frame0
father_transforms = tf.reduce_sum(
tf.expand_dims(transforms, axis=1) *
connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]),
axis=0)
end_pts_homo = tf.expand_dims(
tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1), axis=-1)
end_pts_transformed = tf.matmul(father_transforms, end_pts_homo)
end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims]
# Compute vectors in current configuration
pred_links = tf.reshape(joints, [hparams.n_parts, n_dims])
# Compute distance between links and transformed vectors
return tf.reduce_sum(tf.square(pred_links - end_pts_transformed))
def vanilla_theta_gradient(model_fn, batch_holder, hparams):
"""A vanilla gradient estimator for the pose, theta."""
latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
"gen_mesh")
if hparams.sample_vert > 0:
points = batch_holder["point"]
weights = batch_holder["weight"]
n_vert = tf.shape(points)[2]
sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
minval=0,
maxval=n_vert,
dtype=tf.int32)
points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
batch_holder["point"] = points
batch_holder["weight"] = weights
unused_var0, unused_var1, occ = model_fn(batch_holder, None, None, "gen_mesh")
return latent_holder, latent, occ_eval, tf.reduce_mean(
tf.square(occ - hparams.level_set))
def reparam_theta_gradient(model_fn, batch_holder, hparams):
"""A gradient estimaor for the pose, theta, using the reparam trick."""
sigma = hparams.bandwidth
n_samples = hparams.n_noisy_samples
latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
"gen_mesh")
if hparams.sample_vert > 0:
points = batch_holder["point"]
weights = batch_holder["weight"]
n_vert = tf.shape(points)[2]
sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
minval=0,
maxval=n_vert,
dtype=tf.int32)
points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
batch_holder["point"] = points
batch_holder["weight"] = weights
dist = tfd.Normal(loc=0., scale=sigma)
n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert
noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims))
unused_var0, unused_var1, occ = model_fn(batch_holder, noises, None,
"gen_mesh")
occ = tf.reshape(occ, [1, hparams.n_parts + 1, -1, n_samples, 1])
occ = tf.reduce_mean(occ[:, hparams.n_parts:], axis=3)
return latent_holder, latent, occ_eval, tf.reduce_mean(
tf.square(occ - hparams.level_set))
def optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss,
sess, k, hparams):
"""Optimize the pose, theta, during tracking."""
sess.run(reset_op)
loss_val = 0
glue_val = 0
with trange(hparams.max_steps_per_frame) as t:
for unused_i in t:
loss_val, unused_var, rec_val, glue_val = sess.run(
[loss, train_op, rec_loss, glue_loss], feed_dict)
t.set_description("Frame_{0} {1:.4f}|{2:.4f}".format(
k, rec_val, glue_val))
return loss_val, glue_val
| 1.8125 | 2 |
setup.py | vermakhushboo/sedpy | 16 | 12794238 | <filename>setup.py
from setuptools import setup
# The text of the README file
with open("README.md", 'r') as f:
long_description = f.read()
# This call to setup() does all the work
setup(name="sedpy",
version="1.0.0",
description="Cross-platform stream-line editing tool",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mritunjaysharma394/sedpy",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8"
],
packages=["sedpy"],
include_package_data=True,
entry_points={'console_scripts': ['sedpy = sedpy.__main__:main']})
| 1.671875 | 2 |
dht/main.py | leonkoens/dht | 1 | 12794239 | <filename>dht/main.py
import argparse
import asyncio
import importlib
import logging
import random
import settings
import string
from collections import deque
from dht.node import SelfNode
from dht.protocol import DHTServerProtocol, DHTClientProtocol
from dht.routing import BucketTree
from dht.utils import hash_string
class DHT:
def __init__(self, listen_port, initial_node=None):
self.initial_node = initial_node
self.listen_port = listen_port
logging.info("Listening on {}".format(self.listen_port))
self.value_store = self.create_value_store()
self.self_key = self.create_self_key()
self.self_node = self.create_self_node()
self.bucket_tree = self.create_bucket_tree()
self.loop = asyncio.get_event_loop()
self.create_server()
if self.initial_node is not None:
self.connect_to_initial_node()
self.loop.create_task(self.refresh_nodes(key=self.self_key))
self.loop.create_task(self.connect_to_unconnected_nodes())
def create_value_store(self):
""" Create a Store to store values in. """
module = importlib.import_module('value_stores.' + settings.VALUE_STORE)
value_store_class = getattr(module, 'MemoryStore')
return value_store_class()
def create_self_key(self):
""" Create a key with which we will identify ourselves. """
key = hash_string(
''.join([random.choice(string.ascii_letters) for _ in range(160)]))
logging.info("Our key is {}".format(key))
return key
def create_self_node(self):
""" Create a Node to represent ourselves. """
self_node = SelfNode(key=self.self_key)
return self_node
def create_bucket_tree(self):
""" Create the BucketTree to store Nodes. """
tree = BucketTree(self.self_node)
return tree
def create_server(self):
""" Create the server to listen for incoming connections. """
listen = self.loop.create_server(
lambda: DHTServerProtocol(
self.self_key, self.bucket_tree, self.value_store, self.listen_port),
'0.0.0.0',
self.listen_port
)
self.loop.run_until_complete(listen)
def connect_to_initial_node(self):
""" Connect to the initial node if one is known. """
logging.info("Connecting to initial node: {}".format(self.initial_node))
connect = self.loop.create_connection(
lambda: DHTClientProtocol(
self.self_key, self.bucket_tree, self.value_store, self.listen_port),
self.initial_node[0],
int(self.initial_node[1])
)
self.loop.run_until_complete(connect)
async def refresh_nodes(self, key=None, wait=None):
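        """ Keep the routing table fresh: repeatedly run find_node for our own key
        and for random keys drawn from each leaf bucket's range, sleeping with an
        interval that doubles each round (capped at 30 seconds). """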
to_check = deque([key])
while True:
if wait is None:
wait = 3
# TODO maximum wait in the settings
wait = min(wait * 2, 30)
logging.debug("refresh_node sleeping {:d} seconds".format(wait))
await asyncio.sleep(wait)
try:
while True:
search_key = to_check.pop()
nodes = self.bucket_tree.find_nodes(search_key)
for node in nodes:
if node == self.self_node or node.protocol is None:
continue
node.protocol.find_node(search_key)
except IndexError:
for bucket_node in self.bucket_tree.get_leaf_bucket_nodes(include_self=False):
bucket_node_range = bucket_node.get_range()
key = hex(random.randrange(bucket_node_range[0], bucket_node_range[1]))[2:]
to_check.appendleft(key)
async def connect_to_unconnected_nodes(self):
while True:
await asyncio.sleep(1)
for node in self.bucket_tree.get_unconnected_nodes():
logging.debug(node)
connect = self.loop.create_connection(
lambda: DHTClientProtocol(
self.self_key, self.bucket_tree, self.value_store, self.listen_port),
node.address,
int(node.port)
)
_, protocol = await connect
node.protocol = protocol
protocol.node = node
def run(self):
""" Run the loop to start everything. """
try:
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.loop.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='A python DHT')
parser.add_argument(
'--initial-node', '-n', help='The initial node to connect to (1.2.3.4:5678).')
parser.add_argument(
'--listen-port', '-p', default=9999, help='The port to listen on.')
parser.add_argument('-v', action='store_true', dest='verbose_info', help='Verbose')
parser.add_argument('-vv', action='store_true', dest='verbose_debug', help='More verbose')
args = parser.parse_args()
if args.verbose_debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug('Setting logging level to debug')
if args.verbose_info:
logging.basicConfig(level=logging.INFO)
logging.info('Setting logging level to info')
if args.initial_node is not None:
initial_node = tuple(args.initial_node.split(":"))
else:
initial_node = None
dht = DHT(args.listen_port, initial_node)
dht.run()
| 2.3125 | 2 |
my_files_utils.py | luisst/Audio_Performance_Evaluation | 0 | 12794240 | <reponame>luisst/Audio_Performance_Evaluation<filename>my_files_utils.py<gh_stars>0
#!/usr/bin/env python3
"""
Utility functions to check and find files on UNIX-based systems.
This should be synchronized across all of my repos.
"""
import glob
import os
import sys
import shutil
import re
import numpy as np
import pandas as pd
from os.path import join as pj
from os.path import exists
import collections
import datetime
## List of the functions:
# - check_empty_folder
### # # # Check processes # # #
def check_empty_folder(current_folder_path):
""" Return true if folder is NOT empty """
if (len(os.listdir(current_folder_path)) != 0):
return True
else:
print("Folder {} is empty".format(current_folder_path))
return False
def check_ending_format(current_file_path, ending_format_substring):
if current_file_path[-3:] != ending_format_substring:
print("Error in the format, must end with {}!".format(ending_format_substring))
sys.exit()
def check_same_length(list1, list2):
if len(list1) != len(list2):
print("Error, your list1 and list2 have different lengths")
sys.exit()
def check_csv_exists(csv_path):
if exists(csv_path):
print("CSV file already exists, do you want to overwrite? (y)")
if input().lower() != 'y':
print("File not modified")
sys.exit()
### # # # Store TXT and CSV # # #
def log_message(msg, log_file, mode, both=True):
'''Function that prints and/or adds to log'''
#Always log file
with open(log_file, mode) as f:
f.write(msg)
#If {both} is true, print to terminal as well
if both:
print(msg, end='')
def write_my_csv(*args, **kwargs):
"""
Function to write csv files.
args:
- Columns for the csv (matched to the names)
kwargs:
- cols: List of names for columns (matched to args)
- path: output_path for the csv
"""
defaultKwargs = { 'time_format': True, 'txt_flag': False }
kwargs = { **defaultKwargs, **kwargs }
# my_df = pd.DataFrame(index=False)
my_df = pd.DataFrame()
csv_path = kwargs['path']
columns_values = kwargs['cols']
# check if csv file exists
check_csv_exists(csv_path)
    if len(args) >= 2:
        check_same_length(args[0], args[1])
    if len(args) >= 3:
        check_same_length(args[1], args[2])
idx = 0
for current_list in args:
my_df[columns_values[idx]] = current_list
idx = idx + 1
today_date = '_' + str(datetime.date.today())
datetime_object = datetime.datetime.now()
time_f = "-{:d}_{:02d}".format(datetime_object.hour, datetime_object.minute)
if kwargs['time_format']:
if kwargs['txt_flag']:
full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + today_date + time_f + '.txt'
my_df.to_csv(full_output_csv_path, header=False, sep='\t', index=False)
else:
full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + today_date + time_f + '.csv'
my_df.to_csv(full_output_csv_path, index=False)
else:
if kwargs['txt_flag']:
full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + '.txt'
my_df.to_csv(full_output_csv_path, header=False, sep='\t', index=False)
else:
full_output_csv_path = '/'.join(csv_path.split('/')[0:-1]) + '/' + csv_path.split('/')[-1][0:-4] + '.csv'
my_df.to_csv(full_output_csv_path, index=False)
### # # # Files processes # # #
def locate_single_txt(src_dir, obj_format = 'txt'):
"""
Input:
- Folder path
Output:
- Path of the only transcript found in folder
"""
all_texts = glob.glob("{}/*.{}".format(src_dir, obj_format))
    if len(all_texts) != 1:
        print("Expected exactly one .{} file in {}, found {}".format(obj_format, src_dir, len(all_texts)))
        sys.exit()
    input_transcript_path = all_texts[0]
    print(input_transcript_path)
    print("\n")
    return input_transcript_path
def get_pathsList_from_transcript(transcript_path, csv_flag=False):
"""
Given a transcript in txt (or csv), it returns a list of the paths of all the audios
"""
if csv_flag == False:
check_ending_format(transcript_path, 'txt')
transcript_data = pd.read_csv(transcript_path, sep="\t", header=None, index_col=False)
path_list = transcript_data[0].tolist()
return path_list
def get_list_of_GT(folder_path, csv_flag=False, single_col=False):
transcript_path = locate_single_txt(folder_path)
if csv_flag == False:
check_ending_format(transcript_path, 'txt')
transcript_data = pd.read_csv(transcript_path, sep="\t", header=None, index_col=False)
if single_col:
GT_list = transcript_data[0].tolist()
else:
GT_list = transcript_data[1].tolist()
return GT_list
def get_list_of_audios(folder_path, audio_extension = 'wav',
confirm_with_transcript = True,
verbose = False):
"""
Confirm_with_transcript requires: file path in first column
To-do: check if in csv file the header_none will throw count the header as the first row.
To-do: Add a function to only compare the filename, not the entire path
"""
all_audios = sorted(glob.glob("{}/*.{}".format(folder_path, audio_extension)))
### To double check with transcript paths
if confirm_with_transcript:
transcript_path = locate_single_txt(folder_path)
# Obtain list of paths from transcript
transcript_lists = sorted(get_pathsList_from_transcript(transcript_path))
# Compare 2 lists, full path or only names
if collections.Counter(all_audios) == collections.Counter(transcript_lists):
if verbose:
print("Transcript and audios are consistent")
else:
print("Transcript paths and names of files does not match")
sys.exit()
return all_audios
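if __name__ == "__main__":
    # Minimal usage sketch for write_my_csv above (hypothetical file names and columns).
    write_my_csv(['a.wav', 'b.wav'], [0.91, 0.77],
                 cols=['audio_path', 'score'],
                 path='./example_scores.csv',
                 time_format=False)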
| 2.6875 | 3 |
MNIST/mnist_keras.py | im0qianqian/ML_demo | 2 | 12794241 | import keras
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.datasets import mnist
x_train = None
y_train = None
x_test = None
y_test = None
def init():
global x_train, y_train, x_test, y_test
(x_train_tmp, y_train_tmp), (x_test_tmp, y_test_tmp) = mnist.load_data()
x_train = x_train_tmp.reshape(-1, 784)
x_test = x_test_tmp.reshape(-1, 784)
train_size = x_train.shape[0]
test_size = x_test.shape[0]
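    # Manually one-hot encode the integer labels (equivalent to keras.utils.to_categorical).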
y_train = np.zeros((train_size, 10))
for i in range(train_size):
y_train[i][y_train_tmp[i]] = 1
y_test = np.zeros((test_size, 10))
for i in range(test_size):
y_test[i][y_test_tmp[i]] = 1
pass
if __name__ == '__main__':
import time
init()
model = Sequential()
model.add(Dense(units=1000, activation='sigmoid', input_dim=784))
model.add(Dense(units=500, activation='sigmoid'))
model.add(Dense(units=10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
start_time = time.time()
model.fit(x_train, y_train, epochs=10, batch_size=1000)
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=1000)
print(loss_and_metrics)
print('Total Time: ', (time.time() - start_time))
| 3.09375 | 3 |
examples/exception.py | pawelmhm/scrapy-playwright | 155 | 12794242 | import logging
from pathlib import Path
from scrapy import Spider, Request
from scrapy.crawler import CrawlerProcess
from scrapy_playwright.page import PageCoroutine
class HandleTimeoutMiddleware:
def process_exception(self, request, exception, spider):
logging.info("Caught exception: %s", exception.__class__)
return Request(
url="https://httpbin.org/get",
meta={
"playwright": True,
"playwright_page_coroutines": [
PageCoroutine(
"screenshot", path=Path(__file__).parent / "recovered.png", full_page=True
),
],
},
)
class HandleExceptionSpider(Spider):
"""
Handle exceptions in the Playwright downloader, such as TimeoutError
"""
name = "awesome"
custom_settings = {
"PLAYWRIGHT_DEFAULT_NAVIGATION_TIMEOUT": 1000,
"DOWNLOADER_MIDDLEWARES": {
HandleTimeoutMiddleware: 100,
},
}
def start_requests(self):
yield Request(
url="https://httpbin.org/delay/300",
meta={"playwright": True},
)
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
process = CrawlerProcess(
settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"DOWNLOAD_HANDLERS": {
"https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
# "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
},
"RETRY_TIMES": 0,
}
)
process.crawl(HandleExceptionSpider)
process.start()
| 2.515625 | 3 |
Labs/Lab-3.1/fan.py | Josverl/MicroPython-Bootcamp | 4 | 12794243 | from machine import Pin, PWM
# Initialization
pwmFan = PWM(Pin(21), duty=0)
reverseFan = Pin(22, Pin.OUT)
# Turn Fan forward 70% speed
reverseFan.value(0)
pwmFan.duty(70)
# Decrease speed
pwmFan.duty(50)
# Decrease speed further (it might stop)
pwmFan.duty(30)
# Turn Fan backwards 70% speed
reverseFan.value(1)
pwmFan.duty(30)
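# Note: the duty sense appears inverted while the reverse pin is high (duty 30 ~ 70% speed);
# this is an assumption based on the comments in this lab script, not documented driver behaviour.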
# Decrease speed
pwmFan.duty(50)
# Decrease speed further (it might stop)
pwmFan.duty(70)
# Clean up
reverseFan(0)
pwmFan.deinit() | 3.265625 | 3 |
submissions/aising2019/b.py | m-star18/atcoder | 1 | 12794244 | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
n = int(readline())
a, b = map(int, readline().split())
p = list(map(int, readline().split()))
memo = [0, 0, 0]
for check in p:
if check <= a:
memo[0] += 1
elif a < check <= b:
memo[1] += 1
else:
memo[2] += 1
print(min(memo))
| 2.6875 | 3 |
flog/user/__init__.py | mutalisk999/Flog | 1 | 12794245 | """
MIT License
Copyright (c) 2020 <NAME>
"""
from flask import Blueprint
user_bp = Blueprint("user", __name__)
from . import views
| 1.4375 | 1 |
examples/icml_2018_experiments/scripts/generate_all_cave_reports.py | kaddynator/BOAH | 58 | 12794246 | <gh_stars>10-100
import os
from cave.cavefacade import CAVE
def analyze_all():
for dirpath, dirnames, filenames in os.walk('../opt_results/'):
if not dirnames:
print(dirpath)
cave = CAVE(folders=[dirpath],
output_dir=os.path.join("../CAVE_reports", dirpath[15:]), # output for debug/images/etc
ta_exec_dir=["."], # Only important for SMAC-results
file_format='BOHB' if os.path.exists(os.path.join(dirpath, 'configs.json')) else 'SMAC3',
#verbose_level='DEV_DEBUG',
verbose_level='OFF',
show_jupyter=False,
)
cave.analyze()
if __name__ == '__main__':
analyze_all()
| 2.578125 | 3 |
Pacote download/Exercicios/tabuada com while.py | Henrique-GM/Exercicios_de_Python | 0 | 12794247 | while True:
numero = int(input('Digite um número: '))
i = 0
while i <= 10:
print(f'{numero} x {i} = {numero * i}')
i += 1
resposta = str(input('Deseja continuar[S/N]: ')).strip().upper()[0]
if resposta == 'N':
break | 3.890625 | 4 |
tests.py | cooperlees/69 | 2 | 12794248 | #!/usr/bin/env python3
import asyncio
import unittest
import sixtynine
class TestSixtynine(unittest.TestCase):
def setUp(self):
self.loop = asyncio.get_event_loop()
def tearDown(self):
self.loop.close()
def test_mouthful(self):
self.assertEqual(self.loop.run_until_complete(sixtynine.mouthful()), 69)
| 2.625 | 3 |
GoingMerry/__init__.py | Luffin/ThousandSunny | 0 | 12794249 | <gh_stars>0
import os
from tornado import web
from settings import *
from handlers import handlers
theme_path = os.path.join(os.path.dirname(__file__), 'themes', theme_name)
class Application(web.Application):
def __init__(self):
settings = dict(
static_path = os.path.join(theme_path, 'static'),
template_path = os.path.join(theme_path, 'template'),
debug = True,
title = title,
email = email,
login_url = '/onepeice',
            cookie_secret = '<KEY>'
)
super(Application, self).__init__(handlers, **settings) | 1.9375 | 2 |
number_reader.py | CoffeeCodeRpt/python-crash-course | 0 | 12794250 | <reponame>CoffeeCodeRpt/python-crash-course<filename>number_reader.py
import json
filename = 'chapter_10/numbers.json'
with open(filename) as f_object:
numbers = json.load(f_object)
print(numbers) | 3.3125 | 3 |