repo_name (string, 7-94 chars) | repo_path (string, 4-237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10-680k chars) | apis (string, 2-840k chars)
---|---|---|---|---|
scmarquez/Hause-Price-Kaggle-Competition | analisis_de_variables.py | 5fe32fed87a7bf2c6e5f41761ea1c4dd00761f21 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 16:40:53 2017
@author: Sergio
"""
#Variable analysis
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import ensemble, tree, linear_model
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.utils import shuffle
import warnings
#Ignore warnings
warnings.filterwarnings('ignore')
#Read the data
#train holds the data the model will be trained on
train = pd.read_csv('train.csv')
#test holds the dataset used for testing
test = pd.read_csv('test.csv')
#First, drop the variables that have a high number of missing values
#Fraction of missing values for each variable in each set
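#1460 and 1459 are the row counts of train.csv and test.csv here, so each sum becomes a fraction of missing values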
NAs = pd.concat([train.isnull().sum()/1460, test.isnull().sum()/1459], axis=1, keys=['Train', 'Test'])
#print(NAs)
#Drop every variable with more than 0.2 (20%) missing values
eliminar = []
nvars = 0
for index, row in NAs.iterrows():
print(index)
print(row['Test'])
if (row['Test'] > 0.2) or (row ['Train'] > 0.2):
eliminar.append(index)
#'eliminar' holds the names of the variables that must be dropped outright
#Among the variables to drop, 'Alley' is special: NA does not mean unknown, it is just one more of its possible values
#That variable must remain in our dataset
print(eliminar)
eliminar.remove('Alley')
eliminar.remove('FireplaceQu')#Same situation as with Alley
train.drop(eliminar,axis=1, inplace=True)
test.drop(eliminar,axis=1, inplace=True)
"""
Ahora es necesario un análisis más profundo de las variables.
En primer lugar encontramos algunas variables que parecen tener una representación
numérica, como por ejemplo 'MSSubClass' o 'OverallCond'.
Al leer la documentación sobre que información aportan las variables
encontramos que OverallCond aunque sea una variable aparentemente nominal
expresa cosas que son medibles como la calidad, es decir muestra una puntuación entre 1 y 10
"""
#Numeric variables that must be converted to string
test['MSSubClass'] = test['MSSubClass'].astype(str)
train['MSSubClass'] = train['MSSubClass'].astype(str)
test['YrSold'] = test['YrSold'].astype(str)
train['YrSold'] = train['YrSold'].astype(str)
#Categorical variables that should be numeric, since they express a score
#It is logical to think that increasing such a score has a direct effect on the final price
ExterQualvalues = {'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
ExterCondvalues = {'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
BsmQualvalues = {'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
BsmCondvalues = {'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1,}}
HeatingQCvalues = {'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
KitchenQualvalues = {'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
FireplaceQuvalues = {'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
GarageCondvalues = {'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
GarageQualvalues = {'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
PoolQCvalues = {'PoolQC':{'Ex':4,'Gd':3,'TA':2,'Fa':1}}
#Replace the values in the tables
train.replace(ExterQualvalues,inplace=True)
train.replace(ExterCondvalues,inplace=True)
train.replace(BsmQualvalues,inplace=True)
train.replace(BsmCondvalues,inplace=True)
train.replace(HeatingQCvalues,inplace=True)
train.replace(KitchenQualvalues,inplace=True)
train.replace(FireplaceQuvalues,inplace=True)
train.replace(GarageCondvalues,inplace=True)
train.replace(GarageQualvalues,inplace=True)
train.replace(PoolQCvalues,inplace=True)
test.replace(ExterQualvalues,inplace=True)
test.replace(ExterCondvalues,inplace=True)
test.replace(BsmQualvalues,inplace=True)
test.replace(BsmCondvalues,inplace=True)
test.replace(HeatingQCvalues,inplace=True)
test.replace(KitchenQualvalues,inplace=True)
test.replace(FireplaceQuvalues,inplace=True)
test.replace(GarageCondvalues,inplace=True)
test.replace(GarageQualvalues,inplace=True)
test.replace(PoolQCvalues,inplace=True)
#Now every variable has a 'correct' data type
#How many variables of each type do we have
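#pop() removes the SalePrice target column from train and returns it; train and test are then stacked under the keys 'train'/'test'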
train_labels = train.pop('SalePrice')
features = pd.concat([train, test], keys=['train', 'test'])
enteras = features.dtypes[features.dtypes == 'int64'].index
flotantes = features.dtypes[features.dtypes == 'float64'].index
nominales = features.dtypes[features.dtypes == 'object'].index
#Convert to plain lists for later use
ent = []
for var in enteras:
ent.append(var)
flot = []
for var in flotantes:
flot.append(var)
nom = []
for var in nominales:
nom.append(var)
numericas = ent+flot
#Now the missing values of each variable must be filled in.
"""In some of the variables that were converted to numeric,
NaN does not mean the value is missing; it means a score of 0"""
features['BsmtQual'] = features['BsmtQual'].fillna(0)
features['BsmtCond'] = features['BsmtCond'].fillna(0)
features['FireplaceQu'] = features['FireplaceQu'].fillna(0)
features['GarageQual'] = features['GarageQual'].fillna(0)
features['GarageCond'] = features['GarageCond'].fillna(0)
#The remaining numeric variables can be filled with the mean
for var in numericas:
if features[var].isnull().sum() > 0:
features[var] = features[var].fillna(features[var].mean())
#The remaining nominal variables are filled with the most frequent value
for var in nominales:
if features[var].isnull().sum() > 0:
features[var] = features[var].fillna(features[var].mode()[0])
"""Una vez que la tabla de datos está en el formato correcto vamos a estudiar la correlación
de las variables con el precio. Las variables que presenten una correlación baja se descartarán
ya que lo único que van a hacer es hacer que nuestro modelo se impreciso.
Si se imputan demasiadas variables perderemos información valiosa y el modelo volverá a ser impreciso.
Sacando un Heatmap se puede ver la correlación de las variables"""
#train_labels = np.log(train_labels)#The log transform brings the data closer to a normal distribution
complete = features.loc['train']#Only the training rows are used
complete = pd.concat([complete,train_labels],axis=1)#Re-attach the price column
correlationPlot = complete.corr()#Keeps the correlation matrix in a DataFrame
f,ax = plt.subplots(figsize=(12,9))#Figure size configuration
sns.heatmap(correlationPlot,vmax=.8,square=True)#Draws the heatmap with the correlation values
plt.yticks(rotation=0)#rotate the axis tick labels so they are readable
plt.xticks(rotation=90)#rotate the axis tick labels so they are readable
plt.show()#Show the plot
f.savefig('Heatmap.png')#Save the plot to a file
"""La matriz de correlación muestra la correlación entre dos variables de forma que los valores
más claros muestran que dos variables tienen una correlación alta
El siguiente paso del análisis es buscar que variables muestran una correlación alta entre sí y eliminar
una de esas variables, ya que es información redundante y puede eliminarse. Otra manera de enfocar el problema
es que usar dos variables correlacionadas puede ayudar a sofocar el efecto del ruido en una variable.
En primer lugar es necesario descubrir que variables son las que determinan el precio de la vivienda usando la correlación.
"""
#Build the list of variables highly correlated with the house price
"""Inciso:
calcular la correlación antes de aplicar la escala logaritmica a los datos
tiene sentido, pues el coeficiente de correlación de Pearson no varía con
la escala y el origen. Además solo nos sirve para hacer una aproximación
hacia que variables usar o no en el algoritmo. Después si será necesario
hacer que las variables tengan una distribución normalizada.
"""
HighCorrelation = []
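#Keep the variables whose Pearson correlation with SalePrice is at least 0.5 in absolute value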
for index, row in correlationPlot.iterrows():
if (row['SalePrice'] >= 0.5) or (row ['SalePrice'] <= -0.5):
HighCorrelation.append(index)
print(row['SalePrice'])
print("total de variables: "+str(len(HighCorrelation)))
print(HighCorrelation)
"""Ahora hay que examniar las variables nominales que se tendrán en cuenta
Para hacer este análisis se va a usar una gráfica que exprese la relación entre
el precio y el valor de la vivienda."""
complete = features.loc['train']
complete = pd.concat([complete,train_labels],axis=1)
malas = [#'MSSubClass',
'LandContour',
'LandSlope',
#'RoofStyle',
#'RoofMatl',
'Exterior2nd',
#'Exterior1st',
'MasVnrType',
'BsmtExposure',
'Functional',
'YrSold']
##################################
#malas = ['Utilities', 'RoofMatl','Heating','Functional']
for var in malas:
data = pd.concat([complete[var],complete['SalePrice']],axis=1)
f,ax = plt.subplots(figsize=(12,9))
fig = sns.boxplot(x=var,y="SalePrice",data=data)
fig.axis(ymin=0,ymax=800000)
plt.xticks(rotation=90)
f.savefig(str(var)+'_Price.png')
"""
aparentemente malas variables:
LandContour
LandScope
RoofStyle
RoofMatl
Exterior2nd
Exterior1st
MasVnrType
BsmtExposure
Functional
YrSold
"""
"""Analisis con PCA"""
| [((20, 0, 20, 33), 'warnings.filterwarnings', 'warnings.filterwarnings', ({(20, 24, 20, 32): '"""ignore"""'}, {}), "('ignore')", False, 'import warnings\n'), ((24, 8, 24, 32), 'pandas.read_csv', 'pd.read_csv', ({(24, 20, 24, 31): '"""train.csv"""'}, {}), "('train.csv')", True, 'import pandas as pd\n'), ((26, 7, 26, 30), 'pandas.read_csv', 'pd.read_csv', ({(26, 19, 26, 29): '"""test.csv"""'}, {}), "('test.csv')", True, 'import pandas as pd\n'), ((112, 11, 112, 59), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((156, 11, 156, 52), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((159, 7, 159, 35), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((160, 0, 160, 48), 'seaborn.heatmap', 'sns.heatmap', (), '', True, 'import seaborn as sns\n'), ((161, 0, 161, 22), 'matplotlib.pyplot.yticks', 'plt.yticks', (), '', True, 'import matplotlib.pyplot as plt\n'), ((162, 0, 162, 23), 'matplotlib.pyplot.xticks', 'plt.xticks', (), '', True, 'import matplotlib.pyplot as plt\n'), ((163, 0, 163, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((192, 11, 192, 52), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((207, 8, 207, 63), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((208, 8, 208, 36), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((209, 7, 209, 49), 'seaborn.boxplot', 'sns.boxplot', (), '', True, 'import seaborn as sns\n'), ((211, 1, 211, 24), 'matplotlib.pyplot.xticks', 'plt.xticks', (), '', True, 'import matplotlib.pyplot as plt\n')] |
mdatsev/prostgres | query-gen.py | 3418258a8b832546ef4d5009867bf1cf79248b7b | import random
import sys
ntables = 100
ncols = 100
nrows = 10000
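# With these settings the script emits ntables * nrows = 1,000,000 INSERT statements (plus one CREATE TABLE per table)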
def printstderr(s):
sys.stderr.write(s + '\n')
sys.stderr.flush()
def get_value():
return random.randint(-99999999, 99999999)
for t in range(ntables):
printstderr(f'{t}/{ntables}')
print(f"create table x ({','.join(['x int'] * ncols)});")
for r in range(nrows):
print(f"insert into _last ({','.join(['x'] * ncols)}) values (", end='')
for c in range(ncols):
print(get_value(), end=('' if c==ncols-1 else ','))
print(');')
# 10 min to generate
# 3 min to process | [((9, 2, 9, 28), 'sys.stderr.write', 'sys.stderr.write', ({(9, 19, 9, 27): "(s + '\\n')"}, {}), "(s + '\\n')", False, 'import sys\n'), ((10, 2, 10, 20), 'sys.stderr.flush', 'sys.stderr.flush', ({}, {}), '()', False, 'import sys\n'), ((13, 9, 13, 44), 'random.randint', 'random.randint', ({(13, 24, 13, 33): '(-99999999)', (13, 35, 13, 43): '(99999999)'}, {}), '(-99999999, 99999999)', False, 'import random\n')] |
joshbenner/sensu-ansible-role | molecule/default/tests/test_default.py | ecc92ba3462d7edf50ad96ddda61080ba58c29f8 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
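# Checks the converged instance: Sensu package, config file content and permissions, services, API socket and plugin install.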
def test_packages(host):
package = host.package('sensu')
assert package.is_installed
assert '1.7.0' in package.version
def test_dir_ownership(host):
assert host.file('/opt/sensu').group == 'sensu'
def test_main_config(host):
f = host.file('/etc/sensu/config.json')
assert f.exists
assert f.is_file
assert f.user == 'sensu'
assert f.group == 'sensu'
assert f.mode == 0o600
assert f.contains('rabbitmq')
assert f.contains('check-cpu.rb')
assert f.contains('"foo": "bar"')
assert f.contains('example_subscription')
assert f.contains('"zip": "zap"')
assert not f.contains('subscription_to_be_overridden')
def test_server_running(host):
server = host.service('sensu-server')
assert server.is_running
assert server.is_enabled
def test_api_running(host):
api = host.service('sensu-api')
assert api.is_running
assert api.is_enabled
def test_client_running(host):
client = host.service('sensu-client')
assert client.is_running
assert client.is_enabled
def test_api_listening(host):
assert host.socket('tcp://0.0.0.0:4567').is_listening
def test_plugin_installed(host):
assert host.file('/opt/sensu/embedded/bin/check-memory.rb').exists
# Tests extension install/enable
def test_snmp_listening(host):
assert host.socket('udp://0.0.0.0:1062').is_listening
| [] |
moepman/wgskex | wgskex/worker/netlink.py | 7a931088b5910f8034ad5a1362777e08c47c42fe | import hashlib
import logging
import re
from dataclasses import dataclass
from datetime import datetime, timedelta
from textwrap import wrap
from typing import Dict, List
from pyroute2 import IPRoute, NDB, WireGuard
from wgskex.common.utils import mac2eui64
logger = logging.getLogger(__name__)
# TODO make loglevel configurable
logger.setLevel("DEBUG")
@dataclass
class WireGuardClient:
public_key: str
domain: str
remove: bool
@property
def lladdr(self) -> str:
m = hashlib.md5()
m.update(self.public_key.encode("ascii") + b"\n")
hashed_key = m.hexdigest()
hash_as_list = wrap(hashed_key, 2)
temp_mac = ":".join(["02"] + hash_as_list[:5])
lladdr = re.sub(r"/\d+$", "/128", mac2eui64(mac=temp_mac, prefix="fe80::/10"))
return lladdr
@property
def vx_interface(self) -> str:
return f"vx-{self.domain}"
@property
def wg_interface(self) -> str:
return f"wg-{self.domain}"
"""WireGuardClient describes complete configuration for a specific WireGuard client
Attributes:
public_key: WireGuard Public key
domain: Domain Name of the WireGuard peer
lladdr: IPv6 lladdr of the WireGuard peer
wg_interface: Name of the WireGuard interface this peer will use
vx_interface: Name of the VXLAN interface we set a route for the lladdr to
remove: Are we removing this peer or not?
"""
def wg_flush_stale_peers(domain: str) -> List[Dict]:
stale_clients = find_stale_wireguard_clients("wg-" + domain)
result = []
for stale_client in stale_clients:
stale_wireguard_client = WireGuardClient(
public_key=stale_client,
domain=domain,
remove=True,
)
result.append(link_handler(stale_wireguard_client))
return result
# pyroute2 stuff
def link_handler(client: WireGuardClient) -> Dict[str, Dict]:
results = {}
results.update({"Wireguard": wireguard_handler(client)})
try:
results.update({"Route": route_handler(client)})
except Exception as e:
results.update({"Route": {"Exception": e}})
results.update({"Bridge FDB": bridge_fdb_handler(client)})
return results
def bridge_fdb_handler(client: WireGuardClient) -> Dict:
with IPRoute() as ip:
return ip.fdb(
"del" if client.remove else "append",
# FIXME this list may be empty if the interface is not existing
ifindex=ip.link_lookup(ifname=client.vx_interface)[0],
lladdr="00:00:00:00:00:00",
dst=re.sub(r"/\d+$", "", client.lladdr),
nda_ifindex=ip.link_lookup(ifname=client.wg_interface)[0],
)
def wireguard_handler(client: WireGuardClient) -> Dict:
with WireGuard() as wg:
wg_peer = {
"public_key": client.public_key,
"persistent_keepalive": 15,
"allowed_ips": [client.lladdr],
"remove": client.remove,
}
return wg.set(client.wg_interface, peer=wg_peer)
def route_handler(client: WireGuardClient) -> Dict:
with IPRoute() as ip:
return ip.route(
"del" if client.remove else "replace",
dst=client.lladdr,
oif=ip.link_lookup(ifname=client.wg_interface)[0],
)
def find_wireguard_domains() -> List[str]:
with NDB() as ndb:
# ndb.interfaces[{"kind": "wireguard"}]] seems to trigger https://github.com/svinota/pyroute2/issues/737
iface_values = ndb.interfaces.values()
interfaces = [iface.get("ifname", "") for iface in iface_values if iface.get("kind", "") == "wireguard"]
result = [iface.removeprefix("wg-") for iface in interfaces if iface.startswith("wg-")]
return result
def find_stale_wireguard_clients(wg_interface: str) -> List[str]:
with WireGuard() as wg:
all_clients = []
infos = wg.info(wg_interface)
for info in infos:
clients = info.get_attr("WGDEVICE_A_PEERS")
if clients is not None:
all_clients.extend(clients)
three_minutes_ago = (datetime.now() - timedelta(minutes=3)).timestamp()
stale_clients = [
client.get_attr("WGPEER_A_PUBLIC_KEY").decode("utf-8")
for client in all_clients
# TODO add never connected peers to a list and remove them on next call
if 0 < (client.get_attr("WGPEER_A_LAST_HANDSHAKE_TIME") or {}).get("tv_sec", int()) < three_minutes_ago
]
return stale_clients
| [((13, 9, 13, 36), 'logging.getLogger', 'logging.getLogger', ({(13, 27, 13, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((26, 12, 26, 25), 'hashlib.md5', 'hashlib.md5', ({}, {}), '()', False, 'import hashlib\n'), ((30, 23, 30, 42), 'textwrap.wrap', 'wrap', ({(30, 28, 30, 38): 'hashed_key', (30, 40, 30, 41): '2'}, {}), '(hashed_key, 2)', False, 'from textwrap import wrap\n'), ((84, 9, 84, 18), 'pyroute2.IPRoute', 'IPRoute', ({}, {}), '()', False, 'from pyroute2 import IPRoute, NDB, WireGuard\n'), ((96, 9, 96, 20), 'pyroute2.WireGuard', 'WireGuard', ({}, {}), '()', False, 'from pyroute2 import IPRoute, NDB, WireGuard\n'), ((109, 9, 109, 18), 'pyroute2.IPRoute', 'IPRoute', ({}, {}), '()', False, 'from pyroute2 import IPRoute, NDB, WireGuard\n'), ((118, 9, 118, 14), 'pyroute2.NDB', 'NDB', ({}, {}), '()', False, 'from pyroute2 import IPRoute, NDB, WireGuard\n'), ((129, 9, 129, 20), 'pyroute2.WireGuard', 'WireGuard', ({}, {}), '()', False, 'from pyroute2 import IPRoute, NDB, WireGuard\n'), ((33, 42, 33, 85), 'wgskex.common.utils.mac2eui64', 'mac2eui64', (), '', False, 'from wgskex.common.utils import mac2eui64\n'), ((90, 16, 90, 51), 're.sub', 're.sub', ({(90, 23, 90, 31): '"""/\\\\d+$"""', (90, 33, 90, 35): '""""""', (90, 37, 90, 50): 'client.lladdr'}, {}), "('/\\\\d+$', '', client.lladdr)", False, 'import re\n'), ((138, 29, 138, 43), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((138, 46, 138, 66), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n')] |
Ju99ernaut/super-fastapi | api/main.py | 83c232bcaff1006d413a9945ced3ba398b673505 | import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from routes import items
import config
from constants import *
config.parse_args()
app = FastAPI(
title="API",
description="API boilerplate",
version="1.0.0",
openapi_tags=API_TAGS_METADATA,
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(items.router)
@app.get("/")
async def root():
return {
"docs": "api documentation at /docs or /redoc",
}
if __name__ == "__main__":
uvicorn.run("main:app", host=config.CONFIG.host, port=int(config.CONFIG.port))
| [((11, 0, 11, 19), 'config.parse_args', 'config.parse_args', ({}, {}), '()', False, 'import config\n'), ((12, 6, 17, 1), 'fastapi.FastAPI', 'FastAPI', (), '', False, 'from fastapi import FastAPI\n')] |
vallka/djellifique | gellifinsta/models.py | fb84fba6be413f9d38276d89ae84aeaff761218f | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.html import mark_safe
# Create your models here.
class Gellifinsta(models.Model):
class Meta:
ordering = ['-taken_at_datetime']
shortcode = models.CharField(_("Shortcode"), max_length=20)
taken_at_datetime = models.DateTimeField(_("taken at"))
username = models.CharField(_("Username"), max_length=100)
is_active = models.BooleanField(_("Active"),default=True)
is_video = models.BooleanField(_("Video"),default=False)
file_path = models.CharField(_("File Path"), max_length=500)
url = models.CharField(_("URL"), max_length=500)
created_dt = models.DateTimeField(_("Created Date/Time"), auto_now_add=True, null=True)
updated_dt = models.DateTimeField(_("Updated Date/Time"), auto_now=True, null=True)
caption = models.TextField(_("Caption"), blank=True, null=True)
tags = models.TextField(_("Tags"), blank=True, null=True)
def __str__(self):
return self.shortcode + ':' + str(self.taken_at_datetime)
def image_tag(self):
return mark_safe('<img src="%s" width="250" />' % (self.url))
image_tag.short_description = 'Image'
def tags_spaced(self):
return self.tags.replace(',',' ')
tags_spaced.short_description = 'Tags'
class Products(models.Model):
class Meta:
ordering = ['name']
name = models.CharField(_("Name"), max_length=100, unique=True)
is_active = models.BooleanField(_("Active"),default=True)
def __str__(self):
return self.name
| [((11, 33, 11, 47), 'django.utils.translation.ugettext_lazy', '_', ({(11, 35, 11, 46): '"""Shortcode"""'}, {}), "('Shortcode')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12, 45, 12, 58), 'django.utils.translation.ugettext_lazy', '_', ({(12, 47, 12, 57): '"""taken at"""'}, {}), "('taken at')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((13, 32, 13, 45), 'django.utils.translation.ugettext_lazy', '_', ({(13, 34, 13, 44): '"""Username"""'}, {}), "('Username')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((14, 36, 14, 47), 'django.utils.translation.ugettext_lazy', '_', ({(14, 38, 14, 46): '"""Active"""'}, {}), "('Active')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((15, 35, 15, 45), 'django.utils.translation.ugettext_lazy', '_', ({(15, 37, 15, 44): '"""Video"""'}, {}), "('Video')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16, 33, 16, 47), 'django.utils.translation.ugettext_lazy', '_', ({(16, 35, 16, 46): '"""File Path"""'}, {}), "('File Path')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17, 27, 17, 35), 'django.utils.translation.ugettext_lazy', '_', ({(17, 29, 17, 34): '"""URL"""'}, {}), "('URL')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((18, 38, 18, 60), 'django.utils.translation.ugettext_lazy', '_', ({(18, 40, 18, 59): '"""Created Date/Time"""'}, {}), "('Created Date/Time')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19, 38, 19, 60), 'django.utils.translation.ugettext_lazy', '_', ({(19, 40, 19, 59): '"""Updated Date/Time"""'}, {}), "('Updated Date/Time')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((20, 31, 20, 43), 'django.utils.translation.ugettext_lazy', '_', ({(20, 33, 20, 42): '"""Caption"""'}, {}), "('Caption')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((21, 28, 21, 37), 'django.utils.translation.ugettext_lazy', '_', ({(21, 30, 21, 36): '"""Tags"""'}, {}), "('Tags')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((27, 15, 27, 70), 'django.utils.html.mark_safe', 'mark_safe', ({(27, 25, 27, 69): '(\'<img src="%s" width="250" />\' % self.url)'}, {}), '(\'<img src="%s" width="250" />\' % self.url)', False, 'from django.utils.html import mark_safe\n'), ((39, 28, 39, 37), 'django.utils.translation.ugettext_lazy', '_', ({(39, 30, 39, 36): '"""Name"""'}, {}), "('Name')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((40, 36, 40, 47), 'django.utils.translation.ugettext_lazy', '_', ({(40, 38, 40, 46): '"""Active"""'}, {}), "('Active')", True, 'from django.utils.translation import ugettext_lazy as _\n')] |
wsqy/sacn_server | scanBase/migrations/0003_ipsection.py | e91a41a71b27926fbcfbe3f22bbb6bbc61b39461 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-16 13:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scanBase', '0002_auto_20180116_1321'),
]
operations = [
migrations.CreateModel(
name='IPSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip_section', models.CharField(blank=True, max_length=30, null=True, unique=True, verbose_name='ip段')),
('ip_start', models.GenericIPAddressField(blank=True, null=True, verbose_name='开始ip')),
('ip_end', models.GenericIPAddressField(blank=True, null=True, verbose_name='结束ip')),
('total', models.IntegerField(blank=True, null=True, verbose_name='总量')),
('deal_time', models.DateTimeField(blank=True, null=True, verbose_name='处理时间')),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scanBase.CountryInfo', verbose_name='所属国家')),
],
options={
'verbose_name_plural': 'ip段信息',
'verbose_name': 'ip段信息',
},
),
]
| [((19, 23, 19, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((20, 31, 20, 120), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((21, 29, 21, 105), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', (), '', False, 'from django.db import migrations, models\n'), ((22, 27, 22, 103), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', (), '', False, 'from django.db import migrations, models\n'), ((23, 26, 23, 91), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import migrations, models\n'), ((24, 30, 24, 102), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((25, 28, 25, 146), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
LostCow/KLUE | sts/train.py | 73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77 | import argparse
import numpy as np
import os
import torch
from transformers import AutoTokenizer, AutoConfig, Trainer, TrainingArguments
from model import RobertaForStsRegression
from dataset import KlueStsWithSentenceMaskDataset
from utils import read_json, seed_everything
from metric import compute_metrics
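# Fine-tunes a KLUE RoBERTa regression head on the KLUE STS task with the HuggingFace Trainer.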
def main(args):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
config = AutoConfig.from_pretrained(args.model_name_or_path)
config.num_labels = args.num_labels
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
train_file_path = os.path.join(args.data_dir, args.train_filename)
valid_file_path = os.path.join(args.data_dir, args.valid_filename)
train_json = read_json(train_file_path)
valid_json = read_json(valid_file_path)
train_dataset = KlueStsWithSentenceMaskDataset(train_json, tokenizer, 510)
    valid_dataset = KlueStsWithSentenceMaskDataset(valid_json, tokenizer, 510)
model = RobertaForStsRegression.from_pretrained(
args.model_name_or_path, config=config
)
model.to(device)
training_args = TrainingArguments(
output_dir=args.model_dir,
save_total_limit=args.save_total_limit,
save_steps=args.save_steps,
num_train_epochs=args.num_train_epochs,
learning_rate=args.learning_rate,
per_device_train_batch_size=args.batch_size,
per_device_eval_batch_size=64,
gradient_accumulation_steps=args.gradient_accumulation_steps,
weight_decay=args.weight_decay,
logging_dir="./logs",
logging_steps=args.save_steps,
evaluation_strategy=args.evaluation_strategy,
metric_for_best_model="pearsonr",
fp16=True,
fp16_opt_level="O1",
eval_steps=args.save_steps,
load_best_model_at_end=True,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=valid_dataset,
compute_metrics=compute_metrics,
)
trainer.train()
model.save_pretrained(args.model_dir)
tokenizer.save_pretrained(args.model_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# data_arg
parser.add_argument("--data_dir", type=str, default="./data")
parser.add_argument("--model_dir", type=str, default="./model")
parser.add_argument("--output_dir", type=str, default="./output")
parser.add_argument("--model_name_or_path", type=str, default="klue/roberta-large")
parser.add_argument(
"--train_filename", type=str, default="klue-sts-v1.1_train.json"
)
parser.add_argument("--valid_filename", type=str, default="klue-sts-v1.1_dev.json")
# train_arg
parser.add_argument("--num_labels", type=int, default=1)
parser.add_argument("--seed", type=int, default=15)
parser.add_argument("--num_train_epochs", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--learning_rate", type=float, default=5e-5)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--weight_decay", type=float, default=0.01)
# eval_arg
parser.add_argument("--evaluation_strategy", type=str, default="steps")
parser.add_argument("--save_steps", type=int, default=250)
parser.add_argument("--eval_steps", type=int, default=250)
parser.add_argument("--save_total_limit", type=int, default=2)
args = parser.parse_args()
main(args)
| [((16, 13, 16, 64), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', ({(16, 40, 16, 63): 'args.model_name_or_path'}, {}), '(args.model_name_or_path)', False, 'from transformers import AutoTokenizer, AutoConfig, Trainer, TrainingArguments\n'), ((18, 16, 18, 70), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', ({(18, 46, 18, 69): 'args.model_name_or_path'}, {}), '(args.model_name_or_path)', False, 'from transformers import AutoTokenizer, AutoConfig, Trainer, TrainingArguments\n'), ((20, 22, 20, 70), 'os.path.join', 'os.path.join', ({(20, 35, 20, 48): 'args.data_dir', (20, 50, 20, 69): 'args.train_filename'}, {}), '(args.data_dir, args.train_filename)', False, 'import os\n'), ((21, 22, 21, 70), 'os.path.join', 'os.path.join', ({(21, 35, 21, 48): 'args.data_dir', (21, 50, 21, 69): 'args.valid_filename'}, {}), '(args.data_dir, args.valid_filename)', False, 'import os\n'), ((23, 17, 23, 43), 'utils.read_json', 'read_json', ({(23, 27, 23, 42): 'train_file_path'}, {}), '(train_file_path)', False, 'from utils import read_json, seed_everything\n'), ((24, 17, 24, 43), 'utils.read_json', 'read_json', ({(24, 27, 24, 42): 'valid_file_path'}, {}), '(valid_file_path)', False, 'from utils import read_json, seed_everything\n'), ((26, 20, 26, 78), 'dataset.KlueStsWithSentenceMaskDataset', 'KlueStsWithSentenceMaskDataset', ({(26, 51, 26, 61): 'train_json', (26, 63, 26, 72): 'tokenizer', (26, 74, 26, 77): '510'}, {}), '(train_json, tokenizer, 510)', False, 'from dataset import KlueStsWithSentenceMaskDataset\n'), ((27, 20, 27, 78), 'dataset.KlueStsWithSentenceMaskDataset', 'KlueStsWithSentenceMaskDataset', ({(27, 51, 27, 61): 'train_json', (27, 63, 27, 72): 'tokenizer', (27, 74, 27, 77): '510'}, {}), '(train_json, tokenizer, 510)', False, 'from dataset import KlueStsWithSentenceMaskDataset\n'), ((29, 12, 31, 5), 'model.RobertaForStsRegression.from_pretrained', 'RobertaForStsRegression.from_pretrained', (), '', False, 'from model import RobertaForStsRegression\n'), ((34, 20, 52, 5), 'transformers.TrainingArguments', 'TrainingArguments', (), '', False, 'from transformers import AutoTokenizer, AutoConfig, Trainer, TrainingArguments\n'), ((54, 14, 60, 5), 'transformers.Trainer', 'Trainer', (), '', False, 'from transformers import AutoTokenizer, AutoConfig, Trainer, TrainingArguments\n'), ((67, 13, 67, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((15, 38, 15, 63), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n')] |
walkr/nanoservice | test/test_base_client.py | e2098986b1baa5f283167ae487d14f3c6c21961a | import unittest
from nanoservice import Responder
from nanoservice import Requester
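# Round-trip tests for the Requester/Responder pair over an inproc transport.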
class BaseTestCase(unittest.TestCase):
def setUp(self):
addr = 'inproc://test'
self.client = Requester(addr)
self.service = Responder(addr)
self.service.register('divide', lambda x, y: x / y)
self.service.register('echo', lambda x: x)
def tearDown(self):
self.client.socket.close()
self.service.socket.close()
class TestClient(BaseTestCase):
def test_build_payload(self):
payload = self.client.build_payload('echo', 'My Name')
method, args, ref = payload
self.assertTrue(method == 'echo')
self.assertTrue(len(payload) == 3)
def test_encoder(self):
data = {'name': 'Joe Doe'}
encoded = self.client.encode(data)
decoded = self.client.decode(encoded)
self.assertEqual(data, decoded)
def test_call_wo_receive(self):
# Requester side ops
method, args = 'echo', 'hello world'
payload = self.client.build_payload(method, args)
self.client.socket.send(self.client.encode(payload))
# Responder side ops
method, args, ref = self.service.receive()
self.assertEqual(method, 'echo')
self.assertEqual(args, 'hello world')
self.assertEqual(ref, payload[2])
def test_basic_socket_operation(self):
msg = 'abc'
self.client.socket.send(msg)
res = self.service.socket.recv().decode('utf-8')
self.assertEqual(msg, res)
def test_timeout(self):
c = Requester('inproc://timeout', timeouts=(1, 1))
c.socket.send('hello')
self.assertRaises(Exception, c.socket.recv)
if __name__ == '__main__':
unittest.main()
| [((58, 4, 58, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((11, 22, 11, 37), 'nanoservice.Requester', 'Requester', ({(11, 32, 11, 36): 'addr'}, {}), '(addr)', False, 'from nanoservice import Requester\n'), ((12, 23, 12, 38), 'nanoservice.Responder', 'Responder', ({(12, 33, 12, 37): 'addr'}, {}), '(addr)', False, 'from nanoservice import Responder\n'), ((53, 12, 53, 58), 'nanoservice.Requester', 'Requester', (), '', False, 'from nanoservice import Requester\n')] |
chidioguejiofor/airtech-api | airtech_api/flight/models.py | 45d77da0cc4230dd3cb7ab4cbb5168a9239850f5 | from airtech_api.utils.auditable_model import AuditableBaseModel
from django.db import models
# Create your models here.
class Flight(AuditableBaseModel):
class Meta:
db_table = 'Flight'
capacity = models.IntegerField(null=False)
location = models.TextField(null=False)
destination = models.TextField(null=False)
schedule = models.DateTimeField(null=False)
current_price = models.IntegerField()
type = models.CharField(
choices=(('local', 'local'), ('international', 'international')),
max_length=13,
)
| [((10, 15, 10, 46), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((11, 15, 11, 43), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((12, 18, 12, 46), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((13, 15, 13, 47), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((14, 20, 14, 41), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((16, 11, 19, 5), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n')] |
kaka-lin/autonomous-driving-notes | Sensor Fusion and Tracking/Kalman Filters/Gaussian/gaussian.py | 6c1b29752d6deb679637766b6cea5c6fe5b72319 | import numpy as np
import matplotlib.pyplot as plt
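# gaussian(x, mean, std) below evaluates the normal pdf: 1/sqrt(2*pi*std^2) * exp(-(x-mean)^2 / (2*std^2))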
def gaussian(x, mean, std):
std2 = np.power(std, 2)
return (1 / np.sqrt(2* np.pi * std2)) * np.exp(-.5 * (x - mean)**2 / std2)
if __name__ == "__main__":
gauss_1 = gaussian(10, 8, 2) # 0.12098536225957168
gauss_2 = gaussian(10, 10, 2) # 0.19947114020071635
print("Gauss(10, 8, 2): {}".format(gauss_1))
print("Gauss(10, 10, 2): {}".format(gauss_2))
    # Standard Gaussian distribution
mean = 0
variance = 1
std = np.sqrt(variance)
    # Plot between -5 and 5 with .001 steps.
x = np.arange(-5, 5, 0.001)
gauss = []
for i in x:
gauss.append(gaussian(i, mean, std))
gauss = np.array(gauss)
plt.plot(x, gauss)
plt.show()
| [((6, 11, 6, 27), 'numpy.power', 'np.power', ({(6, 20, 6, 23): 'std', (6, 25, 6, 26): '2'}, {}), '(std, 2)', True, 'import numpy as np\n'), ((20, 10, 20, 27), 'numpy.sqrt', 'np.sqrt', ({(20, 18, 20, 26): 'variance'}, {}), '(variance)', True, 'import numpy as np\n'), ((23, 8, 23, 31), 'numpy.arange', 'np.arange', ({(23, 18, 23, 20): '-5', (23, 22, 23, 23): '5', (23, 25, 23, 30): '0.001'}, {}), '(-5, 5, 0.001)', True, 'import numpy as np\n'), ((27, 12, 27, 27), 'numpy.array', 'np.array', ({(27, 21, 27, 26): 'gauss'}, {}), '(gauss)', True, 'import numpy as np\n'), ((29, 4, 29, 22), 'matplotlib.pyplot.plot', 'plt.plot', ({(29, 13, 29, 14): 'x', (29, 16, 29, 21): 'gauss'}, {}), '(x, gauss)', True, 'import matplotlib.pyplot as plt\n'), ((30, 4, 30, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((7, 44, 7, 78), 'numpy.exp', 'np.exp', ({(7, 51, 7, 77): '(-0.5 * (x - mean) ** 2 / std2)'}, {}), '(-0.5 * (x - mean) ** 2 / std2)', True, 'import numpy as np\n'), ((7, 16, 7, 40), 'numpy.sqrt', 'np.sqrt', ({(7, 24, 7, 39): '(2 * np.pi * std2)'}, {}), '(2 * np.pi * std2)', True, 'import numpy as np\n')] |
fazillatheef/lsbasi | part19/test_interpreter.py | 07e1a14516156a21ebe2d82e0bae4bba5ad73dd6 | import unittest
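# Exercises the spi module end to end: lexer tokens, parser errors, semantic analysis and interpretation.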
class LexerTestCase(unittest.TestCase):
def makeLexer(self, text):
from spi import Lexer
lexer = Lexer(text)
return lexer
def test_tokens(self):
from spi import TokenType
records = (
('234', TokenType.INTEGER_CONST, 234),
('3.14', TokenType.REAL_CONST, 3.14),
('*', TokenType.MUL, '*'),
('DIV', TokenType.INTEGER_DIV, 'DIV'),
('/', TokenType.FLOAT_DIV, '/'),
('+', TokenType.PLUS, '+'),
('-', TokenType.MINUS, '-'),
('(', TokenType.LPAREN, '('),
(')', TokenType.RPAREN, ')'),
(':=', TokenType.ASSIGN, ':='),
('.', TokenType.DOT, '.'),
('number', TokenType.ID, 'number'),
(';', TokenType.SEMI, ';'),
('BEGIN', TokenType.BEGIN, 'BEGIN'),
('END', TokenType.END, 'END'),
('PROCEDURE', TokenType.PROCEDURE, 'PROCEDURE'),
)
for text, tok_type, tok_val in records:
lexer = self.makeLexer(text)
token = lexer.get_next_token()
self.assertEqual(token.type, tok_type)
self.assertEqual(token.value, tok_val)
def test_lexer_exception(self):
from spi import LexerError
lexer = self.makeLexer('<')
with self.assertRaises(LexerError):
lexer.get_next_token()
class ParserTestCase(unittest.TestCase):
def makeParser(self, text):
from spi import Lexer, Parser
lexer = Lexer(text)
parser = Parser(lexer)
return parser
def test_expression_invalid_syntax_01(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 10 * ; {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, ';')
self.assertEqual(the_exception.token.lineno, 6)
def test_expression_invalid_syntax_02(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 1 (1 + 2); {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, '(')
self.assertEqual(the_exception.token.lineno, 6)
def test_maximum_one_VAR_block_is_allowed(self):
from spi import ParserError, ErrorCode
# zero VARs
parser = self.makeParser(
"""
PROGRAM Test;
BEGIN
END.
"""
)
parser.parse()
# one VAR
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
END.
"""
)
parser.parse()
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
VAR
b : INTEGER;
BEGIN
a := 5;
b := a + 10;
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, 'VAR')
self.assertEqual(the_exception.token.lineno, 5) # second VAR
class SemanticAnalyzerTestCase(unittest.TestCase):
def runSemanticAnalyzer(self, text):
from spi import Lexer, Parser, SemanticAnalyzer
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
return semantic_analyzer
def test_semantic_duplicate_id_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
a : REAL; {Duplicate identifier}
BEGIN
a := 5;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.DUPLICATE_ID)
self.assertEqual(the_exception.token.value, 'a')
self.assertEqual(the_exception.token.lineno, 5)
def test_semantic_id_not_found_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 5 + b;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.ID_NOT_FOUND)
self.assertEqual(the_exception.token.value, 'b')
class TestCallStack:
def __init__(self):
self._records = []
def push(self, ar):
self._records.append(ar)
def pop(self):
# do nothing
pass
def peek(self):
return self._records[-1]
class InterpreterTestCase(unittest.TestCase):
def makeInterpreter(self, text):
from spi import Lexer, Parser, SemanticAnalyzer, Interpreter
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
interpreter = Interpreter(tree)
interpreter.call_stack = TestCallStack()
return interpreter
def test_integer_arithmetic_expressions(self):
for expr, result in (
('3', 3),
('2 + 7 * 4', 30),
('7 - 8 DIV 4', 5),
('14 + 2 * 3 - 6 DIV 2', 17),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1))', 22),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1)) DIV (2 + 3) - 5 - 3 + (8)', 10),
('7 + (((3 + 2)))', 12),
('- 3', -3),
('+ 3', 3),
('5 - - - + - 3', 8),
('5 - - - + - (3 + 4) - +2', 10),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_float_arithmetic_expressions(self):
for expr, result in (
('3.14', 3.14),
('2.14 + 7 * 4', 30.14),
('7.14 - 8 / 4', 5.14),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : REAL;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_procedure_call(self):
text = """\
program Main;
procedure Alpha(a : integer; b : integer);
var x : integer;
begin
x := (a + b ) * 2;
end;
begin { Main }
Alpha(3 + 5, 7);
end. { Main }
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], 8)
self.assertEqual(ar['b'], 7)
self.assertEqual(ar['x'], 30)
self.assertEqual(ar.nesting_level, 2)
def test_program(self):
text = """\
PROGRAM Part12;
VAR
number : INTEGER;
a, b : INTEGER;
y : REAL;
PROCEDURE P1;
VAR
a : REAL;
k : INTEGER;
PROCEDURE P2;
VAR
a, z : INTEGER;
BEGIN {P2}
z := 777;
END; {P2}
BEGIN {P1}
END; {P1}
BEGIN {Part12}
number := 2;
a := number ;
b := 10 * a + 10 * number DIV 4;
y := 20 / 7 + 3.14
END. {Part12}
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(len(ar.members.keys()), 4)
self.assertEqual(ar['number'], 2)
self.assertEqual(ar['a'], 2)
self.assertEqual(ar['b'], 25)
self.assertAlmostEqual(ar['y'], float(20) / 7 + 3.14) # 5.9971...
if __name__ == '__main__':
unittest.main()
| [((322, 4, 322, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((7, 16, 7, 27), 'spi.Lexer', 'Lexer', ({(7, 22, 7, 26): 'text'}, {}), '(text)', False, 'from spi import Lexer, Parser, SemanticAnalyzer, Interpreter\n'), ((46, 16, 46, 27), 'spi.Lexer', 'Lexer', ({(46, 22, 46, 26): 'text'}, {}), '(text)', False, 'from spi import Lexer, Parser, SemanticAnalyzer, Interpreter\n'), ((47, 17, 47, 30), 'spi.Parser', 'Parser', ({(47, 24, 47, 29): 'lexer'}, {}), '(lexer)', False, 'from spi import Lexer, Parser, SemanticAnalyzer, Interpreter\n'), ((136, 16, 136, 27), 'spi.Lexer', 'Lexer', ({(136, 22, 136, 26): 'text'}, {}), '(text)', False, 'from spi import Lexer, Parser, SemanticAnalyzer, Interpreter\n'), ((137, 17, 137, 30), 'spi.Parser', 'Parser', ({(137, 24, 137, 29): 'lexer'}, {}), '(lexer)', False, 'from spi import Lexer, Parser, SemanticAnalyzer, Interpreter\n'), ((140, 28, 140, 46), 'spi.SemanticAnalyzer', 'SemanticAnalyzer', ({}, {}), '()', False, 'from spi import Lexer, Parser, SemanticAnalyzer, Interpreter\n'), ((199, 16, 199, 27), 'spi.Lexer', 'Lexer', ({(199, 22, 199, 26): 'text'}, {}), '(text)', False, 'from spi import Lexer, Parser, SemanticAnalyzer, Interpreter\n'), ((200, 17, 200, 30), 'spi.Parser', 'Parser', ({(200, 24, 200, 29): 'lexer'}, {}), '(lexer)', False, 'from spi import Lexer, Parser, SemanticAnalyzer, Interpreter\n'), ((203, 28, 203, 46), 'spi.SemanticAnalyzer', 'SemanticAnalyzer', ({}, {}), '()', False, 'from spi import Lexer, Parser, SemanticAnalyzer, Interpreter\n'), ((206, 22, 206, 39), 'spi.Interpreter', 'Interpreter', ({(206, 34, 206, 38): 'tree'}, {}), '(tree)', False, 'from spi import Lexer, Parser, SemanticAnalyzer, Interpreter\n')] |
Ferlern/Arctic-Tundra | bot_components/configurator.py | 407b8c38c31f6c930df662e87ced527b9fd26c61 | import json
from typing import TypedDict
from .bot_emoji import AdditionalEmoji
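# config.json stores a two-element list, [system settings, guild configuration]; see dump()/load() below.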
class Warn(TypedDict):
text: str
mute_time: int
ban: bool
class PersonalVoice(TypedDict):
categoty: int
price: int
slot_price: int
bitrate_price: int
class System(TypedDict):
token: str
initial_extensions: list[str]
class ExperienceSystem(TypedDict):
experience_channel: int
cooldown: int
minimal_message_length: int
experience_per_message: list[int]
roles: dict[str, int]
coins_per_level_up: int
class AutoTranslation(TypedDict):
channels: list
lang: str
class Config(TypedDict):
guild: int
token: str
prefixes: list[str]
commands_channels: list[int]
mute_role: int
suggestions_channel: int
moderators_roles: list[int]
warns_system: list[Warn]
coin: str
daily: int
marry_price: int
personal_voice: PersonalVoice
experience_system: ExperienceSystem
auto_translation: AutoTranslation
additional_emoji: AdditionalEmoji
class Configurator:
def __init__(self) -> None:
self.system: System
self.config: Config
def dump(self):
with open("./bot_components/config.json", "w") as write_file:
to_dump = [self.system, self.config]
json.dump(to_dump, write_file, indent=4)
def load(self):
with open("./bot_components/config.json", "r") as write_file:
data = json.load(write_file)
self.system = System(data[0])
self.config = Config(data[1])
def reload(self):
self.dump()
self.load()
configurator = Configurator()
configurator.load()
| [((64, 12, 64, 52), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((68, 19, 68, 40), 'json.load', 'json.load', ({(68, 29, 68, 39): 'write_file'}, {}), '(write_file)', False, 'import json\n')] |
ihash5/reinforcement-learning | recnn/utils/plot.py | c72e9db33c6ed6abd34e9f48012189369b7cd5d0 | from scipy.spatial import distance
from scipy import ndimage
import matplotlib.pyplot as plt
import torch
from scipy import stats
import numpy as np
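# Plotting helpers: pairwise-distance heatmaps for action embeddings, smoothed loss curves, and KDE plots of reconstruction error.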
def pairwise_distances_fig(embs):
embs = embs.detach().cpu().numpy()
similarity_matrix_cos = distance.cdist(embs, embs, 'cosine')
similarity_matrix_euc = distance.cdist(embs, embs, 'euclidean')
fig = plt.figure(figsize=(16,10))
ax = fig.add_subplot(121)
cax = ax.matshow(similarity_matrix_cos)
fig.colorbar(cax)
ax.set_title('Cosine')
ax.axis('off')
ax = fig.add_subplot(122)
cax = ax.matshow(similarity_matrix_euc)
fig.colorbar(cax)
ax.set_title('Euclidian')
ax.axis('off')
fig.suptitle('Action pairwise distances')
plt.close()
return fig
def pairwise_distances(embs):
fig = pairwise_distances_fig(embs)
fig.show()
def smooth(scalars, weight): # Weight between 0 and 1
last = scalars[0] # First value in the plot (first timestep)
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed.append(smoothed_val) # Save it
last = smoothed_val # Anchor the last smoothed value
return smoothed
def smooth_gauss(arr, var):
return ndimage.gaussian_filter1d(arr, var)
class Plotter:
def __init__(self, loss, style):
self.loss = loss
self.style = style
self.smoothing = lambda x: smooth_gauss(x, 4)
def set_smoothing_func(self, f):
self.smoothing = f
def plot_loss(self):
for row in self.style:
fig, axes = plt.subplots(1, len(row), figsize=(16, 6))
if len(row) == 1: axes = [axes]
for col in range(len(row)):
key = row[col]
axes[col].set_title(key)
axes[col].plot(self.loss['train']['step'],
self.smoothing(self.loss['train'][key]), 'b-',
label='train')
axes[col].plot(self.loss['test']['step'],
self.loss['test'][key], 'r-.',
label='test')
plt.legend()
plt.show()
def log_loss(self, key, item, test=False):
kind = 'train'
if test:
kind = 'test'
self.loss[kind][key].append(item)
def log_losses(self, losses, test=False):
for key, val in losses.items():
self.log_loss(key, val, test)
@staticmethod
def kde_reconstruction_error(ad, gen_actions, true_actions, device=torch.device('cpu')):
def rec_score(actions):
return ad.rec_error(torch.tensor(actions).to(device).float()).detach().cpu().numpy()
true_scores = rec_score(true_actions)
gen_scores = rec_score(gen_actions)
true_kernel = stats.gaussian_kde(true_scores)
gen_kernel = stats.gaussian_kde(gen_scores)
x = np.linspace(0, 1000, 100)
probs_true = true_kernel(x)
probs_gen = gen_kernel(x)
fig = plt.figure(figsize=(16, 10))
ax = fig.add_subplot(111)
ax.plot(x, probs_true, '-b', label='true dist')
ax.plot(x, probs_gen, '-r', label='generated dist')
ax.legend()
return fig
@staticmethod
def plot_kde_reconstruction_error(*args, **kwargs):
fig = Plotter.kde_reconstruction_error(*args, **kwargs)
fig.show()
| [((11, 28, 11, 64), 'scipy.spatial.distance.cdist', 'distance.cdist', ({(11, 43, 11, 47): 'embs', (11, 49, 11, 53): 'embs', (11, 55, 11, 63): '"""cosine"""'}, {}), "(embs, embs, 'cosine')", False, 'from scipy.spatial import distance\n'), ((12, 28, 12, 67), 'scipy.spatial.distance.cdist', 'distance.cdist', ({(12, 43, 12, 47): 'embs', (12, 49, 12, 53): 'embs', (12, 55, 12, 66): '"""euclidean"""'}, {}), "(embs, embs, 'euclidean')", False, 'from scipy.spatial import distance\n'), ((14, 10, 14, 37), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((29, 4, 29, 15), 'matplotlib.pyplot.close', 'plt.close', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((50, 11, 50, 46), 'scipy.ndimage.gaussian_filter1d', 'ndimage.gaussian_filter1d', ({(50, 37, 50, 40): 'arr', (50, 42, 50, 45): 'var'}, {}), '(arr, var)', False, 'from scipy import ndimage\n'), ((76, 8, 76, 18), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((89, 71, 89, 90), 'torch.device', 'torch.device', ({(89, 84, 89, 89): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n'), ((97, 22, 97, 53), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', ({(97, 41, 97, 52): 'true_scores'}, {}), '(true_scores)', False, 'from scipy import stats\n'), ((98, 21, 98, 51), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', ({(98, 40, 98, 50): 'gen_scores'}, {}), '(gen_scores)', False, 'from scipy import stats\n'), ((100, 12, 100, 37), 'numpy.linspace', 'np.linspace', ({(100, 24, 100, 25): '0', (100, 27, 100, 31): '1000', (100, 33, 100, 36): '100'}, {}), '(0, 1000, 100)', True, 'import numpy as np\n'), ((103, 14, 103, 42), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((75, 12, 75, 24), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((92, 32, 92, 53), 'torch.tensor', 'torch.tensor', ({(92, 45, 92, 52): 'actions'}, {}), '(actions)', False, 'import torch\n')] |
pazzy-stack/twilio | tests/integration/insights/v1/call/test_metric.py | d3b9b9f1b17b9de89b2528e8d2ffd33edf9676e0 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class MetricTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.metrics.list()
self.holodeck.assert_has_request(Request(
'get',
'https://insights.twilio.com/v1/Voice/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Metrics',
))
def test_read_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0",
"previous_page_url": null,
"next_page_url": null,
"key": "metrics",
"url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0"
},
"metrics": [
{
"timestamp": "2019-10-07T22:32:06Z",
"call_sid": "CA7569efe0253644fa4a88aa97beca3310",
"account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9",
"edge": "sdk_edge",
"direction": "both",
"sdk_edge": {
"interval": {
"packets_received": 50,
"packets_lost": 0,
"audio_in": {
"value": 81.0
},
"audio_out": {
"value": 5237.0
},
"jitter": {
"value": 9
},
"mos": {
"value": 4.39
},
"rtt": {
"value": 81
}
},
"cumulative": {
"bytes_received": 547788,
"bytes_sent": 329425,
"packets_received": 3900,
"packets_lost": 0,
"packets_sent": 3934
}
},
"client_edge": null,
"carrier_edge": null,
"sip_edge": null,
"gateway": null,
"client": null
}
]
}
'''
))
actual = self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.metrics.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 10,
"page_size": 5,
"first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=0",
"previous_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=9&PageToken=DP10",
"next_page_url": null,
"key": "metrics",
"url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=10"
},
"metrics": [
{
"timestamp": "2019-10-07T22:32:06Z",
"call_sid": "CA7569efe0253644fa4a88aa97beca3310",
"account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9",
"edge": "sdk_edge",
"direction": "both",
"sdk_edge": {
"interval": {
"packets_received": 50,
"packets_lost": 0,
"audio_in": {
"value": 81.0
},
"audio_out": {
"value": 5237.0
},
"jitter": {
"value": 9
},
"mos": {
"value": 4.39
},
"rtt": {
"value": 81
}
},
"cumulative": {
"bytes_received": 547788,
"bytes_sent": 329425,
"packets_received": 3900,
"packets_lost": 0,
"packets_sent": 3934
}
},
"client_edge": null,
"carrier_edge": null,
"sip_edge": null,
"gateway": null,
"client": null
}
]
}
'''
))
actual = self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.metrics.list()
self.assertIsNotNone(actual)
| [((18, 27, 18, 44), 'twilio.http.response.Response', 'Response', ({(18, 36, 18, 39): '(500)', (18, 41, 18, 43): '""""""'}, {}), "(500, '')", False, 'from twilio.http.response import Response\n'), ((24, 41, 27, 9), 'tests.holodeck.Request', 'Request', ({(25, 12, 25, 17): '"""get"""', (26, 12, 26, 93): '"""https://insights.twilio.com/v1/Voice/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Metrics"""'}, {}), "('get',\n 'https://insights.twilio.com/v1/Voice/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Metrics'\n )", False, 'from tests.holodeck import Request\n'), ((30, 27, 87, 9), 'twilio.http.response.Response', 'Response', ({(31, 12, 31, 15): '(200)', (32, 12, 86, 15): '"""\n {\n "meta": {\n "page": 0,\n "page_size": 50,\n "first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0",\n "previous_page_url": null,\n "next_page_url": null,\n "key": "metrics",\n "url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0"\n },\n "metrics": [\n {\n "timestamp": "2019-10-07T22:32:06Z",\n "call_sid": "CA7569efe0253644fa4a88aa97beca3310",\n "account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9",\n "edge": "sdk_edge",\n "direction": "both",\n "sdk_edge": {\n "interval": {\n "packets_received": 50,\n "packets_lost": 0,\n "audio_in": {\n "value": 81.0\n },\n "audio_out": {\n "value": 5237.0\n },\n "jitter": {\n "value": 9\n },\n "mos": {\n "value": 4.39\n },\n "rtt": {\n "value": 81\n }\n },\n "cumulative": {\n "bytes_received": 547788,\n "bytes_sent": 329425,\n "packets_received": 3900,\n "packets_lost": 0,\n "packets_sent": 3934\n }\n },\n "client_edge": null,\n "carrier_edge": null,\n "sip_edge": null,\n "gateway": null,\n "client": null\n }\n ]\n }\n """'}, {}), '(200,\n """\n {\n "meta": {\n "page": 0,\n "page_size": 50,\n "first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0",\n "previous_page_url": null,\n "next_page_url": null,\n "key": "metrics",\n "url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0"\n },\n "metrics": [\n {\n "timestamp": "2019-10-07T22:32:06Z",\n "call_sid": "CA7569efe0253644fa4a88aa97beca3310",\n "account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9",\n "edge": "sdk_edge",\n "direction": "both",\n "sdk_edge": {\n "interval": {\n "packets_received": 50,\n "packets_lost": 0,\n "audio_in": {\n "value": 81.0\n },\n "audio_out": {\n "value": 5237.0\n },\n "jitter": {\n "value": 9\n },\n "mos": {\n "value": 4.39\n },\n "rtt": {\n "value": 81\n }\n },\n "cumulative": {\n "bytes_received": 547788,\n "bytes_sent": 329425,\n "packets_received": 3900,\n "packets_lost": 0,\n "packets_sent": 3934\n }\n },\n "client_edge": null,\n "carrier_edge": null,\n "sip_edge": null,\n "gateway": null,\n "client": null\n }\n ]\n }\n """\n )', False, 'from twilio.http.response import Response\n'), ((95, 27, 152, 9), 'twilio.http.response.Response', 'Response', ({(96, 12, 96, 15): '(200)', (97, 12, 151, 15): '"""\n {\n "meta": {\n "page": 10,\n "page_size": 5,\n "first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=0",\n "previous_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=9&PageToken=DP10",\n "next_page_url": null,\n "key": "metrics",\n "url": 
"https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=10"\n },\n "metrics": [\n {\n "timestamp": "2019-10-07T22:32:06Z",\n "call_sid": "CA7569efe0253644fa4a88aa97beca3310",\n "account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9",\n "edge": "sdk_edge",\n "direction": "both",\n "sdk_edge": {\n "interval": {\n "packets_received": 50,\n "packets_lost": 0,\n "audio_in": {\n "value": 81.0\n },\n "audio_out": {\n "value": 5237.0\n },\n "jitter": {\n "value": 9\n },\n "mos": {\n "value": 4.39\n },\n "rtt": {\n "value": 81\n }\n },\n "cumulative": {\n "bytes_received": 547788,\n "bytes_sent": 329425,\n "packets_received": 3900,\n "packets_lost": 0,\n "packets_sent": 3934\n }\n },\n "client_edge": null,\n "carrier_edge": null,\n "sip_edge": null,\n "gateway": null,\n "client": null\n }\n ]\n }\n """'}, {}), '(200,\n """\n {\n "meta": {\n "page": 10,\n "page_size": 5,\n "first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=0",\n "previous_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=9&PageToken=DP10",\n "next_page_url": null,\n "key": "metrics",\n "url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=10"\n },\n "metrics": [\n {\n "timestamp": "2019-10-07T22:32:06Z",\n "call_sid": "CA7569efe0253644fa4a88aa97beca3310",\n "account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9",\n "edge": "sdk_edge",\n "direction": "both",\n "sdk_edge": {\n "interval": {\n "packets_received": 50,\n "packets_lost": 0,\n "audio_in": {\n "value": 81.0\n },\n "audio_out": {\n "value": 5237.0\n },\n "jitter": {\n "value": 9\n },\n "mos": {\n "value": 4.39\n },\n "rtt": {\n "value": 81\n }\n },\n "cumulative": {\n "bytes_received": 547788,\n "bytes_sent": 329425,\n "packets_received": 3900,\n "packets_lost": 0,\n "packets_sent": 3934\n }\n },\n "client_edge": null,\n "carrier_edge": null,\n "sip_edge": null,\n "gateway": null,\n "client": null\n }\n ]\n }\n """\n )', False, 'from twilio.http.response import Response\n')] |
essepuntato/comp-think | 2017-2018/lecture-notes/python/02-algorithms_listing_8_contains_word.py | 3dac317bda0eb7650adc4a92c1ccb8a4ce87a3a6 | def contains_word(first_word, second_word, bibliographic_entry):
contains_first_word = first_word in bibliographic_entry
contains_second_word = second_word in bibliographic_entry
if contains_first_word and contains_second_word:
return 2
elif contains_first_word or contains_second_word:
return 1
else:
return 0
if __name__ == "__main__":
bibliographic_entry = "Peroni, S., Osborne, F., Di Iorio, A., Nuzzolese, A. G., Poggi, F., Vitali, F., " \
"Motta, E. (2017). Research Articles in Simplified HTML: a Web-first format for " \
"HTML-based scholarly articles. PeerJ Computer Science 3: e132. e2513. " \
"DOI: https://doi.org/10.7717/peerj-cs.132"
print(contains_word("Peroni", "Osborne", bibliographic_entry))
print(contains_word("Peroni", "Asprino", bibliographic_entry))
print(contains_word("Reforgiato", "Osborne", bibliographic_entry))
print(contains_word("Reforgiato", "Asprino", bibliographic_entry))
| [] |
ivaivalous/ivodb | backend/user/scripter.py | e9b0969225fdb725d35a2ecfab21f87d1d9b2a00 | #!/usr/bin/env python
import responses
from selenium import webdriver
# This file contains/references the default JS
# used to provide functions dealing with input/output
SCRIPT_RUNNER = "runner.html"
ENCODING = 'utf-8'
PAGE_LOAD_TIMEOUT = 5
PAGE_LOAD_TIMEOUT_MS = PAGE_LOAD_TIMEOUT * 1000
capabilities = webdriver.DesiredCapabilities.PHANTOMJS
capabilities["phantomjs.page.settings.resourceTimeout"] = PAGE_LOAD_TIMEOUT_MS
capabilities["phantomjs.page.settings.loadImages"] = False
SCRIPT_TEMPLATE = """
window.requestData = {{method:"{0}", headers:{1}, data:"{2}", params:{3}}};
window.method = requestData.method;
window.headers = requestData.headers;
window.data = requestData.data;
window.params = requestData.params;
window.logs = [];
window.log = function(message) {{
window.logs.push({{
"time": (new Date).getTime(),
"message": message
}})
}};
"""
GET_LOGS_SCRIPT = 'return window.logs;'
class Scripter:
def __init__(self):
self.driver = webdriver.PhantomJS(desired_capabilities=capabilities)
self.driver.implicitly_wait(PAGE_LOAD_TIMEOUT)
self.driver.set_page_load_timeout(PAGE_LOAD_TIMEOUT)
def run(self, request, script_body, input_params):
self.driver.get(SCRIPT_RUNNER)
self.driver.execute_script(
Scripter.build_runner_script(request, input_params))
try:
response = self.execute_user_script(script_body)
logs = self.driver.execute_script(GET_LOGS_SCRIPT)
return response.encode(ENCODING), logs
except:
return responses.get_invalid_request(), []
def execute_user_script(self, script_body):
"""Execute a user-contributed script."""
return self.driver.execute_script(script_body)
@staticmethod
def build_runner_script(request, input_params):
# Build JS related to having access to input
# and request data.
return SCRIPT_TEMPLATE.format(
request.method,
Scripter.build_headers_map(request.headers),
request.get_data().encode(ENCODING),
Scripter.build_params_map(input_params.encode(ENCODING)))
@staticmethod
def build_params_map(input_params):
# input_params looks like "test=aaa&test2=jjj"
couples = input_params.split("&")
params_map = {}
for couple in couples:
c = couple.split("=")
key = c[0]
value = c[1] if len(c) > 1 else ""
params_map[key] = value
return params_map
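    # Illustrative example (hypothetical input):
    #   Scripter.build_params_map("test=aaa&test2=jjj") -> {"test": "aaa", "test2": "jjj"}
    # A key with no "=" maps to the empty string.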
@staticmethod
def build_headers_map(headers):
headers_map = {}
for key, value in headers:
if 'jwt=' in value:
continue
headers_map[key] = value.encode(ENCODING)
return headers_map
| [((38, 22, 38, 76), 'selenium.webdriver.PhantomJS', 'webdriver.PhantomJS', (), '', False, 'from selenium import webdriver\n'), ((52, 19, 52, 50), 'responses.get_invalid_request', 'responses.get_invalid_request', ({}, {}), '()', False, 'import responses\n')] |
luhouxiang/byrobot | bwtougu/api/names.py | e110e7865965a344d2b61cb925c959cee1387758 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
VALID_HISTORY_FIELDS = [
'datetime', 'open', 'close', 'high', 'low', 'total_turnover', 'volume',
'acc_net_value', 'discount_rate', 'unit_net_value',
'limit_up', 'limit_down', 'open_interest', 'basis_spread', 'settlement', 'prev_settlement'
]
VALID_GET_PRICE_FIELDS = [
'OpeningPx', 'ClosingPx', 'HighPx', 'LowPx', 'TotalTurnover', 'TotalVolumeTraded',
'AccNetValue', 'UnitNetValue', 'DiscountRate',
'SettlPx', 'PrevSettlPx', 'OpenInterest', 'BasisSpread', 'HighLimitPx', 'LowLimitPx'
]
VALID_TENORS = [
'0S', '1M', '2M', '3M', '6M', '9M', '1Y', '2Y', '3Y', '4Y',
'5Y', '6Y', '7Y', '8Y', '9Y', '10Y', '15Y', '20Y', '30Y',
'40Y', '50Y'
]
VALID_INSTRUMENT_TYPES = [
'CS', 'Future', 'INDX', 'ETF', 'LOF', 'SF', 'FenjiA', 'FenjiB', 'FenjiMu',
'Stock', 'Fund', 'Index'
]
VALID_XUEQIU_FIELDS = [
'new_comments', 'total_comments',
'new_followers', 'total_followers',
'sell_actions', 'buy_actions',
]
VALID_MARGIN_FIELDS = [
'margin_balance',
'buy_on_margin_value',
'short_sell_quantity',
'margin_repayment',
'short_balance_quantity',
'short_repayment_quantity',
'short_balance',
'total_balance'
]
VALID_SHARE_FIELDS = [
'total', 'circulation_a', 'management_circulation', 'non_circulation_a', 'total_a'
]
VALID_TURNOVER_FIELDS = (
'today',
'week',
'month',
'three_month',
'six_month',
'year',
'current_year',
'total',
)
| [] |
dveni/causal-text-embeddings | src/PeerRead/data_cleaning/process_PeerRead_abstracts.py | 82104f3fb6fd540cf98cb4ca0fd5b5d1fb5f757a | """
Simple pre-processing for PeerRead papers.
Takes in JSON formatted data from ScienceParse and outputs a tfrecord
Reference example:
https://github.com/tensorlayer/tensorlayer/blob/9528da50dfcaf9f0f81fba9453e488a1e6c8ee8f/examples/data_process/tutorial_tfrecord3.py
"""
import argparse
import glob
import os
import random
import io
import json
from dateutil.parser import parse as parse_date
import tensorflow as tf
import bert.tokenization as tokenization
from PeerRead.ScienceParse.Paper import Paper
from PeerRead.ScienceParse.ScienceParseReader import ScienceParseReader
from PeerRead.data_cleaning.PeerRead_hand_features import get_PeerRead_hand_features
rng = random.Random(0)
def process_json_paper(paper_json_filename, scienceparse_dir, tokenizer):
paper = Paper.from_json(paper_json_filename)
paper.SCIENCEPARSE = ScienceParseReader.read_science_parse(paper.ID, paper.TITLE, paper.ABSTRACT,
scienceparse_dir)
# tokenize PeerRead features
try:
title_tokens = tokenizer.tokenize(paper.TITLE)
    except ValueError:  # missing titles are quite common in ScienceParse output
print("Missing title for " + paper_json_filename)
title_tokens = None
abstract_tokens = tokenizer.tokenize(paper.ABSTRACT)
text_features = {'title': title_tokens,
'abstract': abstract_tokens}
context_features = {'authors': paper.AUTHORS,
'accepted': paper.ACCEPTED,
'name': paper.ID}
# add hand crafted features from PeerRead
pr_hand_features = get_PeerRead_hand_features(paper)
context_features.update(pr_hand_features)
return text_features, context_features
def bert_process_sentence(example_tokens, max_seq_length, tokenizer):
"""
Tokenization and pre-processing of text as expected by Bert
Parameters
----------
example_tokens
max_seq_length
tokenizer
Returns
-------
"""
# Account for [CLS] and [SEP] with "- 2"
if len(example_tokens) > max_seq_length - 2:
example_tokens = example_tokens[0:(max_seq_length - 2)]
# The convention in BERT for single sequences is:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. (vv: Not relevant for us)
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
# vv: segment_ids seem to be the same as type_ids
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in example_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids
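# Worked illustration (added; the tokens are made up): for example_tokens ["this", "paper"]
# and max_seq_length=5, the function returns
#   input_ids   -> ids of ["[CLS]", "this", "paper", "[SEP]"] plus one 0 of padding
#   input_mask  -> [1, 1, 1, 1, 0]
#   segment_ids -> [0, 0, 0, 0, 0]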
def paper_to_bert_Example(text_features, context_features, max_seq_length, tokenizer):
"""
Parses the input paper into a tf.Example as expected by Bert
Note: the docs for tensorflow Example are awful ¯\_(ツ)_/¯
"""
abstract_features = {}
abstract_tokens, abstract_padding_mask, _ = \
bert_process_sentence(text_features['abstract'], max_seq_length, tokenizer)
abstract_features["token_ids"] = _int64_feature(abstract_tokens)
abstract_features["token_mask"] = _int64_feature(abstract_padding_mask)
# abstract_features["segment_ids"] = create_int_feature(feature.segment_ids) TODO: ommission may cause bugs
# abstract_features["label_ids"] = _int64_feature([feature.label_id])
# non-sequential features
tf_context_features, tf_context_features_types = _dict_of_nonlist_numerical_to_tf_features(context_features)
features = {**tf_context_features, **abstract_features}
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
return tf_example
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto,
    e.g., an integer label.
"""
if isinstance(value, list):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
else:
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
"""Wrapper for inserting a float Feature into a SequenceExample proto,
    e.g., a float value.
"""
if isinstance(value, list):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
else:
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto,
    e.g., an image in bytes.
"""
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _dict_of_nonlist_numerical_to_tf_features(my_dict):
"""
Strip out non-numerical features
Returns tf_features_dict: a dictionary suitable for passing to tf.train.example
tf_types_dict: a dictionary of the tf types of previous dict
"""
tf_types_dict = {}
tf_features_dict = {}
for k, v in my_dict.items():
if isinstance(v, int) or isinstance(v, bool):
tf_features_dict[k] = _int64_feature(v)
tf_types_dict[k] = tf.int64
elif isinstance(v, float):
tf_features_dict[k] = _float_feature(v)
tf_types_dict[k] = tf.float32
else:
pass
return tf_features_dict, tf_types_dict
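# Illustration (hypothetical input): {"accepted": True, "year": 2017, "score": 0.5, "name": "x"}
# keeps "accepted" and "year" as int64 features and "score" as a float32 feature, while the
# string "name" is dropped, so only numerical context features end up in the tf.Example.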
venues = {'acl': 1,
'conll': 2,
'iclr': 3,
'nips': 4,
'icml': 5,
'emnlp': 6,
'aaai': 7,
'hlt-naacl': 8,
'arxiv': 0}
def _venues(venue_name):
if venue_name.lower() in venues:
return venues[venue_name.lower()]
else:
return -1
def _arxiv_subject(subjects):
subject = subjects[0]
if 'lg' in subject.lower():
return 0
elif 'cl' in subject.lower():
return 1
elif 'ai' in subject.lower():
return 2
else:
raise Exception("arxiv subject not recognized")
def clean_PeerRead_dataset(review_json_dir, parsedpdf_json_dir,
venue, year,
out_dir, out_file,
max_abs_len, tokenizer,
default_accept=1,
is_arxiv = False):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print('Reading reviews from...', review_json_dir)
paper_json_filenames = sorted(glob.glob('{}/*.json'.format(review_json_dir)))
with tf.python_io.TFRecordWriter(out_dir + "/" + out_file) as writer:
for idx, paper_json_filename in enumerate(paper_json_filenames):
text_features, context_features = process_json_paper(paper_json_filename, parsedpdf_json_dir, tokenizer)
if context_features['accepted'] is None: # missing for conferences other than ICLR (we only see accepts)
context_features['accepted'] = default_accept
many_split = rng.randint(0, 100) # useful for easy data splitting later
# other context features
arxiv = -1
if is_arxiv:
with io.open(paper_json_filename) as json_file:
loaded = json.load(json_file)
year = parse_date(loaded['DATE_OF_SUBMISSION']).year
venue = _venues(loaded['conference'])
arxiv = _arxiv_subject([loaded['SUBJECTS']])
extra_context = {'id': idx, 'venue': venue, 'year': year, 'many_split': many_split,
'arxiv': arxiv}
context_features.update(extra_context)
# turn it into a tf.data example
paper_ex = paper_to_bert_Example(text_features, context_features,
max_seq_length=max_abs_len, tokenizer=tokenizer)
writer.write(paper_ex.SerializeToString())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--review-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/reviews')
parser.add_argument('--parsedpdf-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/parsed_pdfs')
parser.add_argument('--out-dir', type=str, default='../dat/PeerRead/proc')
parser.add_argument('--out-file', type=str, default='arxiv-all.tf_record')
parser.add_argument('--vocab-file', type=str, default='../../bert/pre-trained/uncased_L-12_H-768_A-12/vocab.txt')
parser.add_argument('--max-abs-len', type=int, default=250)
parser.add_argument('--venue', type=int, default=0)
parser.add_argument('--year', type=int, default=2017)
args = parser.parse_args()
tokenizer = tokenization.FullTokenizer(
vocab_file=args.vocab_file, do_lower_case=True)
clean_PeerRead_dataset(args.review_json_dir, args.parsedpdf_json_dir,
args.venue, args.year,
args.out_dir, args.out_file,
args.max_abs_len, tokenizer, is_arxiv=True)
if __name__ == "__main__":
main()
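# Example invocation (added; the paths are simply the argparse defaults above):
#   python process_PeerRead_abstracts.py --review-json-dir ../dat/PeerRead/arxiv.all/all/reviews \
#       --parsedpdf-json-dir ../dat/PeerRead/arxiv.all/all/parsed_pdfs \
#       --out-dir ../dat/PeerRead/proc --out-file arxiv-all.tf_record --max-abs-len 250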
| [((26, 6, 26, 22), 'random.Random', 'random.Random', ({(26, 20, 26, 21): '0'}, {}), '(0)', False, 'import random\n'), ((30, 12, 30, 48), 'PeerRead.ScienceParse.Paper.Paper.from_json', 'Paper.from_json', ({(30, 28, 30, 47): 'paper_json_filename'}, {}), '(paper_json_filename)', False, 'from PeerRead.ScienceParse.Paper import Paper\n'), ((31, 25, 32, 80), 'PeerRead.ScienceParse.ScienceParseReader.ScienceParseReader.read_science_parse', 'ScienceParseReader.read_science_parse', ({(31, 63, 31, 71): 'paper.ID', (31, 73, 31, 84): 'paper.TITLE', (31, 86, 31, 100): 'paper.ABSTRACT', (32, 63, 32, 79): 'scienceparse_dir'}, {}), '(paper.ID, paper.TITLE, paper.ABSTRACT,\n scienceparse_dir)', False, 'from PeerRead.ScienceParse.ScienceParseReader import ScienceParseReader\n'), ((51, 23, 51, 56), 'PeerRead.data_cleaning.PeerRead_hand_features.get_PeerRead_hand_features', 'get_PeerRead_hand_features', ({(51, 50, 51, 55): 'paper'}, {}), '(paper)', False, 'from PeerRead.data_cleaning.PeerRead_hand_features import get_PeerRead_hand_features\n'), ((265, 13, 265, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((279, 16, 280, 55), 'bert.tokenization.FullTokenizer', 'tokenization.FullTokenizer', (), '', True, 'import bert.tokenization as tokenization\n'), ((230, 11, 230, 34), 'os.path.exists', 'os.path.exists', ({(230, 26, 230, 33): 'out_dir'}, {}), '(out_dir)', False, 'import os\n'), ((231, 8, 231, 28), 'os.makedirs', 'os.makedirs', ({(231, 20, 231, 27): 'out_dir'}, {}), '(out_dir)', False, 'import os\n'), ((236, 9, 236, 62), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', ({(236, 37, 236, 61): "(out_dir + '/' + out_file)"}, {}), "(out_dir + '/' + out_file)", True, 'import tensorflow as tf\n'), ((137, 43, 137, 78), 'tensorflow.train.Features', 'tf.train.Features', (), '', True, 'import tensorflow as tf\n'), ((167, 39, 167, 72), 'tensorflow.train.BytesList', 'tf.train.BytesList', (), '', True, 'import tensorflow as tf\n'), ((147, 43, 147, 74), 'tensorflow.train.Int64List', 'tf.train.Int64List', (), '', True, 'import tensorflow as tf\n'), ((149, 43, 149, 76), 'tensorflow.train.Int64List', 'tf.train.Int64List', (), '', True, 'import tensorflow as tf\n'), ((157, 43, 157, 74), 'tensorflow.train.FloatList', 'tf.train.FloatList', (), '', True, 'import tensorflow as tf\n'), ((159, 43, 159, 76), 'tensorflow.train.FloatList', 'tf.train.FloatList', (), '', True, 'import tensorflow as tf\n'), ((248, 21, 248, 49), 'io.open', 'io.open', ({(248, 29, 248, 48): 'paper_json_filename'}, {}), '(paper_json_filename)', False, 'import io\n'), ((249, 29, 249, 49), 'json.load', 'json.load', ({(249, 39, 249, 48): 'json_file'}, {}), '(json_file)', False, 'import json\n'), ((250, 23, 250, 63), 'dateutil.parser.parse', 'parse_date', ({(250, 34, 250, 62): "loaded['DATE_OF_SUBMISSION']"}, {}), "(loaded['DATE_OF_SUBMISSION'])", True, 'from dateutil.parser import parse as parse_date\n')] |
An7ar35/python-app-skeleton-structure | app/packageB/__init__.py | 9060411bd32840c6510ad8fe18dcdc097c07b511 | __all__=['module1'] | [] |
ZakDoesGaming/OregonTrail | lib/shop.py | 90cab35536ac5c6ba9e772ac5c29c914017c9c23 | from pygame import Surface, font
from copy import copy
from random import randint, choice
import string
from lib.transactionButton import TransactionButton
SHOP_PREFIX = ["archer", "baker", "fisher", "miller", "rancher", "robber"]
SHOP_SUFFIX = ["cave", "creek", "desert", "farm", "field", "forest", "hill", "lake", "mountain", "pass", "valley", "woods"]
class Shop():
def __init__(self, name, inventory, priceModifier, groupInventory, groupMoney, itemPrices, position, blitPosition, money, resourcePath):
self.yValue = 40
self.groupInventory = groupInventory
self.groupMoney = groupMoney
self.priceModifier = priceModifier
self.itemPrices = itemPrices
self.inventory = inventory
self.position = position
self.blitPosition = blitPosition
self.resourcePath = resourcePath
self.buyButtonList = []
self.sellButtonList = []
self.xPos = (-self.position * 40) + 1280
self.shopSurface = Surface((500, 300)).convert()
self.sepLine = Surface((self.shopSurface.get_width(), 10)).convert()
self.sepLine.fill((0, 0, 0))
self.invContainer = Surface((self.shopSurface.get_width() - 20,
self.shopSurface.get_height() / 2 - 35)).convert()
self.invContainer.fill((255, 255, 255))
self.titleFont = font.Font("res/fonts/west.ttf", 17)
self.textFont = font.Font("res/fonts/west.ttf", 15)
if (name == ""):
self.name = (choice(SHOP_PREFIX) + "'s " + choice(SHOP_SUFFIX)).capitalize()
else:
self.name = name
if (self.inventory == {}):
inventoryRandom = copy(self.groupInventory)
for key in list(inventoryRandom.keys()):
inventoryRandom[key] = randint(0, 10)
inventoryRandom["Food"] *= 20
self.inventory = inventoryRandom
if (money is None):
self.money = randint(200, 500)
else:
			self.money = money
self.render()
def get_surface(self):
self.render()
return self.shopSurface
def update(self, groupInv, groupMoney):
self.groupInventory = groupInv
self.groupMoney = groupMoney
self.render()
def move(self, moveValue):
self.xPos += (2 * moveValue)
self.render()
def render(self):
self.yValue = 40
self.shopSurface.fill((133, 94, 66))
self.shopSurface.blit(self.titleFont.render(self.name + " - $" + str(self.money), 1, (0, 0, 255)), (10, 5))
self.shopSurface.blit(self.invContainer, (10, 25))
self.shopSurface.blit(self.invContainer, (10, self.shopSurface.get_height() / 2 + 30))
self.shopSurface.blit(self.textFont.render("Inventory", 1, (255, 0, 0)), (10, 25))
self.shopSurface.blit(self.textFont.render("Amount", 1, (255, 0, 0)), (130, 25))
self.shopSurface.blit(self.textFont.render("Price", 1, (255, 0, 0)), (200, 25))
for key in list(self.inventory.keys()):
self.shopSurface.blit(self.textFont.render(key + ":", 1, (0, 0, 0)), (10, self.yValue))
self.shopSurface.blit(self.textFont.render(str(self.inventory[key]), 1,
(0, 0, 0)), (150, self.yValue))
self.shopSurface.blit(self.textFont.render("$"+str(self.itemPrices[key] * self.priceModifier), 1,
(0, 0, 0)), (200, self.yValue))
if (len(self.buyButtonList) < len(self.inventory.keys())):
buttonPos = tuple(map(sum, zip(self.blitPosition, (250, self.yValue))))
self.buyButtonList.append(TransactionButton(transaction = "buy",
item = key,
imagePosition = (250, self.yValue),
rectPosition = buttonPos,
resourcePath = self.resourcePath))
self.yValue += 30
for button in self.buyButtonList:
self.shopSurface.blit(button.image, button.imagePosition)
self.shopSurface.blit(self.sepLine, (0, float(self.shopSurface.get_height()) / 2))
self.shopSurface.blit(self.titleFont.render("You - $" + str(self.groupMoney), 1, (0, 0, 255)),
(10, float(self.shopSurface.get_height()) / 2 + 10))
self.shopSurface.blit(self.titleFont.render("Inventory", 1, (255, 0, 0)),
(10, float(self.shopSurface.get_height()) / 2 + 30))
self.shopSurface.blit(self.titleFont.render("Amount", 1, (255, 0, 0)),
(130, float(self.shopSurface.get_height()) / 2 + 30))
self.shopSurface.blit(self.titleFont.render("Price", 1, (255, 0, 0)),
(200, float(self.shopSurface.get_height()) / 2 + 30))
self.yValue = (float(self.shopSurface.get_height()) / 2) + 45
for key in list(self.groupInventory.keys()):
self.shopSurface.blit(self.textFont.render(key + ":", 1, (0, 0, 0)), (10, self.yValue))
self.shopSurface.blit(self.textFont.render(str(self.groupInventory[key]), 1,
(0, 0, 0)), (150, self.yValue))
self.shopSurface.blit(self.textFont.render("$" + str(self.itemPrices[key] * self.priceModifier), 1,
(0, 0, 0)), (200, self.yValue))
if (len(self.sellButtonList) < len(self.inventory.keys())):
buttonPos = tuple(map(sum, zip(self.blitPosition, (250, self.yValue))))
self.sellButtonList.append(TransactionButton(transaction = "sell",
item = key,
imagePosition = (250, self.yValue),
rectPosition = buttonPos,
resourcePath = self.resourcePath))
self.yValue += 30
for button in self.sellButtonList:
self.shopSurface.blit(button.image, button.imagePosition)
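# Rough usage sketch (added; the argument values are hypothetical):
#   shop = Shop(name="", inventory={}, priceModifier=1, groupInventory=party_inventory,
#               groupMoney=300, itemPrices=prices, position=5, blitPosition=(0, 0),
#               money=None, resourcePath="res/")
# Passing an empty name, an empty inventory and money=None makes __init__ randomise the
# shop's name, stock and cash, as handled above.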
| [((33, 19, 33, 54), 'pygame.font.Font', 'font.Font', ({(33, 29, 33, 49): '"""res/fonts/west.ttf"""', (33, 51, 33, 53): '17'}, {}), "('res/fonts/west.ttf', 17)", False, 'from pygame import Surface, font\n'), ((34, 18, 34, 53), 'pygame.font.Font', 'font.Font', ({(34, 28, 34, 48): '"""res/fonts/west.ttf"""', (34, 50, 34, 52): '15'}, {}), "('res/fonts/west.ttf', 15)", False, 'from pygame import Surface, font\n'), ((42, 21, 42, 46), 'copy.copy', 'copy', ({(42, 26, 42, 45): 'self.groupInventory'}, {}), '(self.groupInventory)', False, 'from copy import copy\n'), ((50, 16, 50, 33), 'random.randint', 'randint', ({(50, 24, 50, 27): '200', (50, 29, 50, 32): '500'}, {}), '(200, 500)', False, 'from random import randint, choice\n'), ((26, 21, 26, 40), 'pygame.Surface', 'Surface', ({(26, 29, 26, 39): '(500, 300)'}, {}), '((500, 300))', False, 'from pygame import Surface, font\n'), ((44, 27, 44, 41), 'random.randint', 'randint', ({(44, 35, 44, 36): '0', (44, 38, 44, 40): '10'}, {}), '(0, 10)', False, 'from random import randint, choice\n'), ((86, 30, 90, 48), 'lib.transactionButton.TransactionButton', 'TransactionButton', (), '', False, 'from lib.transactionButton import TransactionButton\n'), ((117, 31, 121, 49), 'lib.transactionButton.TransactionButton', 'TransactionButton', (), '', False, 'from lib.transactionButton import TransactionButton\n'), ((37, 46, 37, 65), 'random.choice', 'choice', ({(37, 53, 37, 64): 'SHOP_SUFFIX'}, {}), '(SHOP_SUFFIX)', False, 'from random import randint, choice\n'), ((37, 16, 37, 35), 'random.choice', 'choice', ({(37, 23, 37, 34): 'SHOP_PREFIX'}, {}), '(SHOP_PREFIX)', False, 'from random import randint, choice\n')] |
ajmal017/amp | core/dataflow/test/test_runners.py | 8de7e3b88be87605ec3bad03c139ac64eb460e5c | import logging
import numpy as np
import core.dataflow as dtf
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestRollingFitPredictDagRunner(hut.TestCase):
def test1(self) -> None:
"""
Test the DagRunner using `ArmaReturnsBuilder`
"""
dag_builder = dtf.ArmaReturnsBuilder()
config = dag_builder.get_config_template()
dag_builder.get_dag(config)
#
dag_runner = dtf.RollingFitPredictDagRunner(
config=config,
dag_builder=dag_builder,
start="2010-01-04 09:30",
end="2010-01-04 15:30",
retraining_freq="H",
retraining_lookback=4,
)
result_bundles = list(dag_runner.fit_predict())
np.testing.assert_equal(len(result_bundles), 2)
class TestIncrementalDagRunner(hut.TestCase):
def test1(self) -> None:
"""
Test the DagRunner using `ArmaReturnsBuilder`
"""
dag_builder = dtf.ArmaReturnsBuilder()
config = dag_builder.get_config_template()
# Create DAG and generate fit state.
dag = dag_builder.get_dag(config)
dag.run_leq_node("rets/clip", "fit")
fit_state = dtf.get_fit_state(dag)
#
dag_runner = dtf.IncrementalDagRunner(
config=config,
dag_builder=dag_builder,
start="2010-01-04 15:30",
end="2010-01-04 15:45",
freq="5T",
fit_state=fit_state,
)
result_bundles = list(dag_runner.predict())
self.assertEqual(len(result_bundles), 4)
# Check that dataframe results of `col` do not retroactively change
# over successive prediction steps (which would suggest future
# peeking).
col = "vwap_ret_0_vol_2_hat"
for rb_i, rb_i_next in zip(result_bundles[:-1], result_bundles[1:]):
srs_i = rb_i.result_df[col]
srs_i_next = rb_i_next.result_df[col]
self.assertTrue(srs_i.compare(srs_i_next[:-1]).empty)
| [((8, 7, 8, 34), 'logging.getLogger', 'logging.getLogger', ({(8, 25, 8, 33): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((17, 22, 17, 46), 'core.dataflow.ArmaReturnsBuilder', 'dtf.ArmaReturnsBuilder', ({}, {}), '()', True, 'import core.dataflow as dtf\n'), ((21, 21, 28, 9), 'core.dataflow.RollingFitPredictDagRunner', 'dtf.RollingFitPredictDagRunner', (), '', True, 'import core.dataflow as dtf\n'), ((39, 22, 39, 46), 'core.dataflow.ArmaReturnsBuilder', 'dtf.ArmaReturnsBuilder', ({}, {}), '()', True, 'import core.dataflow as dtf\n'), ((44, 20, 44, 42), 'core.dataflow.get_fit_state', 'dtf.get_fit_state', ({(44, 38, 44, 41): 'dag'}, {}), '(dag)', True, 'import core.dataflow as dtf\n'), ((46, 21, 53, 9), 'core.dataflow.IncrementalDagRunner', 'dtf.IncrementalDagRunner', (), '', True, 'import core.dataflow as dtf\n')] |
hmnk-1967/OCR-Python-Project-CS-BUIC | Main Project/Main_Program.py | 28c72d9913a25655f6183a7b960e527a0432c8e1 | import tkinter.messagebox
from tkinter import *
import tkinter as tk
from tkinter import filedialog
import numpy
import pytesseract  # Python wrapper for Google's Tesseract OCR engine.
import cv2
from PIL import Image, ImageTk
import os
root = tk.Tk()
root.title("Object Character Recognizer")
root.geometry("1280x720")
test_image = None
def browse_image():
fin = filedialog.askopenfilename(initialdir=os.getcwd(), title="Select Image File", filetypes=(("PNG Files", "*.png"), ("JPG Files", "*.jpg"), ("All Files", "*.*")))
global test_image
image = Image.open(fin)
test_image = image
img = ImageTk.PhotoImage(image.resize((650, 400)))
lb = tk.Label(image=img)
lb.place(x=25, y=50)
root.mainloop()
def use_ocr_default():
try:
global test_image
messge = None
#OEM stands for OCR Engine Mode and PSM stands for Page Segmentation Mode.
#OEM defines what kind of OCR engine is to be used (this defines the dataset that would be used to cross-match
#the available data with the testing data).
#PSM defines how Tesseract will treat the image that supposedly contains characters and how it will extract the
#data from the image.
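        # Reference values (added; from Tesseract's documented modes): --oem 1 selects the
        # LSTM engine only; --psm 3 is fully automatic page segmentation, --psm 6 assumes a
        # single uniform block of text, and --psm 7 treats the image as a single text line.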
tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 3')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
        display_message.delete("1.0", END)
        display_message.insert(END, str(tess))
        display_message.config(state=DISABLED)
display_message.place(x=890, y=330)
    except:  # Show an error message when the user selects an incompatible image.
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
def use_ocr_handwriting():
try:
global test_image
opencv_img = numpy.array(test_image)
opencv_img = opencv_img[:, :, ::-1].copy() #This line is used to convert RGB PIL image file to BGR cv2 image file.
blurred_img = cv2.medianBlur(opencv_img, 5)
gray_img = cv2.cvtColor(blurred_img, cv2.COLOR_BGR2GRAY)
thresh, binary = cv2.threshold(gray_img, 122, 255, cv2.THRESH_BINARY)
messge = None
tess = pytesseract.image_to_string(binary, config='-l eng --oem 1 --psm 3')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
        display_message.delete("1.0", END)
        display_message.insert(END, str(tess))
        display_message.config(state=DISABLED)
display_message.place(x=890, y=330)
except:
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
def use_ocr_singletext():
try:
global test_image
messge = None
tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 7')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
        display_message.delete("1.0", END)
        display_message.insert(END, str(tess))
        display_message.config(state=DISABLED)
display_message.place(x=890, y=330)
except:
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
w = tk.LabelFrame(root, text="Image:", width=768, height=600)
w.place(x=20, y=10)
w.pack_propagate(0)
w1 = tk.LabelFrame(root, text="Extracted Text:", width=500, height=310)
w1.place(x=800, y=300)
w2 = tk.LabelFrame(root, text="Operations:", width=350, height=280)
w2.place(x=800, y=10)
btn1 = tk.Button(w2, text="Load Image", padx=40, pady=10, command=browse_image)
btn1.place(x=22, y=20)
btn1 = tk.Button(w2, text="Run Handwritten OCR", padx=40, pady=10, command=use_ocr_handwriting)
btn1.place(x=22, y=80)
btn1 = tk.Button(w2, text="Run Default OCR", padx=40, pady=10, command=use_ocr_default)
btn1.place(x=22, y=140)
btn1 = tk.Button(w2, text="Run Single Text OCR", padx=40, pady=10, command=use_ocr_singletext)
btn1.place(x=22, y=200)
root.mainloop()
| [((11, 7, 11, 14), 'tkinter.Tk', 'tk.Tk', ({}, {}), '()', True, 'import tkinter as tk\n'), ((85, 4, 85, 61), 'tkinter.LabelFrame', 'tk.LabelFrame', (), '', True, 'import tkinter as tk\n'), ((88, 5, 88, 71), 'tkinter.LabelFrame', 'tk.LabelFrame', (), '', True, 'import tkinter as tk\n'), ((90, 5, 90, 67), 'tkinter.LabelFrame', 'tk.LabelFrame', (), '', True, 'import tkinter as tk\n'), ((92, 7, 92, 79), 'tkinter.Button', 'tk.Button', (), '', True, 'import tkinter as tk\n'), ((94, 7, 94, 95), 'tkinter.Button', 'tk.Button', (), '', True, 'import tkinter as tk\n'), ((96, 7, 96, 87), 'tkinter.Button', 'tk.Button', (), '', True, 'import tkinter as tk\n'), ((98, 7, 98, 94), 'tkinter.Button', 'tk.Button', (), '', True, 'import tkinter as tk\n'), ((20, 12, 20, 27), 'PIL.Image.open', 'Image.open', ({(20, 23, 20, 26): 'fin'}, {}), '(fin)', False, 'from PIL import Image, ImageTk\n'), ((23, 9, 23, 28), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((36, 15, 36, 87), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (), '', False, 'import pytesseract\n'), ((51, 21, 51, 44), 'numpy.array', 'numpy.array', ({(51, 33, 51, 43): 'test_image'}, {}), '(test_image)', False, 'import numpy\n'), ((53, 22, 53, 51), 'cv2.medianBlur', 'cv2.medianBlur', ({(53, 37, 53, 47): 'opencv_img', (53, 49, 53, 50): '5'}, {}), '(opencv_img, 5)', False, 'import cv2\n'), ((54, 19, 54, 64), 'cv2.cvtColor', 'cv2.cvtColor', ({(54, 32, 54, 43): 'blurred_img', (54, 45, 54, 63): 'cv2.COLOR_BGR2GRAY'}, {}), '(blurred_img, cv2.COLOR_BGR2GRAY)', False, 'import cv2\n'), ((55, 25, 55, 77), 'cv2.threshold', 'cv2.threshold', ({(55, 39, 55, 47): 'gray_img', (55, 49, 55, 52): '122', (55, 54, 55, 57): '255', (55, 59, 55, 76): 'cv2.THRESH_BINARY'}, {}), '(gray_img, 122, 255, cv2.THRESH_BINARY)', False, 'import cv2\n'), ((57, 15, 57, 83), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (), '', False, 'import pytesseract\n'), ((73, 15, 73, 87), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (), '', False, 'import pytesseract\n'), ((18, 48, 18, 59), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n')] |
wainshine/tensorflow | third_party/nasm/workspace.bzl | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | """loads the nasm library, used by TF."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
tf_http_archive(
name = "nasm",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2",
"http://pkgs.fedoraproject.org/repo/pkgs/nasm/nasm-2.13.03.tar.bz2/sha512/d7a6b4cee8dfd603d8d4c976e5287b5cc542fa0b466ff989b743276a6e28114e64289bf02a7819eca63142a5278aa6eed57773007e5f589e15768e6456a8919d/nasm-2.13.03.tar.bz2",
"http://www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2",
],
sha256 = "63ec86477ad3f0f6292325fd89e1d93aea2e2fd490070863f17d48f7cd387011",
strip_prefix = "nasm-2.13.03",
build_file = "//third_party/nasm:nasm.BUILD",
system_build_file = "//third_party/nasm:BUILD.system",
)
| [] |
wence-/libCEED | python/tests/test-1-vector.py | c785ad36304ed34c5edefb75cf1a0fe5445db17b | # Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
# reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
# a collaborative effort of two U.S. Department of Energy organizations (Office
# of Science and the National Nuclear Security Administration) responsible for
# the planning and preparation of a capable exascale ecosystem, including
# software, applications, hardware, advanced system engineering and early
# testbed platforms, in support of the nation's exascale computing imperative.
# @file
# Test Ceed Vector functionality
import os
import libceed
import numpy as np
import check
TOL = libceed.EPSILON * 256
# -------------------------------------------------------------------------------
# Utility
# -------------------------------------------------------------------------------
def check_values(ceed, x, value):
with x.array_read() as b:
for i in range(len(b)):
assert b[i] == value
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector
# -------------------------------------------------------------------------------
def test_100(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test setValue
# -------------------------------------------------------------------------------
def test_101(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
value = 1
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
for i in range(len(b)):
assert b[i] == 10 + i
x.set_value(3.0)
check_values(ceed, x, 3.0)
del x
x = ceed.Vector(n)
# Set value before setting or getting the array
x.set_value(5.0)
check_values(ceed, x, 5.0)
# -------------------------------------------------------------------------------
# Test getArrayRead state counter
# -------------------------------------------------------------------------------
def test_102(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
x.set_value(0)
# Two read accesses should not generate an error
a = x.get_array_read()
b = x.get_array_read()
x.restore_array_read()
x.restore_array_read()
# -------------------------------------------------------------------------------
# Test setting one vector from array of another vector
# -------------------------------------------------------------------------------
def test_103(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as x_array:
y.set_array(x_array, cmode=libceed.USE_POINTER)
with y.array_read() as y_array:
for i in range(n):
assert y_array[i] == 10 + i
# -------------------------------------------------------------------------------
# Test getArray to modify array
# -------------------------------------------------------------------------------
def test_104(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.zeros(n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
b[3] = -3.14
if libceed.lib.CEED_SCALAR_TYPE == libceed.SCALAR_FP32:
assert a[3] == np.float32(-3.14)
else:
assert a[3] == -3.14
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector using
# CEED_MEM_DEVICE
# -------------------------------------------------------------------------------
def test_105(ceed_resource):
# Skip test for non-GPU backend
if 'gpu' in ceed_resource:
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
arr = x.get_array_read(memtype=libceed.MEM_DEVICE)
y.set_array(arr, memtype=libceed.MEM_DEVICE)
x.restore_array_read()
with y.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test view
# -------------------------------------------------------------------------------
def test_107(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
print(x)
stdout, stderr, ref_stdout = check.output(capsys)
assert not stderr
assert stdout == ref_stdout
# -------------------------------------------------------------------------------
# Test norms
# -------------------------------------------------------------------------------
def test_108(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
for i in range(n):
if (i % 2 == 0):
a[i] *= -1
x.set_array(a, cmode=libceed.USE_POINTER)
norm = x.norm(normtype=libceed.NORM_1)
assert abs(norm - 45.) < TOL
norm = x.norm()
assert abs(norm - np.sqrt(285.)) < TOL
norm = x.norm(normtype=libceed.NORM_MAX)
assert abs(norm - 9.) < TOL
# -------------------------------------------------------------------------------
# Test taking the reciprocal of a vector
# -------------------------------------------------------------------------------
def test_119(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
x.reciprocal()
with x.array_read() as b:
for i in range(n):
assert abs(b[i] - 1. / (10 + i)) < TOL
# -------------------------------------------------------------------------------
# Test AXPY
# -------------------------------------------------------------------------------
def test_121(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
y.axpy(-0.5, x)
with y.array() as b:
assert np.allclose(.5 * a, b)
# -------------------------------------------------------------------------------
# Test pointwise multiplication
# -------------------------------------------------------------------------------
def test_122(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
w = ceed.Vector(n)
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
w.set_array(a, cmode=libceed.COPY_VALUES)
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
w.pointwise_mult(x, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
w.pointwise_mult(w, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i) < 1e-14
w.pointwise_mult(x, w)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i * i) < 1e-14
y.pointwise_mult(y, y)
with y.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
# -------------------------------------------------------------------------------
# Test Scale
# -------------------------------------------------------------------------------
def test_123(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
x.scale(-0.5)
with x.array() as b:
assert np.allclose(-.5 * a, b)
# -------------------------------------------------------------------------------
# Test getArrayWrite to modify array
# -------------------------------------------------------------------------------
def test_124(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
with x.array_write() as a:
for i in range(len(a)):
a[i] = 3 * i
with x.array_read() as a:
for i in range(len(a)):
assert a[i] == 3 * i
# -------------------------------------------------------------------------------
# Test modification of reshaped array
# -------------------------------------------------------------------------------
def test_199(ceed_resource):
"""Modification of reshaped array"""
ceed = libceed.Ceed(ceed_resource)
vec = ceed.Vector(12)
vec.set_value(0.0)
with vec.array(4, 3) as x:
x[...] = np.eye(4, 3)
with vec.array_read(3, 4) as x:
assert np.all(x == np.eye(4, 3).reshape(3, 4))
# -------------------------------------------------------------------------------
| [((43, 11, 43, 38), 'libceed.Ceed', 'libceed.Ceed', ({(43, 24, 43, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((61, 11, 61, 38), 'libceed.Ceed', 'libceed.Ceed', ({(61, 24, 61, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((87, 11, 87, 38), 'libceed.Ceed', 'libceed.Ceed', ({(87, 24, 87, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((106, 11, 106, 38), 'libceed.Ceed', 'libceed.Ceed', ({(106, 24, 106, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((129, 11, 129, 38), 'libceed.Ceed', 'libceed.Ceed', ({(129, 24, 129, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((177, 11, 177, 38), 'libceed.Ceed', 'libceed.Ceed', ({(177, 24, 177, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((187, 33, 187, 53), 'check.output', 'check.output', ({(187, 46, 187, 52): 'capsys'}, {}), '(capsys)', False, 'import check\n'), ((197, 11, 197, 38), 'libceed.Ceed', 'libceed.Ceed', ({(197, 24, 197, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((226, 11, 226, 38), 'libceed.Ceed', 'libceed.Ceed', ({(226, 24, 226, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((245, 11, 245, 38), 'libceed.Ceed', 'libceed.Ceed', ({(245, 24, 245, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((265, 11, 265, 38), 'libceed.Ceed', 'libceed.Ceed', ({(265, 24, 265, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((303, 11, 303, 38), 'libceed.Ceed', 'libceed.Ceed', ({(303, 24, 303, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((321, 11, 321, 38), 'libceed.Ceed', 'libceed.Ceed', ({(321, 24, 321, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((342, 11, 342, 38), 'libceed.Ceed', 'libceed.Ceed', ({(342, 24, 342, 37): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((154, 15, 154, 42), 'libceed.Ceed', 'libceed.Ceed', ({(154, 28, 154, 41): 'ceed_resource'}, {}), '(ceed_resource)', False, 'import libceed\n'), ((257, 15, 257, 37), 'numpy.allclose', 'np.allclose', ({(257, 27, 257, 33): '(0.5 * a)', (257, 35, 257, 36): 'b'}, {}), '(0.5 * a, b)', True, 'import numpy as np\n'), ((313, 15, 313, 38), 'numpy.allclose', 'np.allclose', ({(313, 27, 313, 34): '(-0.5 * a)', (313, 36, 313, 37): 'b'}, {}), '(-0.5 * a, b)', True, 'import numpy as np\n'), ((347, 17, 347, 29), 'numpy.eye', 'np.eye', ({(347, 24, 347, 25): '4', (347, 27, 347, 28): '3'}, {}), '(4, 3)', True, 'import numpy as np\n'), ((141, 23, 141, 40), 'numpy.float32', 'np.float32', ({(141, 34, 141, 39): '(-3.14)'}, {}), '(-3.14)', True, 'import numpy as np\n'), ((214, 22, 214, 35), 'numpy.sqrt', 'np.sqrt', ({(214, 30, 214, 34): '(285.0)'}, {}), '(285.0)', True, 'import numpy as np\n'), ((350, 27, 350, 39), 'numpy.eye', 'np.eye', ({(350, 34, 350, 35): '(4)', (350, 37, 350, 38): '(3)'}, {}), '(4, 3)', True, 'import numpy as np\n')] |
aperezpredictia/ESMValCore | esmvalcore/cmor/_fixes/cmip6/cesm2.py | d5bf3f459ff3a43e780d75d57b63b88b6cc8c4f2 | """Fixes for CESM2 model."""
from ..fix import Fix
from ..shared import (add_scalar_depth_coord, add_scalar_height_coord,
add_scalar_typeland_coord, add_scalar_typesea_coord)
class Fgco2(Fix):
"""Fixes for fgco2."""
def fix_metadata(self, cubes):
"""Add depth (0m) coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_depth_coord(cube)
return cubes
class Tas(Fix):
"""Fixes for tas."""
def fix_metadata(self, cubes):
"""Add height (2m) coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_height_coord(cube)
return cubes
class Sftlf(Fix):
"""Fixes for sftlf."""
def fix_metadata(self, cubes):
"""Add typeland coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_typeland_coord(cube)
return cubes
class Sftof(Fix):
"""Fixes for sftof."""
def fix_metadata(self, cubes):
"""Add typesea coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_typesea_coord(cube)
return cubes
| [] |
vitay/YouTubeFacesDB | examples/GenerateSubset.py | e7225e8d775ad64889fbee57a4452a25573a0360 | from YouTubeFacesDB import generate_ytf_database
###############################################################################
# Create the dataset
###############################################################################
generate_ytf_database(
    directory='../data',  # or '/scratch/vitay/Datasets/YouTubeFaces'; location of the YTF dataset
filename='ytfdb.h5', # Name of the HDF5 file to write to
labels=10, # Number of labels to randomly select
max_number=-1, # Maximum number of images to use
size=(100, 100), # Size of the images
color=False, # Black and white
bw_first=True, # Final shape is (1, w, h)
cropped=True # The original images are cropped to the faces
) | [((6, 0, 15, 1), 'YouTubeFacesDB.generate_ytf_database', 'generate_ytf_database', (), '', False, 'from YouTubeFacesDB import generate_ytf_database\n')] |
opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/billing/tests/test_price_current.py | cad9966389dc9b52b13d2301940c99cf4b243900 | from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.billing.tests.utils import get_financial_report_url
from waldur_mastermind.invoices import models as invoice_models
from waldur_mastermind.invoices.tests import factories as invoice_factories
from waldur_mastermind.invoices.tests import fixtures as invoice_fixtures
@freeze_time('2017-01-10')
class PriceCurrentTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = invoice_fixtures.InvoiceFixture()
invoice_factories.InvoiceItemFactory(
invoice=self.fixture.invoice,
project=self.fixture.project,
unit=invoice_models.InvoiceItem.Units.PER_MONTH,
unit_price=100,
quantity=1,
)
invoice_factories.InvoiceItemFactory(
invoice=self.fixture.invoice,
project=self.fixture.project,
unit=invoice_models.InvoiceItem.Units.PER_DAY,
unit_price=3,
quantity=31,
)
def test_current_price(self):
self.client.force_authenticate(self.fixture.staff)
url = get_financial_report_url(self.fixture.project.customer)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['billing_price_estimate']['current'], 100 + 9 * 3)
diff = (
data['billing_price_estimate']['total']
- data['billing_price_estimate']['current']
)
self.assertEqual(diff, 22 * 3)
| [((10, 1, 10, 26), 'freezegun.freeze_time', 'freeze_time', ({(10, 13, 10, 25): '"""2017-01-10"""'}, {}), "('2017-01-10')", False, 'from freezegun import freeze_time\n'), ((13, 23, 13, 56), 'waldur_mastermind.invoices.tests.fixtures.InvoiceFixture', 'invoice_fixtures.InvoiceFixture', ({}, {}), '()', True, 'from waldur_mastermind.invoices.tests import fixtures as invoice_fixtures\n'), ((15, 8, 21, 9), 'waldur_mastermind.invoices.tests.factories.InvoiceItemFactory', 'invoice_factories.InvoiceItemFactory', (), '', True, 'from waldur_mastermind.invoices.tests import factories as invoice_factories\n'), ((22, 8, 28, 9), 'waldur_mastermind.invoices.tests.factories.InvoiceItemFactory', 'invoice_factories.InvoiceItemFactory', (), '', True, 'from waldur_mastermind.invoices.tests import factories as invoice_factories\n'), ((32, 14, 32, 69), 'waldur_mastermind.billing.tests.utils.get_financial_report_url', 'get_financial_report_url', ({(32, 39, 32, 68): 'self.fixture.project.customer'}, {}), '(self.fixture.project.customer)', False, 'from waldur_mastermind.billing.tests.utils import get_financial_report_url\n')] |
ejfitzgerald/agents-aea | tests/test_cli/test_utils/test_utils.py | 6411fcba8af2cdf55a3005939ae8129df92e8c3e | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for aea.cli.utils module."""
from builtins import FileNotFoundError
from typing import cast
from unittest import TestCase, mock
from click import BadParameter, ClickException
from jsonschema import ValidationError
from yaml import YAMLError
from aea.cli.utils.click_utils import AEAJsonPathType, PublicIdParameter
from aea.cli.utils.config import (
_init_cli_config,
get_or_create_cli_config,
update_cli_config,
)
from aea.cli.utils.context import Context
from aea.cli.utils.decorators import _validate_config_consistency, clean_after
from aea.cli.utils.formatting import format_items
from aea.cli.utils.generic import is_readme_present
from aea.cli.utils.package_utils import (
find_item_in_distribution,
find_item_locally,
is_fingerprint_correct,
try_get_balance,
try_get_item_source_path,
try_get_item_target_path,
validate_author_name,
validate_package_name,
)
from tests.conftest import FETCHAI
from tests.test_cli.tools_for_testing import (
ConfigLoaderMock,
ContextMock,
PublicIdMock,
StopTest,
raise_stoptest,
)
AUTHOR = "author"
class FormatItemsTestCase(TestCase):
"""Test case for format_items method."""
def testformat_items_positive(self):
"""Test format_items positive result."""
items = [
{
"public_id": "author/name:version",
"name": "obj-name",
"description": "Some description",
"author": "author",
"version": "1.0",
}
]
result = format_items(items)
expected_result = (
"------------------------------\n"
"Public ID: author/name:version\n"
"Name: obj-name\n"
"Description: Some description\n"
"Author: author\n"
"Version: 1.0\n"
"------------------------------\n"
)
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.package_utils.os.path.join", return_value="some-path")
class TryGetItemSourcePathTestCase(TestCase):
"""Test case for try_get_item_source_path method."""
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=True)
def test_get_item_source_path_positive(self, exists_mock, join_mock):
"""Test for get_item_source_path positive result."""
result = try_get_item_source_path("cwd", AUTHOR, "skills", "skill-name")
expected_result = "some-path"
self.assertEqual(result, expected_result)
join_mock.assert_called_once_with("cwd", AUTHOR, "skills", "skill-name")
exists_mock.assert_called_once_with("some-path")
result = try_get_item_source_path("cwd", None, "skills", "skill-name")
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=False)
def test_get_item_source_path_not_exists(self, exists_mock, join_mock):
"""Test for get_item_source_path item already exists."""
with self.assertRaises(ClickException):
try_get_item_source_path("cwd", AUTHOR, "skills", "skill-name")
@mock.patch("aea.cli.utils.package_utils.os.path.join", return_value="some-path")
class TryGetItemTargetPathTestCase(TestCase):
"""Test case for try_get_item_target_path method."""
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=False)
def test_get_item_target_path_positive(self, exists_mock, join_mock):
"""Test for get_item_source_path positive result."""
result = try_get_item_target_path("packages", AUTHOR, "skills", "skill-name")
expected_result = "some-path"
self.assertEqual(result, expected_result)
join_mock.assert_called_once_with("packages", AUTHOR, "skills", "skill-name")
exists_mock.assert_called_once_with("some-path")
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=True)
def test_get_item_target_path_already_exists(self, exists_mock, join_mock):
"""Test for get_item_target_path item already exists."""
with self.assertRaises(ClickException):
try_get_item_target_path("skills", AUTHOR, "skill-name", "packages_path")
class PublicIdParameterTestCase(TestCase):
"""Test case for PublicIdParameter class."""
def test_get_metavar_positive(self):
"""Test for get_metavar positive result."""
result = PublicIdParameter.get_metavar("obj", "param")
expected_result = "PUBLIC_ID"
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.config.os.path.dirname", return_value="dir-name")
@mock.patch("aea.cli.utils.config.os.path.exists", return_value=False)
@mock.patch("aea.cli.utils.config.os.makedirs")
@mock.patch("builtins.open")
class InitConfigFolderTestCase(TestCase):
"""Test case for _init_cli_config method."""
def test_init_cli_config_positive(
self, open_mock, makedirs_mock, exists_mock, dirname_mock
):
"""Test for _init_cli_config method positive result."""
_init_cli_config()
dirname_mock.assert_called_once()
exists_mock.assert_called_once_with("dir-name")
makedirs_mock.assert_called_once_with("dir-name")
@mock.patch("aea.cli.utils.config.get_or_create_cli_config")
@mock.patch("aea.cli.utils.generic.yaml.dump")
@mock.patch("builtins.open", mock.mock_open())
class UpdateCLIConfigTestCase(TestCase):
"""Test case for update_cli_config method."""
def testupdate_cli_config_positive(self, dump_mock, icf_mock):
"""Test for update_cli_config method positive result."""
update_cli_config({"some": "config"})
icf_mock.assert_called_once()
dump_mock.assert_called_once()
def _raise_yamlerror(*args):
raise YAMLError()
def _raise_file_not_found_error(*args):
raise FileNotFoundError()
@mock.patch("builtins.open", mock.mock_open())
class GetOrCreateCLIConfigTestCase(TestCase):
"""Test case for read_cli_config method."""
@mock.patch(
"aea.cli.utils.generic.yaml.safe_load", return_value={"correct": "output"}
)
def testget_or_create_cli_config_positive(self, safe_load_mock):
"""Test for get_or_create_cli_config method positive result."""
result = get_or_create_cli_config()
expected_result = {"correct": "output"}
self.assertEqual(result, expected_result)
safe_load_mock.assert_called_once()
@mock.patch("aea.cli.utils.generic.yaml.safe_load", _raise_yamlerror)
def testget_or_create_cli_config_bad_yaml(self):
"""Test for rget_or_create_cli_config method bad yaml behavior."""
with self.assertRaises(ClickException):
get_or_create_cli_config()
class CleanAfterTestCase(TestCase):
"""Test case for clean_after decorator method."""
@mock.patch("aea.cli.utils.decorators.os.path.exists", return_value=True)
@mock.patch("aea.cli.utils.decorators._cast_ctx", lambda x: x)
@mock.patch("aea.cli.utils.decorators.shutil.rmtree")
def test_clean_after_positive(self, rmtree_mock, *mocks):
"""Test clean_after decorator method for positive result."""
@clean_after
def func(click_context):
ctx = cast(Context, click_context.obj)
ctx.clean_paths.append("clean/path")
raise ClickException("Message")
with self.assertRaises(ClickException):
func(ContextMock())
rmtree_mock.assert_called_once_with("clean/path")
@mock.patch("aea.cli.utils.package_utils.click.echo", raise_stoptest)
class ValidateAuthorNameTestCase(TestCase):
"""Test case for validate_author_name method."""
@mock.patch(
"aea.cli.utils.package_utils.click.prompt", return_value="correct_author"
)
def test_validate_author_name_positive(self, prompt_mock):
"""Test validate_author_name for positive result."""
author = "valid_author"
result = validate_author_name(author=author)
self.assertEqual(result, author)
result = validate_author_name()
self.assertEqual(result, "correct_author")
prompt_mock.assert_called_once()
@mock.patch(
"aea.cli.utils.package_utils.click.prompt", return_value="inv@l1d_@uth&r"
)
def test_validate_author_name_negative(self, prompt_mock):
"""Test validate_author_name for negative result."""
with self.assertRaises(StopTest):
validate_author_name()
prompt_mock.return_value = "skills"
with self.assertRaises(StopTest):
validate_author_name()
class ValidatePackageNameTestCase(TestCase):
"""Test case for validate_package_name method."""
def test_validate_package_name_positive(self):
"""Test validate_package_name for positive result."""
validate_package_name("correct_name")
def test_validate_package_name_negative(self):
"""Test validate_package_name for negative result."""
with self.assertRaises(BadParameter):
validate_package_name("incorrect-name")
def _raise_validation_error(*args, **kwargs):
raise ValidationError("Message.")
class FindItemLocallyTestCase(TestCase):
"""Test case for find_item_locally method."""
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
_raise_validation_error,
)
def test_find_item_locally_bad_config(self, *mocks):
"""Test find_item_locally for bad config result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_locally(ContextMock(), "skill", public_id)
self.assertIn("configuration file not valid", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch("aea.cli.utils.package_utils.Path.open", mock.mock_open())
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
return_value=ConfigLoaderMock(),
)
def test_find_item_locally_cant_find(self, from_conftype_mock, *mocks):
"""Test find_item_locally for can't find result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_locally(ContextMock(), "skill", public_id)
self.assertEqual(
cm.exception.message, "Cannot find skill with author and version specified."
)
class FindItemInDistributionTestCase(TestCase):
"""Test case for find_item_in_distribution method."""
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
_raise_validation_error,
)
def testfind_item_in_distribution_bad_config(self, *mocks):
"""Test find_item_in_distribution for bad config result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertIn("configuration file not valid", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=False)
def testfind_item_in_distribution_not_found(self, *mocks):
"""Test find_item_in_distribution for not found result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertIn("Cannot find skill", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch("aea.cli.utils.package_utils.Path.open", mock.mock_open())
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
return_value=ConfigLoaderMock(),
)
def testfind_item_in_distribution_cant_find(self, from_conftype_mock, *mocks):
"""Test find_item_locally for can't find result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertEqual(
cm.exception.message, "Cannot find skill with author and version specified."
)
class ValidateConfigConsistencyTestCase(TestCase):
"""Test case for _validate_config_consistency method."""
@mock.patch("aea.cli.utils.config.Path.exists", _raise_validation_error)
def test__validate_config_consistency_cant_find(self, *mocks):
"""Test _validate_config_consistency can't find result"""
with self.assertRaises(ValueError) as cm:
_validate_config_consistency(ContextMock(protocols=["some"]))
self.assertIn("Cannot find", str(cm.exception))
@mock.patch(
"aea.cli.utils.package_utils._compute_fingerprint",
return_value={"correct": "fingerprint"},
)
class IsFingerprintCorrectTestCase(TestCase):
"""Test case for adding skill with invalid fingerprint."""
def test_is_fingerprint_correct_positive(self, *mocks):
"""Test is_fingerprint_correct method for positive result."""
item_config = mock.Mock()
item_config.fingerprint = {"correct": "fingerprint"}
item_config.fingerprint_ignore_patterns = []
result = is_fingerprint_correct("package_path", item_config)
self.assertTrue(result)
def test_is_fingerprint_correct_negative(self, *mocks):
"""Test is_fingerprint_correct method for negative result."""
item_config = mock.Mock()
item_config.fingerprint = {"incorrect": "fingerprint"}
item_config.fingerprint_ignore_patterns = []
package_path = "package_dir"
result = is_fingerprint_correct(package_path, item_config)
self.assertFalse(result)
@mock.patch("aea.cli.config.click.ParamType")
class AEAJsonPathTypeTestCase(TestCase):
"""Test case for AEAJsonPathType class."""
@mock.patch("aea.cli.utils.click_utils.Path.exists", return_value=True)
def test_convert_root_vendor_positive(self, *mocks):
"""Test for convert method with root "vendor" positive result."""
value = "vendor.author.protocols.package_name.attribute_name"
ctx_mock = ContextMock()
ctx_mock.obj = mock.Mock()
ctx_mock.obj.set_config = mock.Mock()
obj = AEAJsonPathType()
obj.convert(value, "param", ctx_mock)
@mock.patch("aea.cli.utils.click_utils.Path.exists", return_value=False)
def test_convert_root_vendor_path_not_exists(self, *mocks):
"""Test for convert method with root "vendor" path not exists."""
value = "vendor.author.protocols.package_name.attribute_name"
obj = AEAJsonPathType()
with self.assertRaises(BadParameter):
obj.convert(value, "param", "ctx")
@mock.patch("aea.cli.utils.package_utils.LedgerApis", mock.MagicMock())
class TryGetBalanceTestCase(TestCase):
"""Test case for try_get_balance method."""
def test_try_get_balance_positive(self):
"""Test for try_get_balance method positive result."""
agent_config = mock.Mock()
agent_config.default_ledger_config = FETCHAI
wallet_mock = mock.Mock()
wallet_mock.addresses = {FETCHAI: "some-adress"}
try_get_balance(agent_config, wallet_mock, FETCHAI)
@mock.patch("aea.cli.utils.generic.os.path.exists", return_value=True)
class IsReadmePresentTestCase(TestCase):
"""Test case for is_readme_present method."""
def test_is_readme_present_positive(self, *mocks):
"""Test is_readme_present for positive result."""
self.assertTrue(is_readme_present("readme/path"))
| [((92, 1, 92, 81), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((115, 1, 115, 81), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((145, 1, 145, 76), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((146, 1, 146, 70), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((147, 1, 147, 47), 'unittest.mock.patch', 'mock.patch', ({(147, 12, 147, 46): '"""aea.cli.utils.config.os.makedirs"""'}, {}), "('aea.cli.utils.config.os.makedirs')", False, 'from unittest import TestCase, mock\n'), ((148, 1, 148, 28), 'unittest.mock.patch', 'mock.patch', ({(148, 12, 148, 27): '"""builtins.open"""'}, {}), "('builtins.open')", False, 'from unittest import TestCase, mock\n'), ((162, 1, 162, 60), 'unittest.mock.patch', 'mock.patch', ({(162, 12, 162, 59): '"""aea.cli.utils.config.get_or_create_cli_config"""'}, {}), "('aea.cli.utils.config.get_or_create_cli_config')", False, 'from unittest import TestCase, mock\n'), ((163, 1, 163, 46), 'unittest.mock.patch', 'mock.patch', ({(163, 12, 163, 45): '"""aea.cli.utils.generic.yaml.dump"""'}, {}), "('aea.cli.utils.generic.yaml.dump')", False, 'from unittest import TestCase, mock\n'), ((224, 1, 224, 69), 'unittest.mock.patch', 'mock.patch', ({(224, 12, 224, 52): '"""aea.cli.utils.package_utils.click.echo"""', (224, 54, 224, 68): 'raise_stoptest'}, {}), "('aea.cli.utils.package_utils.click.echo', raise_stoptest)", False, 'from unittest import TestCase, mock\n'), ((358, 1, 361, 1), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((383, 1, 383, 45), 'unittest.mock.patch', 'mock.patch', ({(383, 12, 383, 44): '"""aea.cli.config.click.ParamType"""'}, {}), "('aea.cli.config.click.ParamType')", False, 'from unittest import TestCase, mock\n'), ((420, 1, 420, 70), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((96, 5, 96, 80), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((108, 5, 108, 81), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((119, 5, 119, 81), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((128, 5, 128, 80), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((164, 29, 164, 45), 'unittest.mock.mock_open', 'mock.mock_open', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((176, 10, 176, 21), 'yaml.YAMLError', 'YAMLError', ({}, {}), '()', False, 'from yaml import YAMLError\n'), ((180, 10, 180, 29), 'builtins.FileNotFoundError', 'FileNotFoundError', ({}, {}), '()', False, 'from builtins import FileNotFoundError\n'), ((187, 5, 189, 5), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((197, 5, 197, 73), 'unittest.mock.patch', 'mock.patch', ({(197, 16, 197, 54): '"""aea.cli.utils.generic.yaml.safe_load"""', (197, 56, 197, 72): '_raise_yamlerror'}, {}), "('aea.cli.utils.generic.yaml.safe_load', _raise_yamlerror)", False, 'from unittest import TestCase, mock\n'), ((183, 29, 183, 45), 'unittest.mock.mock_open', 'mock.mock_open', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((207, 5, 207, 77), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((208, 5, 208, 66), 'unittest.mock.patch', 
'mock.patch', ({(208, 16, 208, 52): '"""aea.cli.utils.decorators._cast_ctx"""', (208, 54, 208, 65): '(lambda x: x)'}, {}), "('aea.cli.utils.decorators._cast_ctx', lambda x: x)", False, 'from unittest import TestCase, mock\n'), ((209, 5, 209, 57), 'unittest.mock.patch', 'mock.patch', ({(209, 16, 209, 56): '"""aea.cli.utils.decorators.shutil.rmtree"""'}, {}), "('aea.cli.utils.decorators.shutil.rmtree')", False, 'from unittest import TestCase, mock\n'), ((228, 5, 230, 5), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((241, 5, 243, 5), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((268, 10, 268, 37), 'jsonschema.ValidationError', 'ValidationError', ({(268, 26, 268, 36): '"""Message."""'}, {}), "('Message.')", False, 'from jsonschema import ValidationError\n'), ((274, 5, 274, 77), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((275, 5, 278, 5), 'unittest.mock.patch', 'mock.patch', ({(276, 8, 276, 74): '"""aea.cli.utils.package_utils.ConfigLoader.from_configuration_type"""', (277, 8, 277, 31): '_raise_validation_error'}, {}), "('aea.cli.utils.package_utils.ConfigLoader.from_configuration_type',\n _raise_validation_error)", False, 'from unittest import TestCase, mock\n'), ((287, 5, 287, 77), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((307, 5, 307, 77), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((308, 5, 311, 5), 'unittest.mock.patch', 'mock.patch', ({(309, 8, 309, 74): '"""aea.cli.utils.package_utils.ConfigLoader.from_configuration_type"""', (310, 8, 310, 31): '_raise_validation_error'}, {}), "('aea.cli.utils.package_utils.ConfigLoader.from_configuration_type',\n _raise_validation_error)", False, 'from unittest import TestCase, mock\n'), ((320, 5, 320, 78), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((329, 5, 329, 77), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((349, 5, 349, 76), 'unittest.mock.patch', 'mock.patch', ({(349, 16, 349, 50): '"""aea.cli.utils.config.Path.exists"""', (349, 52, 349, 75): '_raise_validation_error'}, {}), "('aea.cli.utils.config.Path.exists', _raise_validation_error)", False, 'from unittest import TestCase, mock\n'), ((387, 5, 387, 75), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((397, 5, 397, 76), 'unittest.mock.patch', 'mock.patch', (), '', False, 'from unittest import TestCase, mock\n'), ((406, 54, 406, 70), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((79, 17, 79, 36), 'aea.cli.utils.formatting.format_items', 'format_items', ({(79, 30, 79, 35): 'items'}, {}), '(items)', False, 'from aea.cli.utils.formatting import format_items\n'), ((99, 17, 99, 80), 'aea.cli.utils.package_utils.try_get_item_source_path', 'try_get_item_source_path', ({(99, 42, 99, 47): '"""cwd"""', (99, 49, 99, 55): 'AUTHOR', (99, 57, 99, 65): '"""skills"""', (99, 67, 99, 79): '"""skill-name"""'}, {}), "('cwd', AUTHOR, 'skills', 'skill-name')", False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((105, 17, 105, 78), 
'aea.cli.utils.package_utils.try_get_item_source_path', 'try_get_item_source_path', ({(105, 42, 105, 47): '"""cwd"""', (105, 49, 105, 53): 'None', (105, 55, 105, 63): '"""skills"""', (105, 65, 105, 77): '"""skill-name"""'}, {}), "('cwd', None, 'skills', 'skill-name')", False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((122, 17, 122, 85), 'aea.cli.utils.package_utils.try_get_item_target_path', 'try_get_item_target_path', ({(122, 42, 122, 52): '"""packages"""', (122, 54, 122, 60): 'AUTHOR', (122, 62, 122, 70): '"""skills"""', (122, 72, 122, 84): '"""skill-name"""'}, {}), "('packages', AUTHOR, 'skills', 'skill-name')", False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((140, 17, 140, 62), 'aea.cli.utils.click_utils.PublicIdParameter.get_metavar', 'PublicIdParameter.get_metavar', ({(140, 47, 140, 52): '"""obj"""', (140, 54, 140, 61): '"""param"""'}, {}), "('obj', 'param')", False, 'from aea.cli.utils.click_utils import AEAJsonPathType, PublicIdParameter\n'), ((156, 8, 156, 26), 'aea.cli.utils.config._init_cli_config', '_init_cli_config', ({}, {}), '()', False, 'from aea.cli.utils.config import _init_cli_config, get_or_create_cli_config, update_cli_config\n'), ((170, 8, 170, 45), 'aea.cli.utils.config.update_cli_config', 'update_cli_config', ({(170, 26, 170, 44): "{'some': 'config'}"}, {}), "({'some': 'config'})", False, 'from aea.cli.utils.config import _init_cli_config, get_or_create_cli_config, update_cli_config\n'), ((192, 17, 192, 43), 'aea.cli.utils.config.get_or_create_cli_config', 'get_or_create_cli_config', ({}, {}), '()', False, 'from aea.cli.utils.config import _init_cli_config, get_or_create_cli_config, update_cli_config\n'), ((234, 17, 234, 52), 'aea.cli.utils.package_utils.validate_author_name', 'validate_author_name', (), '', False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((237, 17, 237, 39), 'aea.cli.utils.package_utils.validate_author_name', 'validate_author_name', ({}, {}), '()', False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((259, 8, 259, 45), 'aea.cli.utils.package_utils.validate_package_name', 'validate_package_name', ({(259, 30, 259, 44): '"""correct_name"""'}, {}), "('correct_name')", False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((281, 20, 281, 63), 'tests.test_cli.tools_for_testing.PublicIdMock.from_str', 'PublicIdMock.from_str', ({(281, 42, 281, 62): '"""fetchai/echo:0.5.0"""'}, {}), "('fetchai/echo:0.5.0')", False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((295, 20, 295, 63), 'tests.test_cli.tools_for_testing.PublicIdMock.from_str', 'PublicIdMock.from_str', ({(295, 
42, 295, 62): '"""fetchai/echo:0.5.0"""'}, {}), "('fetchai/echo:0.5.0')", False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((288, 57, 288, 73), 'unittest.mock.mock_open', 'mock.mock_open', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((314, 20, 314, 63), 'tests.test_cli.tools_for_testing.PublicIdMock.from_str', 'PublicIdMock.from_str', ({(314, 42, 314, 62): '"""fetchai/echo:0.5.0"""'}, {}), "('fetchai/echo:0.5.0')", False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((323, 20, 323, 63), 'tests.test_cli.tools_for_testing.PublicIdMock.from_str', 'PublicIdMock.from_str', ({(323, 42, 323, 62): '"""fetchai/echo:0.5.0"""'}, {}), "('fetchai/echo:0.5.0')", False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((337, 20, 337, 63), 'tests.test_cli.tools_for_testing.PublicIdMock.from_str', 'PublicIdMock.from_str', ({(337, 42, 337, 62): '"""fetchai/echo:0.5.0"""'}, {}), "('fetchai/echo:0.5.0')", False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((330, 57, 330, 73), 'unittest.mock.mock_open', 'mock.mock_open', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((367, 22, 367, 33), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((370, 17, 370, 68), 'aea.cli.utils.package_utils.is_fingerprint_correct', 'is_fingerprint_correct', ({(370, 40, 370, 54): '"""package_path"""', (370, 56, 370, 67): 'item_config'}, {}), "('package_path', item_config)", False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((375, 22, 375, 33), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((379, 17, 379, 66), 'aea.cli.utils.package_utils.is_fingerprint_correct', 'is_fingerprint_correct', ({(379, 40, 379, 52): 'package_path', (379, 54, 379, 65): 'item_config'}, {}), '(package_path, item_config)', False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((391, 19, 391, 32), 'tests.test_cli.tools_for_testing.ContextMock', 'ContextMock', ({}, {}), '()', False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((392, 23, 392, 34), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((393, 34, 393, 45), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((394, 14, 394, 31), 'aea.cli.utils.click_utils.AEAJsonPathType', 'AEAJsonPathType', ({}, {}), '()', False, 'from aea.cli.utils.click_utils import AEAJsonPathType, PublicIdParameter\n'), ((401, 14, 401, 31), 'aea.cli.utils.click_utils.AEAJsonPathType', 'AEAJsonPathType', ({}, {}), '()', False, 'from aea.cli.utils.click_utils import AEAJsonPathType, PublicIdParameter\n'), ((412, 23, 412, 34), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((415, 22, 415, 33), 
'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import TestCase, mock\n'), ((417, 8, 417, 59), 'aea.cli.utils.package_utils.try_get_balance', 'try_get_balance', ({(417, 24, 417, 36): 'agent_config', (417, 38, 417, 49): 'wallet_mock', (417, 51, 417, 58): 'FETCHAI'}, {}), '(agent_config, wallet_mock, FETCHAI)', False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((112, 12, 112, 75), 'aea.cli.utils.package_utils.try_get_item_source_path', 'try_get_item_source_path', ({(112, 37, 112, 42): '"""cwd"""', (112, 44, 112, 50): 'AUTHOR', (112, 52, 112, 60): '"""skills"""', (112, 62, 112, 74): '"""skill-name"""'}, {}), "('cwd', AUTHOR, 'skills', 'skill-name')", False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((132, 12, 132, 85), 'aea.cli.utils.package_utils.try_get_item_target_path', 'try_get_item_target_path', ({(132, 37, 132, 45): '"""skills"""', (132, 47, 132, 53): 'AUTHOR', (132, 55, 132, 67): '"""skill-name"""', (132, 69, 132, 84): '"""packages_path"""'}, {}), "('skills', AUTHOR, 'skill-name', 'packages_path')", False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((201, 12, 201, 38), 'aea.cli.utils.config.get_or_create_cli_config', 'get_or_create_cli_config', ({}, {}), '()', False, 'from aea.cli.utils.config import _init_cli_config, get_or_create_cli_config, update_cli_config\n'), ((215, 18, 215, 50), 'typing.cast', 'cast', ({(215, 23, 215, 30): 'Context', (215, 32, 215, 49): 'click_context.obj'}, {}), '(Context, click_context.obj)', False, 'from typing import cast\n'), ((217, 18, 217, 43), 'click.ClickException', 'ClickException', ({(217, 33, 217, 42): '"""Message"""'}, {}), "('Message')", False, 'from click import BadParameter, ClickException\n'), ((247, 12, 247, 34), 'aea.cli.utils.package_utils.validate_author_name', 'validate_author_name', ({}, {}), '()', False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((251, 12, 251, 34), 'aea.cli.utils.package_utils.validate_author_name', 'validate_author_name', ({}, {}), '()', False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((264, 12, 264, 51), 'aea.cli.utils.package_utils.validate_package_name', 'validate_package_name', ({(264, 34, 264, 50): '"""incorrect-name"""'}, {}), "('incorrect-name')", False, 'from aea.cli.utils.package_utils import find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name\n'), ((291, 21, 291, 39), 'tests.test_cli.tools_for_testing.ConfigLoaderMock', 'ConfigLoaderMock', ({}, {}), '()', False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, 
PublicIdMock, StopTest, raise_stoptest\n'), ((333, 21, 333, 39), 'tests.test_cli.tools_for_testing.ConfigLoaderMock', 'ConfigLoaderMock', ({}, {}), '()', False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((426, 24, 426, 56), 'aea.cli.utils.generic.is_readme_present', 'is_readme_present', ({(426, 42, 426, 55): '"""readme/path"""'}, {}), "('readme/path')", False, 'from aea.cli.utils.generic import is_readme_present\n'), ((220, 17, 220, 30), 'tests.test_cli.tools_for_testing.ContextMock', 'ContextMock', ({}, {}), '()', False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((283, 30, 283, 43), 'tests.test_cli.tools_for_testing.ContextMock', 'ContextMock', ({}, {}), '()', False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((297, 30, 297, 43), 'tests.test_cli.tools_for_testing.ContextMock', 'ContextMock', ({}, {}), '()', False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((316, 38, 316, 51), 'tests.test_cli.tools_for_testing.ContextMock', 'ContextMock', ({}, {}), '()', False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((325, 38, 325, 51), 'tests.test_cli.tools_for_testing.ContextMock', 'ContextMock', ({}, {}), '()', False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((339, 38, 339, 51), 'tests.test_cli.tools_for_testing.ContextMock', 'ContextMock', ({}, {}), '()', False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n'), ((353, 41, 353, 72), 'tests.test_cli.tools_for_testing.ContextMock', 'ContextMock', (), '', False, 'from tests.test_cli.tools_for_testing import ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest\n')] |
SanjarbekSaminjonov/musofirlar.backend | api/flat/urls.py | 23b09e90cc4e3d153063ad1768b5ae1c18ff866d | from django.urls import path
from . import views
urlpatterns = [
path('', views.FlatListAPIView.as_view()),
path('create/', views.FlatCreateAPIView.as_view()),
path('<int:pk>/', views.FlatDetailAPIView.as_view()),
path('<int:pk>/update/', views.FlatUpdateAPIView.as_view()),
path('<int:pk>/delete/', views.FlatDeleteAPIView.as_view()),
]
| [] |
hsky77/hyssop | hyssop_aiohttp/component/__init__.py | 4ab1e82f9e2592de56589c7426a037564bef49a6 | # Copyright (C) 2020-Present the hyssop authors and contributors.
#
# This module is part of hyssop and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
'''
File created: January 1st 2021
Modified By: hsky77
Last Updated: January 7th 2021 15:30:08 pm
'''
from hyssop.project.component import ComponentTypes
from .aio_client import AioClientComponent
class AioHttpComponentTypes(ComponentTypes):
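    # enum value format: (component key, module name, class name) — matches the AioClientComponent imported above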
AioClient = ('aioclient', 'aio_client', 'AioClientComponent')
| [] |
tGhattas/IMP-seamless-cloning | run_clone.py | 2c81e0bd9bc99955afe06ec4eea187a5a42761e3 | import cv2
import getopt
import sys
from gui import MaskPainter, MaskMover
from clone import seamless_cloning, shepards_seamless_cloning
from utils import read_image, plt
from os import path
def usage():
print(
"Usage: python run_clone.py [options] \n\n\
Options: \n\
\t-h\t Flag to specify a brief help message and exits..\n\
\t-s\t(Required) Specify a source image.\n\
\t-t\t(Required) Specify a target image.\n\
\t-m\t(Optional) Specify a mask image with the object in white and other part in black, ignore this option if you plan to draw it later.\n\
    \t-x\t(Optional) Flag to use Shepard's interpolation instead of the default Poisson solver.\n\
    \t-v\t(Optional) Flag to use the gradient fields of both source and target (Poisson solver only); by default only the source gradient field is used.")
if __name__ == '__main__':
# parse command line arguments
args = {}
try:
opts, _ = getopt.getopt(sys.argv[1:], "vxhs:t:m:p:")
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
print("See help: run_clone.py -h")
exit(2)
for o, a in opts:
if o in ("-h"):
usage()
exit()
elif o in ("-s"):
args["source"] = a
elif o in ("-t"):
args["target"] = a
elif o in ("-m"):
args["mask"] = a
elif o in ("-x"):
args["mode"] = a.lower()
elif o in ("-v"):
args["gradient_field_source_only"] = a
else:
continue
#
if ("source" not in args) or ("target" not in args):
usage()
exit()
#
    # set default mode to Poisson solver
mode = "poisson" if ("mode" not in args) else args["mode"]
gradient_field_source_only = ("gradient_field_source_only" not in args)
source = read_image(args["source"], 2)
target = read_image(args["target"], 2)
if source is None or target is None:
print('Source or target image not exist.')
exit()
if source.shape[0] > target.shape[0] or source.shape[1] > target.shape[1]:
print('Source image cannot be larger than target image.')
exit()
# draw the mask
mask_path = ""
if "mask" not in args:
print('Please highlight the object to disapparate.\n')
mp = MaskPainter(args["source"])
mask_path = mp.paint_mask()
else:
mask_path = args["mask"]
# adjust mask position for target image
print('Please move the object to desired location to apparate.\n')
mm = MaskMover(args["target"], mask_path)
offset_x, offset_y, target_mask_path = mm.move_mask()
# blend
print('Blending ...')
target_mask = read_image(target_mask_path, 1)
offset = offset_x, offset_y
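    # choose the blending backend: Poisson (gradient-domain) solver or Shepard's interpolation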
cloning_tool = seamless_cloning if mode == "poisson" else shepards_seamless_cloning
kwargs = {"gradient_field_source_only": gradient_field_source_only} if mode == "poisson" else {}
blend_result = cloning_tool(source, target, target_mask, offset, **kwargs)
cv2.imwrite(path.join(path.dirname(args["source"]), 'target_result.png'),
blend_result)
plt.figure("Result"), plt.imshow(blend_result), plt.show()
print('Done.\n')
'''
running example:
    - Poisson based solver:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg
python run_clone.py -s external/source3.jpg -t external/target3.jpg -v
- Shepard's interpolation:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg -x
python run_clone.py -s external/source3.jpg -t external/target3.jpg -x
''' | [((60, 13, 60, 42), 'utils.read_image', 'read_image', ({(60, 24, 60, 38): "args['source']", (60, 40, 60, 41): '2'}, {}), "(args['source'], 2)", False, 'from utils import read_image, plt\n'), ((61, 13, 61, 42), 'utils.read_image', 'read_image', ({(61, 24, 61, 38): "args['target']", (61, 40, 61, 41): '2'}, {}), "(args['target'], 2)", False, 'from utils import read_image, plt\n'), ((82, 9, 82, 45), 'gui.MaskMover', 'MaskMover', ({(82, 19, 82, 33): "args['target']", (82, 35, 82, 44): 'mask_path'}, {}), "(args['target'], mask_path)", False, 'from gui import MaskPainter, MaskMover\n'), ((87, 18, 87, 49), 'utils.read_image', 'read_image', ({(87, 29, 87, 45): 'target_mask_path', (87, 47, 87, 48): '1'}, {}), '(target_mask_path, 1)', False, 'from utils import read_image, plt\n'), ((27, 18, 27, 60), 'getopt.getopt', 'getopt.getopt', ({(27, 32, 27, 44): 'sys.argv[1:]', (27, 46, 27, 59): '"""vxhs:t:m:p:"""'}, {}), "(sys.argv[1:], 'vxhs:t:m:p:')", False, 'import getopt\n'), ((75, 13, 75, 40), 'gui.MaskPainter', 'MaskPainter', ({(75, 25, 75, 39): "args['source']"}, {}), "(args['source'])", False, 'from gui import MaskPainter, MaskMover\n'), ((96, 4, 96, 24), 'utils.plt.figure', 'plt.figure', ({(96, 15, 96, 23): '"""Result"""'}, {}), "('Result')", False, 'from utils import read_image, plt\n'), ((96, 26, 96, 50), 'utils.plt.imshow', 'plt.imshow', ({(96, 37, 96, 49): 'blend_result'}, {}), '(blend_result)', False, 'from utils import read_image, plt\n'), ((96, 52, 96, 62), 'utils.plt.show', 'plt.show', ({}, {}), '()', False, 'from utils import read_image, plt\n'), ((94, 26, 94, 54), 'os.path.dirname', 'path.dirname', ({(94, 39, 94, 53): "args['source']"}, {}), "(args['source'])", False, 'from os import path\n')] |
Punkweb/punkweb-boards | punkweb_boards/rest/serializers.py | 8934d15fbff2a3ce9191fdb19d58d029eb55ef16 | from rest_framework import serializers
from punkweb_boards.conf.settings import SHOUTBOX_DISABLED_TAGS
from punkweb_boards.models import (
BoardProfile,
Category,
Subcategory,
Thread,
Post,
Conversation,
Message,
Report,
Shout,
)
class BoardProfileSerializer(serializers.ModelSerializer):
post_count = serializers.ReadOnlyField()
can_shout = serializers.ReadOnlyField()
rendered_username = serializers.ReadOnlyField()
rendered_rank = serializers.ReadOnlyField()
class Meta:
model = BoardProfile
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
exclude = ("auth_req",)
class SubcategorySerializer(serializers.ModelSerializer):
last_thread = serializers.ReadOnlyField(source="last_thread.id")
last_thread_title = serializers.ReadOnlyField(source="last_thread.title")
last_thread_created = serializers.ReadOnlyField(
source="last_thread.created"
)
last_thread_user = serializers.ReadOnlyField(
source="last_thread.user.profile.rendered_username"
)
parent_name = serializers.ReadOnlyField(source="parent.name")
thread_count = serializers.ReadOnlyField()
post_count = serializers.ReadOnlyField()
can_post = serializers.SerializerMethodField()
def get_can_post(self, obj):
return obj.can_post(self.context.get("request").user)
class Meta:
model = Subcategory
exclude = ("auth_req",)
class ThreadSerializer(serializers.ModelSerializer):
last_post = serializers.ReadOnlyField(source="last_post.id")
last_post_created = serializers.ReadOnlyField(source="last_post.created")
last_post_username = serializers.ReadOnlyField(
source="last_post.user.username"
)
last_post_rendered_username = serializers.ReadOnlyField(
source="last_post.user.profile.rendered_username"
)
user_username = serializers.ReadOnlyField(source="user.username")
user_rendered_username = serializers.ReadOnlyField(
source="user.profile.rendered_username"
)
user_image = serializers.ReadOnlyField(source="user.profile.avatar")
user_post_count = serializers.ReadOnlyField(
source="user.profile.post_count"
)
user_join_date = serializers.ReadOnlyField(source="user.created")
flagged = serializers.ReadOnlyField(source="reported")
posts_count = serializers.ReadOnlyField()
can_edit = serializers.SerializerMethodField()
def get_can_edit(self, obj):
return obj.can_edit(self.context.get("request").user)
class Meta:
model = Thread
fields = "__all__"
read_only_fields = (
"pinned",
"closed",
"user",
"upvoted_by",
"downvoted_by",
)
class PostSerializer(serializers.ModelSerializer):
flagged = serializers.ReadOnlyField(source="reported")
can_edit = serializers.SerializerMethodField()
def get_can_edit(self, obj):
return obj.can_edit(self.context.get("request").user)
class Meta:
model = Post
fields = "__all__"
read_only_fields = ("user", "upvoted_by", "downvoted_by")
class ConversationSerializer(serializers.ModelSerializer):
last_message = serializers.ReadOnlyField(source="last_message.id")
last_message_title = serializers.ReadOnlyField(source="last_message.title")
last_message_created = serializers.ReadOnlyField(
source="last_message.created"
)
last_message_user = serializers.ReadOnlyField(
source="last_message.user.profile.rendered_username"
)
message_count = serializers.ReadOnlyField()
class Meta:
model = Conversation
fields = "__all__"
read_only_fields = ("unread_by",)
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = "__all__"
read_only_fields = ("user",)
class ShoutSerializer(serializers.ModelSerializer):
username = serializers.ReadOnlyField(source="user.username")
rendered_username = serializers.ReadOnlyField(
source="user.profile.rendered_username"
)
class Meta:
model = Shout
fields = (
"id",
"user",
"username",
"rendered_username",
"content",
"_content_rendered",
"created",
"modified",
)
read_only_fields = ("user",)
def create(self, validated_data):
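        # reject shouts containing any disabled BBCode tag (case-insensitive check for the opening "[tag")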
for key in SHOUTBOX_DISABLED_TAGS:
key_tag = "[{}]".format(key).lower()
if (
key_tag[: len(key_tag) - 1]
in validated_data.get("content").lower()
):
raise serializers.ValidationError(
{
"notAllowed": "{} is not allowed in the shoutbox".format(
key_tag
)
}
)
return Shout.objects.create(**validated_data)
| [((17, 17, 17, 44), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((18, 16, 18, 43), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((19, 24, 19, 51), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((20, 20, 20, 47), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((34, 18, 34, 68), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((35, 24, 35, 77), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((36, 26, 38, 5), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((39, 23, 41, 5), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((42, 18, 42, 65), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((43, 19, 43, 46), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((44, 17, 44, 44), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((45, 15, 45, 50), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((56, 16, 56, 64), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((57, 24, 57, 77), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((58, 25, 60, 5), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((61, 34, 63, 5), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((64, 20, 64, 69), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((65, 29, 67, 5), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((68, 17, 68, 72), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((69, 22, 71, 5), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((72, 21, 72, 69), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((73, 14, 73, 58), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((74, 18, 74, 45), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ({}, {}), '()', False, 'from rest_framework 
import serializers\n'), ((75, 15, 75, 50), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((93, 14, 93, 58), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((94, 15, 94, 50), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((106, 19, 106, 70), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((107, 25, 107, 79), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((108, 27, 110, 5), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((111, 24, 113, 5), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((114, 20, 114, 47), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((130, 15, 130, 64), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((131, 24, 133, 5), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', (), '', False, 'from rest_framework import serializers\n'), ((164, 15, 164, 53), 'punkweb_boards.models.Shout.objects.create', 'Shout.objects.create', ({}, {}), '(**validated_data)', False, 'from punkweb_boards.models import BoardProfile, Category, Subcategory, Thread, Post, Conversation, Message, Report, Shout\n')] |
ulise/hetida-designer | runtime/components/Statistic/moving_minimum_time.py | a6be8eb45abf950d5498e3ca756ea1d2e46b5c00 | from hetdesrun.component.registration import register
from hetdesrun.datatypes import DataType
import pandas as pd
import numpy as np
# ***** DO NOT EDIT LINES BELOW *****
# These lines may be overwritten if input/output changes.
@register(
inputs={"data": DataType.Any, "t": DataType.String},
outputs={"movmin": DataType.Any},
)
def main(*, data, t):
"""entrypoint function for this component
Usage example:
>>> main(
... data = pd.Series(
... {
... "2019-08-01T15:20:00": 4.0,
... "2019-08-01T15:20:01": 5.0,
... "2019-08-01T15:20:05": 1.0,
... "2019-08-01T15:20:09": 9.0,
... }
... ),
... t = "4s"
... )["movmin"]
2019-08-01 15:20:00 4.0
2019-08-01 15:20:01 4.0
2019-08-01 15:20:05 1.0
2019-08-01 15:20:09 9.0
dtype: float64
"""
# ***** DO NOT EDIT LINES ABOVE *****
# write your code here.
try:
data.index = pd.to_datetime(data.index)
except (ValueError, TypeError):
raise TypeError("indices of data must be datetime")
data_sort = data.sort_index().dropna()
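    # rolling(t) uses a time-based window (e.g. t="4s"), which needs a sorted DatetimeIndex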
try:
return {"movmin": data_sort.rolling(t).min()}
    except ValueError:
raise ValueError(f"t could not be parsed as frequency: {t}")
| [((9, 1, 12, 1), 'hetdesrun.component.registration.register', 'register', (), '', False, 'from hetdesrun.component.registration import register\n'), ((38, 21, 38, 47), 'pandas.to_datetime', 'pd.to_datetime', ({(38, 36, 38, 46): 'data.index'}, {}), '(data.index)', True, 'import pandas as pd\n')] |
MikhailNakhatovich/rooms_painting | painter.py | 51b92797c867d4bb1c8d42a58785c0f4dacd4075 | import cv2
import ezdxf
import numpy as np
def draw_hatch(img, entity, color, mask):
for poly_path in entity.paths.paths:
# print(poly_path.path_type_flags)
polygon = np.array([vertex[:-1] for vertex in poly_path.vertices]).astype(int)
if poly_path.path_type_flags & 1 == 1:
cv2.fillPoly(img, [polygon], color)
cv2.fillPoly(mask, [polygon], (255, 255, 255))
else:
cv2.fillPoly(img, [polygon], (255, 255, 255))
return color
def draw_line(img, entity, color, mask):
p1 = entity.dxf.start[:-1]
p2 = entity.dxf.end[:-1]
cv2.line(img, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), color, 1)
cv2.line(mask, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (255, 255, 255), 2)
return color
def draw_lwpolyline(img, entity, color, mask):
polyline = []
a = np.array(entity.lwpoints.values).astype(int)
while len(a) > 0:
polyline.append((a[0], a[1]))
a = a[5:]
cv2.polylines(img, [np.array(polyline)], entity.closed, color, 1)
cv2.polylines(mask, [np.array(polyline)], entity.closed, (255, 255, 255), 2)
return color
def draw_arc(img, entity, color, mask):
s = entity.dxf.start_angle * np.pi / 180
e = entity.dxf.end_angle * np.pi / 180
if s > e:
s -= 2 * np.pi
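    # sample the arc roughly once per degree between the start and end angles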
d = (e - s) / (int((e - s) * 180 / np.pi) + 1)
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
angles = np.arange(s, e + d / 2, d)
x = cx + r * np.cos(angles)
y = cy + r * np.sin(angles)
points = np.column_stack((x, y)).astype(int)
cv2.polylines(img, [points], abs(s - e) < 1e-9, color, 1)
cv2.polylines(mask, [points], abs(s - e) < 1e-9, (255, 255, 255), 2)
return color
def draw_circle(img, entity, color, mask):
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), int(r), color, 1)
cv2.circle(mask, (int(cx), int(cy)), int(r), (255, 255, 255), -1)
return color
def draw_ellipse(img, entity, color, mask):
cx, cy = entity.dxf.center.xyz[:-1]
ma = entity.dxf.major_axis.magnitude
angle = entity.dxf.major_axis.angle_deg
mi = ma * entity.dxf.ratio
s = entity.dxf.start_param * 180 / np.pi
e = entity.dxf.end_param * 180 / np.pi
if entity.dxf.extrusion.z == -1:
s = 360 - s
e = 360 - e
cv2.ellipse(img, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, color, 1)
cv2.ellipse(mask, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, (255, 255, 255), 1)
return color
def draw_point(img, entity, color, mask):
cx, cy = entity.dxf.location.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), 0, color, 1)
cv2.circle(mask, (int(cx), int(cy)), 0, (255, 255, 255), -1)
return color
draw_map = {
'HATCH': draw_hatch,
'LINE': draw_line,
'LWPOLYLINE': draw_lwpolyline,
'ARC': draw_arc,
'CIRCLE': draw_circle,
'ELLIPSE': draw_ellipse,
'POINT': draw_point,
}
def paint(in_path, out_path, config):
doc = ezdxf.readfile(in_path)
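    # the drawing extents from the DXF header define the size of the rendered image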
extmax, extmin = doc.header['$EXTMAX'], doc.header['$EXTMIN']
xmin, ymin = np.floor(extmin[:-1]).astype(int)
xmax, ymax = np.ceil(extmax[:-1]).astype(int)
img = np.ones((ymax + ymin, xmax + xmin, 3), np.uint8) * 255
mask = np.zeros_like(img)
msp = doc.modelspace()
layers = config.get('layers', {})
colors = config.get('colors', {})
# print(doc.layers.entries.keys())
for layer_name, names in layers.items():
color = tuple(colors.get(layer_name, [0, 0, 0]))
for name in names:
if name not in doc.layers:
continue
entities = msp.query('*[layer=="%s"]' % name)
tmp = np.zeros((ymax + ymin, xmax + xmin), np.uint8)
for entity in entities:
if entity.DXFTYPE in draw_map:
draw_map[entity.DXFTYPE](img, entity, color, tmp)
else:
print("%s: %s" % (name, entity.DXFTYPE))
contours, hierarchy = cv2.findContours(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(mask, contours, -1, color, -1)
res, img_png = cv2.imencode('.png', cv2.flip(img, 0))
res, mask_png = cv2.imencode('.png', cv2.flip(mask, 0))
with open(out_path, 'wb') as f:
f.write(img_png.tobytes())
with open(out_path[:-4] + "_mask.png", 'wb') as f:
f.write(mask_png.tobytes())
| [((45, 13, 45, 39), 'numpy.arange', 'np.arange', ({(45, 23, 45, 24): 's', (45, 26, 45, 35): 'e + d / 2', (45, 37, 45, 38): 'd'}, {}), '(s, e + d / 2, d)', True, 'import numpy as np\n'), ((96, 10, 96, 33), 'ezdxf.readfile', 'ezdxf.readfile', ({(96, 25, 96, 32): 'in_path'}, {}), '(in_path)', False, 'import ezdxf\n'), ((101, 11, 101, 29), 'numpy.zeros_like', 'np.zeros_like', ({(101, 25, 101, 28): 'img'}, {}), '(img)', True, 'import numpy as np\n'), ((100, 10, 100, 58), 'numpy.ones', 'np.ones', ({(100, 18, 100, 47): '(ymax + ymin, xmax + xmin, 3)', (100, 49, 100, 57): 'np.uint8'}, {}), '((ymax + ymin, xmax + xmin, 3), np.uint8)', True, 'import numpy as np\n'), ((121, 40, 121, 56), 'cv2.flip', 'cv2.flip', ({(121, 49, 121, 52): 'img', (121, 54, 121, 55): '0'}, {}), '(img, 0)', False, 'import cv2\n'), ((122, 41, 122, 58), 'cv2.flip', 'cv2.flip', ({(122, 50, 122, 54): 'mask', (122, 56, 122, 57): '0'}, {}), '(mask, 0)', False, 'import cv2\n'), ((11, 12, 11, 47), 'cv2.fillPoly', 'cv2.fillPoly', ({(11, 25, 11, 28): 'img', (11, 30, 11, 39): '[polygon]', (11, 41, 11, 46): 'color'}, {}), '(img, [polygon], color)', False, 'import cv2\n'), ((12, 12, 12, 58), 'cv2.fillPoly', 'cv2.fillPoly', ({(12, 25, 12, 29): 'mask', (12, 31, 12, 40): '[polygon]', (12, 42, 12, 57): '(255, 255, 255)'}, {}), '(mask, [polygon], (255, 255, 255))', False, 'import cv2\n'), ((14, 12, 14, 57), 'cv2.fillPoly', 'cv2.fillPoly', ({(14, 25, 14, 28): 'img', (14, 30, 14, 39): '[polygon]', (14, 41, 14, 56): '(255, 255, 255)'}, {}), '(img, [polygon], (255, 255, 255))', False, 'import cv2\n'), ((28, 8, 28, 40), 'numpy.array', 'np.array', ({(28, 17, 28, 39): 'entity.lwpoints.values'}, {}), '(entity.lwpoints.values)', True, 'import numpy as np\n'), ((32, 24, 32, 42), 'numpy.array', 'np.array', ({(32, 33, 32, 41): 'polyline'}, {}), '(polyline)', True, 'import numpy as np\n'), ((33, 25, 33, 43), 'numpy.array', 'np.array', ({(33, 34, 33, 42): 'polyline'}, {}), '(polyline)', True, 'import numpy as np\n'), ((46, 17, 46, 31), 'numpy.cos', 'np.cos', ({(46, 24, 46, 30): 'angles'}, {}), '(angles)', True, 'import numpy as np\n'), ((47, 17, 47, 31), 'numpy.sin', 'np.sin', ({(47, 24, 47, 30): 'angles'}, {}), '(angles)', True, 'import numpy as np\n'), ((48, 13, 48, 36), 'numpy.column_stack', 'np.column_stack', ({(48, 29, 48, 35): '(x, y)'}, {}), '((x, y))', True, 'import numpy as np\n'), ((98, 17, 98, 38), 'numpy.floor', 'np.floor', ({(98, 26, 98, 37): 'extmin[:-1]'}, {}), '(extmin[:-1])', True, 'import numpy as np\n'), ((99, 17, 99, 37), 'numpy.ceil', 'np.ceil', ({(99, 25, 99, 36): 'extmax[:-1]'}, {}), '(extmax[:-1])', True, 'import numpy as np\n'), ((112, 18, 112, 64), 'numpy.zeros', 'np.zeros', ({(112, 27, 112, 53): '(ymax + ymin, xmax + xmin)', (112, 55, 112, 63): 'np.uint8'}, {}), '((ymax + ymin, xmax + xmin), np.uint8)', True, 'import numpy as np\n'), ((118, 34, 118, 99), 'cv2.findContours', 'cv2.findContours', ({(118, 51, 118, 54): 'tmp', (118, 56, 118, 73): 'cv2.RETR_EXTERNAL', (118, 75, 118, 98): 'cv2.CHAIN_APPROX_SIMPLE'}, {}), '(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)', False, 'import cv2\n'), ((119, 12, 119, 59), 'cv2.drawContours', 'cv2.drawContours', ({(119, 29, 119, 33): 'mask', (119, 35, 119, 43): 'contours', (119, 45, 119, 47): '(-1)', (119, 49, 119, 54): 'color', (119, 56, 119, 58): '(-1)'}, {}), '(mask, contours, -1, color, -1)', False, 'import cv2\n'), ((9, 18, 9, 74), 'numpy.array', 'np.array', ({(9, 27, 9, 73): '[vertex[:-1] for vertex in poly_path.vertices]'}, {}), '([vertex[:-1] for vertex in poly_path.vertices])', 
True, 'import numpy as np\n')] |
vascoalramos/misago-deployment | misago/misago/users/serializers/auth.py | 20226072138403108046c0afad9d99eb4163cedc | from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import serializers
from ...acl.useracl import serialize_user_acl
from .user import UserSerializer
User = get_user_model()
__all__ = ["AuthenticatedUserSerializer", "AnonymousUserSerializer"]
class AuthFlags:
def get_is_authenticated(self, obj):
return bool(obj.is_authenticated)
def get_is_anonymous(self, obj):
return bool(obj.is_anonymous)
class AuthenticatedUserSerializer(UserSerializer, AuthFlags):
email = serializers.SerializerMethodField()
is_authenticated = serializers.SerializerMethodField()
is_anonymous = serializers.SerializerMethodField()
class Meta:
model = User
fields = UserSerializer.Meta.fields + [
"has_usable_password",
"is_hiding_presence",
"limits_private_thread_invites_to",
"unread_private_threads",
"subscribe_to_started_threads",
"subscribe_to_replied_threads",
"is_authenticated",
"is_anonymous",
]
def get_acl(self, obj):
acl = self.context.get("acl")
if acl:
return serialize_user_acl(acl)
return {}
def get_email(self, obj):
return obj.email
def get_api(self, obj):
return {
"avatar": reverse("misago:api:user-avatar", kwargs={"pk": obj.pk}),
"data_downloads": reverse(
"misago:api:user-data-downloads", kwargs={"pk": obj.pk}
),
"details": reverse("misago:api:user-details", kwargs={"pk": obj.pk}),
"change_email": reverse(
"misago:api:user-change-email", kwargs={"pk": obj.pk}
),
"change_password": reverse(
"misago:api:user-change-password", kwargs={"pk": obj.pk}
),
"edit_details": reverse(
"misago:api:user-edit-details", kwargs={"pk": obj.pk}
),
"options": reverse("misago:api:user-forum-options", kwargs={"pk": obj.pk}),
"request_data_download": reverse(
"misago:api:user-request-data-download", kwargs={"pk": obj.pk}
),
"username": reverse("misago:api:user-username", kwargs={"pk": obj.pk}),
"delete": reverse(
"misago:api:user-delete-own-account", kwargs={"pk": obj.pk}
),
}
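# exclude_fields builds a variant of the serializer without these profile fields for the authenticated user's own payload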
AuthenticatedUserSerializer = AuthenticatedUserSerializer.exclude_fields(
"is_avatar_locked",
"is_blocked",
"is_followed",
"is_signature_locked",
"meta",
"signature",
"status",
)
class AnonymousUserSerializer(serializers.Serializer, AuthFlags):
id = serializers.ReadOnlyField()
acl = serializers.SerializerMethodField()
is_authenticated = serializers.SerializerMethodField()
is_anonymous = serializers.SerializerMethodField()
def get_acl(self, obj):
acl = self.context.get("acl")
if acl:
return serialize_user_acl(acl)
return {}
| [((8, 7, 8, 23), 'django.contrib.auth.get_user_model', 'get_user_model', ({}, {}), '()', False, 'from django.contrib.auth import get_user_model\n'), ((22, 12, 22, 47), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((23, 23, 23, 58), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((24, 19, 24, 54), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((87, 9, 87, 36), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((88, 10, 88, 45), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((89, 23, 89, 58), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((90, 19, 90, 54), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((50, 22, 50, 78), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((51, 30, 53, 13), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((54, 23, 54, 80), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((55, 28, 57, 13), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((58, 31, 60, 13), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((61, 28, 63, 13), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((64, 23, 64, 86), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((65, 37, 67, 13), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((68, 24, 68, 82), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((69, 22, 71, 13), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n')] |
mohammadanarul/Ecommerce-Django-YT | shop/models.py | afecc8f41693925619b81986d979706c64175360 | from django.db import models
from ckeditor.fields import RichTextField
from taggit.managers import TaggableManager
# Create your models here.
from mptt.models import MPTTModel, TreeForeignKey
class Category(MPTTModel):
name = models.CharField(max_length=50, unique=True)
parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children')
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class MPTTMeta:
order_insertion_by = ['name']
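# Editor's sketch (not part of the original file; the object names are illustrative):
# django-mptt nests categories through the TreeForeignKey 'parent' declared above, e.g.
#   electronics = Category.objects.create(name='Electronics')
#   phones = Category.objects.create(name='Phones', parent=electronics)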
class Brand(models.Model):
name = models.CharField(max_length=50)
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Product(models.Model):
STATUS_CHOICES = (
('NONE', 'NONE'),
('NEW', 'NEW'),
('SALE', 'SALE'),
('HOT', 'HOT'),
)
title = models.CharField(max_length=50)
price = models.DecimalField(max_digits=5, decimal_places=2)
short_description = RichTextField()
tags = TaggableManager()
description = RichTextField()
specification = RichTextField()
image = models.ImageField(upload_to='product/')
category = models.ForeignKey(Category, on_delete=models.CASCADE)
brand = models.ForeignKey(Brand, on_delete=models.CASCADE)
stack = models.IntegerField(default=5)
status = models.CharField(max_length=5, choices=STATUS_CHOICES, default='NONE')
is_fetured = models.BooleanField(default=False)
is_special = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class ProductImages(models.Model):
category = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='images')
image = models.ImageField(upload_to='products/') | [((14, 11, 14, 55), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((15, 13, 15, 109), 'mptt.models.TreeForeignKey', 'TreeForeignKey', (), '', False, 'from mptt.models import MPTTModel, TreeForeignKey\n'), ((16, 16, 16, 49), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((17, 18, 17, 57), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((18, 18, 18, 53), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((24, 11, 24, 42), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((25, 16, 25, 49), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((26, 18, 26, 57), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((27, 18, 27, 53), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((37, 12, 37, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((38, 12, 38, 63), 'django.db.models.DecimalField', 'models.DecimalField', (), '', False, 'from django.db import models\n'), ((39, 24, 39, 39), 'ckeditor.fields.RichTextField', 'RichTextField', ({}, {}), '()', False, 'from ckeditor.fields import RichTextField\n'), ((40, 11, 40, 28), 'taggit.managers.TaggableManager', 'TaggableManager', ({}, {}), '()', False, 'from taggit.managers import TaggableManager\n'), ((41, 18, 41, 33), 'ckeditor.fields.RichTextField', 'RichTextField', ({}, {}), '()', False, 'from ckeditor.fields import RichTextField\n'), ((42, 20, 42, 35), 'ckeditor.fields.RichTextField', 'RichTextField', ({}, {}), '()', False, 'from ckeditor.fields import RichTextField\n'), ((43, 12, 43, 51), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import models\n'), ((44, 15, 44, 68), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((45, 12, 45, 62), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((46, 12, 46, 42), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((47, 13, 47, 83), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((48, 18, 48, 52), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((49, 18, 49, 52), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((50, 16, 50, 49), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((51, 18, 51, 57), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((52, 18, 52, 53), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((55, 15, 55, 90), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((56, 12, 56, 52), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import models\n')] |
deeuu/supriya | supriya/patterns/NoteEvent.py | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | import uuid
import supriya.commands
import supriya.realtime
from supriya.patterns.Event import Event
class NoteEvent(Event):
### CLASS VARIABLES ###
__slots__ = ()
### INITIALIZER ###
def __init__(
self,
add_action=None,
delta=None,
duration=None,
is_stop=True,
synthdef=None,
target_node=None,
uuid=None,
**settings,
):
if add_action is not None:
add_action = supriya.AddAction.from_expr(add_action)
Event.__init__(
self,
add_action=add_action,
delta=delta,
duration=duration,
is_stop=bool(is_stop),
synthdef=synthdef,
target_node=target_node,
uuid=uuid,
**settings,
)
### PRIVATE METHODS ###
def _perform_nonrealtime(self, session, uuids, offset, maximum_offset=None):
import supriya.assets.synthdefs
settings = self.settings.copy() # Do not mutate in place.
synthdef = self.get("synthdef", supriya.assets.synthdefs.default)
synthdef = synthdef or supriya.assets.synthdefs.default
synth_uuid = self.get("uuid", uuid.uuid4())
is_stop = self.get("is_stop")
duration = self.get("duration")
if duration is None:
duration = 1
if "duration" in settings:
duration = settings.pop("duration")
dictionaries = self._expand(
settings, synthdef, uuids, realtime=False, synth_parameters_only=True
)
if synth_uuid not in uuids:
# Begin a Pbind or Pmono synth
target_node = self["target_node"]
if isinstance(target_node, uuid.UUID) and target_node in uuids:
target_node = uuids[target_node]
prototype = (supriya.nonrealtime.Session, supriya.nonrealtime.Node)
if not isinstance(target_node, prototype):
target_node = session
synths = []
with session.at(offset):
for dictionary in dictionaries:
synth = target_node.add_synth(
add_action=self["add_action"],
duration=duration,
synthdef=synthdef,
**dictionary,
)
synths.append(synth)
if not is_stop:
uuids[synth_uuid] = tuple(synths)
else:
# Extend and make settings on a Pmono synth
synths = uuids[synth_uuid]
stop_offset = offset + duration
for synth, dictionary in zip(synths, dictionaries):
duration = stop_offset - synth.start_offset
synth.set_duration(duration)
with session.at(offset):
for key, value in dictionary.items():
synth[key] = value
return offset + max(self.delta, self.get("duration", 0))
def _perform_realtime(self, index=0, server=None, timestamp=0, uuids=None):
import supriya.assets.synthdefs
import supriya.patterns
synth_uuid = self.get("uuid") or uuid.uuid4()
synthdef = self.get("synthdef", supriya.assets.synthdefs.default)
synthdef = synthdef or supriya.assets.synthdefs.default
is_stop = self.get("is_stop")
duration = self["duration"]
if duration is None:
duration = 1
dictionaries = self._expand(self.settings, synthdef, uuids)
first_visit = False
if synth_uuid not in uuids:
first_visit = True
node_ids = {
server.node_id_allocator.allocate_node_id(): None
for _ in range(len(dictionaries))
}
uuids[synth_uuid] = node_ids
start_product = self._build_start_bundle(
dictionaries, first_visit, index, synth_uuid, synthdef, timestamp, uuids
)
if self.get("duration"):
if is_stop:
stop_product = self._build_stop_bundle(
index, synth_uuid, synthdef, timestamp, uuids
)
else:
stop_product = supriya.patterns.EventProduct(
event=None,
index=index,
is_stop=True,
requests=(),
timestamp=timestamp + duration,
uuid=None,
)
return [start_product, stop_product]
else:
uuids.pop(synth_uuid)
return [start_product]
def _build_start_bundle(
self, dictionaries, first_visit, index, synth_uuid, synthdef, timestamp, uuids
):
import supriya.patterns
requests = []
node_ids = uuids[synth_uuid]
if first_visit:
for node_id, dictionary in zip(node_ids, dictionaries):
add_action = dictionary.pop("add_action")
target_node = dictionary.pop("target_node")
if target_node is None:
target_node = 1
synth_kwargs = {
key: value
for key, value in dictionary.items()
if key in synthdef.parameter_names
}
request = supriya.commands.SynthNewRequest(
add_action=add_action,
node_id=node_id,
synthdef=synthdef,
target_node_id=target_node,
**synth_kwargs,
)
requests.append(request)
synth = supriya.realtime.Synth(synthdef)
node_ids[node_id] = synth
else:
for node_id, dictionary in zip(node_ids, dictionaries):
synth_kwargs = {
key: value
for key, value in dictionary.items()
if key in synthdef.parameter_names
}
request = supriya.commands.NodeSetRequest(
node_id=node_id, **synth_kwargs
)
requests.append(request)
event_product = supriya.patterns.EventProduct(
event=self,
index=index,
is_stop=False,
requests=requests,
timestamp=timestamp,
uuid=synth_uuid,
)
return event_product
def _build_stop_bundle(self, index, synth_uuid, synthdef, timestamp, uuids):
import supriya.patterns
import supriya.synthdefs
duration = self["duration"]
if duration is None:
duration = 1
requests = []
timestamp = timestamp + duration
node_ids = sorted(uuids[synth_uuid])
if synthdef.has_gate:
for node_id in node_ids:
request = supriya.commands.NodeSetRequest(node_id=node_id, gate=0)
requests.append(request)
elif any(x >= supriya.DoneAction.FREE_SYNTH for x in synthdef.done_actions):
pass
else:
request = supriya.commands.NodeFreeRequest(node_ids=node_ids)
requests.append(request)
event_product = supriya.patterns.EventProduct(
event=self,
index=index,
is_stop=True,
requests=requests,
timestamp=timestamp,
uuid=synth_uuid,
)
return event_product
| [((49, 38, 49, 50), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((95, 41, 95, 53), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n')] |
ApacheAA/LastSeen | emoji_utils.py | 1fe675b3ee3072d56e9fe094d1d80e1f7d876215 | # unicode digit emojis
# digits from '0' to '9'
zero_digit_code = zd = 48
# excluded digits
excl_digits = [2, 4, 5, 7]
# unicode digit keycap
udkc = '\U0000fe0f\U000020e3'
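# Editor's note (not in the original module): the comprehension below keeps the
# keycap emojis for digits 0, 1, 3, 6, 8 and 9 (2, 4, 5 and 7 are excluded), and
# the '10' emoji is appended afterwards, so hours_0_9 ends up with 7 entries.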
hours_0_9 = [chr(i) + udkc for i in range(zd, zd + 10)
if i - zd not in excl_digits]
# number '10' emoji
hours_0_9.append('\U0001f51f')
# custom emojis from '11' to '23'
hours_11_23 = [str(i) for i in range(11, 24)]
vote = ('PLUS', 'MINUS')
edit = '\U0001F4DD' | [] |
Sniper970119/ExampleForTransformers | TFBertForMaskedLM/main.py | 3348525957c38b2a45898d4f4652879933503b25 | # -*- coding:utf-8 -*-
"""
┏┛ ┻━━━━━┛ ┻┓
┃ ┃
┃ ━ ┃
┃ ┳┛ ┗┳ ┃
┃ ┃
┃ ┻ ┃
┃ ┃
┗━┓ ┏━━━┛
┃ ┃ Divine beast, bless us:
┃ ┃ code without BUGs!
┃ ┗━━━━━━━━━┓
┃CREATE BY SNIPER┣┓
┃ ┏┛
┗━┓ ┓ ┏━━━┳ ┓ ┏━┛
┃ ┫ ┫ ┃ ┫ ┫
┗━┻━┛ ┗━┻━┛
"""
import tensorflow as tf
import numpy as np
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
from transformers import BertTokenizer, TFBertForMaskedLM
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = TFBertForMaskedLM.from_pretrained('bert-base-cased', return_dict=True)
inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][6])
o1 = tokenizer.decode(int(output))
inputs = tokenizer("The capital of [MASK] is BeiJing.", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][4])
o2 = tokenizer.decode(int(output))
print()
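# Editor's sketch (assumption, not part of the original script): instead of
# hard-coding token positions 6 and 4, the [MASK] slot can be located with the
# tokenizer's mask_token_id, e.g.
# mask_index = np.where(inputs["input_ids"].numpy()[0] == tokenizer.mask_token_id)[0][0]
# print(tokenizer.decode(int(np.argmax(logits[0][mask_index]))))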
| [((26, 11, 26, 62), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', ({(26, 56, 26, 61): '"""GPU"""'}, {}), "('GPU')", True, 'import tensorflow as tf\n'), ((31, 12, 31, 60), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', ({(31, 42, 31, 59): '"""bert-base-cased"""'}, {}), "('bert-base-cased')", False, 'from transformers import BertTokenizer, TFBertForMaskedLM\n'), ((33, 8, 33, 78), 'transformers.TFBertForMaskedLM.from_pretrained', 'TFBertForMaskedLM.from_pretrained', (), '', False, 'from transformers import BertTokenizer, TFBertForMaskedLM\n'), ((40, 9, 40, 32), 'numpy.argmax', 'np.argmax', ({(40, 19, 40, 31): 'logits[0][6]'}, {}), '(logits[0][6])', True, 'import numpy as np\n'), ((48, 9, 48, 32), 'numpy.argmax', 'np.argmax', ({(48, 19, 48, 31): 'logits[0][4]'}, {}), '(logits[0][4])', True, 'import numpy as np\n'), ((27, 4, 27, 55), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', ({(27, 45, 27, 48): 'gpu', (27, 50, 27, 54): '(True)'}, {}), '(gpu, True)', True, 'import tensorflow as tf\n')] |
gcastellan0s/mirariapp | mirari/TCS/migrations/0042_auto_20190726_0145.py | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | # Generated by Django 2.0.5 on 2019-07-26 06:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('TCS', '0041_auto_20190726_0030'),
]
operations = [
migrations.AlterModelOptions(
name='modelo',
options={'default_permissions': [], 'ordering': ['-id'], 'permissions': [('Can_View__Modelo', 'Ve modelos'), ('Can_Create__Modelo', 'Crea modelos'), ('Can_Update__Modelo', 'Modifica modelos'), ('Can_Delete__Modelo', 'Elimina modelos'), ('Can_Change__ModelTCS', 'Modifica modelos de equipo')], 'verbose_name': 'Modelo', 'verbose_name_plural': 'Modelos'},
),
]
| [((13, 8, 16, 9), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', (), '', False, 'from django.db import migrations\n')] |
belltailjp/kornia | kornia/geometry/calibration/undistort.py | cfa3b6823d55e276893847f1c3f06ddf108c606a | import torch
from kornia.geometry.linalg import transform_points
from kornia.geometry.transform import remap
from kornia.utils import create_meshgrid
from .distort import distort_points, tilt_projection
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L384
def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate a set of 2D image points for lens distortion.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
Returns:
Undistorted 2D points with shape :math:`(*, N, 2)`.
Example:
>>> _ = torch.manual_seed(0)
>>> x = torch.rand(1, 4, 2)
>>> K = torch.eye(3)[None]
>>> dist = torch.rand(1, 4)
>>> undistort_points(x, K, dist)
tensor([[[-0.1513, -0.1165],
[ 0.0711, 0.1100],
[-0.0697, 0.0228],
[-0.1843, -0.1606]]])
"""
if points.dim() < 2 or points.shape[-1] != 2:
raise ValueError(f'points shape is invalid. Got {points.shape}.')
if K.shape[-2:] != (3, 3):
raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
if dist.shape[-1] not in [4, 5, 8, 12, 14]:
raise ValueError(f"Invalid number of distortion coefficients. Got {dist.shape[-1]}")
# Adding zeros to obtain vector with 14 coeffs.
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
cx: torch.Tensor = K[..., 0:1, 2] # principal point in x (Bx1)
cy: torch.Tensor = K[..., 1:2, 2] # principal point in y (Bx1)
fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1)
fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
inv_tilt = tilt_projection(dist[..., 12], dist[..., 13], True)
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
x, y = transform_points(inv_tilt, torch.stack([x, y], dim=-1)).unbind(-1)
# Iteratively undistort points
x0, y0 = x, y
for _ in range(5):
r2 = x * x + y * y
inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3) / (
1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3
)
deltaX = (
2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
deltaY = (
dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
x = (x0 - deltaX) * inv_rad_poly
y = (y0 - deltaY) * inv_rad_poly
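# Editor's note (comment added, not in the original source): the 5-step loop above
# is a fixed-point iteration that inverts the distortion model, i.e. it solves
# distort(x_undistorted) = x_observed by re-evaluating the radial, tangential and
# thin-prism terms at the current estimate and correcting (x0, y0) accordingly.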
# Convert points from normalized camera coordinates to pixel coordinates
x = fx * x + cx
y = fy * y + cy
return torch.stack([x, y], -1)
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L287
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate an image for lens distortion.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
distortion models are considered in this function.
Args:
image: Input image with shape :math:`(*, C, H, W)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
Returns:
Undistorted image with shape :math:`(*, C, H, W)`.
Example:
>>> img = torch.rand(1, 3, 5, 5)
>>> K = torch.eye(3)[None]
>>> dist_coeff = torch.rand(4)
>>> out = undistort_image(img, K, dist_coeff)
>>> out.shape
torch.Size([1, 3, 5, 5])
"""
if len(image.shape) < 2:
raise ValueError(f"Image shape is invalid. Got: {image.shape}.")
if K.shape[-2:] != (3, 3):
raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
if dist.shape[-1] not in [4, 5, 8, 12, 14]:
raise ValueError(f'Invalid number of distortion coefficients. Got {dist.shape[-1]}.')
if not image.is_floating_point():
raise ValueError(f'Invalid input image data type. Input should be float. Got {image.dtype}.')
B, _, rows, cols = image.shape
# Create point coordinates for each pixel of the image
xy_grid: torch.Tensor = create_meshgrid(rows, cols, False, image.device, image.dtype)
pts = xy_grid.reshape(-1, 2) # (rows*cols)x2 matrix of pixel coordinates
# Distort points and define maps
ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2
mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float
mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float
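# Editor's note (comment added, not in the original source): mapx/mapy hold, for every
# pixel of the undistorted output grid, the distorted source coordinates to sample from,
# so the remap() call below pulls each output pixel from its distorted location.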
# Remap image to undistort
out = remap(image, mapx, mapy, align_corners=True)
return out
| [((96, 11, 96, 34), 'torch.stack', 'torch.stack', ({(96, 23, 96, 29): '[x, y]', (96, 31, 96, 33): '(-1)'}, {}), '([x, y], -1)', False, 'import torch\n'), ((141, 28, 141, 89), 'kornia.utils.create_meshgrid', 'create_meshgrid', ({(141, 44, 141, 48): 'rows', (141, 50, 141, 54): 'cols', (141, 56, 141, 61): '(False)', (141, 63, 141, 75): 'image.device', (141, 77, 141, 88): 'image.dtype'}, {}), '(rows, cols, False, image.device, image.dtype)', False, 'from kornia.utils import create_meshgrid\n'), ((150, 10, 150, 54), 'kornia.geometry.transform.remap', 'remap', (), '', False, 'from kornia.geometry.transform import remap\n'), ((50, 15, 50, 70), 'torch.nn.functional.pad', 'torch.nn.functional.pad', ({(50, 39, 50, 43): 'dist', (50, 45, 50, 69): '[0, 14 - dist.shape[-1]]'}, {}), '(dist, [0, 14 - dist.shape[-1]])', False, 'import torch\n'), ((62, 7, 62, 36), 'torch.any', 'torch.any', ({(62, 17, 62, 35): '(dist[..., 12] != 0)'}, {}), '(dist[..., 12] != 0)', False, 'import torch\n'), ((62, 40, 62, 69), 'torch.any', 'torch.any', ({(62, 50, 62, 68): '(dist[..., 13] != 0)'}, {}), '(dist[..., 13] != 0)', False, 'import torch\n'), ((66, 42, 66, 69), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n')] |
o-Ian/Practice-Python | Tests/Aula_7a.py | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
print('A soma é: {}!' .format(n1+n2))
print('A subtração entre {} e {} é {}!' .format(n1, n2, n1-n2))
print('A multiplicação desses valores é {}!' .format(n1 * n2))
print('A divisão entre {} e {} é {:.3}' .format(n1, n2, n1/n2))
print('A divisão sem restos é {}!' .format(n1//n2), end = ' ')
print('O resto dessa divisão é {}' .format(n1 % n2))
| [] |
shuvro-zz/manubot | manubot/cite/tests/test_citekey_api.py | 9023b7fbfa0b235c14a4d702516bc0cd6d3101ed | """Tests API-level functions in manubot.cite. Both functions are found in citekey.py"""
import pytest
from manubot.cite import citekey_to_csl_item, standardize_citekey
@pytest.mark.parametrize(
"citekey,expected",
[
("doi:10.5061/DRYAD.q447c/1", "doi:10.5061/dryad.q447c/1"),
("doi:10.5061/dryad.q447c/1", "doi:10.5061/dryad.q447c/1"),
("doi:10/b6vnmd", "doi:10.1016/s0933-3657(96)00367-3"),
("doi:10/B6VNMD", "doi:10.1016/s0933-3657(96)00367-3"),
(
"doi:10/xxxxxxxxxxxxxYY",
"doi:10/xxxxxxxxxxxxxyy",
), # passthrough non-existent shortDOI
("pmid:24159271", "pmid:24159271"),
("isbn:1339919885", "isbn:9781339919881"),
("isbn:1-339-91988-5", "isbn:9781339919881"),
("isbn:978-0-387-95069-3", "isbn:9780387950693"),
("isbn:9780387950938", "isbn:9780387950938"),
("isbn:1-55860-510-X", "isbn:9781558605107"),
("isbn:1-55860-510-x", "isbn:9781558605107"),
],
)
def test_standardize_citekey(citekey, expected):
"""
Standardize identifiers based on their source
"""
output = standardize_citekey(citekey)
assert output == expected
@pytest.mark.xfail(reason="https://twitter.com/dhimmel/status/950443969313419264")
def test_citekey_to_csl_item_doi_datacite():
citekey = "doi:10.7287/peerj.preprints.3100v1"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "11cb5HXoY"
assert csl_item["URL"] == "https://doi.org/10.7287/peerj.preprints.3100v1"
assert csl_item["DOI"] == "10.7287/peerj.preprints.3100v1"
assert csl_item["type"] == "report"
assert (
csl_item["title"]
== "Sci-Hub provides access to nearly all scholarly literature"
)
authors = csl_item["author"]
assert authors[0]["family"] == "Himmelstein"
assert authors[-1]["family"] == "Greene"
def test_citekey_to_csl_item_arxiv():
citekey = "arxiv:cond-mat/0703470v2"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "ES92tcdg"
assert csl_item["URL"] == "https://arxiv.org/abs/cond-mat/0703470v2"
assert csl_item["number"] == "cond-mat/0703470v2"
assert csl_item["version"] == "2"
assert csl_item["type"] == "report"
assert csl_item["container-title"] == "arXiv"
assert csl_item["title"] == "Portraits of Complex Networks"
authors = csl_item["author"]
assert authors[0]["literal"] == "J. P. Bagrow"
assert csl_item["DOI"] == "10.1209/0295-5075/81/68004"
def test_citekey_to_csl_item_pmc():
"""
https://api.ncbi.nlm.nih.gov/lit/ctxp/v1/pmc/?format=csl&id=3041534
"""
citekey = "pmcid:PMC3041534"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "RoOhUFKU"
assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041534/"
assert csl_item["container-title-short"] == "Summit Transl Bioinform"
assert (
csl_item["title"]
== "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities"
)
authors = csl_item["author"]
assert authors[0]["family"] == "Botsis"
assert csl_item["PMID"] == "21347133"
assert csl_item["PMCID"] == "PMC3041534"
assert "generated by Manubot" in csl_item["note"]
assert "standard_id: pmcid:PMC3041534" in csl_item["note"]
def test_citekey_to_csl_item_pubmed_1():
"""
Generated from XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=21347133&rettype=full
"""
citekey = "pmid:21347133"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "y9ONtSZ9"
assert csl_item["type"] == "article-journal"
assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/21347133"
assert csl_item["container-title"] == "Summit on translational bioinformatics"
assert (
csl_item["title"]
== "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities."
)
assert csl_item["issued"]["date-parts"] == [[2010, 3, 1]]
authors = csl_item["author"]
assert authors[0]["given"] == "Taxiarchis"
assert authors[0]["family"] == "Botsis"
assert csl_item["PMID"] == "21347133"
assert csl_item["PMCID"] == "PMC3041534"
def test_citekey_to_csl_item_pubmed_2():
"""
Generated from XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=27094199&rettype=full
"""
citekey = "pmid:27094199"
csl_item = citekey_to_csl_item(citekey)
print(csl_item)
assert csl_item["id"] == "alaFV9OY"
assert csl_item["type"] == "article-journal"
assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/27094199"
assert csl_item["container-title"] == "Circulation. Cardiovascular genetics"
assert csl_item["container-title-short"] == "Circ Cardiovasc Genet"
assert csl_item["page"] == "179-84"
assert (
csl_item["title"]
== "Genetic Association-Guided Analysis of Gene Networks for the Study of Complex Traits."
)
assert csl_item["issued"]["date-parts"] == [[2016, 4]]
authors = csl_item["author"]
assert authors[0]["given"] == "Casey S"
assert authors[0]["family"] == "Greene"
assert csl_item["PMID"] == "27094199"
assert csl_item["DOI"] == "10.1161/circgenetics.115.001181"
def test_citekey_to_csl_item_pubmed_with_numeric_month():
"""
Generated from XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29028984&rettype=full
See https://github.com/manubot/manubot/issues/69
"""
citekey = "pmid:29028984"
csl_item = citekey_to_csl_item(citekey)
print(csl_item)
assert csl_item["issued"]["date-parts"] == [[2018, 3, 15]]
def test_citekey_to_csl_item_pubmed_book():
"""
Extracting CSL metadata from books in PubMed is not supported.
Logic not implemented to parse XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29227604&rettype=full
"""
with pytest.raises(NotImplementedError):
citekey_to_csl_item("pmid:29227604")
def test_citekey_to_csl_item_isbn():
csl_item = citekey_to_csl_item("isbn:9780387950693")
assert csl_item["type"] == "book"
assert csl_item["title"] == "Complex analysis"
| [((8, 1, 27, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(9, 4, 9, 22): '"""citekey,expected"""', (10, 4, 26, 5): "[('doi:10.5061/DRYAD.q447c/1', 'doi:10.5061/dryad.q447c/1'), (\n 'doi:10.5061/dryad.q447c/1', 'doi:10.5061/dryad.q447c/1'), (\n 'doi:10/b6vnmd', 'doi:10.1016/s0933-3657(96)00367-3'), ('doi:10/B6VNMD',\n 'doi:10.1016/s0933-3657(96)00367-3'), ('doi:10/xxxxxxxxxxxxxYY',\n 'doi:10/xxxxxxxxxxxxxyy'), ('pmid:24159271', 'pmid:24159271'), (\n 'isbn:1339919885', 'isbn:9781339919881'), ('isbn:1-339-91988-5',\n 'isbn:9781339919881'), ('isbn:978-0-387-95069-3', 'isbn:9780387950693'),\n ('isbn:9780387950938', 'isbn:9780387950938'), ('isbn:1-55860-510-X',\n 'isbn:9781558605107'), ('isbn:1-55860-510-x', 'isbn:9781558605107')]"}, {}), "('citekey,expected', [('doi:10.5061/DRYAD.q447c/1',\n 'doi:10.5061/dryad.q447c/1'), ('doi:10.5061/dryad.q447c/1',\n 'doi:10.5061/dryad.q447c/1'), ('doi:10/b6vnmd',\n 'doi:10.1016/s0933-3657(96)00367-3'), ('doi:10/B6VNMD',\n 'doi:10.1016/s0933-3657(96)00367-3'), ('doi:10/xxxxxxxxxxxxxYY',\n 'doi:10/xxxxxxxxxxxxxyy'), ('pmid:24159271', 'pmid:24159271'), (\n 'isbn:1339919885', 'isbn:9781339919881'), ('isbn:1-339-91988-5',\n 'isbn:9781339919881'), ('isbn:978-0-387-95069-3', 'isbn:9780387950693'),\n ('isbn:9780387950938', 'isbn:9780387950938'), ('isbn:1-55860-510-X',\n 'isbn:9781558605107'), ('isbn:1-55860-510-x', 'isbn:9781558605107')])", False, 'import pytest\n'), ((36, 1, 36, 82), 'pytest.mark.xfail', 'pytest.mark.xfail', (), '', False, 'import pytest\n'), ((32, 13, 32, 41), 'manubot.cite.standardize_citekey', 'standardize_citekey', ({(32, 33, 32, 40): 'citekey'}, {}), '(citekey)', False, 'from manubot.cite import citekey_to_csl_item, standardize_citekey\n'), ((39, 15, 39, 43), 'manubot.cite.citekey_to_csl_item', 'citekey_to_csl_item', ({(39, 35, 39, 42): 'citekey'}, {}), '(citekey)', False, 'from manubot.cite import citekey_to_csl_item, standardize_citekey\n'), ((55, 15, 55, 43), 'manubot.cite.citekey_to_csl_item', 'citekey_to_csl_item', ({(55, 35, 55, 42): 'citekey'}, {}), '(citekey)', False, 'from manubot.cite import citekey_to_csl_item, standardize_citekey\n'), ((73, 15, 73, 43), 'manubot.cite.citekey_to_csl_item', 'citekey_to_csl_item', ({(73, 35, 73, 42): 'citekey'}, {}), '(citekey)', False, 'from manubot.cite import citekey_to_csl_item, standardize_citekey\n'), ((95, 15, 95, 43), 'manubot.cite.citekey_to_csl_item', 'citekey_to_csl_item', ({(95, 35, 95, 42): 'citekey'}, {}), '(citekey)', False, 'from manubot.cite import citekey_to_csl_item, standardize_citekey\n'), ((118, 15, 118, 43), 'manubot.cite.citekey_to_csl_item', 'citekey_to_csl_item', ({(118, 35, 118, 42): 'citekey'}, {}), '(citekey)', False, 'from manubot.cite import citekey_to_csl_item, standardize_citekey\n'), ((146, 15, 146, 43), 'manubot.cite.citekey_to_csl_item', 'citekey_to_csl_item', ({(146, 35, 146, 42): 'citekey'}, {}), '(citekey)', False, 'from manubot.cite import citekey_to_csl_item, standardize_citekey\n'), ((162, 15, 162, 56), 'manubot.cite.citekey_to_csl_item', 'citekey_to_csl_item', ({(162, 35, 162, 55): '"""isbn:9780387950693"""'}, {}), "('isbn:9780387950693')", False, 'from manubot.cite import citekey_to_csl_item, standardize_citekey\n'), ((157, 9, 157, 43), 'pytest.raises', 'pytest.raises', ({(157, 23, 157, 42): 'NotImplementedError'}, {}), '(NotImplementedError)', False, 'import pytest\n'), ((158, 8, 158, 44), 'manubot.cite.citekey_to_csl_item', 'citekey_to_csl_item', ({(158, 28, 158, 43): '"""pmid:29227604"""'}, {}), "('pmid:29227604')", False, 'from 
manubot.cite import citekey_to_csl_item, standardize_citekey\n')] |
hmaarrfk/vispy | vispy/io/datasets.py | 7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from os import path as op
from ..util import load_data_file
# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')
def load_iris():
"""Load the iris dataset
Returns
-------
iris : NpzFile
data['data'] : a (150, 4) NumPy array with the iris' features
data['group'] : a (150,) NumPy array with the iris' group
"""
return np.load(load_data_file('iris/iris.npz',
force_download='2014-09-04'))
def load_crate():
"""Load an image of a crate
Returns
-------
crate : array
256x256x3 crate image.
"""
return np.load(load_data_file('orig/crate.npz'))['crate']
def pack_unit(value):
"""Packs float values between [0,1] into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
pack = np.zeros(value.shape + (4,), dtype=np.ubyte)
for i in range(4):
value, pack[..., i] = np.modf(value * 256.)
return pack
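# Editor's illustration (not in the original file): each float in [0, 1] is split
# into four successive base-256 "digits", e.g. pack_unit(np.array([0.5])) gives
# [128, 0, 0, 0] because 0.5 * 256 has integer part 128 and no fractional remainder.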
def pack_ieee(value):
"""Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
return np.frombuffer(value.tobytes(), np.ubyte).reshape((value.shape + (4,)))
def load_spatial_filters(packed=True):
"""Load spatial-filters kernel
Parameters
----------
packed : bool
Whether or not the data should be in "packed" representation
for use in GLSL code.
Returns
-------
kernel : array
16x1024x4 (packed float in rgba) or
16x1024 (unpacked float)
16 interpolation kernel with length 1024 each.
names : tuple of strings
Respective interpolation names, plus "Nearest" which does
not require a filter but can still be used
"""
names = ("Bilinear", "Hanning", "Hamming", "Hermite",
"Kaiser", "Quadric", "Bicubic", "CatRom",
"Mitchell", "Spline16", "Spline36", "Gaussian",
"Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
if packed:
# convert the kernel to a packed representation
kernel = pack_unit(kernel)
return kernel, names
| [((11, 19, 11, 39), 'os.path.dirname', 'op.dirname', ({(11, 30, 11, 38): '__file__'}, {}), '(__file__)', True, 'from os import path as op\n'), ((46, 11, 46, 55), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((48, 30, 48, 51), 'numpy.modf', 'np.modf', ({(48, 38, 48, 50): 'value * 256.0'}, {}), '(value * 256.0)', True, 'import numpy as np\n'), ((89, 21, 89, 61), 'os.path.join', 'op.join', ({(89, 29, 89, 37): 'DATA_DIR', (89, 39, 89, 60): '"""spatial-filters.npy"""'}, {}), "(DATA_DIR, 'spatial-filters.npy')", True, 'from os import path as op\n')] |
jehung/universal_portfolio | universal_portfolio/knapsack.py | de731a6166ff057c8d6f3f73f80f9aca151805fa | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: Daniel Zakrisson
Created: 30/03/2016
Copyright: (c) Daniel Zakrisson 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put in the same folder
/plt create a subfolder in the same directory where plot files will be saved
'''
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def read_file(file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
'Volume (BTC)': 'volume'},
inplace=True)
x_train = d.iloc[:-100, ]
x_test = d.iloc[-100:, ]
if test:
return x_test, ticker
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
scaler = joblib.load('data/scaler.pkl')
xdata = np.expand_dims(scaler.transform(xdata), axis=1)  # reuse the scaler fitted on the training data
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
# the backtester should get a list of trade signals and a list of price data for the assett
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
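# Editor's note (not in the original file): the action encoding used above is
# action 1 -> go long 100 shares, action 2 -> go short 100 shares, any other
# action -> stay flat (signal 0) for this time step.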
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
except:
pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
# This neural network is the the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
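# Editor's note (not in the original file): the LSTM layers expect batches shaped
# (batch, 1, num_features), which matches the np.expand_dims(..., axis=1) call in
# all_init_data() above.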
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate.shape)
print(xdata.shape)
print(test_data.shape)
print(price_data.shape)
print(test_price_data.shape)
'''
# stores tuples of (S, A, R, S')
h = 0
# signal = pd.Series(index=market_data.index)
signal = pd.Series(index=np.arange(len(xdata)))
for i in range(epochs):
if i == epochs - 1:  # the last epoch, use test data set
state, xdata, price_data = all_init_data(test=True)
else:
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 5
# while game still in progress
while (status == 1):
# We are in state S
# Let's run our Q function on S to get Q values for all possible actions
print('epoch ' + str(i))
qval = model.predict(state, batch_size=batch_size)
if (random.random() < epsilon): # choose random action
action = np.random.randint(0, 4) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
print('new_state', new_state)
print('reward', reward)
# Experience replay storage
if (len(replay) < buffer): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
# print(time_step, reward, terminal_state)
else: # if buffer full, overwrite old values
if (h < (buffer - 1)):
h += 1
else:
h = 0
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
X_train = []
y_train = []
for memory in minibatch:
# Get max_Q(S',a)
old_state, action, reward, new_state = memory
old_qval = model.predict(old_state, batch_size=batch_size)
newQ = model.predict(new_state, batch_size=batch_size)
maxQ = np.max(newQ)
y = np.zeros((1, 4))
y[:] = old_qval[:]
if terminal_state == 0: # non-terminal state
update = (reward + (gamma * maxQ))
else: # terminal state
update = reward
# print('rewardbase', reward)
# print('update', update)
y[0][action] = update
# print(time_step, reward, terminal_state)
X_train.append(old_state)
y_train.append(y.reshape(4, ))
X_train = np.squeeze(np.array(X_train), axis=(1))
y_train = np.array(y_train)
model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)
state = new_state
if terminal_state == 1: # if reached terminal state, update epoch status
status = 0
eval_reward = evaluate_Q(test_data, model, i)
# eval_reward = value_iter(test_data, epsilon, epochs)
learning_progress.append(eval_reward)
print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
# learning_progress.append((reward))
if epsilon > 0.1: # decrement epsilon over time
epsilon -= (1.0 / epochs)
elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
print("Completed in %f" % (elapsed,))
bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
bt.data['delta'] = bt.data['shares'].diff().fillna(0)
print(bt.data)
bt.data.to_csv('plt/knapsack_data.csv')
unique, counts = np.unique(filter(lambda v: v == v, signal.values), return_counts=True)
print(np.asarray((unique, counts)).T)
plt.figure()
plt.subplot(3, 1, 1)
bt.plotTrades()
plt.subplot(3, 1, 2)
bt.pnl.plot(style='x-')
plt.subplot(3, 1, 3)
plt.plot(learning_progress)
print('to plot', learning_progress)
plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.show()
| [((5, 0, 5, 20), 'numpy.random.seed', 'np.random.seed', ({(5, 15, 5, 19): '(1335)'}, {}), '(1335)', True, 'import numpy as np\n'), ((6, 0, 6, 62), 'numpy.set_printoptions', 'np.set_printoptions', (), '', True, 'import numpy as np\n'), ((51, 13, 51, 41), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ({}, {}), '()', False, 'from sklearn import preprocessing\n'), ((71, 12, 71, 26), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((72, 13, 72, 43), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ({}, {}), '()', False, 'from sklearn import preprocessing\n'), ((97, 13, 97, 43), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ({}, {}), '()', False, 'from sklearn import preprocessing\n'), ((98, 13, 98, 33), 'os.listdir', 'os.listdir', ({(98, 24, 98, 32): 'filepath'}, {}), '(filepath)', False, 'import os\n'), ((104, 10, 104, 32), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((111, 12, 111, 33), 'numpy.vstack', 'np.vstack', ({(111, 22, 111, 32): 'all.values'}, {}), '(all.values)', True, 'import numpy as np\n'), ((112, 12, 112, 32), 'numpy.nan_to_num', 'np.nan_to_num', ({(112, 26, 112, 31): 'xdata'}, {}), '(xdata)', True, 'import numpy as np\n'), ((237, 12, 237, 24), 'keras.models.Sequential', 'Sequential', ({}, {}), '()', False, 'from keras.models import Sequential\n'), ((253, 10, 253, 19), 'keras.optimizers.RMSprop', 'RMSprop', ({}, {}), '()', False, 'from keras.optimizers import RMSprop, Adam\n'), ((254, 11, 254, 17), 'keras.optimizers.Adam', 'Adam', ({}, {}), '()', False, 'from keras.optimizers import RMSprop, Adam\n'), ((257, 17, 257, 39), 'timeit.default_timer', 'timeit.default_timer', ({}, {}), '()', False, 'import random, timeit\n'), ((367, 4, 367, 16), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((368, 4, 368, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(368, 16, 368, 17): '(3)', (368, 19, 368, 20): '(1)', (368, 22, 368, 23): '(1)'}, {}), '(3, 1, 1)', True, 'from matplotlib import pyplot as plt\n'), ((370, 4, 370, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(370, 16, 370, 17): '(3)', (370, 19, 370, 20): '(1)', (370, 22, 370, 23): '(2)'}, {}), '(3, 1, 2)', True, 'from matplotlib import pyplot as plt\n'), ((372, 4, 372, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(372, 16, 372, 17): '(3)', (372, 19, 372, 20): '(1)', (372, 22, 372, 23): '(3)'}, {}), '(3, 1, 3)', True, 'from matplotlib import pyplot as plt\n'), ((373, 4, 373, 31), 'matplotlib.pyplot.plot', 'plt.plot', ({(373, 13, 373, 30): 'learning_progress'}, {}), '(learning_progress)', True, 'from matplotlib import pyplot as plt\n'), ((376, 4, 376, 91), 'matplotlib.pyplot.savefig', 'plt.savefig', (), '', True, 'from matplotlib import pyplot as plt\n'), ((377, 4, 377, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((99, 19, 99, 44), 'os.path.join', 'os.path.join', ({(99, 32, 99, 40): 'filepath', (99, 42, 99, 43): 'f'}, {}), '(filepath, f)', False, 'import os\n'), ((114, 17, 114, 47), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ({}, {}), '()', False, 'from sklearn import preprocessing\n'), ((116, 8, 116, 46), 'sklearn.externals.joblib.dump', 'joblib.dump', ({(116, 20, 116, 26): 'scaler', (116, 28, 116, 45): '"""data/scaler.pkl"""'}, {}), "(scaler, 'data/scaler.pkl')", False, 'from sklearn.externals import joblib\n'), ((118, 17, 118, 47), 
'sklearn.externals.joblib.load', 'joblib.load', ({(118, 29, 118, 46): '"""data/scaler.pkl"""'}, {}), "('data/scaler.pkl')", False, 'from sklearn.externals import joblib\n'), ((172, 8, 172, 35), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'from matplotlib import pyplot as plt\n'), ((174, 8, 174, 57), 'matplotlib.pyplot.axvline', 'plt.axvline', (), '', True, 'from matplotlib import pyplot as plt\n'), ((175, 8, 175, 43), 'matplotlib.pyplot.text', 'plt.text', ({(175, 17, 175, 20): '(250)', (175, 22, 175, 25): '(400)', (175, 27, 175, 42): '"""training data"""'}, {}), "(250, 400, 'training data')", True, 'from matplotlib import pyplot as plt\n'), ((176, 8, 176, 39), 'matplotlib.pyplot.text', 'plt.text', ({(176, 17, 176, 20): '(450)', (176, 22, 176, 25): '(400)', (176, 27, 176, 38): '"""test data"""'}, {}), "(450, 400, 'test data')", True, 'from matplotlib import pyplot as plt\n'), ((179, 8, 179, 24), 'matplotlib.pyplot.close', 'plt.close', ({(179, 18, 179, 23): '"""all"""'}, {}), "('all')", True, 'from matplotlib import pyplot as plt\n'), ((211, 18, 211, 33), 'numpy.argmax', 'np.argmax', ({(211, 28, 211, 32): 'qval'}, {}), '(qval)', True, 'import numpy as np\n'), ((238, 14, 241, 34), 'keras.layers.recurrent.LSTM', 'LSTM', (), '', False, 'from keras.layers.recurrent import LSTM\n'), ((242, 14, 242, 26), 'keras.layers.core.Dropout', 'Dropout', ({(242, 22, 242, 25): '(0.5)'}, {}), '(0.5)', False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((244, 14, 247, 34), 'keras.layers.recurrent.LSTM', 'LSTM', (), '', False, 'from keras.layers.recurrent import LSTM\n'), ((248, 14, 248, 26), 'keras.layers.core.Dropout', 'Dropout', ({(248, 22, 248, 25): '(0.5)'}, {}), '(0.5)', False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((250, 14, 250, 44), 'keras.layers.core.Dense', 'Dense', (), '', False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((251, 14, 251, 34), 'keras.layers.core.Activation', 'Activation', ({(251, 25, 251, 33): '"""linear"""'}, {}), "('linear')", False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((358, 22, 358, 69), 'pandas.Series', 'pd.Series', (), '', True, 'import pandas as pd\n'), ((52, 8, 52, 25), 'pandas.read_csv', 'pd.read_csv', ({(52, 20, 52, 24): 'file'}, {}), '(file)', True, 'import pandas as pd\n'), ((166, 21, 166, 113), 'numpy.max', 'np.max', ({(166, 28, 166, 112): "(bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'\n ].iloc[-1]"}, {}), "((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data[\n 'shares'].iloc[-1])", True, 'import numpy as np\n'), ((170, 26, 170, 90), 'pandas.Series', 'pd.Series', (), '', True, 'import pandas as pd\n'), ((355, 23, 355, 45), 'timeit.default_timer', 'timeit.default_timer', ({}, {}), '()', False, 'import random, timeit\n'), ((365, 10, 365, 38), 'numpy.asarray', 'np.asarray', ({(365, 21, 365, 37): '(unique, counts)'}, {}), '((unique, counts))', True, 'import numpy as np\n'), ((164, 30, 164, 144), 'pandas.Series', 'pd.Series', (), '', True, 'import pandas as pd\n'), ((291, 16, 291, 31), 'random.random', 'random.random', ({}, {}), '()', False, 'import random, timeit\n'), ((292, 25, 292, 48), 'numpy.random.randint', 'np.random.randint', ({(292, 43, 292, 44): '0', (292, 46, 292, 47): '4'}, {}), '(0, 4)', True, 'import numpy as np\n'), ((294, 26, 294, 41), 'numpy.argmax', 'np.argmax', ({(294, 36, 294, 40): 'qval'}, {}), '(qval)', True, 'import numpy as np\n'), ((314, 28, 314, 60), 'random.sample', 'random.sample', ({(314, 42, 314, 48): 
'replay', (314, 50, 314, 59): 'batchSize'}, {}), '(replay, batchSize)', False, 'import random, timeit\n'), ((338, 26, 338, 43), 'numpy.array', 'np.array', ({(338, 35, 338, 42): 'y_train'}, {}), '(y_train)', True, 'import numpy as np\n'), ((322, 27, 322, 39), 'numpy.max', 'np.max', ({(322, 34, 322, 38): 'newQ'}, {}), '(newQ)', True, 'import numpy as np\n'), ((323, 24, 323, 40), 'numpy.zeros', 'np.zeros', ({(323, 33, 323, 39): '(1, 4)'}, {}), '((1, 4))', True, 'import numpy as np\n'), ((337, 37, 337, 54), 'numpy.array', 'np.array', ({(337, 46, 337, 53): 'X_train'}, {}), '(X_train)', True, 'import numpy as np\n')] |
bask0/q10hybrid | experiments/experiment_01.py | 9b18af9dd382c65dd667139f97e7da0241091a2c |
import pytorch_lightning as pl
import optuna
import xarray as xr
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
import os
import shutil
from argparse import ArgumentParser
from datetime import datetime
from project.fluxdata import FluxData
from models.hybrid import Q10Model
# Hardcoded `Trainer` args. Note that these cannot be passed via cli.
TRAINER_ARGS = dict(
max_epochs=100,
log_every_n_steps=1,
weights_summary=None
)
class Objective(object):
def __init__(self, args):
self.args = args
def __call__(self, trial: optuna.trial.Trial) -> float:
q10_init = trial.suggest_float('q10_init', 0.0001, 1000.)
seed = trial.suggest_int('seed', 0, 999999999999)
use_ta = trial.suggest_categorical('use_ta', [True, False])
dropout = trial.suggest_float('dropout', 0.0, 1.0)
if use_ta:
features = ['sw_pot', 'dsw_pot', 'ta']
else:
features = ['sw_pot', 'dsw_pot']
pl.seed_everything(seed)
# Further variables used in the hybrid model.
physical = ['ta']
# Target (multiple targets not possible currently).
targets = ['reco']
# Find variables that are only needed in physical model but not in NN.
physical_exclusive = [v for v in physical if v not in features]
# ------------
# data
# ------------
ds = xr.open_dataset(self.args.data_path)
fluxdata = FluxData(
ds,
features=features + physical_exclusive,
targets=targets,
context_size=1,
train_time=slice('2003-01-01', '2006-12-31'),
valid_time=slice('2007-01-01', '2007-12-31'),
test_time=slice('2008-01-01', '2008-12-31'),
batch_size=self.args.batch_size,
data_loader_kwargs={'num_workers': 4})
train_loader = fluxdata.train_dataloader()
val_loader = fluxdata.val_dataloader()
test_loader = fluxdata.test_dataloader()
# Create empty xr.Dataset, will be used by the model to save predictions every epoch.
max_epochs = TRAINER_ARGS['max_epochs']
ds_pred = fluxdata.target_xr('valid', varnames=['reco', 'rb'], num_epochs=max_epochs)
# ------------
# model
# ------------
model = Q10Model(
features=features,
targets=targets,
norm=fluxdata._norm,
ds=ds_pred,
q10_init=q10_init,
hidden_dim=self.args.hidden_dim,
num_layers=self.args.num_layers,
learning_rate=self.args.learning_rate,
dropout=dropout,
weight_decay=self.args.weight_decay,
num_steps=len(train_loader) * max_epochs)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(
self.args,
default_root_dir=self.args.log_dir,
**TRAINER_ARGS,
callbacks=[
EarlyStopping(
monitor='valid_loss',
patience=10,
min_delta=0.00001),
ModelCheckpoint(
filename='{epoch}-{val_loss:.2f}',
save_top_k=1,
verbose=False,
monitor='valid_loss',
mode='min',
prefix=model.__class__.__name__)
])
trainer.fit(model, train_loader, val_loader)
# ------------
# testing
# ------------
# trainer.test(test_dataloaders=test_loader)
# ------------
# save results
# ------------
# Store predictions.
ds = fluxdata.add_scalar_record(model.ds, varname='q10', x=model.q10_history)
trial.set_user_attr('q10', ds.q10[-1].item())
# Add some attributes that are required for analysis.
ds.attrs = {
'created': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'author': '[email protected]',
'q10_init': q10_init,
'dropout': dropout,
'use_ta': int(use_ta),
'loss': trainer.callback_metrics['valid_loss'].item()
}
ds = ds.isel(epoch=slice(0, trainer.current_epoch + 1))
# Save data.
save_dir = os.path.join(model.logger.log_dir, 'predictions.nc')
print(f'Saving predictions to: {save_dir}')
ds.to_netcdf(save_dir)
return trainer.callback_metrics['valid_loss'].item()
@staticmethod
def add_project_specific_args(parent_parser: ArgumentParser) -> ArgumentParser:
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument(
'--batch_size', default=240, type=int)
parser.add_argument(
'--data_path', default='./data/Synthetic4BookChap.nc', type=str)
parser.add_argument(
'--log_dir', default='./logs/experiment_01/', type=str)
return parser
def main(parser: ArgumentParser = None, **kwargs):
"""Use kwargs to overload argparse args."""
# ------------
# args
# ------------
if parser is None:
parser = ArgumentParser()
parser = Objective.add_project_specific_args(parser)
parser = pl.Trainer.add_argparse_args(parser)
parser = Q10Model.add_model_specific_args(parser)
parser.add_argument('--create_study', action='store_true', help='create new study (deletes old) and exits')
parser.add_argument('--single_seed', action='store_true', help='use only one seed instead of (1, ..., 10).')
args = parser.parse_args()
globargs = TRAINER_ARGS.copy()
globargs.update(kwargs)
for k, v in globargs.items():
setattr(args, k, v)
# ------------
# study setup
# ------------
search_space = {
'q10_init': [0.5, 1.5, 2.5],
'seed': [0] if args.single_seed else [i for i in range(10)],
'dropout': [0.0, 0.2, 0.4, 0.6],
'use_ta': [True, False]
}
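    # GridSampler evaluates every combination exactly once:
    # 3 q10_init x 10 seeds x 4 dropouts x 2 use_ta = 240 trials (24 with --single_seed),
    # which is the same product computed as n_trials below.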
sql_file = os.path.abspath(os.path.join(args.log_dir, "optuna.db"))
sql_path = f'sqlite:///{sql_file}'
    if args.create_study or (not os.path.isfile(sql_file)):
if os.path.isdir(args.log_dir):
shutil.rmtree(args.log_dir)
os.makedirs(args.log_dir, exist_ok=True)
study = optuna.create_study(
study_name="q10hybrid",
storage=sql_path,
sampler=optuna.samplers.GridSampler(search_space),
direction='minimize',
load_if_exists=False)
if args.create_study:
return None
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
# ------------
# run study
# ------------
n_trials = 1
for _, v in search_space.items():
n_trials *= len(v)
study = optuna.load_study(
study_name="q10hybrid",
storage=sql_path,
sampler=optuna.samplers.GridSampler(search_space))
study.optimize(Objective(args), n_trials=n_trials)
if __name__ == '__main__':
main()
| [((166, 13, 166, 49), 'pytorch_lightning.Trainer.add_argparse_args', 'pl.Trainer.add_argparse_args', ({(166, 42, 166, 48): 'parser'}, {}), '(parser)', True, 'import pytorch_lightning as pl\n'), ((167, 13, 167, 53), 'models.hybrid.Q10Model.add_model_specific_args', 'Q10Model.add_model_specific_args', ({(167, 46, 167, 52): 'parser'}, {}), '(parser)', False, 'from models.hybrid import Q10Model\n'), ((40, 8, 40, 32), 'pytorch_lightning.seed_everything', 'pl.seed_everything', ({(40, 27, 40, 31): 'seed'}, {}), '(seed)', True, 'import pytorch_lightning as pl\n'), ((54, 13, 54, 49), 'xarray.open_dataset', 'xr.open_dataset', ({(54, 29, 54, 48): 'self.args.data_path'}, {}), '(self.args.data_path)', True, 'import xarray as xr\n'), ((138, 19, 138, 71), 'os.path.join', 'os.path.join', ({(138, 32, 138, 52): 'model.logger.log_dir', (138, 54, 138, 70): '"""predictions.nc"""'}, {}), "(model.logger.log_dir, 'predictions.nc')", False, 'import os\n'), ((146, 17, 146, 72), 'argparse.ArgumentParser', 'ArgumentParser', (), '', False, 'from argparse import ArgumentParser\n'), ((163, 17, 163, 33), 'argparse.ArgumentParser', 'ArgumentParser', ({}, {}), '()', False, 'from argparse import ArgumentParser\n'), ((188, 31, 188, 70), 'os.path.join', 'os.path.join', ({(188, 44, 188, 56): 'args.log_dir', (188, 58, 188, 69): '"""optuna.db"""'}, {}), "(args.log_dir, 'optuna.db')", False, 'import os\n'), ((192, 11, 192, 38), 'os.path.isdir', 'os.path.isdir', ({(192, 25, 192, 37): 'args.log_dir'}, {}), '(args.log_dir)', False, 'import os\n'), ((194, 8, 194, 48), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((205, 11, 205, 38), 'os.path.isdir', 'os.path.isdir', ({(205, 25, 205, 37): 'args.log_dir'}, {}), '(args.log_dir)', False, 'import os\n'), ((206, 8, 206, 33), 'os.makedirs', 'os.makedirs', ({(206, 20, 206, 32): 'args.log_dir'}, {}), '(args.log_dir)', False, 'import os\n'), ((191, 32, 191, 56), 'os.path.isfile', 'os.path.isfile', ({(191, 47, 191, 55): 'sql_file'}, {}), '(sql_file)', False, 'import os\n'), ((193, 12, 193, 39), 'shutil.rmtree', 'shutil.rmtree', ({(193, 26, 193, 38): 'args.log_dir'}, {}), '(args.log_dir)', False, 'import shutil\n'), ((217, 16, 217, 57), 'optuna.samplers.GridSampler', 'optuna.samplers.GridSampler', ({(217, 44, 217, 56): 'search_space'}, {}), '(search_space)', False, 'import optuna\n'), ((198, 20, 198, 61), 'optuna.samplers.GridSampler', 'optuna.samplers.GridSampler', ({(198, 48, 198, 60): 'search_space'}, {}), '(search_space)', False, 'import optuna\n'), ((99, 16, 102, 38), 'pytorch_lightning.callbacks.early_stopping.EarlyStopping', 'EarlyStopping', (), '', False, 'from pytorch_lightning.callbacks.early_stopping import EarlyStopping\n'), ((103, 16, 109, 52), 'pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint', 'ModelCheckpoint', (), '', False, 'from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint\n'), ((127, 23, 127, 37), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')] |
warifp/InstagramPostAndDelete | main.py | d22577325eccf42e629cef076ab43f7788587bc4 | #! @@Author : WAHYU ARIF PURNOMO
#! @@Create : 18 January 2019
#! @@Modify : 19 January 2019
#! Images are taken from reddit.
#! Use a VPN, because reddit's DNS is blocked in Indonesia.
import os
import json
import requests
import progressbar
from PIL import Image
from lxml import html
from time import sleep
from ImageDeleter import delete_png
from InstagramAPI import InstagramAPI
InstagramAPI = InstagramAPI(input("Username: "), input("Password: "))
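# Keep retrying the login every 300 seconds until it succeeds.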
while True:
if (InstagramAPI.login()):
break
else:
for x in range(300):
os.system('cls')
print(300-x)
sleep(1)
global useable
useable = []
os.system('pause')
def get_image():
print("Memulai mendapatkan gambar ..")
json_raw = requests.get('https://www.reddit.com/r/me_irl/new/.json', headers = {'User-agent': 'Image_Testing_V3'}).json()
json_data = json_raw['data']
json_children = json_data['children']
for x in range(len(json_children)):
json_current = json_children[x]
json_current_data = json_current['data']
json_current_url = json_current_data['url']
if "https://i.redd.it/" not in json_current_url:
pass
else:
if json_current_url not in useable:
useable.append(json_current_url)
download()
else:
pass
def download():
print("Memulai download ..")
global filename
new_filename = ""
filename = useable[-1]
filename = filename.replace("https://i.redd.it/", "")
print(filename)
f = open(filename, 'wb')
f.write(requests.get(useable[-1]).content)
f.close()
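    # Convert non-JPG downloads (e.g. PNG) to an RGB JPEG before uploading,
    # presumably because the Instagram uploader expects a JPEG file.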
    if filename[-3:] != 'jpg':
im = Image.open(filename)
for x in range(len(filename)-3):
new_filename = new_filename + filename[x]
im = im.convert("RGB")
im.save("edit" + new_filename + 'jpg')
new_filename = "edit" + new_filename + "jpg"
print(new_filename)
else:
new_filename = filename
upload(new_filename)
def delete_image(bad_file):
print("Memulai menghapus gambar ..")
if (bad_file[0] + bad_file[1] + bad_file[2] + bad_file[3]) == "edit":
png_bad_file = ''
for x in range(len(bad_file)-3):
png_bad_file = png_bad_file + bad_file[x]
png_bad_file = png_bad_file + "png"
try:
os.remove(png_bad_file)
except Exception as e:
pass
os.remove(bad_file)
delete_png()
print("Selesai.")
wait()
def upload(file):
print("Memulai upload ..")
caption = ""
InstagramAPI.uploadPhoto(file, caption=caption)
delete_image(file)
def wait():
for i in progressbar.progressbar(range(1800)):
sleep(1)
while True:
get_image()
print("Gambar sukses di upload.")
sleep(5)
os.system('pause')
| [((28, 0, 28, 18), 'os.system', 'os.system', ({(28, 10, 28, 17): '"""pause"""'}, {}), "('pause')", False, 'import os\n'), ((81, 4, 81, 23), 'os.remove', 'os.remove', ({(81, 14, 81, 22): 'bad_file'}, {}), '(bad_file)', False, 'import os\n'), ((82, 4, 82, 16), 'ImageDeleter.delete_png', 'delete_png', ({}, {}), '()', False, 'from ImageDeleter import delete_png\n'), ((99, 4, 99, 12), 'time.sleep', 'sleep', ({(99, 10, 99, 11): '(5)'}, {}), '(5)', False, 'from time import sleep\n'), ((100, 4, 100, 22), 'os.system', 'os.system', ({(100, 14, 100, 21): '"""pause"""'}, {}), "('pause')", False, 'import os\n'), ((59, 13, 59, 33), 'PIL.Image.open', 'Image.open', ({(59, 24, 59, 32): 'filename'}, {}), '(filename)', False, 'from PIL import Image\n'), ((94, 8, 94, 16), 'time.sleep', 'sleep', ({(94, 14, 94, 15): '(1)'}, {}), '(1)', False, 'from time import sleep\n'), ((23, 12, 23, 28), 'os.system', 'os.system', ({(23, 22, 23, 27): '"""cls"""'}, {}), "('cls')", False, 'import os\n'), ((25, 12, 25, 20), 'time.sleep', 'sleep', ({(25, 18, 25, 19): '(1)'}, {}), '(1)', False, 'from time import sleep\n'), ((32, 15, 32, 118), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((56, 12, 56, 37), 'requests.get', 'requests.get', ({(56, 25, 56, 36): 'useable[-1]'}, {}), '(useable[-1])', False, 'import requests\n'), ((78, 12, 78, 35), 'os.remove', 'os.remove', ({(78, 22, 78, 34): 'png_bad_file'}, {}), '(png_bad_file)', False, 'import os\n')] |
maximilionus/pyspectator-x | pyspectator/collection.py | 1265f1f39e7ca0534f9e6ffcd7087f2ebced3397 | from collections.abc import MutableMapping, Container  # the ABCs live in collections.abc (required on Python 3.10+)
from datetime import datetime, timedelta
from pyvalid import accepts
class LimitedTimeTable(MutableMapping, Container):
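    """Dict-like mapping from datetime keys to values; on each insertion the
    oldest entry is evicted once it has aged beyond ``time_span``."""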
def __init__(self, time_span):
self.__storage = dict()
self.__time_span = None
self.time_span = time_span
@property
def time_span(self):
return self.__time_span
@time_span.setter
@accepts(object, timedelta)
def time_span(self, value):
self.__time_span = value
@property
def oldest(self):
value = None
if self.__len__() > 0:
value = min(self.__storage.keys())
return value
@property
def newest(self):
value = None
if self.__len__() > 0:
value = max(self.__storage.keys())
return value
def oldest_keys(self, size):
for key in self.__get_slice(0, size):
yield key
def oldest_values(self, size):
for key in self.oldest_keys(size):
yield self.__storage.get(key)
def oldest_items(self, size):
for key in self.oldest_keys(size):
yield (key, self.__storage.get(key))
def newest_keys(self, size):
for key in self.__get_slice(-size, None):
yield key
def newest_values(self, size):
for key in self.newest_keys(size):
yield self.__storage.get(key)
def newest_items(self, size):
for key in self.newest_keys(size):
yield (key, self.__storage.get(key))
def __get_slice(self, start, end):
keys = sorted(self.keys())
return keys[start:end]
def __getitem__(self, item):
return self.__storage.__getitem__(item)
@accepts(object, datetime, object)
def __setitem__(self, key, value):
now = datetime.now()
if key > now:
raise ValueError('Can\'t set item from future!')
oldest = self.oldest
if (oldest is not None) and (oldest != key):
longest_time_span = now - oldest
# Item is too old for current timetable
if longest_time_span >= self.time_span:
self.__delitem__(oldest)
return self.__storage.__setitem__(key, value)
def __delitem__(self, key):
return self.__storage.__delitem__(key)
def __len__(self):
return self.__storage.__len__()
def __iter__(self):
return self.__storage.__iter__()
def __contains__(self, item):
return self.__storage.__contains__(item)
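# Minimal usage sketch (hypothetical values):
#   table = LimitedTimeTable(timedelta(minutes=5))
#   table[datetime.now()] = 42  # entries older than 5 minutes get evicted on insert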
__all__ = ['LimitedTimeTable']
| [((18, 5, 18, 31), 'pyvalid.accepts', 'accepts', ({(18, 13, 18, 19): 'object', (18, 21, 18, 30): 'timedelta'}, {}), '(object, timedelta)', False, 'from pyvalid import accepts\n'), ((67, 5, 67, 38), 'pyvalid.accepts', 'accepts', ({(67, 13, 67, 19): 'object', (67, 21, 67, 29): 'datetime', (67, 31, 67, 37): 'object'}, {}), '(object, datetime, object)', False, 'from pyvalid import accepts\n'), ((69, 14, 69, 28), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n')] |
AndySamoil/Elite_Code | keyboardrow.py | 7dc3b7b1b8688c932474f8a10fd2637fd2918bdd | from typing import List


class Solution:
    def findWords(self, words: List[str]) -> List[str]:
        '''Keep the words whose letters all come from a single keyboard row,
        using one set per row and iterating through the sets.
        '''
        every = [set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm")]
        ans = []
        for word in words:
            l = len(word)
            for sett in every:
                count = 0
                for let in word:
                    if let.lower() in sett:
                        count += 1
                    if count == l:
                        ans.append(word)
return ans | [] |
xli1110/LC | DFS_Backtracking/31. Next Permutation.py | 3c18b8809c5a21a62903060eef659654e0595036 | class Solution:
def __init__(self):
self.res = []
self.path = []
def arr_to_num(self, arr):
s = ""
for x in arr:
s += str(x)
return int(s)
def find_position(self, nums):
for i in range(len(self.res)):
if self.res[i] == nums:
if i == len(self.res) - 1:
return 0
# we need the check below for duplicate elements in nums
# run nums = [1, 5, 1] and see the case
next_num = self.arr_to_num(self.res[i + 1])
if next_num > self.arr_to_num(nums):
return i + 1
raise Exception("The permutation function has something wrong, please debug it.")
def DFS(self, arr):
if not arr:
self.res.append(self.path[:])
return
for i in range(len(arr)):
self.path.append(arr[i])
self.DFS(arr[:i] + arr[i + 1:])
self.path.pop()
def nextPermutation(self, nums: [int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if not nums:
raise Exception("Empty Array")
# all permutations
# note that we need to SORT the array at first
arr = nums[:]
arr.sort()
self.DFS(arr)
# find position
position = self.find_position(nums)
# in-place replacement
for i in range(len(nums)):
nums[i] = self.res[position][i]
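# Note: this brute-force variant materialises every permutation of the sorted
# array (O(n! * n) time and memory), so it is only practical for very small
# inputs; the canonical in-place algorithm instead scans from the right for the
# first element that is smaller than its successor.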
if __name__ == "__main__":
sol = Solution()
# nums = [2, 1, 3]
nums = [1, 5, 1]
sol.nextPermutation(nums)
print(sol.res)
| [] |
konradotto/TS | plugin/DataExport/extend.py | bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e | #!/usr/bin/python
# Copyright (C) 2015 Ion Torrent Systems, Inc. All Rights Reserved
import subprocess
import re
pluginName = 'DataExport'
pluginDir = ""
networkFS = ["nfs", "cifs"]
localFS = ["ext4", "ext3", "xfs", "ntfs", "exfat", "vboxsf"]
supportedFS = ",".join(localFS + networkFS)
def test(bucket):
return bucket
def runProcess(exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return iter(p.stdout.readline, b'')
def runProcessAndReturnLastLine(exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.stdout.readlines()[-1]
def backupDevices(bucket):
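    # Build an HTML <OPTION> list of writable mount points on supported
    # filesystems; local filesystems are only offered when mounted under
    # /media or /mnt, and the free-space figure comes from `df -h`.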
devices = ""
cmd = "mount -l -t " + supportedFS
for line in runProcess(cmd.split()):
line_arr = line.split()
folder = line_arr[2]
fstype = line_arr[4]
perms = line_arr[5]
if perms.find('w') != -1:
use = True
if fstype in localFS:
m = re.match('^(/media|/mnt)', folder)
if not m:
use = False
if use:
cmd2 = "df -h %s " % folder
df = runProcessAndReturnLastLine(cmd2.split())
avail = df.split()[2]
devices = devices + "<OPTION VALUE=\"" + folder + "\">" + folder + " (" + avail + " free, " + fstype + ")</option>"
return devices
| [((20, 8, 20, 79), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((25, 8, 25, 79), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((40, 20, 40, 54), 're.match', 're.match', ({(40, 29, 40, 45): '"""^(/media|/mnt)"""', (40, 47, 40, 53): 'folder'}, {}), "('^(/media|/mnt)', folder)", False, 'import re\n')] |
PaulAustin/sb7-pgz | boids/biods_object.py | fca3e50132b9d1894fb348b2082e83ce7b937b19 | # Ported from the JavaScript version to Python and Pygame Zero
# Designed to work well with mu-editor environment.
#
# The original JavaScript version was done by Ben Eater
# at https://github.com/beneater/boids (MIT License)
# No endorsement implied.
#
# Complex numbers are used as vectors to integrate x and y positions and velocities
# MIT license (details in parent directory)
import random
import time
HEIGHT = 500 # window height
WIDTH = 900 # window width
MARGIN = 150         # distance at which to start avoiding the edge
NUM_BOIDS = 75
VISUAL_RANGE = 70    # radius of influence for most algorithms
SPEED_LIMIT_UPPER = 13   # boids can only fly so fast.
SPEED_LIMIT_LOWER = 3    # a boid will fall if it flies too slowly
SPEED_INIT = 20 # range for random velocity
MIN_DISTANCE = 10 # the distance to stay away from other boids
AVOID_FACTOR = 0.05 # % location change if too close
CENTERING_FACTOR = 0.050 # % location change to pull to center
MATCHING_FACTOR = 0.015 # % velocity change if close
MARGIN_FACTOR = 0.25+0.0j # rate of turning away from edge
HISTORY_LENGTH = 30
BACK_COLOR = (0, 0, 90)
BOID_COLOR = (255, 128, 128)
BOID_SIZE = 8
TRAIL_COLOR = (255, 255, 64)
g_boids = []
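# Positions and velocities are complex numbers: .real is the x component and
# .imag the y component (y grows downward on screen), so adding 0.25+0.0j
# nudges a boid to the right and adding 0.25j nudges it down.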
class Boid:
def __init__(boid) :
boid.loc = complex(
(random.randint(0, WIDTH)),
(random.randint(0, HEIGHT)))
boid.vel = complex(
(random.randint(-SPEED_INIT, SPEED_INIT)),
(random.randint(-SPEED_INIT, SPEED_INIT)))
boid.history = []
def keep_within_bounds(boid) :
# Constrain a boid to within the window. If it gets too close to an edge,
# nudge it back in and reverse its direction.
if (boid.loc.real < MARGIN):
boid.vel += MARGIN_FACTOR * 1.0
if (boid.loc.real > WIDTH - MARGIN) :
boid.vel += MARGIN_FACTOR * -1.0
if (boid.loc.imag < MARGIN) :
boid.vel += MARGIN_FACTOR * 1.0j
if (boid.loc.imag > HEIGHT - MARGIN) :
boid.vel += MARGIN_FACTOR * -1.0j
def fly_towards_center(boid):
# Find the center of mass of the other boids and
# adjust velocity slightly to point towards the
# center of mass.
center = 0+0j
num_neighbors = 0
for other_boid in g_boids :
if abs(boid.loc - other_boid.loc) < VISUAL_RANGE :
center += other_boid.loc
num_neighbors += 1
if num_neighbors > 0 :
center = center / num_neighbors
boid.loc += (center - boid.loc) * CENTERING_FACTOR
def avoid_others(boid):
# Move away from other boids that are too close to avoid colliding
move = 0+0j
for other_boid in g_boids :
if not (other_boid is boid) :
if abs(boid.loc - other_boid.loc) < MIN_DISTANCE :
move += boid.loc - other_boid.loc
boid.vel += move * AVOID_FACTOR
def match_velocity(boid):
# Find the average velocity (speed and direction)
# of the other boids and adjust velocity slightly to match.
avg_vel = 0+0j
num_neighbors = 0
for otherBoid in g_boids:
if abs(boid.loc - otherBoid.loc) < VISUAL_RANGE :
avg_vel += otherBoid.vel
num_neighbors += 1
if num_neighbors > 0:
avg_vel /= num_neighbors
boid.vel += (avg_vel - boid.vel) * MATCHING_FACTOR
def limit_speed(boid):
# Speed will naturally vary in flocking behavior,
# but real animals can't go arbitrarily fast (or slow)
speed = abs(boid.vel)
if (speed > SPEED_LIMIT_UPPER) :
boid.vel = boid.vel / speed * SPEED_LIMIT_UPPER
if (speed < SPEED_LIMIT_LOWER) :
boid.vel = boid.vel / speed * SPEED_LIMIT_LOWER
return
def draw(boid):
screen.draw.filled_circle((boid.loc.real, boid.loc.imag), BOID_SIZE, BOID_COLOR)
tail = boid.loc + boid.vel * -1.8
screen.draw.line(
(boid.loc.real, boid.loc.imag),
(tail.real, tail.imag),
BOID_COLOR)
def draw_trail(boid):
pt_from = (boid.loc.real, boid.loc.imag)
for p in boid.history:
pt_to = (p.real, p.imag)
screen.draw.line(pt_from, pt_to, TRAIL_COLOR)
pt_from = pt_to
def draw():
screen.fill(BACK_COLOR)
if keyboard.space:
for boid in g_boids:
boid.draw_trail()
for boid in g_boids:
boid.draw()
screen.draw.text("space:tails r:restart", (20, 20))
def update():
for boid in g_boids:
# Apply rules
boid.fly_towards_center()
boid.avoid_others()
boid.match_velocity()
boid.limit_speed()
boid.keep_within_bounds()
# Update the position based on the current velocity
boid.loc += boid.vel
boid.history.insert(0, boid.loc)
boid.history = boid.history[:HISTORY_LENGTH]
def init():
global g_boids
g_boids = [Boid() for _ in range(NUM_BOIDS)]
def on_key_down(key, mod, unicode):
if (key == keys.R):
init()
init()
| [((42, 13, 42, 37), 'random.randint', 'random.randint', ({(42, 28, 42, 29): '0', (42, 31, 42, 36): 'WIDTH'}, {}), '(0, WIDTH)', False, 'import random\n'), ((43, 13, 43, 38), 'random.randint', 'random.randint', ({(43, 28, 43, 29): '0', (43, 31, 43, 37): 'HEIGHT'}, {}), '(0, HEIGHT)', False, 'import random\n'), ((45, 13, 45, 52), 'random.randint', 'random.randint', ({(45, 28, 45, 39): '-SPEED_INIT', (45, 41, 45, 51): 'SPEED_INIT'}, {}), '(-SPEED_INIT, SPEED_INIT)', False, 'import random\n'), ((46, 13, 46, 52), 'random.randint', 'random.randint', ({(46, 28, 46, 39): '-SPEED_INIT', (46, 41, 46, 51): 'SPEED_INIT'}, {}), '(-SPEED_INIT, SPEED_INIT)', False, 'import random\n')] |
UpOut/UpOutDF | upoutdf/types/recurring/yearly.py | 5d2f87884565d98b77e25c6a26af7dbea266be76 | # coding: utf-8
import pytz
from dateutil.relativedelta import relativedelta
from .base import BaseRecurring
from upoutdf.occurences import OccurenceBlock, OccurenceGroup
from upoutdf.constants import YEARLY_TYPE
class YearlyType(BaseRecurring):
year_day = None
required_attributes = [
'every',
'timezone',
'starting_time',
'lasting_seconds',
'type',
'starting_date'
]
def increment_by(self):
return relativedelta(years=+self.every)
def _snap_datetime(self,datetime,yearday):
if datetime is None:
return None
snapper = self.snapping_class(self.timezone)
return snapper.snap_to_year_day(datetime,yearday)
def _canonicalize_date(self,date):
if not date.tzinfo:
date = date.replace(tzinfo=pytz.utc)
if date.tzinfo != self.timezone:
date = self.timezone.normalize(date.astimezone(self.timezone))
return date
def canonicalize(self):
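        # Builds a human-readable rule string, e.g. (hypothetical values):
        #   "every 1 year day 45 starting _02/14/2015 at 9:00AM lasting 3600 seconds in UTC"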
canonical = "every %s year" % self.every
if self.year_day is not None:
canonical = "%s day %s" % (
canonical,
self.year_day
)
#(starting <datetimestring>) (ending <datetimestring>)
if not self.starting_date_infinite:
starting_date = self._canonicalize_date(self.starting_date)
canonical = "%s starting %s" % (
canonical,
starting_date.strftime("_%m/%d/%Y")
)
if not self.ending_date_infinite:
ending_date = self._canonicalize_date(self.ending_date)
canonical = "%s ending %s" % (
canonical,
ending_date.strftime("_%m/%d/%Y")
)
if self.repeating_count is not None:
canonical = "%s repeating %s times" % (
canonical,
self.repeating_count
)
starting_time = self._canonicalize_date(self.starting_time)
canonical = "%s at %s" % (
canonical,
starting_time.strftime("%-I:%M%p")
)
canonical = "%s lasting %s seconds in %s" % (
canonical,
self.lasting_seconds,
str(self.timezone)
)
return canonical
def occurences(self):
if not self.verify_parsed():
raise RuntimeError("Please call parse before calling occurences")
ending = self.ending_date
repeating_count = self.repeating_count
ending_date_infinite = self.ending_date_infinite
if repeating_count is not None:
ending_date_infinite = False
if ending is not None:
ending = self._set_start_time(ending)
ending = self._strip_microseconds(ending)
occurence_start = self.starting_date
if self.year_day is not None:
try:
occurence_start = self._snap_datetime(self.starting_date,self.year_day)
except ValueError:
#If we had a problem, try the next year
occurence_start = self._snap_datetime(
self.starting_date+relativedelta(years=+1),
self.year_day
)
occurence_start = self._set_start_time(occurence_start)
occurence_start = self._strip_microseconds(occurence_start)
occurence_block = OccurenceBlock(
starting_date=occurence_start,
ending_date=None,
starting_date_infinite=self.starting_date_infinite,
ending_date_infinite=ending_date_infinite,
typeobj=self
)
repeated = 1
occurence_end = None
#While we're before the end date (if we have it)
#And we're before the max repetetions (if we have it)
while ((ending is None or occurence_start <= ending)
and (repeating_count is None or repeated <= repeating_count)):
occurence_end = self._get_end_datetime(occurence_start)
occurence_end = self._strip_microseconds(occurence_end)
occurence_block.add_occurence(occurence_start,occurence_end)
occurence_start = self._increment_occurence(occurence_start)
occurence_start = self._strip_microseconds(occurence_start)
repeated+=1
occurence_block.ending_date = occurence_end
#We always return a OccurenceGroup, even if just 1
return OccurenceGroup(blocks=[occurence_block])
def _parse_type(self,tokens):
if tokens[0] == 'day':
tokens = self._step_tokens(tokens)
try:
self.year_day = int(tokens[0])
except ValueError:
raise ValueError("Invalid year day")
tokens = self._step_tokens(tokens)
self.type = YEARLY_TYPE
return tokens
| [((24, 15, 24, 47), 'dateutil.relativedelta.relativedelta', 'relativedelta', (), '', False, 'from dateutil.relativedelta import relativedelta\n'), ((117, 26, 123, 9), 'upoutdf.occurences.OccurenceBlock', 'OccurenceBlock', (), '', False, 'from upoutdf.occurences import OccurenceBlock, OccurenceGroup\n'), ((144, 15, 144, 55), 'upoutdf.occurences.OccurenceGroup', 'OccurenceGroup', (), '', False, 'from upoutdf.occurences import OccurenceBlock, OccurenceGroup\n'), ((110, 39, 110, 62), 'dateutil.relativedelta.relativedelta', 'relativedelta', (), '', False, 'from dateutil.relativedelta import relativedelta\n')] |
dbinetti/captable | project/urls.py | 29769b2b99a3185fda241b3087ccbe621f8c97a2 | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='about.html'), name='about'),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('apps.captable.urls',)),
)
urlpatterns += staticfiles_urlpatterns()
| [((4, 0, 4, 20), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ({}, {}), '()', False, 'from django.contrib import admin\n'), ((19, 15, 19, 40), 'django.contrib.staticfiles.urls.staticfiles_urlpatterns', 'staticfiles_urlpatterns', ({}, {}), '()', False, 'from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n'), ((12, 15, 12, 62), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', (), '', False, 'from django.views.generic import TemplateView\n'), ((13, 21, 13, 69), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', (), '', False, 'from django.views.generic import TemplateView\n'), ((14, 24, 14, 64), 'django.conf.urls.include', 'include', ({(14, 32, 14, 63): '"""django.contrib.admindocs.urls"""'}, {}), "('django.contrib.admindocs.urls')", False, 'from django.conf.urls import patterns, include, url\n'), ((15, 20, 15, 44), 'django.conf.urls.include', 'include', ({(15, 28, 15, 43): 'admin.site.urls'}, {}), '(admin.site.urls)', False, 'from django.conf.urls import patterns, include, url\n'), ((16, 14, 16, 44), 'django.conf.urls.include', 'include', ({(16, 22, 16, 42): '"""apps.captable.urls"""'}, {}), "('apps.captable.urls')", False, 'from django.conf.urls import patterns, include, url\n')] |
marjanhs/procon20 | common/evaluators/bert_emotion_evaluator.py | c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b | import warnings
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm
from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, \
convert_examples_to_hierarchical_features
from utils.preprocessing import pad_input_matrix
from utils.tokenization import BertTokenizer
from utils.emotion import Emotion
# Suppress warnings from sklearn.metrics
warnings.filterwarnings('ignore')
class BertEvaluator(object):
def __init__(self, model, processor, args, split='dev'):
self.args = args
self.model = model
self.processor = processor
self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase)
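        # The emotioner scores each example against the lexicon at args.nrc_path
        # (presumably an NRC emotion lexicon); those scores are appended to the
        # BERT features by convert_examples_to_features_with_emotion.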
self.emotioner = Emotion(args.nrc_path, args.max_em_len, args.emotion_filters)
if split == 'test':
self.eval_examples = self.processor.get_test_examples(args.data_dir, args.test_name)
elif split == 'dev':
self.eval_examples = self.processor.get_dev_examples(args.data_dir, args.dev_name)
else:
self.eval_examples = self.processor.get_any_examples(args.data_dir, split)
def get_scores(self, silent=False, return_indices=False):
all_indices = []
if self.args.is_hierarchical:
eval_features = convert_examples_to_hierarchical_features(
self.eval_examples, self.args.max_seq_length, self.tokenizer)
else:
eval_features = convert_examples_to_features_with_emotion(
self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
unpadded_emotion_scores = [f.sentiment_scores for f in eval_features]
if self.args.is_hierarchical:
pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
padded_emotion_ids = torch.tensor(unpadded_emotion_scores, dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
self.model.eval()
total_loss = 0
nb_eval_steps, nb_eval_examples = 0, 0
predicted_labels, target_labels = list(), list()
for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
emotion_ids = emotion_ids.to(self.args.device)
label_ids = label_ids.to(self.args.device)
with torch.no_grad():
if return_indices:
outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, return_indices=return_indices)
else:
outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids)
if isinstance(outs, tuple):
outs, _ = outs
if return_indices:
logits, indices = outs
all_indices.extend(indices.cpu().detach().numpy())
else:
logits = outs
if self.args.is_multilabel:
predicted_labels.extend(F.sigmoid(logits).round().long().cpu().detach().numpy())
target_labels.extend(label_ids.cpu().detach().numpy())
loss = F.binary_cross_entropy_with_logits(logits, label_ids.float(), size_average=False)
average, average_mac = 'micro', 'macro'
else:
predicted_labels.extend(torch.argmax(logits, dim=1).cpu().detach().numpy())
target_labels.extend(torch.argmax(label_ids, dim=1).cpu().detach().numpy())
loss = F.cross_entropy(logits, torch.argmax(label_ids, dim=1))
average, average_mac = 'binary', 'binary'
if self.args.n_gpu > 1:
loss = loss.mean()
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
total_loss += loss.item()
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
predicted_labels, target_labels = np.array(predicted_labels), np.array(target_labels)
accuracy = metrics.accuracy_score(target_labels, predicted_labels)
precision = metrics.precision_score(target_labels, predicted_labels, average=average)
recall = metrics.recall_score(target_labels, predicted_labels, average=average)
avg_loss = total_loss / nb_eval_steps
hamming_loss = metrics.hamming_loss(target_labels, predicted_labels)
jaccard_score = metrics.jaccard_score(target_labels, predicted_labels, average=average)
f1_micro = metrics.f1_score(target_labels, predicted_labels, average=average)
f1_macro = metrics.f1_score(target_labels, predicted_labels, average=average_mac)
if return_indices:
return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels, all_indices],\
['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels', 'all_indices']
else:
return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels],\
['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels']
def get_bert_layers(self, silent=False, last_bert_layers=-1):
if self.args.is_hierarchical:
eval_features = convert_examples_to_hierarchical_features(
self.eval_examples, self.args.max_seq_length, self.tokenizer)
else:
eval_features = convert_examples_to_features_with_emotion(
self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
        unpadded_emotion_ids = [f.sentiment_scores for f in eval_features]  # same feature attribute used in get_scores above
if self.args.is_hierarchical:
pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
padded_emotion_ids = torch.tensor(unpadded_emotion_ids, dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
self.model.eval()
bert_layers_l, label_ids_l = [], []
for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
emotion_ids = emotion_ids.to(self.args.device)
label_ids = label_ids.to(self.args.device)
with torch.no_grad():
bert_layers = self.model.get_bert_embedding(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, last_bert_layers=last_bert_layers)
label_ids = torch.argmax(label_ids, dim=1).cpu().detach().numpy()
bert_layers_l.extend(bert_layers)
label_ids_l.extend(label_ids)
bert_layers_l = torch.stack(bert_layers_l, dim=0)
return bert_layers_l, label_ids_l
| [((17, 0, 17, 33), 'warnings.filterwarnings', 'warnings.filterwarnings', ({(17, 24, 17, 32): '"""ignore"""'}, {}), "('ignore')", False, 'import warnings\n'), ((25, 25, 25, 98), 'utils.tokenization.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (), '', False, 'from utils.tokenization import BertTokenizer\n'), ((26, 25, 26, 86), 'utils.emotion.Emotion', 'Emotion', ({(26, 33, 26, 46): 'args.nrc_path', (26, 48, 26, 63): 'args.max_em_len', (26, 65, 26, 85): 'args.emotion_filters'}, {}), '(args.nrc_path, args.max_em_len, args.emotion_filters)', False, 'from utils.emotion import Emotion\n'), ((54, 27, 54, 77), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((55, 28, 55, 79), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((56, 29, 56, 81), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((57, 29, 57, 84), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((58, 20, 58, 87), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((60, 20, 60, 121), 'torch.utils.data.TensorDataset', 'TensorDataset', ({(60, 34, 60, 50): 'padded_input_ids', (60, 52, 60, 69): 'padded_input_mask', (60, 71, 60, 89): 'padded_segment_ids', (60, 91, 60, 109): 'padded_emotion_ids', (60, 111, 60, 120): 'label_ids'}, {}), '(padded_input_ids, padded_input_mask, padded_segment_ids,\n padded_emotion_ids, label_ids)', False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((61, 23, 61, 51), 'torch.utils.data.SequentialSampler', 'SequentialSampler', ({(61, 41, 61, 50): 'eval_data'}, {}), '(eval_data)', False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((62, 26, 62, 102), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((70, 74, 70, 130), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((113, 19, 113, 74), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', ({(113, 42, 113, 55): 'target_labels', (113, 57, 113, 73): 'predicted_labels'}, {}), '(target_labels, predicted_labels)', False, 'from sklearn import metrics\n'), ((114, 20, 114, 93), 'sklearn.metrics.precision_score', 'metrics.precision_score', (), '', False, 'from sklearn import metrics\n'), ((115, 17, 115, 87), 'sklearn.metrics.recall_score', 'metrics.recall_score', (), '', False, 'from sklearn import metrics\n'), ((118, 23, 118, 76), 'sklearn.metrics.hamming_loss', 'metrics.hamming_loss', ({(118, 44, 118, 57): 'target_labels', (118, 59, 118, 75): 'predicted_labels'}, {}), '(target_labels, predicted_labels)', False, 'from sklearn import metrics\n'), ((119, 24, 119, 95), 'sklearn.metrics.jaccard_score', 'metrics.jaccard_score', (), '', False, 'from sklearn import metrics\n'), ((120, 19, 120, 85), 'sklearn.metrics.f1_score', 'metrics.f1_score', (), '', False, 'from sklearn import metrics\n'), ((121, 19, 121, 89), 'sklearn.metrics.f1_score', 'metrics.f1_score', (), '', False, 'from sklearn import metrics\n'), ((151, 27, 151, 77), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((152, 28, 152, 79), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((153, 29, 153, 81), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((154, 29, 154, 81), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((155, 20, 155, 87), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((157, 20, 157, 121), 'torch.utils.data.TensorDataset', 
'TensorDataset', ({(157, 34, 157, 50): 'padded_input_ids', (157, 52, 157, 69): 'padded_input_mask', (157, 71, 157, 89): 'padded_segment_ids', (157, 91, 157, 109): 'padded_emotion_ids', (157, 111, 157, 120): 'label_ids'}, {}), '(padded_input_ids, padded_input_mask, padded_segment_ids,\n padded_emotion_ids, label_ids)', False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((158, 23, 158, 51), 'torch.utils.data.SequentialSampler', 'SequentialSampler', ({(158, 41, 158, 50): 'eval_data'}, {}), '(eval_data)', False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((159, 26, 159, 102), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((165, 74, 165, 130), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((177, 24, 177, 57), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((37, 28, 38, 77), 'datasets.bert_processors.abstract_processor.convert_examples_to_hierarchical_features', 'convert_examples_to_hierarchical_features', ({(38, 16, 38, 34): 'self.eval_examples', (38, 36, 38, 60): 'self.args.max_seq_length', (38, 62, 38, 76): 'self.tokenizer'}, {}), '(self.eval_examples, self.args.\n max_seq_length, self.tokenizer)', False, 'from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, convert_examples_to_hierarchical_features\n'), ((40, 28, 41, 93), 'datasets.bert_processors.abstract_processor.convert_examples_to_features_with_emotion', 'convert_examples_to_features_with_emotion', ({(41, 16, 41, 34): 'self.eval_examples', (41, 36, 41, 60): 'self.args.max_seq_length', (41, 62, 41, 76): 'self.tokenizer', (41, 78, 41, 92): 'self.emotioner'}, {}), '(self.eval_examples, self.args.\n max_seq_length, self.tokenizer, self.emotioner)', False, 'from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, convert_examples_to_hierarchical_features\n'), ((50, 12, 50, 74), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', ({(50, 29, 50, 47): 'unpadded_input_ids', (50, 49, 50, 73): 'self.args.max_doc_length'}, {}), '(unpadded_input_ids, self.args.max_doc_length)', False, 'from utils.preprocessing import pad_input_matrix\n'), ((51, 12, 51, 75), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', ({(51, 29, 51, 48): 'unpadded_input_mask', (51, 50, 51, 74): 'self.args.max_doc_length'}, {}), '(unpadded_input_mask, self.args.max_doc_length)', False, 'from utils.preprocessing import pad_input_matrix\n'), ((52, 12, 52, 76), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', ({(52, 29, 52, 49): 'unpadded_segment_ids', (52, 51, 52, 75): 'self.args.max_doc_length'}, {}), '(unpadded_segment_ids, self.args.max_doc_length)', False, 'from utils.preprocessing import pad_input_matrix\n'), ((112, 42, 112, 68), 'numpy.array', 'np.array', ({(112, 51, 112, 67): 'predicted_labels'}, {}), '(predicted_labels)', True, 'import numpy as np\n'), ((112, 70, 112, 93), 'numpy.array', 'np.array', ({(112, 79, 112, 92): 'target_labels'}, {}), '(target_labels)', True, 'import numpy as np\n'), ((134, 28, 135, 77), 'datasets.bert_processors.abstract_processor.convert_examples_to_hierarchical_features', 'convert_examples_to_hierarchical_features', ({(135, 16, 135, 34): 'self.eval_examples', (135, 36, 135, 60): 'self.args.max_seq_length', (135, 62, 135, 76): 'self.tokenizer'}, {}), '(self.eval_examples, self.args.\n max_seq_length, self.tokenizer)', False, 'from 
datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, convert_examples_to_hierarchical_features\n'), ((137, 28, 138, 93), 'datasets.bert_processors.abstract_processor.convert_examples_to_features_with_emotion', 'convert_examples_to_features_with_emotion', ({(138, 16, 138, 34): 'self.eval_examples', (138, 36, 138, 60): 'self.args.max_seq_length', (138, 62, 138, 76): 'self.tokenizer', (138, 78, 138, 92): 'self.emotioner'}, {}), '(self.eval_examples, self.args.\n max_seq_length, self.tokenizer, self.emotioner)', False, 'from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, convert_examples_to_hierarchical_features\n'), ((147, 12, 147, 74), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', ({(147, 29, 147, 47): 'unpadded_input_ids', (147, 49, 147, 73): 'self.args.max_doc_length'}, {}), '(unpadded_input_ids, self.args.max_doc_length)', False, 'from utils.preprocessing import pad_input_matrix\n'), ((148, 12, 148, 75), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', ({(148, 29, 148, 48): 'unpadded_input_mask', (148, 50, 148, 74): 'self.args.max_doc_length'}, {}), '(unpadded_input_mask, self.args.max_doc_length)', False, 'from utils.preprocessing import pad_input_matrix\n'), ((149, 12, 149, 76), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', ({(149, 29, 149, 49): 'unpadded_segment_ids', (149, 51, 149, 75): 'self.args.max_doc_length'}, {}), '(unpadded_segment_ids, self.args.max_doc_length)', False, 'from utils.preprocessing import pad_input_matrix\n'), ((77, 17, 77, 32), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((172, 17, 172, 32), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((100, 47, 100, 77), 'torch.argmax', 'torch.argmax', (), '', False, 'import torch\n'), ((174, 28, 174, 58), 'torch.argmax', 'torch.argmax', (), '', False, 'import torch\n'), ((98, 40, 98, 67), 'torch.argmax', 'torch.argmax', (), '', False, 'import torch\n'), ((99, 37, 99, 67), 'torch.argmax', 'torch.argmax', (), '', False, 'import torch\n'), ((92, 40, 92, 57), 'torch.nn.functional.sigmoid', 'F.sigmoid', ({(92, 50, 92, 56): 'logits'}, {}), '(logits)', True, 'import torch.nn.functional as F\n')] |
andrearosasco/DistilledReplay | model/mlp1.py | 2a4efa88d22b9afc7016f07549114688f346dbe8 | import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, config):
super(Model, self).__init__()
self.drop = nn.Dropout(config['dropout'])
self.fc1 = nn.Linear(784, 2000)
self.fc2 = nn.Linear(2000, 2000)
self.fc3 = nn.Linear(2000, 2000)
self.fc4 = nn.Linear(2000, 2000)
self.fc5 = nn.Linear(2000, 10)
def forward(self, x):
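        # x is expected to be a flattened image batch of shape (batch, 784),
        # e.g. 28x28 MNIST digits reshaped into vectors.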
# 784 -> 2000
x = F.relu(self.drop(self.fc1(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc2(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc3(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc4(x)))
        # 2000 -> 10
x = self.fc5(x)
return x | [((8, 20, 8, 49), 'torch.nn.Dropout', 'nn.Dropout', ({(8, 31, 8, 48): "config['dropout']"}, {}), "(config['dropout'])", True, 'import torch.nn as nn\n'), ((10, 19, 10, 39), 'torch.nn.Linear', 'nn.Linear', ({(10, 29, 10, 32): '784', (10, 34, 10, 38): '2000'}, {}), '(784, 2000)', True, 'import torch.nn as nn\n'), ((11, 19, 11, 40), 'torch.nn.Linear', 'nn.Linear', ({(11, 29, 11, 33): '2000', (11, 35, 11, 39): '2000'}, {}), '(2000, 2000)', True, 'import torch.nn as nn\n'), ((12, 19, 12, 40), 'torch.nn.Linear', 'nn.Linear', ({(12, 29, 12, 33): '2000', (12, 35, 12, 39): '2000'}, {}), '(2000, 2000)', True, 'import torch.nn as nn\n'), ((13, 19, 13, 40), 'torch.nn.Linear', 'nn.Linear', ({(13, 29, 13, 33): '2000', (13, 35, 13, 39): '2000'}, {}), '(2000, 2000)', True, 'import torch.nn as nn\n'), ((14, 19, 14, 38), 'torch.nn.Linear', 'nn.Linear', ({(14, 29, 14, 33): '2000', (14, 35, 14, 37): '10'}, {}), '(2000, 10)', True, 'import torch.nn as nn\n')] |
aslafy-z/netbox | netbox/ipam/managers.py | a5512dd4c46c005df8752fc330c1382ac22b31ea | from django.db import models
from ipam.lookups import Host, Inet
class IPAddressManager(models.Manager):
def get_queryset(self):
"""
By default, PostgreSQL will order INETs with shorter (larger) prefix lengths ahead of those with longer
(smaller) masks. This makes no sense when ordering IPs, which should be ordered solely by family and host
address. We can use HOST() to extract just the host portion of the address (ignoring its mask), but we must
then re-cast this value to INET() so that records will be ordered properly. We are essentially re-casting each
IP address as a /32 or /128.
"""
qs = super().get_queryset()
return qs.order_by(Inet(Host('address')))
| [((17, 32, 17, 47), 'ipam.lookups.Host', 'Host', ({(17, 37, 17, 46): '"""address"""'}, {}), "('address')", False, 'from ipam.lookups import Host, Inet\n')] |
VArdulov/learning-kis | train.py | 2637f08d5e8027a22feff17064be45ea51f738e5 | #!/usr/bin/env python
# coding: utf-8
""" Learning Koopman Invariant Subspace
(c) Naoya Takeishi, 2017.
[email protected]
"""
import numpy as np
np.random.seed(1234567890)
from argparse import ArgumentParser
from os import path
import time
from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner
from losses import combined_loss
from torch import device, save, manual_seed
from torch.optim import SGD
import matplotlib.pyplot as plt
import seaborn as sns
# -- Parse arguments
t = time.time()
parser = ArgumentParser(description='Learning Koopman Invariant Subspace (Now with PyTorch!)')
parser.add_argument("--name", "-n", type=str, default=f"lkis-{int(time.time())}", help="name of experiment")
parser.add_argument("--data-path", type=str, default="./train.npy", help="time-series data to model")
parser.add_argument("--epochs", "-e", type=int, default=1000, help="number of epochs to train for")
parser.add_argument("--num-batches", "-b", type=int, default=1, help="how many batchs for break the data up into")
parser.add_argument("--gpu", action="store_true", default=False, help="use a GPU or no")
parser.add_argument("--intermediate-observable", "-i", type=int, default=-1, help="intermediate dimensional observation space")
parser.add_argument("--save-model", "-m", action="store_true", default=False, help="whether or not you want the model saved to $name$.torch.mdl")
parser.add_argument("--save-training-plot", "-p", action="store_true", default=False, help="where to save plotting")
parser.add_argument("--max-lag", "-l", type=int, default=-1, help="maximum_lag")
parser.add_argument("--state-space", "-s", type=int, default=1, help="dimensionality of the underlying state space")
parser.add_argument("--alpha", "-a", type=float, default=1.0, help="value to score the reconstruction loss by")
parser.add_argument("--learning-rate", "-r", type=float, default=0.001, help="Optimizer learning rate")
parser.add_argument("--validation-data-path", "-v", type=str, default="")
#ToDo: Implement
parser.add_argument("--dmd", action="store_true", default=False, help="Execute and save the DMD on the training set")
if __name__ == "__main__":
# grab the command line arguments
cli_args = parser.parse_args()
manual_seed(216)
# find and load the training data
data_path = cli_args.data_path
print(f"Loading training data from {data_path}")
data_train = np.load(data_path)
if len(data_train.shape) == 1:
data_train = data_train.reshape(-1, 1)
print(f"Loaded a dataset with dimension: {data_train.shape}")
validate = cli_args.validation_data_path != ""
data_val = None
if validate:
data_path = cli_args.validation_data_path
print(f"Loading validation data from {data_path}")
data_val = np.load(data_path)
# process the delay either set by the user or is set to one 10th of the data
delay = cli_args.max_lag if cli_args.max_lag > 0 else (data_train.shape[0] // 10)
# based on the number of batches, delay, and size of the data compute the samples per batch
samples_per_batch = (data_train.shape[0] - delay) // cli_args.num_batches
# construct the data preparer
batch_iterator = TimeSeriesBatchMaker(
y=data_train,
batch_size=samples_per_batch,
max_lag=delay
)
if validate:
val_batch_iterator = TimeSeriesBatchMaker(
y=data_val,
max_lag=delay
)
# construct the end-to-end model
lkis = KoopmanInvariantSubspaceLearner(
observable_dim=data_train.shape[1],
latent_dim=cli_args.state_space,
intermediate_observable=cli_args.intermediate_observable,
delay=delay
)
if cli_args.gpu:
        device = device("cuda")
        lkis = lkis.to(device)  # move the model onto the same device as the batches
# initialize the optimizer
optimizer = SGD(lkis.parameters(), lr=cli_args.learning_rate)
losses = []
val_losses = []
for epoch in range(cli_args.epochs):
loss = 0
for b in range(cli_args.num_batches):
optimizer.zero_grad()
time_delayed_ys, y_true = next(batch_iterator)
if cli_args.gpu:
                # Tensor.to returns a new tensor, so the result must be reassigned
                time_delayed_ys = time_delayed_ys.to(device)
                y_true = y_true.to(device)
g_pred, y_pred = lkis(time_delayed_ys)
g_0 = g_pred[:-1]
g_1 = g_pred[1:]
batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
batch_loss.backward()
optimizer.step()
loss += batch_loss.item()
# display the epoch training loss
print(f"epoch : {epoch + 1}/{cli_args.epochs}, loss = {loss:.6f}")
losses.append(loss)
if validate:
y_time_delayed_val, y_true = next(val_batch_iterator)
if cli_args.gpu:
                y_time_delayed_val = y_time_delayed_val.to(device)
                y_true = y_true.to(device)
g_pred, y_pred = lkis(y_time_delayed_val)
g_0 = g_pred[:-1]
g_1 = g_pred[1:]
batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
val_loss = batch_loss.item()
print(f"\tval-loss = {val_loss:.6f}")
val_losses.append(val_loss)
if cli_args.save_model:
save(lkis, f"{cli_args.name}.torch.mdl")
if cli_args.save_training_plot:
sns.lineplot(x=list(range(cli_args.epochs)), y=losses, label="training loss")
if validate:
sns.lineplot(x=list(range(cli_args.epochs)), y=val_losses, label="validation loss")
plt.xlabel("Epochs")
plt.ylabel("Combined Reconstruction and DMD Loss")
plt.title(f"Training Loss for {cli_args.name}")
plt.savefig(f"{cli_args.name}-training-loss.png")
| [((10, 0, 10, 26), 'numpy.random.seed', 'np.random.seed', ({(10, 15, 10, 25): '(1234567890)'}, {}), '(1234567890)', True, 'import numpy as np\n'), ((25, 4, 25, 15), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((26, 9, 26, 94), 'argparse.ArgumentParser', 'ArgumentParser', (), '', False, 'from argparse import ArgumentParser\n'), ((47, 4, 47, 20), 'torch.manual_seed', 'manual_seed', ({(47, 16, 47, 19): '(216)'}, {}), '(216)', False, 'from torch import device, save, manual_seed\n'), ((52, 17, 52, 35), 'numpy.load', 'np.load', ({(52, 25, 52, 34): 'data_path'}, {}), '(data_path)', True, 'import numpy as np\n'), ((70, 21, 74, 5), 'lkis.TimeSeriesBatchMaker', 'TimeSeriesBatchMaker', (), '', False, 'from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner\n'), ((82, 11, 87, 5), 'lkis.KoopmanInvariantSubspaceLearner', 'KoopmanInvariantSubspaceLearner', (), '', False, 'from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner\n'), ((61, 19, 61, 37), 'numpy.load', 'np.load', ({(61, 27, 61, 36): 'data_path'}, {}), '(data_path)', True, 'import numpy as np\n'), ((76, 29, 79, 9), 'lkis.TimeSeriesBatchMaker', 'TimeSeriesBatchMaker', (), '', False, 'from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner\n'), ((138, 8, 138, 48), 'torch.save', 'save', ({(138, 13, 138, 17): 'lkis', (138, 19, 138, 47): 'f"""{cli_args.name}.torch.mdl"""'}, {}), "(lkis, f'{cli_args.name}.torch.mdl')", False, 'from torch import device, save, manual_seed\n'), ((144, 8, 144, 28), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(144, 19, 144, 27): '"""Epochs"""'}, {}), "('Epochs')", True, 'import matplotlib.pyplot as plt\n'), ((145, 8, 145, 58), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(145, 19, 145, 57): '"""Combined Reconstruction and DMD Loss"""'}, {}), "('Combined Reconstruction and DMD Loss')", True, 'import matplotlib.pyplot as plt\n'), ((146, 8, 146, 55), 'matplotlib.pyplot.title', 'plt.title', ({(146, 18, 146, 54): 'f"""Training Loss for {cli_args.name}"""'}, {}), "(f'Training Loss for {cli_args.name}')", True, 'import matplotlib.pyplot as plt\n'), ((147, 8, 147, 57), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(147, 20, 147, 56): 'f"""{cli_args.name}-training-loss.png"""'}, {}), "(f'{cli_args.name}-training-loss.png')", True, 'import matplotlib.pyplot as plt\n'), ((110, 25, 110, 86), 'losses.combined_loss', 'combined_loss', (), '', False, 'from losses import combined_loss\n'), ((132, 25, 132, 86), 'losses.combined_loss', 'combined_loss', (), '', False, 'from losses import combined_loss\n'), ((27, 66, 27, 77), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
KenWoo/Algorithm | Algorithms/Easy/1200. Minimum Absolute Difference/answer.py | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | from typing import List
class Solution:
def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:
arr.sort()
res = []
min_diff = arr[1] - arr[0]
res.append([arr[0], arr[1]])
for i in range(1, len(arr)-1):
diff = arr[i+1]-arr[i]
if diff < min_diff:
min_diff = diff
res.clear()
res.append([arr[i], arr[i+1]])
elif diff == min_diff:
res.append([arr[i], arr[i+1]])
return res
if __name__ == "__main__":
s = Solution()
result = s.minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27])
print(result)
| [] |
VijayStroup/Physics_Problem_Solver_Basic | resources/physequations.py | fc6944475ed8bcfe91bbd207734c3f9aee31e0fe | import math
def close(expected, actual, maxerror):
'''checks to see if the actual number is within expected +- maxerror.'''
low = expected - maxerror
high = expected + maxerror
if actual >= low and actual <= high:
return True
else:
return False
def grav_potential_energy(mass, height, gravity=9.81):
'''calculate potential energy given mass and height. Mass in
kilograms and height in meters.'''
gp_energy = mass * height * gravity
return gp_energy
def kin_energy(mass, velocity):
'''calculate kinetic energy given mass and velocity. Mass in
kilograms and velocity in meters per second.'''
k_energy = .5 * mass * velocity ** 2
return k_energy
def work_energy(force, displacement, angle):
	'''calculate work energy given force, displacement,
and angle. Force in newtons, displacement in meters, angle in degrees.'''
anglerad = math.radians(angle)
cos = math.cos(anglerad)
w_energy = force * displacement * cos
return w_energy
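# Worked illustration (comment only, not in the original source): work_energy(150, 50, 45)
# evaluates to 150 N * 50 m * cos(45 deg) ~= 5303.3 J, the value checked in the tests below.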
'''=============================================================================
Tests
============================================================================='''
if __name__ == '__main__':
def check(funcname, args, expected, ans, maxerror):
if not close(expected, ans, maxerror):
print(f'{funcname}({args}) = {ans} should = {expected}')
print(close(10, 11.1, 1))
print(close(100, 100.001, .01))
print(close(-10, -11.01, 1))
print(close(84756, 84300.2, 500.5))
	#gravitational potential energy tests
ans = grav_potential_energy(3.00, 7.00)
	check('grav_potential_energy', '3.00, 7.00', 206.01, ans, 0.000000001)  # small tolerance to absorb floating-point rounding
ans = grav_potential_energy(2.00, 5.00)
check('grav_potential_energy', '2.00, 5.00', 98.1, ans, 0.01)
#kinetic energy tests
ans = kin_energy(2, 6.55)
check('kin_energy', '2, 6.55', 42.90, ans, 0.01)
ans = kin_energy(5.65, 10)
check('kin_energy', '5.65, 10', 282.5, ans, 0.1)
#work energy tests
ans = work_energy(500, 10, 0)
check('work_energy', '500, 10, 0', 5000.0, ans, 0.1)
ans = work_energy(150, 50, 45)
check('work_energy', '150, 50, 45', 5303.30, ans, 0.01)
| [((27, 12, 27, 31), 'math.radians', 'math.radians', ({(27, 25, 27, 30): 'angle'}, {}), '(angle)', False, 'import math\n'), ((28, 7, 28, 25), 'math.cos', 'math.cos', ({(28, 16, 28, 24): 'anglerad'}, {}), '(anglerad)', False, 'import math\n')] |
andycon/PyMVPA | mvpa2/tests/test_erdataset.py | 67f7ee68012e3a1128168c583d6c83303b7a2c27 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
'''Tests for the event-related dataset'''
from mvpa2.testing import *
from mvpa2.datasets import dataset_wizard
from mvpa2.mappers.flatten import FlattenMapper
from mvpa2.mappers.boxcar import BoxcarMapper
from mvpa2.mappers.fx import FxMapper
from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, \
extract_boxcar_event_samples
from mvpa2.datasets.sources import load_example_fmri_dataset
from mvpa2.mappers.zscore import zscore
def test_erdataset():
# 3 chunks, 5 targets, blocks of 5 samples each
nchunks = 3
ntargets = 5
blocklength = 5
nfeatures = 10
targets = np.tile(np.repeat(range(ntargets), blocklength), nchunks)
chunks = np.repeat(np.arange(nchunks), ntargets * blocklength)
samples = np.repeat(
np.arange(nchunks * ntargets * blocklength),
nfeatures).reshape(-1, nfeatures)
ds = dataset_wizard(samples, targets=targets, chunks=chunks)
# check if events are determined properly
evs = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
for ev in evs:
assert_equal(ev['duration'], blocklength)
assert_equal(ntargets * nchunks, len(evs))
for t in range(ntargets):
assert_equal(len([ev for ev in evs if ev['targets'] == t]),
nchunks)
    # now turn `ds` into an event-related dataset
erds = eventrelated_dataset(ds, evs)
# the only unprefixed sample attributes are
assert_equal(sorted([a for a in ds.sa if not a.startswith('event')]),
['chunks', 'targets'])
# samples as expected?
assert_array_equal(erds.samples[0],
np.repeat(np.arange(blocklength), nfeatures))
# that should also be the temporal feature offset
assert_array_equal(erds.samples[0], erds.fa.event_offsetidx)
assert_array_equal(erds.sa.event_onsetidx, np.arange(0,71,5))
# finally we should see two mappers
assert_equal(len(erds.a.mapper), 2)
assert_true(isinstance(erds.a.mapper[0], BoxcarMapper))
assert_true(isinstance(erds.a.mapper[1], FlattenMapper))
# check alternative event mapper
# this one does temporal compression by averaging
erds_compress = eventrelated_dataset(
ds, evs, event_mapper=FxMapper('features', np.mean))
assert_equal(len(erds), len(erds_compress))
assert_array_equal(erds_compress.samples[:,0], np.arange(2,73,5))
#
    # now check the same dataset with event discretization
tr = 2.5
ds.sa['time'] = np.arange(nchunks * ntargets * blocklength) * tr
evs = [{'onset': 4.9, 'duration': 6.2}]
# doesn't work without conversion
assert_raises(ValueError, eventrelated_dataset, ds, evs)
erds = eventrelated_dataset(ds, evs, time_attr='time')
assert_equal(len(erds), 1)
assert_array_equal(erds.samples[0], np.repeat(np.arange(1,5), nfeatures))
assert_array_equal(erds.sa.orig_onset, [evs[0]['onset']])
assert_array_equal(erds.sa.orig_duration, [evs[0]['duration']])
assert_array_almost_equal(erds.sa.orig_offset, [2.4])
assert_array_equal(erds.sa.time, [np.arange(2.5, 11, 2.5)])
# now with closest match
erds = eventrelated_dataset(ds, evs, time_attr='time', match='closest')
expected_nsamples = 3
assert_equal(len(erds), 1)
assert_array_equal(erds.samples[0],
np.repeat(np.arange(2,2+expected_nsamples),
nfeatures))
assert_array_equal(erds.sa.orig_onset, [evs[0]['onset']])
assert_array_equal(erds.sa.orig_duration, [evs[0]['duration']])
assert_array_almost_equal(erds.sa.orig_offset, [-0.1])
assert_array_equal(erds.sa.time, [np.arange(5.0, 11, 2.5)])
# now test the way back
results = np.arange(erds.nfeatures)
assert_array_equal(erds.a.mapper.reverse1(results),
results.reshape(expected_nsamples, nfeatures))
# what about multiple results?
nresults = 5
results = dataset_wizard([results] * nresults)
# and let's have an attribute to make it more difficult
results.sa['myattr'] = np.arange(5)
rds = erds.a.mapper.reverse(results)
assert_array_equal(rds,
results.samples.reshape(nresults * expected_nsamples,
nfeatures))
assert_array_equal(rds.sa.myattr, np.repeat(results.sa.myattr,
expected_nsamples))
evs = [dict(onset=12, duration=2), dict(onset=70, duration=3)]
evds = extract_boxcar_event_samples(ds, evs)
# it goes for the max of all durations
assert_equal(evds.shape, (len(evs), 3 * ds.nfeatures))
    # override duration
evds = extract_boxcar_event_samples(ds, evs, event_duration=1)
assert_equal(evds.shape, (len(evs), 1 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1]), 70)
    # override onset
evds = extract_boxcar_event_samples(ds, evs, event_offset=2)
assert_equal(evds.shape, (len(evs), 3 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1,:10]), 72)
    # override both
evds = extract_boxcar_event_samples(ds, evs, event_offset=-2,
event_duration=1)
assert_equal(evds.shape, (len(evs), 1 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1]), 68)
def test_hrf_modeling():
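    """Check GLM-based (HRF) event modeling: per-condition fits, custom
    regressors, and per-chunk estimation."""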
skip_if_no_external('nibabel')
skip_if_no_external('nipy') # ATM relies on NiPy's GLM implementation
ds = load_example_fmri_dataset('25mm', literal=True)
# TODO: simulate short dataset with known properties and use it
# for testing
events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
tr = ds.a.imghdr['pixdim'][4]
for ev in events:
for a in ('onset', 'duration'):
ev[a] = ev[a] * tr
evds = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# same voxels
assert_equal(ds.nfeatures, evds.nfeatures)
assert_array_equal(ds.fa.voxel_indices, evds.fa.voxel_indices)
# one sample for each condition, plus constant
assert_equal(sorted(ds.sa['targets'].unique), sorted(evds.sa.targets))
assert_equal(evds.a.add_regs.sa.regressor_names[0], 'constant')
# with centered data
zscore(ds)
evds_demean = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# after demeaning the constant should consume a lot less
assert(evds.a.add_regs[0].samples.mean()
> evds_demean.a.add_regs[0].samples.mean())
# from eyeballing the sensitivity example -- would be better to test this on
# the tutorial data
assert(evds_demean[evds.sa.targets == 'shoe'].samples.max() \
> evds_demean[evds.sa.targets == 'bottle'].samples.max())
# HRF models
assert('regressors' in evds.sa)
assert('regressors' in evds.a.add_regs.sa)
assert_equal(evds.sa.regressors.shape[1], len(ds))
# custom regressors
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
regr_attrs=['time_indices'],
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# verify that nothing screwed up time_coords
assert_equal(ds.sa.time_coords[0], 0)
assert_equal(len(evds_regrs), len(evds))
# one more output sample in .a.add_regs
assert_equal(len(evds_regrs.a.add_regs) - 1, len(evds.a.add_regs))
# comes last before constant
assert_equal('time_indices', evds_regrs.a.add_regs.sa.regressor_names[-2])
# order of main regressors is unchanged
assert_array_equal(evds.sa.targets, evds_regrs.sa.targets)
# custom regressors from external sources
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
regr_attrs=['time_coords'],
design_kwargs=dict(drift_model='blank',
add_regs=np.linspace(1, -1, len(ds))[None].T,
add_reg_names=['negative_trend']),
glmfit_kwargs=dict(model='ols'),
model='hrf')
assert_equal(len(evds_regrs), len(evds))
# But we got one more in additional regressors
assert_equal(len(evds_regrs.a.add_regs) - 2, len(evds.a.add_regs))
# comes last before constant
assert_array_equal(['negative_trend', 'time_coords', 'constant'],
evds_regrs.a.add_regs.sa.regressor_names)
# order is otherwise unchanged
assert_array_equal(evds.sa.targets, evds_regrs.sa.targets)
    # HRF models estimated separately for each chunk
assert_equal(ds.sa.time_coords[0], 0)
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr=['targets', 'chunks'],
regr_attrs=['time_indices'],
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
assert_true('add_regs' in evds_regrs.a)
assert_true('time_indices' in evds_regrs.a.add_regs.sa.regressor_names)
assert_equal(len(ds.UC) * len(ds.UT), len(evds_regrs))
assert_equal(len(evds_regrs.UC) * len(evds_regrs.UT), len(evds_regrs))
from mvpa2.mappers.fx import mean_group_sample
evds_regrs_meaned = mean_group_sample(['targets'])(evds_regrs)
assert_array_equal(evds_regrs_meaned.T, evds.T) # targets should be the same
#corr = np.corrcoef(np.vstack((evds.samples, evds_regrs_meaned)))
#import pydb; pydb.debugger()
#pass
#i = 1
| [((33, 9, 33, 64), 'mvpa2.datasets.dataset_wizard', 'dataset_wizard', (), '', False, 'from mvpa2.datasets import dataset_wizard\n'), ((35, 10, 35, 65), 'mvpa2.datasets.eventrelated.find_events', 'find_events', (), '', False, 'from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, extract_boxcar_event_samples\n'), ((43, 11, 43, 40), 'mvpa2.datasets.eventrelated.eventrelated_dataset', 'eventrelated_dataset', ({(43, 32, 43, 34): 'ds', (43, 36, 43, 39): 'evs'}, {}), '(ds, evs)', False, 'from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, extract_boxcar_event_samples\n'), ((70, 11, 70, 58), 'mvpa2.datasets.eventrelated.eventrelated_dataset', 'eventrelated_dataset', (), '', False, 'from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, extract_boxcar_event_samples\n'), ((78, 11, 78, 75), 'mvpa2.datasets.eventrelated.eventrelated_dataset', 'eventrelated_dataset', (), '', False, 'from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, extract_boxcar_event_samples\n'), ((94, 14, 94, 50), 'mvpa2.datasets.dataset_wizard', 'dataset_wizard', ({(94, 29, 94, 49): '[results] * nresults'}, {}), '([results] * nresults)', False, 'from mvpa2.datasets import dataset_wizard\n'), ((104, 11, 104, 48), 'mvpa2.datasets.eventrelated.extract_boxcar_event_samples', 'extract_boxcar_event_samples', ({(104, 40, 104, 42): 'ds', (104, 44, 104, 47): 'evs'}, {}), '(ds, evs)', False, 'from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, extract_boxcar_event_samples\n'), ((108, 11, 108, 66), 'mvpa2.datasets.eventrelated.extract_boxcar_event_samples', 'extract_boxcar_event_samples', (), '', False, 'from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, extract_boxcar_event_samples\n'), ((112, 11, 112, 64), 'mvpa2.datasets.eventrelated.extract_boxcar_event_samples', 'extract_boxcar_event_samples', (), '', False, 'from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, extract_boxcar_event_samples\n'), ((116, 11, 117, 57), 'mvpa2.datasets.eventrelated.extract_boxcar_event_samples', 'extract_boxcar_event_samples', (), '', False, 'from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, extract_boxcar_event_samples\n'), ((124, 9, 124, 56), 'mvpa2.datasets.sources.load_example_fmri_dataset', 'load_example_fmri_dataset', (), '', False, 'from mvpa2.datasets.sources import load_example_fmri_dataset\n'), ((127, 13, 127, 68), 'mvpa2.datasets.eventrelated.find_events', 'find_events', (), '', False, 'from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, extract_boxcar_event_samples\n'), ((144, 4, 144, 14), 'mvpa2.mappers.zscore.zscore', 'zscore', ({(144, 11, 144, 13): 'ds'}, {}), '(ds)', False, 'from mvpa2.mappers.zscore import zscore\n'), ((212, 24, 212, 54), 'mvpa2.mappers.fx.mean_group_sample', 'mean_group_sample', ({(212, 42, 212, 53): "['targets']"}, {}), "(['targets'])", False, 'from mvpa2.mappers.fx import mean_group_sample\n'), ((60, 46, 60, 75), 'mvpa2.mappers.fx.FxMapper', 'FxMapper', ({(60, 55, 60, 65): '"""features"""', (60, 67, 60, 74): 'np.mean'}, {}), "('features', np.mean)", False, 'from mvpa2.mappers.fx import FxMapper\n')] |
aksr-aashish/FIREXUSERBOT | userbot/plugins/delfp.py | dff0b7bf028cb27779626ce523402346cc990402 | from telethon.tl.functions.photos import DeletePhotosRequest, GetUserPhotosRequest
from telethon.tl.types import InputPhoto
from userbot.cmdhelp import CmdHelp
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
CmdHelp("delfp").add_command("delpfp", None, "delete ur currnt profile picture").add()
@borg.on(admin_cmd(pattern="delpfp ?(.*)"))
@borg.on(sudo_cmd(pattern="delpfp ?(.*)", allow_sudo=True))
async def remove_profilepic(delpfp):
"""For .delpfp command, delete your current profile picture in Telegram."""
group = delpfp.text[8:]
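    # text after ".delpfp": "all" (request with limit 0), a photo count, or
    # anything else / empty (defaults to just the current photo)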
if group == "all":
lim = 0
elif group.isdigit():
lim = int(group)
else:
lim = 1
pfplist = await delpfp.client(
GetUserPhotosRequest(user_id=delpfp.from_id, offset=0, max_id=0, limit=lim)
)
input_photos = [InputPhoto(
id=sep.id,
access_hash=sep.access_hash,
file_reference=sep.file_reference,
) for sep in pfplist.photos]
await delpfp.client(DeletePhotosRequest(id=input_photos))
await edit_or_reply(
delpfp, f"`Successfully deleted {len(input_photos)} profile picture(s).`"
)
| [((10, 9, 10, 42), 'userbot.utils.admin_cmd', 'admin_cmd', (), '', False, 'from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd\n'), ((11, 9, 11, 58), 'userbot.utils.sudo_cmd', 'sudo_cmd', (), '', False, 'from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd\n'), ((25, 20, 29, 13), 'telethon.tl.types.InputPhoto', 'InputPhoto', (), '', False, 'from telethon.tl.types import InputPhoto\n'), ((23, 8, 23, 83), 'telethon.tl.functions.photos.GetUserPhotosRequest', 'GetUserPhotosRequest', (), '', False, 'from telethon.tl.functions.photos import DeletePhotosRequest, GetUserPhotosRequest\n'), ((30, 24, 30, 60), 'telethon.tl.functions.photos.DeletePhotosRequest', 'DeletePhotosRequest', (), '', False, 'from telethon.tl.functions.photos import DeletePhotosRequest, GetUserPhotosRequest\n'), ((7, 0, 7, 16), 'userbot.cmdhelp.CmdHelp', 'CmdHelp', ({(7, 8, 7, 15): '"""delfp"""'}, {}), "('delfp')", False, 'from userbot.cmdhelp import CmdHelp\n')] |
pplonski/automlbenchmark | amlb/benchmarks/file.py | f49ddfa2583643173296ed8ab45a8c14c62a6987 | import logging
import os
from typing import List, Tuple, Optional
from amlb.utils import config_load, Namespace
log = logging.getLogger(__name__)
def _find_local_benchmark_definition(name: str, benchmark_definition_dirs: List[str]) -> str:
# 'name' should be either a full path to the benchmark,
# or a filename (without extension) in the benchmark directory.
if os.path.exists(name):
return name
for bd in benchmark_definition_dirs:
bf = os.path.join(bd, f"{name}.yaml")
if os.path.exists(bf):
# We don't account for duplicate definitions (yet).
return bf
# should we support s3 and check for s3 path before raising error?
raise ValueError(f"Incorrect benchmark name or path `{name}`, name not available in {benchmark_definition_dirs}.")
def load_file_benchmark(name: str, benchmark_definition_dirs: List[str]) -> Tuple[str, Optional[str], List[Namespace]]:
""" Loads benchmark from a local file. """
benchmark_file = _find_local_benchmark_definition(name, benchmark_definition_dirs)
log.info("Loading benchmark definitions from %s.", benchmark_file)
tasks = config_load(benchmark_file)
benchmark_name, _ = os.path.splitext(os.path.basename(benchmark_file))
return benchmark_name, benchmark_file, tasks
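def _example_load_file_benchmark():
    """Illustrative sketch, not part of the original module: the benchmark name
    and definitions directory below are hypothetical placeholders."""
    name, definition_file, tasks = load_file_benchmark("small", ["resources/benchmarks"])
    log.info("Loaded benchmark %s from %s with %s task(s).", name, definition_file, len(tasks))
    return tasks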
| [((7, 6, 7, 33), 'logging.getLogger', 'logging.getLogger', ({(7, 24, 7, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((13, 7, 13, 27), 'os.path.exists', 'os.path.exists', ({(13, 22, 13, 26): 'name'}, {}), '(name)', False, 'import os\n'), ((30, 12, 30, 39), 'amlb.utils.config_load', 'config_load', ({(30, 24, 30, 38): 'benchmark_file'}, {}), '(benchmark_file)', False, 'from amlb.utils import config_load, Namespace\n'), ((17, 13, 17, 45), 'os.path.join', 'os.path.join', ({(17, 26, 17, 28): 'bd', (17, 30, 17, 44): 'f"""{name}.yaml"""'}, {}), "(bd, f'{name}.yaml')", False, 'import os\n'), ((18, 11, 18, 29), 'os.path.exists', 'os.path.exists', ({(18, 26, 18, 28): 'bf'}, {}), '(bf)', False, 'import os\n'), ((31, 41, 31, 73), 'os.path.basename', 'os.path.basename', ({(31, 58, 31, 72): 'benchmark_file'}, {}), '(benchmark_file)', False, 'import os\n')] |
eyesoft/pybuspro | pybuspro/devices/control.py | 9a178117be2db40ef1399cc60afdc18e251682bc | from ..core.telegram import Telegram
from ..helpers.enums import OperateCode
class _Control:
def __init__(self, buspro):
self._buspro = buspro
self.subnet_id = None
self.device_id = None
@staticmethod
def build_telegram_from_control(control):
if control is None:
return None
if type(control) == _SingleChannelControl:
operate_code = OperateCode.SingleChannelControl
payload = [control.channel_number, control.channel_level, control.running_time_minutes,
control.running_time_seconds]
elif type(control) == _SceneControl:
operate_code = OperateCode.SceneControl
payload = [control.area_number, control.scene_number]
elif type(control) == _ReadStatusOfChannels:
operate_code = OperateCode.ReadStatusOfChannels
payload = []
elif type(control) == _GenericControl:
operate_code = control.operate_code
payload = control.payload
elif type(control) == _UniversalSwitch:
operate_code = OperateCode.UniversalSwitchControl
payload = [control.switch_number, control.switch_status.value]
elif type(control) == _ReadStatusOfUniversalSwitch:
operate_code = OperateCode.ReadStatusOfUniversalSwitch
payload = [control.switch_number]
elif type(control) == _ReadSensorStatus:
operate_code = OperateCode.ReadSensorStatus
payload = []
elif type(control) == _ReadSensorsInOneStatus:
operate_code = OperateCode.ReadSensorsInOneStatus
payload = []
elif type(control) == _ReadFloorHeatingStatus:
operate_code = OperateCode.ReadFloorHeatingStatus
payload = []
elif type(control) == _ReadDryContactStatus:
operate_code = OperateCode.ReadDryContactStatus
payload = [1, control.switch_number]
elif type(control) == _ControlFloorHeatingStatus:
operate_code = OperateCode.ControlFloorHeatingStatus
payload = [control.temperature_type, control.status, control.mode, control.normal_temperature,
control.day_temperature, control.night_temperature, control.away_temperature]
else:
return None
telegram = Telegram()
telegram.target_address = (control.subnet_id, control.device_id)
telegram.operate_code = operate_code
telegram.payload = payload
return telegram
@property
def telegram(self):
return self.build_telegram_from_control(self)
async def send(self):
telegram = self.telegram
# if telegram.target_address[1] == 100:
# print("==== {}".format(str(telegram)))
await self._buspro.network_interface.send_telegram(telegram)
class _GenericControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.payload = None
self.operate_code = None
class _SingleChannelControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.channel_number = None
self.channel_level = None
self.running_time_minutes = None
self.running_time_seconds = None
class _SceneControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.area_number = None
self.scene_number = None
class _ReadStatusOfChannels(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _UniversalSwitch(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
self.switch_status = None
class _ReadStatusOfUniversalSwitch(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
class _ReadSensorStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ReadSensorsInOneStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ReadFloorHeatingStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ControlFloorHeatingStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.temperature_type = None
self.status = None
self.mode = None
self.normal_temperature = None
self.day_temperature = None
self.night_temperature = None
self.away_temperature = None
class _ReadDryContactStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
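def _example_single_channel_telegram(buspro=None):
    """Illustrative sketch, not part of the original module: build (without
    sending) the telegram for a hypothetical single-channel dimmer command on
    subnet 1, device 74, setting channel 2 to 100% over 1 second."""
    control = _SingleChannelControl(buspro)
    control.subnet_id = 1
    control.device_id = 74
    control.channel_number = 2
    control.channel_level = 100
    control.running_time_minutes = 0
    control.running_time_seconds = 1
    return control.telegram  # operate code SingleChannelControl, payload [2, 100, 0, 1]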
| [] |
eunchong/infra | appengine/chrome_infra_console_loadtest/main.py | ce3728559112bfb3e8b32137eada517aec6d22f9 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import endpoints
import random
import webapp2
from apiclient import discovery
from google.appengine.ext import ndb
from oauth2client.client import GoogleCredentials
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from components import auth
CONFIG_DATASTORE_KEY = "CONFIG_DATASTORE_KEY"
API_NAME = 'consoleapp'
API_VERSION = 'v1'
DISCOVERY_URL = '%s/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest'
class FieldParamsModel(ndb.Model):
field_key = ndb.StringProperty()
values = ndb.StringProperty(repeated=True)
class MetricModel(ndb.Model):
name = ndb.StringProperty(default="")
minimum = ndb.FloatProperty(default=0)
maximum = ndb.FloatProperty(default=100)
class ParamsModel(ndb.Model):
time = ndb.FloatProperty(default=10)
freq = ndb.FloatProperty(default=1)
url = ndb.StringProperty()
params = ndb.LocalStructuredProperty(FieldParamsModel, repeated=True)
metrics = ndb.LocalStructuredProperty(MetricModel, repeated=True)
class Field(messages.Message):
key = messages.StringField(1)
value = messages.StringField(2)
class Point(messages.Message):
time = messages.FloatField(1)
value = messages.FloatField(2)
class FieldParams(messages.Message):
field_key = messages.StringField(1)
values = messages.StringField(2, repeated=True)
class Metric(messages.Message):
name = messages.StringField(1)
minimum = messages.FloatField(2)
maximum = messages.FloatField(3)
class Params(messages.Message):
time = messages.FloatField(1)
freq = messages.FloatField(2)
url = messages.StringField(3)
params = messages.MessageField(FieldParams, 4, repeated=True)
metrics = messages.MessageField(Metric, 5, repeated=True)
class TimeSeries(messages.Message):
points = messages.MessageField(Point, 1, repeated=True)
fields = messages.MessageField(Field, 2, repeated=True)
metric = messages.StringField(3)
class DataPacket(messages.Message):
timeseries = messages.MessageField(TimeSeries, 1, repeated=True)
@auth.endpoints_api(name='consoleapp', version='v1')
class LoadTestApi(remote.Service):
"""A testing endpoint that receives timeseries data."""
@auth.endpoints_method(DataPacket, message_types.VoidMessage,
name='timeseries.update')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def timeseries_update(self, request):
logging.debug('Datapacket length is %d', len(request.timeseries))
return message_types.VoidMessage()
@auth.endpoints_api(name='ui', version='v1')
class UIApi(remote.Service):
"""API for the loadtest configuration UI."""
@auth.endpoints_method(message_types.VoidMessage, Params,
name='ui.get')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def UI_get(self, _request):
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
params = [FieldParams(field_key=field.field_key, values=field.values)
for field in data.params]
metrics = [Metric(name=metric.name,
minimum=metric.minimum,
maximum=metric.maximum)
for metric in data.metrics]
return Params(time=data.time, freq=data.freq, url=data.url, params=params,
metrics=metrics)
@auth.endpoints_method(Params, message_types.VoidMessage,
name='ui.set')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def UI_set(self, request):
logging.debug('Got %s', request)
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
data.time = request.time
data.freq = request.freq
data.url = request.url
data.params = [FieldParamsModel(field_key=field.field_key,
values=field.values)
for field in request.params]
data.metrics = [MetricModel(name=metric.name,
minimum=metric.minimum,
maximum=metric.maximum)
for metric in request.metrics]
data.put()
return message_types.VoidMessage()
def field_generator(dataparams, index, fields):
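  """Recursively expand the FieldParams cross-product into a list of field
  lists, one per combination of values; e.g. (hypothetical values) a single
  param with key 'master' and values ['m1', 'm2'] yields
  [[{'key': 'master', 'value': 'm1'}], [{'key': 'master', 'value': 'm2'}]]."""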
if index == len(dataparams):
return [fields]
else:
key = dataparams[index].field_key
return sum((field_generator(
dataparams, index+1, fields+[{'key': key, 'value': value}])
for value in dataparams[index].values), [])
class CronHandler(webapp2.RequestHandler):
def get(self):
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
metric_ranges = {}
for metric in data.metrics:
metric_ranges[metric.name] = (metric.minimum,metric.maximum)
datapacket = {'timeseries': []}
logging.debug('There are %d metrics', len(metric_ranges))
fieldlist = field_generator(data.params, 0, [])
for metric in metric_ranges:
for fields in fieldlist:
points = []
for x in xrange(0, int(data.time), int(data.freq)):
points.append({'time': x,
'value': random.uniform(*metric_ranges[metric])})
timeseries = {'points': points,
'fields': fields,
'metric': metric}
datapacket['timeseries'].append(timeseries)
logging.info('Send data to %s', data.url)
discovery_url = DISCOVERY_URL % data.url
credentials = GoogleCredentials.get_application_default()
service = discovery.build(API_NAME, API_VERSION,
discoveryServiceUrl=discovery_url,
credentials=credentials)
_response = service.timeseries().update(body=datapacket).execute()
backend_handlers = [
('/cron', CronHandler)
]
WEBAPP = webapp2.WSGIApplication(backend_handlers, debug=True)
APPLICATION = endpoints.api_server([LoadTestApi, UIApi])
| [((83, 1, 83, 52), 'components.auth.endpoints_api', 'auth.endpoints_api', (), '', False, 'from components import auth\n'), ((95, 1, 95, 44), 'components.auth.endpoints_api', 'auth.endpoints_api', (), '', False, 'from components import auth\n'), ((176, 9, 176, 62), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (), '', False, 'import webapp2\n'), ((178, 14, 178, 56), 'endpoints.api_server', 'endpoints.api_server', ({(178, 35, 178, 55): '[LoadTestApi, UIApi]'}, {}), '([LoadTestApi, UIApi])', False, 'import endpoints\n'), ((26, 14, 26, 34), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ({}, {}), '()', False, 'from google.appengine.ext import ndb\n'), ((27, 11, 27, 44), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', (), '', False, 'from google.appengine.ext import ndb\n'), ((31, 9, 31, 39), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', (), '', False, 'from google.appengine.ext import ndb\n'), ((32, 12, 32, 40), 'google.appengine.ext.ndb.FloatProperty', 'ndb.FloatProperty', (), '', False, 'from google.appengine.ext import ndb\n'), ((33, 12, 33, 42), 'google.appengine.ext.ndb.FloatProperty', 'ndb.FloatProperty', (), '', False, 'from google.appengine.ext import ndb\n'), ((37, 9, 37, 38), 'google.appengine.ext.ndb.FloatProperty', 'ndb.FloatProperty', (), '', False, 'from google.appengine.ext import ndb\n'), ((38, 9, 38, 37), 'google.appengine.ext.ndb.FloatProperty', 'ndb.FloatProperty', (), '', False, 'from google.appengine.ext import ndb\n'), ((39, 8, 39, 28), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ({}, {}), '()', False, 'from google.appengine.ext import ndb\n'), ((40, 11, 40, 71), 'google.appengine.ext.ndb.LocalStructuredProperty', 'ndb.LocalStructuredProperty', (), '', False, 'from google.appengine.ext import ndb\n'), ((41, 12, 41, 67), 'google.appengine.ext.ndb.LocalStructuredProperty', 'ndb.LocalStructuredProperty', (), '', False, 'from google.appengine.ext import ndb\n'), ((45, 8, 45, 31), 'protorpc.messages.StringField', 'messages.StringField', ({(45, 29, 45, 30): '1'}, {}), '(1)', False, 'from protorpc import messages\n'), ((46, 10, 46, 33), 'protorpc.messages.StringField', 'messages.StringField', ({(46, 31, 46, 32): '2'}, {}), '(2)', False, 'from protorpc import messages\n'), ((50, 9, 50, 31), 'protorpc.messages.FloatField', 'messages.FloatField', ({(50, 29, 50, 30): '1'}, {}), '(1)', False, 'from protorpc import messages\n'), ((51, 10, 51, 32), 'protorpc.messages.FloatField', 'messages.FloatField', ({(51, 30, 51, 31): '2'}, {}), '(2)', False, 'from protorpc import messages\n'), ((55, 14, 55, 37), 'protorpc.messages.StringField', 'messages.StringField', ({(55, 35, 55, 36): '1'}, {}), '(1)', False, 'from protorpc import messages\n'), ((56, 11, 56, 49), 'protorpc.messages.StringField', 'messages.StringField', (), '', False, 'from protorpc import messages\n'), ((60, 9, 60, 32), 'protorpc.messages.StringField', 'messages.StringField', ({(60, 30, 60, 31): '1'}, {}), '(1)', False, 'from protorpc import messages\n'), ((61, 12, 61, 34), 'protorpc.messages.FloatField', 'messages.FloatField', ({(61, 32, 61, 33): '2'}, {}), '(2)', False, 'from protorpc import messages\n'), ((62, 12, 62, 34), 'protorpc.messages.FloatField', 'messages.FloatField', ({(62, 32, 62, 33): '3'}, {}), '(3)', False, 'from protorpc import messages\n'), ((66, 9, 66, 31), 'protorpc.messages.FloatField', 'messages.FloatField', ({(66, 29, 66, 30): '1'}, {}), '(1)', False, 'from protorpc import messages\n'), ((67, 9, 67, 31), 
'protorpc.messages.FloatField', 'messages.FloatField', ({(67, 29, 67, 30): '2'}, {}), '(2)', False, 'from protorpc import messages\n'), ((68, 8, 68, 31), 'protorpc.messages.StringField', 'messages.StringField', ({(68, 29, 68, 30): '3'}, {}), '(3)', False, 'from protorpc import messages\n'), ((69, 11, 69, 63), 'protorpc.messages.MessageField', 'messages.MessageField', (), '', False, 'from protorpc import messages\n'), ((70, 12, 70, 59), 'protorpc.messages.MessageField', 'messages.MessageField', (), '', False, 'from protorpc import messages\n'), ((74, 11, 74, 57), 'protorpc.messages.MessageField', 'messages.MessageField', (), '', False, 'from protorpc import messages\n'), ((75, 11, 75, 57), 'protorpc.messages.MessageField', 'messages.MessageField', (), '', False, 'from protorpc import messages\n'), ((76, 11, 76, 34), 'protorpc.messages.StringField', 'messages.StringField', ({(76, 32, 76, 33): '3'}, {}), '(3)', False, 'from protorpc import messages\n'), ((80, 15, 80, 66), 'protorpc.messages.MessageField', 'messages.MessageField', (), '', False, 'from protorpc import messages\n'), ((87, 3, 88, 50), 'components.auth.endpoints_method', 'auth.endpoints_method', (), '', False, 'from components import auth\n'), ((99, 3, 100, 39), 'components.auth.endpoints_method', 'auth.endpoints_method', (), '', False, 'from components import auth\n'), ((113, 3, 114, 39), 'components.auth.endpoints_method', 'auth.endpoints_method', (), '', False, 'from components import auth\n'), ((92, 11, 92, 38), 'protorpc.message_types.VoidMessage', 'message_types.VoidMessage', ({}, {}), '()', False, 'from protorpc import message_types\n'), ((117, 4, 117, 36), 'logging.debug', 'logging.debug', ({(117, 18, 117, 26): '"""Got %s"""', (117, 28, 117, 35): 'request'}, {}), "('Got %s', request)", False, 'import logging\n'), ((130, 11, 130, 38), 'protorpc.message_types.VoidMessage', 'message_types.VoidMessage', ({}, {}), '()', False, 'from protorpc import message_types\n'), ((163, 4, 163, 45), 'logging.info', 'logging.info', ({(163, 17, 163, 34): '"""Send data to %s"""', (163, 36, 163, 44): 'data.url'}, {}), "('Send data to %s', data.url)", False, 'import logging\n'), ((165, 18, 165, 61), 'oauth2client.client.GoogleCredentials.get_application_default', 'GoogleCredentials.get_application_default', ({}, {}), '()', False, 'from oauth2client.client import GoogleCredentials\n'), ((166, 14, 168, 54), 'apiclient.discovery.build', 'discovery.build', (), '', False, 'from apiclient import discovery\n'), ((89, 24, 89, 65), 'components.auth.is_group_member', 'auth.is_group_member', ({(89, 45, 89, 64): '"""metric-generators"""'}, {}), "('metric-generators')", False, 'from components import auth\n'), ((101, 24, 101, 65), 'components.auth.is_group_member', 'auth.is_group_member', ({(101, 45, 101, 64): '"""metric-generators"""'}, {}), "('metric-generators')", False, 'from components import auth\n'), ((115, 24, 115, 65), 'components.auth.is_group_member', 'auth.is_group_member', ({(115, 45, 115, 64): '"""metric-generators"""'}, {}), "('metric-generators')", False, 'from components import auth\n'), ((158, 34, 158, 72), 'random.uniform', 'random.uniform', ({(158, 49, 158, 71): '*metric_ranges[metric]'}, {}), '(*metric_ranges[metric])', False, 'import random\n')] |
usnistgov/dioptra | src/mitre/securingai/restapi/task_plugin/controller.py | 08a08e96c27787915bafc75a483431333e2c70ca | # This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
"""The module defining the task plugin endpoints."""
import uuid
from typing import List, Optional
import structlog
from flask import current_app, jsonify
from flask.wrappers import Response
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from injector import inject
from structlog.stdlib import BoundLogger
from mitre.securingai.restapi.utils import as_api_parser
from .errors import TaskPluginDoesNotExistError, TaskPluginUploadError
from .model import TaskPlugin, TaskPluginUploadForm, TaskPluginUploadFormData
from .schema import TaskPluginSchema, TaskPluginUploadSchema
from .service import TaskPluginService
LOGGER: BoundLogger = structlog.stdlib.get_logger()
api: Namespace = Namespace(
"TaskPlugin",
description="Task plugin registry operations",
)
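# Route overview for this namespace (descriptive comment; paths are relative to
# wherever the namespace is mounted):
#   GET    /                            list all registered task plugins
#   POST   /                            upload/register a new task plugin
#   GET    /securingai_builtins         list builtin task plugins
#   GET    /securingai_builtins/<name>  fetch a single builtin plugin package
#   GET    /securingai_custom           list custom task plugins
#   GET    /securingai_custom/<name>    fetch a single custom plugin package
#   DELETE /securingai_custom/<name>    delete a custom plugin package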
@api.route("/")
class TaskPluginResource(Resource):
"""Shows a list of all task plugins, and lets you POST to upload new ones."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all registered task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()), resource="taskPlugin", request_type="GET"
)
log.info("Request received")
return self._task_plugin_service.get_all(
bucket=current_app.config["AI_PLUGINS_BUCKET"], log=log
)
@api.expect(as_api_parser(api, TaskPluginUploadSchema))
@accepts(TaskPluginUploadSchema, api=api)
@responds(schema=TaskPluginSchema, api=api)
def post(self) -> TaskPlugin:
"""Registers a new task plugin uploaded via the task plugin upload form."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()), resource="taskPlugin", request_type="POST"
)
task_plugin_upload_form: TaskPluginUploadForm = TaskPluginUploadForm()
log.info("Request received")
if not task_plugin_upload_form.validate_on_submit():
log.error("Form validation failed")
raise TaskPluginUploadError
log.info("Form validation successful")
task_plugin_upload_form_data: TaskPluginUploadFormData = (
self._task_plugin_service.extract_data_from_form(
task_plugin_upload_form=task_plugin_upload_form, log=log
)
)
return self._task_plugin_service.create(
task_plugin_upload_form_data=task_plugin_upload_form_data,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_builtins")
class TaskPluginBuiltinsCollectionResource(Resource):
"""Shows a list of all builtin task plugins."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all available builtin task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginBuiltinCollection",
request_type="GET",
)
log.info("Request received")
return self._task_plugin_service.get_all_in_collection(
collection="securingai_builtins",
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_builtins/<string:taskPluginName>")
@api.param(
"taskPluginName",
"A unique string identifying a task plugin package within securingai_builtins "
"collection.",
)
class TaskPluginBuiltinCollectionNameResource(Resource):
"""Shows a single builtin task plugin package."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema, api=api)
def get(self, taskPluginName: str) -> TaskPlugin:
"""Gets a builtin task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginBuiltinCollectionName",
request_type="GET",
)
log.info("Request received")
task_plugin: Optional[
TaskPlugin
] = self._task_plugin_service.get_by_name_in_collection(
collection="securingai_builtins",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
if task_plugin is None:
log.error(
"TaskPlugin not found",
task_plugin_name=taskPluginName,
collection="securingai_builtins",
)
raise TaskPluginDoesNotExistError
return task_plugin
@api.route("/securingai_custom")
class TaskPluginCustomCollectionResource(Resource):
"""Shows a list of all custom task plugins."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all registered custom task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollection",
request_type="GET",
)
log.info("Request received")
return self._task_plugin_service.get_all_in_collection(
collection="securingai_custom",
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_custom/<string:taskPluginName>")
@api.param(
"taskPluginName",
"A unique string identifying a task plugin package within securingai_custom "
"collection.",
)
class TaskPluginCustomCollectionNameResource(Resource):
"""Shows a single custom task plugin package and lets you delete it."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema, api=api)
def get(self, taskPluginName: str) -> TaskPlugin:
"""Gets a custom task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollectionName",
request_type="GET",
)
log.info("Request received")
task_plugin: Optional[
TaskPlugin
] = self._task_plugin_service.get_by_name_in_collection(
collection="securingai_custom",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
if task_plugin is None:
log.error(
"TaskPlugin not found",
task_plugin_name=taskPluginName,
collection="securingai_custom",
)
raise TaskPluginDoesNotExistError
return task_plugin
def delete(self, taskPluginName: str) -> Response:
"""Deletes a custom task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollectionName",
task_plugin_name=taskPluginName,
request_type="DELETE",
)
log.info("Request received")
task_plugins: List[TaskPlugin] = self._task_plugin_service.delete(
collection="securingai_custom",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
name: List[str] = [x.task_plugin_name for x in task_plugins]
return jsonify( # type: ignore
dict(status="Success", collection="securingai_custom", taskPluginName=name)
)
| [((37, 22, 37, 51), 'structlog.stdlib.get_logger', 'structlog.stdlib.get_logger', ({}, {}), '()', False, 'import structlog\n'), ((39, 17, 42, 1), 'flask_restx.Namespace', 'Namespace', (), '', False, 'from flask_restx import Namespace, Resource\n'), ((66, 5, 66, 45), 'flask_accepts.accepts', 'accepts', (), '', False, 'from flask_accepts import accepts, responds\n'), ((67, 5, 67, 47), 'flask_accepts.responds', 'responds', (), '', False, 'from flask_accepts import accepts, responds\n'), ((133, 5, 133, 47), 'flask_accepts.responds', 'responds', (), '', False, 'from flask_accepts import accepts, responds\n'), ((202, 5, 202, 47), 'flask_accepts.responds', 'responds', (), '', False, 'from flask_accepts import accepts, responds\n'), ((65, 16, 65, 58), 'mitre.securingai.restapi.utils.as_api_parser', 'as_api_parser', ({(65, 30, 65, 33): 'api', (65, 35, 65, 57): 'TaskPluginUploadSchema'}, {}), '(api, TaskPluginUploadSchema)', False, 'from mitre.securingai.restapi.utils import as_api_parser\n'), ((58, 27, 58, 39), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((71, 27, 71, 39), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((107, 27, 107, 39), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((137, 27, 137, 39), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((176, 27, 176, 39), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((206, 27, 206, 39), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((234, 27, 234, 39), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n')] |
mjmaenpaa/dulwich | dulwich/tests/test_lru_cache.py | d13a0375f4cc3099ff1c6edacda97f317c28f67a | # Copyright (C) 2006, 2008 Canonical Ltd
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for the lru_cache module."""
from dulwich import (
lru_cache,
)
from dulwich.tests import (
TestCase,
)
class TestLRUCache(TestCase):
"""Test that LRU cache properly keeps track of entries."""
def test_cache_size(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertEqual(10, cache.cache_size())
cache = lru_cache.LRUCache(max_cache=256)
self.assertEqual(256, cache.cache_size())
cache.resize(512)
self.assertEqual(512, cache.cache_size())
def test_missing(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertFalse('foo' in cache)
self.assertRaises(KeyError, cache.__getitem__, 'foo')
cache['foo'] = 'bar'
self.assertEqual('bar', cache['foo'])
self.assertTrue('foo' in cache)
self.assertFalse('bar' in cache)
def test_map_None(self):
# Make sure that we can properly map None as a key.
cache = lru_cache.LRUCache(max_cache=10)
self.assertFalse(None in cache)
cache[None] = 1
self.assertEqual(1, cache[None])
cache[None] = 2
self.assertEqual(2, cache[None])
# Test the various code paths of __getitem__, to make sure that we can
# handle when None is the key for the LRU and the MRU
cache[1] = 3
cache[None] = 1
cache[None]
cache[1]
cache[None]
self.assertEqual([None, 1], [n.key for n in cache._walk_lru()])
def test_add__null_key(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1)
def test_overflow(self):
"""Adding extra entries will pop out old ones."""
cache = lru_cache.LRUCache(max_cache=1, after_cleanup_count=1)
cache['foo'] = 'bar'
# With a max cache of 1, adding 'baz' should pop out 'foo'
cache['baz'] = 'biz'
self.assertFalse('foo' in cache)
self.assertTrue('baz' in cache)
self.assertEqual('biz', cache['baz'])
def test_by_usage(self):
"""Accessing entries bumps them up in priority."""
cache = lru_cache.LRUCache(max_cache=2)
cache['baz'] = 'biz'
cache['foo'] = 'bar'
self.assertEqual('biz', cache['baz'])
# This must kick out 'foo' because it was the last accessed
cache['nub'] = 'in'
self.assertFalse('foo' in cache)
def test_cleanup(self):
"""Test that we can use a cleanup function."""
cleanup_called = []
def cleanup_func(key, val):
cleanup_called.append((key, val))
cache = lru_cache.LRUCache(max_cache=2, after_cleanup_count=2)
cache.add('baz', '1', cleanup=cleanup_func)
cache.add('foo', '2', cleanup=cleanup_func)
cache.add('biz', '3', cleanup=cleanup_func)
self.assertEqual([('baz', '1')], cleanup_called)
# 'foo' is now most recent, so final cleanup will call it last
cache['foo']
cache.clear()
self.assertEqual([('baz', '1'), ('biz', '3'), ('foo', '2')],
cleanup_called)
def test_cleanup_on_replace(self):
"""Replacing an object should cleanup the old value."""
cleanup_called = []
def cleanup_func(key, val):
cleanup_called.append((key, val))
cache = lru_cache.LRUCache(max_cache=2)
cache.add(1, 10, cleanup=cleanup_func)
cache.add(2, 20, cleanup=cleanup_func)
cache.add(2, 25, cleanup=cleanup_func)
self.assertEqual([(2, 20)], cleanup_called)
self.assertEqual(25, cache[2])
# Even __setitem__ should make sure cleanup() is called
cache[2] = 26
self.assertEqual([(2, 20), (2, 25)], cleanup_called)
def test_len(self):
cache = lru_cache.LRUCache(max_cache=10, after_cleanup_count=10)
cache[1] = 10
cache[2] = 20
cache[3] = 30
cache[4] = 40
self.assertEqual(4, len(cache))
cache[5] = 50
cache[6] = 60
cache[7] = 70
cache[8] = 80
self.assertEqual(8, len(cache))
cache[1] = 15 # replacement
self.assertEqual(8, len(cache))
cache[9] = 90
cache[10] = 100
cache[11] = 110
# We hit the max
self.assertEqual(10, len(cache))
self.assertEqual([11, 10, 9, 1, 8, 7, 6, 5, 4, 3],
[n.key for n in cache._walk_lru()])
def test_cleanup_shrinks_to_after_clean_count(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=3)
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual(5, len(cache))
# This will bump us over the max, which causes us to shrink down to
# after_cleanup_cache size
cache.add(6, 40)
self.assertEqual(3, len(cache))
def test_after_cleanup_larger_than_max(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=10)
self.assertEqual(5, cache._after_cleanup_count)
def test_after_cleanup_none(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=None)
# By default _after_cleanup_size is 80% of the normal size
self.assertEqual(4, cache._after_cleanup_count)
def test_cleanup_2(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=2)
# Add these in order
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual(5, len(cache))
# Force a compaction
cache.cleanup()
self.assertEqual(2, len(cache))
def test_preserve_last_access_order(self):
cache = lru_cache.LRUCache(max_cache=5)
# Add these in order
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual([5, 4, 3, 2, 1], [n.key for n in cache._walk_lru()])
# Now access some randomly
cache[2]
cache[5]
cache[3]
cache[2]
self.assertEqual([2, 3, 5, 4, 1], [n.key for n in cache._walk_lru()])
def test_get(self):
cache = lru_cache.LRUCache(max_cache=5)
cache.add(1, 10)
cache.add(2, 20)
self.assertEqual(20, cache.get(2))
self.assertEqual(None, cache.get(3))
obj = object()
self.assertTrue(obj is cache.get(3, obj))
self.assertEqual([2, 1], [n.key for n in cache._walk_lru()])
self.assertEqual(10, cache.get(1))
self.assertEqual([1, 2], [n.key for n in cache._walk_lru()])
def test_keys(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=5)
cache[1] = 2
cache[2] = 3
cache[3] = 4
self.assertEqual([1, 2, 3], sorted(cache.keys()))
cache[4] = 5
cache[5] = 6
cache[6] = 7
self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))
def test_resize_smaller(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
cache[1] = 2
cache[2] = 3
cache[3] = 4
cache[4] = 5
cache[5] = 6
self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
cache[6] = 7
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
# Now resize to something smaller, which triggers a cleanup
cache.resize(max_cache=3, after_cleanup_count=2)
self.assertEqual([5, 6], sorted(cache.keys()))
# Adding something will use the new size
cache[7] = 8
self.assertEqual([5, 6, 7], sorted(cache.keys()))
cache[8] = 9
self.assertEqual([7, 8], sorted(cache.keys()))
def test_resize_larger(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
cache[1] = 2
cache[2] = 3
cache[3] = 4
cache[4] = 5
cache[5] = 6
self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
cache[6] = 7
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
cache.resize(max_cache=8, after_cleanup_count=6)
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
cache[7] = 8
cache[8] = 9
cache[9] = 10
cache[10] = 11
self.assertEqual([3, 4, 5, 6, 7, 8, 9, 10], sorted(cache.keys()))
cache[11] = 12 # triggers cleanup back to new after_cleanup_count
self.assertEqual([6, 7, 8, 9, 10, 11], sorted(cache.keys()))
class TestLRUSizeCache(TestCase):
def test_basic_init(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(2048, cache._max_cache)
self.assertEqual(int(cache._max_size*0.8), cache._after_cleanup_size)
self.assertEqual(0, cache._value_size)
def test_add__null_key(self):
cache = lru_cache.LRUSizeCache()
self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1)
def test_add_tracks_size(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(0, cache._value_size)
cache.add('my key', 'my value text')
self.assertEqual(13, cache._value_size)
def test_remove_tracks_size(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(0, cache._value_size)
cache.add('my key', 'my value text')
self.assertEqual(13, cache._value_size)
node = cache._cache['my key']
cache._remove_node(node)
self.assertEqual(0, cache._value_size)
def test_no_add_over_size(self):
"""Adding a large value may not be cached at all."""
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
cache.add('test', 'key')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test': 'key'}, cache.items())
cache.add('test2', 'key that is too big')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
# If we would add a key, only to cleanup and remove all cached entries,
# then obviously that value should not be stored
cache.add('test3', 'bigkey')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
cache.add('test4', 'bikey')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
def test_no_add_over_size_cleanup(self):
"""If a large value is not cached, we will call cleanup right away."""
cleanup_calls = []
def cleanup(key, value):
cleanup_calls.append((key, value))
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
cache.add('test', 'key that is too big', cleanup=cleanup)
# key was not added
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
# and cleanup was called
self.assertEqual([('test', 'key that is too big')], cleanup_calls)
def test_adding_clears_cache_based_on_size(self):
"""The cache is cleared in LRU order until small enough"""
cache = lru_cache.LRUSizeCache(max_size=20)
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', 'value234') # 8 chars, over limit
# We have to remove 2 keys to get back under limit
self.assertEqual(6+8, cache._value_size)
self.assertEqual({'key2':'value2', 'key4':'value234'},
cache.items())
def test_adding_clears_to_after_cleanup_size(self):
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', 'value234') # 8 chars, over limit
# We have to remove 3 keys to get back under limit
self.assertEqual(8, cache._value_size)
self.assertEqual({'key4':'value234'}, cache.items())
def test_custom_sizes(self):
def size_of_list(lst):
return sum(len(x) for x in lst)
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10,
compute_size=size_of_list)
cache.add('key1', ['val', 'ue']) # 5 chars
cache.add('key2', ['val', 'ue2']) # 6 chars
cache.add('key3', ['val', 'ue23']) # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', ['value', '234']) # 8 chars, over limit
# We have to remove 3 keys to get back under limit
self.assertEqual(8, cache._value_size)
self.assertEqual({'key4':['value', '234']}, cache.items())
def test_cleanup(self):
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
# Add these in order
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache.cleanup()
# Only the most recent fits after cleaning up
self.assertEqual(7, cache._value_size)
def test_keys(self):
cache = lru_cache.LRUSizeCache(max_size=10)
cache[1] = 'a'
cache[2] = 'b'
cache[3] = 'cdef'
self.assertEqual([1, 2, 3], sorted(cache.keys()))
def test_resize_smaller(self):
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
cache[1] = 'abc'
cache[2] = 'def'
cache[3] = 'ghi'
cache[4] = 'jkl'
# Triggers a cleanup
self.assertEqual([2, 3, 4], sorted(cache.keys()))
# Resize should also cleanup again
cache.resize(max_size=6, after_cleanup_size=4)
self.assertEqual([4], sorted(cache.keys()))
# Adding should use the new max size
cache[5] = 'mno'
self.assertEqual([4, 5], sorted(cache.keys()))
cache[6] = 'pqr'
self.assertEqual([6], sorted(cache.keys()))
def test_resize_larger(self):
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
cache[1] = 'abc'
cache[2] = 'def'
cache[3] = 'ghi'
cache[4] = 'jkl'
# Triggers a cleanup
self.assertEqual([2, 3, 4], sorted(cache.keys()))
cache.resize(max_size=15, after_cleanup_size=12)
self.assertEqual([2, 3, 4], sorted(cache.keys()))
cache[5] = 'mno'
cache[6] = 'pqr'
self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))
cache[7] = 'stu'
self.assertEqual([4, 5, 6, 7], sorted(cache.keys()))
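# A minimal usage sketch of the size-based cache exercised above (assuming the
# dulwich.lru_cache API exactly as used in these tests; illustrative only):
#
#     cache = lru_cache.LRUSizeCache(max_size=1024, after_cleanup_size=512)
#     cache.add('blob-id', b'raw object bytes')  # may evict LRU entries once over max_size
#     data = cache['blob-id']                    # a lookup refreshes the entry's recency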
pedrotari7/advent_of_code | py/2016/5B.py | 98d5bc8d903435624a019a5702f5421d7b4ef8c8
import md5
(i,count) = (0,0)
password = ['']*8
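# Part 2 of AoC 2016 day 5: when MD5("reyedfim" + str(i)) starts with five zero
# hex digits, the 6th hex digit gives the password position and the 7th gives
# the character for that position (each position is filled only once).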
while 1:
key = 'reyedfim' + str(i)
md = md5.new(key).hexdigest()
if md[:5] == '00000':
index = int(md[5],16)
if index < len(password) and password[index]=='':
password[index] = md[6]
count += 1
if count == 8:
break
i+=1
print ''.join(password)
lsica-scopely/mgear4 | release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py | 28ed5d66370a9516da05d93d447bfc15f4c0c9f4
import pymel.core as pm
import ast
from pymel.core import datatypes
from mgear.shifter import component
from mgear.core import node, applyop, vector
from mgear.core import attribute, transform, primitive
class Component(component.Main):
"""Shifter component Class"""
# =====================================================
# OBJECTS
# =====================================================
def addObjects(self):
"""Add all the objects needed to create the component."""
# joint Description Names
jd_names = ast.literal_eval(
self.settings["jointNamesDescription_custom"]
)
jdn_ball = jd_names[0]
self.up_axis = pm.upAxis(q=True, axis=True)
self.div_count = len(self.guide.apos) - 5
plane = [self.guide.apos[0], self.guide.apos[-4], self.guide.apos[-3]]
self.normal = self.getNormalFromPos(plane)
self.binormal = self.getBiNormalFromPos(plane)
# Heel ---------------------------------------------
# bank pivot
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
t = transform.setMatrixPosition(t, self.guide.pos["inpivot"])
self.in_npo = primitive.addTransform(
self.root, self.getName("in_npo"), t
)
self.in_piv = primitive.addTransform(
self.in_npo, self.getName("in_piv"), t
)
t = transform.setMatrixPosition(t, self.guide.pos["outpivot"])
self.out_piv = primitive.addTransform(
self.in_piv, self.getName("out_piv"), t
)
# heel
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
self.heel_loc = primitive.addTransform(
self.out_piv, self.getName("heel_loc"), t
)
attribute.setRotOrder(self.heel_loc, "YZX")
self.heel_ctl = self.addCtl(
self.heel_loc,
"heel_ctl",
t,
self.color_ik,
"sphere",
w=self.size * 0.1,
tp=self.parentCtlTag,
)
attribute.setKeyableAttributes(self.heel_ctl, self.r_params)
# Tip ----------------------------------------------
if self.up_axis == "y":
v = datatypes.Vector(
self.guide.apos[-5].x,
self.guide.pos["heel"].y,
self.guide.apos[-5].z,
)
else:
v = datatypes.Vector(
self.guide.apos[-5].x,
self.guide.apos[-5].y,
self.guide.pos["heel"].z,
)
t = transform.setMatrixPosition(t, v)
self.tip_ctl = self.addCtl(
self.heel_ctl,
"tip_ctl",
t,
self.color_ik,
"circle",
w=self.size,
tp=self.heel_ctl,
)
attribute.setKeyableAttributes(self.tip_ctl, self.r_params)
# Roll ---------------------------------------------
if self.settings["useRollCtl"]:
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
t = transform.setMatrixPosition(t, self.guide.pos["root"])
self.roll_np = primitive.addTransform(
self.root, self.getName("roll_npo"), t
)
self.roll_ctl = self.addCtl(
self.roll_np,
"roll_ctl",
t,
self.color_ik,
"cylinder",
w=self.size * 0.5,
h=self.size * 0.5,
ro=datatypes.Vector(3.1415 * 0.5, 0, 0),
tp=self.tip_ctl,
)
attribute.setKeyableAttributes(self.roll_ctl, ["rx", "rz"])
# Backward Controlers ------------------------------
bk_pos = self.guide.apos[1:-3]
bk_pos.reverse()
parent = self.tip_ctl
self.bk_ctl = []
self.bk_loc = []
self.previousTag = self.tip_ctl
for i, pos in enumerate(bk_pos):
if i == 0:
t = transform.getTransform(self.heel_ctl)
t = transform.setMatrixPosition(t, pos)
else:
direction = bk_pos[i - 1]
t = transform.getTransformLookingAt(
pos, direction, self.normal, "xz", self.negate
)
bk_loc = primitive.addTransform(
parent, self.getName("bk%s_loc" % i), t
)
bk_ctl = self.addCtl(
bk_loc,
"bk%s_ctl" % i,
t,
self.color_ik,
"sphere",
w=self.size * 0.15,
tp=self.previousTag,
)
attribute.setKeyableAttributes(bk_ctl, self.r_params)
self.previousTag = bk_ctl
self.bk_loc.append(bk_loc)
self.bk_ctl.append(bk_ctl)
parent = bk_ctl
# FK Reference ------------------------------------
self.fk_ref = primitive.addTransformFromPos(
self.bk_ctl[-1], self.getName("fk_ref"), self.guide.apos[0]
)
self.fk_npo = primitive.addTransform(
self.fk_ref,
self.getName("fk0_npo"),
transform.getTransform(self.bk_ctl[-1]),
)
# Forward Controlers ------------------------------
self.fk_ctl = []
self.fk_loc = []
parent = self.fk_npo
self.previousTag = self.tip_ctl
for i, bk_ctl in enumerate(reversed(self.bk_ctl[1:])):
if i == len(self.bk_ctl) - 2:
t = transform.getTransform(self.tip_ctl)
v = transform.getTranslation(bk_ctl)
t = transform.setMatrixPosition(t, v)
else:
t = transform.getTransform(bk_ctl)
dist = vector.getDistance(
self.guide.apos[i + 1], self.guide.apos[i + 2]
)
fk_loc = primitive.addTransform(
parent, self.getName("fk%s_loc" % i), t
)
po_vec = datatypes.Vector(dist * 0.5 * self.n_factor, 0, 0)
fk_ctl = self.addCtl(
fk_loc,
"fk%s_ctl" % i,
t,
self.color_fk,
"cube",
w=dist,
h=self.size * 0.5,
d=self.size * 0.5,
po=po_vec,
tp=self.previousTag,
)
self.previousTag = fk_ctl
attribute.setKeyableAttributes(fk_ctl)
if i:
name = jdn_ball + str(i)
else:
name = jdn_ball
self.jnt_pos.append([fk_ctl, name])
parent = fk_ctl
self.fk_ctl.append(fk_ctl)
self.fk_loc.append(fk_loc)
# =====================================================
# ATTRIBUTES
# =====================================================
def addAttributes(self):
"""Create the anim and setupr rig attributes for the component"""
# Anim -------------------------------------------
# Roll Angles
if not self.settings["useRollCtl"]:
self.roll_att = self.addAnimParam(
"roll", "Roll", "double", 0, -180, 180
)
self.bank_att = self.addAnimParam(
"bank", "Bank", "double", 0, -180, 180
)
self.angles_att = [
self.addAnimParam("angle_%s" % i, "Angle %s" % i, "double", -20)
for i in range(self.div_count)
]
# Setup ------------------------------------------
self.blend_att = self.addSetupParam(
"blend", "Fk/Ik Blend", "double", 1, 0, 1
)
# =====================================================
# OPERATORS
# =====================================================
def addOperators(self):
"""Create operators and set the relations for the component rig
Apply operators, constraints, expressions to the hierarchy.
In order to keep the code clean and easier to debug,
we shouldn't create any new object in this method.
"""
# Visibilities -------------------------------------
try:
# ik
if self.settings["useRollCtl"]:
for shp in self.roll_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for bk_ctl in self.bk_ctl:
for shp in bk_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for shp in self.heel_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for shp in self.tip_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
except RuntimeError:
pm.displayInfo("Visibility already connect")
# Roll / Bank --------------------------------------
if self.settings["useRollCtl"]: # Using the controler
self.roll_att = self.roll_ctl.attr("rz")
self.bank_att = self.roll_ctl.attr("rx")
clamp_node = node.createClampNode(
[self.roll_att, self.bank_att, self.bank_att],
[0, -180, 0],
[180, 0, 180],
)
inAdd_nod = node.createAddNode(
clamp_node.outputB,
pm.getAttr(self.in_piv.attr("rx")) * self.n_factor,
)
pm.connectAttr(clamp_node.outputR, self.heel_loc.attr("rz"))
pm.connectAttr(clamp_node.outputG, self.out_piv.attr("rx"))
pm.connectAttr(inAdd_nod.output, self.in_piv.attr("rx"))
# Reverse Controler offset -------------------------
angle_outputs = node.createAddNodeMulti(self.angles_att)
for i, bk_loc in enumerate(reversed(self.bk_loc)):
if i == 0: # First
inpu = self.roll_att
min_input = self.angles_att[i]
elif i == len(self.angles_att): # Last
sub_nod = node.createSubNode(
self.roll_att, angle_outputs[i - 1]
)
inpu = sub_nod.output
min_input = -360
else: # Others
sub_nod = node.createSubNode(
self.roll_att, angle_outputs[i - 1]
)
inpu = sub_nod.output
min_input = self.angles_att[i]
clamp_node = node.createClampNode(inpu, min_input, 0)
add_node = node.createAddNode(
clamp_node.outputR, bk_loc.getAttr("rz")
)
pm.connectAttr(add_node.output, bk_loc.attr("rz"))
# Reverse compensation -----------------------------
for i, fk_loc in enumerate(self.fk_loc):
bk_ctl = self.bk_ctl[-i - 1]
bk_loc = self.bk_loc[-i - 1]
fk_ctl = self.fk_ctl[i]
# Inverse Rotorder
o_node = applyop.gear_inverseRotorder_op(bk_ctl, fk_ctl)
pm.connectAttr(o_node.output, bk_loc.attr("ro"))
pm.connectAttr(fk_ctl.attr("ro"), fk_loc.attr("ro"))
attribute.lockAttribute(bk_ctl, "ro")
# Compensate the backward rotation
# ik
addx_node = node.createAddNode(
bk_ctl.attr("rx"), bk_loc.attr("rx")
)
addy_node = node.createAddNode(
bk_ctl.attr("ry"), bk_loc.attr("ry")
)
addz_node = node.createAddNode(
bk_ctl.attr("rz"), bk_loc.attr("rz")
)
addz_node = node.createAddNode(
addz_node.output, -bk_loc.getAttr("rz") - fk_loc.getAttr("rz")
)
neg_node = node.createMulNode(
[addx_node.output, addy_node.output, addz_node.output],
[-1, -1, -1],
)
add_node = node.createAddNode(
neg_node.outputY.get() * -1, neg_node.outputY
)
ik_outputs = [neg_node.outputX, add_node.output, neg_node.outputZ]
# fk
fk_outputs = [0, 0, fk_loc.getAttr("rz")]
# blend
blend_node = node.createBlendNode(
ik_outputs, fk_outputs, self.blend_att
)
pm.connectAttr(blend_node.output, fk_loc.attr("rotate"))
return
# =====================================================
# CONNECTOR
# =====================================================
def setRelation(self):
"""Set the relation beetween object from guide to rig"""
self.relatives["root"] = self.fk_ctl[0]
self.relatives["heel"] = self.fk_ctl[0]
self.relatives["inpivot"] = self.fk_ctl[0]
self.relatives["outpivot"] = self.fk_ctl[0]
self.controlRelatives["root"] = self.fk_ctl[0]
self.controlRelatives["heel"] = self.fk_ctl[0]
self.controlRelatives["inpivot"] = self.fk_ctl[0]
self.controlRelatives["outpivot"] = self.fk_ctl[0]
self.jointRelatives["root"] = 0
self.jointRelatives["heel"] = 0
self.jointRelatives["inpivot"] = 0
self.jointRelatives["outpivot"] = 0
for i in range(self.div_count):
self.relatives["%s_loc" % i] = self.fk_ctl[i]
self.jointRelatives["%s_loc" % i] = i
if self.div_count > 0:
self.relatives["%s_loc" % self.div_count] = self.fk_ctl[-1]
self.jointRelatives["%s_loc" % self.div_count] = self.div_count - 1
def addConnection(self):
"""Add more connection definition to the set"""
self.connections["EPIC_leg_01"] = self.connect_leg_2jnt_01
self.connections["leg_2jnt_01"] = self.connect_leg_2jnt_01
self.connections["leg_ms_2jnt_01"] = self.connect_leg_ms_2jnt_01
self.connections["leg_3jnt_01"] = self.connect_leg_3jnt_01
def connect_leg_2jnt_01(self):
"""Connector for leg 2jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws2_rot, self.fk_ref, maintainOffset=True
)
return
def connect_leg_ms_2jnt_01(self):
"""Connector for leg ms 2jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws3_rot, self.fk_ref, maintainOffset=True
)
cns = pm.scaleConstraint(
self.parent_comp.fk_ref,
self.parent_comp.ik_ref,
self.fk_ref,
wal=True,
)
bc_node = pm.createNode("blendColors")
pm.connectAttr(
bc_node.outputB, cns + ".%sW0" % self.parent_comp.fk_ref
)
pm.connectAttr(
bc_node.outputR, cns + ".%sW1" % self.parent_comp.ik_ref
)
pm.connectAttr(self.parent_comp.blend_att, bc_node.blender)
return
def connect_leg_3jnt_01(self):
"""Connector for leg 3jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parent(self.parent_comp.ik2b_ikCtl_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws3_rot, self.fk_ref, maintainOffset=True
)
return
Engerrs/ckan.org | streams/blog/migrations/0012_auto_20200928_1212.py | a5a9b63b0ca16cb5aa4f709f7a264b8f6c265158
# Generated by Django 3.1.1 on 2020-09-28 12:12
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0011_blogpostpage_featured'),
]
operations = [
migrations.RemoveField(
model_name='blogpostpage',
name='date',
),
migrations.AddField(
model_name='blogpostpage',
name='created',
field=models.DateTimeField(blank=True, default=datetime.datetime.now),
),
]
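    # Note: ``default=datetime.datetime.now`` passes the callable itself (no
    # parentheses), so Django calls it at save time for new rows instead of
    # freezing a single timestamp into the migration.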
rafaelpezzuto/opac | opac/webapp/main/views.py | 9b54202350e262a27cb9cb756a892185b288df24
# coding: utf-8
import logging
import requests
import mimetypes
from io import BytesIO
from urllib.parse import urlparse
from datetime import datetime, timedelta
from collections import OrderedDict
from flask_babelex import gettext as _
from flask import (
render_template,
abort,
current_app,
request,
session,
redirect,
jsonify,
url_for,
Response,
send_from_directory,
g,
make_response,
)
from werkzeug.contrib.atom import AtomFeed
from urllib.parse import urljoin
from legendarium.formatter import descriptive_short_format
from . import main
from webapp import babel
from webapp import cache
from webapp import controllers
from webapp.choices import STUDY_AREAS
from webapp.utils import utils
from webapp.utils.caching import cache_key_with_lang, cache_key_with_lang_with_qs
from webapp import forms
from webapp.config.lang_names import display_original_lang_name
from opac_schema.v1.models import Journal, Issue, Article, Collection
from lxml import etree
from packtools import HTMLGenerator
logger = logging.getLogger(__name__)
JOURNAL_UNPUBLISH = _("O periódico está indisponível por motivo de: ")
ISSUE_UNPUBLISH = _("O número está indisponível por motivo de: ")
ARTICLE_UNPUBLISH = _("O artigo está indisponível por motivo de: ")
IAHX_LANGS = dict(
p='pt',
e='es',
i='en',
)
def url_external(endpoint, **kwargs):
url = url_for(endpoint, **kwargs)
return urljoin(request.url_root, url)
class RetryableError(Exception):
"""Erro recuperável sem que seja necessário modificar o estado dos dados
na parte cliente, e.g., timeouts, erros advindos de particionamento de rede
etc.
"""
class NonRetryableError(Exception):
"""Erro do qual não pode ser recuperado sem modificar o estado dos dados
na parte cliente, e.g., recurso solicitado não exite, URI inválida etc.
"""
def fetch_data(url: str, timeout: float = 2) -> bytes:
try:
response = requests.get(url, timeout=timeout)
except (requests.ConnectionError, requests.Timeout) as exc:
raise RetryableError(exc) from exc
except (requests.InvalidSchema, requests.MissingSchema, requests.InvalidURL) as exc:
raise NonRetryableError(exc) from exc
else:
try:
response.raise_for_status()
except requests.HTTPError as exc:
if 400 <= exc.response.status_code < 500:
raise NonRetryableError(exc) from exc
elif 500 <= exc.response.status_code < 600:
raise RetryableError(exc) from exc
else:
raise
return response.content
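# A hypothetical caller-side retry sketch for fetch_data (illustrative only;
# RetryableError may be retried with backoff, NonRetryableError must not be):
#
#     import time
#     for attempt in range(3):
#         try:
#             content = fetch_data("https://example.org/document.xml")
#             break
#         except RetryableError:
#             time.sleep(2 ** attempt)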
@main.before_app_request
def add_collection_to_g():
if not hasattr(g, 'collection'):
try:
collection = controllers.get_current_collection()
setattr(g, 'collection', collection)
except Exception:
            # to be discussed: what to do here
setattr(g, 'collection', {})
@main.after_request
def add_header(response):
response.headers['x-content-type-options'] = 'nosniff'
return response
@main.after_request
def add_language_code(response):
language = session.get('lang', get_locale())
response.set_cookie('language', language)
return response
@main.before_app_request
def add_forms_to_g():
setattr(g, 'email_share', forms.EmailShareForm())
setattr(g, 'email_contact', forms.ContactForm())
setattr(g, 'error', forms.ErrorForm())
@main.before_app_request
def add_scielo_org_config_to_g():
language = session.get('lang', get_locale())
scielo_org_links = {
key: url[language]
for key, url in current_app.config.get('SCIELO_ORG_URIS', {}).items()
}
setattr(g, 'scielo_org', scielo_org_links)
@babel.localeselector
def get_locale():
langs = current_app.config.get('LANGUAGES')
lang_from_headers = request.accept_languages.best_match(list(langs.keys()))
if 'lang' not in list(session.keys()):
session['lang'] = lang_from_headers
if not lang_from_headers and not session['lang']:
        # If the language cannot be detected and the session has no 'lang'
        # key, fall back to the default language.
session['lang'] = current_app.config.get('BABEL_DEFAULT_LOCALE')
return session['lang']
@main.route('/set_locale/<string:lang_code>/')
def set_locale(lang_code):
langs = current_app.config.get('LANGUAGES')
if lang_code not in list(langs.keys()):
abort(400, _('Código de idioma inválido'))
referrer = request.referrer
hash = request.args.get('hash')
if hash:
referrer += "#" + hash
    # store the lang code in the session
session['lang'] = lang_code
return redirect(referrer)
def get_lang_from_session():
"""
    Try to return the session language; if that is not possible, return
    BABEL_DEFAULT_LOCALE.
"""
try:
return session['lang']
except KeyError:
return current_app.config.get('BABEL_DEFAULT_LOCALE')
@main.route('/')
@cache.cached(key_prefix=cache_key_with_lang)
def index():
language = session.get('lang', get_locale())
news = controllers.get_latest_news_by_lang(language)
tweets = controllers.get_collection_tweets()
press_releases = controllers.get_press_releases({'language': language})
urls = {
'downloads': '{0}/w/accesses?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION']),
'references': '{0}/w/publication/size?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION']),
'other': '{0}/?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION'])
}
if (
g.collection is not None
and isinstance(g.collection, Collection)
and g.collection.metrics is not None
and current_app.config['USE_HOME_METRICS']
):
g.collection.metrics.total_journal = Journal.objects.filter(
is_public=True, current_status="current"
).count()
g.collection.metrics.total_article = Article.objects.filter(
is_public=True
).count()
context = {
'news': news,
'urls': urls,
'tweets': tweets,
'press_releases': press_releases,
}
return render_template("collection/index.html", **context)
# ##################################Collection###################################
@main.route('/journals/alpha')
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list():
allowed_filters = ["current", "no-current", ""]
query_filter = request.args.get("status", "")
if not query_filter in allowed_filters:
query_filter = ""
journals_list = [
controllers.get_journal_json_data(journal)
for journal in controllers.get_journals(query_filter=query_filter)
]
return render_template("collection/list_journal.html",
**{'journals_list': journals_list, 'query_filter': query_filter})
@main.route("/journals/thematic")
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list_thematic():
allowed_query_filters = ["current", "no-current", ""]
allowed_thematic_filters = ["areas", "wos", "publisher"]
thematic_table = {
"areas": "study_areas",
"wos": "subject_categories",
"publisher": "publisher_name",
}
query_filter = request.args.get("status", "")
title_query = request.args.get("query", "")
thematic_filter = request.args.get("filter", "areas")
if not query_filter in allowed_query_filters:
query_filter = ""
if not thematic_filter in allowed_thematic_filters:
thematic_filter = "areas"
lang = get_lang_from_session()[:2].lower()
objects = controllers.get_journals_grouped_by(
thematic_table[thematic_filter],
title_query,
query_filter=query_filter,
lang=lang,
)
return render_template(
"collection/list_thematic.html",
**{"objects": objects, "query_filter": query_filter, "filter": thematic_filter}
)
@main.route('/journals/feed/')
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list_feed():
language = session.get('lang', get_locale())
collection = controllers.get_current_collection()
title = 'SciELO - %s - %s' % (collection.name, _('Últimos periódicos inseridos na coleção'))
subtitle = _('10 últimos periódicos inseridos na coleção %s' % collection.name)
feed = AtomFeed(title,
subtitle=subtitle,
feed_url=request.url, url=request.url_root)
journals = controllers.get_journals_paginated(
title_query='', page=1, order_by='-created', per_page=10)
if not journals.items:
feed.add('Nenhum periódico encontrado',
url=request.url,
updated=datetime.now())
for journal in journals.items:
issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
last_issue = issues[0] if issues else None
articles = []
if last_issue:
articles = controllers.get_articles_by_iid(last_issue.iid,
is_public=True)
result_dict = OrderedDict()
for article in articles:
section = article.get_section_by_lang(language[:2])
result_dict.setdefault(section, [])
result_dict[section].append(article)
context = {
'journal': journal,
'articles': result_dict,
'language': language,
'last_issue': last_issue
}
feed.add(journal.title,
render_template("collection/list_feed_content.html", **context),
content_type='html',
author=journal.publisher_name,
url=url_external('main.journal_detail', url_seg=journal.url_segment),
updated=journal.updated,
published=journal.created)
return feed.get_response()
@main.route("/about/", methods=['GET'])
@main.route('/about/<string:slug_name>', methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def about_collection(slug_name=None):
language = session.get('lang', get_locale())
context = {}
page = None
if slug_name:
        # when it is a single page
page = controllers.get_page_by_slug_name(slug_name, language)
if not page:
abort(404, _('Página não encontrada'))
context['page'] = page
else:
        # when it is not a single page, it is a list of pages
pages = controllers.get_pages_by_lang(language)
context['pages'] = pages
return render_template("collection/about.html", **context)
# ###################################Journal#####################################
@main.route('/scielo.php/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy():
script_php = request.args.get('script', None)
pid = request.args.get('pid', None)
tlng = request.args.get('tlng', None)
allowed_scripts = [
'sci_serial', 'sci_issuetoc', 'sci_arttext', 'sci_abstract', 'sci_issues', 'sci_pdf'
]
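    # Legacy "script" values are mapped to the new endpoints below:
    #   sci_serial    -> main.journal_detail
    #   sci_issuetoc  -> main.issue_toc (main.aop_toc for ahead-of-print issues)
    #   sci_arttext / sci_abstract -> main.article_detail_v3
    #   sci_issues    -> main.issue_grid
    #   sci_pdf       -> main.article_detail_v3 (format='pdf')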
if (script_php is not None) and (script_php in allowed_scripts) and not pid:
        # at least one param is present: pid or script_php
abort(400, _(u'Requsição inválida ao tentar acessar o artigo com pid: %s' % pid))
elif script_php and pid:
if script_php == 'sci_serial':
# pid = issn
journal = controllers.get_journal_by_issn(pid)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
return redirect(url_for('main.journal_detail',
url_seg=journal.url_segment), code=301)
elif script_php == 'sci_issuetoc':
issue = controllers.get_issue_by_pid(pid)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
if not issue.journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason))
if issue.url_segment and "ahead" in issue.url_segment:
return redirect(
                    url_for('main.aop_toc', url_seg=issue.journal.url_segment), code=301)
return redirect(
url_for(
"main.issue_toc",
url_seg=issue.journal.url_segment,
url_seg_issue=issue.url_segment),
301
)
elif script_php == 'sci_arttext' or script_php == 'sci_abstract':
article = controllers.get_article_by_pid_v2(pid)
if not article:
abort(404, _('Artigo não encontrado'))
            # 'abstract' or None (not False, because False would be converted to the string 'False')
part = (script_php == 'sci_abstract' and 'abstract') or None
if tlng not in article.languages:
tlng = article.original_language
return redirect(url_for('main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
part=part,
lang=tlng),
code=301)
elif script_php == 'sci_issues':
journal = controllers.get_journal_by_issn(pid)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
return redirect(url_for('main.issue_grid',
url_seg=journal.url_segment), 301)
elif script_php == 'sci_pdf':
            # access to the article PDF:
article = controllers.get_article_by_pid_v2(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
format='pdf',
),
code=301
)
else:
abort(400, _(u'Requsição inválida ao tentar acessar o artigo com pid: %s' % pid))
else:
return redirect('/')
@main.route('/<string:journal_seg>')
@main.route('/journal/<string:journal_seg>')
def journal_detail_legacy_url(journal_seg):
return redirect(url_for('main.journal_detail',
url_seg=journal_seg), code=301)
@main.route('/j/<string:url_seg>/')
@cache.cached(key_prefix=cache_key_with_lang)
def journal_detail(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
utils.fix_journal_last_issue(journal)
    # todo: adjust so that only news related to this journal is listed
language = session.get('lang', get_locale())
news = controllers.get_latest_news_by_lang(language)
# Press releases
press_releases = controllers.get_press_releases({
'journal': journal,
'language': language})
    # List of sections
    # Always keeping English for the section names on the journal home page
if journal.last_issue and journal.current_status == "current":
sections = [section for section in journal.last_issue.sections if section.language == 'en']
recent_articles = controllers.get_recent_articles_of_issue(journal.last_issue.iid, is_public=True)
else:
sections = []
recent_articles = []
latest_issue = journal.last_issue
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = ''
journal_metrics = controllers.get_journal_metrics(journal)
context = {
'journal': journal,
'press_releases': press_releases,
'recent_articles': recent_articles,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
        # the first item of the list is the latest issue.
        # conditional to check whether issues contains items
'last_issue': latest_issue,
'latest_issue_legend': latest_issue_legend,
'sections': sections if sections else None,
'news': news,
'journal_metrics': journal_metrics
}
return render_template("journal/detail.html", **context)
@main.route('/journal/<string:url_seg>/feed/')
@cache.cached(key_prefix=cache_key_with_lang)
def journal_feed(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
last_issue = issues[0] if issues else None
articles = controllers.get_articles_by_iid(last_issue.iid, is_public=True)
feed = AtomFeed(journal.title,
feed_url=request.url,
url=request.url_root,
subtitle=utils.get_label_issue(last_issue))
feed_language = session.get('lang', get_locale())
feed_language = feed_language[:2].lower()
for article in articles:
        # ######### TODO: Review #########
article_lang = feed_language
if feed_language not in article.languages:
article_lang = article.original_language
feed.add(article.title or _('Artigo sem título'),
render_template("issue/feed_content.html", article=article),
content_type='html',
id=article.doi or article.pid,
author=article.authors,
url=url_external('main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
lang=article_lang),
updated=journal.updated,
published=journal.created)
return feed.get_response()
@main.route("/journal/<string:url_seg>/about/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang)
def about_journal(url_seg):
language = session.get('lang', get_locale())
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
latest_issue = utils.fix_journal_last_issue(journal)
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = None
page = controllers.get_page_by_journal_acron_lang(journal.acronym, language)
context = {
'journal': journal,
'latest_issue_legend': latest_issue_legend,
'last_issue': latest_issue,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
}
if page:
context['content'] = page.content
if page.updated_at:
context['page_updated_at'] = page.updated_at
return render_template("journal/about.html", **context)
@main.route("/journals/search/alpha/ajax/", methods=['GET', ])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def journals_search_alpha_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida. Deve ser por ajax'))
query = request.args.get('query', '', type=str)
query_filter = request.args.get('query_filter', '', type=str)
page = request.args.get('page', 1, type=int)
lang = get_lang_from_session()[:2].lower()
response_data = controllers.get_alpha_list_from_paginated_journals(
title_query=query,
query_filter=query_filter,
page=page,
lang=lang)
return jsonify(response_data)
@main.route("/journals/search/group/by/filter/ajax/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def journals_search_by_theme_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida. Deve ser por ajax'))
query = request.args.get('query', '', type=str)
query_filter = request.args.get('query_filter', '', type=str)
filter = request.args.get('filter', 'areas', type=str)
lang = get_lang_from_session()[:2].lower()
if filter == 'areas':
objects = controllers.get_journals_grouped_by('study_areas', query, query_filter=query_filter, lang=lang)
elif filter == 'wos':
objects = controllers.get_journals_grouped_by('subject_categories', query, query_filter=query_filter, lang=lang)
elif filter == 'publisher':
objects = controllers.get_journals_grouped_by('publisher_name', query, query_filter=query_filter, lang=lang)
else:
return jsonify({
'error': 401,
'message': _('Parámetro "filter" é inválido, deve ser "areas", "wos" ou "publisher".')
})
return jsonify(objects)
@main.route("/journals/download/<string:list_type>/<string:extension>/", methods=['GET', ])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def download_journal_list(list_type, extension):
if extension.lower() not in ['csv', 'xls']:
abort(401, _('Parámetro "extension" é inválido, deve ser "csv" ou "xls".'))
elif list_type.lower() not in ['alpha', 'areas', 'wos', 'publisher']:
abort(401, _('Parámetro "list_type" é inválido, deve ser: "alpha", "areas", "wos" ou "publisher".'))
else:
if extension.lower() == 'xls':
mimetype = 'application/vnd.ms-excel'
else:
mimetype = 'text/csv'
query = request.args.get('query', '', type=str)
data = controllers.get_journal_generator_for_csv(list_type=list_type,
title_query=query,
extension=extension.lower())
timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = 'journals_%s_%s.%s' % (list_type, timestamp, extension)
response = Response(data, mimetype=mimetype)
response.headers['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
@main.route("/<string:url_seg>/contact", methods=['POST'])
def contact(url_seg):
if not request.is_xhr:
abort(403, _('Requisição inválida, deve ser ajax.'))
if utils.is_recaptcha_valid(request):
form = forms.ContactForm(request.form)
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal.enable_contact:
abort(403, _('Periódico não permite envio de email.'))
recipients = journal.editor_email
if form.validate():
sent, message = controllers.send_email_contact(recipients,
form.data['name'],
form.data['your_email'],
form.data['message'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
else:
abort(400, _('Requisição inválida, captcha inválido.'))
@main.route("/form_contact/<string:url_seg>/", methods=['GET'])
def form_contact(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
context = {
'journal': journal
}
return render_template("journal/includes/contact_form.html", **context)
# ###################################Issue#######################################
@main.route('/grid/<string:url_seg>/')
def issue_grid_legacy(url_seg):
return redirect(url_for('main.issue_grid', url_seg=url_seg), 301)
@main.route('/j/<string:url_seg>/grid')
@cache.cached(key_prefix=cache_key_with_lang)
def issue_grid(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
    # session language
language = session.get('lang', get_locale())
    # Default ordering of the ``get_issues_by_jid`` function: "-year", "-volume", "-order"
issues_data = controllers.get_issues_for_grid_by_jid(journal.id, is_public=True)
latest_issue = issues_data['last_issue']
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = None
context = {
'journal': journal,
'last_issue': issues_data['last_issue'],
'latest_issue_legend': latest_issue_legend,
'volume_issue': issues_data['volume_issue'],
'ahead': issues_data['ahead'],
'result_dict': issues_data['ordered_for_grid'],
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
}
return render_template("issue/grid.html", **context)
@main.route('/toc/<string:url_seg>/<string:url_seg_issue>/')
def issue_toc_legacy(url_seg, url_seg_issue):
if url_seg_issue and "ahead" in url_seg_issue:
return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301)
return redirect(
url_for('main.issue_toc',
url_seg=url_seg,
url_seg_issue=url_seg_issue),
code=301)
@main.route('/j/<string:url_seg>/i/<string:url_seg_issue>/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def issue_toc(url_seg, url_seg_issue):
section_filter = None
goto = request.args.get("goto", None, type=str)
if goto not in ("previous", "next"):
goto = None
if goto in (None, "next") and "ahead" in url_seg_issue:
        # redirect to `aop_toc`
return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301)
    # session language
language = session.get('lang', get_locale())
if current_app.config["FILTER_SECTION_ENABLE"]:
        # document section, if one was selected
section_filter = request.args.get('section', '', type=str).upper()
    # fetch the issue
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
    # fetch the journal
journal = issue.journal
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
    # fill in the url_segment of the last_issue
utils.fix_journal_last_issue(journal)
    # goto_next_or_previous_issue (redirects)
goto_url = goto_next_or_previous_issue(
issue, request.args.get('goto', None, type=str))
if goto_url:
return redirect(goto_url, code=301)
    # fetch the documents
articles = controllers.get_articles_by_iid(issue.iid, is_public=True)
if articles:
        # collect ALL sections of the documents in this table of contents
sections = sorted({a.section.upper() for a in articles if a.section})
else:
        # sections of the documents in this table of contents
sections = []
if current_app.config["FILTER_SECTION_ENABLE"] and section_filter != '':
        # keep only the documents of the selected section
articles = [a for a in articles if a.section.upper() == section_filter]
    # collect the PDF and text languages of each document
has_math_content = False
for article in articles:
article_text_languages = [doc['lang'] for doc in article.htmls]
article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs]
setattr(article, "article_text_languages", article_text_languages)
setattr(article, "article_pdf_languages", article_pdf_languages)
if 'mml:' in article.title:
has_math_content = True
    # build the bibliographic strip
issue_bibliographic_strip = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(issue.year), volume=issue.volume, number=issue.number,
suppl=issue.suppl_text, language=language[:2].lower())
context = {
'this_page_url': url_for(
'main.issue_toc',
url_seg=url_seg,
url_seg_issue=url_seg_issue),
'has_math_content': has_math_content,
'journal': journal,
'issue': issue,
'issue_bibliographic_strip': issue_bibliographic_strip,
'articles': articles,
'sections': sections,
'section_filter': section_filter,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
'last_issue': journal.last_issue
}
return render_template("issue/toc.html", **context)
def goto_next_or_previous_issue(current_issue, goto_param):
if goto_param not in ["next", "previous"]:
return None
all_issues = list(
controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
if goto_param == "next":
selected_issue = utils.get_next_issue(all_issues, current_issue)
elif goto_param == "previous":
selected_issue = utils.get_prev_issue(all_issues, current_issue)
if selected_issue in (None, current_issue):
        # no redirect needed
return None
try:
url_seg_issue = selected_issue.url_segment
except AttributeError:
return None
else:
return url_for('main.issue_toc',
url_seg=selected_issue.journal.url_segment,
url_seg_issue=url_seg_issue)
def get_next_or_previous_issue(current_issue, goto_param):
if goto_param not in ["next", "previous"]:
return current_issue
all_issues = list(
controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
if goto_param == "next":
return utils.get_next_issue(all_issues, current_issue)
return utils.get_prev_issue(all_issues, current_issue)
@main.route('/j/<string:url_seg>/aop')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def aop_toc(url_seg):
section_filter = request.args.get('section', '', type=str).upper()
aop_issues = controllers.get_aop_issues(url_seg) or []
if not aop_issues:
abort(404, _('Artigos ahead of print não encontrados'))
goto = request.args.get("goto", None, type=str)
if goto == "previous":
url = goto_next_or_previous_issue(aop_issues[-1], goto)
if url:
            return redirect(url, code=301)
journal = aop_issues[0].journal
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
utils.fix_journal_last_issue(journal)
articles = []
for aop_issue in aop_issues:
_articles = controllers.get_articles_by_iid(
aop_issue.iid, is_public=True)
if _articles:
articles.extend(_articles)
if not articles:
abort(404, _('Artigos ahead of print não encontrados'))
sections = sorted({a.section.upper() for a in articles if a.section})
if section_filter != '':
articles = [a for a in articles if a.section.upper() == section_filter]
for article in articles:
article_text_languages = [doc['lang'] for doc in article.htmls]
article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs]
setattr(article, "article_text_languages", article_text_languages)
setattr(article, "article_pdf_languages", article_pdf_languages)
context = {
'this_page_url': url_for("main.aop_toc", url_seg=url_seg),
'journal': journal,
'issue': aop_issues[0],
'issue_bibliographic_strip': "ahead of print",
'articles': articles,
'sections': sections,
'section_filter': section_filter,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper())
for study_area in journal.study_areas
],
        # the first item in the list is the latest issue.
'last_issue': journal.last_issue
}
return render_template("issue/toc.html", **context)
@main.route('/feed/<string:url_seg>/<string:url_seg_issue>/')
@cache.cached(key_prefix=cache_key_with_lang)
def issue_feed(url_seg, url_seg_issue):
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
if not issue.journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason))
journal = issue.journal
articles = controllers.get_articles_by_iid(issue.iid, is_public=True)
feed = AtomFeed(journal.title or "",
feed_url=request.url,
url=request.url_root,
subtitle=utils.get_label_issue(issue))
feed_language = session.get('lang', get_locale())
for article in articles:
        # ######### TODO: Review #########
article_lang = feed_language
if feed_language not in article.languages:
article_lang = article.original_language
        feed.add(article.title or 'Unknown title',
render_template("issue/feed_content.html", article=article),
content_type='html',
author=article.authors,
id=article.doi or article.pid,
url=url_external('main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
lang=article_lang),
updated=journal.updated,
published=journal.created)
return feed.get_response()
# ##################################Article######################################
@main.route('/article/<regex("S\d{4}-\d{3}[0-9xX][0-2][0-9]{3}\d{4}\d{5}"):pid>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_pid(pid):
article = controllers.get_article_by_pid(pid)
if not article:
article = controllers.get_article_by_oap_pid(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(url_for('main.article_detail_v3',
url_seg=article.journal.acronym,
article_pid_v3=article.aid))
def render_html_from_xml(article, lang, gs_abstract=False):
logger.debug("Get XML: %s", article.xml)
if current_app.config["SSM_XML_URL_REWRITE"]:
result = fetch_data(use_ssm_url(article.xml))
else:
result = fetch_data(article.xml)
xml = etree.parse(BytesIO(result))
generator = HTMLGenerator.parse(
xml, valid_only=False, gs_abstract=gs_abstract, output_style="website")
return generator.generate(lang), generator.languages
def render_html_from_html(article, lang):
html_url = [html
for html in article.htmls
if html['lang'] == lang]
try:
html_url = html_url[0]['url']
except IndexError:
raise ValueError('Artigo não encontrado') from None
result = fetch_data(use_ssm_url(html_url))
html = result.decode('utf8')
    text_languages = [doc['lang'] for doc in article.htmls]
return html, text_languages
def render_html_abstract(article, lang):
abstract_text = ''
for abstract in article.abstracts:
if abstract['language'] == lang:
abstract_text = abstract["text"]
break
return abstract_text, article.abstract_languages
def render_html(article, lang, gs_abstract=False):
if article.xml:
return render_html_from_xml(article, lang, gs_abstract)
elif article.htmls:
if gs_abstract:
return render_html_abstract(article, lang)
return render_html_from_html(article, lang)
else:
        # TODO: Fix the tests that expect the ``htmls`` attribute to exist.
        # Ideally this should raise a ValueError.
return '', []
# TODO: Remove as soon as the Article.xml value is consistent in the
# database
def use_ssm_url(url):
"""Normaliza a string `url` de acordo com os valores das diretivas de
configuração OPAC_SSM_SCHEME, OPAC_SSM_DOMAIN e OPAC_SSM_PORT.
A normalização busca obter uma URL absoluta em função de uma relativa, ou
uma absoluta em função de uma absoluta, mas com as partes *scheme* e
*authority* trocadas pelas definidas nas diretivas citadas anteriormente.
Este código deve ser removido assim que o valor de Article.xml estiver
consistente, i.e., todos os registros possuirem apenas URLs absolutas.
"""
if url.startswith("http"):
parsed_url = urlparse(url)
return current_app.config["SSM_BASE_URI"] + parsed_url.path
else:
return current_app.config["SSM_BASE_URI"] + url
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/<regex("(?:\w{2})"):lang_code>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/<regex("(?:\w{2})"):lang_code>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail(url_seg, url_seg_issue, url_seg_article, lang_code=''):
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Issue não encontrado'))
article = controllers.get_article_by_issue_article_seg(issue.iid, url_seg_article)
if article is None:
article = controllers.get_article_by_aop_url_segs(
issue.journal, url_seg_issue, url_seg_article
)
if article is None:
abort(404, _('Artigo não encontrado'))
req_params = {
"url_seg": article.journal.acronym,
"article_pid_v3": article.aid,
}
if lang_code:
req_params["lang"] = lang_code
return redirect(url_for('main.article_detail_v3', **req_params))
@main.route('/j/<string:url_seg>/a/<string:article_pid_v3>/')
@main.route('/j/<string:url_seg>/a/<string:article_pid_v3>/<string:part>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_v3(url_seg, article_pid_v3, part=None):
qs_lang = request.args.get('lang', type=str) or None
qs_goto = request.args.get('goto', type=str) or None
qs_stop = request.args.get('stop', type=str) or None
qs_format = request.args.get('format', 'html', type=str)
gs_abstract = (part == "abstract")
if part and not gs_abstract:
abort(404,
_("Não existe '{}'. No seu lugar use '{}'"
).format(part, 'abstract'))
try:
qs_lang, article = controllers.get_article(
article_pid_v3, url_seg, qs_lang, gs_abstract, qs_goto)
if qs_goto:
return redirect(
url_for(
'main.article_detail_v3',
url_seg=url_seg,
article_pid_v3=article.aid,
part=part,
format=qs_format,
lang=qs_lang,
stop=getattr(article, 'stop', None),
),
code=301
)
    except controllers.PreviousOrNextArticleNotFoundError:
if gs_abstract:
abort(404, _('Resumo inexistente'))
abort(404, _('Artigo inexistente'))
except (controllers.ArticleNotFoundError,
controllers.ArticleJournalNotFoundError):
abort(404, _('Artigo não encontrado'))
except controllers.ArticleLangNotFoundError:
return redirect(
url_for(
'main.article_detail_v3',
url_seg=url_seg,
article_pid_v3=article_pid_v3,
format=qs_format,
),
code=301
)
except controllers.ArticleAbstractNotFoundError:
abort(404, _('Recurso não encontrado'))
except controllers.ArticleIsNotPublishedError as e:
abort(404, "{}{}".format(ARTICLE_UNPUBLISH, e))
except controllers.IssueIsNotPublishedError as e:
abort(404, "{}{}".format(ISSUE_UNPUBLISH, e))
except controllers.JournalIsNotPublishedError as e:
abort(404, "{}{}".format(JOURNAL_UNPUBLISH, e))
except ValueError as e:
abort(404, str(e))
def _handle_html():
citation_pdf_url = None
for pdf_data in article.pdfs:
if pdf_data.get("lang") == qs_lang:
citation_pdf_url = url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
lang=qs_lang,
format="pdf",
)
break
website = request.url
if website:
parsed_url = urlparse(request.url)
if current_app.config["FORCE_USE_HTTPS_GOOGLE_TAGS"]:
website = "{}://{}".format('https', parsed_url.netloc)
else:
website = "{}://{}".format(parsed_url.scheme, parsed_url.netloc)
if citation_pdf_url:
citation_pdf_url = "{}{}".format(website, citation_pdf_url)
try:
html, text_languages = render_html(article, qs_lang, gs_abstract)
except (ValueError, NonRetryableError):
abort(404, _('HTML do Artigo não encontrado ou indisponível'))
except RetryableError:
abort(500, _('Erro inesperado'))
text_versions = sorted(
[
(
lang,
display_original_lang_name(lang),
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
lang=lang
)
)
for lang in text_languages
]
)
citation_xml_url = "{}{}".format(
website,
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
format="xml",
lang=article.original_language,
)
)
context = {
'next_article': qs_stop != 'next',
'previous_article': qs_stop != 'previous',
'article': article,
'journal': article.journal,
'issue': article.issue,
'html': html,
'citation_pdf_url': citation_pdf_url,
'citation_xml_url': citation_xml_url,
'article_lang': qs_lang,
'text_versions': text_versions,
'related_links': controllers.related_links(article),
'gs_abstract': gs_abstract,
'part': part,
}
return render_template("article/detail.html", **context)
def _handle_pdf():
if not article.pdfs:
abort(404, _('PDF do Artigo não encontrado'))
pdf_info = [pdf for pdf in article.pdfs if pdf['lang'] == qs_lang]
if len(pdf_info) != 1:
abort(404, _('PDF do Artigo não encontrado'))
try:
pdf_url = pdf_info[0]['url']
except (IndexError, KeyError, ValueError, TypeError):
abort(404, _('PDF do Artigo não encontrado'))
if pdf_url:
return get_pdf_content(pdf_url)
        abort(404, _('Recurso do Artigo não encontrado. Caminho inválido!'))
def _handle_xml():
if current_app.config["SSM_XML_URL_REWRITE"]:
result = fetch_data(use_ssm_url(article.xml))
else:
result = fetch_data(article.xml)
response = make_response(result)
response.headers['Content-Type'] = 'application/xml'
return response
if 'html' == qs_format:
return _handle_html()
elif 'pdf' == qs_format:
return _handle_pdf()
elif 'xml' == qs_format:
return _handle_xml()
else:
abort(400, _('Formato não suportado'))
@main.route('/readcube/epdf/')
@main.route('/readcube/epdf.php')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def article_epdf():
doi = request.args.get('doi', None, type=str)
pid = request.args.get('pid', None, type=str)
pdf_path = request.args.get('pdf_path', None, type=str)
lang = request.args.get('lang', None, type=str)
if not all([doi, pid, pdf_path, lang]):
abort(400, _('Parâmetros insuficientes para obter o EPDF do artigo'))
else:
context = {
'doi': doi,
'pid': pid,
'pdf_path': pdf_path,
'lang': lang,
}
return render_template("article/epdf.html", **context)
def get_pdf_content(url):
logger.debug("Get PDF: %s", url)
if current_app.config["SSM_ARTICLE_ASSETS_OR_RENDITIONS_URL_REWRITE"]:
url = use_ssm_url(url)
try:
response = fetch_data(url)
except NonRetryableError:
abort(404, _('PDF não encontrado'))
except RetryableError:
abort(500, _('Erro inesperado'))
else:
mimetype, __ = mimetypes.guess_type(url)
return Response(response, mimetype=mimetype)
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def get_content_from_ssm(resource_ssm_media_path):
resource_ssm_full_url = current_app.config['SSM_BASE_URI'] + resource_ssm_media_path
url = resource_ssm_full_url.strip()
mimetype, __ = mimetypes.guess_type(url)
try:
ssm_response = fetch_data(url)
except NonRetryableError:
abort(404, _('Recurso não encontrado'))
except RetryableError:
abort(500, _('Erro inesperado'))
else:
return Response(ssm_response, mimetype=mimetype)
@main.route('/media/assets/<regex("(.*)"):relative_media_path>')
@cache.cached(key_prefix=cache_key_with_lang)
def media_assets_proxy(relative_media_path):
resource_ssm_path = '{ssm_media_path}{resource_path}'.format(
ssm_media_path=current_app.config['SSM_MEDIA_PATH'],
resource_path=relative_media_path)
return get_content_from_ssm(resource_ssm_path)
@main.route('/article/ssm/content/raw/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def article_ssm_content_raw():
resource_ssm_path = request.args.get('resource_ssm_path', None)
if not resource_ssm_path:
        abort(404, _('Recurso do Artigo não encontrado. Caminho inválido!'))
else:
return get_content_from_ssm(resource_ssm_path)
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/<regex("(?:\w{2})"):lang_code>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/<regex("(?:\w{2})"):lang_code>')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_pdf(url_seg, url_seg_issue, url_seg_article, lang_code=''):
"""
    Expected patterns:
`/pdf/csc/2021.v26suppl1/2557-2558`
`/pdf/csc/2021.v26suppl1/2557-2558/en`
"""
if not lang_code and "." not in url_seg_issue:
return router_legacy_pdf(url_seg, url_seg_issue, url_seg_article)
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Issue não encontrado'))
article = controllers.get_article_by_issue_article_seg(issue.iid, url_seg_article)
if not article:
abort(404, _('Artigo não encontrado'))
req_params = {
'url_seg': article.journal.url_segment,
'article_pid_v3': article.aid,
'format': 'pdf',
}
if lang_code:
req_params['lang'] = lang_code
return redirect(url_for('main.article_detail_v3', **req_params), code=301)
@main.route('/pdf/<string:journal_acron>/<string:issue_info>/<string:pdf_filename>.pdf')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy_pdf(journal_acron, issue_info, pdf_filename):
pdf_filename = '%s.pdf' % pdf_filename
journal = controllers.get_journal_by_url_seg(journal_acron)
if not journal:
abort(404, _('Este PDF não existe em http://www.scielo.br. Consulte http://search.scielo.org'))
article = controllers.get_article_by_pdf_filename(
journal_acron, issue_info, pdf_filename)
if not article:
abort(404, _('PDF do artigo não foi encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
format='pdf',
lang=article._pdf_lang,
),
code=301
)
@main.route('/cgi-bin/fbpe/<string:text_or_abstract>/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy_article(text_or_abstract):
pid = request.args.get('pid', None)
lng = request.args.get('lng', None)
if not (text_or_abstract in ['fbtext', 'fbabs'] and pid):
        # only valid for 'fbtext'/'fbabs' requests that carry a pid
        abort(400, _('Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
article = controllers.get_article_by_pid_v1(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
),
code=301
)
# ###############################E-mail share##################################
@main.route("/email_share_ajax/", methods=['POST'])
def email_share_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida.'))
form = forms.EmailShareForm(request.form)
if form.validate():
recipients = [email.strip() for email in form.data['recipients'].split(';') if email.strip() != '']
sent, message = controllers.send_email_share(form.data['your_email'],
recipients,
form.data['share_url'],
form.data['subject'],
form.data['comment'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
@main.route("/form_mail/", methods=['GET'])
def email_form():
context = {'url': request.args.get('url')}
return render_template("email/email_form.html", **context)
@main.route("/email_error_ajax/", methods=['POST'])
def email_error_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida.'))
form = forms.ErrorForm(request.form)
if form.validate():
recipients = [email.strip() for email in current_app.config.get('EMAIL_ACCOUNTS_RECEIVE_ERRORS') if email.strip() != '']
sent, message = controllers.send_email_error(form.data['name'],
form.data['your_email'],
recipients,
form.data['url'],
form.data['error_type'],
form.data['message'],
form.data['page_title'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
@main.route("/error_mail/", methods=['GET'])
def error_form():
context = {'url': request.args.get('url')}
return render_template("includes/error_form.html", **context)
# ###############################Others########################################
@main.route("/media/<path:filename>/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang)
def download_file_by_filename(filename):
media_root = current_app.config['MEDIA_ROOT']
return send_from_directory(media_root, filename)
@main.route("/img/scielo.gif", methods=['GET'])
def full_text_image():
return send_from_directory('static', 'img/full_text_scielo_img.gif')
@main.route("/robots.txt", methods=['GET'])
def get_robots_txt_file():
return send_from_directory('static', 'robots.txt')
@main.route("/revistas/<path:journal_seg>/<string:page>.htm", methods=['GET'])
def router_legacy_info_pages(journal_seg, page):
"""
    This view function redirects the legacy URLs to the new URLs.
    It keeps a dictionary as a mapping table between legacy page names and anchors:
        [iaboutj.htm, eaboutj.htm, paboutj.htm] -> #about
        [iedboard.htm, eedboard.htm, pedboard.htm] -> #editors
        [iinstruc.htm, einstruc.htm, pinstruc.htm] -> #instructions
        isubscrp.htm -> no anchor
    (An illustrative redirect example follows the function body below.)
"""
page_anchor = {
'iaboutj': '#about',
'eaboutj': '#about',
'paboutj': '#about',
'eedboard': '#editors',
'iedboard': '#editors',
'pedboard': '#editors',
'iinstruc': '#instructions',
'pinstruc': '#instructions',
'einstruc': '#instructions'
}
return redirect('%s%s' % (url_for('main.about_journal',
url_seg=journal_seg), page_anchor.get(page, '')), code=301)
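# Illustrative example (the journal acronym 'bjmbr' is hypothetical): a request
# to `/revistas/bjmbr/iedboard.htm` is answered with a 301 redirect to
# url_for('main.about_journal', url_seg='bjmbr') + '#editors'; unknown page
# names fall back to the journal about page with no anchor appended.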
@main.route("/api/v1/counter_dict", methods=['GET'])
def router_counter_dicts():
"""
    This view function returns a dictionary, in JSON format, that maps PIDs to the
    inputs required by the Matomo & COUNTER & SUSHI applications.
    (An illustrative request/response sketch follows the function body below.)
"""
end_date = request.args.get('end_date', '', type=str)
try:
end_date = datetime.strptime(end_date, '%Y-%m-%d')
except ValueError:
end_date = datetime.now()
begin_date = end_date - timedelta(days=30)
page = request.args.get('page', type=int)
if not page:
page = 1
limit = request.args.get('limit', type=int)
if not limit or limit > 100 or limit < 0:
limit = 100
results = {'dictionary_date': end_date,
'end_date': end_date.strftime('%Y-%m-%d %H-%M-%S'),
'begin_date': begin_date.strftime('%Y-%m-%d %H-%M-%S'),
'documents': {},
'collection': current_app.config['OPAC_COLLECTION']}
articles = controllers.get_articles_by_date_range(begin_date, end_date, page, limit)
for a in articles.items:
results['documents'].update(get_article_counter_data(a))
results['total'] = articles.total
results['pages'] = articles.pages
results['limit'] = articles.per_page
results['page'] = articles.page
return jsonify(results)
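# Illustrative request/response sketch (query-string values are hypothetical):
#   GET /api/v1/counter_dict?end_date=2021-06-30&page=1&limit=100
# answers with a JSON object whose keys are: dictionary_date, end_date,
# begin_date, documents (one entry per article, keyed by the article aid),
# collection, total, pages, limit and page.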
def get_article_counter_data(article):
return {
article.aid: {
"journal_acronym": article.journal.acronym,
"pid": article.pid if article.pid else '',
"aop_pid": article.aop_pid if article.aop_pid else '',
"pid_v1": article.scielo_pids.get('v1', ''),
"pid_v2": article.scielo_pids.get('v2', ''),
"pid_v3": article.scielo_pids.get('v3', ''),
"publication_date": article.publication_date,
"default_language": article.original_language,
"create": article.created,
"update": article.updated
}
}
@main.route('/cgi-bin/wxis.exe/iah/')
def author_production():
# http://www.scielo.br/cgi-bin/wxis.exe/iah/
# ?IsisScript=iah/iah.xis&base=article%5Edlibrary&format=iso.pft&
# lang=p&nextAction=lnk&
# indexSearch=AU&exprSearch=MEIERHOFFER,+LILIAN+KOZSLOWSKI
# ->
# //search.scielo.org/?lang=pt&q=au:MEIERHOFFER,+LILIAN+KOZSLOWSKI
search_url = current_app.config.get('URL_SEARCH')
if not search_url:
abort(404, "URL_SEARCH: {}".format(_('Página não encontrada')))
qs_exprSearch = request.args.get('exprSearch', type=str) or ''
qs_indexSearch = request.args.get('indexSearch', type=str) or ''
qs_lang = request.args.get('lang', type=str) or ''
_lang = IAHX_LANGS.get(qs_lang) or ''
_lang = _lang and "lang={}".format(_lang)
_expr = "{}{}".format(
qs_indexSearch == "AU" and "au:" or '', qs_exprSearch)
_expr = _expr and "q={}".format(_expr.replace(" ", "+"))
_and = _lang and _expr and "&" or ''
_question_mark = (_lang or _expr) and "?" or ""
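    # Worked example (values assumed, mirroring the comment at the top of this
    # function, and assuming IAHX_LANGS maps 'p' to 'pt' and URL_SEARCH is
    # '//search.scielo.org/'): with indexSearch=AU, lang=p and
    # exprSearch="MEIERHOFFER, LILIAN KOZSLOWSKI", the fragments become
    # _lang="lang=pt", _expr="q=au:MEIERHOFFER,+LILIAN+KOZSLOWSKI", _and="&"
    # and _question_mark="?", so the redirect below goes to
    # "https://search.scielo.org/?lang=pt&q=au:MEIERHOFFER,+LILIAN+KOZSLOWSKI".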
if search_url.startswith("//"):
protocol = "https:"
elif search_url.startswith("http"):
protocol = ""
else:
protocol = "https://"
url = "{}{}{}{}{}{}".format(
protocol, search_url, _question_mark, _lang, _and, _expr)
return redirect(url, code=301)
%s' % pid)", True, 'from flask_babelex import gettext as _\n'), ((464, 15, 464, 28), 'flask.redirect', 'redirect', ({(464, 24, 464, 27): '"""/"""'}, {}), "('/')", False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((480, 19, 480, 50), 'flask_babelex.gettext', '_', ({(480, 21, 480, 49): '"""Periódico não encontrado"""'}, {}), "('Periódico não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((542, 19, 542, 50), 'flask_babelex.gettext', '_', ({(542, 21, 542, 49): '"""Periódico não encontrado"""'}, {}), "('Periódico não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((554, 29, 554, 62), 'webapp.utils.utils.get_label_issue', 'utils.get_label_issue', ({(554, 51, 554, 61): 'last_issue'}, {}), '(last_issue)', False, 'from webapp.utils import utils\n'), ((567, 17, 567, 76), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((589, 19, 589, 50), 'flask_babelex.gettext', '_', ({(589, 21, 589, 49): '"""Periódico não encontrado"""'}, {}), "('Periódico não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((628, 19, 628, 65), 'flask_babelex.gettext', '_', ({(628, 21, 628, 64): '"""Requisição inválida. Deve ser por ajax"""'}, {}), "('Requisição inválida. Deve ser por ajax')", True, 'from flask_babelex import gettext as _\n'), ((649, 19, 649, 65), 'flask_babelex.gettext', '_', ({(649, 21, 649, 64): '"""Requisição inválida. Deve ser por ajax"""'}, {}), "('Requisição inválida. Deve ser por ajax')", True, 'from flask_babelex import gettext as _\n'), ((659, 18, 659, 120), 'webapp.controllers.get_journals_grouped_by', 'controllers.get_journals_grouped_by', (), '', False, 'from webapp import controllers\n'), ((674, 19, 674, 85), 'flask_babelex.gettext', '_', ({(674, 21, 674, 84): '"""Parámetro "extension" é inválido, deve ser "csv" ou "xls"."""'}, {}), '(\'Parámetro "extension" é inválido, deve ser "csv" ou "xls".\')', True, 'from flask_babelex import gettext as _\n'), ((682, 16, 682, 55), 'flask.request.args.get', 'request.args.get', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((688, 19, 688, 52), 'flask.Response', 'Response', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((697, 19, 697, 62), 'flask_babelex.gettext', '_', ({(697, 21, 697, 61): '"""Requisição inválida, deve ser ajax."""'}, {}), "('Requisição inválida, deve ser ajax.')", True, 'from flask_babelex import gettext as _\n'), ((711, 28, 714, 80), 'webapp.controllers.send_email_contact', 'controllers.send_email_contact', ({(711, 59, 711, 69): 'recipients', (712, 59, 712, 76): "form.data['name']", (713, 59, 713, 82): "form.data['your_email']", (714, 59, 714, 79): "form.data['message']"}, {}), "(recipients, form.data['name'], form.data[\n 'your_email'], form.data['message'])", False, 'from webapp import controllers\n'), ((724, 19, 724, 66), 'flask_babelex.gettext', '_', ({(724, 21, 724, 65): '"""Requisição inválida, captcha inválido."""'}, {}), "('Requisição inválida, captcha inválido.')", True, 'from flask_babelex import gettext as _\n'), ((731, 19, 731, 50), 'flask_babelex.gettext', 
'_', ({(731, 21, 731, 49): '"""Periódico não encontrado"""'}, {}), "('Periódico não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((753, 19, 753, 50), 'flask_babelex.gettext', '_', ({(753, 21, 753, 49): '"""Periódico não encontrado"""'}, {}), "('Periódico não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((790, 24, 790, 64), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((809, 24, 809, 64), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((821, 19, 821, 47), 'flask_babelex.gettext', '_', ({(821, 21, 821, 46): '"""Número não encontrado"""'}, {}), "('Número não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((897, 25, 897, 72), 'webapp.utils.utils.get_prev_issue', 'utils.get_prev_issue', ({(897, 46, 897, 56): 'all_issues', (897, 58, 897, 71): 'current_issue'}, {}), '(all_issues, current_issue)', False, 'from webapp.utils import utils\n'), ((926, 21, 926, 62), 'flask.request.args.get', 'request.args.get', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((930, 19, 930, 63), 'flask_babelex.gettext', '_', ({(930, 21, 930, 62): '"""Artigos ahead of print não encontrados"""'}, {}), "('Artigos ahead of print não encontrados')", True, 'from flask_babelex import gettext as _\n'), ((936, 12, 936, 35), 'flask.redirect', 'redirect', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((951, 19, 951, 63), 'flask_babelex.gettext', '_', ({(951, 21, 951, 62): '"""Artigos ahead of print não encontrados"""'}, {}), "('Artigos ahead of print não encontrados')", True, 'from flask_babelex import gettext as _\n'), ((989, 19, 989, 47), 'flask_babelex.gettext', '_', ({(989, 21, 989, 46): '"""Número não encontrado"""'}, {}), "('Número não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1003, 29, 1003, 57), 'webapp.utils.utils.get_label_issue', 'utils.get_label_issue', ({(1003, 51, 1003, 56): 'issue'}, {}), '(issue)', False, 'from webapp.utils import utils\n'), ((1014, 17, 1014, 76), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((1040, 19, 1040, 46), 'flask_babelex.gettext', '_', ({(1040, 21, 1040, 45): '"""Artigo não encontrado"""'}, {}), "('Artigo não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1132, 19, 1132, 45), 'flask_babelex.gettext', '_', ({(1132, 21, 1132, 44): '"""Issue não encontrado"""'}, {}), "('Issue não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1140, 19, 1140, 46), 'flask_babelex.gettext', '_', ({(1140, 21, 1140, 45): '"""Artigo não encontrado"""'}, {}), "('Artigo não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1228, 25, 1228, 46), 'urllib.parse.urlparse', 'urlparse', ({(1228, 34, 1228, 45): 'request.url'}, {}), '(request.url)', False, 'from urllib.parse import urlparse\n'), ((1259, 12, 1265, 13), 'flask.url_for', 'url_for', (), '', False, 'from flask 
import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((1278, 29, 1278, 63), 'webapp.controllers.related_links', 'controllers.related_links', ({(1278, 55, 1278, 62): 'article'}, {}), '(article)', False, 'from webapp import controllers\n'), ((1299, 25, 1299, 83), 'flask_babelex.gettext', '_', ({(1299, 27, 1299, 82): '"""Recurso do Artigo não encontrado. Caminho inválido!"""'}, {}), "('Recurso do Artigo não encontrado. Caminho inválido!')", True, 'from flask_babelex import gettext as _\n'), ((1330, 19, 1330, 77), 'flask_babelex.gettext', '_', ({(1330, 21, 1330, 76): '"""Parâmetros insuficientes para obter o EPDF do artigo"""'}, {}), "('Parâmetros insuficientes para obter o EPDF do artigo')", True, 'from flask_babelex import gettext as _\n'), ((1387, 25, 1387, 83), 'flask_babelex.gettext', '_', ({(1387, 27, 1387, 82): '"""Recurso do Artigo não encontrado. Caminho inválido!"""'}, {}), "('Recurso do Artigo não encontrado. Caminho inválido!')", True, 'from flask_babelex import gettext as _\n'), ((1408, 19, 1408, 45), 'flask_babelex.gettext', '_', ({(1408, 21, 1408, 44): '"""Issue não encontrado"""'}, {}), "('Issue não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1412, 19, 1412, 46), 'flask_babelex.gettext', '_', ({(1412, 21, 1412, 45): '"""Artigo não encontrado"""'}, {}), "('Artigo não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1433, 19, 1433, 103), 'flask_babelex.gettext', '_', ({(1433, 21, 1433, 102): '"""Este PDF não existe em http://www.scielo.br. Consulte http://search.scielo.org"""'}, {}), "('Este PDF não existe em http://www.scielo.br. Consulte http://search.scielo.org'\n )", True, 'from flask_babelex import gettext as _\n'), ((1439, 19, 1439, 57), 'flask_babelex.gettext', '_', ({(1439, 21, 1439, 56): '"""PDF do artigo não foi encontrado"""'}, {}), "('PDF do artigo não foi encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1459, 19, 1459, 90), 'flask_babelex.gettext', '_', ({(1459, 21, 1459, 89): "('Requsição inválida ao tentar acessar o artigo com pid: %s' % pid)"}, {}), "('Requsição inválida ao tentar acessar o artigo com pid: %s' % pid)", True, 'from flask_babelex import gettext as _\n'), ((1463, 19, 1463, 46), 'flask_babelex.gettext', '_', ({(1463, 21, 1463, 45): '"""Artigo não encontrado"""'}, {}), "('Artigo não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1482, 19, 1482, 47), 'flask_babelex.gettext', '_', ({(1482, 21, 1482, 46): '"""Requisição inválida."""'}, {}), "('Requisição inválida.')", True, 'from flask_babelex import gettext as _\n'), ((1513, 19, 1513, 47), 'flask_babelex.gettext', '_', ({(1513, 21, 1513, 46): '"""Requisição inválida."""'}, {}), "('Requisição inválida.')", True, 'from flask_babelex import gettext as _\n'), ((1603, 19, 1603, 33), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((209, 45, 211, 9), 'opac_schema.v1.models.Journal.objects.filter', 'Journal.objects.filter', (), '', False, 'from opac_schema.v1.models import Journal, Issue, Article, Collection\n'), ((212, 45, 214, 9), 'opac_schema.v1.models.Article.objects.filter', 'Article.objects.filter', (), '', False, 'from opac_schema.v1.models import Journal, Issue, Article, Collection\n'), ((300, 25, 300, 39), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((347, 23, 347, 51), 'flask_babelex.gettext', '_', 
({(347, 25, 347, 50): '"""Página não encontrada"""'}, {}), "('Página não encontrada')", True, 'from flask_babelex import gettext as _\n'), ((377, 22, 377, 58), 'webapp.controllers.get_journal_by_issn', 'controllers.get_journal_by_issn', ({(377, 54, 377, 57): 'pid'}, {}), '(pid)', False, 'from webapp import controllers\n'), ((483, 39, 483, 66), 'flask_babelex.gettext', '_', ({(483, 41, 483, 65): 'journal.unpublish_reason'}, {}), '(journal.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((545, 39, 545, 66), 'flask_babelex.gettext', '_', ({(545, 41, 545, 65): 'journal.unpublish_reason'}, {}), '(journal.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((566, 34, 566, 57), 'flask_babelex.gettext', '_', ({(566, 36, 566, 56): '"""Artigo sem título"""'}, {}), "('Artigo sem título')", True, 'from flask_babelex import gettext as _\n'), ((592, 39, 592, 66), 'flask_babelex.gettext', '_', ({(592, 41, 592, 65): 'journal.unpublish_reason'}, {}), '(journal.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((661, 18, 661, 116), 'webapp.controllers.get_journals_grouped_by', 'controllers.get_journals_grouped_by', (), '', False, 'from webapp import controllers\n'), ((676, 19, 676, 110), 'flask_babelex.gettext', '_', ({(676, 21, 676, 109): '"""Parámetro "list_type" é inválido, deve ser: "alpha", "areas", "wos" ou "publisher"."""'}, {}), '(\'Parámetro "list_type" é inválido, deve ser: "alpha", "areas", "wos" ou "publisher".\'\n )', True, 'from flask_babelex import gettext as _\n'), ((706, 23, 706, 67), 'flask_babelex.gettext', '_', ({(706, 25, 706, 66): '"""Periódico não permite envio de email."""'}, {}), "('Periódico não permite envio de email.')", True, 'from flask_babelex import gettext as _\n'), ((756, 39, 756, 66), 'flask_babelex.gettext', '_', ({(756, 41, 756, 65): 'journal.unpublish_reason'}, {}), '(journal.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((816, 25, 816, 66), 'flask.request.args.get', 'request.args.get', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((823, 37, 823, 62), 'flask_babelex.gettext', '_', ({(823, 39, 823, 61): 'issue.unpublish_reason'}, {}), '(issue.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((828, 39, 828, 66), 'flask_babelex.gettext', '_', ({(828, 41, 828, 65): 'journal.unpublish_reason'}, {}), '(journal.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((940, 39, 940, 66), 'flask_babelex.gettext', '_', ({(940, 41, 940, 65): 'journal.unpublish_reason'}, {}), '(journal.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((992, 37, 992, 62), 'flask_babelex.gettext', '_', ({(992, 39, 992, 61): 'issue.unpublish_reason'}, {}), '(issue.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((995, 39, 995, 72), 'flask_babelex.gettext', '_', ({(995, 41, 995, 71): 'issue.journal.unpublish_reason'}, {}), '(issue.journal.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((1187, 19, 1187, 42), 'flask_babelex.gettext', '_', ({(1187, 21, 1187, 41): '"""Artigo inexistente"""'}, {}), "('Artigo inexistente')", True, 'from flask_babelex import gettext as _\n'), ((1190, 19, 1190, 46), 'flask_babelex.gettext', '_', ({(1190, 21, 1190, 45): '"""Artigo não encontrado"""'}, {}), "('Artigo não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1193, 12, 1198, 13), 
'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((1202, 19, 1202, 47), 'flask_babelex.gettext', '_', ({(1202, 21, 1202, 46): '"""Recurso não encontrado"""'}, {}), "('Recurso não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1217, 35, 1223, 24), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((1286, 23, 1286, 57), 'flask_babelex.gettext', '_', ({(1286, 25, 1286, 56): '"""PDF do Artigo não encontrado"""'}, {}), "('PDF do Artigo não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1290, 23, 1290, 57), 'flask_babelex.gettext', '_', ({(1290, 25, 1290, 56): '"""PDF do Artigo não encontrado"""'}, {}), "('PDF do Artigo não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1348, 19, 1348, 43), 'flask_babelex.gettext', '_', ({(1348, 21, 1348, 42): '"""PDF não encontrado"""'}, {}), "('PDF não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1350, 19, 1350, 39), 'flask_babelex.gettext', '_', ({(1350, 21, 1350, 38): '"""Erro inesperado"""'}, {}), "('Erro inesperado')", True, 'from flask_babelex import gettext as _\n'), ((1366, 19, 1366, 47), 'flask_babelex.gettext', '_', ({(1366, 21, 1366, 46): '"""Recurso não encontrado"""'}, {}), "('Recurso não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1368, 19, 1368, 39), 'flask_babelex.gettext', '_', ({(1368, 21, 1368, 38): '"""Erro inesperado"""'}, {}), "('Erro inesperado')", True, 'from flask_babelex import gettext as _\n'), ((1519, 49, 1519, 104), 'flask.current_app.config.get', 'current_app.config.get', ({(1519, 72, 1519, 103): '"""EMAIL_ACCOUNTS_RECEIVE_ERRORS"""'}, {}), "('EMAIL_ACCOUNTS_RECEIVE_ERRORS')", False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((1589, 30, 1590, 58), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((1660, 43, 1660, 71), 'flask_babelex.gettext', '_', ({(1660, 45, 1660, 70): '"""Página não encontrada"""'}, {}), "('Página não encontrada')", True, 'from flask_babelex import gettext as _\n'), ((133, 24, 133, 69), 'flask.current_app.config.get', 'current_app.config.get', ({(133, 47, 133, 64): '"""SCIELO_ORG_URIS"""', (133, 66, 133, 68): '{}'}, {}), "('SCIELO_ORG_URIS', {})", False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((385, 28, 386, 56), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((390, 20, 390, 53), 'webapp.controllers.get_issue_by_pid', 'controllers.get_issue_by_pid', ({(390, 49, 390, 52): 'pid'}, {}), '(pid)', False, 'from webapp import controllers\n'), ((686, 20, 686, 34), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((1165, 14, 1166, 17), 'flask_babelex.gettext', '_', ({(1165, 16, 1165, 57): '"""Não existe \'{}\'. 
No seu lugar use \'{}\'"""'}, {}), '("Não existe \'{}\'. No seu lugar use \'{}\'")', True, 'from flask_babelex import gettext as _\n'), ((1186, 23, 1186, 46), 'flask_babelex.gettext', '_', ({(1186, 25, 1186, 45): '"""Resumo inexistente"""'}, {}), "('Resumo inexistente')", True, 'from flask_babelex import gettext as _\n'), ((1238, 23, 1238, 75), 'flask_babelex.gettext', '_', ({(1238, 25, 1238, 74): '"""HTML do Artigo não encontrado ou indisponível"""'}, {}), "('HTML do Artigo não encontrado ou indisponível')", True, 'from flask_babelex import gettext as _\n'), ((1240, 23, 1240, 43), 'flask_babelex.gettext', '_', ({(1240, 25, 1240, 42): '"""Erro inesperado"""'}, {}), "('Erro inesperado')", True, 'from flask_babelex import gettext as _\n'), ((1246, 23, 1246, 55), 'webapp.config.lang_names.display_original_lang_name', 'display_original_lang_name', ({(1246, 50, 1246, 54): 'lang'}, {}), '(lang)', False, 'from webapp.config.lang_names import display_original_lang_name\n'), ((1247, 23, 1252, 24), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((1295, 23, 1295, 57), 'flask_babelex.gettext', '_', ({(1295, 25, 1295, 56): '"""PDF do Artigo não encontrado"""'}, {}), "('PDF do Artigo não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((1317, 19, 1317, 46), 'flask_babelex.gettext', '_', ({(1317, 21, 1317, 45): '"""Formato não suportado"""'}, {}), "('Formato não suportado')", True, 'from flask_babelex import gettext as _\n'), ((380, 27, 380, 58), 'flask_babelex.gettext', '_', ({(380, 29, 380, 57): '"""Periódico não encontrado"""'}, {}), "('Periódico não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((406, 16, 409, 52), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((414, 22, 414, 60), 'webapp.controllers.get_article_by_pid_v2', 'controllers.get_article_by_pid_v2', ({(414, 56, 414, 59): 'pid'}, {}), '(pid)', False, 'from webapp import controllers\n'), ((665, 23, 665, 101), 'flask_babelex.gettext', '_', ({(665, 25, 665, 100): '"""Parámetro "filter" é inválido, deve ser "areas", "wos" ou "publisher"."""'}, {}), '(\'Parámetro "filter" é inválido, deve ser "areas", "wos" ou "publisher".\')', True, 'from flask_babelex import gettext as _\n'), ((383, 47, 383, 74), 'flask_babelex.gettext', '_', ({(383, 49, 383, 73): 'journal.unpublish_reason'}, {}), '(journal.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((393, 27, 393, 55), 'flask_babelex.gettext', '_', ({(393, 29, 393, 54): '"""Número não encontrado"""'}, {}), "('Número não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((403, 20, 403, 60), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((424, 28, 428, 46), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((433, 22, 433, 58), 'webapp.controllers.get_journal_by_issn', 'controllers.get_journal_by_issn', ({(433, 54, 433, 57): 'pid'}, {}), '(pid)', False, 'from webapp import controllers\n'), ((396, 45, 396, 70), 'flask_babelex.gettext', '_', 
({(396, 47, 396, 69): 'issue.unpublish_reason'}, {}), '(issue.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((399, 47, 399, 80), 'flask_babelex.gettext', '_', ({(399, 49, 399, 79): 'issue.journal.unpublish_reason'}, {}), '(issue.journal.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((416, 27, 416, 54), 'flask_babelex.gettext', '_', ({(416, 29, 416, 53): '"""Artigo não encontrado"""'}, {}), "('Artigo não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((441, 28, 442, 64), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((446, 22, 446, 60), 'webapp.controllers.get_article_by_pid_v2', 'controllers.get_article_by_pid_v2', ({(446, 56, 446, 59): 'pid'}, {}), '(pid)', False, 'from webapp import controllers\n'), ((436, 27, 436, 58), 'flask_babelex.gettext', '_', ({(436, 29, 436, 57): '"""Periódico não encontrado"""'}, {}), "('Periódico não encontrado')", True, 'from flask_babelex import gettext as _\n'), ((451, 16, 456, 17), 'flask.url_for', 'url_for', (), '', False, 'from flask import render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response\n'), ((461, 23, 461, 95), 'flask_babelex.gettext', '_', ({(461, 25, 461, 94): "(u'Requsição inválida ao tentar acessar o artigo com pid: %s' % pid)"}, {}), "(u'Requsição inválida ao tentar acessar o artigo com pid: %s' % pid)", True, 'from flask_babelex import gettext as _\n'), ((439, 47, 439, 74), 'flask_babelex.gettext', '_', ({(439, 49, 439, 73): 'journal.unpublish_reason'}, {}), '(journal.unpublish_reason)', True, 'from flask_babelex import gettext as _\n'), ((448, 27, 448, 54), 'flask_babelex.gettext', '_', ({(448, 29, 448, 53): '"""Artigo não encontrado"""'}, {}), "('Artigo não encontrado')", True, 'from flask_babelex import gettext as _\n')] |
CodeXfull/Pandas | create_read_write_1/Writing/to_csv.py | 08b0adc28eedba47f6eb8303ba6a36a37ababb92 | """
Converter um DataFrame para CSV
"""
import pandas as pd
dataset = pd.DataFrame({'Frutas': ["Abacaxi", "Mamão"],
"Nomes": ["Éverton", "Márcia"]},
index=["Linha 1", "Linha 2"])
dataset.to_csv("dataset.csv") | [((6, 10, 8, 50), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n')] |
13rilliant/Python-CMS | venv/Lib/site-packages/pygsheets/client.py | 56c4f3f1cbdd81020aa690ab92d0e26d042458c1 | # -*- coding: utf-8 -*-.
import re
import warnings
import os
import logging
from pygsheets.drive import DriveAPIWrapper
from pygsheets.sheet import SheetAPIWrapper
from pygsheets.spreadsheet import Spreadsheet
from pygsheets.exceptions import SpreadsheetNotFound, NoValidUrlKeyFound
from pygsheets.custom_types import ValueRenderOption, DateTimeRenderOption
from google_auth_httplib2 import AuthorizedHttp
GOOGLE_SHEET_CELL_UPDATES_LIMIT = 50000
_url_key_re_v1 = re.compile(r'key=([^&#]+)')
_url_key_re_v2 = re.compile(r"/spreadsheets/d/([a-zA-Z0-9-_]+)")
_email_patttern = re.compile(r"\"?([-a-zA-Z0-9.`?{}]+@[-a-zA-Z0-9.]+\.\w+)\"?")
# _domain_pattern = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
_deprecated_keyword_mapping = {
'parent_id': 'folder',
}
class Client(object):
"""Create or access Google spreadsheets.
    Exposes members to create new spreadsheets or open existing ones. Use `authorize` to create an
    instance of this class.
>>> import pygsheets
>>> c = pygsheets.authorize()
The sheet API service object is stored in the sheet property and the drive API service object in the drive property.
>>> c.sheet.get('<SPREADSHEET ID>')
>>> c.drive.delete('<FILE ID>')
:param credentials: The credentials object returned by google-auth or google-auth-oauthlib.
:param retries: (Optional) Number of times to retry a connection before raising a TimeOut error.
Default: 3
:param http: The underlying HTTP object to use to make requests. If not specified, a
:class:`httplib2.Http` instance will be constructed.
"""
spreadsheet_cls = Spreadsheet
def __init__(self, credentials, retries=3, http=None):
self.oauth = credentials
self.logger = logging.getLogger(__name__)
http = AuthorizedHttp(credentials, http=http)
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
self.sheet = SheetAPIWrapper(http, data_path, retries=retries)
self.drive = DriveAPIWrapper(http, data_path)
@property
def teamDriveId(self):
""" Enable team drive support
Deprecated: use client.drive.enable_team_drive(team_drive_id=?)
"""
return self.drive.team_drive_id
@teamDriveId.setter
def teamDriveId(self, value):
warnings.warn("Depricated please use drive.enable_team_drive")
self.drive.enable_team_drive(value)
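    # Illustrative replacement for the deprecated property above (not part of the
    # original source): enable team-drive support through the drive wrapper directly.
    # >>> c = pygsheets.authorize()
    # >>> c.drive.enable_team_drive('<TEAM DRIVE ID>')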
def spreadsheet_ids(self, query=None):
"""Get a list of all spreadsheet ids present in the Google Drive or TeamDrive accessed."""
return [x['id'] for x in self.drive.spreadsheet_metadata(query)]
def spreadsheet_titles(self, query=None):
"""Get a list of all spreadsheet titles present in the Google Drive or TeamDrive accessed."""
return [x['name'] for x in self.drive.spreadsheet_metadata(query)]
def create(self, title, template=None, folder=None, **kwargs):
"""Create a new spreadsheet.
The title will always be set to the given value (even overwriting the templates title). The template
can either be a `spreadsheet resource <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#resource-spreadsheet>`_
or an instance of :class:`~pygsheets.Spreadsheet`. In both cases undefined values will be ignored.
:param title: Title of the new spreadsheet.
:param template: A template to create the new spreadsheet from.
:param folder: The Id of the folder this sheet will be stored in.
:param kwargs: Standard parameters (see reference for details).
:return: :class:`~pygsheets.Spreadsheet`
"""
result = self.sheet.create(title, template=template, **kwargs)
if folder:
self.drive.move_file(result['spreadsheetId'],
old_folder=self.drive.spreadsheet_metadata(query="name = '" + title + "'")[0]['parents'][0],
new_folder=folder)
return self.spreadsheet_cls(self, jsonsheet=result)
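    # Usage sketch for create() (illustrative only; the folder id is a placeholder):
    # >>> c = pygsheets.authorize()
    # >>> sh = c.create('Budget 2020', folder='<FOLDER ID>')
    # >>> sh.title
    # 'Budget 2020'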
def open(self, title):
"""Open a spreadsheet by title.
        If several spreadsheets share the given title, the first one found is returned.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open('TestSheet')
:param title: A title of a spreadsheet.
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: No spreadsheet with the given title was found.
"""
try:
spreadsheet = list(filter(lambda x: x['name'] == title, self.drive.spreadsheet_metadata()))[0]
return self.open_by_key(spreadsheet['id'])
except (KeyError, IndexError):
raise SpreadsheetNotFound('Could not find a spreadsheet with title %s.' % title)
def open_by_key(self, key):
"""Open a spreadsheet by key.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open_by_key('0BmgG6nO_6dprdS1MN3d3MkdPa142WFRrdnRRUWl1UFE')
:param key: The key of a spreadsheet. (can be found in the sheet URL)
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: The given spreadsheet ID was not found.
"""
response = self.sheet.get(key,
fields='properties,sheets/properties,spreadsheetId,namedRanges',
includeGridData=False)
return self.spreadsheet_cls(self, response)
def open_by_url(self, url):
"""Open a spreadsheet by URL.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open_by_url('https://docs.google.com/spreadsheet/ccc?key=0Bm...FE&hl')
:param url: URL of a spreadsheet as it appears in a browser.
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: No spreadsheet was found with the given URL.
"""
m1 = _url_key_re_v1.search(url)
if m1:
return self.open_by_key(m1.group(1))
else:
m2 = _url_key_re_v2.search(url)
if m2:
return self.open_by_key(m2.group(1))
else:
raise NoValidUrlKeyFound
def open_all(self, query=''):
"""Opens all available spreadsheets.
        Results can be filtered by specifying the query parameter. For details on how to form the query, see the
`Reference <https://developers.google.com/drive/v3/web/search-parameters>`_
:param query: (Optional) Can be used to filter the returned metadata.
:returns: A list of :class:`~pygsheets.Spreadsheet`.
"""
return [self.open_by_key(key) for key in self.spreadsheet_ids(query=query)]
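    # Illustrative query (assumes standard Drive v3 search syntax): open only
    # spreadsheets whose name contains 'report'.
    # >>> c.open_all(query="name contains 'report'")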
def open_as_json(self, key):
"""Return a json representation of the spreadsheet.
See `Reference <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet>`__ for details.
"""
return self.sheet.get(key, fields='properties,sheets/properties,sheets/protectedRanges,'
'spreadsheetId,namedRanges',
includeGridData=False)
def get_range(self, spreadsheet_id,
value_range,
major_dimension='ROWS',
value_render_option=ValueRenderOption.FORMATTED_VALUE,
date_time_render_option=DateTimeRenderOption.SERIAL_NUMBER):
"""Returns a range of values from a spreadsheet. The caller must specify the spreadsheet ID and a range.
Reference: `request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get>`__
:param spreadsheet_id: The ID of the spreadsheet to retrieve data from.
:param value_range: The A1 notation of the values to retrieve.
:param major_dimension: The major dimension that results should use.
For example, if the spreadsheet data is: A1=1,B1=2,A2=3,B2=4, then
requesting range=A1:B2,majorDimension=ROWS will return [[1,2],[3,4]],
whereas requesting range=A1:B2,majorDimension=COLUMNS will return
[[1,3],[2,4]].
:param value_render_option: How values should be represented in the output. The default
render option is `ValueRenderOption.FORMATTED_VALUE`.
:param date_time_render_option: How dates, times, and durations should be represented in the output.
This is ignored if `valueRenderOption` is `FORMATTED_VALUE`. The default
dateTime render option is [`DateTimeRenderOption.SERIAL_NUMBER`].
:return: An array of arrays with the values fetched. Returns an empty array if no
values were fetched. Values are dynamically typed as int, float or string.
"""
result = self.sheet.values_get(spreadsheet_id, value_range, major_dimension, value_render_option,
date_time_render_option)
try:
return result['values']
except KeyError:
return [['']] | [((18, 17, 18, 44), 're.compile', 're.compile', ({(18, 28, 18, 43): '"""key=([^&#]+)"""'}, {}), "('key=([^&#]+)')", False, 'import re\n'), ((19, 17, 19, 64), 're.compile', 're.compile', ({(19, 28, 19, 63): '"""/spreadsheets/d/([a-zA-Z0-9-_]+)"""'}, {}), "('/spreadsheets/d/([a-zA-Z0-9-_]+)')", False, 'import re\n'), ((20, 18, 20, 79), 're.compile', 're.compile', ({(20, 29, 20, 78): '"""\\\\"?([-a-zA-Z0-9.`?{}]+@[-a-zA-Z0-9.]+\\\\.\\\\w+)\\\\"?"""'}, {}), '(\'\\\\"?([-a-zA-Z0-9.`?{}]+@[-a-zA-Z0-9.]+\\\\.\\\\w+)\\\\"?\')', False, 'import re\n'), ((53, 22, 53, 49), 'logging.getLogger', 'logging.getLogger', ({(53, 40, 53, 48): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((55, 15, 55, 53), 'google_auth_httplib2.AuthorizedHttp', 'AuthorizedHttp', (), '', False, 'from google_auth_httplib2 import AuthorizedHttp\n'), ((58, 21, 58, 70), 'pygsheets.sheet.SheetAPIWrapper', 'SheetAPIWrapper', (), '', False, 'from pygsheets.sheet import SheetAPIWrapper\n'), ((59, 21, 59, 53), 'pygsheets.drive.DriveAPIWrapper', 'DriveAPIWrapper', ({(59, 37, 59, 41): 'http', (59, 43, 59, 52): 'data_path'}, {}), '(http, data_path)', False, 'from pygsheets.drive import DriveAPIWrapper\n'), ((71, 8, 71, 71), 'warnings.warn', 'warnings.warn', ({(71, 22, 71, 70): '"""Depricated please use drive.enable_team_drive"""'}, {}), "('Depricated please use drive.enable_team_drive')", False, 'import warnings\n'), ((56, 49, 56, 74), 'os.path.abspath', 'os.path.abspath', ({(56, 65, 56, 73): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((120, 18, 120, 92), 'pygsheets.exceptions.SpreadsheetNotFound', 'SpreadsheetNotFound', ({(120, 38, 120, 91): "('Could not find a spreadsheet with title %s.' % title)"}, {}), "('Could not find a spreadsheet with title %s.' % title)", False, 'from pygsheets.exceptions import SpreadsheetNotFound, NoValidUrlKeyFound\n')] |
NatalyAristova/Training_python | model/group_contact.py | e95a2b9e25238285d705a880fd94d73f173c3a31 | from sys import maxsize
class Group_contact:
    def __init__(self, firstname=None, middlename=None, lastname=None, nickname=None, title=None, company=None,
                 address=None, home=None, mobile=None, work=None, fax=None, email=None, email2=None, email3=None, byear=None,
                 address2=None, phone2=None, notes=None, all_phones_from_home_page=None, id=None, all_emails_from_home_page=None):
        self.firstname = firstname
        self.middlename = middlename
        self.lastname = lastname
        self.nickname = nickname
        self.title = title
        self.company = company
        self.address = address
        self.home = home
        self.mobile = mobile
        self.work = work
        self.fax = fax
        self.email = email
        self.email2 = email2
        self.email3 = email3
        self.byear = byear
        self.address2 = address2
        self.phone2 = phone2
        self.notes = notes
        self.id = id
        self.all_phones_from_home_page = all_phones_from_home_page
        self.all_emails_from_home_page = all_emails_from_home_page
def __repr__(self):
return "%s:%s:%s:%s:%s:%s" % (self.id, self.lastname, self.firstname, self.middlename, self.nickname, self.title)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and (self.lastname, self.firstname) == (other.lastname, other.firstname)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize | [] |
membranepotential/mendeley-python-sdk | test/manual/documents/test_iter_documents.py | 0336f0164f4d409309e813cbd0140011b5b2ff8f | from itertools import islice
from test import get_user_session, cassette
from test.resources.documents import delete_all_documents, create_document
def test_should_iterate_through_documents():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/documents/iter_documents/iterate_through_documents.yaml'):
create_document(session, 'title 1')
create_document(session, 'title 2')
create_document(session, 'title 3')
docs = list(islice(session.documents.iter(page_size=2), 3))
assert len(docs) == 3
assert docs[0].title == 'title 1'
assert docs[1].title == 'title 2'
assert docs[2].title == 'title 3'
| [((8, 14, 8, 32), 'test.get_user_session', 'get_user_session', ({}, {}), '()', False, 'from test import get_user_session, cassette\n'), ((9, 4, 9, 26), 'test.resources.documents.delete_all_documents', 'delete_all_documents', ({}, {}), '()', False, 'from test.resources.documents import delete_all_documents, create_document\n'), ((11, 9, 11, 95), 'test.cassette', 'cassette', ({(11, 18, 11, 94): '"""fixtures/resources/documents/iter_documents/iterate_through_documents.yaml"""'}, {}), "(\n 'fixtures/resources/documents/iter_documents/iterate_through_documents.yaml'\n )", False, 'from test import get_user_session, cassette\n'), ((12, 8, 12, 43), 'test.resources.documents.create_document', 'create_document', ({(12, 24, 12, 31): 'session', (12, 33, 12, 42): '"""title 1"""'}, {}), "(session, 'title 1')", False, 'from test.resources.documents import delete_all_documents, create_document\n'), ((13, 8, 13, 43), 'test.resources.documents.create_document', 'create_document', ({(13, 24, 13, 31): 'session', (13, 33, 13, 42): '"""title 2"""'}, {}), "(session, 'title 2')", False, 'from test.resources.documents import delete_all_documents, create_document\n'), ((14, 8, 14, 43), 'test.resources.documents.create_document', 'create_document', ({(14, 24, 14, 31): 'session', (14, 33, 14, 42): '"""title 3"""'}, {}), "(session, 'title 3')", False, 'from test.resources.documents import delete_all_documents, create_document\n')] |
cbsudux/minimal-hand | demo.py | 893c252e7e818a9a96b279023ea8a78a88fb0a4d | import argparse
import cv2
import keyboard
import numpy as np
import open3d as o3d
import os
import os.path as osp
import subprocess
import pygame
from transforms3d.axangles import axangle2mat
import config
from hand_mesh import HandMesh
from kinematics import mpii_to_mano
from utils import OneEuroFilter, imresize
from wrappers import ModelPipeline
from utils import *
def video_to_images(vid_file, img_folder=None, return_info=False):
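    """Dump a video to one PNG image per frame using ffmpeg.

    Returns the image folder; when return_info is True it also returns the
    number of frames and the shape of the first frame.
    """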
if img_folder is None:
img_folder = osp.join('/tmp', osp.basename(vid_file).replace('.', '_'))
os.makedirs(img_folder, exist_ok=True)
command = ['ffmpeg',
'-i', vid_file,
'-f', 'image2',
'-v', 'error',
f'{img_folder}/%06d.png']
print(f'Running \"{" ".join(command)}\"')
subprocess.call(command)
print(f'Images saved to \"{img_folder}\"')
img_shape = cv2.imread(osp.join(img_folder, '000001.png')).shape
if return_info:
return img_folder, len(os.listdir(img_folder)), img_shape
else:
return img_folder
def run(args):
############ output visualization ############
# view_mat = axangle2mat([1, 0, 0], np.pi) # align different coordinate systems
# window_size = 1080
# hand_mesh = HandMesh(config.HAND_MESH_MODEL_PATH)
# mesh = o3d.geometry.TriangleMesh()
# mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces)
# mesh.vertices = \
# o3d.utility.Vector3dVector(np.matmul(view_mat, hand_mesh.verts.T).T * 1000)
# mesh.compute_vertex_normals()
# viewer = o3d.visualization.Visualizer()
# viewer.create_window(
# width=window_size + 1, height=window_size + 1,
# window_name='Minimal Hand - output'
# )
# viewer.add_geometry(mesh)
# view_control = viewer.get_view_control()
# cam_params = view_control.convert_to_pinhole_camera_parameters()
# extrinsic = cam_params.extrinsic.copy()
# extrinsic[0:3, 3] = 0
# cam_params.extrinsic = extrinsic
# cam_params.intrinsic.set_intrinsics(
# window_size + 1, window_size + 1, config.CAM_FX, config.CAM_FY,
# window_size // 2, window_size // 2
# )
# view_control.convert_from_pinhole_camera_parameters(cam_params)
# view_control.set_constant_z_far(1000)
# render_option = viewer.get_render_option()
# render_option.load_from_json('./render_option.json')
# viewer.update_renderer()
# ############ input visualization ############
# pygame.init()
# display = pygame.display.set_mode((window_size, window_size))
# pygame.display.set_caption('Minimal Hand - input')
# ############ misc ############
# mesh_smoother = OneEuroFilter(4.0, 0.0)
# clock = pygame.time.Clock()
############ Move all of above code to local to render ###########
video_file = args.vid_file
if not os.path.isfile(video_file):
exit(f'Input video \"{video_file}\" does not exist!')
output_path = os.path.join(args.output_folder, os.path.basename(video_file).replace('.mp4', ''))
os.makedirs(output_path, exist_ok=True)
image_folder, num_frames, img_shape = video_to_images(video_file, return_info=True)
print(f'Input video number of frames {num_frames}')
orig_height, orig_width = img_shape[:2]
# total_time = time.time()
import pdb; pdb.set_trace()
image_file_names = [
osp.join(image_folder, x)
for x in os.listdir(image_folder)
if x.endswith('.png') or x.endswith('.jpg')
]
model = ModelPipeline()
for i in image_file_names:
        # Load the frame; the checks below skip unreadable images and center-crop to a square.
        frame_large = cv2.imread(i)  # assumption: BGR order is fine here; use cv2.cvtColor(..., cv2.COLOR_BGR2RGB) if the model needs RGB
if frame_large is None:
continue
if frame_large.shape[0] > frame_large.shape[1]:
margin = int((frame_large.shape[0] - frame_large.shape[1]) / 2)
frame_large = frame_large[margin:-margin]
else:
margin = int((frame_large.shape[1] - frame_large.shape[0]) / 2)
frame_large = frame_large[:, margin:-margin]
        frame_large = np.flip(frame_large, axis=1).copy()  # horizontal mirror, kept from the original webcam demo
        frame = imresize(frame_large, (128, 128))  # the model expects a 128x128 input
######## Golden lines, run this here #########
_, theta_mpii = model.process(frame)
theta_mano = mpii_to_mano(theta_mpii)
######## Save theta_mano and pass as input to local ########
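        # Sketch of the save step described above (added for illustration; the file
        # naming scheme is an assumption, not the original author's code).
        theta_path = osp.join(output_path, osp.splitext(osp.basename(i))[0] + '_theta_mano.npy')
        np.save(theta_path, theta_mano)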
        # The rendering below needs the viewer/mesh/pygame objects that were commented
        # out above; per the notes above it is meant to run where that setup exists,
        # so it is left disabled here.
        # v = hand_mesh.set_abs_quat(theta_mano)
        # v *= 2 # for better visualization
        # v = v * 1000 + np.array([0, 0, 400])
        # v = mesh_smoother.process(v)
        # mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces)
        # mesh.vertices = o3d.utility.Vector3dVector(np.matmul(view_mat, v.T).T)
        # mesh.paint_uniform_color(config.HAND_COLOR)
        # mesh.compute_triangle_normals()
        # mesh.compute_vertex_normals()
        # # for some version of open3d you may need `viewer.update_geometry(mesh)`
        # viewer.update_geometry()
        # viewer.poll_events()
        # display.blit(
        #     pygame.surfarray.make_surface(
        #         np.transpose(
        #             imresize(frame_large, (window_size, window_size)
        #             ), (1, 0, 2))
        #     ),
        #     (0, 0)
        # )
        # pygame.display.update()
        if keyboard.is_pressed("esc"):
            break
        # clock.tick(30)  # caps the loop at 30 frames per second (pygame clock, disabled with the setup above)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--vid_file', type=str,
                        help='input video path or youtube link')
    # run() also reads args.output_folder; the default below is an assumption.
    parser.add_argument('--output_folder', type=str, default='output/',
                        help='folder where per-video outputs are written')
args = parser.parse_args()
run(args)
| [((22, 4, 22, 42), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((92, 4, 92, 43), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((100, 16, 100, 31), 'pdb.set_trace', 'pdb.set_trace', ({}, {}), '()', False, 'import pdb\n'), ((108, 12, 108, 27), 'wrappers.ModelPipeline', 'ModelPipeline', ({}, {}), '()', False, 'from wrappers import ModelPipeline\n'), ((162, 13, 162, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((88, 11, 88, 37), 'os.path.isfile', 'os.path.isfile', ({(88, 26, 88, 36): 'video_file'}, {}), '(video_file)', False, 'import os\n'), ((123, 16, 123, 49), 'utils.imresize', 'imresize', ({(123, 25, 123, 36): 'frame_large', (123, 38, 123, 48): '(128, 128)'}, {}), '(frame_large, (128, 128))', False, 'from utils import OneEuroFilter, imresize\n'), ((127, 21, 127, 45), 'kinematics.mpii_to_mano', 'mpii_to_mano', ({(127, 34, 127, 44): 'theta_mpii'}, {}), '(theta_mpii)', False, 'from kinematics import mpii_to_mano\n'), ((135, 25, 135, 68), 'open3d.utility.Vector3iVector', 'o3d.utility.Vector3iVector', ({(135, 52, 135, 67): 'hand_mesh.faces'}, {}), '(hand_mesh.faces)', True, 'import open3d as o3d\n'), ((153, 8, 153, 31), 'pygame.display.update', 'pygame.display.update', ({}, {}), '()', False, 'import pygame\n'), ((155, 11, 155, 37), 'keyboard.is_pressed', 'keyboard.is_pressed', ({(155, 31, 155, 36): '"""esc"""'}, {}), "('esc')", False, 'import keyboard\n'), ((104, 21, 104, 45), 'os.listdir', 'os.listdir', ({(104, 32, 104, 44): 'image_folder'}, {}), '(image_folder)', False, 'import os\n'), ((132, 23, 132, 44), 'numpy.array', 'np.array', ({(132, 32, 132, 43): '[0, 0, 400]'}, {}), '([0, 0, 400])', True, 'import numpy as np\n'), ((36, 31, 36, 53), 'os.listdir', 'os.listdir', ({(36, 42, 36, 52): 'img_folder'}, {}), '(img_folder)', False, 'import os\n'), ((91, 51, 91, 79), 'os.path.basename', 'os.path.basename', ({(91, 68, 91, 78): 'video_file'}, {}), '(video_file)', False, 'import os\n'), ((122, 22, 122, 50), 'numpy.flip', 'np.flip', (), '', True, 'import numpy as np\n'), ((136, 51, 136, 75), 'numpy.matmul', 'np.matmul', ({(136, 61, 136, 69): 'view_mat', (136, 71, 136, 74): 'v.T'}, {}), '(view_mat, v.T)', True, 'import numpy as np\n'), ((148, 16, 149, 13), 'utils.imresize', 'imresize', ({(148, 25, 148, 36): 'frame_large', (148, 38, 148, 64): '(window_size, window_size)'}, {}), '(frame_large, (window_size, window_size))', False, 'from utils import OneEuroFilter, imresize\n')] |
incuna/incuna-groups | test_project/settings.py | 148c181faf66fe73792cb2c5bbf5500ba61aa22d | import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
ALLOWED_HOSTS = []
ROOT_URLCONF = 'groups.tests.urls'
STATIC_URL = '/static/'
SECRET_KEY = 'krc34ji^-fd-=+r6e%p!0u0k9h$9!q*_#l=6)74h#o(jrxsx4p'
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
DATABASES = {
'default': dj_database_url.config(default='postgres://localhost/groups')
}
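# Illustrative note: dj_database_url.config() also reads the DATABASE_URL environment
# variable when it is set, so the default above can be overridden without editing this
# file, e.g. DATABASE_URL=postgres://localhost/groups_test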
DEFAULT_FILE_STORAGE = 'inmemorystorage.InMemoryStorage'
INSTALLED_APPS = (
'groups',
'crispy_forms',
'pagination',
'polymorphic',
# Put contenttypes before auth to work around test issue.
# See: https://code.djangoproject.com/ticket/10827#comment:12
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'groups', 'tests', 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
TEST_RUNNER = 'test_project.test_runner.Runner'
| [((17, 15, 17, 76), 'dj_database_url.config', 'dj_database_url.config', (), '', False, 'import dj_database_url\n'), ((6, 43, 6, 68), 'os.path.abspath', 'os.path.abspath', ({(6, 59, 6, 67): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((46, 12, 46, 66), 'os.path.join', 'os.path.join', ({(46, 25, 46, 33): 'BASE_DIR', (46, 35, 46, 43): '"""groups"""', (46, 45, 46, 52): '"""tests"""', (46, 54, 46, 65): '"""templates"""'}, {}), "(BASE_DIR, 'groups', 'tests', 'templates')", False, 'import os\n')] |
cclauss/akismet | tests/test_akismet.py | 7b65bc163d6947a3013d01bf9accf1bc6c0781ca | import datetime
import os
import sys
import unittest
from unittest import mock
import akismet
class AkismetTests(unittest.TestCase):
api_key = os.getenv("TEST_AKISMET_API_KEY")
blog_url = os.getenv("TEST_AKISMET_BLOG_URL")
api_key_env_var = "PYTHON_AKISMET_API_KEY"
blog_url_env_var = "PYTHON_AKISMET_BLOG_URL"
def setUp(self):
self.api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
class AkismetConfigurationTests(AkismetTests):
"""
Tests configuration of the Akismet class.
"""
def test_config_from_args(self):
"""
Configuring via explicit arguments succeeds.
"""
api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
self.assertEqual(self.api_key, api.api_key)
self.assertEqual(self.blog_url, api.blog_url)
def test_bad_config_args(self):
"""
Configuring with bad arguments fails.
"""
with self.assertRaises(akismet.APIKeyError):
akismet.Akismet(key="invalid", blog_url="http://invalid")
def test_config_from_env(self):
"""
Configuring via environment variables succeeds.
"""
try:
os.environ[self.api_key_env_var] = self.api_key
os.environ[self.blog_url_env_var] = self.blog_url
api = akismet.Akismet(key=None, blog_url=None)
self.assertEqual(self.api_key, api.api_key)
self.assertEqual(self.blog_url, api.blog_url)
api = akismet.Akismet()
self.assertEqual(self.api_key, api.api_key)
self.assertEqual(self.blog_url, api.blog_url)
finally:
os.environ[self.api_key_env_var] = ""
os.environ[self.blog_url_env_var] = ""
def test_bad_config_env(self):
"""
Configuring with bad environment variables fails.
"""
try:
os.environ[self.api_key_env_var] = "invalid"
os.environ[self.blog_url_env_var] = "http://invalid"
with self.assertRaises(akismet.APIKeyError):
akismet.Akismet()
finally:
os.environ[self.api_key_env_var] = ""
os.environ[self.blog_url_env_var] = ""
def test_bad_url(self):
"""
Configuring with a bad URL fails.
"""
bad_urls = (
"example.com",
"ftp://example.com",
"www.example.com",
"http//example.com",
"https//example.com",
)
for url in bad_urls:
with self.assertRaises(akismet.ConfigurationError):
akismet.Akismet(key=self.api_key, blog_url=url)
def test_missing_config(self):
"""
Instantiating without any configuration fails.
"""
with self.assertRaises(akismet.ConfigurationError):
akismet.Akismet(key=None, blog_url=None)
with self.assertRaises(akismet.ConfigurationError):
akismet.Akismet()
def test_user_agent(self):
"""
The Akismet class creates the correct user-agent string.
"""
api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
expected_agent = "Python/{} | akismet.py/{}".format(
"{}.{}".format(*sys.version_info[:2]), akismet.__version__
)
self.assertEqual(expected_agent, api.user_agent_header["User-Agent"])
class AkismetAPITests(AkismetTests):
"""
Tests implementation of the Akismet API.
"""
base_kwargs = {
"user_ip": "127.0.0.1",
"user_agent": "Mozilla",
# Always send this when testing; Akismet recognizes it as a
# test query and does not train/learn from it.
"is_test": 1,
}
def test_verify_key_valid(self):
"""
The verify_key operation succeeds with a valid key and URL.
"""
self.assertTrue(akismet.Akismet.verify_key(self.api_key, self.blog_url))
def test_verify_key_invalid(self):
"""
The verify_key operation fails with an invalid key and URL.
"""
self.assertFalse(akismet.Akismet.verify_key("invalid", "http://invalid"))
def test_comment_check_spam(self):
"""
The comment_check method correctly identifies spam.
"""
check_kwargs = {
# Akismet guarantees this will be classified spam.
"comment_author": "viagra-test-123",
**self.base_kwargs,
}
self.assertTrue(self.api.comment_check(**check_kwargs))
def test_comment_check_not_spam(self):
"""
The comment_check method correctly identifies non-spam.
"""
check_kwargs = {
# Akismet guarantees this will not be classified spam.
"user_role": "administrator",
**self.base_kwargs,
}
self.assertFalse(self.api.comment_check(**check_kwargs))
def test_submit_spam(self):
"""
The submit_spam method succeeds.
"""
spam_kwargs = {
"comment_type": "comment",
"comment_author": "viagra-test-123",
"comment_content": "viagra-test-123",
**self.base_kwargs,
}
self.assertTrue(self.api.submit_spam(**spam_kwargs))
def test_submit_ham(self):
"""
The submit_ham method succeeds.
"""
ham_kwargs = {
"comment_type": "comment",
"comment_author": "Legitimate Author",
"comment_content": "This is a legitimate comment.",
"user_role": "administrator",
**self.base_kwargs,
}
self.assertTrue(self.api.submit_ham(**ham_kwargs))
def test_unexpected_verify_key_response(self):
"""
Unexpected verify_key API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
akismet.Akismet.verify_key(self.api_key, self.blog_url)
def test_unexpected_comment_check_response(self):
"""
Unexpected comment_check API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
check_kwargs = {"comment_author": "viagra-test-123", **self.base_kwargs}
self.api.comment_check(**check_kwargs)
def test_unexpected_submit_spam_response(self):
"""
Unexpected submit_spam API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
spam_kwargs = {
"comment_type": "comment",
"comment_author": "viagra-test-123",
"comment_content": "viagra-test-123",
**self.base_kwargs,
}
self.api.submit_spam(**spam_kwargs)
def test_unexpected_submit_ham_response(self):
"""
Unexpected submit_ham API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
ham_kwargs = {
"comment_type": "comment",
"comment_author": "Legitimate Author",
"comment_content": "This is a legitimate comment.",
"user_role": "administrator",
**self.base_kwargs,
}
self.api.submit_ham(**ham_kwargs)
class AkismetRequestTests(AkismetTests):
"""
Tests the requests constructed by the Akismet class.
"""
def _get_mock(self, text):
"""
Create a mock for requests.post() returning expected text.
"""
post_mock = mock.MagicMock()
post_mock.return_value.text = text
return post_mock
def _mock_request(self, method, endpoint, text, method_kwargs):
"""
Issue a mocked request and verify requests.post() was called
with the correct arguments.
"""
method_kwargs.update(user_ip="127.0.0.1", user_agent="Mozilla", is_test=1)
expected_kwargs = {"blog": self.blog_url, **method_kwargs}
post_mock = self._get_mock(text)
with mock.patch("requests.post", post_mock):
getattr(self.api, method)(**method_kwargs)
post_mock.assert_called_with(
endpoint.format(self.api_key),
data=expected_kwargs,
headers=akismet.Akismet.user_agent_header,
)
def test_verify_key(self):
"""
The request issued by verify_key() is correct.
"""
post_mock = self._get_mock("valid")
with mock.patch("requests.post", post_mock):
akismet.Akismet.verify_key(self.api_key, self.blog_url)
post_mock.assert_called_with(
akismet.Akismet.VERIFY_KEY_URL,
data={"key": self.api_key, "blog": self.blog_url},
headers=akismet.Akismet.user_agent_header,
)
def test_comment_check(self):
"""
The request issued by comment_check() is correct.
"""
self._mock_request(
"comment_check",
akismet.Akismet.COMMENT_CHECK_URL,
"true",
{"comment_author": "viagra-test-123"},
)
def test_submit_spam(self):
"""
The request issued by submit_spam() is correct.
"""
self._mock_request(
"submit_spam",
akismet.Akismet.SUBMIT_SPAM_URL,
akismet.Akismet.SUBMIT_SUCCESS_RESPONSE,
{"comment_content": "Bad comment", "comment_author": "viagra-test-123"},
)
def test_submit_ham(self):
"""
The request issued by submit_ham() is correct.
"""
self._mock_request(
"submit_ham",
akismet.Akismet.SUBMIT_HAM_URL,
akismet.Akismet.SUBMIT_SUCCESS_RESPONSE,
{
"comment_content": "Good comment",
"comment_author": "Legitimate commenter",
},
)
def test_full_kwargs(self):
"""
All optional Akismet arguments are correctly passed through.
"""
modified_timestamp = datetime.datetime.now()
posted_timestamp = modified_timestamp - datetime.timedelta(seconds=30)
full_kwargs = {
"referrer": "http://www.example.com/",
"permalink": "http://www.example.com/#comment123",
"comment_type": "comment",
"comment_author": "Legitimate Author",
"comment_author_email": "[email protected]",
"comment_author_url": "http://www.example.com/",
"comment_content": "This is a fine comment.",
"comment_date_gmt": posted_timestamp.isoformat(),
"comment_post_modified_gmt": modified_timestamp.isoformat(),
"blog_lang": "en_us",
"blog_charset": "utf-8",
"user_role": "administrator",
"recheck_reason": "edit",
}
self._mock_request(
"comment_check", akismet.Akismet.COMMENT_CHECK_URL, "false", full_kwargs
)
def test_unknown_kwargs(self):
"""
Unknown Akismet arguments are correctly rejected.
"""
bad_kwargs = {"bad_arg": "bad_val"}
with self.assertRaises(akismet.UnknownArgumentError):
self._mock_request(
"comment_check", akismet.Akismet.COMMENT_CHECK_URL, "false", bad_kwargs
)
| [((11, 14, 11, 47), 'os.getenv', 'os.getenv', ({(11, 24, 11, 46): '"""TEST_AKISMET_API_KEY"""'}, {}), "('TEST_AKISMET_API_KEY')", False, 'import os\n'), ((12, 15, 12, 49), 'os.getenv', 'os.getenv', ({(12, 25, 12, 48): '"""TEST_AKISMET_BLOG_URL"""'}, {}), "('TEST_AKISMET_BLOG_URL')", False, 'import os\n'), ((18, 19, 18, 76), 'akismet.Akismet', 'akismet.Akismet', (), '', False, 'import akismet\n'), ((32, 14, 32, 71), 'akismet.Akismet', 'akismet.Akismet', (), '', False, 'import akismet\n'), ((109, 14, 109, 71), 'akismet.Akismet', 'akismet.Akismet', (), '', False, 'import akismet\n'), ((200, 20, 200, 36), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((210, 20, 210, 36), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((221, 20, 221, 36), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((237, 20, 237, 36), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((261, 20, 261, 36), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((340, 29, 340, 52), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((42, 12, 42, 69), 'akismet.Akismet', 'akismet.Akismet', (), '', False, 'import akismet\n'), ((53, 18, 53, 58), 'akismet.Akismet', 'akismet.Akismet', (), '', False, 'import akismet\n'), ((57, 18, 57, 35), 'akismet.Akismet', 'akismet.Akismet', ({}, {}), '()', False, 'import akismet\n'), ((100, 12, 100, 52), 'akismet.Akismet', 'akismet.Akismet', (), '', False, 'import akismet\n'), ((102, 12, 102, 29), 'akismet.Akismet', 'akismet.Akismet', ({}, {}), '()', False, 'import akismet\n'), ((135, 24, 135, 79), 'akismet.Akismet.verify_key', 'akismet.Akismet.verify_key', ({(135, 51, 135, 63): 'self.api_key', (135, 65, 135, 78): 'self.blog_url'}, {}), '(self.api_key, self.blog_url)', False, 'import akismet\n'), ((142, 25, 142, 80), 'akismet.Akismet.verify_key', 'akismet.Akismet.verify_key', ({(142, 52, 142, 61): '"""invalid"""', (142, 63, 142, 79): '"""http://invalid"""'}, {}), "('invalid', 'http://invalid')", False, 'import akismet\n'), ((201, 13, 201, 51), 'unittest.mock.patch', 'mock.patch', ({(201, 24, 201, 39): '"""requests.post"""', (201, 41, 201, 50): 'post_mock'}, {}), "('requests.post', post_mock)", False, 'from unittest import mock\n'), ((211, 13, 211, 51), 'unittest.mock.patch', 'mock.patch', ({(211, 24, 211, 39): '"""requests.post"""', (211, 41, 211, 50): 'post_mock'}, {}), "('requests.post', post_mock)", False, 'from unittest import mock\n'), ((222, 13, 222, 51), 'unittest.mock.patch', 'mock.patch', ({(222, 24, 222, 39): '"""requests.post"""', (222, 41, 222, 50): 'post_mock'}, {}), "('requests.post', post_mock)", False, 'from unittest import mock\n'), ((238, 13, 238, 51), 'unittest.mock.patch', 'mock.patch', ({(238, 24, 238, 39): '"""requests.post"""', (238, 41, 238, 50): 'post_mock'}, {}), "('requests.post', post_mock)", False, 'from unittest import mock\n'), ((274, 13, 274, 51), 'unittest.mock.patch', 'mock.patch', ({(274, 24, 274, 39): '"""requests.post"""', (274, 41, 274, 50): 'post_mock'}, {}), "('requests.post', post_mock)", False, 'from unittest import mock\n'), ((288, 13, 288, 51), 'unittest.mock.patch', 'mock.patch', ({(288, 24, 288, 39): '"""requests.post"""', (288, 41, 288, 50): 'post_mock'}, {}), "('requests.post', post_mock)", False, 'from unittest import mock\n'), ((289, 12, 289, 67), 
'akismet.Akismet.verify_key', 'akismet.Akismet.verify_key', ({(289, 39, 289, 51): 'self.api_key', (289, 53, 289, 66): 'self.blog_url'}, {}), '(self.api_key, self.blog_url)', False, 'import akismet\n'), ((341, 48, 341, 78), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((73, 16, 73, 33), 'akismet.Akismet', 'akismet.Akismet', ({}, {}), '()', False, 'import akismet\n'), ((92, 16, 92, 63), 'akismet.Akismet', 'akismet.Akismet', (), '', False, 'import akismet\n'), ((203, 16, 203, 71), 'akismet.Akismet.verify_key', 'akismet.Akismet.verify_key', ({(203, 43, 203, 55): 'self.api_key', (203, 57, 203, 70): 'self.blog_url'}, {}), '(self.api_key, self.blog_url)', False, 'import akismet\n')] |
gaurvigoyal/lifting_events_to_3d_hpe | experimenting/dataset/datamodule.py | 66d27eb7534f81a95d9f68e17cc534ef2a2c9b1c | import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from .core import BaseCore
from .factory import BaseDataFactory
class DataModule(pl.LightningDataModule):
def __init__(
self,
dataset_factory: BaseDataFactory,
core: BaseCore,
aug_train_config,
aug_test_config,
batch_size: int,
num_workers: int,
train_val_split: float = 0.8,
):
super().__init__()
self.core = core
self.batch_size = batch_size
self.num_workers = num_workers
self.dataset_factory = dataset_factory
self.aug_train_config = aug_train_config
self.aug_test_config = aug_test_config
self.train_val_split = train_val_split
def prepare_data(self, *args, **kwargs):
pass
def setup(self, stage=None):
self.dataset_factory.set_dataset_core(self.core)
(
self.train_indexes,
self.val_indexes,
self.test_indexes,
) = self.dataset_factory.get_train_test_split(self.train_val_split)
self.train_dataset = self.dataset_factory.get_dataset(
self.train_indexes, self.aug_train_config
)
self.val_dataset = self.dataset_factory.get_dataset(
self.val_indexes, self.aug_test_config
)
self.test_dataset = self.dataset_factory.get_dataset(
self.test_indexes, self.aug_test_config
)
def train_dataloader(self):
return get_dataloader(self.train_dataset, self.batch_size, self.num_workers)
def val_dataloader(self):
return get_dataloader(
self.val_dataset,
self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def test_dataloader(self):
return get_dataloader(
self.test_dataset,
self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def test_frames_only_dataloader(self):
return get_dataloader(
self.dataset_factory.get_frame_only_dataset(
self.test_indexes, self.aug_test_config
),
self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def get_dataloader(
dataset: Dataset, batch_size: int, num_workers: int = 12, shuffle=True
) -> DataLoader:
loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
)
return loader
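# Usage sketch (factory, core and augmentation configs are project-specific and assumed):
#   dm = DataModule(dataset_factory, core, aug_train, aug_test, batch_size=32, num_workers=4)
#   dm.setup()
#   pl.Trainer().fit(model, datamodule=dm)  # model: any pl.LightningModule (hypothetical here)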
| [((83, 13, 89, 5), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, Dataset\n')] |
giulianoiorio/PeTar | sevn-interface/SEVN/resources/SEVN_walkthrough/running_folder/analysis_3_pandas.py | f6a849552b3d8e47c5e08fe90fed05bf38bc407d | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#Load file
dt=pd.read_csv("sevn_output/output_0.csv")
#Give a look to the columns
print(dt.columns)
#Consider only the final states
dt=dt.drop_duplicates(["ID","name"], keep='last')
#Load evolved file
dte=pd.read_csv("sevn_output/evolved_0.dat",sep='\s+')
#Give a look to the columns
print(dte.columns)
dte=dte.rename(columns={'#ID': 'ID','Mass_0':"Mzams_0", 'Mass_1':"Mzams_1"})
#After change
print(dte.columns)
#Join the two dataset
dt = dt.merge(dte, on=["ID","name"], how="inner", suffixes=("","_ini") )
# - on: column(s) to match on when merging the two tables (can be a list of columns). The column(s) must be present in both tables
# - how: type of join to use (inner, left, right or outer), see the pandas documentation
# - suffixes: columns with the same name in the two tables (and not listed in "on") will be renamed by adding these suffixes.
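# Illustrative note (not part of the original walkthrough): how "how" changes the result,
# shown with two tiny hypothetical tables:
#   left = pd.DataFrame({'ID': [1, 2], 'x': ['a', 'b']})
#   right = pd.DataFrame({'ID': [2, 3], 'y': ['c', 'd']})
#   left.merge(right, on='ID', how='inner') -> only ID 2
#   left.merge(right, on='ID', how='left')  -> IDs 1 and 2, y is NaN for ID 1
#   left.merge(right, on='ID', how='outer') -> IDs 1, 2 and 3, NaN where unmatched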
#Give a look to the columns
print(dt.columns)
#Create filter indexes
idx0 = (dt.RemnantType_0==6)
idx1 = (dt.RemnantType_1==6)
idxb0 = idx0 & dt.Semimajor.notnull()
idxb1 = idx1 & dt.Semimajor.notnull()
idxm0 = idxb0 & (dt.GWtime + dt.BWorldtime <= 14000)
idxm1 = idxb1 & (dt.GWtime + dt.BWorldtime <= 14000)
#Filter and join masses
AllBH = pd.concat([dt[idx0].Mass_0,dt[idx1].Mass_1])
BoundBH = pd.concat([dt[idxb0].Mass_0,dt[idxb1].Mass_1])
MergingBH = pd.concat([dt[idxm0].Mass_0,dt[idxm1].Mass_1])
#Filter and join initial masses
AllBHzams = pd.concat([dt[idx0].Mzams_0,dt[idx1].Mzams_1])
BoundBHzams = pd.concat([dt[idxb0].Mzams_0,dt[idxb1].Mzams_1])
MergingBHzams = pd.concat([dt[idxm0].Mzams_0,dt[idxm1].Mzams_1])
#Filter and join initial semimajor axis
AllBHa = pd.concat([dt[idx0].a,dt[idx1].a])
BoundBHa = pd.concat([dt[idxb0].a,dt[idxb1].a])
MergingBHa = pd.concat([dt[idxm0].a,dt[idxm1].a])
#Plot
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.scatter(AllBHzams,AllBH,zorder=1,edgecolor="k",s=30,label="All")
plt.scatter(BoundBHzams,BoundBH,zorder=2,edgecolor="k",s=30, label="Bound")
plt.scatter(MergingBHzams,MergingBH,zorder=3,edgecolor="k",s=30, label="Merging")
plt.plot(np.linspace(0,140),np.linspace(0,140),ls="dashed",c="gray")
plt.xscale("log")
plt.yscale("log")
plt.ylabel("BH mass [M$_\odot$]",fontsize=18)
plt.xlabel("$M\mathrm{zams}$ [M$_\odot$]",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)
plt.legend(fontsize=16)
plt.subplot(1,2,2)
plt.scatter(AllBHa,AllBH,zorder=1,edgecolor="k",s=30,label="All")
plt.scatter(BoundBHa,BoundBH,zorder=2,edgecolor="k",s=30,label="Bound")
plt.scatter(MergingBHa,MergingBH,zorder=3,edgecolor="k",s=30,label="Merging")
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Semimajor initial [R$_\odot$]",fontsize=18)
plt.ylabel("BH mass [M$_\odot$]",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)
plt.tight_layout()
plt.savefig("analysis3.png")
plt.show()
| [((6, 3, 6, 42), 'pandas.read_csv', 'pd.read_csv', ({(6, 15, 6, 41): '"""sevn_output/output_0.csv"""'}, {}), "('sevn_output/output_0.csv')", True, 'import pandas as pd\n'), ((13, 4, 13, 54), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((39, 8, 39, 52), 'pandas.concat', 'pd.concat', ({(39, 18, 39, 51): '[dt[idx0].Mass_0, dt[idx1].Mass_1]'}, {}), '([dt[idx0].Mass_0, dt[idx1].Mass_1])', True, 'import pandas as pd\n'), ((40, 10, 40, 56), 'pandas.concat', 'pd.concat', ({(40, 20, 40, 55): '[dt[idxb0].Mass_0, dt[idxb1].Mass_1]'}, {}), '([dt[idxb0].Mass_0, dt[idxb1].Mass_1])', True, 'import pandas as pd\n'), ((41, 12, 41, 58), 'pandas.concat', 'pd.concat', ({(41, 22, 41, 57): '[dt[idxm0].Mass_0, dt[idxm1].Mass_1]'}, {}), '([dt[idxm0].Mass_0, dt[idxm1].Mass_1])', True, 'import pandas as pd\n'), ((44, 12, 44, 58), 'pandas.concat', 'pd.concat', ({(44, 22, 44, 57): '[dt[idx0].Mzams_0, dt[idx1].Mzams_1]'}, {}), '([dt[idx0].Mzams_0, dt[idx1].Mzams_1])', True, 'import pandas as pd\n'), ((45, 14, 45, 62), 'pandas.concat', 'pd.concat', ({(45, 24, 45, 61): '[dt[idxb0].Mzams_0, dt[idxb1].Mzams_1]'}, {}), '([dt[idxb0].Mzams_0, dt[idxb1].Mzams_1])', True, 'import pandas as pd\n'), ((46, 16, 46, 64), 'pandas.concat', 'pd.concat', ({(46, 26, 46, 63): '[dt[idxm0].Mzams_0, dt[idxm1].Mzams_1]'}, {}), '([dt[idxm0].Mzams_0, dt[idxm1].Mzams_1])', True, 'import pandas as pd\n'), ((49, 9, 49, 43), 'pandas.concat', 'pd.concat', ({(49, 19, 49, 42): '[dt[idx0].a, dt[idx1].a]'}, {}), '([dt[idx0].a, dt[idx1].a])', True, 'import pandas as pd\n'), ((50, 11, 50, 47), 'pandas.concat', 'pd.concat', ({(50, 21, 50, 46): '[dt[idxb0].a, dt[idxb1].a]'}, {}), '([dt[idxb0].a, dt[idxb1].a])', True, 'import pandas as pd\n'), ((51, 13, 51, 49), 'pandas.concat', 'pd.concat', ({(51, 23, 51, 48): '[dt[idxm0].a, dt[idxm1].a]'}, {}), '([dt[idxm0].a, dt[idxm1].a])', True, 'import pandas as pd\n'), ((55, 0, 55, 26), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((57, 0, 57, 18), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(57, 12, 57, 13): '(1)', (57, 14, 57, 15): '(2)', (57, 16, 57, 17): '(1)'}, {}), '(1, 2, 1)', True, 'import matplotlib.pyplot as plt\n'), ((58, 0, 58, 68), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((59, 0, 59, 75), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((60, 0, 60, 81), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((62, 0, 62, 17), 'matplotlib.pyplot.xscale', 'plt.xscale', ({(62, 11, 62, 16): '"""log"""'}, {}), "('log')", True, 'import matplotlib.pyplot as plt\n'), ((63, 0, 63, 17), 'matplotlib.pyplot.yscale', 'plt.yscale', ({(63, 11, 63, 16): '"""log"""'}, {}), "('log')", True, 'import matplotlib.pyplot as plt\n'), ((64, 0, 64, 45), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (), '', True, 'import matplotlib.pyplot as plt\n'), ((65, 0, 65, 55), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (), '', True, 'import matplotlib.pyplot as plt\n'), ((67, 0, 67, 23), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((69, 0, 69, 18), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(69, 12, 69, 13): '(1)', (69, 14, 69, 15): '(2)', (69, 16, 69, 17): '(2)'}, {}), '(1, 2, 2)', True, 'import matplotlib.pyplot as plt\n'), ((70, 0, 70, 65), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((71, 0, 71, 71), 
'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((72, 0, 72, 77), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((73, 0, 73, 17), 'matplotlib.pyplot.xscale', 'plt.xscale', ({(73, 11, 73, 16): '"""log"""'}, {}), "('log')", True, 'import matplotlib.pyplot as plt\n'), ((74, 0, 74, 17), 'matplotlib.pyplot.yscale', 'plt.yscale', ({(74, 11, 74, 16): '"""log"""'}, {}), "('log')", True, 'import matplotlib.pyplot as plt\n'), ((75, 0, 75, 56), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (), '', True, 'import matplotlib.pyplot as plt\n'), ((76, 0, 76, 45), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (), '', True, 'import matplotlib.pyplot as plt\n'), ((81, 0, 81, 18), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((82, 0, 82, 28), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(82, 12, 82, 27): '"""analysis3.png"""'}, {}), "('analysis3.png')", True, 'import matplotlib.pyplot as plt\n'), ((83, 0, 83, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((61, 9, 61, 27), 'numpy.linspace', 'np.linspace', ({(61, 21, 61, 22): '(0)', (61, 23, 61, 26): '(140)'}, {}), '(0, 140)', True, 'import numpy as np\n'), ((61, 28, 61, 46), 'numpy.linspace', 'np.linspace', ({(61, 40, 61, 41): '(0)', (61, 42, 61, 45): '(140)'}, {}), '(0, 140)', True, 'import numpy as np\n'), ((66, 0, 66, 9), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((77, 0, 77, 9), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n')] |
VladimirLazor/Lohika | apps/tg_bot/apps.py | a36407feeb2e3ade4f8c689030f343d88ff47a92 | from django.apps import AppConfig
class TgBotConfig(AppConfig):
name = 'apps.tg_bot'
| [] |
rikeshtailor/Office365-REST-Python-Client | office365/sharepoint/portal/group_site_manager.py | ca7bfa1b22212137bb4e984c0457632163e89a43 | from office365.runtime.client_object import ClientObject
from office365.runtime.client_result import ClientResult
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.portal.group_creation_params import GroupCreationInformation
from office365.sharepoint.portal.group_site_info import GroupSiteInfo
class GroupSiteManager(ClientObject):
def __init__(self, context):
super(GroupSiteManager, self).__init__(context, ResourcePath("GroupSiteManager"), None)
def create_group_ex(self, display_name, alias, is_public, optional_params=None):
"""
Create a modern site
:param str display_name:
:param str alias:
:param bool is_public:
:param office365.sharepoint.portal.group_creation_params.GroupCreationParams or None optional_params:
"""
payload = GroupCreationInformation(display_name, alias, is_public, optional_params)
result = ClientResult(self.context, GroupSiteInfo())
qry = ServiceOperationQuery(self, "CreateGroupEx", None, payload, None, result)
self.context.add_query(qry)
return result
def delete(self, site_url):
"""
Deletes a SharePoint Team site
:type site_url: str
"""
payload = {
"siteUrl": site_url
}
qry = ServiceOperationQuery(self, "Delete", None, payload)
self.context.add_query(qry)
return self
def get_status(self, group_id):
"""Get the status of a SharePoint site
:type group_id: str
"""
result = ClientResult(self.context, GroupSiteInfo())
qry = ServiceOperationQuery(self, "GetSiteStatus", None, {'groupId': group_id}, None, result)
self.context.add_query(qry)
def _construct_status_request(request):
request.method = HttpMethod.Get
request.url += "?groupId='{0}'".format(group_id)
self.context.before_execute(_construct_status_request)
return result
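# Minimal usage sketch (client construction and query execution wiring are assumed, not shown here):
#   manager = GroupSiteManager(context)
#   result = manager.create_group_ex("Team Site", "teamsite", True)
#   context.execute_query()   # sends the queued ServiceOperationQuery
#   site_info = result.value  # GroupSiteInfo populated from the server response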
| [((23, 18, 23, 91), 'office365.sharepoint.portal.group_creation_params.GroupCreationInformation', 'GroupCreationInformation', ({(23, 43, 23, 55): 'display_name', (23, 57, 23, 62): 'alias', (23, 64, 23, 73): 'is_public', (23, 75, 23, 90): 'optional_params'}, {}), '(display_name, alias, is_public, optional_params)', False, 'from office365.sharepoint.portal.group_creation_params import GroupCreationInformation\n'), ((25, 14, 25, 87), 'office365.runtime.queries.service_operation_query.ServiceOperationQuery', 'ServiceOperationQuery', ({(25, 36, 25, 40): 'self', (25, 42, 25, 57): '"""CreateGroupEx"""', (25, 59, 25, 63): 'None', (25, 65, 25, 72): 'payload', (25, 74, 25, 78): 'None', (25, 80, 25, 86): 'result'}, {}), "(self, 'CreateGroupEx', None, payload, None, result)", False, 'from office365.runtime.queries.service_operation_query import ServiceOperationQuery\n'), ((38, 14, 38, 66), 'office365.runtime.queries.service_operation_query.ServiceOperationQuery', 'ServiceOperationQuery', ({(38, 36, 38, 40): 'self', (38, 42, 38, 50): '"""Delete"""', (38, 52, 38, 56): 'None', (38, 58, 38, 65): 'payload'}, {}), "(self, 'Delete', None, payload)", False, 'from office365.runtime.queries.service_operation_query import ServiceOperationQuery\n'), ((48, 14, 48, 101), 'office365.runtime.queries.service_operation_query.ServiceOperationQuery', 'ServiceOperationQuery', ({(48, 36, 48, 40): 'self', (48, 42, 48, 57): '"""GetSiteStatus"""', (48, 59, 48, 63): 'None', (48, 65, 48, 86): "{'groupId': group_id}", (48, 88, 48, 92): 'None', (48, 94, 48, 100): 'result'}, {}), "(self, 'GetSiteStatus', None, {'groupId': group_id},\n None, result)", False, 'from office365.runtime.queries.service_operation_query import ServiceOperationQuery\n'), ((12, 56, 12, 88), 'office365.runtime.resource_path.ResourcePath', 'ResourcePath', ({(12, 69, 12, 87): '"""GroupSiteManager"""'}, {}), "('GroupSiteManager')", False, 'from office365.runtime.resource_path import ResourcePath\n'), ((24, 44, 24, 59), 'office365.sharepoint.portal.group_site_info.GroupSiteInfo', 'GroupSiteInfo', ({}, {}), '()', False, 'from office365.sharepoint.portal.group_site_info import GroupSiteInfo\n'), ((47, 44, 47, 59), 'office365.sharepoint.portal.group_site_info.GroupSiteInfo', 'GroupSiteInfo', ({}, {}), '()', False, 'from office365.sharepoint.portal.group_site_info import GroupSiteInfo\n')] |
smok-serwis/cython | tests/errors/e_tuple_args_T692.py | e551a3a348888bd89d4aad809916709a634af1fb | # ticket: 692
# mode: error
def func((a, b)):
return a + b
_ERRORS = u"""
4:9: Missing argument name
5:11: undeclared name not builtin: a
5:15: undeclared name not builtin: b
"""
| [] |
Ladvien/esp32_upython_env | ble.py | 8b0feab940efd3feff16220473e1b5b27d679a56 | import bluetooth
import time
bt = bluetooth.BLE() # singleton
bt.active(True) # activate BT stack
UART_UUID = bluetooth.UUID('6E400001-B5A3-F393-E0A9-E50E24DCCA9E')  # Nordic UART Service (NUS)
UART_TX = (bluetooth.UUID('6E400003-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_READ | bluetooth.FLAG_NOTIFY,)  # TX: peripheral -> central
UART_RX = (bluetooth.UUID('6E400002-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_WRITE,)  # RX: central -> peripheral
UART_SERVICE = (UART_UUID, (UART_TX, UART_RX,),)
SERVICES = (UART_SERVICE,)
( (tx, rx,), ) = bt.gatts_register_services(SERVICES)  # value handles for the TX and RX characteristics
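# Note: gap_advertise() below takes its interval in microseconds. Once a central connects
# (the conn_handle is delivered to a bt.irq handler, not shown here), data can be pushed
# on the TX characteristic with e.g. bt.gatts_notify(conn_handle, tx, b'hello')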
bt.gap_advertise(100) | [((3, 5, 3, 20), 'bluetooth.BLE', 'bluetooth.BLE', ({}, {}), '()', False, 'import bluetooth\n'), ((5, 12, 5, 66), 'bluetooth.UUID', 'bluetooth.UUID', ({(5, 27, 5, 65): '"""6E400001-B5A3-F393-E0A9-E50E24DCCA9E"""'}, {}), "('6E400001-B5A3-F393-E0A9-E50E24DCCA9E')", False, 'import bluetooth\n'), ((6, 11, 6, 65), 'bluetooth.UUID', 'bluetooth.UUID', ({(6, 26, 6, 64): '"""6E400003-B5A3-F393-E0A9-E50E24DCCA9E"""'}, {}), "('6E400003-B5A3-F393-E0A9-E50E24DCCA9E')", False, 'import bluetooth\n'), ((7, 11, 7, 65), 'bluetooth.UUID', 'bluetooth.UUID', ({(7, 26, 7, 64): '"""6E400002-B5A3-F393-E0A9-E50E24DCCA9E"""'}, {}), "('6E400002-B5A3-F393-E0A9-E50E24DCCA9E')", False, 'import bluetooth\n')] |
luxbe/sledo | examples/custom-generator/customer.py | 26aa2b59b11ea115afc25bb407602578cb342170 | from random import randint
from sledo.generate.field_generators.base import FieldGenerator
values = ("Austria",
"Belgium",
"Bulgaria",
"Croatia",
"Cyprus",
"Czech Republic",
"Denmark",
"Estonia",
"Finland",
"France",
"Germany",
"Greece",
"Hungary",
"Ireland",
"Italy",
"Latvia",
"Lithuania",
"Luxembourg",
"Malta",
"Netherlands",
"Poland",
"Portugal",
"Romania",
"Slovakia",
"Slovenia",
"Spain",
"Sweden",
"United States",
"Japan",
"United Kingdom",
"Bangladesh",
"Argentina",
"China")
count = len(values) - 1
class CustomerAddressGenerator(FieldGenerator):
def generate(self, **_):
return values[randint(0, count)]
| [((43, 22, 43, 39), 'random.randint', 'randint', ({(43, 30, 43, 31): '(0)', (43, 33, 43, 38): 'count'}, {}), '(0, count)', False, 'from random import randint\n')] |
crawftv/CRAwTO | status-uncertain/baseline_model.py | 8c6fdb93ed963cbddfe967b041e8beb578d1e94d | #!/usr/bin/env python3
from sklearn.metrics import r2_score
import numpy as np
class BaselineModel(object):
def get_params(self):
return None
def predict(self, X):
return np.ones_like(X.index.values) * self._y_pred
def score(self, X, y):
y_true = y
y_pred = np.ones_like(y_true) * self._y_pred
return r2_score(y_true, y_pred)
class BaselineClassificationPrediction(BaselineModel):
def fit(
self, X, y,
):
        # Store the modal class as _y_pred so the inherited score() keeps working.
        self._y_pred = y.mode()[0]
return self
def predict(
self, X,
):
        # Predict the modal class for every row (also works for non-numeric labels).
        return np.full(len(X), self._y_pred)
class BaselineRegressionPrediction(BaselineModel):
def fit(self, X, y):
self._y_pred = y.median()
return self
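# Usage sketch (assumes pandas inputs; X needs an index for predict()):
#   model = BaselineRegressionPrediction().fit(X_train, y_train)
#   r2 = model.score(X_test, y_test)  # R^2 of always predicting the training median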
| [((16, 15, 16, 39), 'sklearn.metrics.r2_score', 'r2_score', ({(16, 24, 16, 30): 'y_true', (16, 32, 16, 38): 'y_pred'}, {}), '(y_true, y_pred)', False, 'from sklearn.metrics import r2_score\n'), ((11, 15, 11, 43), 'numpy.ones_like', 'np.ones_like', ({(11, 28, 11, 42): 'X.index.values'}, {}), '(X.index.values)', True, 'import numpy as np\n'), ((15, 17, 15, 37), 'numpy.ones_like', 'np.ones_like', ({(15, 30, 15, 36): 'y_true'}, {}), '(y_true)', True, 'import numpy as np\n')] |
ecalder6/MT-HW2 | aligner/grow_diag_final.py | 1356aeb374a6e4d0b0ae819684bf314039948c56 | import optparse
import sys
def make_set(data, s, e_vocab, f_vocab, aligned, reverse):
for pair in data.split():
cur = pair.split('-')
if reverse:
e_vocab.add(int(cur[1]))
f_vocab.add(int(cur[0]))
aligned.add(int(cur[0]))
s.add((int(cur[1]), int(cur[0])))
else:
e_vocab.add(int(cur[0]))
f_vocab.add(int(cur[1]))
aligned.add(int(cur[0]))
s.add((int(cur[0]), int(cur[1])))
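# Assumed input format for make_set: one sentence pair per line of whitespace-separated
# "i-j" pairs, e.g. "0-0 1-2 2-1" aligns e-token 0 with f-token 0, e-token 1 with
# f-token 2, and e-token 2 with f-token 1 (the indices are read swapped when reverse=True).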
def grow_diag_final_and(e2f_data, f2e_data):
directions = [(-1,0),(0,-1),(1,0),(0,1),(-1,-1),(-1,1),(1,-1),(1,1)]
for (i, (e2f, f2e)) in enumerate(zip(open(e2f_data), open(f2e_data))):
e2f_set, f2e_set, e_vocab, f_vocab, e_aligned, f_aligned = set(), set(), set(), set(), set(), set()
make_set(e2f, e2f_set, e_vocab, f_vocab, e_aligned, False)
make_set(f2e, f2e_set, e_vocab, f_vocab, f_aligned, True)
alignment = e2f_set & f2e_set
union_alignment = e2f_set | f2e_set
grow_diag(e_vocab, f_vocab, e_aligned, f_aligned, alignment, union_alignment, directions)
final(e_vocab, f_vocab, e_aligned, f_aligned, alignment, union_alignment, True)
for e, f in alignment:
sys.stdout.write("%i-%i " % (e,f))
sys.stdout.write("\n")
def grow_diag(e_vocab, f_vocab, e_alignment, f_alignment, alignment, union_alignment, directions):
prev_len = 0
while prev_len != len(alignment):
prev_len = len(alignment)
for e in e_vocab:
for f in f_vocab:
if (e, f) in alignment:
for d in directions:
en, fn = e + d[0], f + d[1]
if (en not in e_alignment or fn not in f_alignment) and (en, fn) in union_alignment:
alignment.add((en, fn))
e_alignment.add(en)
f_alignment.add(fn)
def final(e_vocab, f_vocab, e_alignment, f_alignment, alignment, union_alignment, final_and):
for e in e_vocab:
for f in f_vocab:
c = False
if final_and:
c = e not in e_alignment and f not in f_alignment
else:
c = e not in e_alignment or f not in f_alignment
if c and (e, f) in union_alignment:
alignment.add((e, f))
e_alignment.add(e)
f_alignment.add(f)
def main():
optparser = optparse.OptionParser()
optparser.add_option("-d", "--data", dest="train", default="data/alignment", help="Data filename prefix (default=data)")
optparser.add_option("-e", "--e2f", dest="e2f", default="ef", help="Suffix of English to French filename (default=ef)")
optparser.add_option("-f", "--f2e", dest="f2e", default="fe", help="Suffix of French to English filename (default=fe)")
optparser.add_option("-a", "--final_and", dest="final_and", action="store_true", help="Whether to use Final-And version of the algorithm")
(opts, args) = optparser.parse_args()
e2f_data = "%s.%s" % (opts.train, opts.e2f)
f2e_data = "%s.%s" % (opts.train, opts.f2e)
grow_diag_final_and(e2f_data, f2e_data)
if __name__ == "__main__":
main()
| [((61, 16, 61, 39), 'optparse.OptionParser', 'optparse.OptionParser', ({}, {}), '()', False, 'import optparse\n'), ((31, 8, 31, 30), 'sys.stdout.write', 'sys.stdout.write', ({(31, 25, 31, 29): '"""\n"""'}, {}), "('\\n')", False, 'import sys\n'), ((30, 12, 30, 46), 'sys.stdout.write', 'sys.stdout.write', ({(30, 29, 30, 45): "('%i-%i ' % (e, f))"}, {}), "('%i-%i ' % (e, f))", False, 'import sys\n')] |
Transcranial-Solutions/t-bears | tests/test_tbears_db.py | 4712b8bb425814c444ee75f3220a31df934982aa | # -*- coding: utf-8 -*-
# Copyright 2017-2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
from tbears.block_manager.tbears_db import TbearsDB
DIRECTORY_PATH = os.path.abspath((os.path.dirname(__file__)))
DB_PATH = os.path.join(DIRECTORY_PATH, './.tbears_db')
class TestTBearsDB(unittest.TestCase):
def setUp(self):
self.TBEARS_DB = TbearsDB(TbearsDB.make_db(DB_PATH))
self.test_key = b'test_key'
self.test_value = b'test_value'
def tearDown(self):
self.TBEARS_DB.close()
shutil.rmtree(DB_PATH)
def test_put_and_get(self):
# Put and get
self.TBEARS_DB.put(self.test_key, self.test_value)
ret = self.TBEARS_DB.get(self.test_key)
self.assertEqual(ret, self.test_value)
# overwrite
overwrite_value = b'test_value_overwrite'
self.TBEARS_DB.put(self.test_key, overwrite_value)
ret = self.TBEARS_DB.get(self.test_key)
self.assertEqual(ret, overwrite_value)
# get invalid key
ret = self.TBEARS_DB.get(b'invalid_key')
self.assertIsNone(ret)
# put invalid type
self.assertRaises(TypeError, self.TBEARS_DB.put, 'test_key', self.test_value)
self.assertRaises(TypeError, self.TBEARS_DB.put, self.test_key, 123)
def test_delete(self):
self.TBEARS_DB.put(self.test_key, self.test_value)
ret = self.TBEARS_DB.get(self.test_key)
self.assertEqual(ret, self.test_value)
self.TBEARS_DB.delete(self.test_key)
ret = self.TBEARS_DB.get(self.test_key)
self.assertIsNone(ret)
def test_iterator(self):
self.TBEARS_DB.put(b'key1', b'value1')
self.TBEARS_DB.put(b'key2', b'value2')
self.TBEARS_DB.put(b'key3', b'value3')
self.TBEARS_DB.put(b'key4', b'value4')
i = 1
for _, actual_value in self.TBEARS_DB.iterator():
expected_value = ('value' + str(i)).encode()
self.assertEqual(expected_value, actual_value)
i += 1
| [((23, 10, 23, 54), 'os.path.join', 'os.path.join', ({(23, 23, 23, 37): 'DIRECTORY_PATH', (23, 39, 23, 53): '"""./.tbears_db"""'}, {}), "(DIRECTORY_PATH, './.tbears_db')", False, 'import os\n'), ((22, 34, 22, 59), 'os.path.dirname', 'os.path.dirname', ({(22, 50, 22, 58): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((35, 8, 35, 30), 'shutil.rmtree', 'shutil.rmtree', ({(35, 22, 35, 29): 'DB_PATH'}, {}), '(DB_PATH)', False, 'import shutil\n'), ((29, 34, 29, 59), 'tbears.block_manager.tbears_db.TbearsDB.make_db', 'TbearsDB.make_db', ({(29, 51, 29, 58): 'DB_PATH'}, {}), '(DB_PATH)', False, 'from tbears.block_manager.tbears_db import TbearsDB\n')] |
pierky/exabgp | src/exabgp/bgp/message/update/attribute/bgpls/link/mplsmask.py | 34be537ae5906c0830b31da1152ae63108ccf911 | # encoding: utf-8
"""
mplsmask.py
Created by Evelio Vila on 2016-12-01.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
"""
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.attribute.bgpls.linkstate import LinkState
from exabgp.bgp.message.update.attribute.bgpls.linkstate import FlagLS
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |L|R| Reserved |
# +-+-+-+-+-+-+-+-+
# https://tools.ietf.org/html/rfc7752#section-3.3.2.2 MPLS Protocol Mask
#
# +------------+------------------------------------------+-----------+
# | Bit | Description | Reference |
# +------------+------------------------------------------+-----------+
# | 'L' | Label Distribution Protocol (LDP) | [RFC5036] |
# | 'R' | Extension to RSVP for LSP Tunnels | [RFC3209] |
# | | (RSVP-TE) | |
# | 'Reserved' | Reserved for future use | |
# +------------+------------------------------------------+-----------+
# RFC 7752 3.3.2.2. MPLS Protocol Mask TLV
@LinkState.register()
class MplsMask(FlagLS):
REPR = 'MPLS Protocol mask'
JSON = 'mpls-mask'
TLV = 1094
FLAGS = ['LDP', 'RSVP-TE', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV']
LEN = 1
| [((33, 1, 33, 21), 'exabgp.bgp.message.update.attribute.bgpls.linkstate.LinkState.register', 'LinkState.register', ({}, {}), '()', False, 'from exabgp.bgp.message.update.attribute.bgpls.linkstate import LinkState\n')] |
hsorby/scaffoldmaker | tests/test_cecum.py | 5e3b4531665dbc465b53acc1662f8d9bbb9dc1e1 | import unittest
from opencmiss.utils.zinc.finiteelement import evaluateFieldNodesetRange
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.context import Context
from opencmiss.zinc.element import Element
from opencmiss.zinc.field import Field
from opencmiss.zinc.result import RESULT_OK
from scaffoldmaker.meshtypes.meshtype_3d_cecum1 import MeshType_3d_cecum1
from scaffoldmaker.utils.zinc_utils import createFaceMeshGroupExteriorOnFace
from testutils import assertAlmostEqualList
class CecumScaffoldTestCase(unittest.TestCase):
def test_cecum1(self):
"""
Test creation of cecum scaffold.
"""
parameterSetNames = MeshType_3d_cecum1.getParameterSetNames()
self.assertEqual(parameterSetNames, ["Default", "Pig 1"])
options = MeshType_3d_cecum1.getDefaultOptions("Pig 1")
self.assertEqual(30, len(options))
self.assertEqual(5, options.get("Number of segments"))
self.assertEqual(2, options.get("Number of elements around tenia coli"))
self.assertEqual(8, options.get("Number of elements along segment"))
self.assertEqual(1, options.get("Number of elements through wall"))
self.assertEqual(35.0, options.get("Start inner radius"))
self.assertEqual(3.0, options.get("Start inner radius derivative"))
self.assertEqual(38.0, options.get("End inner radius"))
self.assertEqual(3.0, options.get("End inner radius derivative"))
self.assertEqual(0.5, options.get("Corner inner radius factor"))
self.assertEqual(0.25, options.get("Haustrum inner radius factor"))
self.assertEqual(4.0, options.get("Segment length mid derivative factor"))
self.assertEqual(3, options.get("Number of tenia coli"))
self.assertEqual(5.0, options.get("Start tenia coli width"))
self.assertEqual(0.0, options.get("End tenia coli width derivative"))
self.assertEqual(2.0, options.get("Wall thickness"))
ostiumOptions = options['Ileocecal junction']
ostiumSettings = ostiumOptions.getScaffoldSettings()
self.assertEqual(1, ostiumSettings.get("Number of vessels"))
self.assertEqual(8, ostiumSettings.get("Number of elements around ostium"))
self.assertEqual(1, ostiumSettings.get("Number of elements through wall"))
self.assertEqual(20.0, ostiumSettings.get("Ostium diameter"))
self.assertEqual(10.0, ostiumSettings.get("Vessel inner diameter"))
self.assertEqual(60, options.get("Ileocecal junction angular position degrees"))
self.assertEqual(0.5, options.get("Ileocecal junction position along factor"))
context = Context("Test")
region = context.getDefaultRegion()
self.assertTrue(region.isValid())
annotationGroups = MeshType_3d_cecum1.generateBaseMesh(region, options)
self.assertEqual(2, len(annotationGroups))
fieldmodule = region.getFieldmodule()
self.assertEqual(RESULT_OK, fieldmodule.defineAllFaces())
mesh3d = fieldmodule.findMeshByDimension(3)
self.assertEqual(1492, mesh3d.getSize())
mesh2d = fieldmodule.findMeshByDimension(2)
self.assertEqual(5617, mesh2d.getSize())
mesh1d = fieldmodule.findMeshByDimension(1)
self.assertEqual(6767, mesh1d.getSize())
nodes = fieldmodule.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
self.assertEqual(2642, nodes.getSize())
datapoints = fieldmodule.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_DATAPOINTS)
self.assertEqual(0, datapoints.getSize())
coordinates = fieldmodule.findFieldByName("coordinates").castFiniteElement()
self.assertTrue(coordinates.isValid())
minimums, maximums = evaluateFieldNodesetRange(coordinates, nodes)
assertAlmostEqualList(self, minimums, [-49.01658984455258, -46.89686037622053, -2.343256155753525], 1.0E-6)
assertAlmostEqualList(self, maximums, [42.18085849205387, 54.89264119402881, 180.0], 1.0E-6)
with ChangeManager(fieldmodule):
one = fieldmodule.createFieldConstant(1.0)
faceMeshGroup = createFaceMeshGroupExteriorOnFace(fieldmodule, Element.FACE_TYPE_XI3_1)
surfaceAreaField = fieldmodule.createFieldMeshIntegral(one, coordinates, faceMeshGroup)
surfaceAreaField.setNumbersOfPoints(4)
volumeField = fieldmodule.createFieldMeshIntegral(one, coordinates, mesh3d)
volumeField.setNumbersOfPoints(3)
fieldcache = fieldmodule.createFieldcache()
result, surfaceArea = surfaceAreaField.evaluateReal(fieldcache, 1)
self.assertEqual(result, RESULT_OK)
self.assertAlmostEqual(surfaceArea, 65960.20655074248, delta=1.0E-6)
result, volume = volumeField.evaluateReal(fieldcache, 1)
self.assertEqual(result, RESULT_OK)
self.assertAlmostEqual(volume, 127905.28250502056, delta=1.0E-6)
if __name__ == "__main__":
unittest.main()
| [((92, 4, 92, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((21, 28, 21, 69), 'scaffoldmaker.meshtypes.meshtype_3d_cecum1.MeshType_3d_cecum1.getParameterSetNames', 'MeshType_3d_cecum1.getParameterSetNames', ({}, {}), '()', False, 'from scaffoldmaker.meshtypes.meshtype_3d_cecum1 import MeshType_3d_cecum1\n'), ((23, 18, 23, 63), 'scaffoldmaker.meshtypes.meshtype_3d_cecum1.MeshType_3d_cecum1.getDefaultOptions', 'MeshType_3d_cecum1.getDefaultOptions', ({(23, 55, 23, 62): '"""Pig 1"""'}, {}), "('Pig 1')", False, 'from scaffoldmaker.meshtypes.meshtype_3d_cecum1 import MeshType_3d_cecum1\n'), ((50, 18, 50, 33), 'opencmiss.zinc.context.Context', 'Context', ({(50, 26, 50, 32): '"""Test"""'}, {}), "('Test')", False, 'from opencmiss.zinc.context import Context\n'), ((53, 27, 53, 79), 'scaffoldmaker.meshtypes.meshtype_3d_cecum1.MeshType_3d_cecum1.generateBaseMesh', 'MeshType_3d_cecum1.generateBaseMesh', ({(53, 63, 53, 69): 'region', (53, 71, 53, 78): 'options'}, {}), '(region, options)', False, 'from scaffoldmaker.meshtypes.meshtype_3d_cecum1 import MeshType_3d_cecum1\n'), ((71, 29, 71, 74), 'opencmiss.utils.zinc.finiteelement.evaluateFieldNodesetRange', 'evaluateFieldNodesetRange', ({(71, 55, 71, 66): 'coordinates', (71, 68, 71, 73): 'nodes'}, {}), '(coordinates, nodes)', False, 'from opencmiss.utils.zinc.finiteelement import evaluateFieldNodesetRange\n'), ((72, 8, 72, 115), 'testutils.assertAlmostEqualList', 'assertAlmostEqualList', ({(72, 30, 72, 34): 'self', (72, 36, 72, 44): 'minimums', (72, 46, 72, 106): '[-49.01658984455258, -46.89686037622053, -2.343256155753525]', (72, 108, 72, 114): '(1e-06)'}, {}), '(self, minimums, [-49.01658984455258, -\n 46.89686037622053, -2.343256155753525], 1e-06)', False, 'from testutils import assertAlmostEqualList\n'), ((73, 8, 73, 100), 'testutils.assertAlmostEqualList', 'assertAlmostEqualList', ({(73, 30, 73, 34): 'self', (73, 36, 73, 44): 'maximums', (73, 46, 73, 91): '[42.18085849205387, 54.89264119402881, 180.0]', (73, 93, 73, 99): '(1e-06)'}, {}), '(self, maximums, [42.18085849205387, 54.89264119402881,\n 180.0], 1e-06)', False, 'from testutils import assertAlmostEqualList\n'), ((75, 13, 75, 39), 'opencmiss.utils.zinc.general.ChangeManager', 'ChangeManager', ({(75, 27, 75, 38): 'fieldmodule'}, {}), '(fieldmodule)', False, 'from opencmiss.utils.zinc.general import ChangeManager\n'), ((77, 28, 77, 99), 'scaffoldmaker.utils.zinc_utils.createFaceMeshGroupExteriorOnFace', 'createFaceMeshGroupExteriorOnFace', ({(77, 62, 77, 73): 'fieldmodule', (77, 75, 77, 98): 'Element.FACE_TYPE_XI3_1'}, {}), '(fieldmodule, Element.FACE_TYPE_XI3_1)', False, 'from scaffoldmaker.utils.zinc_utils import createFaceMeshGroupExteriorOnFace\n')] |
jm66/pyvmomi-community-samples | samples/destroy_vm.py | 5ca4a50b767500e07b9bce9fba70240bfa963a4e | #!/usr/bin/env python
# Copyright 2015 Michael Rice <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
from pyVim import connect
from pyVmomi import vim
from tools import cli
from tools import tasks
def setup_args():
"""Adds additional ARGS to allow the vm name or uuid to
be set.
"""
parser = cli.build_arg_parser()
# using j here because -u is used for user
parser.add_argument('-j', '--uuid',
help='BIOS UUID of the VirtualMachine you want '
'to destroy.')
parser.add_argument('-n', '--name',
help='DNS Name of the VirtualMachine you want to '
'destroy.')
parser.add_argument('-i', '--ip',
help='IP Address of the VirtualMachine you want to '
'destroy')
parser.add_argument('-v', '--vm',
help='VM name of the VirtualMachine you want '
'to destroy.')
my_args = parser.parse_args()
return cli.prompt_for_password(my_args)
def get_obj(content, vimtype, name):
"""Create contrainer view and search for object in it"""
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
container.Destroy()
return obj
ARGS = setup_args()
SI = None
try:
SI = connect.SmartConnectNoSSL(host=ARGS.host,
user=ARGS.user,
pwd=ARGS.password,
port=ARGS.port)
atexit.register(connect.Disconnect, SI)
except (IOError, vim.fault.InvalidLogin):
pass
if not SI:
raise SystemExit("Unable to connect to host with supplied credentials.")
VM = None
if ARGS.vm:
VM = get_obj(SI.content, [vim.VirtualMachine], ARGS.vm)
elif ARGS.uuid:
VM = SI.content.searchIndex.FindByUuid(None, ARGS.uuid,
True,
False)
elif ARGS.name:
VM = SI.content.searchIndex.FindByDnsName(None, ARGS.name,
True)
elif ARGS.ip:
VM = SI.content.searchIndex.FindByIp(None, ARGS.ip, True)
if VM is None:
raise SystemExit(
"Unable to locate VirtualMachine. Arguments given: "
"vm - {0} , uuid - {1} , name - {2} , ip - {3}"
.format(ARGS.vm, ARGS.uuid, ARGS.name, ARGS.ip)
)
print("Found: {0}".format(VM.name))
print("The current powerState is: {0}".format(VM.runtime.powerState))
if format(VM.runtime.powerState) == "poweredOn":
print("Attempting to power off {0}".format(VM.name))
TASK = VM.PowerOffVM_Task()
tasks.wait_for_tasks(SI, [TASK])
print("{0}".format(TASK.info.state))
print("Destroying VM from vSphere.")
TASK = VM.Destroy_Task()
tasks.wait_for_tasks(SI, [TASK])
print("Done.")
| [((115, 0, 115, 32), 'tools.tasks.wait_for_tasks', 'tasks.wait_for_tasks', ({(115, 21, 115, 23): 'SI', (115, 25, 115, 31): '[TASK]'}, {}), '(SI, [TASK])', False, 'from tools import tasks\n'), ((33, 13, 33, 35), 'tools.cli.build_arg_parser', 'cli.build_arg_parser', ({}, {}), '()', False, 'from tools import cli\n'), ((50, 11, 50, 43), 'tools.cli.prompt_for_password', 'cli.prompt_for_password', ({(50, 35, 50, 42): 'my_args'}, {}), '(my_args)', False, 'from tools import cli\n'), ((74, 9, 77, 50), 'pyVim.connect.SmartConnectNoSSL', 'connect.SmartConnectNoSSL', (), '', False, 'from pyVim import connect\n'), ((78, 4, 78, 43), 'atexit.register', 'atexit.register', ({(78, 20, 78, 38): 'connect.Disconnect', (78, 40, 78, 42): 'SI'}, {}), '(connect.Disconnect, SI)', False, 'import atexit\n'), ((110, 4, 110, 36), 'tools.tasks.wait_for_tasks', 'tasks.wait_for_tasks', ({(110, 25, 110, 27): 'SI', (110, 29, 110, 35): '[TASK]'}, {}), '(SI, [TASK])', False, 'from tools import tasks\n')] |
1000monkeys/MastermindRedux | helpers/Screen.py | 6b07a341ecbf2ea325949a49c84218cc3632cd33 | import sys
class Screen:
def __init__(self) -> None:
pass
def handle_events(self, events):
for event in events:
            if event.type == pygame.QUIT:
sys.exit()
def draw(self, screen):
pass | [((10, 16, 10, 26), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n')] |
chris-han/ailab | VirtualStage/BackgroundMatting/fixed_threshold.py | b77d90f9089fa8003095843aa5de718fe73965a7 | import os
def fixed_split(videos, thresholds, mask_suffix, overlap=0, background_path="/"):
# crop target background video frames
backgrounds = [os.path.join(background_path, f[:-4]) for f in os.listdir(background_path) if f.endswith(".mp4")]
print(f"Splitting {len(backgrounds)} target background videos vertically by a fixed threshold")
for i, background in enumerate(backgrounds):
if i >= (len(thresholds)) or not thresholds[i]:
continue
try:
os.makedirs(background + "_up")
os.makedirs(background + "_dw")
except FileExistsError:
continue
threshold = int(thresholds[i])
iup_region = f"iw:{threshold + overlap}:0:0"
idw_region = f"iw:ih-{threshold + overlap}:0:{threshold - overlap}"
cmd=(
f"ffmpeg -i \"{os.path.join(background, '%04d_img.png')}\" "
f'-filter:v "crop={iup_region}" '
f"\"{os.path.join(background+'_up', '%04d_img.png')}\""
" > split_background_logs.txt 2>&1"
)
code = os.system(
cmd
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -i \"{os.path.join(background, '%04d_img.png')}\" "
f'-filter:v "crop={idw_region}" '
f"\"{os.path.join(background+'_dw', '%04d_img.png')}\""
" > split_background_logs.txt 2>&1"
)
if code != 0:
exit(code)
print(f"Splitting {len(videos)} videos vertically by a fixed threshold")
for i, video in enumerate(videos):
if i >= (len(thresholds)) or not thresholds[i]:
continue
try:
os.makedirs(video + "_up")
os.makedirs(video + "_dw")
except FileExistsError:
continue
threshold = int(thresholds[i])
iup_region = f"iw:{threshold + overlap}:0:0"
idw_region = f"iw:ih-{threshold + overlap}:0:{threshold - overlap}"
# crop target background single image
cmd = (
f"ffmpeg -y -i \"{video+'.png'}\" "
f'-filter:v \"crop={iup_region}\" '
f"\"{video+'_up.png'}\""
" > split_logs.txt 2>&1"
)
code = os.system(
cmd
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -y -i \"{video+'.png'}\" "
f'-filter:v "crop={idw_region}" '
f"\"{video+'_dw.png'}\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
# crop color images
cmd=(
f"ffmpeg -i \"{os.path.join(video, '%04d_img.png')}\" "
f'-filter:v "crop={iup_region}" '
f"\"{os.path.join(video+'_up', '%04d_img.png')}\""
" > split_logs.txt 2>&1"
)
code = os.system(
cmd
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -i \"{os.path.join(video, '%04d_img.png')}\" "
f'-filter:v "crop={idw_region}" '
f"\"{os.path.join(video+'_dw', '%04d_img.png')}\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
# crop mask images
code = os.system(
f"ffmpeg -i \"{os.path.join(video, '%04d')}{mask_suffix}.png\" "
f'-filter:v "crop={iup_region}" '
f"\"{os.path.join(video+'_up', '%04d')}{mask_suffix}.png\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -i \"{os.path.join(video, '%04d')}{mask_suffix}.png\" "
f'-filter:v "crop={idw_region}" '
f"\"{os.path.join(video+'_dw', '%04d')}{mask_suffix}.png\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
print(f" Splitted {video} ({i+1}/{len(videos)})")
def fixed_merge(videos, factors, output_dir, suffix, outputs_list, overlap=0):
print(f"Reconstructing {len(videos)} output images")
for i, video in enumerate(videos):
if i < (len(factors)) and factors[i]:
# video split, merging
out_path = os.path.join(output_dir, os.path.basename(video)).replace(
"\\", "/"
)
try:
os.makedirs(out_path + suffix)
except FileExistsError:
continue
outpup = (out_path + "_up" + suffix).replace("\\", "/")
outpdw = (out_path + "_dw" + suffix).replace("\\", "/")
for o in outputs_list:
code = os.system(
f"ffmpeg -i \"{outpup}/%04d_{o}.png\" -i \"{outpdw}/%04d_{o}.png\" "
f'-filter_complex "[0:0]crop=iw:ih-{overlap}:0:0[v0];'
f"[1:0]crop=iw:ih-{overlap}:0:{overlap}[v1];"
f'[v0][v1]vstack" '
f"\"{out_path + suffix}/%04d_{o}.png\" -hide_banner"
" > merge_logs.txt"
)
if code != 0:
exit(code)
print(f" Merged {video} ({i+1}/{len(videos)})")
| [((8, 19, 8, 56), 'os.path.join', 'os.path.join', ({(8, 32, 8, 47): 'background_path', (8, 49, 8, 55): 'f[:-4]'}, {}), '(background_path, f[:-4])', False, 'import os\n'), ((31, 15, 33, 9), 'os.system', 'os.system', ({(32, 12, 32, 15): 'cmd'}, {}), '(cmd)', False, 'import os\n'), ((67, 15, 69, 9), 'os.system', 'os.system', ({(68, 12, 68, 15): 'cmd'}, {}), '(cmd)', False, 'import os\n'), ((72, 15, 77, 9), 'os.system', 'os.system', ({(73, 12, 76, 36): 'f"""ffmpeg -y -i "{video + \'.png\'}" -filter:v "crop={idw_region}" "{video + \'_dw.png\'}" > split_logs.txt 2>&1"""'}, {}), '(\n f\'ffmpeg -y -i "{video + \\\'.png\\\'}" -filter:v "crop={idw_region}" "{video + \\\'_dw.png\\\'}" > split_logs.txt 2>&1\'\n )', False, 'import os\n'), ((89, 15, 91, 9), 'os.system', 'os.system', ({(90, 12, 90, 15): 'cmd'}, {}), '(cmd)', False, 'import os\n'), ((8, 66, 8, 93), 'os.listdir', 'os.listdir', ({(8, 77, 8, 92): 'background_path'}, {}), '(background_path)', False, 'import os\n'), ((16, 12, 16, 43), 'os.makedirs', 'os.makedirs', ({(16, 24, 16, 42): "(background + '_up')"}, {}), "(background + '_up')", False, 'import os\n'), ((17, 12, 17, 43), 'os.makedirs', 'os.makedirs', ({(17, 24, 17, 42): "(background + '_dw')"}, {}), "(background + '_dw')", False, 'import os\n'), ((51, 12, 51, 38), 'os.makedirs', 'os.makedirs', ({(51, 24, 51, 37): "(video + '_up')"}, {}), "(video + '_up')", False, 'import os\n'), ((52, 12, 52, 38), 'os.makedirs', 'os.makedirs', ({(52, 24, 52, 37): "(video + '_dw')"}, {}), "(video + '_dw')", False, 'import os\n'), ((26, 27, 26, 67), 'os.path.join', 'os.path.join', ({(26, 40, 26, 50): 'background', (26, 52, 26, 66): '"""%04d_img.png"""'}, {}), "(background, '%04d_img.png')", False, 'import os\n'), ((26, 13, 26, 59), 'os.path.join', 'os.path.join', ({(26, 26, 26, 42): "(background + '_up')", (26, 44, 26, 58): '"""%04d_img.png"""'}, {}), "(background + '_up', '%04d_img.png')", False, 'import os\n'), ((84, 27, 84, 62), 'os.path.join', 'os.path.join', ({(84, 40, 84, 45): 'video', (84, 47, 84, 61): '"""%04d_img.png"""'}, {}), "(video, '%04d_img.png')", False, 'import os\n'), ((84, 13, 84, 54), 'os.path.join', 'os.path.join', ({(84, 26, 84, 37): "(video + '_up')", (84, 39, 84, 53): '"""%04d_img.png"""'}, {}), "(video + '_up', '%04d_img.png')", False, 'import os\n'), ((134, 16, 134, 46), 'os.makedirs', 'os.makedirs', ({(134, 28, 134, 45): '(out_path + suffix)'}, {}), '(out_path + suffix)', False, 'import os\n'), ((142, 23, 149, 17), 'os.system', 'os.system', ({(143, 20, 148, 39): 'f"""ffmpeg -i "{outpup}/%04d_{o}.png" -i "{outpdw}/%04d_{o}.png" -filter_complex "[0:0]crop=iw:ih-{overlap}:0:0[v0];[1:0]crop=iw:ih-{overlap}:0:{overlap}[v1];[v0][v1]vstack" "{out_path + suffix}/%04d_{o}.png" -hide_banner > merge_logs.txt"""'}, {}), '(\n f\'ffmpeg -i "{outpup}/%04d_{o}.png" -i "{outpdw}/%04d_{o}.png" -filter_complex "[0:0]crop=iw:ih-{overlap}:0:0[v0];[1:0]crop=iw:ih-{overlap}:0:{overlap}[v1];[v0][v1]vstack" "{out_path + suffix}/%04d_{o}.png" -hide_banner > merge_logs.txt\'\n )', False, 'import os\n'), ((37, 27, 37, 67), 'os.path.join', 'os.path.join', ({(37, 40, 37, 50): 'background', (37, 52, 37, 66): '"""%04d_img.png"""'}, {}), "(background, '%04d_img.png')", False, 'import os\n'), ((37, 13, 37, 59), 'os.path.join', 'os.path.join', ({(37, 26, 37, 42): "background + '_dw'", (37, 44, 37, 58): '"""%04d_img.png"""'}, {}), "(background + '_dw', '%04d_img.png')", False, 'import os\n'), ((95, 27, 95, 62), 'os.path.join', 'os.path.join', ({(95, 40, 95, 45): 'video', (95, 47, 95, 61): '"""%04d_img.png"""'}, 
{}), "(video, '%04d_img.png')", False, 'import os\n'), ((95, 13, 95, 54), 'os.path.join', 'os.path.join', ({(95, 26, 95, 37): "video + '_dw'", (95, 39, 95, 53): '"""%04d_img.png"""'}, {}), "(video + '_dw', '%04d_img.png')", False, 'import os\n'), ((105, 27, 105, 54), 'os.path.join', 'os.path.join', ({(105, 40, 105, 45): 'video', (105, 47, 105, 53): '"""%04d"""'}, {}), "(video, '%04d')", False, 'import os\n'), ((105, 13, 105, 46), 'os.path.join', 'os.path.join', ({(105, 26, 105, 37): "video + '_up'", (105, 39, 105, 45): '"""%04d"""'}, {}), "(video + '_up', '%04d')", False, 'import os\n'), ((113, 27, 113, 54), 'os.path.join', 'os.path.join', ({(113, 40, 113, 45): 'video', (113, 47, 113, 53): '"""%04d"""'}, {}), "(video, '%04d')", False, 'import os\n'), ((113, 13, 113, 46), 'os.path.join', 'os.path.join', ({(113, 26, 113, 37): "video + '_dw'", (113, 39, 113, 45): '"""%04d"""'}, {}), "(video + '_dw', '%04d')", False, 'import os\n'), ((129, 48, 129, 71), 'os.path.basename', 'os.path.basename', ({(129, 65, 129, 70): 'video'}, {}), '(video)', False, 'import os\n')] |
veredsil/hn2016_falwa | hn2016_falwa/utilities.py | 53035ac838860dd8a8d85619f16cc9785dee8655 | import numpy as np
from math import pi,exp
def static_stability(height,area,theta,s_et=None,n_et=None):
"""
The function "static_stability" computes the vertical gradient (z-derivative)
of hemispheric-averaged potential temperature, i.e. d\tilde{theta}/dz in the def-
inition of QGPV in eq.(3) of Huang and Nakamura (2016), by central differencing.
At the boundary, the static stability is estimated by forward/backward differen-
cing involving two adjacent z-grid points:
i.e. stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0])
stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1])
Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues
Parameters
----------
height : sequence or array_like
Array of z-coordinate [in meters] with dimension = (kmax), equally spaced
area : ndarray
Two-dimension numpy array specifying differential areal element of each grid point;
dimension = (nlat, nlon).
theta : ndarray
Matrix of potential temperature [K] with dimension (kmax,nlat,nlon) or (kmax,nlat)
s_et : int, optional
Index of the latitude that defines the boundary of the Southern hemispheric domain;
initialized as nlat/2 if not input
n_et : int, optional
        Index of the latitude that defines the boundary of the Northern hemispheric domain;
initialized as nlat/2 if not input
Returns
-------
t0_n : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Northern hemispheric domain with dimension = (kmax)
t0_s : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Southern hemispheric domain with dimension = (kmax)
stat_n : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric
domain with dimension = (kmax)
stat_s : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric
domain with dimension = (kmax)
"""
nlat = theta.shape[1]
    if s_et is None:
        s_et = nlat//2
    if n_et is None:
        n_et = nlat//2
stat_n = np.zeros(theta.shape[0])
stat_s = np.zeros(theta.shape[0])
if theta.ndim==3:
zonal_mean = np.mean(theta,axis=-1)
elif theta.ndim==2:
zonal_mean = theta
if area.ndim==2:
area_zonal_mean = np.mean(area,axis=-1)
elif area.ndim==1:
area_zonal_mean = area
csm_n_et = np.sum(area_zonal_mean[-n_et:])
csm_s_et = np.sum(area_zonal_mean[:s_et])
t0_n = np.sum(zonal_mean[:,-n_et:]*area_zonal_mean[np.newaxis,-n_et:],axis=-1)/csm_n_et
t0_s = np.sum(zonal_mean[:,:s_et]*area_zonal_mean[np.newaxis,:s_et],axis=-1)/csm_s_et
stat_n[1:-1] = (t0_n[2:]-t0_n[:-2])/(height[2:]-height[:-2])
stat_s[1:-1] = (t0_s[2:]-t0_s[:-2])/(height[2:]-height[:-2])
stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0])
stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1])
stat_s[0] = (t0_s[1]-t0_s[0])/(height[1]-height[0])
stat_s[-1] = (t0_s[-2]-t0_s[-1])/(height[-2]-height[-1])
return t0_n,t0_s,stat_n,stat_s
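# Illustrative usage sketch for static_stability: the grid sizes, uniform areal
# weights and the linear theta profile below are toy assumptions chosen only for
# demonstration; they are not part of the library's own test data.
def _example_static_stability():
    kmax, nlat, nlon = 5, 9, 12
    height = np.arange(kmax) * 1000.                  # equally spaced z-grid [m]
    area = np.ones((nlat, nlon))                      # placeholder areal elements
    theta = 300. + 0.01 * height[:, np.newaxis, np.newaxis] + np.zeros((kmax, nlat, nlon))
    t0_n, t0_s, stat_n, stat_s = static_stability(height, area, theta)
    # theta increases at 0.01 K/m here, so both stability profiles come out ~0.01 K/m.
    return t0_n, t0_s, stat_n, stat_s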
def compute_qgpv_givenvort(omega,nlat,nlon,kmax,unih,ylat,avort,potential_temp,
t0_cn,t0_cs,stat_cn,stat_cs,nlat_s=None,scale_height=7000.):
"""
The function "compute_qgpv_givenvort" computes the quasi-geostrophic potential
vorticity based on the absolute vorticity, potential temperature and static
stability given.
Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues
Parameters
----------
omega : float, optional
Rotation rate of the planet.
nlat : int
Latitudinal dimension of the latitude grid.
nlon : int
Longitudinal dimension of the longitude grid.
kmax : int
Vertical dimension of the height grid.
unih : sequence or array_like
Numpy array of height in [meters]; dimension = (kmax)
ylat : sequence or array_like
Numpy array of latitudes in [degrees]; dimension = (nlat)
avort : ndarray
Three-dimension numpy array of absolute vorticity (i.e. relative vorticity
+ 2*Omega*sin(lat)) in [1/s]; dimension = (kmax x nlat x nlon)
potential_temp : ndarray
Three-dimension numpy array of potential temperature in [K];
dimension = (kmax x nlat x nlon)
t0_cn : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Northern hemispheric domain with dimension = (kmax)
t0_cs : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Southern hemispheric domain with dimension = (kmax)
stat_cn : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric
domain with dimension = (kmax)
stat_cs : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric
domain with dimension = (kmax)
scale_height : float
Scale height of the atmosphere in [m] with default value 7000.
Returns
-------
QGPV : ndarray
Three-dimension numpy array of quasi-geostrophic potential vorticity;
dimension = (kmax x nlat x nlon)
dzdiv : ndarray
Three-dimension numpy array of the stretching term in QGPV;
dimension = (kmax x nlat x nlon)
"""
    if nlat_s is None:
nlat_s=nlat//2
clat = np.cos(ylat*pi/180.)
clat = np.abs(clat) # Just to avoid the negative value at poles
# --- Next, calculate PV ---
av2 = np.empty_like(potential_temp) # dv/d(lon)
av3 = np.empty_like(potential_temp) # du/d(lat)
qgpv = np.empty_like(potential_temp) # av1+av2+av3+dzdiv
av1 = np.ones((kmax,nlat,nlon)) * 2*omega*np.sin(ylat[np.newaxis,:,np.newaxis]*pi/180.)
# Calculate the z-divergence term
zdiv = np.empty_like(potential_temp)
dzdiv = np.empty_like(potential_temp)
for kk in range(kmax): # This is more efficient
zdiv[kk,:nlat_s,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,:nlat_s,:]-t0_cs[kk])/stat_cs[kk]
zdiv[kk,-nlat_s:,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,-nlat_s:,:]-t0_cn[kk])/stat_cn[kk]
dzdiv[1:kmax-1,:,:] = np.exp(unih[1:kmax-1,np.newaxis,np.newaxis]/scale_height)* \
(zdiv[2:kmax,:,:]-zdiv[0:kmax-2,:,:]) \
/(unih[2:kmax,np.newaxis,np.newaxis]-unih[0:kmax-2,np.newaxis,np.newaxis])
dzdiv[0,:,:] = exp(unih[0]/scale_height)*(zdiv[1,:,:]-zdiv[0,:,:])/ \
(unih[1,np.newaxis,np.newaxis]-unih[0,np.newaxis,np.newaxis])
dzdiv[kmax-1,:,:] = exp(unih[kmax-1]/scale_height)*(zdiv[kmax-1,:,:]-zdiv[kmax-2,:,:])/ \
(unih[kmax-1,np.newaxis,np.newaxis]-unih[kmax-2,np.newaxis,np.newaxis])
qgpv = avort+dzdiv * av1
return qgpv, dzdiv
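# Illustrative usage sketch chaining static_stability into compute_qgpv_givenvort.
# The synthetic fields, grid sizes and the rotation rate below are assumptions made
# only for demonstration (avort here contains planetary vorticity only).
def _example_compute_qgpv_givenvort():
    kmax, nlat, nlon = 5, 9, 12
    omega = 7.29e-5                                   # planetary rotation rate [1/s]
    unih = np.arange(kmax) * 1000.                    # pseudo-height grid [m]
    ylat = np.linspace(-90., 90., nlat)
    area = np.ones((nlat, nlon))
    theta = 300. + 0.01 * unih[:, np.newaxis, np.newaxis] + np.zeros((kmax, nlat, nlon))
    t0_n, t0_s, stat_n, stat_s = static_stability(unih, area, theta)
    avort = 2. * omega * np.sin(ylat[np.newaxis, :, np.newaxis] * pi / 180.) \
        * np.ones((kmax, nlat, nlon))
    qgpv, dzdiv = compute_qgpv_givenvort(omega, nlat, nlon, kmax, unih, ylat,
                                         avort, theta, t0_n, t0_s, stat_n, stat_s)
    return qgpv, dzdiv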
| [((58, 13, 58, 37), 'numpy.zeros', 'np.zeros', ({(58, 22, 58, 36): 'theta.shape[0]'}, {}), '(theta.shape[0])', True, 'import numpy as np\n'), ((59, 13, 59, 37), 'numpy.zeros', 'np.zeros', ({(59, 22, 59, 36): 'theta.shape[0]'}, {}), '(theta.shape[0])', True, 'import numpy as np\n'), ((71, 15, 71, 46), 'numpy.sum', 'np.sum', ({(71, 22, 71, 45): 'area_zonal_mean[-n_et:]'}, {}), '(area_zonal_mean[-n_et:])', True, 'import numpy as np\n'), ((72, 15, 72, 45), 'numpy.sum', 'np.sum', ({(72, 22, 72, 44): 'area_zonal_mean[:s_et]'}, {}), '(area_zonal_mean[:s_et])', True, 'import numpy as np\n'), ((147, 11, 147, 31), 'numpy.cos', 'np.cos', ({(147, 18, 147, 30): 'ylat * pi / 180.0'}, {}), '(ylat * pi / 180.0)', True, 'import numpy as np\n'), ((148, 11, 148, 23), 'numpy.abs', 'np.abs', ({(148, 18, 148, 22): 'clat'}, {}), '(clat)', True, 'import numpy as np\n'), ((151, 10, 151, 39), 'numpy.empty_like', 'np.empty_like', ({(151, 24, 151, 38): 'potential_temp'}, {}), '(potential_temp)', True, 'import numpy as np\n'), ((152, 10, 152, 39), 'numpy.empty_like', 'np.empty_like', ({(152, 24, 152, 38): 'potential_temp'}, {}), '(potential_temp)', True, 'import numpy as np\n'), ((153, 11, 153, 40), 'numpy.empty_like', 'np.empty_like', ({(153, 25, 153, 39): 'potential_temp'}, {}), '(potential_temp)', True, 'import numpy as np\n'), ((158, 11, 158, 40), 'numpy.empty_like', 'np.empty_like', ({(158, 25, 158, 39): 'potential_temp'}, {}), '(potential_temp)', True, 'import numpy as np\n'), ((159, 12, 159, 41), 'numpy.empty_like', 'np.empty_like', ({(159, 26, 159, 40): 'potential_temp'}, {}), '(potential_temp)', True, 'import numpy as np\n'), ((62, 21, 62, 43), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((67, 26, 67, 47), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((74, 11, 74, 82), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((75, 11, 75, 80), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((155, 46, 155, 91), 'numpy.sin', 'np.sin', ({(155, 53, 155, 90): '(ylat[(np.newaxis), :, (np.newaxis)] * pi / 180.0)'}, {}), '(ylat[(np.newaxis), :, (np.newaxis)] * pi / 180.0)', True, 'import numpy as np\n'), ((164, 26, 164, 83), 'numpy.exp', 'np.exp', ({(164, 33, 164, 82): '(unih[1:kmax - 1, (np.newaxis), (np.newaxis)] / scale_height)'}, {}), '(unih[1:kmax - 1, (np.newaxis), (np.newaxis)] / scale_height)', True, 'import numpy as np\n'), ((168, 19, 168, 44), 'math.exp', 'exp', ({(168, 23, 168, 43): '(unih[0] / scale_height)'}, {}), '(unih[0] / scale_height)', False, 'from math import pi, exp\n'), ((170, 24, 170, 54), 'math.exp', 'exp', ({(170, 28, 170, 53): '(unih[kmax - 1] / scale_height)'}, {}), '(unih[kmax - 1] / scale_height)', False, 'from math import pi, exp\n'), ((155, 10, 155, 35), 'numpy.ones', 'np.ones', ({(155, 18, 155, 34): '(kmax, nlat, nlon)'}, {}), '((kmax, nlat, nlon))', True, 'import numpy as np\n'), ((161, 29, 161, 56), 'math.exp', 'exp', ({(161, 33, 161, 55): '(-unih[kk] / scale_height)'}, {}), '(-unih[kk] / scale_height)', False, 'from math import pi, exp\n'), ((162, 30, 162, 57), 'math.exp', 'exp', ({(162, 34, 162, 56): '(-unih[kk] / scale_height)'}, {}), '(-unih[kk] / scale_height)', False, 'from math import pi, exp\n')] |
JennyLawrance/azure-cli | src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/_params.py | cb9ca4b694110806b31803a95f9f315b2fde6410 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import (get_location_type,
file_type,
get_resource_name_completion_list,
get_enum_type,
get_three_state_flag)
from azure.mgmt.iothub.models.iot_hub_client_enums import IotHubSku
from azure.mgmt.iothubprovisioningservices.models.iot_dps_client_enums import (IotDpsSku,
AllocationPolicy,
AccessRightsDescription)
from .custom import KeyType, SimpleAccessRights
from ._validators import validate_policy_permissions
from ._completers import get_device_id_completion_list
hub_name_type = CLIArgumentType(
completer=get_resource_name_completion_list('Microsoft.Devices/IotHubs'),
help='IoT Hub name.')
dps_name_type = CLIArgumentType(
options_list=['--dps-name'],
completer=get_resource_name_completion_list('Microsoft.Devices/ProvisioningServices'),
help='IoT Provisioning Service name')
def load_arguments(self, _): # pylint: disable=too-many-statements
# Arguments for IoT DPS
with self.argument_context('iot dps') as c:
c.argument('dps_name', dps_name_type, options_list=['--name', '-n'], id_part='name')
with self.argument_context('iot dps create') as c:
c.argument('location', get_location_type(self.cli_ctx),
help='Location of your IoT Provisioning Service. Default is the location of target resource group.')
c.argument('sku', arg_type=get_enum_type(IotDpsSku),
help='Pricing tier for the IoT provisioning service.')
c.argument('unit', help='Units in your IoT Provisioning Service.', type=int)
for subgroup in ['access-policy', 'linked-hub', 'certificate']:
with self.argument_context('iot dps {}'.format(subgroup)) as c:
c.argument('dps_name', options_list=['--dps-name'], id_part=None)
with self.argument_context('iot dps access-policy') as c:
c.argument('access_policy_name', options_list=['--access-policy-name', '--name', '-n'],
help='A friendly name for DPS access policy.')
with self.argument_context('iot dps access-policy create') as c:
c.argument('rights', options_list=['--rights', '-r'], nargs='+',
arg_type=get_enum_type(AccessRightsDescription),
help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.')
c.argument('primary_key', help='Primary SAS key value.')
c.argument('secondary_key', help='Secondary SAS key value.')
with self.argument_context('iot dps access-policy update') as c:
c.argument('rights', options_list=['--rights', '-r'], nargs='+',
arg_type=get_enum_type(AccessRightsDescription),
help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.')
c.argument('primary_key', help='Primary SAS key value.')
c.argument('secondary_key', help='Secondary SAS key value.')
with self.argument_context('iot dps linked-hub') as c:
c.argument('linked_hub', options_list=['--linked-hub'], help='Host name of linked IoT Hub.')
with self.argument_context('iot dps linked-hub create') as c:
c.argument('connection_string', help='Connection string of the IoT hub.')
c.argument('location', get_location_type(self.cli_ctx),
help='Location of the IoT hub.')
c.argument('apply_allocation_policy',
help='A boolean indicating whether to apply allocation policy to the IoT hub.',
arg_type=get_three_state_flag())
c.argument('allocation_weight', help='Allocation weight of the IoT hub.')
with self.argument_context('iot dps linked-hub update') as c:
c.argument('apply_allocation_policy',
help='A boolean indicating whether to apply allocation policy to the Iot hub.',
arg_type=get_three_state_flag())
c.argument('allocation_weight', help='Allocation weight of the IoT hub.')
with self.argument_context('iot dps allocation-policy update') as c:
c.argument('allocation_policy', options_list=['--policy', '-p'], arg_type=get_enum_type(AllocationPolicy),
help='Allocation policy for the IoT provisioning service.')
with self.argument_context('iot dps certificate') as c:
c.argument('certificate_path', options_list=['--path', '-p'], type=file_type,
completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.')
c.argument('certificate_name', options_list=['--certificate-name', '--name', '-n'],
help='A friendly name for the certificate.')
c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.')
# Arguments for IoT Hub
with self.argument_context('iot') as c:
c.argument('device_id', options_list=['--device-id', '-d'], help='Device Id.',
completer=get_device_id_completion_list)
with self.argument_context('iot hub') as c:
c.argument('hub_name', hub_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.')
for subgroup in ['consumer-group', 'policy', 'job', 'certificate']:
with self.argument_context('iot hub {}'.format(subgroup)) as c:
c.argument('hub_name', options_list=['--hub-name'])
with self.argument_context('iot device') as c:
c.argument('hub_name', hub_name_type)
with self.argument_context('iot hub certificate') as c:
c.argument('certificate_path', options_list=['--path', '-p'], type=file_type,
completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.')
c.argument('certificate_name', options_list=['--name', '-n'], help='A friendly name for the certificate.')
with self.argument_context('iot hub consumer-group') as c:
c.argument('consumer_group_name', options_list=['--name', '-n'], id_part='child_name_2',
help='Event hub consumer group name.')
c.argument('event_hub_name', id_part='child_name_1', help='Event hub endpoint name.')
with self.argument_context('iot hub policy') as c:
c.argument('policy_name', options_list=['--name', '-n'], id_part='child_name_1',
help='Shared access policy name.')
permission_values = ', '.join([x.value for x in SimpleAccessRights])
c.argument('permissions', nargs='*', validator=validate_policy_permissions, type=str.lower,
help='Permissions of shared access policy. Use space-separated list for multiple permissions. '
'Possible values: {}'.format(permission_values))
with self.argument_context('iot hub job') as c:
c.argument('job_id', id_part='child_name_1', help='Job Id.')
with self.argument_context('iot hub create') as c:
c.argument('hub_name', completer=None)
c.argument('location', get_location_type(self.cli_ctx),
help='Location of your IoT Hub. Default is the location of target resource group.')
c.argument('sku', arg_type=get_enum_type(IotHubSku),
help='Pricing tier for Azure IoT Hub. Default value is F1, which is free. '
'Note that only one free IoT hub instance is allowed in each '
'subscription. Exception will be thrown if free instances exceed one.')
c.argument('unit', help='Units in your IoT Hub.', type=int)
c.argument('partition_count', help='The number of partitions for device-to-cloud messages.', type=int)
with self.argument_context('iot hub show-connection-string') as c:
c.argument('policy_name', help='Shared access policy to use.')
c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.')
with self.argument_context('iot device create') as c:
c.argument('device_id', completer=None)
with self.argument_context('iot device create', arg_group='X.509 Certificate') as c:
c.argument('x509', action='store_true', help='Use X.509 certificate for device authentication.')
c.argument('primary_thumbprint', help='Primary X.509 certificate thumbprint to authenticate device.')
c.argument('secondary_thumbprint', help='Secondary X.509 certificate thumbprint to authenticate device.')
c.argument('valid_days', type=int, help='Number of days the generated self-signed X.509 certificate should be '
'valid for. Default validity is 365 days.')
c.argument('output_dir', help='Output directory for generated self-signed X.509 certificate. '
'Default is current working directory.')
with self.argument_context('iot device list') as c:
c.argument('top', help='Maximum number of device identities to return.', type=int)
with self.argument_context('iot device delete') as c:
c.argument('etag', help='ETag of the target device. It is used for the purpose of optimistic '
'concurrency. Delete operation will be performed only if the specified '
'ETag matches the value maintained by the server, indicating that the '
'device identity has not been modified since it was retrieved. Default '
'value is set to wildcard character (*) to force an unconditional '
'delete.')
with self.argument_context('iot device show-connection-string') as c:
c.argument('top', type=int, help='Maximum number of connection strings to return.')
c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.')
with self.argument_context('iot device message') as c:
c.argument('lock_token', help='Message lock token.')
with self.argument_context('iot device message send', arg_group='Messaging') as c:
c.argument('data', help='Device-to-cloud message body.')
c.argument('message_id', help='Device-to-cloud message Id.')
c.argument('correlation_id', help='Device-to-cloud message correlation Id.')
c.argument('user_id', help='Device-to-cloud message user Id.')
with self.argument_context('iot device message receive') as c:
c.argument('lock_timeout', type=int,
help='In case a message returned to this call, this specifies the amount of '
'time in seconds, the message will be invisible to other receive calls.')
with self.argument_context('iot device export') as c:
c.argument('blob_container_uri',
help='Blob Shared Access Signature URI with write access to a blob container.'
'This is used to output the status of the job and the results.')
c.argument('include_keys', action='store_true',
help='If set, keys are exported normally. Otherwise, keys are set to null in '
'export output.')
with self.argument_context('iot device import') as c:
c.argument('input_blob_container_uri',
help='Blob Shared Access Signature URI with read access to a blob container.'
'This blob contains the operations to be performed on the identity '
'registry ')
c.argument('output_blob_container_uri',
help='Blob Shared Access Signature URI with write access to a blob container.'
'This is used to output the status of the job and the results.')
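# Illustrative command lines that the registrations above enable (resource and
# policy names are placeholders, and exact option behaviour depends on the CLI version):
#   az iot hub create --name MyHub --resource-group MyGroup --sku S1 --unit 1
#   az iot dps create --name MyDps --resource-group MyGroup --sku S1 --unit 1
#   az iot dps linked-hub create --dps-name MyDps --resource-group MyGroup \
#       --connection-string <iot-hub-connection-string> --location westus
#   az iot device create --hub-name MyHub --device-id MyDevice --x509 --valid-days 30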
| [((25, 14, 25, 76), 'azure.cli.core.commands.parameters.get_resource_name_completion_list', 'get_resource_name_completion_list', ({(25, 48, 25, 75): '"""Microsoft.Devices/IotHubs"""'}, {}), "('Microsoft.Devices/IotHubs')", False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((30, 14, 30, 89), 'azure.cli.core.commands.parameters.get_resource_name_completion_list', 'get_resource_name_completion_list', ({(30, 48, 30, 88): '"""Microsoft.Devices/ProvisioningServices"""'}, {}), "('Microsoft.Devices/ProvisioningServices')", False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((40, 31, 40, 62), 'azure.cli.core.commands.parameters.get_location_type', 'get_location_type', ({(40, 49, 40, 61): 'self.cli_ctx'}, {}), '(self.cli_ctx)', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((73, 31, 73, 62), 'azure.cli.core.commands.parameters.get_location_type', 'get_location_type', ({(73, 49, 73, 61): 'self.cli_ctx'}, {}), '(self.cli_ctx)', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((136, 31, 136, 62), 'azure.cli.core.commands.parameters.get_location_type', 'get_location_type', ({(136, 49, 136, 61): 'self.cli_ctx'}, {}), '(self.cli_ctx)', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((42, 35, 42, 59), 'azure.cli.core.commands.parameters.get_enum_type', 'get_enum_type', ({(42, 49, 42, 58): 'IotDpsSku'}, {}), '(IotDpsSku)', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((56, 28, 56, 66), 'azure.cli.core.commands.parameters.get_enum_type', 'get_enum_type', ({(56, 42, 56, 65): 'AccessRightsDescription'}, {}), '(AccessRightsDescription)', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((63, 28, 63, 66), 'azure.cli.core.commands.parameters.get_enum_type', 'get_enum_type', ({(63, 42, 63, 65): 'AccessRightsDescription'}, {}), '(AccessRightsDescription)', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((77, 28, 77, 50), 'azure.cli.core.commands.parameters.get_three_state_flag', 'get_three_state_flag', ({}, {}), '()', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((83, 28, 83, 50), 'azure.cli.core.commands.parameters.get_three_state_flag', 'get_three_state_flag', ({}, {}), '()', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((87, 82, 87, 113), 'azure.cli.core.commands.parameters.get_enum_type', 'get_enum_type', ({(87, 96, 87, 112): 'AllocationPolicy'}, {}), '(AllocationPolicy)', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, 
get_enum_type, get_three_state_flag\n'), ((92, 29, 92, 61), 'argcomplete.completers.FilesCompleter', 'FilesCompleter', ({(92, 44, 92, 60): "['.cer', '.pem']"}, {}), "(['.cer', '.pem'])", False, 'from argcomplete.completers import FilesCompleter\n'), ((115, 29, 115, 61), 'argcomplete.completers.FilesCompleter', 'FilesCompleter', ({(115, 44, 115, 60): "['.cer', '.pem']"}, {}), "(['.cer', '.pem'])", False, 'from argcomplete.completers import FilesCompleter\n'), ((138, 35, 138, 59), 'azure.cli.core.commands.parameters.get_enum_type', 'get_enum_type', ({(138, 49, 138, 58): 'IotHubSku'}, {}), '(IotHubSku)', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((147, 40, 147, 62), 'azure.cli.core.commands.parameters.get_enum_type', 'get_enum_type', ({(147, 54, 147, 61): 'KeyType'}, {}), '(KeyType)', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n'), ((174, 40, 174, 62), 'azure.cli.core.commands.parameters.get_enum_type', 'get_enum_type', ({(174, 54, 174, 61): 'KeyType'}, {}), '(KeyType)', False, 'from azure.cli.core.commands.parameters import get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag\n')] |
nhsconnect/prm-practice-migration-dashboard | metrics-calculator/tests/integration/test_s3.py | 40c8760f409834d05bde4fb015aa5f8765acaa82 | import boto3
import gzip
from moto import mock_s3
import pytest
import os
from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist
from tests.builders.file import build_gzip_csv
@pytest.fixture(scope='function')
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
os.environ['AWS_SECURITY_TOKEN'] = 'testing'
os.environ['AWS_SESSION_TOKEN'] = 'testing'
os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
@pytest.fixture(scope='function')
def s3(aws_credentials):
with mock_s3():
yield boto3.resource('s3', region_name='us-east-1')
@mock_s3
def test_read_object_s3_returns_object_content(s3):
bucket = s3.create_bucket(Bucket="test_bucket")
s3_object = bucket.Object("test_object.csv.gz")
gzipped_content = build_gzip_csv(
header=["id", "message", "comment"],
rows=[["123", "A message", "A comment"], [
"321", "Another message", "Another comment"]],
)
s3_object.put(
Body=gzipped_content
)
expected = "id,message,comment\n123,A message,A comment\n321,Another message,Another comment"
csv_stream = read_object_s3(s3, "s3://test_bucket/test_object.csv.gz")
with gzip.open(csv_stream, mode="rt") as f:
actual = f.read()
assert actual == expected
@mock_s3
def test_write_object_s3_writes_object_content(s3):
s3.create_bucket(Bucket="test_bucket")
json_string = b'{"fruit": "mango"}'
write_object_s3(s3, "s3://test_bucket/test_object.json", json_string)
s3_object_response = s3.Object("test_bucket", "test_object.json").get()
assert s3_object_response["Body"].read() == json_string
@mock_s3
def test_write_object_s3_writes_object_content_with_metadata(s3):
s3.create_bucket(Bucket="test_bucket")
json_string = b'{"fruit": "mango"}'
metadata = {
"start_date": "start-date",
"end_date": "end-date"
}
write_object_s3(s3, "s3://test_bucket/test_object.json", json_string, metadata)
s3_object_response = s3.Object("test_bucket", "test_object.json").get()
assert s3_object_response["Metadata"] == metadata
@mock_s3
def test_objects_exist_returns_true_when_all_objects_exist(s3):
s3.create_bucket(Bucket="test_bucket")
object_one = "object-one"
object_two = "object-two"
write_object_s3(s3, f"s3://test_bucket/{object_one}", 'object-one-content')
write_object_s3(s3, f"s3://test_bucket/{object_two}", 'object-two-content')
result = objects_exist(s3, "test_bucket", [object_one, object_two])
assert result
@mock_s3
def test_objects_exist_returns_false_when_only_one_object_exists(s3):
s3.create_bucket(Bucket="test_bucket")
object_one = "object-one"
object_two = "object-two"
write_object_s3(s3, f"s3://test_bucket/{object_one}", 'object-one-content')
result = objects_exist(s3, "test_bucket", [object_one, object_two])
assert not result
@mock_s3
def test_objects_exist_returns_false_when_no_objects_exist(s3):
s3.create_bucket(Bucket="test_bucket")
object_one = "object-one"
object_two = "object-two"
result = objects_exist(s3, "test_bucket", [object_one, object_two])
assert not result
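# Illustrative sketch of the same chalicelib.s3 helpers outside the test fixtures.
# Bucket and key names are placeholders; credentials and region handling are
# environment-specific and not shown here.
def _example_s3_helpers():
    s3_resource = boto3.resource("s3")
    write_object_s3(s3_resource, "s3://my-bucket/report.json", b'{"status": "ok"}')
    if objects_exist(s3_resource, "my-bucket", ["report.json"]):
        return read_object_s3(s3_resource, "s3://my-bucket/report.json").read()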
| [((11, 1, 11, 33), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((21, 1, 21, 33), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((31, 22, 35, 5), 'tests.builders.file.build_gzip_csv', 'build_gzip_csv', (), '', False, 'from tests.builders.file import build_gzip_csv\n'), ((42, 17, 42, 74), 'chalicelib.s3.read_object_s3', 'read_object_s3', ({(42, 32, 42, 34): 's3', (42, 36, 42, 73): '"""s3://test_bucket/test_object.csv.gz"""'}, {}), "(s3, 's3://test_bucket/test_object.csv.gz')", False, 'from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist\n'), ((55, 4, 55, 73), 'chalicelib.s3.write_object_s3', 'write_object_s3', ({(55, 20, 55, 22): 's3', (55, 24, 55, 59): '"""s3://test_bucket/test_object.json"""', (55, 61, 55, 72): 'json_string'}, {}), "(s3, 's3://test_bucket/test_object.json', json_string)", False, 'from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist\n'), ((70, 4, 70, 83), 'chalicelib.s3.write_object_s3', 'write_object_s3', ({(70, 20, 70, 22): 's3', (70, 24, 70, 59): '"""s3://test_bucket/test_object.json"""', (70, 61, 70, 72): 'json_string', (70, 74, 70, 82): 'metadata'}, {}), "(s3, 's3://test_bucket/test_object.json', json_string, metadata)", False, 'from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist\n'), ((83, 4, 83, 79), 'chalicelib.s3.write_object_s3', 'write_object_s3', ({(83, 20, 83, 22): 's3', (83, 24, 83, 56): 'f"""s3://test_bucket/{object_one}"""', (83, 58, 83, 78): '"""object-one-content"""'}, {}), "(s3, f's3://test_bucket/{object_one}', 'object-one-content')", False, 'from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist\n'), ((84, 4, 84, 79), 'chalicelib.s3.write_object_s3', 'write_object_s3', ({(84, 20, 84, 22): 's3', (84, 24, 84, 56): 'f"""s3://test_bucket/{object_two}"""', (84, 58, 84, 78): '"""object-two-content"""'}, {}), "(s3, f's3://test_bucket/{object_two}', 'object-two-content')", False, 'from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist\n'), ((86, 13, 86, 71), 'chalicelib.s3.objects_exist', 'objects_exist', ({(86, 27, 86, 29): 's3', (86, 31, 86, 44): '"""test_bucket"""', (86, 46, 86, 70): '[object_one, object_two]'}, {}), "(s3, 'test_bucket', [object_one, object_two])", False, 'from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist\n'), ((98, 4, 98, 79), 'chalicelib.s3.write_object_s3', 'write_object_s3', ({(98, 20, 98, 22): 's3', (98, 24, 98, 56): 'f"""s3://test_bucket/{object_one}"""', (98, 58, 98, 78): '"""object-one-content"""'}, {}), "(s3, f's3://test_bucket/{object_one}', 'object-one-content')", False, 'from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist\n'), ((100, 13, 100, 71), 'chalicelib.s3.objects_exist', 'objects_exist', ({(100, 27, 100, 29): 's3', (100, 31, 100, 44): '"""test_bucket"""', (100, 46, 100, 70): '[object_one, object_two]'}, {}), "(s3, 'test_bucket', [object_one, object_two])", False, 'from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist\n'), ((112, 13, 112, 71), 'chalicelib.s3.objects_exist', 'objects_exist', ({(112, 27, 112, 29): 's3', (112, 31, 112, 44): '"""test_bucket"""', (112, 46, 112, 70): '[object_one, object_two]'}, {}), "(s3, 'test_bucket', [object_one, object_two])", False, 'from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist\n'), ((23, 9, 23, 18), 'moto.mock_s3', 'mock_s3', ({}, {}), '()', False, 'from moto import mock_s3\n'), ((44, 9, 44, 41), 'gzip.open', 'gzip.open', (), '', False, 'import 
gzip\n'), ((24, 14, 24, 59), 'boto3.resource', 'boto3.resource', (), '', False, 'import boto3\n')] |
kaldap/image-analogies | image_analogy/losses/patch_matcher.py | 0867aedfae7dfc0d27c42805a3d07f7b9eb7eaa2 | import numpy as np
import scipy.interpolate
import scipy.ndimage
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d
def _calc_patch_grid_dims(shape, patch_size, patch_stride):
x_w, x_h, x_c = shape
num_rows = 1 + (x_h - patch_size) // patch_stride
num_cols = 1 + (x_w - patch_size) // patch_stride
return num_rows, num_cols
def make_patch_grid(x, patch_size, patch_stride=1):
'''x shape: (num_channels, rows, cols)'''
x = x.transpose(2, 1, 0)
patches = extract_patches_2d(x, (patch_size, patch_size))
x_w, x_h, x_c = x.shape
num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
patches = patches.transpose((0, 1, 4, 2, 3))
#patches = np.rollaxis(patches, -1, 2)
return patches
def combine_patches_grid(in_patches, out_shape):
'''Reconstruct an image from these `patches`
input shape: (rows, cols, channels, patch_row, patch_col)
'''
num_rows, num_cols = in_patches.shape[:2]
num_channels = in_patches.shape[-3]
patch_size = in_patches.shape[-1]
num_patches = num_rows * num_cols
in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size)) # (patches, channels, pr, pc)
in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels)
recon = reconstruct_from_patches_2d(in_patches, out_shape)
return recon.transpose(2, 1, 0).astype(np.float32)
class PatchMatcher(object):
'''A matcher of image patches inspired by the PatchMatch algorithm.
image shape: (width, height, channels)
'''
def __init__(self, input_shape, target_img, patch_size=1, patch_stride=1, jump_size=0.5,
num_propagation_steps=5, num_random_steps=5, random_max_radius=1.0, random_scale=0.5):
self.input_shape = input_shape
self.patch_size = patch_size
self.patch_stride = patch_stride
self.jump_size = jump_size
self.num_propagation_steps = num_propagation_steps
self.num_random_steps = num_random_steps
self.random_max_radius = random_max_radius
self.random_scale = random_scale
self.num_input_rows, self.num_input_cols = _calc_patch_grid_dims(input_shape, patch_size, patch_stride)
self.target_patches = make_patch_grid(target_img, patch_size)
self.target_patches_normed = self.normalize_patches(self.target_patches)
self.coords = np.random.uniform(0.0, 1.0, # TODO: switch to pixels
(2, self.num_input_rows, self.num_input_cols))# * [[[self.num_input_rows]],[[self.num_input_cols]]]
self.similarity = np.zeros(input_shape[:2:-1], dtype=np.float32)
self.min_propagration_row = 1.0 / self.num_input_rows
self.min_propagration_col = 1.0 / self.num_input_cols
self.delta_row = np.array([[[self.min_propagration_row]], [[0.0]]])
self.delta_col = np.array([[[0.0]], [[self.min_propagration_col]]])
def update(self, input_img, reverse_propagation=False):
input_patches = self.get_patches_for(input_img)
self.update_with_patches(self.normalize_patches(input_patches), reverse_propagation=reverse_propagation)
def update_with_patches(self, input_patches, reverse_propagation=False):
self._propagate(input_patches, reverse_propagation=reverse_propagation)
self._random_update(input_patches)
def get_patches_for(self, img):
        return make_patch_grid(img, self.patch_size)
def normalize_patches(self, patches):
norm = np.sqrt(np.sum(np.square(patches), axis=(2, 3, 4), keepdims=True))
return patches / norm
def _propagate(self, input_patches, reverse_propagation=False):
if reverse_propagation:
roll_direction = 1
else:
roll_direction = -1
sign = float(roll_direction)
for step_i in range(self.num_propagation_steps):
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 1) + self.delta_row * sign)
coords_row, similarity_row = self.eval_state(new_coords, input_patches)
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 2) + self.delta_col * sign)
coords_col, similarity_col = self.eval_state(new_coords, input_patches)
self.coords, self.similarity = self.take_best(coords_row, similarity_row, coords_col, similarity_col)
def _random_update(self, input_patches):
for alpha in range(1, self.num_random_steps + 1): # NOTE this should actually stop when the move is < 1
new_coords = self.clip_coords(self.coords + np.random.uniform(-self.random_max_radius, self.random_max_radius, self.coords.shape) * self.random_scale ** alpha)
self.coords, self.similarity = self.eval_state(new_coords, input_patches)
def eval_state(self, new_coords, input_patches):
new_similarity = self.patch_similarity(input_patches, new_coords)
delta_similarity = new_similarity - self.similarity
coords = np.where(delta_similarity > 0, new_coords, self.coords)
best_similarity = np.where(delta_similarity > 0, new_similarity, self.similarity)
return coords, best_similarity
def take_best(self, coords_a, similarity_a, coords_b, similarity_b):
delta_similarity = similarity_a - similarity_b
best_coords = np.where(delta_similarity > 0, coords_a, coords_b)
best_similarity = np.where(delta_similarity > 0, similarity_a, similarity_b)
return best_coords, best_similarity
def patch_similarity(self, source, coords):
'''Check the similarity of the patches specified in coords.'''
target_vals = self.lookup_coords(self.target_patches_normed, coords)
err = source * target_vals
return np.sum(err, axis=(2, 3, 4))
def clip_coords(self, coords):
# TODO: should this all be in pixel space?
coords = np.clip(coords, 0.0, 1.0)
return coords
def lookup_coords(self, x, coords):
x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
return x[i_coords[0], i_coords[1]]
def get_reconstruction(self, patches=None, combined=None):
if combined is not None:
patches = make_patch_grid(combined, self.patch_size)
if patches is None:
patches = self.target_patches
patches = self.lookup_coords(patches, self.coords)
recon = combine_patches_grid(patches, self.input_shape)
return recon
def scale(self, new_shape, new_target_img):
'''Create a new matcher of the given shape and replace its
state with a scaled up version of the current matcher's state.
'''
new_matcher = PatchMatcher(new_shape, new_target_img, patch_size=self.patch_size,
patch_stride=self.patch_stride, jump_size=self.jump_size,
num_propagation_steps=self.num_propagation_steps,
num_random_steps=self.num_random_steps,
random_max_radius=self.random_max_radius,
random_scale=self.random_scale)
new_matcher.coords = congrid(self.coords, new_matcher.coords.shape, method='neighbour')
new_matcher.similarity = congrid(self.similarity, new_matcher.coords.shape, method='neighbour')
return new_matcher
def congrid(a, newdims, method='linear', centre=False, minusone=False):
'''Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
as IDL''s congrid routine, which apparently originally came from a VAX/VMS
routine of the same name.
method:
neighbour - closest value from original data
nearest and linear - uses n x 1-D interpolations using
scipy.interpolate.interp1d
(see Numerical Recipes for validity of use of n 1-D interpolations)
spline - uses ndimage.map_coordinates
centre:
True - interpolation points are at the centres of the bins
False - points are at the front edge of the bin
minusone:
For example- inarray.shape = (i,j) & new dimensions = (x,y)
False - inarray is resampled by factors of (i/x) * (j/y)
True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
This prevents extrapolation one element beyond bounds of input array.
'''
    if a.dtype not in [np.float64, np.float32]:
        a = a.astype(float)
    m1 = int(minusone)
    ofs = int(centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print("[congrid] dimensions error. " \
"This routine currently only support " \
"rebinning to the same number of dimensions.")
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
            base = np.indices(newdims.astype(int))[i]
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
        newa = a[tuple(cd)]
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
        olddims = [np.arange(i, dtype=float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
        trorder = [ndims - 1] + list(range( ndims - 1 ))
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
        newcoords_dims = list(range(newcoords.ndim))
        # make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print("Congrid error: Unrecognized interpolation type.\n", \
"Currently only \'neighbour\', \'nearest\',\'linear\',", \
"and \'spline\' are supported.")
return None
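# Illustrative usage sketch for congrid with toy shapes: nearest-neighbour
# upsampling of a 3x4 array onto a 6x8 grid. minusone=True keeps the sampling
# coordinates within the bounds of the source array when upsampling.
def _example_congrid_usage():
    a = np.arange(12, dtype=float).reshape(3, 4)
    return congrid(a, (6, 8), method='neighbour', minusone=True)   # shape (6, 8)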
if __name__ == '__main__':
import sys
import time
from scipy.misc import imsave
from image_analogy.img_utils import load_image, preprocess_image, deprocess_image
content_image_path, style_image_path, output_prefix = sys.argv[1:]
jump_size = 1.0
num_steps = 7
patch_size = 1
patch_stride = 1
feat_chans = 512
feat_style_shape = (feat_chans, 12, 18)
feat_style = np.random.uniform(0.0, 1.0, feat_style_shape)
feat_in_shape = (feat_chans, 17, 10)
feat_in = np.random.uniform(0.0, 1.0, feat_in_shape)
matcher = PatchMatcher(feat_in_shape[::-1], feat_style, patch_size=patch_size)
feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
for i in range(num_steps):
matcher.update_with_patches(feat_in_normed)
r = matcher.get_reconstruction()
content_img_img = load_image(content_image_path)
content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1]
content_img = preprocess_image(content_img_img, content_n_cols, content_n_rows)[0]#.transpose((2,1,0))
style_img = load_image(style_image_path)
style_n_channels, style_n_rows, style_n_cols = content_img_img.shape[::-1]
style_img = preprocess_image(
load_image(style_image_path), style_n_cols, style_n_rows)[0]#.transpose((2,1,0))
pg = make_patch_grid(content_img, patch_size)
result = combine_patches_grid(pg, content_img.shape[::-1])
outimg = deprocess_image(result, contrast_percent=0)
imsave(output_prefix + '_bestre.png', outimg)
# # #
matcher = PatchMatcher((content_n_cols, content_n_rows, content_n_channels), style_img, patch_size=patch_size)
for i in range(num_steps):
start = time.time()
matcher.update(content_img, reverse_propagation=bool(i % 2))
print(matcher.similarity.min(), matcher.similarity.max(), matcher.similarity.mean())
end = time.time()
#print end-start
start = time.time()
result = matcher.get_reconstruction(patches=matcher.target_patches)
print(result.shape)
end = time.time()
print(end-start)
outimg = deprocess_image(result, contrast_percent=0)
# # imsave takes (rows, cols, channels)
imsave(output_prefix + '_best.png', outimg)
| [((17, 14, 17, 61), 'sklearn.feature_extraction.image.extract_patches_2d', 'extract_patches_2d', ({(17, 33, 17, 34): 'x', (17, 36, 17, 60): '(patch_size, patch_size)'}, {}), '(x, (patch_size, patch_size))', False, 'from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d\n'), ((35, 17, 35, 92), 'numpy.reshape', 'np.reshape', ({(35, 28, 35, 38): 'in_patches', (35, 40, 35, 91): '(num_patches, num_channels, patch_size, patch_size)'}, {}), '(in_patches, (num_patches, num_channels, patch_size, patch_size))', True, 'import numpy as np\n'), ((36, 17, 36, 55), 'numpy.transpose', 'np.transpose', ({(36, 30, 36, 40): 'in_patches', (36, 42, 36, 54): '(0, 2, 3, 1)'}, {}), '(in_patches, (0, 2, 3, 1))', True, 'import numpy as np\n'), ((37, 12, 37, 62), 'sklearn.feature_extraction.image.reconstruct_from_patches_2d', 'reconstruct_from_patches_2d', ({(37, 40, 37, 50): 'in_patches', (37, 52, 37, 61): 'out_shape'}, {}), '(in_patches, out_shape)', False, 'from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d\n'), ((184, 10, 184, 29), 'numpy.array', 'np.array', ({(184, 20, 184, 27): 'a.shape'}, {}), '(a.shape)', True, 'import numpy as np\n'), ((191, 14, 191, 48), 'numpy.asarray', 'np.asarray', (), '', True, 'import numpy as np\n'), ((271, 17, 271, 62), 'numpy.random.uniform', 'np.random.uniform', ({(271, 35, 271, 38): '0.0', (271, 40, 271, 43): '1.0', (271, 45, 271, 61): 'feat_style_shape'}, {}), '(0.0, 1.0, feat_style_shape)', True, 'import numpy as np\n'), ((273, 14, 273, 56), 'numpy.random.uniform', 'np.random.uniform', ({(273, 32, 273, 35): '0.0', (273, 37, 273, 40): '1.0', (273, 42, 273, 55): 'feat_in_shape'}, {}), '(0.0, 1.0, feat_in_shape)', True, 'import numpy as np\n'), ((280, 22, 280, 52), 'image_analogy.img_utils.load_image', 'load_image', ({(280, 33, 280, 51): 'content_image_path'}, {}), '(content_image_path)', False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((283, 16, 283, 44), 'image_analogy.img_utils.load_image', 'load_image', ({(283, 27, 283, 43): 'style_image_path'}, {}), '(style_image_path)', False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((289, 13, 289, 56), 'image_analogy.img_utils.deprocess_image', 'deprocess_image', (), '', False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((290, 4, 290, 49), 'scipy.misc.imsave', 'imsave', ({(290, 11, 290, 40): "(output_prefix + '_bestre.png')", (290, 42, 290, 48): 'outimg'}, {}), "(output_prefix + '_bestre.png', outimg)", False, 'from scipy.misc import imsave\n'), ((300, 12, 300, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((303, 10, 303, 21), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((305, 13, 305, 56), 'image_analogy.img_utils.deprocess_image', 'deprocess_image', (), '', False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((307, 4, 307, 47), 'scipy.misc.imsave', 'imsave', ({(307, 11, 307, 38): "(output_prefix + '_best.png')", (307, 40, 307, 46): 'outimg'}, {}), "(output_prefix + '_best.png', outimg)", False, 'from scipy.misc import imsave\n'), ((59, 22, 60, 58), 'numpy.random.uniform', 'np.random.uniform', ({(59, 40, 59, 43): '0.0', (59, 45, 59, 48): '1.0', (60, 12, 60, 57): '(2, self.num_input_rows, self.num_input_cols)'}, {}), '(0.0, 1.0, (2, self.num_input_rows, self.num_input_cols))', True, 'import numpy as np\n'), ((61, 26, 61, 72), 
'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((64, 25, 64, 75), 'numpy.array', 'np.array', ({(64, 34, 64, 74): '[[[self.min_propagration_row]], [[0.0]]]'}, {}), '([[[self.min_propagration_row]], [[0.0]]])', True, 'import numpy as np\n'), ((65, 25, 65, 75), 'numpy.array', 'np.array', ({(65, 34, 65, 74): '[[[0.0]], [[self.min_propagration_col]]]'}, {}), '([[[0.0]], [[self.min_propagration_col]]])', True, 'import numpy as np\n'), ((103, 17, 103, 72), 'numpy.where', 'np.where', ({(103, 26, 103, 46): 'delta_similarity > 0', (103, 48, 103, 58): 'new_coords', (103, 60, 103, 71): 'self.coords'}, {}), '(delta_similarity > 0, new_coords, self.coords)', True, 'import numpy as np\n'), ((104, 26, 104, 89), 'numpy.where', 'np.where', ({(104, 35, 104, 55): 'delta_similarity > 0', (104, 57, 104, 71): 'new_similarity', (104, 73, 104, 88): 'self.similarity'}, {}), '(delta_similarity > 0, new_similarity, self.similarity)', True, 'import numpy as np\n'), ((109, 22, 109, 72), 'numpy.where', 'np.where', ({(109, 31, 109, 51): 'delta_similarity > 0', (109, 53, 109, 61): 'coords_a', (109, 63, 109, 71): 'coords_b'}, {}), '(delta_similarity > 0, coords_a, coords_b)', True, 'import numpy as np\n'), ((110, 26, 110, 84), 'numpy.where', 'np.where', ({(110, 35, 110, 55): 'delta_similarity > 0', (110, 57, 110, 69): 'similarity_a', (110, 71, 110, 83): 'similarity_b'}, {}), '(delta_similarity > 0, similarity_a, similarity_b)', True, 'import numpy as np\n'), ((117, 15, 117, 42), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((121, 17, 121, 42), 'numpy.clip', 'np.clip', ({(121, 25, 121, 31): 'coords', (121, 33, 121, 36): '0.0', (121, 38, 121, 41): '1.0'}, {}), '(coords, 0.0, 1.0)', True, 'import numpy as np\n'), ((282, 18, 282, 83), 'image_analogy.img_utils.preprocess_image', 'preprocess_image', ({(282, 35, 282, 50): 'content_img_img', (282, 52, 282, 66): 'content_n_cols', (282, 68, 282, 82): 'content_n_rows'}, {}), '(content_img_img, content_n_cols, content_n_rows)', False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((295, 16, 295, 27), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((298, 14, 298, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((125, 33, 125, 60), 'numpy.expand_dims', 'np.expand_dims', ({(125, 48, 125, 55): 'x.shape', (125, 57, 125, 59): '-1'}, {}), '(x.shape, -1)', True, 'import numpy as np\n'), ((286, 8, 286, 36), 'image_analogy.img_utils.load_image', 'load_image', ({(286, 19, 286, 35): 'style_image_path'}, {}), '(style_image_path)', False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((79, 30, 79, 48), 'numpy.square', 'np.square', ({(79, 40, 79, 47): 'patches'}, {}), '(patches)', True, 'import numpy as np\n'), ((126, 19, 126, 55), 'numpy.round', 'np.round', ({(126, 28, 126, 54): 'coords * (x_shape[:2] - 1)'}, {}), '(coords * (x_shape[:2] - 1))', True, 'import numpy as np\n'), ((196, 19, 196, 38), 'numpy.indices', 'np.indices', ({(196, 30, 196, 37): 'newdims'}, {}), '(newdims)', True, 'import numpy as np\n'), ((206, 19, 206, 42), 'numpy.arange', 'np.arange', ({(206, 30, 206, 40): 'newdims[i]'}, {}), '(newdims[i])', True, 'import numpy as np\n'), ((210, 19, 210, 49), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((89, 42, 89, 81), 'numpy.roll', 'np.roll', ({(89, 50, 89, 61): 'self.coords', (89, 63, 89, 77): 'roll_direction', (89, 79, 89, 80): '1'}, {}), '(self.coords, roll_direction, 1)', True, 'import numpy as 
np\n'), ((91, 42, 91, 81), 'numpy.roll', 'np.roll', ({(91, 50, 91, 61): 'self.coords', (91, 63, 91, 77): 'roll_direction', (91, 79, 91, 80): '2'}, {}), '(self.coords, roll_direction, 2)', True, 'import numpy as np\n'), ((234, 31, 234, 49), 'numpy.rank', 'np.rank', ({(234, 39, 234, 48): 'newcoords'}, {}), '(newcoords)', True, 'import numpy as np\n'), ((97, 56, 97, 141), 'numpy.random.uniform', 'np.random.uniform', ({(97, 74, 97, 97): '-self.random_max_radius', (97, 99, 97, 121): 'self.random_max_radius', (97, 123, 97, 140): 'self.coords.shape'}, {}), '(-self.random_max_radius, self.random_max_radius, self.\n coords.shape)', True, 'import numpy as np\n'), ((199, 13, 199, 32), 'numpy.array', 'np.array', ({(199, 23, 199, 30): 'dimlist'}, {}), '(dimlist)', True, 'import numpy as np\n'), ((242, 18, 242, 33), 'numpy.asarray', 'np.asarray', ({(242, 29, 242, 32): 'old'}, {}), '(old)', True, 'import numpy as np\n')] |
desafinadude/muni-portal-backend | muni_portal/core/migrations/0030_remove_servicerequest_mobile_reference.py | 9ffc447194b8f29619585cd919f67d62062457a3 | # Generated by Django 2.2.10 on 2021-02-24 09:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0029_auto_20210224_0936'),
]
operations = [
migrations.RemoveField(
model_name='servicerequest',
name='mobile_reference',
),
]
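# Illustrative commands for applying or reverting this migration locally; the
# manage.py invocation and the 'core' app label follow standard Django usage:
#   python manage.py migrate core 0030_remove_servicerequest_mobile_reference
#   python manage.py migrate core 0029_auto_20210224_0936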
| [((13, 8, 16, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations\n')] |
noahshpak/ray | rllib/agents/ppo/tests/test_appo.py | edd783bc327760a4892ab89222ee551e42df15b9 | import unittest
import ray
import ray.rllib.agents.ppo as ppo
from ray.rllib.utils.test_utils import check_compute_single_action, \
framework_iterator
class TestAPPO(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init()
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_appo_compilation(self):
"""Test whether an APPOTrainer can be built with both frameworks."""
config = ppo.appo.DEFAULT_CONFIG.copy()
config["num_workers"] = 1
num_iterations = 2
for _ in framework_iterator(config, frameworks=("torch", "tf")):
_config = config.copy()
trainer = ppo.APPOTrainer(config=_config, env="CartPole-v0")
for i in range(num_iterations):
print(trainer.train())
check_compute_single_action(trainer)
_config = config.copy()
_config["vtrace"] = True
trainer = ppo.APPOTrainer(config=_config, env="CartPole-v0")
for i in range(num_iterations):
print(trainer.train())
check_compute_single_action(trainer)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| [((12, 8, 12, 18), 'ray.init', 'ray.init', ({}, {}), '()', False, 'import ray\n'), ((16, 8, 16, 22), 'ray.shutdown', 'ray.shutdown', ({}, {}), '()', False, 'import ray\n'), ((20, 17, 20, 47), 'ray.rllib.agents.ppo.appo.DEFAULT_CONFIG.copy', 'ppo.appo.DEFAULT_CONFIG.copy', ({}, {}), '()', True, 'import ray.rllib.agents.ppo as ppo\n'), ((24, 17, 24, 71), 'ray.rllib.utils.test_utils.framework_iterator', 'framework_iterator', (), '', False, 'from ray.rllib.utils.test_utils import check_compute_single_action, framework_iterator\n'), ((42, 13, 42, 42), 'pytest.main', 'pytest.main', ({(42, 25, 42, 41): "['-v', __file__]"}, {}), "(['-v', __file__])", False, 'import pytest\n'), ((26, 22, 26, 72), 'ray.rllib.agents.ppo.APPOTrainer', 'ppo.APPOTrainer', (), '', True, 'import ray.rllib.agents.ppo as ppo\n'), ((29, 12, 29, 48), 'ray.rllib.utils.test_utils.check_compute_single_action', 'check_compute_single_action', ({(29, 40, 29, 47): 'trainer'}, {}), '(trainer)', False, 'from ray.rllib.utils.test_utils import check_compute_single_action, framework_iterator\n'), ((33, 22, 33, 72), 'ray.rllib.agents.ppo.APPOTrainer', 'ppo.APPOTrainer', (), '', True, 'import ray.rllib.agents.ppo as ppo\n'), ((36, 12, 36, 48), 'ray.rllib.utils.test_utils.check_compute_single_action', 'check_compute_single_action', ({(36, 40, 36, 47): 'trainer'}, {}), '(trainer)', False, 'from ray.rllib.utils.test_utils import check_compute_single_action, framework_iterator\n')] |