repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-840k)
---|---|---|---|---|
bernease/whylogs-python | tests/unit/app/test_session.py | cfd2a2f71280537aae584cbd40a752fbe7da647b |
import pytest
from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config
from whylogs.app.config import SessionConfig
from whylogs.app.session import Session
from pandas import util
def test_get_global_session():
_session = None
session = get_or_create_session()
global_session = get_session()
assert session == global_session
def test_reset():
session = get_or_create_session()
reset_default_session()
global_session = get_session()
assert global_session.project is not None
def test_session_log_dataframe():
_session = None
session = session_from_config(SessionConfig(
"default-project", "default-pipeline", [], False
))
df = util.testing.makeDataFrame()
profile = session.log_dataframe(df)
assert session.logger() is not None
assert session.logger("default-project").dataset_name == "default-project"
def test_session_profile():
session = session_from_config(SessionConfig(
"default-project", "default-pipeline", [], False
))
df = util.testing.makeDataFrame()
profile = session.log_dataframe(df)
assert profile is not None
summary = profile.flat_summary()
flat_summary = summary['summary']
assert len(flat_summary) == 4
def test_profile_df():
session = get_or_create_session()
df = util.testing.makeDataFrame()
log_profile = session.log_dataframe(df)
profile = session.profile_dataframe(df)
assert log_profile.name == profile.name
assert log_profile.dataset_timestamp == profile.dataset_timestamp
assert log_profile.session_timestamp == profile.session_timestamp
assert len(profile.columns) == 4
assert len(log_profile.tags) == 1
assert len(profile.tags) == 2
def test_close_session():
session = get_or_create_session()
session.close()
assert session.is_active() == False
df = util.testing.makeDataFrame()
log_profile = session.log_dataframe(df)
assert log_profile == None
profile = session.profile_dataframe(df)
assert profile == None
profile = session.new_profile(df)
assert profile == None
with pytest.raises(RuntimeError):
session.logger()
def test_logger_cache():
_session = None
session = get_or_create_session()
with session.logger("cache-test", with_rotation_time="s") as logger:
logger.log({"name": 1})
session.close()
def test_remove_logger():
session = get_or_create_session()
session.logger("default-project")
with pytest.raises(KeyError):
session.remove_logger("test")
| [((12, 14, 12, 37), 'whylogs.app.session.get_or_create_session', 'get_or_create_session', ({}, {}), '()', False, 'from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config\n'), ((14, 21, 14, 34), 'whylogs.app.session.get_session', 'get_session', ({}, {}), '()', False, 'from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config\n'), ((21, 14, 21, 37), 'whylogs.app.session.get_or_create_session', 'get_or_create_session', ({}, {}), '()', False, 'from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config\n'), ((22, 4, 22, 27), 'whylogs.app.session.reset_default_session', 'reset_default_session', ({}, {}), '()', False, 'from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config\n'), ((23, 21, 23, 34), 'whylogs.app.session.get_session', 'get_session', ({}, {}), '()', False, 'from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config\n'), ((33, 9, 33, 37), 'pandas.util.testing.makeDataFrame', 'util.testing.makeDataFrame', ({}, {}), '()', False, 'from pandas import util\n'), ((46, 9, 46, 37), 'pandas.util.testing.makeDataFrame', 'util.testing.makeDataFrame', ({}, {}), '()', False, 'from pandas import util\n'), ((57, 14, 57, 37), 'whylogs.app.session.get_or_create_session', 'get_or_create_session', ({}, {}), '()', False, 'from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config\n'), ((58, 9, 58, 37), 'pandas.util.testing.makeDataFrame', 'util.testing.makeDataFrame', ({}, {}), '()', False, 'from pandas import util\n'), ((72, 14, 72, 37), 'whylogs.app.session.get_or_create_session', 'get_or_create_session', ({}, {}), '()', False, 'from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config\n'), ((75, 9, 75, 37), 'pandas.util.testing.makeDataFrame', 'util.testing.makeDataFrame', ({}, {}), '()', False, 'from pandas import util\n'), ((89, 14, 89, 37), 'whylogs.app.session.get_or_create_session', 'get_or_create_session', ({}, {}), '()', False, 'from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config\n'), ((96, 14, 96, 37), 'whylogs.app.session.get_or_create_session', 'get_or_create_session', ({}, {}), '()', False, 'from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config\n'), ((30, 34, 32, 5), 'whylogs.app.config.SessionConfig', 'SessionConfig', ({(31, 8, 31, 25): '"""default-project"""', (31, 27, 31, 45): '"""default-pipeline"""', (31, 47, 31, 49): '[]', (31, 51, 31, 56): 'False'}, {}), "('default-project', 'default-pipeline', [], False)", False, 'from whylogs.app.config import SessionConfig\n'), ((43, 34, 45, 5), 'whylogs.app.config.SessionConfig', 'SessionConfig', ({(44, 8, 44, 25): '"""default-project"""', (44, 27, 44, 45): '"""default-pipeline"""', (44, 47, 44, 49): '[]', (44, 51, 44, 56): 'False'}, {}), "('default-project', 'default-pipeline', [], False)", False, 'from whylogs.app.config import SessionConfig\n'), ((83, 9, 83, 36), 'pytest.raises', 'pytest.raises', ({(83, 23, 83, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((98, 9, 98, 32), 'pytest.raises', 'pytest.raises', ({(98, 23, 98, 31): 
'KeyError'}, {}), '(KeyError)', False, 'import pytest\n')] |
Bemesko/Intelligence-of-Home-GUI | Packages/constants.py | 4580d2d2a6b5f3509e2e0897fd0c9952711ccd2b | import enum
BASELINE = "baseline"
ENERGY = "energy"
MAX_PRICE = "max_price"
START_PRICE = "starting_price"
INCREMENT = "increment"
MIN_PRICE = "min_price"
MAX_LOT_SIZE = "max_lot_size_wh"
NAMESERVER_AGENT_AMOUNT = 3
ATTRIBUTE_LIST_LENGTH = 50
NEXT_ENERGY_CONSUMPTION = "next_energy_consumption"
NEXT_ENERGY_GENERATION = "next_energy_generation"
ENERGY_DIFFERENCE = "energy_difference"
ENERGY_MARKET_PRICE = "energy_market_price"
WANTED_ENERGY = "wanted_energy"
ENERGY_BUY_MAX_PRICE = "energy_buy_max_price"
ENERGY_BUY_STARTING_PRICE = "energy_buy_starting_price"
ENERGY_BUY_PRICE_INCREMENT = "energy_buy_price_increment"
ENERGY_SELL_MIN_PRICE = "energy_sell_min_price"
class buy_baseline(enum.Enum):
deficit = 0
all_energy = 1
infinite = 2
none = 3
class sell_baseline(enum.Enum):
surplus = 0
all_energy = 1
none = 2
| [] |
groundupnews/gu | target/tests.py | c7179ee3d058c8749d250d681032a76dc8d599d5 | from django.contrib.auth.models import User
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from target import models
from django.utils import timezone
# Create your tests here.
class URLSWork(TestCase):
@classmethod
def setUpTestData(cls):
target = models.Target()
target.letters = 'practical'
target.words = 'practical'
target.published = timezone.now()
target.number = 1
target.save()
def test_urls(self):
user = User.objects.create_user('admin', '[email protected]', 'abcde')
user.is_staff = True
user.is_active = True
user.is_superuser = True
user.save()
c = Client()
response = c.login(username='admin', password='abcde')
self.assertEqual(response, True)
url = reverse('target:list')
response = c.get(url)
self.assertEqual(response.status_code, 200)
target = models.Target.objects.all()[0]
url = reverse('target:detail', args=(target.number,))
response = c.get(url)
self.assertEqual(response.status_code, 200)
url = reverse('target:create')
response = c.post(url)
self.assertEqual(response.status_code, 200)
url = reverse('target:create_letters', args=('practical',))
response = c.post(url)
self.assertEqual(response.status_code, 200)
url = reverse('target:delete', args=(1,))
response = c.get(url)
self.assertEqual(response.status_code, 200)
| [((14, 17, 14, 32), 'target.models.Target', 'models.Target', ({}, {}), '()', False, 'from target import models\n'), ((17, 27, 17, 41), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((22, 15, 22, 78), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ({(22, 40, 22, 47): '"""admin"""', (22, 49, 22, 68): '"""[email protected]"""', (22, 70, 22, 77): '"""abcde"""'}, {}), "('admin', '[email protected]', 'abcde')", False, 'from django.contrib.auth.models import User\n'), ((27, 12, 27, 20), 'django.test.Client', 'Client', ({}, {}), '()', False, 'from django.test import Client\n'), ((30, 14, 30, 36), 'django.urls.reverse', 'reverse', ({(30, 22, 30, 35): '"""target:list"""'}, {}), "('target:list')", False, 'from django.urls import reverse\n'), ((34, 14, 34, 61), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((37, 14, 37, 38), 'django.urls.reverse', 'reverse', ({(37, 22, 37, 37): '"""target:create"""'}, {}), "('target:create')", False, 'from django.urls import reverse\n'), ((40, 14, 40, 67), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((43, 14, 43, 49), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((33, 17, 33, 44), 'target.models.Target.objects.all', 'models.Target.objects.all', ({}, {}), '()', False, 'from target import models\n')] |
julienduchesne/jenkinsapi | jenkinsapi/view.py | 369dc54a8d5bb1f4e985c647378b9e1e62c26961 | """
Module for jenkinsapi views
"""
import six
import logging
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.job import Job
from jenkinsapi.custom_exceptions import NotFound
log = logging.getLogger(__name__)
class View(JenkinsBase):
"""
View class
"""
def __init__(self, url, name, jenkins_obj):
self.name = name
self.jenkins_obj = jenkins_obj
JenkinsBase.__init__(self, url)
self.deleted = False
def __str__(self):
return self.name
def __getitem__(self, job_name):
assert isinstance(job_name, str)
api_url = self.python_api_url(self.get_job_url(job_name))
return Job(api_url, job_name, self.jenkins_obj)
def __contains__(self, job_name):
"""
True if view_name is the name of a defined view
"""
return job_name in self.keys()
def delete(self):
"""
Remove this view object
"""
url = "%s/doDelete" % self.baseurl
self.jenkins_obj.requester.post_and_confirm_status(url, data='')
self.jenkins_obj.poll()
self.deleted = True
def keys(self):
return self.get_job_dict().keys()
def iteritems(self):
it = six.iteritems(self.get_job_dict())
for name, url in it:
yield name, Job(url, name, self.jenkins_obj)
def values(self):
return [a[1] for a in self.iteritems()]
def items(self):
return [a for a in self.iteritems()]
def _get_jobs(self):
if 'jobs' in self._data:
for viewdict in self._data["jobs"]:
yield viewdict["name"], viewdict["url"]
def get_job_dict(self):
return dict(self._get_jobs())
def __len__(self):
return len(self.get_job_dict().keys())
def get_job_url(self, str_job_name):
if str_job_name in self:
return self.get_job_dict()[str_job_name]
else:
# noinspection PyUnboundLocalVariable
views_jobs = ", ".join(self.get_job_dict().keys())
raise NotFound("Job %s is not known, available jobs"
" in view are: %s" % (str_job_name, views_jobs))
def get_jenkins_obj(self):
return self.jenkins_obj
def add_job(self, str_job_name, job=None):
"""
Add job to a view
:param str_job_name: name of the job to be added
:param job: Job object to be added
:return: True if job has been added, False if job already exists or
job not known to Jenkins
"""
if not job:
if str_job_name in self.get_job_dict():
log.warning(
'Job %s is already in the view %s',
str_job_name, self.name)
return False
else:
# Since this call can be made from nested view,
# which doesn't have any jobs, we can miss existing job
# Thus let's create top level Jenkins and ask him
# http://jenkins:8080/view/CRT/view/CRT-FB/view/CRT-SCRT-1301/
top_jenkins = self.get_jenkins_obj().get_jenkins_obj_from_url(
self.baseurl.split('view/')[0])
if not top_jenkins.has_job(str_job_name):
log.error(
msg='Job "%s" is not known to Jenkins' %
str_job_name)
return False
else:
job = top_jenkins.get_job(str_job_name)
log.info(msg='Creating job %s in view %s' % (str_job_name, self.name))
url = '%s/addJobToView' % self.baseurl
params = {'name': str_job_name}
self.get_jenkins_obj().requester.post_and_confirm_status(
url,
data={},
params=params)
self.poll()
log.debug(msg='Job "%s" has been added to a view "%s"' %
(job.name, self.name))
return True
def _get_nested_views(self):
for viewdict in self._data.get("views", []):
yield viewdict["name"], viewdict["url"]
def get_nested_view_dict(self):
return dict(self._get_nested_views())
def get_config_xml_url(self):
return '%s/config.xml' % self.baseurl
def get_config(self):
"""
Return the config.xml from the view
"""
url = self.get_config_xml_url()
response = self.get_jenkins_obj().requester.get_and_confirm_status(url)
return response.text
def update_config(self, config):
"""
Update the config.xml to the view
"""
url = self.get_config_xml_url()
config = str(config) # cast unicode in case of Python 2
response = self.get_jenkins_obj().requester.post_url(
url, params={}, data=config)
return response.text
@property
def views(self):
return self.get_jenkins_obj().get_jenkins_obj_from_url(
self.baseurl).views
| [((12, 6, 12, 33), 'logging.getLogger', 'logging.getLogger', ({(12, 24, 12, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((24, 8, 24, 39), 'jenkinsapi.jenkinsbase.JenkinsBase.__init__', 'JenkinsBase.__init__', ({(24, 29, 24, 33): 'self', (24, 35, 24, 38): 'url'}, {}), '(self, url)', False, 'from jenkinsapi.jenkinsbase import JenkinsBase\n'), ((33, 15, 33, 55), 'jenkinsapi.job.Job', 'Job', ({(33, 19, 33, 26): 'api_url', (33, 28, 33, 36): 'job_name', (33, 38, 33, 54): 'self.jenkins_obj'}, {}), '(api_url, job_name, self.jenkins_obj)', False, 'from jenkinsapi.job import Job\n'), ((82, 18, 83, 75), 'jenkinsapi.custom_exceptions.NotFound', 'NotFound', ({(82, 27, 83, 74): "('Job %s is not known, available jobs in view are: %s' % (str_job_name,\n views_jobs))"}, {}), "('Job %s is not known, available jobs in view are: %s' % (\n str_job_name, views_jobs))", False, 'from jenkinsapi.custom_exceptions import NotFound\n'), ((57, 24, 57, 56), 'jenkinsapi.job.Job', 'Job', ({(57, 28, 57, 31): 'url', (57, 33, 57, 37): 'name', (57, 39, 57, 55): 'self.jenkins_obj'}, {}), '(url, name, self.jenkins_obj)', False, 'from jenkinsapi.job import Job\n')] |
jmarangola/cv-chess | core/vision/collection.py | c1bf1754b622e76bc2bc92276b96760c321a8bd9 | """
Autonomous dataset collection of data for jetson nano
John Marangola - [email protected]
"""
import datasets
import json
from datasets import Board, ChessPiece, PieceColor, PieceType
#from realsense_utils import RealSenseCamera
import preprocessing as pr
import cv2
import pandas as pd
import os
from os.path import isfile, join
import uuid
import numpy as np
import uuid
from PIL import Image
from PIL.ExifTags import TAGS
RUN_CALIBRATION = False # Run calibration sequence or use preexisting board four corners data from config/setup.txt
BOARD_SAVE_DEST= r"board_metadata.jpeg" # Where the debug metadata board visualization image is saved (to ensure we properly setup the metadata)
TMP_DEST = "/home/spark/cv-chess/core/vision/tmp/" # Where images are temporarily saved before being uploaded to drive in a batch
LOCAL_MD_FILENAME = "local_meta.json"
LOCAL_METADATA_JSON_PATH = TMP_DEST + LOCAL_MD_FILENAME
TL = [250, 115]
BL = [250, 687]
TR = [825, 115]
BR = [825, 687]
def rotate_image(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
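# Added usage sketch (not part of the original script, untested); "frame.jpg" is a
# hypothetical file name used only for illustration:
# frame = cv2.imread("frame.jpg")
# straightened = rotate_image(frame, 1.5)  # same small correction angle the capture loop below uses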
def fen_to_dict(string):
name_to_num = {
'p' : 1,
'b' : 2,
'n' : 3,
'r' : 4,
'q' : 5,
'k' : 6,
}
out = {}
letters = "ABCDEFGH"
for i in range(8):
for j in range(1,9):
out[letters[i] + str(j)] = 0
string = string.split('/')
new_string = []
for s in string:
for d in s:
if d.isnumeric():
ix = s.index(d)
for i in range(int(d)-1):
s = s[0:ix] + '1' + s[ix:]
new_string.append(s)
for i in range(8, 0, -1):
for j in range(8):
if new_string[8-i][j].isnumeric():
out[letters[j] + str(i)] = 0
else:
out[letters[j] + str(i)] = name_to_num[new_string[8-i][j].lower()]
return out
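# Added illustrative note (not in the original file, untested): fen_to_dict expands a
# FEN piece-placement string into a dict keyed by square name ("A1".."H8"), with 0 for
# empty squares and the name_to_num codes above for occupied ones. Assuming the standard
# starting position as input:
# squares = fen_to_dict("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR")
# squares["A1"]  # expected to be 4, i.e. name_to_num['r'] for the rook on a1
# squares["A3"]  # expected to be 0, an empty square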
def get_sorted_time_saved(images):
"""
Given a list of image filenames, return a dictionary of image filename : time written to disk pairs.
Purpose: for debugging dataset
Args:
images (list): List of image filenames
Returns:
dict: dict of image filenames
"""
image_dat = []
for image in images:
imtmp = Image.open(image)
tmp = imtmp.getexif()
image_dat.append(tmp)
dt = {}
for exifdata in image_dat:
idx = image_dat.index(exifdata)
# iterating over all EXIF data fields
for tag_id in exifdata:
tag = TAGS.get(tag_id, tag_id)
data = exifdata.get(tag_id)
# decode bytes
if isinstance(data, bytes):
data = data.decode()
# Add datetime field
if tag == "DateTime":
dt[images[idx]] = data
print(f"{tag:25}: {data}")
output = sorted(dt.items(), key=lambda eta: eta[1], reverse=False)
print(output)
dt = {}
for item in output:
dt[item[0]] = item[1]
with open(TMP_DEST + "datetimes.json", "w") as wr: # dump to json
json.dump(output, wr)
return output
def del_batch_from_text_file(file):
filenames = []
with open(file, "r") as rd:
for line in rd.readlines():
# parse each line for file to delete:
commaIndex = line.index(",")
filename = line[:commaIndex]
os.remove(TMP_DEST + filename)
if __name__ == "__main__":
# Initialize camera
realsense = RealSenseCamera()
"""
# Check if calibration sequence must be run
if RUN_CALIBRATION:
realsense.calibrate_board_pos()
if realsense.get_board_corners() is None:
print("Failed to run calibration. Exiting...")
exit()
"""
"""
board_meta = Board()
# Add pieces to metadata csv
board_meta.add_pieces({
"A1":ChessPiece(PieceType.KNIGHT, PieceColor.BLUE), "A2":ChessPiece(PieceType.PAWN, PieceColor.BLUE), "A3":ChessPiece(PieceType.PAWN, PieceColor.ORANGE)
})
board_meta.display_board(dest=BOARD_SAVE_DEST)
print(f"Verify board is correct output dest={BOARD_SAVE_DEST}.\nContine [Y] or Exit [E]?")
validate = input()
if validate.upper() == "E" or validate.upper() == "N":
print("Exiting...")
realsense.stop_pipeline()
exit()
files = []
files = [f for f in os.listdir(TMP_DEST) if isfile(os.path.join(TMP_DEST, f))]
# Check to see if there is pre-existing .csv metadata to add to
if LOCAL_MD_FILENAME in files:
try:
total_metadata = pd.read_csv(LOCAL_METADATA_JSON_PATH)
except:
total_metadata = pd.DataFrame()
else:
total_metadata = pd.DataFrame()
# Loop through input
while input() != "exit":
img = realsense.capture_rgb_image() # Capture the image
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
files = pr.board_to_64_files(img, base_directory=TMP_DEST) # Break image up into 64 files
piece_types, piece_colors = [], []
batch_id = uuid.uuid1()
for tile in sorted(files.keys()):
temp = board_meta.get_chess_piece(tile)
if temp is None:
piece_types.append(None)
piece_colors.append(None)
else:
piece_types.append(temp.piece_type.name)
piece_colors.append(temp.piece_color.name)
tmp_meta = pd.DataFrame({
"File" : [files[file] for file in files.keys()],
"Position" : [file for file in files.keys()],
"Piece Type" : piece_types,
"Piece Color" : piece_colors,
"Batch ID" : [batch_id for i in range(len(files.keys()))]
})
frames = [total_metadata, tmp_meta]
total_metadata = pd.concat(frames) # Concatenate dataframes
print(total_metadata)
total_metadata.to_csv(path_or_buf=LOCAL_METADATA_JSON_PATH)
"""
#pr.delete_board2_64_output(base_directory=TMP_DEST)
FEN = "5P1R/1Q1RP1P1/3R1P2/QQPPK1R1/1B1K1N2/B1R2N1B/1N2B3R/2B1BN2".upper()
last_input = None
df = pd.DataFrame()
while input() != "end":
resp = input("[n] for new fen, [anything key to take an image] >")
if resp == "new":
fen = input("Enter a FEN:").upper()
img = realsense.capture_rgb_image() # Capture the image
print("Captured image")
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
cv2.imwrite("original.jpg", img)
# Get dict of positions
temp_dict = fen_to_dict(FEN)
tiles = pr.board_to_64_files(img, temp_dict, base_directory=TMP_DEST) # Break image up into 64 files
data_frame = pd.DataFrame(tiles)
data_frame = data_frame.transpose()
frames = [df, data_frame]
df = pd.concat(frames) # Concatenate dataframe
csv_file = df.to_csv(TMP_DEST + 'my_csv.csv', header=False, index=False)
# Close streams and end pipeline
realsense.stop_pipeline()
| [((34, 12, 34, 61), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', ({(34, 36, 34, 48): 'image_center', (34, 50, 34, 55): 'angle', (34, 57, 34, 60): '1.0'}, {}), '(image_center, angle, 1.0)', False, 'import cv2\n'), ((35, 11, 35, 85), 'cv2.warpAffine', 'cv2.warpAffine', (), '', False, 'import cv2\n'), ((199, 9, 199, 23), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((85, 16, 85, 33), 'PIL.Image.open', 'Image.open', ({(85, 27, 85, 32): 'image'}, {}), '(image)', False, 'from PIL import Image\n'), ((110, 8, 110, 29), 'json.dump', 'json.dump', ({(110, 18, 110, 24): 'output', (110, 26, 110, 28): 'wr'}, {}), '(output, wr)', False, 'import json\n'), ((211, 8, 211, 40), 'cv2.imwrite', 'cv2.imwrite', ({(211, 20, 211, 34): '"""original.jpg"""', (211, 36, 211, 39): 'img'}, {}), "('original.jpg', img)", False, 'import cv2\n'), ((215, 16, 215, 77), 'preprocessing.board_to_64_files', 'pr.board_to_64_files', (), '', True, 'import preprocessing as pr\n'), ((217, 21, 217, 40), 'pandas.DataFrame', 'pd.DataFrame', ({(217, 34, 217, 39): 'tiles'}, {}), '(tiles)', True, 'import pandas as pd\n'), ((221, 13, 221, 30), 'pandas.concat', 'pd.concat', ({(221, 23, 221, 29): 'frames'}, {}), '(frames)', True, 'import pandas as pd\n'), ((33, 23, 33, 51), 'numpy.array', 'np.array', ({(33, 32, 33, 50): 'image.shape[1::-1]'}, {}), '(image.shape[1::-1])', True, 'import numpy as np\n'), ((93, 18, 93, 42), 'PIL.ExifTags.TAGS.get', 'TAGS.get', ({(93, 27, 93, 33): 'tag_id', (93, 35, 93, 41): 'tag_id'}, {}), '(tag_id, tag_id)', False, 'from PIL.ExifTags import TAGS\n'), ((120, 12, 120, 42), 'os.remove', 'os.remove', ({(120, 22, 120, 41): '(TMP_DEST + filename)'}, {}), '(TMP_DEST + filename)', False, 'import os\n')] |
htwangtw/sbfc | tests/test_sbfc.py | 5119017a643b82efbfaaf373a26f191a51f8283a | import os
import numpy as np
import pandas as pd
from nilearn import datasets
from sbfc.parser import seed_base_connectivity
seed = os.path.dirname(__file__) + "/data/difumo64_pcc.nii.gz"
def _make_data_single_run(confound=True):
adhd_dataset = datasets.fetch_adhd(n_subjects=2)
group_confounds = pd.DataFrame(adhd_dataset.phenotypic)[
["Subject", "MeanFD", "age", "sex"]
]
group_confounds = group_confounds.rename(columns={"Subject": "subject_label"})
group_design_matrix = pd.DataFrame(adhd_dataset.phenotypic)[["Subject"]]
group_design_matrix = group_design_matrix.rename(
columns={"Subject": "subject_label"}
)
group_design_matrix["pheno"] = np.random.rand(2)
group_contrast = pd.DataFrame([1], columns=["pheno"])
if confound:
func_img = {
f"{sub_id}": {"func": [func], "confound": [confound]}
for func, confound, sub_id in zip(
adhd_dataset.func, adhd_dataset.confounds, group_confounds.index
)
}
else:
func_img = {
f"{sub_id}": {"func": [func], "confound": [None]}
for func, confound, sub_id in zip(
adhd_dataset.func, adhd_dataset.confounds, group_confounds.index
)
}
return func_img, group_design_matrix, group_confounds, group_contrast
def _make_data_multi_run():
adhd_dataset = datasets.fetch_adhd(n_subjects=2)
group_confounds = pd.DataFrame(adhd_dataset.phenotypic)[
["Subject", "MeanFD", "age", "sex"]
]
group_confounds = group_confounds.rename(columns={"Subject": "subject_label"})
group_design_matrix = pd.DataFrame(adhd_dataset.phenotypic)[["Subject"]]
group_design_matrix = group_design_matrix.rename(
columns={"Subject": "subject_label"}
)
group_design_matrix["pheno"] = np.random.rand(2)
group_contrast = pd.DataFrame([1], columns=["pheno"])
func_img = {
f"{sub_id}": {"func": [func, func], "confound": [confound, confound]}
for func, confound, sub_id in zip(
adhd_dataset.func, adhd_dataset.confounds, group_confounds.index
)
}
return func_img, group_design_matrix, group_confounds, group_contrast
def test_sbfc_single_run(tmpdir):
(
func_img,
group_design_matrix,
group_confounds,
group_contrast,
) = _make_data_single_run()
# Prepare seed
pcc_coords = (0, -53, 26)
first_m, first_con, s_m = seed_base_connectivity(
func_img,
pcc_coords,
group_confounds,
group_design_matrix,
group_contrast,
write_dir=tmpdir,
)
assert len(first_m) == 2
(
func_img,
group_design_matrix,
group_confounds,
group_contrast,
) = _make_data_single_run(confound=False)
# mask seed
first_m, first_con, s_m = seed_base_connectivity(
func_img,
seed,
group_confounds,
group_design_matrix,
group_contrast,
write_dir=tmpdir,
)
assert len(first_m) == 2
def test_sbfc_mutli_run(tmpdir):
(
func_img,
group_design_matrix,
group_confounds,
group_contrast,
) = _make_data_multi_run()
# mask seed
first_m, first_con, s_m = seed_base_connectivity(
func_img,
seed,
group_confounds,
group_design_matrix,
group_contrast,
write_dir=tmpdir,
)
assert len(first_m) == 2
| [((9, 7, 9, 32), 'os.path.dirname', 'os.path.dirname', ({(9, 23, 9, 31): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((13, 19, 13, 52), 'nilearn.datasets.fetch_adhd', 'datasets.fetch_adhd', (), '', False, 'from nilearn import datasets\n'), ((22, 35, 22, 52), 'numpy.random.rand', 'np.random.rand', ({(22, 50, 22, 51): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((23, 21, 23, 57), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((42, 19, 42, 52), 'nilearn.datasets.fetch_adhd', 'datasets.fetch_adhd', (), '', False, 'from nilearn import datasets\n'), ((51, 35, 51, 52), 'numpy.random.rand', 'np.random.rand', ({(51, 50, 51, 51): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((52, 21, 52, 57), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((72, 30, 79, 5), 'sbfc.parser.seed_base_connectivity', 'seed_base_connectivity', (), '', False, 'from sbfc.parser import seed_base_connectivity\n'), ((89, 30, 96, 5), 'sbfc.parser.seed_base_connectivity', 'seed_base_connectivity', (), '', False, 'from sbfc.parser import seed_base_connectivity\n'), ((108, 30, 115, 5), 'sbfc.parser.seed_base_connectivity', 'seed_base_connectivity', (), '', False, 'from sbfc.parser import seed_base_connectivity\n'), ((14, 22, 14, 59), 'pandas.DataFrame', 'pd.DataFrame', ({(14, 35, 14, 58): 'adhd_dataset.phenotypic'}, {}), '(adhd_dataset.phenotypic)', True, 'import pandas as pd\n'), ((18, 26, 18, 63), 'pandas.DataFrame', 'pd.DataFrame', ({(18, 39, 18, 62): 'adhd_dataset.phenotypic'}, {}), '(adhd_dataset.phenotypic)', True, 'import pandas as pd\n'), ((43, 22, 43, 59), 'pandas.DataFrame', 'pd.DataFrame', ({(43, 35, 43, 58): 'adhd_dataset.phenotypic'}, {}), '(adhd_dataset.phenotypic)', True, 'import pandas as pd\n'), ((47, 26, 47, 63), 'pandas.DataFrame', 'pd.DataFrame', ({(47, 39, 47, 62): 'adhd_dataset.phenotypic'}, {}), '(adhd_dataset.phenotypic)', True, 'import pandas as pd\n')] |
ChrisOmeh/xzceb-flask_eng_fr | final_project/machinetranslation/tests/test.py | 6ce4a79539b8ace4bce999c32a9f58aa73827e5c | import unittest
from translator import english_to_french, french_to_english
class TestenglishToFrench(unittest.TestCase):
def test1(self):
self.assertEqual(english_to_french(["Hello"]), "Bonjour")
self.assertNotEqual(english_to_french(["Bonjour"]), "Hello")
class TestfrenchToEnglish(unittest.TestCase):
def test1(self):
self.assertEqual(french_to_english(["Bonjour"]),'Hello')
self.assertNotEqual(french_to_english(["Hello"]), "Bonjour")
if __name__ == "__main__":
unittest.main() | [((15, 4, 15, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((6, 25, 6, 53), 'translator.english_to_french', 'english_to_french', ({(6, 43, 6, 52): "['Hello']"}, {}), "(['Hello'])", False, 'from translator import english_to_french, french_to_english\n'), ((7, 28, 7, 58), 'translator.english_to_french', 'english_to_french', ({(7, 46, 7, 57): "['Bonjour']"}, {}), "(['Bonjour'])", False, 'from translator import english_to_french, french_to_english\n'), ((11, 25, 11, 55), 'translator.french_to_english', 'french_to_english', ({(11, 43, 11, 54): "['Bonjour']"}, {}), "(['Bonjour'])", False, 'from translator import english_to_french, french_to_english\n'), ((12, 28, 12, 56), 'translator.french_to_english', 'french_to_english', ({(12, 46, 12, 55): "['Hello']"}, {}), "(['Hello'])", False, 'from translator import english_to_french, french_to_english\n')] |
huxian123/mindspore | tests/ut/python/parallel/test_auto_parallel_transformer.py | ec5ba10c82bbd6eccafe32d3a1149add90105bc8 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss
grad_all = C.GradOperation(get_all=True)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x):
predict = self.network(x)
return self.loss(predict)
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x):
return grad_all(self.network)(x)
class CustomDense(nn.Cell):
def __init__(self, row, column):
super(CustomDense, self).__init__()
self.weight = Parameter(Tensor(np.ones([row, column]).astype(np.float32) * 0.01), "w", requires_grad=True)
self.bias = Parameter(Tensor(np.zeros([row, column]).astype(np.float32)), "b", requires_grad=True)
self.matmul1 = P.MatMul()
self.add2 = P.TensorAdd()
self.activation3 = nn.ReLU()
def construct(self, x):
mat_output = self.matmul1(x, self.weight)
add_output = self.add2(mat_output, self.bias)
output = self.activation3(add_output)
return output
class DenseMutMulNet(nn.Cell):
def __init__(self):
super(DenseMutMulNet, self).__init__()
self.fc1 = CustomDense(4096, 4096)
self.fc2 = CustomDense(4096, 4096)
self.fc3 = CustomDense(4096, 4096)
self.fc4 = CustomDense(4096, 4096)
self.relu4 = nn.ReLU()
self.relu5 = nn.ReLU()
self.transpose = P.Transpose()
self.matmul1 = P.MatMul()
self.matmul2 = P.MatMul()
def construct(self, x):
q = self.fc1(x)
k = self.fc2(x)
v = self.fc3(x)
k = self.transpose(k, (1, 0))
c = self.relu4(self.matmul1(q, k))
s = self.relu5(self.matmul2(c, v))
s = self.fc4(s)
return s
class MultiTransformer(nn.Cell):
def __init__(self, layer_nums=1):
super(MultiTransformer, self).__init__()
self.layer = self._make_layer(layer_nums)
def _make_layer(self, layer_num):
layers = []
for _ in range(0, layer_num):
layers.append(DenseMutMulNet())
return nn.SequentialCell(layers)
def construct(self, x):
out = self.layer(x)
return out
def test_dmnet_train_step():
size = 8
context.set_auto_parallel_context(device_num=size, global_rank=0)
input_ = Tensor(np.ones([4096, 4096]).astype(np.float32) * 0.01)
net = GradWrap(NetWithLoss(MultiTransformer()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
_executor.compile(net, input_)
| [((26, 11, 26, 40), 'mindspore.ops.composite.GradOperation', 'C.GradOperation', (), '', True, 'from mindspore.ops import composite as C\n'), ((109, 4, 109, 69), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context\n'), ((113, 4, 113, 68), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context\n'), ((115, 4, 115, 34), 'mindspore.common.api._executor.compile', '_executor.compile', ({(115, 22, 115, 25): 'net', (115, 27, 115, 33): 'input_'}, {}), '(net, input_)', False, 'from mindspore.common.api import _executor\n'), ((32, 20, 32, 33), 'tests.ut.python.ops.test_math_ops.VirtualLoss', 'VirtualLoss', ({}, {}), '()', False, 'from tests.ut.python.ops.test_math_ops import VirtualLoss\n'), ((54, 23, 54, 33), 'mindspore.ops.operations.MatMul', 'P.MatMul', ({}, {}), '()', True, 'from mindspore.ops import operations as P\n'), ((55, 20, 55, 33), 'mindspore.ops.operations.TensorAdd', 'P.TensorAdd', ({}, {}), '()', True, 'from mindspore.ops import operations as P\n'), ((56, 27, 56, 36), 'mindspore.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import mindspore.nn as nn\n'), ((73, 21, 73, 30), 'mindspore.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import mindspore.nn as nn\n'), ((74, 21, 74, 30), 'mindspore.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import mindspore.nn as nn\n'), ((75, 25, 75, 38), 'mindspore.ops.operations.Transpose', 'P.Transpose', ({}, {}), '()', True, 'from mindspore.ops import operations as P\n'), ((76, 23, 76, 33), 'mindspore.ops.operations.MatMul', 'P.MatMul', ({}, {}), '()', True, 'from mindspore.ops import operations as P\n'), ((77, 23, 77, 33), 'mindspore.ops.operations.MatMul', 'P.MatMul', ({}, {}), '()', True, 'from mindspore.ops import operations as P\n'), ((100, 15, 100, 40), 'mindspore.nn.SequentialCell', 'nn.SequentialCell', ({(100, 33, 100, 39): 'layers'}, {}), '(layers)', True, 'import mindspore.nn as nn\n'), ((111, 20, 111, 41), 'numpy.ones', 'np.ones', ({(111, 28, 111, 40): '[4096, 4096]'}, {}), '([4096, 4096])', True, 'import numpy as np\n'), ((53, 37, 53, 60), 'numpy.zeros', 'np.zeros', ({(53, 46, 53, 59): '[row, column]'}, {}), '([row, column])', True, 'import numpy as np\n'), ((52, 39, 52, 61), 'numpy.ones', 'np.ones', ({(52, 47, 52, 60): '[row, column]'}, {}), '([row, column])', True, 'import numpy as np\n')] |
rcbops-qa/cloudcafe | cloudcafe/compute/events/models/common.py | d937f85496aadafbb94a330b9adb8ea18bee79ba | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.compute.events.models.base import (
EventBaseModel, EventBaseListModel)
class Bandwidth(EventBaseModel):
"""Bandwidth Response Model
@summary: Response model for bandwidth from a compute
event notification
@note: Although the 'public' and 'private' interfaces are
not required, they are the most common names, and are
included as optional attributes for the sake of convenience
@note: This type may contain additional unspecified
BandwidthInterface fields, which will be captured in a
dictionary called kwargs
JSON Example:
{
"private": { <BandwidthInterface> },
"public": { <BandwidthInterface> }
}
"""
kwarg_map = {'private': 'private',
'public': 'public'}
optional_kwargs = ['private', 'public']
strict_checking = False
def __init__(self, private=None, public=None, **kwargs):
super(Bandwidth, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, json_dict):
"""Override dict_to_obj implementation"""
obj = cls._map_values_to_kwargs(json_dict)
for key in obj.kwargs:
obj.kwargs[key] = BandwidthInterface._dict_to_obj(obj.kwargs[key])
if obj.private:
obj.private = BandwidthInterface._dict_to_obj(obj.private)
if obj.public:
obj.public = BandwidthInterface._dict_to_obj(obj.public)
return obj
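# Added usage sketch (not part of the original module, untested): building a Bandwidth
# model from a dict shaped like the docstring example, assuming the EventBaseModel base
# class exposes the mapped keys as attributes the way kwarg_map suggests:
# sample = {"private": {"bw_in": 123456, "bw_out": 654321},
#           "public": {"bw_in": 111111, "bw_out": 222222}}
# bandwidth = Bandwidth._dict_to_obj(sample)
# bandwidth.private.bw_in  # expected 123456; bandwidth.public would be a BandwidthInterface too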
class BandwidthInterface(EventBaseModel):
"""Bandwidth Interface Response Model
@summary: Response model for bandwidth on an interface from
a compute event notification
@note: Sub-model of Bandwidth
JSON Example:
{
"bw_in": 123456,
"bw_out": 654321
}
"""
kwarg_map = {'bw_in': 'bw_in',
'bw_out': 'bw_out'}
def __init__(self, bw_in, bw_out):
super(BandwidthInterface, self).__init__(locals())
class FixedIp(EventBaseModel):
"""Fixed IP Response Model
@summary: Response model for a fixed IP address from a
compute event notification
@note: Represents a single fixed IP
JSON Example:
{
"address": "10.10.0.0",
"floating_ips": [],
"label": "public",
"meta": {},
"type": "fixed",
"version": 4,
"vif_mac": "FE:ED:FA:00:1C:D4"
}
"""
kwarg_map = {
'address': 'address',
'floating_ips': 'floating_ips',
'label': 'label',
'meta': 'meta',
'type_': 'type',
'version': 'version',
'vif_mac': 'vif_mac'}
def __init__(self, address, floating_ips, label, meta, type_, version,
vif_mac):
super(FixedIp, self).__init__(locals())
class FixedIps(EventBaseListModel):
"""Fixed IPs Model
@summary: Response model for a list of fixed IP addresses
from a compute event notification
@note: Returns a list of elements of type 'FixedIp'
JSON Example:
{
"fixed_ips": [
{ <FixedIp> },
{ <FixedIp> }
]
}
"""
list_model_key = 'fixed_ips'
ObjectModel = FixedIp
class ImageMeta(EventBaseModel):
"""Image Metadata Model
@summary: Response model for image metadata from a compute
event notification
@note: This type may contain additional unspecified
fields, which will be captured in a dictionary called kwargs
JSON Example:
{
"image_meta": {
"auto_disk_config": "disabled",
"base_image_ref": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"container_format": "ovf",
"disk_format": "vhd",
"image_type": "base",
"min_disk": "20",
"min_ram": "512",
"org.openstack__1__architecture": "x64",
"org.openstack__1__os_distro": "com.ubuntu",
"org.openstack__1__os_version": "12.04",
"os_type": "linux"
}
}
"""
kwarg_map = {
'auto_disk_config': 'auto_disk_config',
'base_image_ref': 'base_image_ref',
'container_format': 'container_format',
'disk_format': 'disk_format',
'image_type': 'image_type',
'min_disk': 'min_disk',
'min_ram': 'min_ram',
'org_openstack__1__architecture': 'org.openstack__1__architecture',
'org_openstack__1__os_distro': 'org.openstack__1__os_distro',
'org_openstack__1__os_version': 'org.openstack__1__os_version',
'os_type': 'os_type'}
strict_checking = False
def __init__(self, auto_disk_config, base_image_ref, container_format,
disk_format, image_type, min_disk, min_ram,
org_openstack__1__architecture, org_openstack__1__os_distro,
org_openstack__1__os_version, os_type, **kwargs):
super(ImageMeta, self).__init__(locals())
class InstanceException(EventBaseModel):
"""Instance Exception Model
@summary: Response model for an instance exception from a
compute event notification
@note: Represents a single instance exception
JSON Example:
{
"exception": {
"kwargs": {
"instance_uuid": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"reason": "Something broke",
"code": 500
}
}
}
"""
kwarg_map = {'kwargs': 'kwargs'}
def __init__(self, kwargs):
super(InstanceException, self).__init__(locals())
| [] |
sekcheong/openpyxl | openpyxl/drawing/tests/test_shapes.py | e1ba037f171efa348f75431c35a50de5ca277b78 | from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def GradientFillProperties():
from ..fill import GradientFillProperties
return GradientFillProperties
class TestGradientFillProperties:
def test_ctor(self, GradientFillProperties):
fill = GradientFillProperties()
xml = tostring(fill.to_tree())
expected = """
<gradFill></gradFill>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, GradientFillProperties):
src = """
<gradFill></gradFill>
"""
node = fromstring(src)
fill = GradientFillProperties.from_tree(node)
assert fill == GradientFillProperties()
@pytest.fixture
def Transform2D():
from ..shapes import Transform2D
return Transform2D
class TestTransform2D:
def test_ctor(self, Transform2D):
shapes = Transform2D()
xml = tostring(shapes.to_tree())
expected = """
<xfrm></xfrm>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Transform2D):
src = """
<root />
"""
node = fromstring(src)
shapes = Transform2D.from_tree(node)
assert shapes == Transform2D()
| [((24, 15, 24, 41), 'openpyxl.tests.helper.compare_xml', 'compare_xml', ({(24, 27, 24, 30): 'xml', (24, 32, 24, 40): 'expected'}, {}), '(xml, expected)', False, 'from openpyxl.tests.helper import compare_xml\n'), ((32, 15, 32, 30), 'openpyxl.xml.functions.fromstring', 'fromstring', ({(32, 26, 32, 29): 'src'}, {}), '(src)', False, 'from openpyxl.xml.functions import fromstring, tostring\n'), ((51, 15, 51, 41), 'openpyxl.tests.helper.compare_xml', 'compare_xml', ({(51, 27, 51, 30): 'xml', (51, 32, 51, 40): 'expected'}, {}), '(xml, expected)', False, 'from openpyxl.tests.helper import compare_xml\n'), ((59, 15, 59, 30), 'openpyxl.xml.functions.fromstring', 'fromstring', ({(59, 26, 59, 29): 'src'}, {}), '(src)', False, 'from openpyxl.xml.functions import fromstring, tostring\n')] |
raysect/source | raysect/core/math/function/float/function3d/interpolate/tests/scripts/generate_3d_splines.py | 11f03089d0379fc7fb4d23c6f60c3d255673cec9 |
# Copyright (c) 2014-2021, Dr Alex Meakins, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from raysect.core.math.function.float.function3d.interpolate.interpolator3darray import Interpolator3DArray
from matplotlib.colors import SymLogNorm, Normalize
import scipy
import sys
from raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data import \
TestInterpolatorLoadBigValues, TestInterpolatorLoadNormalValues, TestInterpolatorLoadSmallValues,\
TestInterpolatorLoadBigValuesUneven, TestInterpolatorLoadNormalValuesUneven, TestInterpolatorLoadSmallValuesUneven
from raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d import X_LOWER, X_UPPER,\
NB_XSAMPLES, NB_X, X_EXTRAP_DELTA_MAX, PRECISION, Y_LOWER, Y_UPPER, NB_YSAMPLES, NB_Y, \
Y_EXTRAP_DELTA_MAX, EXTRAPOLATION_RANGE, large_extrapolation_range, Z_LOWER, Z_UPPER, \
NB_ZSAMPLES, NB_Z, Z_EXTRAP_DELTA_MAX, N_EXTRAPOLATION, uneven_linspace
# Force scientific format to get the right number of significant figures
np.set_printoptions(30000, linewidth=100, formatter={'float': lambda x_str: format(x_str, '.'+str(PRECISION)+'E')},
threshold=sys.maxsize)
# Overwrite imported values here.
VISUAL_NOT_TESTS = False
if VISUAL_NOT_TESTS:
NB_X = 51
NB_Y = 51
NB_Z = 51
NB_XSAMPLES = 101
NB_YSAMPLES = 101
NB_ZSAMPLES = 101
X_EXTRAP_DELTA_MIN = 0.04
Y_EXTRAP_DELTA_MIN = 0.04
Z_EXTRAP_DELTA_MIN = 0.04
BIG_VALUE_FACTOR = 20.
SMALL_VALUE_FACTOR = -20.
def docstring_test():
"""
.. code-block:: python
>>> from raysect.core.math.function.float.function3d.interpolate.interpolator3darray import Interpolator3DArray
>>>
>>> x = np.linspace(-1., 1., 20)
>>> y = np.linspace(-1., 1., 20)
>>> z = np.linspace(-1., 1., 20)
>>> x_array, y_array, z_array = np.meshgrid(x, y, z, indexing='ij')
>>> f = np.exp(-(x_array**2 + y_array**2 + z_array**2))
>>> interpolator3D = Interpolator3DArray(x, y, z, f, 'cubic', 'nearest', 1.0, 1.0, 1.0)
>>> # Interpolation
>>> interpolator3D(1.0, 1.0, 0.2)
0.1300281183136766
>>> # Extrapolation
>>> interpolator3D(1.0, 1.0, 1.1)
0.0497870683678659
>>> # Extrapolation out of bounds
>>> interpolator3D(1.0, 1.0, 2.1)
ValueError: The specified value (z=2.1) is outside of extrapolation range.
"""
pass
def get_extrapolation_input_values(
x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, x_extrap_delta_max, y_extrap_delta_max,
z_extrap_delta_max, x_extrap_delta_min, y_extrap_delta_min, z_extrap_delta_min):
xsamples_extrap_out_of_bounds_options = np.array(
[x_lower - x_extrap_delta_max, (x_lower + x_upper) / 2., x_upper + x_extrap_delta_max])
ysamples_extrap_out_of_bounds_options = np.array(
[y_lower - y_extrap_delta_max, (y_lower + y_upper) / 2., y_upper + y_extrap_delta_max])
zsamples_extrap_out_of_bounds_options = np.array(
[z_lower - z_extrap_delta_max, (z_lower + z_upper) / 2., z_upper + z_extrap_delta_max])
xsamples_extrap_in_bounds_options = np.array(
[x_lower - x_extrap_delta_min, (x_lower + x_upper) / 2., x_upper + x_extrap_delta_min])
ysamples_extrap_in_bounds_options = np.array(
[y_lower - y_extrap_delta_min, (y_lower + y_upper) / 2., y_upper + y_extrap_delta_min])
zsamples_extrap_in_bounds_options = np.array(
[z_lower - z_extrap_delta_min, (z_lower + z_upper) / 2., z_upper + z_extrap_delta_min])
xsamples_extrap_out_of_bounds = []
ysamples_extrap_out_of_bounds = []
zsamples_extrap_out_of_bounds = []
xsamples_extrap_in_bounds = []
ysamples_extrap_in_bounds = []
zsamples_extrap_in_bounds = []
edge_indicies_x = [0, len(xsamples_extrap_out_of_bounds_options) - 1]
edge_indicies_y = [0, len(ysamples_extrap_out_of_bounds_options) - 1]
edge_indicies_z = [0, len(zsamples_extrap_out_of_bounds_options) - 1]
for i_x in range(len(xsamples_extrap_out_of_bounds_options)):
for j_y in range(len(ysamples_extrap_out_of_bounds_options)):
for k_z in range(len(zsamples_extrap_out_of_bounds_options)):
if not (i_x not in edge_indicies_x and j_y not in edge_indicies_y and k_z not in edge_indicies_z):
xsamples_extrap_out_of_bounds.append(xsamples_extrap_out_of_bounds_options[i_x])
ysamples_extrap_out_of_bounds.append(ysamples_extrap_out_of_bounds_options[j_y])
zsamples_extrap_out_of_bounds.append(zsamples_extrap_out_of_bounds_options[k_z])
xsamples_extrap_in_bounds.append(xsamples_extrap_in_bounds_options[i_x])
ysamples_extrap_in_bounds.append(ysamples_extrap_in_bounds_options[j_y])
zsamples_extrap_in_bounds.append(zsamples_extrap_in_bounds_options[k_z])
return \
np.array(xsamples_extrap_out_of_bounds), np.array(ysamples_extrap_out_of_bounds), \
np.array(zsamples_extrap_out_of_bounds), np.array(xsamples_extrap_in_bounds), \
np.array(ysamples_extrap_in_bounds), np.array(zsamples_extrap_in_bounds)
def pcolourmesh_corners(input_array):
return np.concatenate((input_array[:-1] - np.diff(input_array)/2.,
np.array([input_array[-1] - (input_array[-1] - input_array[-2]) / 2.,
input_array[-1] + (input_array[-1] - input_array[-2]) / 2.])), axis=0)
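# Added note (not in the original script): pcolourmesh_corners above converts N cell-centre
# coordinates into the N+1 cell-edge coordinates that matplotlib's pcolormesh expects, e.g.
# (untested) pcolourmesh_corners(np.array([0., 1., 2.])) should give [-0.5, 0.5, 1.5, 2.5]
# for evenly spaced input.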
def function_to_spline(x_input, y_input, z_input, factor_in):
t = np.pi * np.sqrt((x_input ** 2 + y_input ** 2 + z_input ** 2))
return factor_in*np.sinc(t)
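# Added sanity note (not in the original script): function_to_spline above evaluates numpy's
# normalised sinc at t = pi * r with r = sqrt(x^2 + y^2 + z^2), scaled by factor_in; since
# np.sinc(0) == 1, function_to_spline(0., 0., 0., factor) should return factor exactly.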
if __name__ == '__main__':
# Calculate for big values, small values, or normal values
big_values = False
small_values = True
log_scale = False
uneven_spacing = False
use_saved_datastore_spline_knots = True
verbose_options = [False, True, False, False]
if VISUAL_NOT_TESTS:
index_x_in = 40
else:
index_x_in = 4
index_y_in = 0
index_z_in = 0
index_y_plot = 0
index_z_plot = 0
print('Using scipy version', scipy.__version__)
# Find the function values to be used
if big_values:
factor = np.power(10., BIG_VALUE_FACTOR)
elif small_values:
factor = np.power(10., SMALL_VALUE_FACTOR)
else:
factor = 1.
if uneven_spacing:
x_in = uneven_linspace(X_LOWER, X_UPPER, NB_X, offset_fraction=1./3.)
y_in = uneven_linspace(Y_LOWER, Y_UPPER, NB_Y, offset_fraction=1./3.)
z_in = uneven_linspace(Z_LOWER, Z_UPPER, NB_Z, offset_fraction=1./3.)
else:
x_in = np.linspace(X_LOWER, X_UPPER, NB_X)
y_in = np.linspace(Y_LOWER, Y_UPPER, NB_Y)
z_in = np.linspace(Z_LOWER, Z_UPPER, NB_Z)
x_in_full, y_in_full, z_in_full = np.meshgrid(x_in, y_in, z_in, indexing='ij')
f_in = function_to_spline(x_in_full, y_in_full, z_in_full, factor)
if use_saved_datastore_spline_knots:
if uneven_spacing:
if big_values:
reference_loaded_values = TestInterpolatorLoadBigValuesUneven()
elif small_values:
reference_loaded_values = TestInterpolatorLoadSmallValuesUneven()
else:
reference_loaded_values = TestInterpolatorLoadNormalValuesUneven()
else:
if big_values:
reference_loaded_values = TestInterpolatorLoadBigValues()
elif small_values:
reference_loaded_values = TestInterpolatorLoadSmallValues()
else:
reference_loaded_values = TestInterpolatorLoadNormalValues()
f_in = reference_loaded_values.data
if verbose_options[0]:
print('Save this to self.data in test_interpolator:\n', repr(f_in))
xsamples = np.linspace(X_LOWER, X_UPPER, NB_XSAMPLES)
ysamples = np.linspace(Y_LOWER, Y_UPPER, NB_YSAMPLES)
zsamples = np.linspace(Z_LOWER, Z_UPPER, NB_ZSAMPLES)
xsamples_extrapolation, ysamples_extrapolation, zsamples_extrapolation = large_extrapolation_range(
xsamples, ysamples, zsamples, EXTRAPOLATION_RANGE, N_EXTRAPOLATION
)
# # Extrapolation x and y values
xsamples_out_of_bounds, ysamples_out_of_bounds, zsamples_out_of_bounds, xsamples_in_bounds, ysamples_in_bounds, \
zsamples_in_bounds = get_extrapolation_input_values(
X_LOWER, X_UPPER, Y_LOWER, Y_UPPER, Z_LOWER, Z_UPPER, X_EXTRAP_DELTA_MAX, Y_EXTRAP_DELTA_MAX,
Z_EXTRAP_DELTA_MAX, X_EXTRAP_DELTA_MIN, Y_EXTRAP_DELTA_MIN, Z_EXTRAP_DELTA_MIN
)
interpolator3D = Interpolator3DArray(x_in, y_in, z_in, f_in, 'linear', 'linear', extrapolation_range_x=2.0,
extrapolation_range_y=2.0, extrapolation_range_z=2.0)
if VISUAL_NOT_TESTS:
n_lower_upper_interp = 51
else:
n_lower_upper_interp = 19
n_lower = 50
lower_p = 0.9
xsamples_lower_and_upper = np.linspace(X_LOWER, X_UPPER, n_lower_upper_interp)
ysamples_lower_and_upper = np.linspace(Y_LOWER, Y_UPPER, n_lower_upper_interp)
zsamples_lower_and_upper = np.linspace(Z_LOWER, Z_UPPER, n_lower_upper_interp)
xsamples_lower_and_upper = np.concatenate((np.linspace(X_LOWER - (X_UPPER - X_LOWER) * lower_p, X_LOWER, n_lower)[
:-1], xsamples_lower_and_upper,
np.linspace(X_UPPER, X_UPPER + (X_UPPER - X_LOWER) * lower_p, n_lower)[
1:]))
ysamples_lower_and_upper = np.concatenate((np.linspace(Y_LOWER - (Y_UPPER - Y_LOWER) * lower_p, Y_LOWER, n_lower)[
:-1], ysamples_lower_and_upper,
np.linspace(Y_UPPER, Y_UPPER + (Y_UPPER - Y_LOWER) * lower_p, n_lower)[
1:]))
zsamples_lower_and_upper = np.concatenate((np.linspace(Z_LOWER - (Z_UPPER - Z_LOWER) * lower_p, Z_LOWER, n_lower)[
:-1], zsamples_lower_and_upper,
np.linspace(Z_UPPER, Z_UPPER + (Z_UPPER - Z_LOWER) * lower_p, n_lower)[
1:]))
index_ysamples_lower_upper = np.where(x_in[index_y_in] == ysamples_lower_and_upper)[0].item()
# extrapolation to save
f_extrapolation_output = np.zeros((len(xsamples_extrapolation), ))
for i in range(len(xsamples_extrapolation)):
f_extrapolation_output[i] = interpolator3D(
xsamples_extrapolation[i], ysamples_extrapolation[i], zsamples_extrapolation[i]
)
if verbose_options[1]:
print('Output of extrapolation to be saved:\n', repr(f_extrapolation_output))
check_plot = True
if check_plot:
import matplotlib.pyplot as plt
from matplotlib import cm
# Install mayavi and pyQt5
main_plots_on = True
if main_plots_on:
fig, ax = plt.subplots(1, 4)
fig1, ax1 = plt.subplots(1, 2)
if not (x_in[index_x_in] == xsamples).any():
raise ValueError(
f'To compare a slice, NB_XSAMPLES={NB_XSAMPLES}-1, NB_YSAMPLES={NB_YSAMPLES}-1, NB_ZSAMPLES='
f'{NB_ZSAMPLES}-1 must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1, NB_Z={NB_Z}-1'
)
if not (y_in[index_y_in] == ysamples_lower_and_upper).any():
raise ValueError(
f'To compare a slice, NB_XSAMPLES={NB_XSAMPLES}-1, NB_YSAMPLES={NB_YSAMPLES}-1, NB_ZSAMPLES='
f'{NB_ZSAMPLES}-1 must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1, NB_Z={NB_Z}-1'
)
index_xsamples = np.where(x_in[index_x_in] == xsamples)[0].item()
index_ysamples_lower_upper = np.where(y_in[index_y_in] == ysamples_lower_and_upper)[0].item()
# index_ysamples_lower_upper = 0
# index_zsamples_lower_upper = 0
index_zsamples_lower_upper = np.where(z_in[index_z_in] == zsamples_lower_and_upper)[0].item()
f_plot_x = f_in[index_x_in, :, :]
y_corners_x = pcolourmesh_corners(y_in)
z_corners_x = pcolourmesh_corners(z_in)
min_colourmap = np.min(f_in)
max_colourmap = np.max(f_in)
if log_scale:
c_norm = SymLogNorm(vmin=min_colourmap, vmax=max_colourmap, linthresh=0.03)
else:
c_norm = Normalize(vmin=min_colourmap, vmax=max_colourmap)
colourmap = cm.get_cmap('viridis', 512)
ax[0].pcolormesh(y_corners_x, z_corners_x, f_plot_x, norm=c_norm, cmap='viridis')
# ax[0].pcolormesh(y_in, z_in, f_plot_x)
ax[0].set_aspect('equal')
f_out = np.zeros((len(xsamples), len(ysamples), len(zsamples)))
for i in range(len(xsamples)):
for j in range(len(ysamples)):
for k in range(len(zsamples)):
f_out[i, j, k] = interpolator3D(xsamples[i], ysamples[j], zsamples[k])
if verbose_options[2]:
print('Test interpolation:\n', repr(f_out))
f_out_lower_and_upper = np.zeros((len(xsamples_lower_and_upper), len(ysamples_lower_and_upper),
len(zsamples_lower_and_upper)))
for i in range(len(xsamples_lower_and_upper)):
for j in range(len(ysamples_lower_and_upper)):
for k in range(len(zsamples_lower_and_upper)):
f_out_lower_and_upper[i, j, k] = interpolator3D(
xsamples_lower_and_upper[i], ysamples_lower_and_upper[j], zsamples_lower_and_upper[k]
)
f_out_extrapolation = np.zeros((len(xsamples_extrapolation), ))
for i in range(len(xsamples_extrapolation)):
f_out_extrapolation[i] = interpolator3D(
xsamples_extrapolation[i], ysamples_extrapolation[i], zsamples_extrapolation[i]
)
if verbose_options[3]:
print('New output of extrapolation to be saved:\n', repr(f_out_extrapolation))
index_xsamples_extrap = np.where(x_in[index_x_in] == xsamples_extrapolation)
f_out_x_extrapolation = f_out_extrapolation[index_xsamples_extrap]
im = ax[3].scatter(
ysamples_extrapolation[index_xsamples_extrap], zsamples_extrapolation[index_xsamples_extrap],
c=f_out_x_extrapolation, norm=c_norm, cmap='viridis', s=10
)
ax[3].set_aspect('equal')
f_out_x = f_out[index_xsamples, :, :]
ysamples_mesh, zsamples_mesh = np.meshgrid(ysamples, zsamples)
ax[0].scatter(
ysamples_mesh.ravel(), zsamples_mesh.ravel(), c=f_out_x.ravel(), norm=c_norm, cmap='viridis', s=10
)
index_y_print = -1
index_z_print = 0
index_ysamples_print = np.where(y_in[index_y_print] == ysamples)[0].item()
index_zsamples_print = np.where(z_in[index_z_print] == zsamples)[0].item()
ax[0].set_title('Slice of x', size=20)
ax[1].set_title(f'Interpolated points \nin slice of x={x_in[index_x_in]}', size=20)
y_corners_xsamples = pcolourmesh_corners(ysamples)
z_corners_xsamples = pcolourmesh_corners(zsamples)
im2 = ax[1].pcolormesh(y_corners_xsamples, z_corners_xsamples, f_out_x, norm=c_norm, cmap='viridis')
ax[1].set_aspect('equal')
if not (x_in[index_x_in] == xsamples_lower_and_upper).any():
raise ValueError(
f'To compare a slice, n_lower_upper={n_lower}-1, must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1,'
f' NB_Z={NB_Z}-1'
)
index_xsamples_lower_and_upper = np.where(x_in[index_x_in] == xsamples_lower_and_upper)[0].item()
y_corners_xsamples_lower_and_upper = pcolourmesh_corners(ysamples_lower_and_upper)
z_corners_xsamples_lower_and_upper = pcolourmesh_corners(zsamples_lower_and_upper)
f_out_lower_and_upper_x = f_out_lower_and_upper[index_xsamples_lower_and_upper, :, :]
im3 = ax[2].pcolormesh(
y_corners_xsamples_lower_and_upper, z_corners_xsamples_lower_and_upper, f_out_lower_and_upper_x,
norm=c_norm, cmap='viridis'
)
check_array_z = np.zeros(len(zsamples_lower_and_upper))
check_array_y = np.zeros(len(ysamples_lower_and_upper))
for i in range(len(zsamples_lower_and_upper)):
check_array_z[i] = interpolator3D(
x_in[index_x_in], ysamples_lower_and_upper[index_ysamples_lower_upper], zsamples_lower_and_upper[i]
)
check_array_y[i] = interpolator3D(
x_in[index_x_in], ysamples_lower_and_upper[i], zsamples_lower_and_upper[index_zsamples_lower_upper]
)
ax1[0].plot(zsamples_lower_and_upper, f_out_lower_and_upper_x[index_ysamples_lower_upper, :])
ax1[0].plot(z_in, f_in[index_x_in, index_y_in, :], 'bo')
ax1[0].plot(zsamples_lower_and_upper, check_array_z, 'gx')
ax1[1].plot(ysamples_lower_and_upper, check_array_y)
# ax1[1].plot(ysamples_lower_and_upper, f_out_lower_and_upper_x[:, index_z_plot])
ax1[0].axvline(z_in[0], color='r', linestyle='--')
ax1[0].axvline(z_in[-1], color='r', linestyle='--')
ax1[1].axvline(y_in[0], color='r', linestyle='--')
ax1[1].axvline(y_in[-1], color='r', linestyle='--')
fig.colorbar(im, ax=ax[0])
fig.colorbar(im2, ax=ax[1])
fig.colorbar(im3, ax=ax[2])
ax[2].set_aspect('equal')
plt.show()
| [((96, 44, 97, 95), 'numpy.array', 'np.array', ({(97, 8, 97, 94): '[x_lower - x_extrap_delta_max, (x_lower + x_upper) / 2.0, x_upper +\n x_extrap_delta_max]'}, {}), '([x_lower - x_extrap_delta_max, (x_lower + x_upper) / 2.0, x_upper +\n x_extrap_delta_max])', True, 'import numpy as np\n'), ((99, 44, 100, 95), 'numpy.array', 'np.array', ({(100, 8, 100, 94): '[y_lower - y_extrap_delta_max, (y_lower + y_upper) / 2.0, y_upper +\n y_extrap_delta_max]'}, {}), '([y_lower - y_extrap_delta_max, (y_lower + y_upper) / 2.0, y_upper +\n y_extrap_delta_max])', True, 'import numpy as np\n'), ((102, 44, 103, 95), 'numpy.array', 'np.array', ({(103, 8, 103, 94): '[z_lower - z_extrap_delta_max, (z_lower + z_upper) / 2.0, z_upper +\n z_extrap_delta_max]'}, {}), '([z_lower - z_extrap_delta_max, (z_lower + z_upper) / 2.0, z_upper +\n z_extrap_delta_max])', True, 'import numpy as np\n'), ((105, 40, 106, 95), 'numpy.array', 'np.array', ({(106, 8, 106, 94): '[x_lower - x_extrap_delta_min, (x_lower + x_upper) / 2.0, x_upper +\n x_extrap_delta_min]'}, {}), '([x_lower - x_extrap_delta_min, (x_lower + x_upper) / 2.0, x_upper +\n x_extrap_delta_min])', True, 'import numpy as np\n'), ((108, 40, 109, 95), 'numpy.array', 'np.array', ({(109, 8, 109, 94): '[y_lower - y_extrap_delta_min, (y_lower + y_upper) / 2.0, y_upper +\n y_extrap_delta_min]'}, {}), '([y_lower - y_extrap_delta_min, (y_lower + y_upper) / 2.0, y_upper +\n y_extrap_delta_min])', True, 'import numpy as np\n'), ((111, 40, 112, 95), 'numpy.array', 'np.array', ({(112, 8, 112, 94): '[z_lower - z_extrap_delta_min, (z_lower + z_upper) / 2.0, z_upper +\n z_extrap_delta_min]'}, {}), '([z_lower - z_extrap_delta_min, (z_lower + z_upper) / 2.0, z_upper +\n z_extrap_delta_min])', True, 'import numpy as np\n'), ((185, 38, 185, 82), 'numpy.meshgrid', 'np.meshgrid', (), '', True, 'import numpy as np\n'), ((207, 15, 207, 57), 'numpy.linspace', 'np.linspace', ({(207, 27, 207, 34): 'X_LOWER', (207, 36, 207, 43): 'X_UPPER', (207, 45, 207, 56): 'NB_XSAMPLES'}, {}), '(X_LOWER, X_UPPER, NB_XSAMPLES)', True, 'import numpy as np\n'), ((208, 15, 208, 57), 'numpy.linspace', 'np.linspace', ({(208, 27, 208, 34): 'Y_LOWER', (208, 36, 208, 43): 'Y_UPPER', (208, 45, 208, 56): 'NB_YSAMPLES'}, {}), '(Y_LOWER, Y_UPPER, NB_YSAMPLES)', True, 'import numpy as np\n'), ((209, 15, 209, 57), 'numpy.linspace', 'np.linspace', ({(209, 27, 209, 34): 'Z_LOWER', (209, 36, 209, 43): 'Z_UPPER', (209, 45, 209, 56): 'NB_ZSAMPLES'}, {}), '(Z_LOWER, Z_UPPER, NB_ZSAMPLES)', True, 'import numpy as np\n'), ((211, 77, 213, 5), 'raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d.large_extrapolation_range', 'large_extrapolation_range', ({(212, 8, 212, 16): 'xsamples', (212, 18, 212, 26): 'ysamples', (212, 28, 212, 36): 'zsamples', (212, 38, 212, 57): 'EXTRAPOLATION_RANGE', (212, 59, 212, 74): 'N_EXTRAPOLATION'}, {}), '(xsamples, ysamples, zsamples, EXTRAPOLATION_RANGE,\n N_EXTRAPOLATION)', False, 'from raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d import X_LOWER, X_UPPER, NB_XSAMPLES, NB_X, X_EXTRAP_DELTA_MAX, PRECISION, Y_LOWER, Y_UPPER, NB_YSAMPLES, NB_Y, Y_EXTRAP_DELTA_MAX, EXTRAPOLATION_RANGE, large_extrapolation_range, Z_LOWER, Z_UPPER, NB_ZSAMPLES, NB_Z, Z_EXTRAP_DELTA_MAX, N_EXTRAPOLATION, uneven_linspace\n'), ((222, 21, 223, 94), 'raysect.core.math.function.float.function3d.interpolate.interpolator3darray.Interpolator3DArray', 'Interpolator3DArray', (), '', False, 'from 
raysect.core.math.function.float.function3d.interpolate.interpolator3darray import Interpolator3DArray\n'), ((230, 31, 230, 82), 'numpy.linspace', 'np.linspace', ({(230, 43, 230, 50): 'X_LOWER', (230, 52, 230, 59): 'X_UPPER', (230, 61, 230, 81): 'n_lower_upper_interp'}, {}), '(X_LOWER, X_UPPER, n_lower_upper_interp)', True, 'import numpy as np\n'), ((231, 31, 231, 82), 'numpy.linspace', 'np.linspace', ({(231, 43, 231, 50): 'Y_LOWER', (231, 52, 231, 59): 'Y_UPPER', (231, 61, 231, 81): 'n_lower_upper_interp'}, {}), '(Y_LOWER, Y_UPPER, n_lower_upper_interp)', True, 'import numpy as np\n'), ((232, 31, 232, 82), 'numpy.linspace', 'np.linspace', ({(232, 43, 232, 50): 'Z_LOWER', (232, 52, 232, 59): 'Z_UPPER', (232, 61, 232, 81): 'n_lower_upper_interp'}, {}), '(Z_LOWER, Z_UPPER, n_lower_upper_interp)', True, 'import numpy as np\n'), ((134, 8, 134, 47), 'numpy.array', 'np.array', ({(134, 17, 134, 46): 'xsamples_extrap_out_of_bounds'}, {}), '(xsamples_extrap_out_of_bounds)', True, 'import numpy as np\n'), ((134, 49, 134, 88), 'numpy.array', 'np.array', ({(134, 58, 134, 87): 'ysamples_extrap_out_of_bounds'}, {}), '(ysamples_extrap_out_of_bounds)', True, 'import numpy as np\n'), ((135, 8, 135, 47), 'numpy.array', 'np.array', ({(135, 17, 135, 46): 'zsamples_extrap_out_of_bounds'}, {}), '(zsamples_extrap_out_of_bounds)', True, 'import numpy as np\n'), ((135, 49, 135, 84), 'numpy.array', 'np.array', ({(135, 58, 135, 83): 'xsamples_extrap_in_bounds'}, {}), '(xsamples_extrap_in_bounds)', True, 'import numpy as np\n'), ((136, 8, 136, 43), 'numpy.array', 'np.array', ({(136, 17, 136, 42): 'ysamples_extrap_in_bounds'}, {}), '(ysamples_extrap_in_bounds)', True, 'import numpy as np\n'), ((136, 45, 136, 80), 'numpy.array', 'np.array', ({(136, 54, 136, 79): 'zsamples_extrap_in_bounds'}, {}), '(zsamples_extrap_in_bounds)', True, 'import numpy as np\n'), ((146, 16, 146, 69), 'numpy.sqrt', 'np.sqrt', ({(146, 25, 146, 67): '(x_input ** 2 + y_input ** 2 + z_input ** 2)'}, {}), '(x_input ** 2 + y_input ** 2 + z_input ** 2)', True, 'import numpy as np\n'), ((147, 21, 147, 31), 'numpy.sinc', 'np.sinc', ({(147, 29, 147, 30): 't'}, {}), '(t)', True, 'import numpy as np\n'), ((170, 17, 170, 48), 'numpy.power', 'np.power', ({(170, 26, 170, 29): '10.0', (170, 31, 170, 47): 'BIG_VALUE_FACTOR'}, {}), '(10.0, BIG_VALUE_FACTOR)', True, 'import numpy as np\n'), ((177, 15, 177, 77), 'raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d.uneven_linspace', 'uneven_linspace', (), '', False, 'from raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d import X_LOWER, X_UPPER, NB_XSAMPLES, NB_X, X_EXTRAP_DELTA_MAX, PRECISION, Y_LOWER, Y_UPPER, NB_YSAMPLES, NB_Y, Y_EXTRAP_DELTA_MAX, EXTRAPOLATION_RANGE, large_extrapolation_range, Z_LOWER, Z_UPPER, NB_ZSAMPLES, NB_Z, Z_EXTRAP_DELTA_MAX, N_EXTRAPOLATION, uneven_linspace\n'), ((178, 15, 178, 77), 'raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d.uneven_linspace', 'uneven_linspace', (), '', False, 'from raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d import X_LOWER, X_UPPER, NB_XSAMPLES, NB_X, X_EXTRAP_DELTA_MAX, PRECISION, Y_LOWER, Y_UPPER, NB_YSAMPLES, NB_Y, Y_EXTRAP_DELTA_MAX, EXTRAPOLATION_RANGE, large_extrapolation_range, Z_LOWER, Z_UPPER, NB_ZSAMPLES, NB_Z, Z_EXTRAP_DELTA_MAX, N_EXTRAPOLATION, uneven_linspace\n'), ((179, 15, 179, 77), 'raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d.uneven_linspace', 'uneven_linspace', (), '', 
False, 'from raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d import X_LOWER, X_UPPER, NB_XSAMPLES, NB_X, X_EXTRAP_DELTA_MAX, PRECISION, Y_LOWER, Y_UPPER, NB_YSAMPLES, NB_Y, Y_EXTRAP_DELTA_MAX, EXTRAPOLATION_RANGE, large_extrapolation_range, Z_LOWER, Z_UPPER, NB_ZSAMPLES, NB_Z, Z_EXTRAP_DELTA_MAX, N_EXTRAPOLATION, uneven_linspace\n'), ((181, 15, 181, 50), 'numpy.linspace', 'np.linspace', ({(181, 27, 181, 34): 'X_LOWER', (181, 36, 181, 43): 'X_UPPER', (181, 45, 181, 49): 'NB_X'}, {}), '(X_LOWER, X_UPPER, NB_X)', True, 'import numpy as np\n'), ((182, 15, 182, 50), 'numpy.linspace', 'np.linspace', ({(182, 27, 182, 34): 'Y_LOWER', (182, 36, 182, 43): 'Y_UPPER', (182, 45, 182, 49): 'NB_Y'}, {}), '(Y_LOWER, Y_UPPER, NB_Y)', True, 'import numpy as np\n'), ((183, 15, 183, 50), 'numpy.linspace', 'np.linspace', ({(183, 27, 183, 34): 'Z_LOWER', (183, 36, 183, 43): 'Z_UPPER', (183, 45, 183, 49): 'NB_Z'}, {}), '(Z_LOWER, Z_UPPER, NB_Z)', True, 'import numpy as np\n'), ((141, 27, 142, 97), 'numpy.array', 'np.array', ({(141, 36, 142, 96): '[input_array[-1] - (input_array[-1] - input_array[-2]) / 2.0, input_array[-\n 1] + (input_array[-1] - input_array[-2]) / 2.0]'}, {}), '([input_array[-1] - (input_array[-1] - input_array[-2]) / 2.0, \n input_array[-1] + (input_array[-1] - input_array[-2]) / 2.0])', True, 'import numpy as np\n'), ((172, 17, 172, 50), 'numpy.power', 'np.power', ({(172, 26, 172, 29): '10.0', (172, 31, 172, 49): 'SMALL_VALUE_FACTOR'}, {}), '(10.0, SMALL_VALUE_FACTOR)', True, 'import numpy as np\n'), ((264, 22, 264, 40), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(264, 35, 264, 36): '1', (264, 38, 264, 39): '4'}, {}), '(1, 4)', True, 'import matplotlib.pyplot as plt\n'), ((265, 24, 265, 42), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(265, 37, 265, 38): '1', (265, 40, 265, 41): '2'}, {}), '(1, 2)', True, 'import matplotlib.pyplot as plt\n'), ((286, 28, 286, 40), 'numpy.min', 'np.min', ({(286, 35, 286, 39): 'f_in'}, {}), '(f_in)', True, 'import numpy as np\n'), ((287, 28, 287, 40), 'numpy.max', 'np.max', ({(287, 35, 287, 39): 'f_in'}, {}), '(f_in)', True, 'import numpy as np\n'), ((292, 24, 292, 51), 'matplotlib.cm.get_cmap', 'cm.get_cmap', ({(292, 36, 292, 45): '"""viridis"""', (292, 47, 292, 50): '512'}, {}), "('viridis', 512)", False, 'from matplotlib import cm\n'), ((323, 36, 323, 88), 'numpy.where', 'np.where', ({(323, 45, 323, 87): 'x_in[index_x_in] == xsamples_extrapolation'}, {}), '(x_in[index_x_in] == xsamples_extrapolation)', True, 'import numpy as np\n'), ((334, 43, 334, 74), 'numpy.meshgrid', 'np.meshgrid', ({(334, 55, 334, 63): 'ysamples', (334, 65, 334, 73): 'zsamples'}, {}), '(ysamples, zsamples)', True, 'import numpy as np\n'), ((391, 12, 391, 22), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((191, 42, 191, 79), 'raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data.TestInterpolatorLoadBigValuesUneven', 'TestInterpolatorLoadBigValuesUneven', ({}, {}), '()', False, 'from raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data import TestInterpolatorLoadBigValues, TestInterpolatorLoadNormalValues, TestInterpolatorLoadSmallValues, TestInterpolatorLoadBigValuesUneven, TestInterpolatorLoadNormalValuesUneven, TestInterpolatorLoadSmallValuesUneven\n'), ((198, 42, 198, 73), 'raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data.TestInterpolatorLoadBigValues', 
'TestInterpolatorLoadBigValues', ({}, {}), '()', False, 'from raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data import TestInterpolatorLoadBigValues, TestInterpolatorLoadNormalValues, TestInterpolatorLoadSmallValues, TestInterpolatorLoadBigValuesUneven, TestInterpolatorLoadNormalValuesUneven, TestInterpolatorLoadSmallValuesUneven\n'), ((233, 47, 233, 117), 'numpy.linspace', 'np.linspace', ({(233, 59, 233, 98): 'X_LOWER - (X_UPPER - X_LOWER) * lower_p', (233, 100, 233, 107): 'X_LOWER', (233, 109, 233, 116): 'n_lower'}, {}), '(X_LOWER - (X_UPPER - X_LOWER) * lower_p, X_LOWER, n_lower)', True, 'import numpy as np\n'), ((235, 47, 235, 117), 'numpy.linspace', 'np.linspace', ({(235, 59, 235, 66): 'X_UPPER', (235, 68, 235, 107): 'X_UPPER + (X_UPPER - X_LOWER) * lower_p', (235, 109, 235, 116): 'n_lower'}, {}), '(X_UPPER, X_UPPER + (X_UPPER - X_LOWER) * lower_p, n_lower)', True, 'import numpy as np\n'), ((237, 47, 237, 117), 'numpy.linspace', 'np.linspace', ({(237, 59, 237, 98): 'Y_LOWER - (Y_UPPER - Y_LOWER) * lower_p', (237, 100, 237, 107): 'Y_LOWER', (237, 109, 237, 116): 'n_lower'}, {}), '(Y_LOWER - (Y_UPPER - Y_LOWER) * lower_p, Y_LOWER, n_lower)', True, 'import numpy as np\n'), ((239, 47, 239, 117), 'numpy.linspace', 'np.linspace', ({(239, 59, 239, 66): 'Y_UPPER', (239, 68, 239, 107): 'Y_UPPER + (Y_UPPER - Y_LOWER) * lower_p', (239, 109, 239, 116): 'n_lower'}, {}), '(Y_UPPER, Y_UPPER + (Y_UPPER - Y_LOWER) * lower_p, n_lower)', True, 'import numpy as np\n'), ((241, 47, 241, 117), 'numpy.linspace', 'np.linspace', ({(241, 59, 241, 98): 'Z_LOWER - (Z_UPPER - Z_LOWER) * lower_p', (241, 100, 241, 107): 'Z_LOWER', (241, 109, 241, 116): 'n_lower'}, {}), '(Z_LOWER - (Z_UPPER - Z_LOWER) * lower_p, Z_LOWER, n_lower)', True, 'import numpy as np\n'), ((243, 47, 243, 117), 'numpy.linspace', 'np.linspace', ({(243, 59, 243, 66): 'Z_UPPER', (243, 68, 243, 107): 'Z_UPPER + (Z_UPPER - Z_LOWER) * lower_p', (243, 109, 243, 116): 'n_lower'}, {}), '(Z_UPPER, Z_UPPER + (Z_UPPER - Z_LOWER) * lower_p, n_lower)', True, 'import numpy as np\n'), ((245, 33, 245, 87), 'numpy.where', 'np.where', ({(245, 42, 245, 86): 'x_in[index_y_in] == ysamples_lower_and_upper'}, {}), '(x_in[index_y_in] == ysamples_lower_and_upper)', True, 'import numpy as np\n'), ((289, 25, 289, 91), 'matplotlib.colors.SymLogNorm', 'SymLogNorm', (), '', False, 'from matplotlib.colors import SymLogNorm, Normalize\n'), ((291, 25, 291, 74), 'matplotlib.colors.Normalize', 'Normalize', (), '', False, 'from matplotlib.colors import SymLogNorm, Normalize\n'), ((140, 46, 140, 66), 'numpy.diff', 'np.diff', ({(140, 54, 140, 65): 'input_array'}, {}), '(input_array)', True, 'import numpy as np\n'), ((193, 42, 193, 81), 'raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data.TestInterpolatorLoadSmallValuesUneven', 'TestInterpolatorLoadSmallValuesUneven', ({}, {}), '()', False, 'from raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data import TestInterpolatorLoadBigValues, TestInterpolatorLoadNormalValues, TestInterpolatorLoadSmallValues, TestInterpolatorLoadBigValuesUneven, TestInterpolatorLoadNormalValuesUneven, TestInterpolatorLoadSmallValuesUneven\n'), ((195, 42, 195, 82), 'raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data.TestInterpolatorLoadNormalValuesUneven', 'TestInterpolatorLoadNormalValuesUneven', ({}, {}), '()', False, 'from 
raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data import TestInterpolatorLoadBigValues, TestInterpolatorLoadNormalValues, TestInterpolatorLoadSmallValues, TestInterpolatorLoadBigValuesUneven, TestInterpolatorLoadNormalValuesUneven, TestInterpolatorLoadSmallValuesUneven\n'), ((200, 42, 200, 75), 'raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data.TestInterpolatorLoadSmallValues', 'TestInterpolatorLoadSmallValues', ({}, {}), '()', False, 'from raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data import TestInterpolatorLoadBigValues, TestInterpolatorLoadNormalValues, TestInterpolatorLoadSmallValues, TestInterpolatorLoadBigValuesUneven, TestInterpolatorLoadNormalValuesUneven, TestInterpolatorLoadSmallValuesUneven\n'), ((202, 42, 202, 76), 'raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data.TestInterpolatorLoadNormalValues', 'TestInterpolatorLoadNormalValues', ({}, {}), '()', False, 'from raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data import TestInterpolatorLoadBigValues, TestInterpolatorLoadNormalValues, TestInterpolatorLoadSmallValues, TestInterpolatorLoadBigValuesUneven, TestInterpolatorLoadNormalValuesUneven, TestInterpolatorLoadSmallValuesUneven\n'), ((276, 29, 276, 67), 'numpy.where', 'np.where', ({(276, 38, 276, 66): 'x_in[index_x_in] == xsamples'}, {}), '(x_in[index_x_in] == xsamples)', True, 'import numpy as np\n'), ((277, 41, 277, 95), 'numpy.where', 'np.where', ({(277, 50, 277, 94): 'y_in[index_y_in] == ysamples_lower_and_upper'}, {}), '(y_in[index_y_in] == ysamples_lower_and_upper)', True, 'import numpy as np\n'), ((280, 41, 280, 95), 'numpy.where', 'np.where', ({(280, 50, 280, 94): 'z_in[index_z_in] == zsamples_lower_and_upper'}, {}), '(z_in[index_z_in] == zsamples_lower_and_upper)', True, 'import numpy as np\n'), ((341, 35, 341, 76), 'numpy.where', 'np.where', ({(341, 44, 341, 75): 'y_in[index_y_print] == ysamples'}, {}), '(y_in[index_y_print] == ysamples)', True, 'import numpy as np\n'), ((342, 35, 342, 76), 'numpy.where', 'np.where', ({(342, 44, 342, 75): 'z_in[index_z_print] == zsamples'}, {}), '(z_in[index_z_print] == zsamples)', True, 'import numpy as np\n'), ((357, 45, 357, 99), 'numpy.where', 'np.where', ({(357, 54, 357, 98): 'x_in[index_x_in] == xsamples_lower_and_upper'}, {}), '(x_in[index_x_in] == xsamples_lower_and_upper)', True, 'import numpy as np\n')] |
girish946/supertokens-python | supertokens_python/recipe_module.py | ce0e7f6035941b3a8d3d1f7ae867224fd9c41c3c | # Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License") as published by the Apache Software Foundation.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import abc
from typing import Union, List, TYPE_CHECKING
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from .framework.response import BaseResponse
if TYPE_CHECKING:
from supertokens_python.framework.request import BaseRequest
from .supertokens import AppInfo
from .normalised_url_path import NormalisedURLPath
from .exceptions import SuperTokensError
class RecipeModule(abc.ABC):
def __init__(self, recipe_id: str, app_info: AppInfo):
self.recipe_id = recipe_id
self.app_info = app_info
def get_recipe_id(self):
return self.recipe_id
def get_app_info(self):
return self.app_info
def return_api_id_if_can_handle_request(
self, path: NormalisedURLPath, method: str) -> Union[str, None]:
apis_handled = self.get_apis_handled()
for current_api in apis_handled:
if not current_api.disabled and current_api.method == method and self.app_info.api_base_path.append(
current_api.path_without_api_base_path).equals(path):
return current_api.request_id
return None
@abc.abstractmethod
def is_error_from_this_recipe_based_on_instance(self, err):
pass
@abc.abstractmethod
def get_apis_handled(self) -> List[APIHandled]:
pass
@abc.abstractmethod
async def handle_api_request(self, request_id: str, request: BaseRequest, path: NormalisedURLPath, method: str,
response: BaseResponse):
pass
@abc.abstractmethod
async def handle_error(self, request: BaseRequest, err: SuperTokensError, response: BaseResponse):
pass
@abc.abstractmethod
def get_all_cors_headers(self):
pass
class APIHandled:
def __init__(self, path_without_api_base_path: NormalisedURLPath,
method: Literal['post', 'get', 'delete', 'put', 'options', 'trace'], request_id: str, disabled: bool):
self.path_without_api_base_path = path_without_api_base_path
self.method = method
self.request_id = request_id
self.disabled = disabled
| [] |
mihaidumitrescu/flake8-html | tests/__init__.py | d5b62c05fb220a5cd6c777feacd69cb726a42e9a | # -*- coding: utf-8 -*-
"""Tests go in this directory."""
| [] |
Yambottle/dj-workflow-template | datajoint-workflow/{{cookiecutter.github_repo}}/src/{{cookiecutter.__pkg_import_name}}/version.py | a47a354af2f9303c898ef403491e69cfc396d196 | __version__ = "{{cookiecutter._pkg_version}}"
| [] |
shibing624/similarities | examples/benchmarking/benchmark_bm25.py | f573ae158b0e2a908c1ef549784bd88e23cbd9c6 | # -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
"""
import datetime
import os
import pathlib
import random
import sys
from loguru import logger
sys.path.append('../..')
from similarities import BM25Similarity
from similarities.utils import http_get
from similarities.data_loader import SearchDataLoader
from similarities.evaluation import evaluate
random.seed(42)
pwd_path = os.path.dirname(os.path.realpath(__file__))
def get_scifact():
# Download scifact.zip dataset and unzip the dataset
dataset = "scifact"
url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip".format(dataset)
zip_file = os.path.join(pwd_path, "scifact.zip")
if not os.path.exists(zip_file):
logger.info("Dataset not exists, downloading...")
http_get(url, zip_file, extract=True)
else:
logger.info("Dataset already exists, skipping download.")
data_path = os.path.join(pwd_path, dataset)
return data_path
def get_dbpedia():
dataset = "dbpedia-entity"
url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip".format(dataset)
zip_file = os.path.join(pwd_path, "dbpedia-entity.zip")
if not os.path.exists(zip_file):
logger.info("Dataset not exists, downloading...")
http_get(url, zip_file, extract=True)
else:
logger.info("Dataset already exists, skipping download.")
data_path = os.path.join(pwd_path, dataset)
return data_path
data_path = get_scifact()
#### Load the test queries and corpus (SciFact here; switch data_path to get_dbpedia() for DBPedia-Entity)
corpus, queries, qrels = SearchDataLoader(data_path).load(split="test")
corpus_ids, query_ids = list(corpus), list(queries)
logger.info(f"corpus: {len(corpus)}, queries: {len(queries)}")
#### Randomly sample up to 1M docs from the corpus (the 4.63M figure refers to the full DBPedia-Entity corpus)
#### First include all relevant documents (i.e. present in qrels)
corpus_set = set()
for query_id in qrels:
corpus_set.update(list(qrels[query_id].keys()))
corpus_new = {corpus_id: corpus[corpus_id] for corpus_id in corpus_set}
#### Remove already seen k relevant documents and sample (1M - k) docs randomly
remaining_corpus = list(set(corpus_ids) - corpus_set)
sample = min(1000000 - len(corpus_set), len(remaining_corpus))
# sample = 10
for corpus_id in random.sample(remaining_corpus, sample):
corpus_new[corpus_id] = corpus[corpus_id]
corpus_docs = {corpus_id: corpus_new[corpus_id]['title'] + corpus_new[corpus_id]['text'] for corpus_id, corpus in
corpus_new.items()}
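# Illustrative shape of corpus_docs (hypothetical ids and text, for orientation only):
#   {"doc_1": "A title. The passage text ...", "doc_2": "...", ...}
# i.e. each entry concatenates a document's title and text, keyed by its corpus id.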
#### Index 1M passages into the index (separately)
model = BM25Similarity(corpus_docs)
#### Saving benchmark times
time_taken_all = {}
for query_id in query_ids:
query = {query_id: queries[query_id]}
#### Measure time to retrieve top-10 BM25 documents using single query latency
start = datetime.datetime.now()
q_res = model.most_similar(query, topn=10)
end = datetime.datetime.now()
# print(q_res)
#### Measuring time taken in ms (milliseconds)
time_taken = (end - start)
time_taken = time_taken.total_seconds() * 1000
time_taken_all[query_id] = time_taken
# logger.info("query: {}: {} {:.2f}ms".format(query_id, query, time_taken))
# logger.info("\tsearch result: {}".format(results[:2]))
time_taken = list(time_taken_all.values())
logger.info("Average time taken: {:.2f}ms".format(sum(time_taken) / len(time_taken_all)))
#### Saving benchmark times with batch
# queries = [queries[query_id] for query_id in query_ids]
start = datetime.datetime.now()
results = model.most_similar(queries, topn=10)
end = datetime.datetime.now()
#### Measuring time taken in ms (milliseconds)
time_taken = (end - start)
time_taken = time_taken.total_seconds() * 1000
logger.info("All, Spend {:.2f}ms".format(time_taken))
logger.info("Average time taken: {:.2f}ms".format(time_taken / len(queries)))
logger.info(f"Results size: {len(results)}")
#### Evaluate your retrieval using NDCG@k, MAP@K ...
ndcg, _map, recall, precision = evaluate(qrels, results)
logger.info(f"MAP: {_map}")
| [((14, 0, 14, 24), 'sys.path.append', 'sys.path.append', ({(14, 16, 14, 23): '"""../.."""'}, {}), "('../..')", False, 'import sys\n'), ((20, 0, 20, 15), 'random.seed', 'random.seed', ({(20, 12, 20, 14): '(42)'}, {}), '(42)', False, 'import random\n'), ((70, 17, 70, 56), 'random.sample', 'random.sample', ({(70, 31, 70, 47): 'remaining_corpus', (70, 49, 70, 55): 'sample'}, {}), '(remaining_corpus, sample)', False, 'import random\n'), ((76, 8, 76, 35), 'similarities.BM25Similarity', 'BM25Similarity', ({(76, 23, 76, 34): 'corpus_docs'}, {}), '(corpus_docs)', False, 'from similarities import BM25Similarity\n'), ((100, 8, 100, 31), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((102, 6, 102, 29), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((111, 32, 111, 56), 'similarities.evaluation.evaluate', 'evaluate', ({(111, 41, 111, 46): 'qrels', (111, 48, 111, 55): 'results'}, {}), '(qrels, results)', False, 'from similarities.evaluation import evaluate\n'), ((112, 0, 112, 27), 'loguru.logger.info', 'logger.info', ({(112, 12, 112, 26): 'f"""MAP: {_map}"""'}, {}), "(f'MAP: {_map}')", False, 'from loguru import logger\n'), ((22, 27, 22, 53), 'os.path.realpath', 'os.path.realpath', ({(22, 44, 22, 52): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((29, 15, 29, 52), 'os.path.join', 'os.path.join', ({(29, 28, 29, 36): 'pwd_path', (29, 38, 29, 51): '"""scifact.zip"""'}, {}), "(pwd_path, 'scifact.zip')", False, 'import os\n'), ((35, 16, 35, 47), 'os.path.join', 'os.path.join', ({(35, 29, 35, 37): 'pwd_path', (35, 39, 35, 46): 'dataset'}, {}), '(pwd_path, dataset)', False, 'import os\n'), ((42, 15, 42, 59), 'os.path.join', 'os.path.join', ({(42, 28, 42, 36): 'pwd_path', (42, 38, 42, 58): '"""dbpedia-entity.zip"""'}, {}), "(pwd_path, 'dbpedia-entity.zip')", False, 'import os\n'), ((48, 16, 48, 47), 'os.path.join', 'os.path.join', ({(48, 29, 48, 37): 'pwd_path', (48, 39, 48, 46): 'dataset'}, {}), '(pwd_path, dataset)', False, 'import os\n'), ((83, 12, 83, 35), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((85, 10, 85, 33), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((30, 11, 30, 35), 'os.path.exists', 'os.path.exists', ({(30, 26, 30, 34): 'zip_file'}, {}), '(zip_file)', False, 'import os\n'), ((31, 8, 31, 57), 'loguru.logger.info', 'logger.info', ({(31, 20, 31, 56): '"""Dataset not exists, downloading..."""'}, {}), "('Dataset not exists, downloading...')", False, 'from loguru import logger\n'), ((32, 8, 32, 45), 'similarities.utils.http_get', 'http_get', (), '', False, 'from similarities.utils import http_get\n'), ((34, 8, 34, 65), 'loguru.logger.info', 'logger.info', ({(34, 20, 34, 64): '"""Dataset already exists, skipping download."""'}, {}), "('Dataset already exists, skipping download.')", False, 'from loguru import logger\n'), ((43, 11, 43, 35), 'os.path.exists', 'os.path.exists', ({(43, 26, 43, 34): 'zip_file'}, {}), '(zip_file)', False, 'import os\n'), ((44, 8, 44, 57), 'loguru.logger.info', 'logger.info', ({(44, 20, 44, 56): '"""Dataset not exists, downloading..."""'}, {}), "('Dataset not exists, downloading...')", False, 'from loguru import logger\n'), ((45, 8, 45, 45), 'similarities.utils.http_get', 'http_get', (), '', False, 'from similarities.utils import http_get\n'), ((47, 8, 47, 65), 'loguru.logger.info', 'logger.info', ({(47, 20, 47, 64): '"""Dataset already exists, skipping 
download."""'}, {}), "('Dataset already exists, skipping download.')", False, 'from loguru import logger\n'), ((54, 25, 54, 52), 'similarities.data_loader.SearchDataLoader', 'SearchDataLoader', ({(54, 42, 54, 51): 'data_path'}, {}), '(data_path)', False, 'from similarities.data_loader import SearchDataLoader\n')] |
sergachev/verilog-ethernet | tb/test_arp_64.py | cef6b47bb3b969120cabce3b89b0c98bb47ca6a9 | #!/usr/bin/env python
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
import arp_ep
module = 'arp_64'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/lfsr.v")
srcs.append("../rtl/arp_cache.v")
srcs.append("../rtl/arp_eth_rx_64.v")
srcs.append("../rtl/arp_eth_tx_64.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_eth_hdr_valid = Signal(bool(0))
s_eth_dest_mac = Signal(intbv(0)[48:])
s_eth_src_mac = Signal(intbv(0)[48:])
s_eth_type = Signal(intbv(0)[16:])
s_eth_payload_axis_tdata = Signal(intbv(0)[64:])
s_eth_payload_axis_tkeep = Signal(intbv(0)[8:])
s_eth_payload_axis_tvalid = Signal(bool(0))
s_eth_payload_axis_tlast = Signal(bool(0))
s_eth_payload_axis_tuser = Signal(bool(0))
m_eth_payload_axis_tready = Signal(bool(0))
m_eth_hdr_ready = Signal(bool(0))
arp_request_valid = Signal(bool(0))
arp_request_ip = Signal(intbv(0)[32:])
arp_response_ready = Signal(bool(0))
local_mac = Signal(intbv(0)[48:])
local_ip = Signal(intbv(0)[32:])
gateway_ip = Signal(intbv(0)[32:])
subnet_mask = Signal(intbv(0)[32:])
clear_cache = Signal(bool(0))
# Outputs
s_eth_hdr_ready = Signal(bool(0))
s_eth_payload_axis_tready = Signal(bool(0))
m_eth_hdr_valid = Signal(bool(0))
m_eth_dest_mac = Signal(intbv(0)[48:])
m_eth_src_mac = Signal(intbv(0)[48:])
m_eth_type = Signal(intbv(0)[16:])
m_eth_payload_axis_tdata = Signal(intbv(0)[64:])
m_eth_payload_axis_tkeep = Signal(intbv(0)[8:])
m_eth_payload_axis_tvalid = Signal(bool(0))
m_eth_payload_axis_tlast = Signal(bool(0))
m_eth_payload_axis_tuser = Signal(bool(0))
arp_request_ready = Signal(bool(0))
arp_response_valid = Signal(bool(0))
arp_response_error = Signal(bool(0))
arp_response_mac = Signal(intbv(0)[48:])
# sources and sinks
eth_source_pause = Signal(bool(0))
eth_sink_pause = Signal(bool(0))
eth_source = eth_ep.EthFrameSource()
eth_source_logic = eth_source.create_logic(
clk,
rst,
eth_hdr_ready=s_eth_hdr_ready,
eth_hdr_valid=s_eth_hdr_valid,
eth_dest_mac=s_eth_dest_mac,
eth_src_mac=s_eth_src_mac,
eth_type=s_eth_type,
eth_payload_tdata=s_eth_payload_axis_tdata,
eth_payload_tkeep=s_eth_payload_axis_tkeep,
eth_payload_tvalid=s_eth_payload_axis_tvalid,
eth_payload_tready=s_eth_payload_axis_tready,
eth_payload_tlast=s_eth_payload_axis_tlast,
eth_payload_tuser=s_eth_payload_axis_tuser,
pause=eth_source_pause,
name='eth_source'
)
eth_sink = eth_ep.EthFrameSink()
eth_sink_logic = eth_sink.create_logic(
clk,
rst,
eth_hdr_ready=m_eth_hdr_ready,
eth_hdr_valid=m_eth_hdr_valid,
eth_dest_mac=m_eth_dest_mac,
eth_src_mac=m_eth_src_mac,
eth_type=m_eth_type,
eth_payload_tdata=m_eth_payload_axis_tdata,
eth_payload_tkeep=m_eth_payload_axis_tkeep,
eth_payload_tvalid=m_eth_payload_axis_tvalid,
eth_payload_tready=m_eth_payload_axis_tready,
eth_payload_tlast=m_eth_payload_axis_tlast,
eth_payload_tuser=m_eth_payload_axis_tuser,
pause=eth_sink_pause,
name='eth_sink'
)
arp_request_source = axis_ep.AXIStreamSource()
arp_request_source_logic = arp_request_source.create_logic(
clk,
rst,
tdata=(arp_request_ip,),
tvalid=arp_request_valid,
tready=arp_request_ready,
name='arp_request_source'
)
arp_response_sink = axis_ep.AXIStreamSink()
arp_response_sink_logic = arp_response_sink.create_logic(
clk,
rst,
tdata=(arp_response_error, arp_response_mac),
tvalid=arp_response_valid,
tready=arp_response_ready,
name='arp_response_sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_eth_hdr_valid=s_eth_hdr_valid,
s_eth_hdr_ready=s_eth_hdr_ready,
s_eth_dest_mac=s_eth_dest_mac,
s_eth_src_mac=s_eth_src_mac,
s_eth_type=s_eth_type,
s_eth_payload_axis_tdata=s_eth_payload_axis_tdata,
s_eth_payload_axis_tkeep=s_eth_payload_axis_tkeep,
s_eth_payload_axis_tvalid=s_eth_payload_axis_tvalid,
s_eth_payload_axis_tready=s_eth_payload_axis_tready,
s_eth_payload_axis_tlast=s_eth_payload_axis_tlast,
s_eth_payload_axis_tuser=s_eth_payload_axis_tuser,
m_eth_hdr_valid=m_eth_hdr_valid,
m_eth_hdr_ready=m_eth_hdr_ready,
m_eth_dest_mac=m_eth_dest_mac,
m_eth_src_mac=m_eth_src_mac,
m_eth_type=m_eth_type,
m_eth_payload_axis_tdata=m_eth_payload_axis_tdata,
m_eth_payload_axis_tkeep=m_eth_payload_axis_tkeep,
m_eth_payload_axis_tvalid=m_eth_payload_axis_tvalid,
m_eth_payload_axis_tready=m_eth_payload_axis_tready,
m_eth_payload_axis_tlast=m_eth_payload_axis_tlast,
m_eth_payload_axis_tuser=m_eth_payload_axis_tuser,
arp_request_valid=arp_request_valid,
arp_request_ready=arp_request_ready,
arp_request_ip=arp_request_ip,
arp_response_valid=arp_response_valid,
arp_response_ready=arp_response_ready,
arp_response_error=arp_response_error,
arp_response_mac=arp_response_mac,
local_mac=local_mac,
local_ip=local_ip,
gateway_ip=gateway_ip,
subnet_mask=subnet_mask,
clear_cache=clear_cache
)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
local_mac.next = 0xDAD1D2D3D4D5
local_ip.next = 0xc0a80165
gateway_ip.next = 0xc0a80101
subnet_mask.next = 0xFFFFFF00
yield clk.posedge
print("test 1: ARP request")
current_test.next = 1
test_frame = arp_ep.ARPFrame()
test_frame.eth_dest_mac = 0xFFFFFFFFFFFF
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x0806
test_frame.arp_htype = 0x0001
test_frame.arp_ptype = 0x0800
test_frame.arp_hlen = 6
test_frame.arp_plen = 4
test_frame.arp_oper = 1
test_frame.arp_sha = 0x5A5152535455
test_frame.arp_spa = 0xc0a80164
test_frame.arp_tha = 0x000000000000
test_frame.arp_tpa = 0xc0a80165
eth_source.send(test_frame.build_eth())
yield eth_sink.wait()
rx_frame = eth_sink.recv()
check_frame = arp_ep.ARPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame.eth_dest_mac == 0x5A5152535455
assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_type == 0x0806
assert check_frame.arp_htype == 0x0001
assert check_frame.arp_ptype == 0x0800
assert check_frame.arp_hlen == 6
assert check_frame.arp_plen == 4
assert check_frame.arp_oper == 2
assert check_frame.arp_sha == 0xDAD1D2D3D4D5
assert check_frame.arp_spa == 0xc0a80165
assert check_frame.arp_tha == 0x5A5152535455
assert check_frame.arp_tpa == 0xc0a80164
yield delay(100)
yield clk.posedge
print("test 2: Cached read")
current_test.next = 2
arp_request_source.send([(0xc0a80164,)])
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert not err
assert mac == 0x5A5152535455
yield delay(100)
yield clk.posedge
print("test 3: Unached read")
current_test.next = 3
arp_request_source.send([(0xc0a80166,)])
# wait for ARP request packet
yield eth_sink.wait()
rx_frame = eth_sink.recv()
check_frame = arp_ep.ARPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF
assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_type == 0x0806
assert check_frame.arp_htype == 0x0001
assert check_frame.arp_ptype == 0x0800
assert check_frame.arp_hlen == 6
assert check_frame.arp_plen == 4
assert check_frame.arp_oper == 1
assert check_frame.arp_sha == 0xDAD1D2D3D4D5
assert check_frame.arp_spa == 0xc0a80165
assert check_frame.arp_tha == 0x000000000000
assert check_frame.arp_tpa == 0xc0a80166
# generate response
test_frame = arp_ep.ARPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x6A6162636465
test_frame.eth_type = 0x0806
test_frame.arp_htype = 0x0001
test_frame.arp_ptype = 0x0800
test_frame.arp_hlen = 6
test_frame.arp_plen = 4
test_frame.arp_oper = 2
test_frame.arp_sha = 0x6A6162636465
test_frame.arp_spa = 0xc0a80166
test_frame.arp_tha = 0xDAD1D2D3D4D5
test_frame.arp_tpa = 0xc0a80165
eth_source.send(test_frame.build_eth())
# wait for lookup
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert not err
assert mac == 0x6A6162636465
yield delay(100)
yield clk.posedge
print("test 4: Unached read, outside of subnet")
current_test.next = 4
arp_request_source.send([(0x08080808,)])
# wait for ARP request packet
yield eth_sink.wait()
rx_frame = eth_sink.recv()
check_frame = arp_ep.ARPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF
assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_type == 0x0806
assert check_frame.arp_htype == 0x0001
assert check_frame.arp_ptype == 0x0800
assert check_frame.arp_hlen == 6
assert check_frame.arp_plen == 4
assert check_frame.arp_oper == 1
assert check_frame.arp_sha == 0xDAD1D2D3D4D5
assert check_frame.arp_spa == 0xc0a80165
assert check_frame.arp_tha == 0x000000000000
assert check_frame.arp_tpa == 0xc0a80101
# generate response
test_frame = arp_ep.ARPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0xAABBCCDDEEFF
test_frame.eth_type = 0x0806
test_frame.arp_htype = 0x0001
test_frame.arp_ptype = 0x0800
test_frame.arp_hlen = 6
test_frame.arp_plen = 4
test_frame.arp_oper = 2
test_frame.arp_sha = 0xAABBCCDDEEFF
test_frame.arp_spa = 0xc0a80101
test_frame.arp_tha = 0xDAD1D2D3D4D5
test_frame.arp_tpa = 0xc0a80165
eth_source.send(test_frame.build_eth())
# wait for lookup
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert not err
assert mac == 0xAABBCCDDEEFF
yield delay(100)
yield clk.posedge
print("test 5: Unached read, timeout")
current_test.next = 5
arp_request_source.send([(0xc0a80167,)])
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert err
# check for 4 ARP requests
assert eth_sink.count() == 4
while not eth_sink.empty():
rx_frame = eth_sink.recv()
check_frame = arp_ep.ARPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF
assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_type == 0x0806
assert check_frame.arp_htype == 0x0001
assert check_frame.arp_ptype == 0x0800
assert check_frame.arp_hlen == 6
assert check_frame.arp_plen == 4
assert check_frame.arp_oper == 1
assert check_frame.arp_sha == 0xDAD1D2D3D4D5
assert check_frame.arp_spa == 0xc0a80165
assert check_frame.arp_tha == 0x000000000000
assert check_frame.arp_tpa == 0xc0a80167
yield delay(100)
yield clk.posedge
print("test 6: Broadcast")
current_test.next = 6
# subnet broadcast
arp_request_source.send([(0xc0a801ff,)])
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert not err
assert mac == 0xffffffffffff
# general broadcast
arp_request_source.send([(0xffffffff,)])
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert not err
assert mac == 0xffffffffffff
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| [((102, 17, 102, 40), 'eth_ep.EthFrameSource', 'eth_ep.EthFrameSource', ({}, {}), '()', False, 'import eth_ep\n'), ((122, 15, 122, 36), 'eth_ep.EthFrameSink', 'eth_ep.EthFrameSink', ({}, {}), '()', False, 'import eth_ep\n'), ((142, 25, 142, 50), 'axis_ep.AXIStreamSource', 'axis_ep.AXIStreamSource', ({}, {}), '()', False, 'import axis_ep\n'), ((153, 24, 153, 47), 'axis_ep.AXIStreamSink', 'axis_ep.AXIStreamSink', ({}, {}), '()', False, 'import axis_ep\n'), ((165, 7, 165, 27), 'os.system', 'os.system', ({(165, 17, 165, 26): 'build_cmd'}, {}), '(build_cmd)', False, 'import os\n'), ((238, 21, 238, 38), 'arp_ep.ARPFrame', 'arp_ep.ARPFrame', ({}, {}), '()', False, 'import arp_ep\n'), ((255, 22, 255, 39), 'arp_ep.ARPFrame', 'arp_ep.ARPFrame', ({}, {}), '()', False, 'import arp_ep\n'), ((296, 22, 296, 39), 'arp_ep.ARPFrame', 'arp_ep.ARPFrame', ({}, {}), '()', False, 'import arp_ep\n'), ((313, 21, 313, 38), 'arp_ep.ARPFrame', 'arp_ep.ARPFrame', ({}, {}), '()', False, 'import arp_ep\n'), ((346, 22, 346, 39), 'arp_ep.ARPFrame', 'arp_ep.ARPFrame', ({}, {}), '()', False, 'import arp_ep\n'), ((363, 21, 363, 38), 'arp_ep.ARPFrame', 'arp_ep.ARPFrame', ({}, {}), '()', False, 'import arp_ep\n'), ((404, 26, 404, 43), 'arp_ep.ARPFrame', 'arp_ep.ARPFrame', ({}, {}), '()', False, 'import arp_ep\n')] |
ATRS7391/Discord_Nitro_Generator_And_Checker_Python_Version | NitroGenerator.py | 65c6e6e18e640afb4fc433394a9e646c7fe4f4fa | import random
import sys
import subprocess
def pip_install(module: str):
subprocess.run([sys.executable, "-m", "pip", "-q", "--disable-pip-version-check", "install", module])
try:
import requests
except:
print("'requests' module not found! Trying to install... ")
pip_install("requests")
import requests
def print_header():
header = """
+-------------------------+
| Discord Nitro Generator |
+-------------------------+
Note: For Educational Purposes Only
© ATRS 2021. All Rights Reserved.
"""
print(header)
def get_code(nitro_type: str):
characters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u',
'v', 'w', 'x', 'y', 'z', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'A', 'B', 'C', 'D', 'E',
'F',
'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
if nitro_type == "Boost":
return str("".join([random.choice(characters) for char in range(24)]))
elif nitro_type == "Classic":
return str("".join([random.choice(characters) for char in range(16)]))
def check_code(nitro_code: str):
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
check_url = f"https://discordapp.com/api/v6/entitlements/gift-codes/{nitro_code}?with_application=false&with_subscription_plan=true"
status = requests.get(url=check_url, headers=headers).status_code
if status == 200:
return "True"
elif status == 429:
return "None"
else:
return "False"
except:
print("Something went wrong while checking urls. Press any key to exit. ")
input()
quit()
def get_nitro_type():
print("Enter what type of Discord Nitro you want to generate: \n\t1. Boost\n\t2. Classic")
user_response = input("> ")
if user_response.replace(" ", "").strip().lower() == "boost" or user_response.replace(" ",
"").strip().lower() == "1":
return "Boost"
elif user_response.replace(" ", "").strip().lower() == "classic" or user_response.replace(" ",
"").strip().lower() == "2":
return "Classic"
else:
print("Not a valid input. Press any key to exit. ")
input()
quit()
print_header()
user_nitro_type = get_nitro_type()
print("Enter the number of Nitro Codes you want: ")
amount = int(input("> "))
valid_codes = 0
invalid_codes = 0
unchecked_codes = 0
print()
print()
f = open("All_Nitro_Codes.txt", "w", encoding='utf-8')
for i in range(amount):
user_nitro_code = get_code(nitro_type=user_nitro_type)
validity = check_code(nitro_code=user_nitro_code)
if validity == "True":
display = f"Valid. | https://discord.com/gifts/{user_nitro_code}"
valid_codes += 1
print(display)
f.writelines(display + "\n")
elif validity == "False":
display = f"Invalid. | https://discord.com/gifts/{user_nitro_code}"
invalid_codes += 1
print(display)
f.writelines(display + "\n")
elif validity == "None":
display = f"Unchecked. Rate limited. | https://discord.com/gifts/{user_nitro_code}"
unchecked_codes += 1
print(display)
f.writelines(display + "\n")
print("\n\nSuccessfully generated Nitro Codes. ")
print("Valid Nitro Codes: " + str(valid_codes))
print("Invalid Nitro Codes: " + str(invalid_codes))
print("Unchecked Nitro Codes: " + str(unchecked_codes))
print("\nEnter any key to exit.")
input()
quit()
| [((7, 4, 7, 105), 'subprocess.run', 'subprocess.run', ({(7, 19, 7, 104): "[sys.executable, '-m', 'pip', '-q', '--disable-pip-version-check',\n 'install', module]"}, {}), "([sys.executable, '-m', 'pip', '-q',\n '--disable-pip-version-check', 'install', module])", False, 'import subprocess\n'), ((55, 17, 55, 61), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((40, 28, 40, 53), 'random.choice', 'random.choice', ({(40, 42, 40, 52): 'characters'}, {}), '(characters)', False, 'import random\n'), ((42, 28, 42, 53), 'random.choice', 'random.choice', ({(42, 42, 42, 52): 'characters'}, {}), '(characters)', False, 'import random\n')] |
sgravrock/adventofcode | 2015/main/13/part2.py | 1f5263ee242c8446ac1c08d2aef195a0a4595ccb | import sys
import itertools
def readfile(f):
result = {}
for line in f:
fields = line.rstrip().split(" ")
p1 = fields[0]
p2 = fields[10].replace(".", "")
n = int(fields[3])
if fields[2] == "lose":
n *= -1
result[(p1, p2)] = n
return result
def optimal(config):
add_self(config)
diners = set([k[0] for k in config.keys()])
arrangements = list(itertools.permutations(diners))
all = [(arr, happiness(config, arr)) for arr in arrangements]
return max(all, key=lambda p: p[1])
def happiness(config, arrangement):
return sum([happiness_for_pair(config, p) for p in makepairs(arrangement)])
def happiness_for_pair(config, pair):
opposite = (pair[1], pair[0])
return config[pair] + config[opposite]
def add_self(config):
for d in set([k[0] for k in config.keys()]):
config[(d, "self")] = 0
config[("self", d)] = 0
def makepairs(arr):
n = len(arr)
for i in xrange(1, n):
yield (arr[i-1], arr[i])
yield (arr[n-1], arr[0])
if __name__ == "__main__":
print optimal(readfile(sys.stdin))
| [] |
bcgrendel/python_networking | networking/connection/stun_client.py | b4c847d9eeeea078868b8dcb3d385e02eb0b8e96 | import socket
import sys
import traceback
import struct
import threading;
from threading import Thread;
import time;
import datetime;
import json
#import buffered_message;
import hashlib
from Crypto.PublicKey import RSA
from connection_state import ConnectionState
# publickey = RSA.importKey(key_string)
import tcp;
import udp;
# *************
# EXAMPLE USAGE
# *************
'''
import socket
import tcp
import udp
import stun_client
import time
start_listening = True
local_ip = socket.gethostbyname(socket.gethostname())
local_port = 30779
server_ip = socket.gethostbyname(socket.gethostname())
server_port = 30788
socket_timeout = 3.0
peer_block_manager = None
client = stun_client.STUN_Client(start_listening, local_ip, local_port, server_ip, server_port, socket_timeout, peer_block_manager)
# Set your available listening port ranges
client.available_ports = [[35000, 35100], [36500, 36700],]
# Register a user account with the stun server.
class RegisterCallback:
def __init__(self):
self.error_message = ""
self.success = None
def handle_timeout(self, params=None):
self.success = False
self.error_message = "Registration request to server has timed-out."
def complete_registration(self, success, error_message=""):
self.success = success
self.error_message = error_message
username = "test_user"
password = "test_pass123"
profile_map = {}
callback_object = RegisterCallback()
registration_type = "permanent"
client.register(username, password, profile_map, callback_object, registration_type)
response_check_interval = 0.5;
while callback_object.success == None:
time.sleep(response_check_interval)
if not callback_object.success:
print "Error: %s" % callback_object.error_message
exit()
# Login with username and password.
class AuthCallback:
def __init__(self):
self.error_message = ""
self.success = None
def handle_timeout(self, params=None):
self.success = False
self.error_message = "Authentication request to server has timed-out."
def complete_authentication(self, success, error_message=""):
self.success = success
self.error_message = error_message
callback_object = AuthCallback()
login = True # this authentication is to login. It'd be False if we wanted to log out.
client.authenticate(username, password, callback_object, login)
while callback_object.success == None:
time.sleep(response_check_interval)
if not callback_object.success:
print "Error: %s" % callback_object.error_message
exit()
# Now we can access the list of peers connected to the server.
# Alternatively, assign a function reference to client.peer_map_callback (argument will be a reference to client.peer_map) to be notified of peer list updates as they are received.
#
# sample peer_map:
# ["test_user":["test_user", None], "another_user":["another_user", None],]
# Get a peer from the list.
peer_username = None;
for _username, data in client.peer_map.iteritems():
if username != _username:
peer_username = _username
break
# Connect to that peer (hole-punch)
class ConnectionCallback:
def __init__(self):
self.error_message = ""
self.success = None
self.client_key = None
def handle_timeout(self, params=None):
self.success = False
self.error_message = "Connection request to server has timed-out."
def complete_connection(self, peer_username, success, error_message=""):
self.success = success
if success:
self.client_key = error_message
else:
self.error_message = error_message
buffer_size = 128
callback_object = ConnectionCallback()
client.connect_to_peer(peer_username, buffer_size, callback_object)
while callback_object.success == None:
time.sleep(response_check_interval)
if not callback_object.success:
print "Error: %s" % callback_object.error_message
exit()
client_key = callback_object.client_key
udp_client = client.client_map[client_key]
# Now you can communicate with that peer.
udp_client.send_message("Greetings!")
udp_client.pop_all_messages()
'''
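# A minimal teardown sketch to complement the example above (assumes the example ran and that
# `client` and `udp_client` are the objects it created; method names are from this module):
#
#   udp_client.disconnect()            # close the hole-punched UDP session
#   client.shutdown(stun_only=False)   # stop the STUN session and drop all UDP clients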
class STUN_Client:
def __init__(self,
start_listen_thread=False,
local_ip=socket.gethostbyname(socket.gethostname()),
local_port=30779,
server_ip=socket.gethostbyname(socket.gethostname()),
server_port=30788,
socket_timeout=3.0,
peer_block_manager=None):
self.local_ip = local_ip;
self.local_port = local_port;
self.socket_timeout = socket_timeout;
self.peer_block_manager = peer_block_manager;
self.thread_sleep_duration = 0.1;
self.error_log = [];
self.username = None;
self.password = None;
self.profile_map = {};
self.authenticated = False;
self.auth_callback = None;
self.auth_keys = None;
self.auth_timeout = 15; # 15 seconds is the limit for authentication requests. It's just a magic number like many of these timeout values.
self.last_auth = None;
self.login_expiration = 20; # login will expire after this many seconds passes without successful keep-alive authentication
self.auth_keep_alive_interval = 5;
self.auth_keep_alive_multiplier = 1; # Avoid hammering the server if it's down. Will increment every time re-auth fails, returns to 1 upon successful authentication.
self.re_auth_ready = None;
self.master_log = []; # all messages recieved
self.message_log_map = {}; # log per message type.
# this will handle callbacks for keeping track of whether the user's authentication expires (namely from losing connection to the server.)
self.authentication_monitor_object = None;
self.hole_punch_timeout = 20;
self.hole_punch_max_attempts = 20;
self.server_response_timeout = 20;
# Server response flags. Set to None when sending a request; they are flipped to True upon receiving a response. Used for determining response time-out.
self._auth_status = None;
self._registration_status = None; # Private. Internal use only.
self._holepunch_status = {};
self.available_ports = [[34000, 34100],] # list of ranges, e.g. ports 34000 - 34100
self.used_ports = [];
self.registration_key = None;
self.udp_client_keep_alive_timeout = 30;
# dictionary of active udp connections (hole-punched)
self.client_map = {};
self.callback_map = {};
self.send_queue = [];
self.connection_state = ConnectionState(False);
# Initialize TCP client.
self.init_tcp_client(server_ip, server_port);
self.peer_map = {};
# Start listening to the stun server.
self.init_stun_listener();
self.keep_alive_monitor = KeepAliveMonitor(self);
self.peer_map_callback = None;
def shutdown(self, stun_only=True):
self.authenticated = False;
self.connection_state.active = False; # kills main thread, making the logout auth sequence impossible in its current implementation (get salt/key, then perform request) which needs the main loop.
self.stun_client.disconnect();
if not stun_only:
# disconnect all udp clients...
for key, client in self.client_map.iteritems():
client.disconnect();
self.client_map.clear();
self.peer_map.clear();
del self.used_ports[:]
def restart(self, stun_only=True):
self.shutdown(stun_only);
self.init_tcp_client(self.server_ip, self.server_port);
self.init_stun_listener();
def log_error(self, error_message, extra=None):
err_msg = "[STUN_Server] Line #%s: %s\n\n%s" % (str(traceback.tb_lineno(sys.exc_traceback)), traceback.format_exc(), sys.exc_info());
timestamp = time.time();
date_string = datetime.datetime.fromtimestamp(timestamp).strftime('(%Y-%m-%d) %H:%M:%S')
self.error_log.append((timestamp, date_string, err_msg, extra));
def monitor_response(self, target_object, target_key=None, timeout=20, callback=None, callback_params=None, timeout_callback=None, timeout_callback_params=None):
"""Waits until target is no longer null or timeout occurs. Timeout is in seconds. target_object and target_key should be strings.
If target key is not null, then target_object will be treated as a dictionary (using target_key for the index).
This function is best utilized on its own separate thread."""
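		# Hypothetical usage sketch (mirrors the real calls later in this class): wait, on a worker
		# thread, until self._auth_status is set by the listener, reporting a timeout otherwise:
		#   self.monitor_response("_auth_status", None, self.server_response_timeout,
		#                         None, None, callback_object.handle_timeout)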
# Wait until salt and key have been retrieved or timeout occurs.
time_elapsed = 0;
start_time = time.time();
target_attribute = getattr(self, target_object);
target = None;
connection_state = self.connection_state
#print "Monitoring for %s" % target_object;
# Behold, python lambda expressions in the wild!
if target_key == None:
target = lambda parent: getattr(parent, target_object);
else:
target = lambda parent: getattr(parent, target_object)[target_key];
while time_elapsed < timeout:
time_elapsed = time.time() - start_time;
# check for shutdown.
if not connection_state.active:
return;
# check for target condition
if target(self) != None:
break;
time.sleep(self.thread_sleep_duration);
# Check for timeout.
if target(self) == None:
#print "Timeout on %s" % target_object;
has_timeout_callback = timeout_callback != None;
if has_timeout_callback:
if timeout_callback_params != None:
timeout_callback(timeout_callback_params);
else:
timeout_callback();
return;
#else:
# print "No timeout on %s" % target_object;
# Success, run the callback if one was provided (maybe not if one is only concerned with the timeout event).
if callback != None:
if callback_params != None:
callback(target_object, target_key, callback_params);
else:
callback(target_object, target_key);
def authenticate_thread(self, username, password, callback_object=None, login=True):
# callback_object should have a complete_authentication(success, error_message) method.
self.username = username;
self.password = password;
self.auth_callback = callback_object;
timeout_handler = None;
has_timeout_handler = ((callback_object != None) and (hasattr(callback_object, "handle_timeout")))
if has_timeout_handler:
timeout_handler = callback_object.handle_timeout
# Send salt and dynamic key retrieval request.
self.auth_keys = None;
message = "auth_salt_request %s" % username;
if not self.stun_send_message(message):
#callback_object.complete_authentication(False, "Failed to connect to the server.");
if timeout_handler != None:
timeout_handler("Failed to connect to the server.");
return;
# Wait until salt and key have been retrieved or timeout occurs.
self.monitor_response("auth_keys", None, self.server_response_timeout, self.authenticate_send_credentials, [login, callback_object], timeout_handler, "Server failed to respond.");
def authenticate_send_credentials(self, target_object=None, target_key=None, params=None):
callback_object = None;
if params != None:
callback_object = params[1];
login = params[0]
# hash the password
salt, dynamic_key = self.auth_keys;
if not salt:
if callback_object != None:
callback_object.complete_authentication(False, "Failed to connect to the server.");
return;
salted_password = "%s%s" % (salt, self.password)
hashed_salted_password = hashlib.sha384(salted_password).hexdigest();
#print "hash1: %s\n" % hashed_salted_password;
key_and_hash = "%s%s" % (dynamic_key, hashed_salted_password)
hashed_password = hashlib.sha384(key_and_hash).hexdigest();
#print "hash2: %s" % hashed_password;
self._auth_status = None;
# Send authentication request.
message = "authenticate %s" % json.dumps([self.username, hashed_password, login, json.dumps(self.available_ports), json.dumps(self.used_ports)]);
if not self.stun_send_message(message):
if callback_object != None:
callback_object.complete_authentication(False, "Failed to connect to the server.");
return;
timeout_handler = None;
has_timeout_handler = ((callback_object != None) and (hasattr(callback_object, "handle_timeout")))
if has_timeout_handler:
timeout_handler = callback_object.handle_timeout
self.monitor_response("_auth_status", None, self.server_response_timeout, None, None, timeout_handler);
def registration_completion_handler(self, target_object, target_key, params):
callback_object = params;
registration_handler = None;
has_registration_handler = ((callback_object != None) and (hasattr(callback_object, "complete_registration")))
if has_registration_handler:
callback_object.complete_registration(True, "");
def send_encrypted_registration_request(self, target_object=None, target_key=None, params=None):
username, password, profile_map, callback_object, registration_type = params;
self._registration_status = None;
# Construct the message.
message = "%s" % json.dumps([username, password, profile_map, registration_type]);
# Encrypt the message.
public_key = RSA.importKey(self.registration_key)
message = public_key.encrypt(message, 32);
        # Tack on the username in plain text and json_encode again. The STUN server needs the username to determine which private key to use to decrypt the message.
message = "register %s %s" % (username, message[0]);
if not self.stun_send_message(message):
callback_object.complete_registration(False, "Failed to connect to the server.");
return;
timeout_handler = None;
has_timeout_handler = ((callback_object != None) and (hasattr(callback_object, "handle_timeout")))
if has_timeout_handler:
timeout_handler = callback_object.handle_timeout
        # Wait until the registration response has been received or timeout occurs.
self.monitor_response("_registration_status", None, self.server_response_timeout, self.registration_completion_handler, callback_object, timeout_handler);
def register_thread(self, username, password, profile_map, callback_object=None, registration_type="permanent"):
# callback_object should have a complete_registration(success, error_message) method.
self.username = username;
self.password = password;
self.profile_map = profile_map;
self.register_callback = callback_object;
self.registration_key = None;
message = "register_key %s" % username;
if not self.stun_send_message(message):
callback_object.complete_registration(False, "Failed to connect to the server.");
return;
timeout_handler = None;
has_timeout_handler = ((callback_object != None) and (hasattr(callback_object, "handle_timeout")))
if has_timeout_handler:
timeout_handler = callback_object.handle_timeout
params = [username, password, profile_map, callback_object, registration_type];
self.monitor_response("registration_key", None, self.server_response_timeout, self.send_encrypted_registration_request, params, timeout_handler);
def authenticate(self, username, password, callback_object=None, login=True):
"""Non-blocking. Sends a user authentication request."""
# Spawn a separate thread to perform authentication. This is to keep from blocking the caller, since a callback is expected to handle results.
Thread(target=self.authenticate_thread, args=(username, password, callback_object, login)).start();
def maintain_authentication(self, callback_object=None):
#self.authentication_monitor_object
username = self.username
password = self.password
last_auth = self.last_auth
self.re_auth_ready = True;
while self.authenticated:
last_reauth = self.keep_alive_monitor.last_reauth_attempt;
now = time.time();
ready_time = last_reauth + (self.auth_keep_alive_multiplier * self.auth_keep_alive_interval);
time_for_another_reauth_attempt = now >= ready_time;
            # re_auth_ready indicates that no re-authentication attempt is currently in progress.
if self.re_auth_ready and time_for_another_reauth_attempt:
self.re_auth_ready = False;
self.authenticate(self.username, self.password, self.keep_alive_monitor);
time.sleep(self.thread_sleep_duration);
def logout(self):
self.authenticated = False;
self.authenticate(self.username, self.password, self.keep_alive_monitor, False);
def register(self, username, password, profile_map, callback_object=None, registration_type="permanent"):
"""Non-blocking. Sends a user registration request.
Only type of registration available for now is 'permanent'. Temporary to come later, maybe (for guests/'unregistered' users).
Note that profile_map should be a json-encoded string (you can store arbitrary data here)."""
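        # e.g. profile_map = json.dumps({"display_name": "alice"})  -- the key names here are only
        # illustrative; any JSON-encodable data may be stored.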
# Spawn a separate thread to perform registration. This is to keep from blocking the caller, since a callback is expected to handle results.
Thread(target=self.register_thread, args=(username, password, profile_map, callback_object, registration_type)).start();
def init_tcp_client(self, server_ip, server_port, buffer_size=1024):
self.server_ip = server_ip;
self.server_port = server_port;
self.stun_client = tcp.TCP_Client(server_ip, server_port, buffer_size);
def init_stun_listener(self):
self.connection_state = ConnectionState(True);
Thread(target=self.stun_listen_loop).start();
def stun_send_message(self, message, json_encode=False, prepare=True):
try:
self.stun_client.send_message(message, json_encode, prepare);
return True;
except:
return False;
def stun_listen_loop(self):
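        # Dispatch loop for messages pushed by the STUN server over TCP. Message types handled below:
        # peer_map, hole_punch, hole_punch_request_rejected, hole_punch_rejected, init_hole_punch,
        # auth_keys, auth_response, registration_key and registration_response.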
connection_state = self.connection_state
message_object = None
while self.connection_state.active:
try:
message_object = self.stun_client.pop_message();
is_valid_message = ((message_object != None) and (len(message_object) > 2));
self.master_log.append(message_object);
if is_valid_message:
message = message_object[2];
message_type, message_body = message.split(" ",1);
if message_type not in self.message_log_map:
self.message_log_map[message_type] = [];
self.message_log_map[message_type].append(message_object);
#print "MESSAGE: %s\n" % message_object;
if(message_type == "peer_map"):
# peer data should be [[peer_username, public_profile_map], ...]
message_data = json.loads(message_body);
self.update_peer_map(message_data);
if self.peer_map_callback != None:
self.peer_map_callback(self.peer_map);
elif(message_type == "hole_punch"):
peer_allowed = True;
# message body should be [listen_ip, listen_port, peer_ip, peer_port, peer_username, buffer_size]
message_data = json.loads(message_body);
listen_ip, listen_port, peer_ip, peer_port, peer_username, buffer_size = message_data
port_in_use = False;
# Ensure port isn't already in use.
if listen_port in self.used_ports:
port_in_use = True;
self.stun_send_message("hole_punch_reject %s" % json.dumps([listen_ip, listen_port, self.username, peer_ip, peer_port, peer_username, buffer_size, port_in_use]));
continue;
message_body = json.dumps([listen_ip, listen_port, self.username, peer_ip, peer_port, peer_username, buffer_size, port_in_use]);
if(self.peer_block_manager != None):
peer_allowed = self.peer_block_manager.is_peer_allowed(message_data);
if(peer_allowed):
self.stun_send_message("hole_punch_ack %s" % message_body);
else:
self.stun_send_message("hole_punch_reject %s" % message_body);
elif(message_type == "hole_punch_request_rejected"):
# Deals with requests that fail due to lack of authentication (this client or the target client) or target client doesn't exist.
# message_body should be [listen_ip, listen_port, self.username, target_ip, target_port, username, buffer_size]
fail_type, target_username, error_message = json.loads(message_body);
if target_username in self.callback_map:
callback_object = self.callback_map[target_username];
callback_object.complete_connection(target_username, False, error_message);
del self.callback_map[target_username];
elif(message_type == "hole_punch_rejected"):
# message_body should be [listen_ip, listen_port, self.username, target_ip, target_port, username, buffer_size]
message_data = json.loads(message_body);
listen_ip, listen_port, self.username, target_ip, target_port, username, buffer_size = message_data
client_key = "%s-%s-%s" % (target_ip, target_port, username);
callback_object = None;
if client_key in self.callback_map:
callback_object = self.callback_map[client_key]
if callback_object != None:
callback_object.complete_connection(client_key, False, "Peer rejected the connection request.");
del self.callback_map[client_key];
elif(message_type == "init_hole_punch"):
try:
listen_ip, listen_port, peer_ip, peer_port, peer_username, buffer_size = json.loads(message_body);
if listen_port not in self.used_ports:
self.used_ports.append(listen_port);
                            # No else branch: the listen_port supplied by the server is expected to be free, so it is simply recorded as in use above.
Thread(target=self.connect_to_remote_peer, args=(listen_ip, listen_port, peer_ip, peer_port, buffer_size, peer_username)).start();
client_key = "%s_%s_%s" % (peer_ip, peer_port, peer_username)
if peer_username in self._holepunch_status:
self._holepunch_status[peer_username] = True;
if peer_username in self.callback_map:
self.callback_map[client_key] = self.callback_map[peer_username];
del self.callback_map[peer_username]
except Exception as e:
self.log_error(e);
elif(message_type == "auth_keys"):
# message body should be [salt, dynamic_key]
self.auth_keys = json.loads(message_body);
elif(message_type == "auth_response"):
# message body should be [success, username, profile_map, login, error_message]
success, username, profile_map, login, error_message = json.loads(message_body);
self._auth_status = True;
new_auth = not self.authenticated;
if success:
if login:
self.authenticated = True;
self.auth_keep_alive_multiplier = 1;
self.last_auth = time.time();
self.username = username;
self.profile_map = profile_map;
if new_auth:
Thread(target=self.maintain_authentication).start();
else:
self.authenticated = False;
self.auth_keep_alive_multiplier = 1;
self.last_auth = time.time();
self.username = username;
self.profile_map = profile_map;
if self.auth_callback != None:
self.auth_callback.complete_authentication(success, error_message);
elif(message_type == "registration_key"):
# message body should be "public_key"
self.registration_key = message_body;
elif(message_type == "registration_response"):
# message body should be [success, username, profile_map, error_message]
success, username, profile_map, error_message = json.loads(message_body);
if success:
self.username = username;
self.profile_map = profile_map;
self._registration_status = True;
                        if self.register_callback != None:
self.register_callback.complete_registration(success, error_message);
except Exception as exc:
self.log_error(exc, message_object);
time.sleep(self.thread_sleep_duration);
def update_peer_map(self, packet):
username_list = [];
current_username_list = self.peer_map.keys();
for user_block in packet:
peer_username, profile_map = user_block;
valid_username = ((peer_username != None) and (peer_username.replace(" ","").replace("\t","").replace("\n","").replace("\r","") != ""));
if valid_username:
username_list.append(peer_username);
self.peer_map[peer_username] = user_block;
remove_username_list = [];
for username in current_username_list:
if username not in username_list:
remove_username_list.append(username);
for username in remove_username_list:
del self.peer_map[username];
def auto_select_local_endpoint(self):
listen_ip = self.local_ip;
range_count = len(self.available_ports);
for i in range(0, range_count):
x = range_count - (1 + i)
port_range = self.available_ports[x]
port_count = port_range[1] - port_range[0]
for j in range(0, port_count):
port = port_range[1] - j;
if port not in self.used_ports:
return (listen_ip, port);
return None;
def connect_to_peer(self, target_username, buffer_size, callback_object=None, listen_ip = None, listen_port = None):
""" callback_object should have a complete_connection(target, success, error_message) method where success is True or False.
Extract info with:
ip, port, username = target.split("-",2)
        Returns False if it fails to send the request message (e.g. the peer is blocked or the connection to the server failed).
"""
        # Define the client key up front so the failure callbacks below can reference it.
        client_key = target_username;
        local_endpoint_not_specified = ((listen_ip == None) or (listen_port == None))
if local_endpoint_not_specified:
try:
listen_ip, listen_port = self.auto_select_local_endpoint();
except:
callback_object.complete_connection(client_key, False, "All available allowed local ports are already in use. Cannot initiate connection to peer.");
return False;
# Disallow connecting to yourself. What are you trying to pull?
if self.username == target_username:
callback_object.complete_connection(client_key, False, "You cannot connect to yourself.");
return False;
# disallow connecting to blocked peers.
if(self.peer_block_manager != None):
peer_allowed = self.peer_block_manager.is_peer_allowed([target_username, buffer_size]);
if not peer_allowed:
callback_object.complete_connection(client_key, False, "This peer has been blocked.");
return False;
self.callback_map[client_key] = callback_object;
self._holepunch_status[client_key] = None;
# Start hole_punch process.
message = "request_hole_punch %s" % json.dumps([listen_ip, listen_port, self.username, target_username, buffer_size])
if not self.stun_send_message(message):
callback_object.complete_connection(client_key, False, "Failed to connect to the server.");
del self.callback_map[client_key];
return False;
timeout_handler = None;
has_timeout_handler = ((callback_object != None) and (hasattr(callback_object, "handle_timeout")))
if has_timeout_handler:
timeout_handler = callback_object.handle_timeout
        # Wait (on a separate thread) until the hole-punch status has been set or the timeout occurs.
Thread(target=self.monitor_response, args=("_holepunch_status", client_key, self.server_response_timeout, None, None, timeout_handler)).start();
return True;
def connect_to_remote_peer(self, local_ip, local_port, target_ip, target_port, buffer_size, username):
"""Warning: Internal use only!"""
print "Connecting to remote peer."
udp_client = udp.UDP_Client(True, local_ip, local_port, target_ip, target_port, buffer_size, True);
client_key = "%s_%s_%s" % (target_ip, target_port, username)
callback_object = None;
if client_key in self.callback_map:
callback_object = self.callback_map[client_key]
if self.hole_punch(udp_client, self.hole_punch_max_attempts, self.hole_punch_timeout):
print "Hole-punch succeeded."
if callback_object != None:
callback_object.complete_connection(username, True, client_key);
self.client_map[client_key] = udp_client; # success, add it to the map.
else:
print "Hole-punch failed."
# remove that port from the used ports list.
port_count = len(self.used_ports);
for i in range(0, port_count):
if self.used_ports[i] == local_port:
del self.used_ports[i]
break;
# run the callback, if there is one.
if callback_object != None:
callback_object.complete_connection(client_key, False, "Failed to connect to peer.");
def hole_punch_send_loop(self, udp_client, maximum_retries=20, delay=0.5):
for i in range(0, maximum_retries):
udp_client.send_message("syn", False, False);
time.sleep(delay);
# Create and return a udp socket that has established connection with the target peer, or None if it fails.
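    # The handshake below is symmetric: both peers repeatedly fire "syn" datagrams at each other
    # (hole_punch_send_loop); once any packet gets through, a "syn" is answered with "ack", an "ack"
    # with "ack2" (success), and receiving "ack2" also counts as success.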
def hole_punch(self, udp_client, maximum_retries=20, timeout=20):
print "Performing hole-punch."
delay = 0.5
result = False;
connection_state = self.connection_state
Thread(target=self.hole_punch_send_loop, args=(udp_client, maximum_retries, delay)).start();
start_time = time.time();
for i in range(0, maximum_retries):
time.sleep(delay)
if not connection_state.active:
# give up and close it out.
udp_client.disconnect();
print "Fail 1";
return False;
packet = "";
try:
packet = udp_client.pop_message();
except:
pass;
if packet != None:
print "hole_punch_response: " + str(packet);
if len(packet) >= 3:
# check the packet.
if(packet[2] == "syn"):
udp_client.send_message("ack", False, False); # send acknowledge
elif(packet[2] == "ack"):
udp_client.send_message("ack2", False, False); # send ack ack and return socket.
result = True;
print "Success 1";
break;
elif(packet[2] == "ack2"):
result = True; # ack ack received, return socket.
print "Success 2";
break;
# check for timeout
time_elapsed = time.time() - start_time;
if(time_elapsed >= timeout):
print "Fail 2";
break;
return result;
class KeepAliveMonitor:
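    """Callback object used by maintain_authentication()/logout(): records when the last
    re-authentication attempt happened and backs off (by bumping auth_keep_alive_multiplier)
    whenever an attempt fails or times out."""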
def __init__(self, parent):
self.parent = parent;
self.last_reauth_attempt = time.time();
def complete_authentication(self, success, error_message=""):
self.parent.re_auth_ready = True;
self.last_reauth_attempt = time.time();
if not success:
self.parent.auth_keep_alive_multiplier += 1;
def handle_timeout(self, params=None):
self.last_reauth_attempt = time.time();
self.parent.re_auth_ready = True;
self.parent.auth_keep_alive_multiplier += 1;
| [] |
qanat/wpt | tools/wptserve/tests/functional/test_response.py | 7c61a4594a95682531367b6956d1c37f8b8fd486 | import os
import unittest
import json
import types
from http.client import BadStatusLine
from io import BytesIO
import pytest
wptserve = pytest.importorskip("wptserve")
from .base import TestUsingServer, TestUsingH2Server, doc_root
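# Helper used by the HEAD-request tests below: it overrides ResponseWriter.end_headers so that any
# body content ends up in an "X-Body" header, letting the tests observe whether a body would have
# been written.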
def send_body_as_header(self):
if self._response.add_required_headers:
self.write_default_headers()
self.write("X-Body: ")
self._headers_complete = True
class TestResponse(TestUsingServer):
def test_head_without_body(self):
@wptserve.handlers.handler
def handler(request, response):
response.writer.end_headers = types.MethodType(send_body_as_header,
response.writer)
return [("X-Test", "TEST")], "body\r\n"
route = ("GET", "/test/test_head_without_body", handler)
self.server.router.register(*route)
resp = self.request(route[1], method="HEAD")
self.assertEqual("6", resp.info()['Content-Length'])
self.assertEqual("TEST", resp.info()['x-Test'])
self.assertEqual("", resp.info()['x-body'])
def test_head_with_body(self):
@wptserve.handlers.handler
def handler(request, response):
response.send_body_for_head_request = True
response.writer.end_headers = types.MethodType(send_body_as_header,
response.writer)
return [("X-Test", "TEST")], "body\r\n"
route = ("GET", "/test/test_head_with_body", handler)
self.server.router.register(*route)
resp = self.request(route[1], method="HEAD")
self.assertEqual("6", resp.info()['Content-Length'])
self.assertEqual("TEST", resp.info()['x-Test'])
self.assertEqual("body", resp.info()['X-Body'])
def test_write_content_no_status_no_header(self):
resp_content = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_content(resp_content)
route = ("GET", "/test/test_write_content_no_status_no_header", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 200
assert resp.read() == resp_content
assert resp.info()["Content-Length"] == str(len(resp_content))
assert "Date" in resp.info()
assert "Server" in resp.info()
def test_write_content_no_headers(self):
resp_content = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_status(201)
response.writer.write_content(resp_content)
route = ("GET", "/test/test_write_content_no_headers", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 201
assert resp.read() == resp_content
assert resp.info()["Content-Length"] == str(len(resp_content))
assert "Date" in resp.info()
assert "Server" in resp.info()
def test_write_content_no_status(self):
resp_content = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_header("test-header", "test-value")
response.writer.write_content(resp_content)
route = ("GET", "/test/test_write_content_no_status", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 200
assert resp.read() == resp_content
assert sorted(x.lower() for x in resp.info().keys()) == sorted(['test-header', 'date', 'server', 'content-length'])
def test_write_content_no_status_no_required_headers(self):
resp_content = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
response.add_required_headers = False
response.writer.write_header("test-header", "test-value")
response.writer.write_content(resp_content)
route = ("GET", "/test/test_write_content_no_status_no_required_headers", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 200
assert resp.read() == resp_content
assert resp.info().items() == [('test-header', 'test-value')]
def test_write_content_no_status_no_headers_no_required_headers(self):
resp_content = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
response.add_required_headers = False
response.writer.write_content(resp_content)
route = ("GET", "/test/test_write_content_no_status_no_headers_no_required_headers", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 200
assert resp.read() == resp_content
assert resp.info().items() == []
def test_write_raw_content(self):
resp_content = b"HTTP/1.1 202 Giraffe\n" \
b"X-TEST: PASS\n" \
b"Content-Length: 7\n\n" \
b"Content"
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_raw_content(resp_content)
route = ("GET", "/test/test_write_raw_content", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 202
assert resp.info()["X-TEST"] == "PASS"
assert resp.read() == b"Content"
def test_write_raw_content_file(self):
@wptserve.handlers.handler
def handler(request, response):
with open(os.path.join(doc_root, "test.asis"), 'rb') as infile:
response.writer.write_raw_content(infile)
route = ("GET", "/test/test_write_raw_content", handler)
self.server.router.register(*route)
resp = self.request(route[1])
assert resp.getcode() == 202
assert resp.info()["X-TEST"] == "PASS"
assert resp.read() == b"Content"
def test_write_raw_none(self):
@wptserve.handlers.handler
def handler(request, response):
with pytest.raises(ValueError):
response.writer.write_raw_content(None)
route = ("GET", "/test/test_write_raw_content", handler)
self.server.router.register(*route)
self.request(route[1])
def test_write_raw_contents_invalid_http(self):
resp_content = b"INVALID HTTP"
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_raw_content(resp_content)
route = ("GET", "/test/test_write_raw_content", handler)
self.server.router.register(*route)
with pytest.raises(BadStatusLine) as e:
self.request(route[1])
assert str(e.value) == resp_content.decode('utf-8')
class TestH2Response(TestUsingH2Server):
def test_write_without_ending_stream(self):
data = b"TEST"
@wptserve.handlers.handler
def handler(request, response):
headers = [
('server', 'test-h2'),
('test', 'PASS'),
]
response.writer.write_headers(headers, 202)
response.writer.write_data_frame(data, False)
# Should detect stream isn't ended and call `writer.end_stream()`
route = ("GET", "/h2test/test", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 202
assert [x for x in resp.headers.items()] == [('server', 'test-h2'), ('test', 'PASS')]
assert resp.content == data
def test_set_error(self):
@wptserve.handlers.handler
def handler(request, response):
response.set_error(503, message="Test error")
route = ("GET", "/h2test/test_set_error", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 503
assert json.loads(resp.content) == json.loads("{\"error\": {\"message\": \"Test error\", \"code\": 503}}")
def test_file_like_response(self):
@wptserve.handlers.handler
def handler(request, response):
content = BytesIO(b"Hello, world!")
response.content = content
route = ("GET", "/h2test/test_file_like_response", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 200
assert resp.content == b"Hello, world!"
def test_list_response(self):
@wptserve.handlers.handler
def handler(request, response):
response.content = ['hello', 'world']
route = ("GET", "/h2test/test_file_like_response", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 200
assert resp.content == b"helloworld"
def test_content_longer_than_frame_size(self):
@wptserve.handlers.handler
def handler(request, response):
size = response.writer.get_max_payload_size()
content = "a" * (size + 5)
return [('payload_size', size)], content
route = ("GET", "/h2test/test_content_longer_than_frame_size", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 200
payload_size = int(resp.headers['payload_size'])
assert payload_size
assert resp.content == b"a" * (payload_size + 5)
def test_encode(self):
@wptserve.handlers.handler
def handler(request, response):
response.encoding = "utf8"
t = response.writer.encode("hello")
assert t == b"hello"
with pytest.raises(ValueError):
response.writer.encode(None)
route = ("GET", "/h2test/test_content_longer_than_frame_size", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 200
def test_raw_header_frame(self):
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_raw_header_frame([
(':status', '204'),
('server', 'TEST-H2')
], end_headers=True)
route = ("GET", "/h2test/test_file_like_response", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 204
assert resp.headers['server'] == 'TEST-H2'
assert resp.content == b''
def test_raw_data_frame(self):
@wptserve.handlers.handler
def handler(request, response):
response.write_status_headers()
response.writer.write_raw_data_frame(data=b'Hello world', end_stream=True)
route = ("GET", "/h2test/test_file_like_response", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.content == b'Hello world'
def test_raw_header_continuation_frame(self):
@wptserve.handlers.handler
def handler(request, response):
response.writer.write_raw_header_frame([
(':status', '204')
])
response.writer.write_raw_continuation_frame([
('server', 'TEST-H2')
], end_headers=True)
route = ("GET", "/h2test/test_file_like_response", handler)
self.server.router.register(*route)
resp = self.client.get(route[1])
assert resp.status_code == 204
assert resp.headers['server'] == 'TEST-H2'
assert resp.content == b''
if __name__ == '__main__':
unittest.main()
| [((11, 11, 11, 42), 'pytest.importorskip', 'pytest.importorskip', ({(11, 31, 11, 41): '"""wptserve"""'}, {}), "('wptserve')", False, 'import pytest\n'), ((323, 4, 323, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((25, 42, 26, 75), 'types.MethodType', 'types.MethodType', ({(25, 59, 25, 78): 'send_body_as_header', (26, 59, 26, 74): 'response.writer'}, {}), '(send_body_as_header, response.writer)', False, 'import types\n'), ((40, 42, 41, 75), 'types.MethodType', 'types.MethodType', ({(40, 59, 40, 78): 'send_body_as_header', (41, 59, 41, 74): 'response.writer'}, {}), '(send_body_as_header, response.writer)', False, 'import types\n'), ((180, 13, 180, 41), 'pytest.raises', 'pytest.raises', ({(180, 27, 180, 40): 'BadStatusLine'}, {}), '(BadStatusLine)', False, 'import pytest\n'), ((217, 15, 217, 39), 'json.loads', 'json.loads', ({(217, 26, 217, 38): 'resp.content'}, {}), '(resp.content)', False, 'import json\n'), ((217, 43, 217, 114), 'json.loads', 'json.loads', ({(217, 54, 217, 113): '"""{"error": {"message": "Test error", "code": 503}}"""'}, {}), '(\'{"error": {"message": "Test error", "code": 503}}\')', False, 'import json\n'), ((222, 22, 222, 47), 'io.BytesIO', 'BytesIO', ({(222, 30, 222, 46): "b'Hello, world!'"}, {}), "(b'Hello, world!')", False, 'from io import BytesIO\n'), ((163, 17, 163, 42), 'pytest.raises', 'pytest.raises', ({(163, 31, 163, 41): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((267, 17, 267, 42), 'pytest.raises', 'pytest.raises', ({(267, 31, 267, 41): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((150, 22, 150, 57), 'os.path.join', 'os.path.join', ({(150, 35, 150, 43): 'doc_root', (150, 45, 150, 56): '"""test.asis"""'}, {}), "(doc_root, 'test.asis')", False, 'import os\n')] |
ks91/bbc1-pub | bbc1/core/command.py | 6b9c33c6c8aec7d410ba9b704eeeb8c3772012d0 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import ArgumentParser
import sys
sys.path.extend(["../../"])
from bbc1.core.bbc_config import DEFAULT_CORE_PORT, DEFAULT_P2P_PORT
DEFAULT_SERV_ADDR = '127.0.0.1'
def parser():
usage = 'python {} [--coreport <number>] [--p2pport <number>] [--workingdir <dir>] ' \
'[--config <filename>] [--default_config <filename>] [--nodekey] [--no_nodekey] [--domain0] ' \
'[--ledgersubsystem] [--ip4addr <IP addr>] [--ip6addr <IPv6 addr>] ' \
'[--log <filename>] [--verbose_level <string>] [--daemon] [--kill] [--help]'.format(__file__)
argparser = ArgumentParser(usage=usage)
argparser.add_argument('-cp', '--coreport', type=int, default=DEFAULT_CORE_PORT, help='waiting TCP port')
argparser.add_argument('-pp', '--p2pport', type=int, default=DEFAULT_P2P_PORT, help='waiting TCP port')
argparser.add_argument('-w', '--workingdir', type=str, default=".bbc1", help='working directory name')
argparser.add_argument('-c', '--config', type=str, default=None, help='config file name')
argparser.add_argument('--default_config', type=str, default=None, help='default config file')
argparser.add_argument('--nodekey', action='store_true', help='use node_key for admin command')
argparser.add_argument('--no_nodekey', action='store_true', help='don\'t use node_key for admin command')
argparser.add_argument('--domain0', action='store_true', help='connect to domain_global_0')
argparser.add_argument('--ledgersubsystem', action='store_true', help='use ledger_subsystem')
argparser.add_argument('--ip4addr', type=str, default=None, help='IPv4 address exposed to the external network')
argparser.add_argument('--ip6addr', type=str, default=None, help='IPv6 address exposed to the external network')
argparser.add_argument('-l', '--log', type=str, default="-", help='log filename/"-" means STDOUT')
argparser.add_argument('-d', '--daemon', action='store_true', help='run in background')
argparser.add_argument('-k', '--kill', action='store_true', help='kill the daemon')
argparser.add_argument('-v', '--verbose_level', type=str, default="debug",
help='log level all/debug/info/warning/error/critical/none')
args = argparser.parse_args()
return args
| [((19, 0, 19, 27), 'sys.path.extend', 'sys.path.extend', ({(19, 16, 19, 26): "['../../']"}, {}), "(['../../'])", False, 'import sys\n'), ((31, 16, 31, 43), 'argparse.ArgumentParser', 'ArgumentParser', (), '', False, 'from argparse import ArgumentParser\n')] |
cmcquinn/cmake-uvision-syncer | main.py | 26f34b79b3102a326ced2b0bca2524a98b69abf4 | """
Usage:
main.py [<project>]
Options:
<project> Path to the .uvprojx file (Keil® µVision5 Project File).
The .uvoptx file (Keil® µVision5 Project Options file) will
be located automatically as it shall be adjacent to the
.uvprojx file, having the same filename.
If this is a directory, .uvprojx is found automatically (if
multiple found then the latest changed is chosen).
If not provided then the current working directory is chosen
as a project directory.
"""
import enum
import operator
import os
import warnings
from collections import defaultdict
from dataclasses import dataclass
from os import DirEntry
from pathlib import Path
from typing import List, Optional, Union, Iterable, Collection, Set, Tuple, Callable, Dict, Iterator
from docopt import docopt
from lxml import etree
__author__ = "Bojan Potočnik"
UnknownInt = int
UnknownBool = bool
@enum.unique
class Language(enum.Enum):
ASM = "Assembler"
C = "C"
CPP = "C++"
@enum.unique
class FileType(enum.Enum):
C_SOURCE = 1
"""C Source file"""
ASM_SOURCE = 2
"""Assembly language file"""
OBJECT = 3
"""Object file"""
LIBRARY = 4
"""Library file"""
TEXT_DOCUMENT = 5
"""Text Document file"""
CUSTOM = 7
"""Custom file"""
CPP_SOURCE = 8
"""C++ Source file"""
IMAGE = 9
"""Image file"""
# region XML data structures for Project File
@dataclass
class Target:
@dataclass
class Toolset:
number: int
name: str
@dataclass
class Compiler:
cc: str
ac6: bool
@dataclass
class Options:
@dataclass
class Common:
device: str
vendor: str
pack_id: str
pack_url: str
cpu: str
device_id: int
register_file: str
@dataclass
class Properties:
use_cpp_compiler: bool
common: Common
properties: Properties
@dataclass
class Build:
@dataclass
class Misc:
@dataclass
class Memory:
@enum.unique
class Type(enum.Enum):
"""TODO: Real meaning unknown."""
TYPE0 = 0
TYPE1 = 1
name: str
type: Type
start: int
size: int
cpu_type: str
memories: List[Memory]
@dataclass
class C:
optimization: int
strict: bool
c99: bool
gnu: bool
misc: List[str]
defines: List[str]
undefines: List[str]
include_paths: List[str]
@dataclass
class Asm:
misc: List[str]
defines: List[str]
undefines: List[str]
include_paths: List[str]
@dataclass
class Linker:
text_address_range: int
data_address_range: int
misc: List[str]
misc: Misc
c: C
asm: Asm
ld: Linker
@dataclass
class File:
name: str
type: FileType
path: str
include_in_build: bool
"""Whether this file is included in the build or ignored."""
always_build: bool
"""Whether to always build this file."""
@dataclass
class Group:
name: str
files: List['Target.File']
name: str
toolset: Toolset
compiler: Compiler
options: Options
build: Build
groups: List[Group]
@dataclass
class RTE:
@dataclass
class TargetInfo:
@enum.unique
class VersionMatchMode(enum.Enum):
FIXED = "fixed"
name: str
version_match_mode: Optional[VersionMatchMode]
@dataclass
class Package:
name: str
url: str
vendor: str
version: str
target_infos: List['RTE.TargetInfo']
@dataclass
class Component:
class_: str
group: str
vendor: str
version: str
condition: str
package: 'RTE.Package'
target_infos: List['RTE.TargetInfo']
@dataclass
class File:
@enum.unique
class Attribute(enum.Enum):
CONFIG = "config"
@enum.unique
class Category(enum.Enum):
SOURCE = "source"
attr: Attribute
category: Category
condition: Optional[str]
name: str
version: str
instance: str
component: 'RTE.Component'
package: 'RTE.Package'
target_infos: List['RTE.TargetInfo']
packages: List[Package]
components: List[Component]
files: List[File]
# endregion XML data structures for Project File
# region XML data structures for Project Options file
@dataclass
class File:
group_number: int
"""Number of the :cls:`Group` this file belongs to."""
number: int
"""Number of the file (global across all groups)."""
type: FileType
"""File type as selected in the Options for File ... -> Properties dialog"""
expanded: bool
"""Whether the file is expanded (include file dependencies shown) in the Project Window file browser."""
include_in_build: bool
"""Whether this file is included in the build or ignored."""
always_build: bool
"""Whether to always build this file."""
tv_exp_opt_dlg: UnknownBool
dave2: UnknownBool
path: str
filename: str
rte_flag: bool
"""Whether this file is part of/managed by the Keil MDK Run-Time Environment (RTE) and therefore read-only."""
shared: UnknownBool
_project_file: Target.File = None
"""Reference to the instance of this file from the Project File."""
@dataclass
class Group:
name: str
"""Group name as shown in the Project Window file browser."""
expanded: bool
"""Whether the group is expanded (files shown) in the Project Window file browser."""
tv_exp_opt_dlg: UnknownBool
cb_sel: UnknownBool
rte_flag: bool
"""Whether this group is part of/managed by the Keil MDK Run-Time Environment (RTE) and therefore read-only."""
files: List[File]
"""List of files in this group."""
_project_group: Target.Group = None
"""Reference to the instance of this group from the Project File."""
# endregion XML data structures for Project Options file
# region XML parsing helper functions
def text(element: etree.ElementBase, name: str, is_attribute: bool = False, nullable: bool = False) -> Optional[str]:
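    """Return the text of sub-element ``name`` of ``element`` (or of attribute ``name`` when
    ``is_attribute`` is set). With ``nullable`` a missing value yields None instead of raising;
    otherwise exactly one match is required."""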
if is_attribute:
if nullable:
return element.attrib.get(name)
else:
return element.attrib[name]
value = element.xpath(name)
if (not value) and nullable:
return None
if len(value) != 1:
raise ValueError(f"Only one '{name}' tag per tree is supported, {len(value)} found")
return value[0].text
def strict_bool(element: etree.ElementBase, name: str, nullable: bool = False, *,
false_value: str = "0", true_value: str = "1") -> Optional[bool]:
value = text(element, name, nullable=nullable)
if value == false_value:
return False
if value == true_value:
return True
if (value is None) and nullable:
return None
raise ValueError(f"'{value}' (of {name}) is not valid boolean value")
def strict_hex(element: etree.ElementBase, name: str) -> int:
value = text(element, name)
if not value.startswith("0x"):
raise ValueError(f"'{value}' (of {name}) is not valid hexadecimal value")
return int(value, 16)
# endregion XML parsing helper functions
@dataclass
class UVisionProject:
project_file_path: str
project_options_path: str
# region Project File
targets: List[Target]
# endregion Project File
# region Project Options
groups: List[Group]
"""Groups of files, as shown in the Project Window file browser."""
# endregion Project Options
@classmethod
def new(cls, project_file_path: str) -> 'UVisionProject':
fp_base = os.path.splitext(project_file_path)[0]
project_file_path = fp_base + ".uvprojx"
project_options_path = fp_base + ".uvoptx"
with open(project_file_path) as f:
# noinspection PyProtectedMember
xproj: etree._Element = etree.parse(f).getroot()
with open(project_options_path) as f:
# noinspection PyProtectedMember
xopt: etree._Element = etree.parse(f).getroot()
# region Project File
if xproj.tag != "Project":
raise ValueError("Invalid uVision Project File XML file")
# noinspection PyCallByClass,SpellCheckingInspection
targets = [
Target(
name=text(target, "TargetName"),
toolset=Target.Toolset(
number=strict_hex(target, "ToolsetNumber"),
name=text(target, "ToolsetName")
),
compiler=Target.Compiler(
cc=text(target, "pCCUsed", nullable=True),
ac6=strict_bool(target, "uAC6")
),
options=next(
# There is always only one package, but using generator is clean and
# effective way of creating an inline local variable.
Target.Options(
common=next(
Target.Options.Common(
device=text(tco, "Device"),
vendor=text(tco, "Vendor"),
pack_id=text(tco, "PackID"),
pack_url=text(tco, "PackURL"),
cpu=text(tco, "Cpu"),
device_id=text(tco, "DeviceId"),
register_file=text(tco, "RegisterFile")
) for tco in to.xpath("TargetCommonOption")
),
properties=next(
Target.Options.Properties(
use_cpp_compiler=strict_bool(tcp, "UseCPPCompiler"),
) for tcp in to.xpath("CommonProperty")
)
) for to in target.xpath("TargetOption")
),
build=next(
Target.Build(
misc=Target.Build.Misc(
cpu_type=text(to_taa, "ArmAdsMisc/AdsCpuType"),
memories=[
Target.Build.Misc.Memory(
name=memory.tag,
type=Target.Build.Misc.Memory.Type(int(text(memory, "Type"))),
start=strict_hex(memory, "StartAddress"),
size=strict_hex(memory, "Size")
) for memory in to_taa.xpath("ArmAdsMisc/OnChipMemories/*")
]
),
c=next(
Target.Build.C(
optimization=int(text(to_taa_c, "Optim")),
strict=strict_bool(to_taa_c, "Strict"),
c99=strict_bool(to_taa_c, "uC99"),
gnu=strict_bool(to_taa_c, "uGnu"),
misc=[
mc.strip() for mc in text(to_taa_c, "VariousControls/MiscControls").split(",")
],
defines=[
mc.strip() for mc in text(to_taa_c, "VariousControls/Define").split(" ")
],
undefines=[
mc.strip() for mc in (text(to_taa_c, "VariousControls/Undefine") or "").split(" ")
],
include_paths=[
mc.strip() for mc in text(to_taa_c, "VariousControls/IncludePath").split(";")
]
) for to_taa_c in to_taa.xpath("Cads")
),
asm=next(
Target.Build.Asm(
misc=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/MiscControls") or "").split(",")
],
defines=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/Define") or "").split(" ")
],
undefines=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/Undefine") or "").split(" ")
],
include_paths=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/IncludePath") or "").split(";")
]
) for to_taa_a in to_taa.xpath("Aads")
),
ld=next(
Target.Build.Linker(
text_address_range=strict_hex(to_taa_ld, "TextAddressRange"),
data_address_range=strict_hex(to_taa_ld, "DataAddressRange"),
misc=[
mc.strip() for mc in
text(to_taa_ld, "Misc").split(",") # TODO: Delimiter unknown
]
) for to_taa_ld in to_taa.xpath("LDads")
)
) for to_taa in target.xpath("TargetOption/TargetArmAds")
),
groups=[
Target.Group(
name=text(group, "GroupName"),
files=[
Target.File(
name=text(file, "FileName"),
type=FileType(int(text(file, "FileType"))),
path=text(file, "FilePath"),
include_in_build=strict_bool(file, "FileOption/CommonProperty/IncludeInBuild",
nullable=True),
always_build=strict_bool(file, "FileOption/CommonProperty/AlwaysBuild",
nullable=True, true_value="2")
) for file in group.xpath("Files/File")
]
) for group in target.xpath("Groups/Group")
]
) for target in xproj.xpath("Targets/Target")
]
# region RTE
# noinspection PyCallByClass,PyTypeChecker
rte = RTE(
packages=[
RTE.Package(
name=text(package, "name", True),
url=text(package, "url", True),
vendor=text(package, "vendor", True),
version=text(package, "version", True),
target_infos=[
RTE.TargetInfo(
name=text(ti, "name", True),
# Using generator and list only for local variable
version_match_mode=next(RTE.TargetInfo.VersionMatchMode(vmm) if vmm else None
for vmm in [text(ti, "versionMatchMode", True, True)])
) for ti in package.xpath("targetInfos/targetInfo")
]
) for package in xproj.xpath("RTE/packages/package")
],
components=[
RTE.Component(
class_=text(component, "Cclass", True),
group=text(component, "Cgroup", True),
vendor=text(component, "Cvendor", True),
version=text(component, "Cversion", True),
condition=text(component, "condition", True),
package=next(
# There is always only one package, but using generator is clean and
# effective way of creating an inline local variable.
# This new instance of package will be replaced below with reference to an actual matching
# instance of the package from rte.packages.
RTE.Package(
name=text(package, "name", True),
url=text(package, "url", True),
vendor=text(package, "vendor", True),
version=text(package, "version", True),
target_infos=None
) for package in component.xpath("package")
),
target_infos=[
RTE.TargetInfo(
name=text(ti, "name", True),
# TODO: Handle nullable
# RTE.TargetInfo.VersionMatchMode(text(ti, "versionMatchMode", True, True))
version_match_mode=None
) for ti in component.xpath("targetInfos/targetInfo")
]
) for component in xproj.xpath("RTE/components/component")
],
files=[
RTE.File(
attr=RTE.File.Attribute(text(file, "attr", True)),
category=RTE.File.Category(text(file, "category", True)),
condition=text(file, "condition", True, True),
name=text(file, "name", True),
version=text(file, "version", True),
instance=text(file, "instance"),
component=next(
RTE.Component(
class_=text(component, "Cclass", True),
group=text(component, "Cgroup", True),
vendor=text(component, "Cvendor", True),
version=text(component, "Cversion", True),
condition=text(component, "condition", True),
package=None,
target_infos=None
) for component in file.xpath("component")
),
package=None, # TODO
target_infos=None, # TODO
) for file in xproj.xpath("RTE/files/file")
]
)
# TODO: Connect actual references of the rte.packages and rte.packages.target_infos
for component in rte.components:
cp = component.package
component.package = None
cp.target_infos = None
for package in rte.packages:
# Temporally remove target_infos to enable usage of equality operator.
pti = package.target_infos
package.target_infos = None
if cp == package:
component.package = package
package.target_infos = pti
break
package.target_infos = pti
# endregion RTE
# endregion Project File
# region Project Options
if xopt.tag != "ProjectOpt":
raise ValueError("Invalid uVision Project Options XML file")
groups: List[Group] = []
for group in xopt.xpath("Group"):
group_name = text(group, "GroupName")
# Find this group in the Project File
xproj_group = next(g for g in next(iter(targets)).groups if (g.name == group_name))
# Find all files in this group and also in the Project File
files: List[File] = []
for file in group.xpath("File"):
file_type = FileType(int(text(file, "FileType")))
file_name = text(file, "FilenameWithoutPath")
xproj_file = next(f for f in xproj_group.files if (f.type == file_type and f.name == file_name))
files.append(File(
group_number=int(text(file, "GroupNumber")),
number=int(text(file, "FileNumber")),
type=file_type,
expanded=strict_bool(file, "tvExp"),
include_in_build=xproj_file.include_in_build,
always_build=xproj_file.always_build,
tv_exp_opt_dlg=strict_bool(file, "tvExpOptDlg"),
dave2=strict_bool(file, "bDave2"),
path=text(file, "PathWithFileName"),
filename=file_name,
rte_flag=strict_bool(file, "RteFlg"),
shared=strict_bool(file, "bShared")
))
groups.append(Group(
name=group_name,
expanded=strict_bool(group, "tvExp"),
tv_exp_opt_dlg=strict_bool(group, "tvExpOptDlg"),
cb_sel=strict_bool(group, "cbSel"),
rte_flag=strict_bool(group, "RteFlg"),
files=files
))
# There is no more *currently relevant* data in the Project Options file.
# endregion Project Options
# Add RTE files to the file groups to actually match the Project Window file browser.
for file in rte.files:
# Find the group to which this file belongs to (there shall be one and only one).
group = None
group_number = 1
for group_number, group in enumerate(groups, 1):
if group.files and group.files[0].group_number != group_number:
warnings.warn(f"Inconsistent group number {group.files[0].group_number} for group {group.name}"
f" (expected to be {group_number})")
if group.rte_flag and group.name.strip(":") == file.component.class_:
break
filename = os.path.basename(file.instance)
# Detect file type (this information is not provided for RTE files)
if filename.endswith(".s"):
file_type = FileType.ASM_SOURCE
elif filename.endswith(".c"):
file_type = FileType.C_SOURCE
elif filename.endswith(".cpp"):
file_type = FileType.CPP_SOURCE
elif filename.endswith(".h"):
file_type = FileType.TEXT_DOCUMENT
else:
warnings.warn(f"Unknown RTE file type '{file.instance}': {file}")
continue
group.files.append(File(
group_number=group_number,
number=max(f.number for g in groups for f in g.files) + 1,
type=file_type,
expanded=False,
include_in_build=True, # TODO: This information is available for RTE files
always_build=None,
tv_exp_opt_dlg=False, # TODO
dave2=False, # TODO
path=file.instance,
filename=os.path.basename(file.instance),
rte_flag=True,
shared=False
))
return cls(
project_file_path=project_file_path,
project_options_path=project_options_path,
targets=targets,
groups=groups
)
def source_files(self) -> Iterator[Tuple[File, Optional[Language], Optional[str]]]:
"""
        Get all files grouped by file type, with group names as comments.
"""
# Add source files
for group in self.groups:
comment = group.name
if group.rte_flag:
# RTE groups start with double colon (::).
comment = "RTE" + comment
# Group files by type and add one comment for every file type as they are in the separate sections.
files: Dict[Union[Language, None], List[File]] = defaultdict(list)
for file in group.files:
if file.type == FileType.ASM_SOURCE:
lang = Language.ASM
elif file.type == FileType.C_SOURCE:
lang = Language.C
elif file.type == FileType.TEXT_DOCUMENT:
lang = None
else:
warnings.warn(f"Unsupported file type: {file.type} for {file}")
continue
files[lang].append(file)
for lang, files in files.items():
comment_per_type = comment
for file in files:
yield file, lang, comment_per_type
comment_per_type = None
class CMake:
@dataclass
class String:
value: str
"""The actual string value."""
languages: Set[Language]
"""Set of all build configs in which this value is present."""
common: bool = False
comment: Optional[str] = None
"""Comment which will be added to the line before"""
def __eq__(self, o: 'CMake.String') -> bool:
if isinstance(o, type(self)):
return self.value == o.value
elif isinstance(o, str):
return self.value == o
return NotImplemented
def __init__(self) -> None:
self.include_paths: List[CMake.String] = []
self.defines: List[CMake.String] = []
self.undefines: List[CMake.String] = []
self.source_file_paths: List[CMake.String] = []
self.other_file_paths: List[CMake.String] = []
@classmethod
def _get(cls, lst: List[String], obj: str) -> String:
"""Get existing object from the list or append a new one to the end."""
try:
# noinspection PyTypeChecker
itm = lst[lst.index(obj)]
except ValueError:
# noinspection PyCallByClass
itm = cls.String(obj, set())
lst.append(itm)
return itm
@classmethod
def _add_values(cls, where: List[String], values: Union[str, Iterable[str]],
languages: Union[Language, Collection[Language], None], comment: Optional[str] = None) -> None:
if isinstance(languages, Language):
languages = [languages]
for val in values:
obj = cls._get(where, val)
if comment is not None:
# Add comment to the first value only
obj.comment = comment
comment = None
if languages:
obj.languages.update(languages)
@staticmethod
def _clean_paths(paths: Union[str, Iterable[str]]) -> List[str]:
if isinstance(paths, (str, Path)):
paths = [paths]
return [Path(p).as_posix() for p in map(os.path.normpath, paths)]
def add_include_paths(self, paths: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
comment: str = None) -> None:
self._add_values(self.include_paths, self._clean_paths(paths), languages, comment)
def add_defines(self, defines: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
comment: str = None) -> None:
self._add_values(self.defines, defines, languages, comment)
def add_undefines(self, undefines: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
comment: str = None) -> None:
self._add_values(self.undefines, undefines, languages, comment)
def add_source_files(self, paths: Union[None, str, Iterable[str]],
languages: Union[Language, Collection[Language], None],
comment: str = None, include_in_build: bool = True) -> None:
paths = self._clean_paths(paths)
# If file is not included in the build, comment it
if include_in_build is False:
paths = ["# " + path for path in paths]
self._add_values(self.source_file_paths if languages else self.other_file_paths, paths, languages, comment)
def add_other_files(self, paths: Union[str, Iterable[str]], comment: str = None) -> None:
self.add_source_files(paths, None, comment)
def check_common(self) -> Set[Language]:
"""
Check which properties are common to all language configurations.
:return: Set of all used languages (languages with at least one property)
"""
all_props = (self.include_paths, self.defines, self.undefines, self.source_file_paths)
# Get all of the defined languages used
languages = {lang
for props in all_props
for prop in props
for lang in prop.languages}
for props in all_props:
for prop in props:
prop.common = (prop.languages == languages)
return languages
def __str__(self) -> str:
languages = sorted(self.check_common(), key=operator.attrgetter('value'))
ret_str = [
"# Made with CMake <> uVision project file synchronizer"
"# https://github.com/bojanpotocnik/cmake-uvision-syncer"
]
# Set of the build properties
prop_sets: List[Tuple[str, str, List[CMake.String], str]] = [
("definitions", "DEFINES", self.defines, "-D"),
("un-defines", "UNDEFINES", self.undefines, ""),
("include directories", "INCLUDE_DIRS", self.include_paths, ""),
("source files", "SOURCES", self.source_file_paths, ""),
]
# Set of the language configs per build property
sub_prop_sets: List[Tuple[str, str, Callable[[CMake.String], bool]]] = [
("Common", "COMMON", lambda prop: prop.common),
*((lang.value + " specific", lang.name,
lambda prop, lang_=lang: (not prop.common) and (lang_ in prop.languages))
for lang in languages)
]
def _add_section_files(comment: str, var_name: str, value_iterator: Iterable[CMake.String],
value_prefix: str = "") -> str:
s = (f"# {comment}\n"
f"set({var_name}")
value_str = ''
for value in value_iterator:
if value.comment is not None:
value_str += f"\n\t# {value.comment}"
value_str += f"\n\t{value_prefix}{value.value}"
            if len(value_str) != 0:
return s + value_str + "\n)"
else:
return None
for section_comment, section_var_prefix, section_props, val_prefix in prop_sets:
ss_str = []
for prop_set_comment, var_suffix, filter_fun in sub_prop_sets:
section_files = _add_section_files(
comment=f"{prop_set_comment} {section_comment}",
var_name=f"{section_var_prefix}_{var_suffix}",
value_iterator=filter(filter_fun, section_props),
value_prefix=val_prefix
)
if section_files is not None:
ss_str.append(section_files)
ret_str.append("\n\n".join(ss_str))
other_files = _add_section_files(
comment="Other files",
var_name="OTHER_FILES",
value_iterator=self.other_file_paths
)
if other_files is not None:
ret_str.append(other_files)
return "\n\n\n".join(ret_str)
def main() -> None:
# region Parse arguments
arguments = docopt(__doc__)
project_path: str = arguments["<project>"] or "."
if not os.path.isfile(project_path):
with os.scandir(project_path) as dirs: # type: Iterator[DirEntry]
projects = [de.path for de in dirs if (de.is_file() and (os.path.splitext(de.name)[1] == ".uvprojx"))]
if not projects:
raise FileNotFoundError(f"Could not find any .uvprojx file in '{project_path}'")
elif len(projects) > 1:
# Choose the latest file by modification time.
project_path = max(projects, key=os.path.getmtime)
else:
project_path = projects[0]
project_path = os.path.realpath(project_path)
# endregion Parse arguments
print(f"Using µVision5 Project File '{project_path}'")
# Parse uVision project XML files
uvp = UVisionProject.new(project_path)
# Generate CMake file and populate it with information from uVision project
cmake = CMake()
# Add Assembler properties
cmake.add_include_paths(uvp.targets[0].build.asm.include_paths, Language.ASM)
cmake.add_defines(uvp.targets[0].build.asm.defines, Language.ASM)
cmake.add_undefines(uvp.targets[0].build.asm.undefines, Language.ASM)
# Add C properties
cmake.add_include_paths(uvp.targets[0].build.c.include_paths, Language.C)
cmake.add_defines(uvp.targets[0].build.c.defines, Language.C)
cmake.add_undefines(uvp.targets[0].build.c.undefines, Language.C)
# Add source and other files
for file, lang, comment in uvp.source_files():
cmake.add_source_files(file.path, lang, comment, file.include_in_build)
fp_proj_cmake = os.path.join(os.path.dirname(uvp.project_file_path),
os.path.splitext(os.path.basename(uvp.project_file_path))[0] + ".cmake")
with open(fp_proj_cmake, 'w') as f:
print(cmake, file=f)
print(f"Generated CMake file '{fp_proj_cmake}'")
if __name__ == "__main__":
main()
| [((840, 16, 840, 31), 'docopt.docopt', 'docopt', ({(840, 23, 840, 30): '__doc__'}, {}), '(__doc__)', False, 'from docopt import docopt\n'), ((854, 19, 854, 49), 'os.path.realpath', 'os.path.realpath', ({(854, 36, 854, 48): 'project_path'}, {}), '(project_path)', False, 'import os\n'), ((843, 11, 843, 39), 'os.path.isfile', 'os.path.isfile', ({(843, 26, 843, 38): 'project_path'}, {}), '(project_path)', False, 'import os\n'), ((879, 33, 879, 71), 'os.path.dirname', 'os.path.dirname', ({(879, 49, 879, 70): 'uvp.project_file_path'}, {}), '(uvp.project_file_path)', False, 'import os\n'), ((328, 18, 328, 53), 'os.path.splitext', 'os.path.splitext', ({(328, 35, 328, 52): 'project_file_path'}, {}), '(project_file_path)', False, 'import os\n'), ((606, 23, 606, 54), 'os.path.basename', 'os.path.basename', ({(606, 40, 606, 53): 'file.instance'}, {}), '(file.instance)', False, 'import os\n'), ((653, 61, 653, 78), 'collections.defaultdict', 'defaultdict', ({(653, 73, 653, 77): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((844, 13, 844, 37), 'os.scandir', 'os.scandir', ({(844, 24, 844, 36): 'project_path'}, {}), '(project_path)', False, 'import os\n'), ((777, 52, 777, 80), 'operator.attrgetter', 'operator.attrgetter', ({(777, 72, 777, 79): '"""value"""'}, {}), "('value')", False, 'import operator\n'), ((334, 36, 334, 50), 'lxml.etree.parse', 'etree.parse', ({(334, 48, 334, 49): 'f'}, {}), '(f)', False, 'from lxml import etree\n'), ((338, 35, 338, 49), 'lxml.etree.parse', 'etree.parse', ({(338, 47, 338, 48): 'f'}, {}), '(f)', False, 'from lxml import etree\n'), ((602, 20, 603, 70), 'warnings.warn', 'warnings.warn', ({(602, 34, 603, 69): 'f"""Inconsistent group number {group.files[0].group_number} for group {group.name} (expected to be {group_number})"""'}, {}), "(\n f'Inconsistent group number {group.files[0].group_number} for group {group.name} (expected to be {group_number})'\n )", False, 'import warnings\n'), ((730, 16, 730, 23), 'pathlib.Path', 'Path', ({(730, 21, 730, 22): 'p'}, {}), '(p)', False, 'from pathlib import Path\n'), ((880, 50, 880, 89), 'os.path.basename', 'os.path.basename', ({(880, 67, 880, 88): 'uvp.project_file_path'}, {}), '(uvp.project_file_path)', False, 'import os\n'), ((629, 25, 629, 56), 'os.path.basename', 'os.path.basename', ({(629, 42, 629, 55): 'file.instance'}, {}), '(file.instance)', False, 'import os\n'), ((617, 16, 617, 81), 'warnings.warn', 'warnings.warn', ({(617, 30, 617, 80): 'f"""Unknown RTE file type \'{file.instance}\': {file}"""'}, {}), '(f"Unknown RTE file type \'{file.instance}\': {file}")', False, 'import warnings\n'), ((663, 20, 663, 83), 'warnings.warn', 'warnings.warn', ({(663, 34, 663, 82): 'f"""Unsupported file type: {file.type} for {file}"""'}, {}), "(f'Unsupported file type: {file.type} for {file}')", False, 'import warnings\n'), ((845, 69, 845, 94), 'os.path.splitext', 'os.path.splitext', ({(845, 86, 845, 93): 'de.name'}, {}), '(de.name)', False, 'import os\n')] |
lesserwhirls/scipy-cwt | scipy/weave/base_spec.py | ee673656d879d9356892621e23ed0ced3d358621 | class base_converter(object):
"""
Properties:
headers -- list of strings that name the header files needed by this
object.
include_dirs -- list of directories where the header files can be found.
libraries -- list of libraries needed to link to when compiling
extension.
library_dirs -- list of directories to search for libraries.
support_code -- list of strings. Each string is a subroutine needed
by the type. Functions that are used in the conversion
between Python and C++ files are examples of these.
Methods:
type_match(value) returns 1 if this class is used to represent type
specification for value.
type_spec(name, value) returns a new object (of this class) that is
used to produce C++ code for value.
declaration_code() returns C++ code fragment for type declaration and
conversion of python object to C++ object.
cleanup_code() returns C++ code fragment for cleaning up after the
variable after main C++ code fragment has executed.
"""
_build_information = []
compiler = ''
def set_compiler(self,compiler):
self.compiler = compiler
def type_match(self,value):
raise NotImplementedError("You must override method in derived class")
def build_information(self):
return self._build_information
def type_spec(self,name,value):
pass
def declaration_code(self,templatize = 0):
return ""
def local_dict_code(self):
return ""
def cleanup_code(self):
return ""
def retrieve_py_variable(self,inline=0):
# this needs a little coordination in name choices with the
# ext_inline_function class.
if inline:
vn = 'get_variable("%s",raw_locals,raw_globals)' % self.name
else:
vn = 'py_' + self.name
return vn
def py_reference(self):
return "&py_" + self.name
def py_pointer(self):
return "*py_" + self.name
def py_variable(self):
return "py_" + self.name
def reference(self):
return "&" + self.name
def pointer(self):
return "*" + self.name
def init_flag(self):
return self.name + "_used"
def variable(self):
return self.name
def variable_as_string(self):
return '"' + self.name + '"'
import UserList
import base_info
class arg_spec_list(UserList.UserList):
def build_information(self):
all_info = base_info.info_list()
for i in self:
all_info.extend(i.build_information())
return all_info
def py_references(self):
return map(lambda x: x.py_reference(),self)
def py_pointers(self):
return map(lambda x: x.py_pointer(),self)
def py_variables(self):
return map(lambda x: x.py_variable(),self)
def references(self):
        return map(lambda x: x.reference(),self)
def pointers(self):
return map(lambda x: x.pointer(),self)
def variables(self):
return map(lambda x: x.variable(),self)
def init_flags(self):
return map(lambda x: x.init_flag(),self)
def variable_as_strings(self):
return map(lambda x: x.variable_as_string(),self)
| [((76, 19, 76, 40), 'base_info.info_list', 'base_info.info_list', ({}, {}), '()', False, 'import base_info\n')] |
latentai/model-zoo-models | xception/test.py | 70a96e955b3b1245f8417613cd9debdae91b1d28 | #!/usr/bin/env python3
from utils.model_config_helpers import run_model_test
run_model_test()
| [((5, 0, 5, 16), 'utils.model_config_helpers.run_model_test', 'run_model_test', ({}, {}), '()', False, 'from utils.model_config_helpers import run_model_test\n')] |
alexsosn/MultiPlanarUNet | mpunet/bin/cv_split.py | 2d1cecdee391be8e9f72da95e33077ed82a2183a | from glob import glob
import sys
import os
import numpy as np
import random
from mpunet.utils import create_folders
import argparse
def get_parser():
    parser = argparse.ArgumentParser(description="Prepare a data folder for a "
                                                 "CV experiment setup.")
parser.add_argument("--data_dir", type=str,
help="Path to data directory")
parser.add_argument("--CV", type=int, default=5,
help="Number of splits (default=5)")
parser.add_argument("--out_dir", type=str, default="views",
help="Directory to store CV subfolders "
"(default=views")
parser.add_argument("--im_sub_dir", type=str, default="images",
help="Subfolder under 'data_dir' in which image are "
"stored (default=images)")
parser.add_argument("--lab_sub_dir", type=str, default="labels",
help="Subfolder under 'data_dir' in which labels are "
"stored (default=labels)")
parser.add_argument("--copy", action="store_true",
help="Copy files to CV-subfolders instead of "
"symlinking (not recommended)")
parser.add_argument("--file_list", action="store_true",
help="Create text files with paths pointing to the "
"images at the image and labels subdirs under "
"each split instead of symlink/copying. This is"
" usefull on systems were symlink is not "
"supported, but the dataset size is too large to"
" store in copies. NOTE: Only one of --copy and "
"--file_list flags must be set.")
parser.add_argument("--file_regex", type=str, default="*.nii*",
help="Regex used to select files from the image "
"and labels subdirs. (default='*.nii*')")
parser.add_argument("--validation_fraction", type=float, default=0.20,
help="Fraction of OVERALL data size used for "
"validation in each split. In a 5-CV setting with "
"N=100 and val_frac=0.20, each split will have "
"N_train=60, N_val=20 and N_test=20 images")
parser.add_argument("--test_fraction", type=float, default=0.20,
help="Fraction of data size used for test if CV=1.")
parser.add_argument("--common_prefix_length", type=int, required=False, default=0)
return parser
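# Example invocation of this script (illustrative only; the dataset path is an
# assumption and the remaining option values simply restate the defaults
# defined above):
#
#   python cv_split.py --data_dir /path/to/dataset --CV 5 \
#       --im_sub_dir images --lab_sub_dir labels --validation_fraction 0.20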
def assert_dir_structure(data_dir, im_dir, lab_dir, out_dir):
for _dir in (data_dir, im_dir, lab_dir):
if not os.path.exists(_dir):
raise OSError("Invalid data directory '%s'. Does not exist." % data_dir)
if os.path.exists(out_dir):
raise OSError("Output directory at '%s' already exists." % out_dir)
def create_view_folders(out_dir, n_splits):
if not os.path.exists(out_dir):
print("Creating directory at %s" % out_dir)
os.makedirs(out_dir)
if n_splits > 1:
for i in range(n_splits):
split_dir = os.path.join(out_dir, "split_%i" % i)
print("Creating directory at %s" % split_dir)
os.mkdir(split_dir)
def pair_by_names(images, common_prefix_length):
if common_prefix_length == 0:
return images
from collections import defaultdict
names = [os.path.split(i)[-1][:common_prefix_length] for i in images]
inds = defaultdict(list)
for i, item in enumerate(names):
inds[item].append(i)
pairs = inds.values()
return [tuple(np.array(images)[i]) for i in pairs]
def add_images(images, im_folder_path, label_folder_path, im_dir, lab_dir,
link_func=os.symlink):
for image in images:
if not isinstance(image, (list, tuple, np.ndarray)):
image = (image,)
for im in image:
# Get file name
file_name = os.path.split(im)[-1]
# Get label path (OBS: filenames must match!)
lab = im.replace(im_dir, lab_dir)
if not os.path.exists(lab):
raise OSError("No label file found at '%s'. OBS: image and "
"label files must have exactly the same name. "
"Images should be located at '%s' and labels at"
" '%s'" % (lab, im_folder_path, label_folder_path))
# Get relative paths
rel_image = os.path.relpath(im, im_folder_path)
rel_label = os.path.relpath(lab, label_folder_path)
# Symlink or copy
link_func(rel_image, im_folder_path + "/%s" % file_name)
link_func(rel_label, label_folder_path + "/%s" % file_name)
def _add_to_file_list_fallback(rel_image_path, image_path,
fname="LIST_OF_FILES.txt"):
"""
    On some systems symlinks are not supported. If the --file_list flag is set,
    this function is used to add each absolute file path to a list file in the
    final subfolder that would otherwise store the image and label links or
    actual files. At run-time, the files must then be loaded by reading the
    paths from this list file instead.
"""
# Get folder where list of files should be stored
folder = os.path.split(image_path)[0]
# Get absolute path to image
# We change dir to get the correct abs path from the relative
os.chdir(folder)
abs_file_path = os.path.abspath(rel_image_path)
# Get path to the list of files
list_file_path = os.path.join(folder, fname)
with open(list_file_path, "a") as out_f:
out_f.write(abs_file_path + "\n")
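# Illustrative counterpart to the fallback above (not part of mpunet): at
# run-time the stored paths could be read back roughly like this. Only the
# default file name 'LIST_OF_FILES.txt' comes from the function above; the
# helper's name and exact behaviour are assumptions made for illustration.
def _read_file_list_fallback(folder, fname="LIST_OF_FILES.txt"):
    """ Return the absolute file paths previously appended to the list file """
    list_file_path = os.path.join(folder, fname)
    with open(list_file_path, "r") as in_f:
        return [line.strip() for line in in_f if line.strip()]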
def entry_func(args=None):
# Get parser
parser = vars(get_parser().parse_args(args))
# Get arguments
data_dir = os.path.abspath(parser["data_dir"])
n_splits = int(parser["CV"])
if n_splits > 1:
out_dir = os.path.join(data_dir, parser["out_dir"], "%i_CV" % n_splits)
else:
out_dir = os.path.join(data_dir, parser["out_dir"], "fixed_split")
im_dir = os.path.join(data_dir, parser["im_sub_dir"])
lab_dir = os.path.join(data_dir, parser["lab_sub_dir"])
copy = parser["copy"]
file_list = parser["file_list"]
regex = parser["file_regex"]
val_frac = parser["validation_fraction"]
test_frac = parser["test_fraction"]
common_prefix_length = parser["common_prefix_length"]
if n_splits == 1 and not test_frac:
raise ValueError("Must specify --test_fraction with --CV=1.")
if copy and file_list:
raise ValueError("Only one of --copy and --file_list "
"flags must be set.")
# Assert suitable folders
assert_dir_structure(data_dir, im_dir, lab_dir, out_dir)
# Create sub-folders
create_view_folders(out_dir, n_splits)
# Get images and pair by subject identifier if common_prefix_length > 0
images = glob(os.path.join(im_dir, regex))
images = pair_by_names(images, common_prefix_length)
print("-----")
print("Found {} images".format(len(images)))
# Get validation size
N_total = len(images)
if n_splits > 1:
N_test = N_total // n_splits
else:
N_test = int(np.ceil(N_total * test_frac))
N_val = int(np.ceil(N_total * val_frac))
if N_val + N_test >= N_total:
raise ValueError("Too large validation_fraction - "
"No training samples left!")
N_train = N_total - N_test - N_val
print("Total images:".ljust(40), N_total)
print("Train images pr. split:".ljust(40), N_train)
print("Validation images pr. split:".ljust(40), N_val)
print("Test images pr. split:".ljust(40), N_test)
# Shuffle and split the images into CV parts
random.shuffle(images)
splits = np.array_split(images, n_splits)
# Symlink / copy files
for i, split in enumerate(splits):
print(" Split %i/%i" % (i+1, n_splits), end="\r", flush=True)
# Set root path to split folder
if n_splits > 1:
split_path = os.path.join(out_dir, "split_%i" % i)
else:
split_path = out_dir
            # Here we somewhat hackily force the following code to work with
            # CV=1. Define a test set and overwrite the current split (which
            # stores all the data, as splits was never split with n_splits=1).
            split = splits[0][:N_test]
            # Overwrite the splits variable with a length-2 list holding the
            # remaining data, which will be used as val+train. The loop still
            # refers to the old splits list and thus will only execute once.
            splits = [split, splits[0][N_test:]]
# Define train, val and test sub-dirs
train_path = os.path.join(split_path, "train")
train_im_path = os.path.join(train_path, parser["im_sub_dir"])
train_label_path = os.path.join(train_path, parser["lab_sub_dir"])
if N_val:
val_path = os.path.join(split_path, "val")
val_im_path = os.path.join(val_path, parser["im_sub_dir"])
val_label_path = os.path.join(val_path, parser["lab_sub_dir"])
else:
val_path, val_im_path, val_label_path = (None,) * 3
test_path = os.path.join(split_path, "test")
test_im_path = os.path.join(test_path, parser["im_sub_dir"])
test_label_path = os.path.join(test_path, parser["lab_sub_dir"])
# Create folders if not existing
create_folders([train_path, val_path, train_im_path, train_label_path,
val_im_path, val_label_path, test_path, test_im_path,
test_label_path])
# Copy or symlink?
if copy:
from shutil import copyfile
move_func = copyfile
elif file_list:
move_func = _add_to_file_list_fallback
else:
move_func = os.symlink
# Add test data to test folder
add_images(split, test_im_path, test_label_path, im_dir, lab_dir, move_func)
# Join remaining splits into train+val
remaining = [x for ind, x in enumerate(splits) if ind != i]
remaining = [item for sublist in remaining for item in sublist]
# Extract validation data from the remaining
random.shuffle(remaining)
validation = remaining[:N_val]
training = remaining[N_val:]
# Add
if validation:
add_images(validation, val_im_path, val_label_path, im_dir, lab_dir, move_func)
add_images(training, train_im_path, train_label_path, im_dir, lab_dir, move_func)
if __name__ == "__main__":
entry_func()
| [((11, 13, 12, 72), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((55, 7, 55, 30), 'os.path.exists', 'os.path.exists', ({(55, 22, 55, 29): 'out_dir'}, {}), '(out_dir)', False, 'import os\n'), ((76, 11, 76, 28), 'collections.defaultdict', 'defaultdict', ({(76, 23, 76, 27): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((126, 4, 126, 20), 'os.chdir', 'os.chdir', ({(126, 13, 126, 19): 'folder'}, {}), '(folder)', False, 'import os\n'), ((127, 20, 127, 51), 'os.path.abspath', 'os.path.abspath', ({(127, 36, 127, 50): 'rel_image_path'}, {}), '(rel_image_path)', False, 'import os\n'), ((130, 21, 130, 48), 'os.path.join', 'os.path.join', ({(130, 34, 130, 40): 'folder', (130, 42, 130, 47): 'fname'}, {}), '(folder, fname)', False, 'import os\n'), ((142, 15, 142, 50), 'os.path.abspath', 'os.path.abspath', ({(142, 31, 142, 49): "parser['data_dir']"}, {}), "(parser['data_dir'])", False, 'import os\n'), ((148, 13, 148, 57), 'os.path.join', 'os.path.join', ({(148, 26, 148, 34): 'data_dir', (148, 36, 148, 56): "parser['im_sub_dir']"}, {}), "(data_dir, parser['im_sub_dir'])", False, 'import os\n'), ((149, 14, 149, 59), 'os.path.join', 'os.path.join', ({(149, 27, 149, 35): 'data_dir', (149, 37, 149, 58): "parser['lab_sub_dir']"}, {}), "(data_dir, parser['lab_sub_dir'])", False, 'import os\n'), ((193, 4, 193, 26), 'random.shuffle', 'random.shuffle', ({(193, 19, 193, 25): 'images'}, {}), '(images)', False, 'import random\n'), ((194, 13, 194, 45), 'numpy.array_split', 'np.array_split', ({(194, 28, 194, 34): 'images', (194, 36, 194, 44): 'n_splits'}, {}), '(images, n_splits)', True, 'import numpy as np\n'), ((60, 11, 60, 34), 'os.path.exists', 'os.path.exists', ({(60, 26, 60, 33): 'out_dir'}, {}), '(out_dir)', False, 'import os\n'), ((62, 8, 62, 28), 'os.makedirs', 'os.makedirs', ({(62, 20, 62, 27): 'out_dir'}, {}), '(out_dir)', False, 'import os\n'), ((122, 13, 122, 38), 'os.path.split', 'os.path.split', ({(122, 27, 122, 37): 'image_path'}, {}), '(image_path)', False, 'import os\n'), ((145, 18, 145, 79), 'os.path.join', 'os.path.join', ({(145, 31, 145, 39): 'data_dir', (145, 41, 145, 58): "parser['out_dir']", (145, 60, 145, 78): "'%i_CV' % n_splits"}, {}), "(data_dir, parser['out_dir'], '%i_CV' % n_splits)", False, 'import os\n'), ((147, 18, 147, 74), 'os.path.join', 'os.path.join', ({(147, 31, 147, 39): 'data_dir', (147, 41, 147, 58): "parser['out_dir']", (147, 60, 147, 73): '"""fixed_split"""'}, {}), "(data_dir, parser['out_dir'], 'fixed_split')", False, 'import os\n'), ((171, 18, 171, 45), 'os.path.join', 'os.path.join', ({(171, 31, 171, 37): 'im_dir', (171, 39, 171, 44): 'regex'}, {}), '(im_dir, regex)', False, 'import os\n'), ((182, 16, 182, 43), 'numpy.ceil', 'np.ceil', ({(182, 24, 182, 42): 'N_total * val_frac'}, {}), '(N_total * val_frac)', True, 'import numpy as np\n'), ((216, 21, 216, 54), 'os.path.join', 'os.path.join', ({(216, 34, 216, 44): 'split_path', (216, 46, 216, 53): '"""train"""'}, {}), "(split_path, 'train')", False, 'import os\n'), ((217, 24, 217, 70), 'os.path.join', 'os.path.join', ({(217, 37, 217, 47): 'train_path', (217, 49, 217, 69): "parser['im_sub_dir']"}, {}), "(train_path, parser['im_sub_dir'])", False, 'import os\n'), ((218, 27, 218, 74), 'os.path.join', 'os.path.join', ({(218, 40, 218, 50): 'train_path', (218, 52, 218, 73): "parser['lab_sub_dir']"}, {}), "(train_path, parser['lab_sub_dir'])", False, 'import os\n'), ((225, 20, 225, 52), 'os.path.join', 'os.path.join', ({(225, 33, 225, 43): 'split_path', 
(225, 45, 225, 51): '"""test"""'}, {}), "(split_path, 'test')", False, 'import os\n'), ((226, 23, 226, 68), 'os.path.join', 'os.path.join', ({(226, 36, 226, 45): 'test_path', (226, 47, 226, 67): "parser['im_sub_dir']"}, {}), "(test_path, parser['im_sub_dir'])", False, 'import os\n'), ((227, 26, 227, 72), 'os.path.join', 'os.path.join', ({(227, 39, 227, 48): 'test_path', (227, 50, 227, 71): "parser['lab_sub_dir']"}, {}), "(test_path, parser['lab_sub_dir'])", False, 'import os\n'), ((230, 8, 232, 41), 'mpunet.utils.create_folders', 'create_folders', ({(230, 23, 232, 40): '[train_path, val_path, train_im_path, train_label_path, val_im_path,\n val_label_path, test_path, test_im_path, test_label_path]'}, {}), '([train_path, val_path, train_im_path, train_label_path,\n val_im_path, val_label_path, test_path, test_im_path, test_label_path])', False, 'from mpunet.utils import create_folders\n'), ((251, 8, 251, 33), 'random.shuffle', 'random.shuffle', ({(251, 23, 251, 32): 'remaining'}, {}), '(remaining)', False, 'import random\n'), ((53, 15, 53, 35), 'os.path.exists', 'os.path.exists', ({(53, 30, 53, 34): '_dir'}, {}), '(_dir)', False, 'import os\n'), ((66, 24, 66, 61), 'os.path.join', 'os.path.join', ({(66, 37, 66, 44): 'out_dir', (66, 46, 66, 60): "'split_%i' % i"}, {}), "(out_dir, 'split_%i' % i)", False, 'import os\n'), ((68, 12, 68, 31), 'os.mkdir', 'os.mkdir', ({(68, 21, 68, 30): 'split_dir'}, {}), '(split_dir)', False, 'import os\n'), ((102, 24, 102, 59), 'os.path.relpath', 'os.path.relpath', ({(102, 40, 102, 42): 'im', (102, 44, 102, 58): 'im_folder_path'}, {}), '(im, im_folder_path)', False, 'import os\n'), ((103, 24, 103, 63), 'os.path.relpath', 'os.path.relpath', ({(103, 40, 103, 43): 'lab', (103, 45, 103, 62): 'label_folder_path'}, {}), '(lab, label_folder_path)', False, 'import os\n'), ((181, 21, 181, 49), 'numpy.ceil', 'np.ceil', ({(181, 29, 181, 48): 'N_total * test_frac'}, {}), '(N_total * test_frac)', True, 'import numpy as np\n'), ((202, 25, 202, 62), 'os.path.join', 'os.path.join', ({(202, 38, 202, 45): 'out_dir', (202, 47, 202, 61): "'split_%i' % i"}, {}), "(out_dir, 'split_%i' % i)", False, 'import os\n'), ((220, 23, 220, 54), 'os.path.join', 'os.path.join', ({(220, 36, 220, 46): 'split_path', (220, 48, 220, 53): '"""val"""'}, {}), "(split_path, 'val')", False, 'import os\n'), ((221, 26, 221, 70), 'os.path.join', 'os.path.join', ({(221, 39, 221, 47): 'val_path', (221, 49, 221, 69): "parser['im_sub_dir']"}, {}), "(val_path, parser['im_sub_dir'])", False, 'import os\n'), ((222, 29, 222, 74), 'os.path.join', 'os.path.join', ({(222, 42, 222, 50): 'val_path', (222, 52, 222, 73): "parser['lab_sub_dir']"}, {}), "(val_path, parser['lab_sub_dir'])", False, 'import os\n'), ((75, 13, 75, 29), 'os.path.split', 'os.path.split', ({(75, 27, 75, 28): 'i'}, {}), '(i)', False, 'import os\n'), ((80, 18, 80, 34), 'numpy.array', 'np.array', ({(80, 27, 80, 33): 'images'}, {}), '(images)', True, 'import numpy as np\n'), ((90, 24, 90, 41), 'os.path.split', 'os.path.split', ({(90, 38, 90, 40): 'im'}, {}), '(im)', False, 'import os\n'), ((95, 19, 95, 38), 'os.path.exists', 'os.path.exists', ({(95, 34, 95, 37): 'lab'}, {}), '(lab)', False, 'import os\n')] |
gczsjdy/daos | src/client/pydaos/raw/conversion.py | abbd900010562f3acea9c6b1dc2ca98a8d3c71fa | #!/usr/bin/python
"""
(C) Copyright 2018 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
import ctypes
import uuid
def c_uuid_to_str(uuid):
""" utility function to convert a C uuid into a standard string format """
uuid_str = '{:02X}{:02X}{:02X}{:02X}-{:02X}{:02X}-{:02X}{:02X}-{:02X}'\
'{:02X}-{:02X}{:02X}{:02X}{:02X}{:02X}{:02X}'.format(
uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
uuid[12], uuid[13], uuid[14], uuid[15])
return uuid_str
def c_uuid(p_uuid, c_uuid):
""" utility function to create a UUID in C format from a python UUID """
hexstr = p_uuid.hex
for i in range(0, 31, 2):
c_uuid[int(i/2)] = int(hexstr[i:i+2], 16)
def str_to_c_uuid(uuidstr):
""" utility function to convert string format uuid to a C uuid """
uuidstr2 = '{' + uuidstr + '}'
puuid = uuid.UUID(uuidstr2)
cuuid = (ctypes.c_ubyte * 16)()
c_uuid(puuid, cuuid)
return cuuid
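# Illustrative round-trip sketch (not part of the DAOS bindings): the helpers
# above can be checked against each other as below. The function name and the
# use of uuid.uuid4() are assumptions made purely for illustration.
def _example_uuid_roundtrip():
    """ return True if a random UUID survives Python -> C -> string """
    puuid = uuid.uuid4()
    cuuid = (ctypes.c_ubyte * 16)()
    c_uuid(puuid, cuuid)
    # c_uuid_to_str emits upper-case hex, so compare case-insensitively
    return c_uuid_to_str(cuuid).lower() == str(puuid)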
| [((45, 12, 45, 31), 'uuid.UUID', 'uuid.UUID', ({(45, 22, 45, 30): 'uuidstr2'}, {}), '(uuidstr2)', False, 'import uuid\n')] |
lvapeab/GroundHog_INMT | experiments/nmt/utils/vocabulary_coverage.py | d5ad1d466eaf5040e99b9aaaa1b28c96402436ce | import cPickle
import argparse
parser = argparse.ArgumentParser(
"Computes the coverage of a shortlist in a corpus file")
parser.add_argument("--vocab",
required=True, help="Vocabulary to use (.pkl)")
parser.add_argument("--text",
required=True, help="Beam size, turns on beam-search")
args = parser.parse_args()
with open(args.vocab, 'rb') as f:
d = cPickle.load(f)
with open(args.text, 'rb') as f:
text = f.read().splitlines()
n_words = 0
n_unks = 0
split_vocab = 0
split_vocabulary = {}
for line in text:
for word in line.split():
if split_vocabulary.get(word) is None:
split_vocabulary[word] = split_vocab
split_vocab += 1
if d.get(word) is None:
n_unks += 1
n_words += 1
print "Coverage: %f (%d unknown words out of %d of a total of %d)"%((float)(split_vocab - n_unks)/split_vocab, n_unks, split_vocab, n_words)
| [] |
diassor/CollectorCity-Market-Place | stores/apps/inventory/migrations/0001_initial.py | 892ad220b8cf1c0fc7433f625213fe61729522b2 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProductType'
db.create_table('inventory_producttype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('inventory', ['ProductType'])
# Adding model 'Product'
db.create_table('inventory_product', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('description', self.gf('django.db.models.fields.TextField')()),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketCategory'])),
('subcategory', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketSubCategory'])),
('date_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('weight', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=11, decimal_places=2)),
('type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['inventory.ProductType'], null=True, blank=True)),
))
db.send_create_signal('inventory', ['Product'])
# Adding model 'Coin'
db.create_table('inventory_coin', (
('producttype_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['inventory.ProductType'], unique=True, primary_key=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketCategory'], null=True, blank=True)),
('subcategory', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketSubCategory'], null=True, blank=True)),
('country_code', self.gf('django.db.models.fields.CharField')(default='us', max_length=2)),
('pcgs_number', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(default='', blank='')),
('year_issued', self.gf('django.db.models.fields.CharField')(default='', max_length=24, blank='')),
('actual_year', self.gf('django.db.models.fields.CharField')(default='', max_length=24, blank='')),
('denomination', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('major_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('die_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('prefix', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('suffix', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('sort_order', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('heading', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('holder_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('holder_variety_2', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('additional_data', self.gf('django.db.models.fields.TextField')(default='', blank='')),
('last_update', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('inventory', ['Coin'])
def backwards(self, orm):
# Deleting model 'ProductType'
db.delete_table('inventory_producttype')
# Deleting model 'Product'
db.delete_table('inventory_product')
# Deleting model 'Coin'
db.delete_table('inventory_coin')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'inventory.coin': {
'Meta': {'object_name': 'Coin', '_ormbases': ['inventory.ProductType']},
'actual_year': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': "''"}),
'additional_data': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': "''"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketCategory']", 'null': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'us'", 'max_length': '2'}),
'denomination': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': "''"}),
'die_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'heading': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'holder_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'holder_variety_2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'major_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'pcgs_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'producttype_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['inventory.ProductType']", 'unique': 'True', 'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketSubCategory']", 'null': 'True', 'blank': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'year_issued': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': "''"})
},
'inventory.product': {
'Meta': {'object_name': 'Product'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketCategory']"}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketSubCategory']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.ProductType']", 'null': 'True', 'blank': 'True'}),
'weight': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'})
},
'inventory.producttype': {
'Meta': {'object_name': 'ProductType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'market.marketcategory': {
'Meta': {'object_name': 'MarketCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'market.marketplace': {
'Meta': {'object_name': 'MarketPlace'},
'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
},
'market.marketsubcategory': {
'Meta': {'unique_together': "(('parent', 'slug'),)", 'object_name': 'MarketSubCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subcategories'", 'null': 'True', 'to': "orm['market.MarketCategory']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '60', 'db_index': 'True'})
},
'shops.shop': {
'Meta': {'object_name': 'Shop'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['inventory']
| [((15, 8, 15, 59), 'south.db.db.send_create_signal', 'db.send_create_signal', ({(15, 30, 15, 41): '"""inventory"""', (15, 43, 15, 58): "['ProductType']"}, {}), "('inventory', ['ProductType'])", False, 'from south.db import db\n'), ((29, 8, 29, 55), 'south.db.db.send_create_signal', 'db.send_create_signal', ({(29, 30, 29, 41): '"""inventory"""', (29, 43, 29, 54): "['Product']"}, {}), "('inventory', ['Product'])", False, 'from south.db import db\n'), ((53, 8, 53, 52), 'south.db.db.send_create_signal', 'db.send_create_signal', ({(53, 30, 53, 41): '"""inventory"""', (53, 43, 53, 51): "['Coin']"}, {}), "('inventory', ['Coin'])", False, 'from south.db import db\n'), ((59, 8, 59, 48), 'south.db.db.delete_table', 'db.delete_table', ({(59, 24, 59, 47): '"""inventory_producttype"""'}, {}), "('inventory_producttype')", False, 'from south.db import db\n'), ((62, 8, 62, 44), 'south.db.db.delete_table', 'db.delete_table', ({(62, 24, 62, 43): '"""inventory_product"""'}, {}), "('inventory_product')", False, 'from south.db import db\n'), ((65, 8, 65, 41), 'south.db.db.delete_table', 'db.delete_table', ({(65, 24, 65, 40): '"""inventory_coin"""'}, {}), "('inventory_coin')", False, 'from south.db import db\n')] |
vi4m/ralph | src/ralph/deployment/migrations/0005_auto__add_field_archiveddeployment_service__add_field_archiveddeployme.py | 2af767ee23d89be9e6cec0a537350a1ce8840bd1 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ArchivedDeployment.service'
db.add_column('deployment_archiveddeployment', 'service',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL),
keep_default=False)
# Adding field 'ArchivedDeployment.device_environment'
db.add_column('deployment_archiveddeployment', 'device_environment',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL),
keep_default=False)
# Adding field 'Deployment.service'
db.add_column('deployment_deployment', 'service',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL),
keep_default=False)
# Adding field 'Deployment.device_environment'
db.add_column('deployment_deployment', 'device_environment',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ArchivedDeployment.service'
db.delete_column('deployment_archiveddeployment', 'service_id')
# Deleting field 'ArchivedDeployment.device_environment'
db.delete_column('deployment_archiveddeployment', 'device_environment_id')
# Deleting field 'Deployment.service'
db.delete_column('deployment_deployment', 'service_id')
# Deleting field 'Deployment.device_environment'
db.delete_column('deployment_deployment', 'device_environment_id')
models = {
'account.profile': {
'Meta': {'object_name': 'Profile'},
'activation_token': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'cost_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'country': ('django.db.models.fields.PositiveIntegerField', [], {'default': '153'}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'gender': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'home_page': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', u'default': '1', 'null': 'False', '_in_south': 'True', 'db_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'manager': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'nick': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '30', 'blank': 'True'}),
'profit_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'business.businesssegment': {
'Meta': {'object_name': 'BusinessSegment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'business.department': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'Department'},
'icon': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'True', u'default': 'None', 'null': 'True', '_in_south': 'True', 'db_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'business.profitcenter': {
'Meta': {'object_name': 'ProfitCenter'},
'description': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'business.venture': {
'Meta': {'ordering': "(u'parent__symbol', u'symbol')", 'unique_together': "((u'parent', u'symbol'),)", 'object_name': 'Venture'},
'business_segment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.BusinessSegment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'data_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DataCenter']", 'null': 'True', 'blank': 'True'}),
'department': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.Department']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_infrastructure': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'margin_kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.MarginKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "u'child_set'", 'null': 'True', 'blank': 'True', 'to': "orm['business.Venture']"}),
'path': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'preboot': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'profit_center': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.ProfitCenter']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'show_in_ralph': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'symbol': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'business.venturerole': {
'Meta': {'ordering': "(u'parent__name', u'name')", 'unique_together': "((u'name', u'venture'),)", 'object_name': 'VentureRole'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "u'child_set'", 'null': 'True', 'blank': 'True', 'to': "orm['business.VentureRole']"}),
'path': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'preboot': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'venture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.Venture']"})
},
'cmdb.ci': {
'Meta': {'unique_together': "((u'content_type', u'object_id'),)", 'object_name': 'CI'},
'added_manually': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'barcode': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'business_service': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CILayer']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'owners': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CIOwner']", 'through': "orm['cmdb.CIOwnership']", 'symmetrical': 'False'}),
'pci_scope': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CI']", 'through': "orm['cmdb.CIRelation']", 'symmetrical': 'False'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '11'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '11'}),
'technical_service': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIType']"}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'zabbix_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'cmdb.cilayer': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'CILayer'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'connected_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CIType']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'icon': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'True', u'default': 'None', 'null': 'True', '_in_south': 'True', 'db_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmdb.ciowner': {
'Meta': {'object_name': 'CIOwner'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'profile': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['account.Profile']", 'unique': 'True'})
},
'cmdb.ciownership': {
'Meta': {'object_name': 'CIOwnership'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIOwner']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'cmdb.cirelation': {
'Meta': {'unique_together': "((u'parent', u'child', u'type'),)", 'object_name': 'CIRelation'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'child'", 'to': "orm['cmdb.CI']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parent'", 'to': "orm['cmdb.CI']"}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.IntegerField', [], {'max_length': '11'})
},
'cmdb.citype': {
'Meta': {'object_name': 'CIType'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'icon_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'deployment.archiveddeployment': {
'Meta': {'ordering': "(u'-created',)", 'object_name': 'ArchivedDeployment'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'device': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.Device']"}),
'device_environment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'done_plugins': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mac': (u'lck.django.common.models.MACAddressField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', 'null': 'False', 'db_index': 'False'}),
'mass_deployment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.MassDeployment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'preboot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status_lastchanged': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'venture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'venture_role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.VentureRole']", 'null': 'True', 'on_delete': 'models.SET_NULL'})
},
'deployment.deployment': {
'Meta': {'ordering': "(u'-created',)", 'object_name': 'Deployment'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'device': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.Device']"}),
'device_environment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'done_plugins': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mac': (u'lck.django.common.models.MACAddressField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', 'null': 'False', 'db_index': 'False'}),
'mass_deployment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.MassDeployment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'preboot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status_lastchanged': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'venture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'venture_role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.VentureRole']", 'null': 'True', 'on_delete': 'models.SET_NULL'})
},
'deployment.deploymentpoll': {
'Meta': {'object_name': 'DeploymentPoll'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'deployment.massdeployment': {
'Meta': {'ordering': "(u'-created',)", 'object_name': 'MassDeployment'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'csv': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'generated_csv': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'})
},
'deployment.preboot': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'Preboot'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deployment.PrebootFile']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'deployment.prebootfile': {
'Meta': {'object_name': 'PrebootFile'},
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'ftype': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', u'default': '101', 'null': 'False', '_in_south': 'True', 'db_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'raw_config': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'discovery.connection': {
'Meta': {'object_name': 'Connection'},
'connection_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inbound': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'inbound_connections'", 'on_delete': 'models.PROTECT', 'to': "orm['discovery.Device']"}),
'outbound': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'outbound_connections'", 'on_delete': 'models.PROTECT', 'to': "orm['discovery.Device']"})
},
'discovery.datacenter': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'DataCenter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'discovery.deprecationkind': {
'Meta': {'object_name': 'DeprecationKind'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'months': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
},
'discovery.device': {
'Meta': {'object_name': 'Device'},
'barcode': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'boot_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'cached_cost': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cached_price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'chassis_position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'connections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['discovery.Device']", 'through': "orm['discovery.Connection']", 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dc': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'deprecation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deprecation_kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.DeprecationKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'device_environment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'diag_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'hard_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logical_parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'logicalchild_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.Device']", 'blank': 'True', 'null': 'True'}),
'management': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'managed_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.IPAddress']", 'blank': 'True', 'null': 'True'}),
'margin_kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.MarginKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'max_save_priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mgmt_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'device_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.DeviceModel']", 'blank': 'True', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name2': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'child_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.Device']", 'blank': 'True', 'null': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'purchase_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rack': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'save_priorities': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'sn': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'support_expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'support_kind': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'uptime_seconds': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'uptime_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'venture': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'venture_role': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.VentureRole']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'warranty_expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'discovery.devicemodel': {
'Meta': {'object_name': 'DeviceModel'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'chassis_size': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_save_priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'save_priorities': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '401'})
},
'discovery.discoveryqueue': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'DiscoveryQueue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'discovery.environment': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'Environment'},
'data_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DataCenter']"}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'hosts_naming_template': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'next_server': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DiscoveryQueue']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'discovery.ipaddress': {
'Meta': {'object_name': 'IPAddress'},
'address': ('django.db.models.fields.IPAddressField', [], {'default': 'None', 'max_length': '15', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dead_ping_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'device': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.Device']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'dns_info': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'http_family': ('django.db.models.fields.TextField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_buried': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_management': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_plugins': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'last_puppet': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.Network']", 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'scan_summary': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scan.ScanSummary']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'snmp_community': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'snmp_name': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'snmp_version': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'venture': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'discovery.marginkind': {
'Meta': {'object_name': 'MarginKind'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'margin': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
},
'discovery.network': {
'Meta': {'ordering': "(u'vlan',)", 'object_name': 'Network'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'custom_dns_servers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['dnsedit.DNSServer']", 'null': 'True', 'blank': 'True'}),
'data_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DataCenter']", 'null': 'True', 'blank': 'True'}),
'dhcp_broadcast': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'dhcp_config': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.Environment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'gateway': ('django.db.models.fields.IPAddressField', [], {'default': 'None', 'max_length': '15', 'null': 'True', 'blank': 'True'}),
'gateway_as_int': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignore_addresses': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.NetworkKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'last_scan': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'max_ip': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'min_ip': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'racks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['discovery.Device']", 'symmetrical': 'False'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'reserved': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
'reserved_top_margin': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'terminators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['discovery.NetworkTerminator']", 'symmetrical': 'False'}),
'vlan': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'discovery.networkkind': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'NetworkKind'},
'icon': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'discovery.networkterminator': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'NetworkTerminator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'dnsedit.dnsserver': {
'Meta': {'object_name': 'DNSServer'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'})
},
'scan.scansummary': {
'Meta': {'object_name': 'ScanSummary'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'false_positive_checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'previous_checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'tags.tag': {
'Meta': {'object_name': 'Tag'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['account.Profile']"}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'tags_tag_tags'", 'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.PositiveIntegerField', [], {'default': '39'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'official': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'stem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'related_tags'", 'null': 'True', 'to': "orm['tags.TagStem']"})
},
'tags.tagstem': {
'Meta': {'object_name': 'TagStem'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.PositiveIntegerField', [], {'default': '39'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'tag_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
}
}
complete_apps = ['deployment'] | [((34, 8, 34, 71), 'south.db.db.delete_column', 'db.delete_column', ({(34, 25, 34, 56): '"""deployment_archiveddeployment"""', (34, 58, 34, 70): '"""service_id"""'}, {}), "('deployment_archiveddeployment', 'service_id')", False, 'from south.db import db\n'), ((37, 8, 37, 82), 'south.db.db.delete_column', 'db.delete_column', ({(37, 25, 37, 56): '"""deployment_archiveddeployment"""', (37, 58, 37, 81): '"""device_environment_id"""'}, {}), "('deployment_archiveddeployment', 'device_environment_id')", False, 'from south.db import db\n'), ((40, 8, 40, 63), 'south.db.db.delete_column', 'db.delete_column', ({(40, 25, 40, 48): '"""deployment_deployment"""', (40, 50, 40, 62): '"""service_id"""'}, {}), "('deployment_deployment', 'service_id')", False, 'from south.db import db\n'), ((43, 8, 43, 74), 'south.db.db.delete_column', 'db.delete_column', ({(43, 25, 43, 48): '"""deployment_deployment"""', (43, 50, 43, 73): '"""device_environment_id"""'}, {}), "('deployment_deployment', 'device_environment_id')", False, 'from south.db import db\n')] |
RLReed/unotran | SPH/sphbwr_example2.py | b317107e1a39490dda732f86a731872f5207a167 | import numpy as np
import sys
sys.path.append('/homes/rlreed/workspace/unotran/src')
from coarseBounds import computeBounds, Grouping
import pickle
from makeDLPbasis import makeBasis as makeDLP
from makeKLTbasis import makeBasis as makeKLT
import sph
import sph_dgm
import pydgm
def buildGEO(ass_map):
fine_map = [1]
coarse_map = [1.26]
material_map = [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]]
npins = len(ass_map)
cm = [0.0]
fm = []
mm = []
for i, ass in enumerate(ass_map):
mm += material_map[ass]
cm += coarse_map
fm += fine_map
cm = np.cumsum(cm)
return npins, fm, cm, mm
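# Worked example (illustrative, not executed): buildGEO([0, 1]) selects the material
# maps for the first two pin types and returns
#   npins = 2
#   fm    = [1, 1]                # one fine cell per pin
#   cm    = [0.0, 1.26, 2.52]     # cumulative coarse-mesh edges (numpy array), 1.26 pitch
#   mm    = [1, 2]                # material numbers of the two pins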
def makeDGMXS(G, refXS, dgmstructure, basisType):
if 'klt' in basisType:
makeKLT(basisType, dgmstructure)
else:
makeDLP(dgmstructure)
dgmstructure.fname = '{}_{}'.format(basisType, dgmstructure.fname)
fname = '_homo.'.join(xs_name.split('.'))
refXS.write_homogenized_XS(fname)
nPin, fm, cm, mm = buildGEO(pin_map)
dgm = sph_dgm.DGMSOLVER(G, fname, fm, cm, mm, nPin, dgmstructure, solveFlag=False)
pydgm.dgmsolver.initialize_dgmsolver()
dgm.extractInfo()
pydgm.dgmsolver.finalize_dgmsolver()
pydgm.control.finalize_control()
nCellPerPin = dgm.phi.shape[2] // dgm.npin
return sph_dgm.XS(G, nCellPerPin, dgm.sig_t, dgm.vsig_f, dgm.chi, dgm.sig_s)
if __name__ == '__main__':
np.set_printoptions(precision=6)
G = 44
dgmstructure = computeBounds(G, 'full', 1, 0.0, 1.3, 60)
fname = dgmstructure.fname
xs_name = 'XS/{}gXS.anlxs'.format(G)
pin_map = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
data_path = 'data2'
# Get the homogenized cross sections
refXS = pickle.load(open('{}/refXS_sph_space_{}.p'.format(data_path, G), 'rb'))
for basis in ['dlp', 'klt_full', 'klt_combine', 'klt_pins_full']:
dgmstructure.fname = fname
XS = makeDGMXS(G, refXS, dgmstructure, basis)
pickle.dump(XS, open('{}/refXS_dgm_{}_{}_h{}.p'.format(data_path, dgmstructure.fname, 'fine_mu', 0), 'wb'))
| [((3, 0, 3, 54), 'sys.path.append', 'sys.path.append', ({(3, 16, 3, 53): '"""/homes/rlreed/workspace/unotran/src"""'}, {}), "('/homes/rlreed/workspace/unotran/src')", False, 'import sys\n'), ((28, 9, 28, 22), 'numpy.cumsum', 'np.cumsum', ({(28, 19, 28, 21): 'cm'}, {}), '(cm)', True, 'import numpy as np\n'), ((45, 10, 45, 86), 'sph_dgm.DGMSOLVER', 'sph_dgm.DGMSOLVER', (), '', False, 'import sph_dgm\n'), ((46, 4, 46, 42), 'pydgm.dgmsolver.initialize_dgmsolver', 'pydgm.dgmsolver.initialize_dgmsolver', ({}, {}), '()', False, 'import pydgm\n'), ((49, 4, 49, 40), 'pydgm.dgmsolver.finalize_dgmsolver', 'pydgm.dgmsolver.finalize_dgmsolver', ({}, {}), '()', False, 'import pydgm\n'), ((50, 4, 50, 36), 'pydgm.control.finalize_control', 'pydgm.control.finalize_control', ({}, {}), '()', False, 'import pydgm\n'), ((54, 11, 54, 80), 'sph_dgm.XS', 'sph_dgm.XS', ({(54, 22, 54, 23): 'G', (54, 25, 54, 36): 'nCellPerPin', (54, 38, 54, 47): 'dgm.sig_t', (54, 49, 54, 59): 'dgm.vsig_f', (54, 61, 54, 68): 'dgm.chi', (54, 70, 54, 79): 'dgm.sig_s'}, {}), '(G, nCellPerPin, dgm.sig_t, dgm.vsig_f, dgm.chi, dgm.sig_s)', False, 'import sph_dgm\n'), ((57, 4, 57, 36), 'numpy.set_printoptions', 'np.set_printoptions', (), '', True, 'import numpy as np\n'), ((61, 19, 61, 60), 'coarseBounds.computeBounds', 'computeBounds', ({(61, 33, 61, 34): 'G', (61, 36, 61, 42): '"""full"""', (61, 44, 61, 45): '1', (61, 47, 61, 50): '0.0', (61, 52, 61, 55): '1.3', (61, 57, 61, 59): '60'}, {}), "(G, 'full', 1, 0.0, 1.3, 60)", False, 'from coarseBounds import computeBounds, Grouping\n'), ((34, 8, 34, 40), 'makeKLTbasis.makeBasis', 'makeKLT', ({(34, 16, 34, 25): 'basisType', (34, 27, 34, 39): 'dgmstructure'}, {}), '(basisType, dgmstructure)', True, 'from makeKLTbasis import makeBasis as makeKLT\n'), ((36, 8, 36, 29), 'makeDLPbasis.makeBasis', 'makeDLP', ({(36, 16, 36, 28): 'dgmstructure'}, {}), '(dgmstructure)', True, 'from makeDLPbasis import makeBasis as makeDLP\n')] |
CentroidChef/oci-python-sdk | src/oci/management_agent/models/management_agent_aggregation_dimensions.py | fa406e27a52b40c70e220c20f52dfe2abe6236a3 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ManagementAgentAggregationDimensions(object):
"""
The Aggregation of Management Agent Dimensions
"""
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "ACTIVE"
AVAILABILITY_STATUS_ACTIVE = "ACTIVE"
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "SILENT"
AVAILABILITY_STATUS_SILENT = "SILENT"
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "NOT_AVAILABLE"
AVAILABILITY_STATUS_NOT_AVAILABLE = "NOT_AVAILABLE"
#: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "LINUX"
PLATFORM_TYPE_LINUX = "LINUX"
#: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "WINDOWS"
PLATFORM_TYPE_WINDOWS = "WINDOWS"
#: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "AGENT"
INSTALL_TYPE_AGENT = "AGENT"
#: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "GATEWAY"
INSTALL_TYPE_GATEWAY = "GATEWAY"
def __init__(self, **kwargs):
"""
Initializes a new ManagementAgentAggregationDimensions object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param availability_status:
The value to assign to the availability_status property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type availability_status: str
:param platform_type:
The value to assign to the platform_type property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type platform_type: str
:param version:
The value to assign to the version property of this ManagementAgentAggregationDimensions.
:type version: str
:param has_plugins:
The value to assign to the has_plugins property of this ManagementAgentAggregationDimensions.
:type has_plugins: bool
:param install_type:
The value to assign to the install_type property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type install_type: str
"""
self.swagger_types = {
'availability_status': 'str',
'platform_type': 'str',
'version': 'str',
'has_plugins': 'bool',
'install_type': 'str'
}
self.attribute_map = {
'availability_status': 'availabilityStatus',
'platform_type': 'platformType',
'version': 'version',
'has_plugins': 'hasPlugins',
'install_type': 'installType'
}
self._availability_status = None
self._platform_type = None
self._version = None
self._has_plugins = None
self._install_type = None
@property
def availability_status(self):
"""
Gets the availability_status of this ManagementAgentAggregationDimensions.
The availability status of managementAgent
Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The availability_status of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._availability_status
@availability_status.setter
def availability_status(self, availability_status):
"""
Sets the availability_status of this ManagementAgentAggregationDimensions.
The availability status of managementAgent
:param availability_status: The availability_status of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["ACTIVE", "SILENT", "NOT_AVAILABLE"]
if not value_allowed_none_or_none_sentinel(availability_status, allowed_values):
availability_status = 'UNKNOWN_ENUM_VALUE'
self._availability_status = availability_status
@property
def platform_type(self):
"""
Gets the platform_type of this ManagementAgentAggregationDimensions.
Platform Type
Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The platform_type of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._platform_type
@platform_type.setter
def platform_type(self, platform_type):
"""
Sets the platform_type of this ManagementAgentAggregationDimensions.
Platform Type
:param platform_type: The platform_type of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["LINUX", "WINDOWS"]
if not value_allowed_none_or_none_sentinel(platform_type, allowed_values):
platform_type = 'UNKNOWN_ENUM_VALUE'
self._platform_type = platform_type
@property
def version(self):
"""
Gets the version of this ManagementAgentAggregationDimensions.
Agent image version
:return: The version of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this ManagementAgentAggregationDimensions.
Agent image version
:param version: The version of this ManagementAgentAggregationDimensions.
:type: str
"""
self._version = version
@property
def has_plugins(self):
"""
Gets the has_plugins of this ManagementAgentAggregationDimensions.
Whether or not a managementAgent has at least one plugin
:return: The has_plugins of this ManagementAgentAggregationDimensions.
:rtype: bool
"""
return self._has_plugins
@has_plugins.setter
def has_plugins(self, has_plugins):
"""
Sets the has_plugins of this ManagementAgentAggregationDimensions.
Whether or not a managementAgent has at least one plugin
:param has_plugins: The has_plugins of this ManagementAgentAggregationDimensions.
:type: bool
"""
self._has_plugins = has_plugins
@property
def install_type(self):
"""
Gets the install_type of this ManagementAgentAggregationDimensions.
The install type, either AGENT or GATEWAY
Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The install_type of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._install_type
@install_type.setter
def install_type(self, install_type):
"""
Sets the install_type of this ManagementAgentAggregationDimensions.
The install type, either AGENT or GATEWAY
:param install_type: The install_type of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["AGENT", "GATEWAY"]
if not value_allowed_none_or_none_sentinel(install_type, allowed_values):
install_type = 'UNKNOWN_ENUM_VALUE'
self._install_type = install_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
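# Illustrative construction (hypothetical values): instances are normally populated by
# the SDK from service responses, but the documented keyword arguments can also be
# passed directly.
#
#     dims = ManagementAgentAggregationDimensions(
#         availability_status="ACTIVE",
#         platform_type="LINUX",
#         version="210101.0101",
#         has_plugins=True,
#         install_type="AGENT",
#     )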
| [((237, 15, 237, 40), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', ({(237, 35, 237, 39): 'self'}, {}), '(self)', False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((124, 15, 124, 87), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', ({(124, 51, 124, 70): 'availability_status', (124, 72, 124, 86): 'allowed_values'}, {}), '(availability_status, allowed_values)', False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((154, 15, 154, 81), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', ({(154, 51, 154, 64): 'platform_type', (154, 66, 154, 80): 'allowed_values'}, {}), '(platform_type, allowed_values)', False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((232, 15, 232, 80), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', ({(232, 51, 232, 63): 'install_type', (232, 65, 232, 79): 'allowed_values'}, {}), '(install_type, allowed_values)', False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')] |
Bashorun97/BuyCoins-Python-SDK | py_buycoins/sending.py | 5b5e1ca6bfbfb56d30e99a737e431ca35b2e712b | from .gcore.queries import GetNetworkFee, GetBalance
from .gcore.mutations import SendCoin
from typing import List, Optional
from .exc import SendLimitError, InvalidClientObject
class Send:
def __init__(self, address: str, cryptocurrency: str, amount: float):
self.address = address
self.cryptocurrency = cryptocurrency
self.amount = amount
limits = {
"bitcoin": 1,
"ethereum": 50,
"litecoin": 50,
"nairatoken": 2000000
}
    def execute(self, client, response_fields):
        try:
            # send() requires the GraphQL response fields, so forward them here.
            return client.execute(query=self.send(response_fields))
except AttributeError:
raise InvalidClientObject("<BuyCoinsClient> object expected received {} instead".format(type(client)))
def get_network_fee(self, response_fields):
_price = GetNetworkFee()
return _price.queryObject(
response_fields=response_fields,
cryptocurrency=self.cryptocurrency, amount=self.amount
)
def check_limit(self):
if Send.limits[self.cryptocurrency.lower()] < self.amount:
return False
else:
return True
def send(self, response_fields):
if self.cryptocurrency.lower() in Send.limits.keys():
            if self.check_limit():  # check_limit already reads amount and cryptocurrency from self
return SendCoin().Mutate(
cryptocurrency=self.cryptocurrency,
response_fields=response_fields,
amount=self.amount,
address=self.address
)
else:
raise SendLimitError("Maximum daily transaction amount exceeded")
def balance(self, response_fields: List):
return GetBalance.queryObject(response_fields=response_fields)
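# Illustrative usage sketch (not executed): `client` is assumed to be an initialised
# BuyCoins API client accepted by execute(), and the GraphQL response fields listed
# below are hypothetical placeholders.
#
#     send = Send(address="1JmloDK...", cryptocurrency="bitcoin", amount=0.01)
#     fee_query = send.get_network_fee(response_fields=["estimatedFee", "total"])
#     if send.check_limit():
#         response = send.execute(client, response_fields=["id", "status"])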
| [] |
yp2800/snippet | snippet/example/python/url.py | 054af596655007cbec81340bd166489e706fffe6 | # -*- coding: utf-8 -*-
try:
from urlparse import urlparse, urlunsplit
except ImportError:
from urllib.parse import urlparse, urlunsplit
class URL(object):
DEFAULT_SCHEME = ["http", "https"]
def __init__(self, url, allowed_scheme=None):
self._url = url
self.url = urlparse(self._url)
self._scheme = allowed_scheme if allowed_scheme else self.DEFAULT_SCHEME
def geturl(self):
scheme = self.scheme if self.scheme else self.url.scheme
netloc = self.netloc if self.netloc else self.url.netloc
url = self.path if self.path else self.url.path
params = self.params if self.params else self.url.params
query = self.query if self.query else self.url.query
fragment = self.fragment if self.fragment else self.url.fragment
if params:
url = "%s;%s" % (url, params)
return urlunsplit((scheme, netloc, url, query, fragment))
def get_full_url(self, base=None):
return self.s_get_full_url(self, base)
@staticmethod
def s_get_full_url(url, base=None):
if not base:
if url.scheme in url._scheme:
return url.geturl()
return None
if not url.scheme:
url.scheme = base.scheme
if url.scheme not in url._scheme:
return None
if not url.netloc:
url.netloc = base.netloc
if len(url.path) == 1 and url.path == '/':
return None
if url.path[0] != '/':
path = base.path.split('/')[:-1]
path.append(url.path)
url.path = '/'.join(path)
return url.geturl()
def __getattr__(self, name):
if name == "path":
path = getattr(self.url, name)
if not path:
return '/'
return path
return getattr(self.url, name)
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
def __repr__(self):
s = "URL(scheme='%s', netloc='%s', path='%s', params='%s', query='%s', fragment='%s')"
p = (self.scheme, self.netloc, self.path, self.params, self.query, self.fragment)
return s % p
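# Minimal usage demo (illustrative URLs): run this module directly to see how a
# relative link is resolved against a base page.
if __name__ == "__main__":
    base = URL("http://example.com/docs/index.html")
    link = URL("guide.html")
    print(link.get_full_url(base))                        # http://example.com/docs/guide.html
    print(URL("https://example.com/a/b").get_full_url())   # absolute URLs pass straight through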
| [((14, 19, 14, 38), 'urllib.parse.urlparse', 'urlparse', ({(14, 28, 14, 37): 'self._url'}, {}), '(self._url)', False, 'from urllib.parse import urlparse, urlunsplit\n'), ((27, 15, 27, 65), 'urllib.parse.urlunsplit', 'urlunsplit', ({(27, 26, 27, 64): '(scheme, netloc, url, query, fragment)'}, {}), '((scheme, netloc, url, query, fragment))', False, 'from urllib.parse import urlparse, urlunsplit\n')] |
tristen-tooming/netvisor-api-client | netvisor_api_client/services/dimension.py | 37c974dc1e6acf1d0bde7e6298b23ca4d14ffd69 | from .base import Service
from ..requests.dimension import CreateDimensionsRequest, DimensionsListRequest
class DimensionService(Service):
def create(self, data):
request = CreateDimensionsRequest(
self.client,
params={'method': 'add'},
data=data
)
return request.make_request()
def list(self, showhidden=None):
request = DimensionsListRequest(self.client,
params={'showhidden': showhidden})
return request.make_request() | [] |
ONS-SST/cis_households | cishouseholds/filter.py | e475df5929e6763a46cd05aff1f7e960ccbe8e21 | from typing import List
from typing import Union
from pyspark.sql import DataFrame
from pyspark.sql import functions as F
from pyspark.sql.window import Window
def filter_all_not_null(df: DataFrame, reference_columns: List[str]) -> DataFrame:
"""
Filter rows which have NULL values in all the specified columns.
From households_aggregate_processes.xlsx, filter number 2.
Parameters
----------
df
reference_columns
Columns to check for missing values in, all
must be missing for the record to be dropped.
"""
return df.na.drop(how="all", subset=reference_columns)
def filter_duplicates_by_time_and_threshold(
df: DataFrame,
first_reference_column: str,
second_reference_column: str,
third_reference_column: str,
fourth_reference_column: str,
time_threshold: float = 1.5,
float_threshold: float = 0.00001,
) -> DataFrame:
"""
    Drop duplicates that share identical values in two columns when the third and fourth
    columns are not both within a threshold difference of the first duplicate record.
From households_aggregate_processes.xlsx, filter number 4.
Parameters
----------
df
first_reference_column
First column with duplicate value
second_reference_column
Second column with duplicate value
third_reference_column
Column used for time based threshold difference, timestamp
fourth_reference_column
Column used for numeric based threshold difference, float
"""
window = Window.partitionBy(first_reference_column, second_reference_column).orderBy(third_reference_column)
df = df.withColumn("duplicate_id", F.row_number().over(window))
df = df.withColumn(
"within_time_threshold",
(
F.abs(
F.first(third_reference_column).over(window).cast("long") - F.col(third_reference_column).cast("long")
)
/ (60 * 60)
)
< time_threshold,
)
df = df.withColumn(
"within_float_threshold",
F.abs(F.first(fourth_reference_column).over(window) - F.col(fourth_reference_column)) < float_threshold,
)
df = df.filter((F.col("duplicate_id") == 1) | ~(F.col("within_time_threshold") & (F.col("within_float_threshold"))))
return df.drop("duplicate_id", "within_time_threshold", "within_float_threshold")
def filter_by_cq_diff(
df: DataFrame, comparing_column: str, ordering_column: str, tolerance: float = 0.00001
) -> DataFrame:
"""
    This function works out which rows have a float value difference of less than 10^-5 or 0.00001
    (or any other input tolerance) in the comparing column while all the other columns are the same,
    considers them to be duplicates, and drops the repeated values so that only one entry is kept.
Parameters
----------
df
comparing_column
ordering_column
tolerance
"""
column_list = df.columns
column_list.remove(comparing_column)
windowSpec = Window.partitionBy(column_list).orderBy(ordering_column)
df = df.withColumn("first_value_in_duplicates", F.first(comparing_column).over(windowSpec))
df = df.withColumn(
"duplicates_first_record", F.abs(F.col("first_value_in_duplicates") - F.col(comparing_column)) < tolerance
)
difference_window = Window.partitionBy(column_list + ["duplicates_first_record"]).orderBy(ordering_column)
df = df.withColumn("duplicate_number", F.row_number().over(difference_window))
df = df.filter(~(F.col("duplicates_first_record") & (F.col("duplicate_number") != 1)))
df = df.drop("first_value_in_duplicates", "duplicates_first_record", "duplicate_number")
return df
def assign_date_interval_and_flag(
df: DataFrame,
column_name_inside_interval: str,
column_name_time_interval: str,
start_datetime_reference_column: str,
end_datetime_reference_column: str,
lower_interval: Union[int, float],
upper_interval: Union[int, float],
interval_format: str = "hours",
) -> DataFrame:
"""
    This function gives the time interval between two given date columns, in either
    hours (by default) or days, in a new column and flags whether it lies inside an
    upper and lower interval. If the difference of dates is within the upper and
    lower time intervals, the function outputs None; it outputs an integer 1 if the
    difference in dates is outside of those intervals.
Parameters
----------
df
column_name_inside_interval
        Name of the column that records whether the difference in dates is
        within the upper/lower limits: if within, it will contain None; if outside,
        it will contain an integer 1.
column_name_time_interval
Name of the column that returns the difference between start and end
date and adds at the end of the column name whether it is in hours or
days
start_datetime_reference_column
Earliest date in string format yyyy-mm-dd hh:mm:ss.
end_datetime_reference_column
Latest date in string format yyyy-mm-dd hh:mm:ss.
lower_interval
        Marks how much NEGATIVE time difference there can be between
        end_datetime_reference_column and start_datetime_reference_column,
        i.e. how much earlier the end_datetime_reference_column can be than
        start_datetime_reference_column
    upper_interval
        Marks how much POSITIVE time difference there can be between
        end_datetime_reference_column and start_datetime_reference_column
interval_format
By default will be a string called 'hours' if upper and lower
intervals are input as days, define interval_format to 'days'.
These are the only two possible formats.
Notes
-----
Lower_interval should be a negative value if start_datetime_reference_column
is after end_datetime_reference_column."""
# by default, Hours but if days, apply change factor
if interval_format == "hours": # to convert hours to seconds
conversion_factor = 3600 # 1h has 60s*60min seconds = 3600 seconds
elif interval_format == "days":
conversion_factor = 86400 # 1 day has 60s*60min*24h seconds = 86400 seconds
column_name_time_interval = column_name_time_interval + "_" + interval_format
# FORMULA: (end_datetime_reference_column - start_datetime_reference_column) in
# seconds/conversion_factor in seconds
df = df.withColumn(
column_name_time_interval,
(
F.to_timestamp(F.col(end_datetime_reference_column)).cast("long")
- F.to_timestamp(F.col(start_datetime_reference_column)).cast("long")
)
/ conversion_factor, # 1 day has 60s*60min*24h seconds = 86400 seconds
)
return df.withColumn(
column_name_inside_interval,
F.when(~F.col(column_name_time_interval).between(lower_interval, upper_interval), 1).otherwise(None),
)
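# Worked example (hours, illustrative values): with lower_interval=-1 and upper_interval=24,
# a start of 2021-01-01 10:00:00 and an end of 2021-01-02 09:00:00 give
# (end - start) = 82800 s / 3600 = 23.0 hours, which lies inside [-1, 24], so the flag
# column stays None; an end of 2021-01-02 11:00:00 gives 25.0 hours, which is outside
# the interval, so the flag column is set to 1.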
def file_exclude(df: DataFrame, source_file_col: str, files_to_exclude: list):
"""
Function to exclude specific files from pipeline processing
Parameters
    ----------
    df
    source_file_col
        Column in input dataframe which contains the source file
    files_to_exclude
        List of files to exclude (feed in from config)
"""
for item in files_to_exclude:
df = df.filter(~F.col(source_file_col).isin(item))
return df
| [((49, 13, 49, 80), 'pyspark.sql.window.Window.partitionBy', ({(49, 32, 49, 54): 'first_reference_column', (49, 56, 49, 79): 'second_reference_column'}, {}), '(first_reference_column, second_reference_column)', False, 'from pyspark.sql.window import Window\n'), ((91, 17, 91, 48), 'pyspark.sql.window.Window.partitionBy', ({(91, 36, 91, 47): 'column_list'}, {}), '(column_list)', False, 'from pyspark.sql.window import Window\n'), ((97, 24, 97, 85), 'pyspark.sql.window.Window.partitionBy', ({(97, 43, 97, 84): "column_list + ['duplicates_first_record']"}, {}), "(column_list + ['duplicates_first_record'])", False, 'from pyspark.sql.window import Window\n'), ((51, 39, 51, 53), 'pyspark.sql.functions.row_number', 'F.row_number', ({}, {}), '()', True, 'from pyspark.sql import functions as F\n'), ((69, 20, 69, 41), 'pyspark.sql.functions.col', 'F.col', ({(69, 26, 69, 40): '"""duplicate_id"""'}, {}), "('duplicate_id')", True, 'from pyspark.sql import functions as F\n'), ((92, 52, 92, 77), 'pyspark.sql.functions.first', 'F.first', ({(92, 60, 92, 76): 'comparing_column'}, {}), '(comparing_column)', True, 'from pyspark.sql import functions as F\n'), ((98, 43, 98, 57), 'pyspark.sql.functions.row_number', 'F.row_number', ({}, {}), '()', True, 'from pyspark.sql import functions as F\n'), ((100, 21, 100, 53), 'pyspark.sql.functions.col', 'F.col', ({(100, 27, 100, 52): '"""duplicates_first_record"""'}, {}), "('duplicates_first_record')", True, 'from pyspark.sql import functions as F\n'), ((66, 62, 66, 92), 'pyspark.sql.functions.col', 'F.col', ({(66, 68, 66, 91): 'fourth_reference_column'}, {}), '(fourth_reference_column)', True, 'from pyspark.sql import functions as F\n'), ((69, 52, 69, 82), 'pyspark.sql.functions.col', 'F.col', ({(69, 58, 69, 81): '"""within_time_threshold"""'}, {}), "('within_time_threshold')", True, 'from pyspark.sql import functions as F\n'), ((69, 86, 69, 117), 'pyspark.sql.functions.col', 'F.col', ({(69, 92, 69, 116): '"""within_float_threshold"""'}, {}), "('within_float_threshold')", True, 'from pyspark.sql import functions as F\n'), ((94, 41, 94, 75), 'pyspark.sql.functions.col', 'F.col', ({(94, 47, 94, 74): '"""first_value_in_duplicates"""'}, {}), "('first_value_in_duplicates')", True, 'from pyspark.sql import functions as F\n'), ((94, 78, 94, 101), 'pyspark.sql.functions.col', 'F.col', ({(94, 84, 94, 100): 'comparing_column'}, {}), '(comparing_column)', True, 'from pyspark.sql import functions as F\n'), ((100, 57, 100, 82), 'pyspark.sql.functions.col', 'F.col', ({(100, 63, 100, 81): '"""duplicate_number"""'}, {}), "('duplicate_number')", True, 'from pyspark.sql import functions as F\n'), ((190, 24, 190, 46), 'pyspark.sql.functions.col', 'F.col', ({(190, 30, 190, 45): 'source_file_col'}, {}), '(source_file_col)', True, 'from pyspark.sql import functions as F\n'), ((66, 14, 66, 46), 'pyspark.sql.functions.first', 'F.first', ({(66, 22, 66, 45): 'fourth_reference_column'}, {}), '(fourth_reference_column)', True, 'from pyspark.sql import functions as F\n'), ((167, 27, 167, 63), 'pyspark.sql.functions.col', 'F.col', ({(167, 33, 167, 62): 'end_datetime_reference_column'}, {}), '(end_datetime_reference_column)', True, 'from pyspark.sql import functions as F\n'), ((168, 29, 168, 67), 'pyspark.sql.functions.col', 'F.col', ({(168, 35, 168, 66): 'start_datetime_reference_column'}, {}), '(start_datetime_reference_column)', True, 'from pyspark.sql import functions as F\n'), ((57, 76, 57, 105), 'pyspark.sql.functions.col', 'F.col', ({(57, 82, 57, 104): 'third_reference_column'}, {}), '(third_reference_column)', True, 'from pyspark.sql import functions as F\n'), ((175, 16, 175, 48), 'pyspark.sql.functions.col', 'F.col', ({(175, 22, 175, 47): 'column_name_time_interval'}, {}), '(column_name_time_interval)', True, 'from pyspark.sql import functions as F\n'), ((57, 16, 57, 47), 'pyspark.sql.functions.first', 'F.first', ({(57, 24, 57, 46): 'third_reference_column'}, {}), '(third_reference_column)', True, 'from pyspark.sql import functions as F\n')]
hpc-unibe-ch/reframe | cscs-checks/cuda/multi_gpu.py | 07f97e25cf4e7319782c37dd1923f7e70a368b99 | # Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import reframe.utility.sanity as sn
import reframe as rfm
@rfm.required_version('>=2.16-dev0')
@rfm.simple_test
class GpuBandwidthCheck(rfm.RegressionTest):
def __init__(self):
self.valid_systems = ['kesch:cn', 'daint:gpu', 'dom:gpu', 'tiger:gpu',
'arolla:cn', 'tsa:cn']
self.valid_prog_environs = ['PrgEnv-gnu']
if self.current_system.name in ['arolla', 'kesch', 'tsa']:
self.valid_prog_environs = ['PrgEnv-gnu-nompi']
self.exclusive_access = True
self.sourcesdir = os.path.join(
self.current_system.resourcesdir, 'CUDA', 'essentials'
)
self.build_system = 'SingleSource'
# Set nvcc flags
nvidia_sm = '60'
if self.current_system.name == 'kesch':
nvidia_sm = '37'
elif self.current_system.name in ['arolla', 'tsa']:
nvidia_sm = '70'
self.build_system.cxxflags = ['-I.', '-m64', '-arch=sm_%s' % nvidia_sm]
self.sourcepath = 'bandwidthtestflex.cu'
self.executable = 'gpu_bandwidth_check.x'
# Perform a single bandwidth test with a buffer size of 1024MB
self.min_buffer_size = 1073741824
self.max_buffer_size = 1073741824
self.executable_opts = ['device', 'all', '--mode=range',
'--start=%d' % self.min_buffer_size,
'--increment=%d' % self.min_buffer_size,
'--end=%d' % self.max_buffer_size, '--csv']
self.num_tasks = 0
self.num_tasks_per_node = 1
if self.current_system.name in ['daint', 'dom', 'tiger']:
self.modules = ['craype-accel-nvidia60']
self.num_gpus_per_node = 1
elif self.current_system.name == 'kesch':
self.modules = ['cudatoolkit/8.0.61']
self.num_gpus_per_node = 8
elif self.current_system.name in ['arolla', 'tsa']:
self.modules = ['cuda/10.1.243']
self.num_gpus_per_node = 8
# perf_patterns and reference will be set by the sanity check function
self.sanity_patterns = self.do_sanity_check()
self.perf_patterns = {}
self.reference = {}
self.__bwref = {
# FIXME: reference values for Arolla and Tsa need to be updated
# (sanity check fails if they are not defined)
'arolla:cn:h2d': (7583, -0.1, None, 'MB/s'),
'arolla:cn:d2h': (7584, -0.1, None, 'MB/s'),
'arolla:cn:d2d': (137408, -0.1, None, 'MB/s'),
'daint:gpu:h2d': (11881, -0.1, None, 'MB/s'),
'daint:gpu:d2h': (12571, -0.1, None, 'MB/s'),
'daint:gpu:d2d': (499000, -0.1, None, 'MB/s'),
'dom:gpu:h2d': (11881, -0.1, None, 'MB/s'),
'dom:gpu:d2h': (12571, -0.1, None, 'MB/s'),
'dom:gpu:d2d': (499000, -0.1, None, 'MB/s'),
'kesch:cn:h2d': (7583, -0.1, None, 'MB/s'),
'kesch:cn:d2h': (7584, -0.1, None, 'MB/s'),
'kesch:cn:d2d': (137408, -0.1, None, 'MB/s'),
'tiger:gpu:h2d': (0, None, None, 'MB/s'),
'tiger:gpu:d2h': (0, None, None, 'MB/s'),
'tiger:gpu:d2d': (0, None, None, 'MB/s'),
'tsa:cn:h2d': (7583, -0.1, None, 'MB/s'),
'tsa:cn:d2h': (7584, -0.1, None, 'MB/s'),
'tsa:cn:d2d': (137408, -0.1, None, 'MB/s'),
}
self.tags = {'diagnostic', 'benchmark', 'mch',
'craype', 'external-resources'}
self.maintainers = ['AJ', 'SK']
def _xfer_pattern(self, xfer_kind, devno, nodename):
'''generates search pattern for performance analysis'''
if xfer_kind == 'h2d':
first_part = 'bandwidthTest-H2D-Pinned'
elif xfer_kind == 'd2h':
first_part = 'bandwidthTest-D2H-Pinned'
else:
first_part = 'bandwidthTest-D2D'
# Extract the bandwidth corresponding to the maximum buffer size
return (r'^%s[^,]*,\s*%s[^,]*,\s*Bandwidth\s*=\s*(\S+)\s*MB/s([^,]*,)'
r'{2}\s*Size\s*=\s*%d\s*bytes[^,]*,\s*DeviceNo\s*=\s*-1'
r':%s' % (nodename, first_part, self.max_buffer_size, devno))
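    # For reference, the pattern above is built to match CSV lines roughly of this
    # (illustrative) shape, capturing the bandwidth figure:
    #   nid00001, bandwidthTest-H2D-Pinned, Bandwidth = 11881.2 MB/s, Time = 0.09 s,
    #   Size = 1073741824 bytes, DeviceNo = -1:0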
@sn.sanity_function
def do_sanity_check(self):
failures = []
devices_found = set(sn.extractall(
r'^\s*([^,]*),\s*Detected devices: %s' % self.num_gpus_per_node,
self.stdout, 1
))
sn.evaluate(sn.assert_eq(
self.job.num_tasks, len(devices_found),
msg='requested {0} node(s), got {1} (nodelist: %s)' %
','.join(sorted(devices_found))))
good_nodes = set(sn.extractall(
r'^\s*([^,]*),\s*NID\s*=\s*\S+\s+Result = PASS',
self.stdout, 1
))
sn.evaluate(sn.assert_eq(
devices_found, good_nodes,
msg='check failed on the following node(s): %s' %
','.join(sorted(devices_found - good_nodes)))
)
# Sanity is fine, fill in the perf. patterns based on the exact node id
for nodename in devices_found:
for xfer_kind in ('h2d', 'd2h', 'd2d'):
for devno in range(self.num_gpus_per_node):
perfvar = '%s_gpu_%s_%s_bw' % (nodename, devno, xfer_kind)
perfvar = 'bw_%s_%s_gpu_%s' % (xfer_kind, nodename, devno)
self.perf_patterns[perfvar] = sn.extractsingle(
self._xfer_pattern(xfer_kind, devno, nodename),
self.stdout, 1, float, 0
)
partname = self.current_partition.fullname
refkey = '%s:%s' % (partname, perfvar)
bwkey = '%s:%s' % (partname, xfer_kind)
self.reference[refkey] = self.__bwref[bwkey]
return True
| [((12, 1, 12, 36), 'reframe.required_version', 'rfm.required_version', ({(12, 22, 12, 35): '""">=2.16-dev0"""'}, {}), "('>=2.16-dev0')", True, 'import reframe as rfm\n'), ((23, 26, 25, 9), 'os.path.join', 'os.path.join', ({(24, 12, 24, 44): 'self.current_system.resourcesdir', (24, 46, 24, 52): '"""CUDA"""', (24, 54, 24, 66): '"""essentials"""'}, {}), "(self.current_system.resourcesdir, 'CUDA', 'essentials')", False, 'import os\n'), ((105, 28, 108, 9), 'reframe.utility.sanity.extractall', 'sn.extractall', ({(106, 12, 106, 75): "'^\\\\s*([^,]*),\\\\s*Detected devices: %s' % self.num_gpus_per_node", (107, 12, 107, 23): 'self.stdout', (107, 25, 107, 26): '1'}, {}), "('^\\\\s*([^,]*),\\\\s*Detected devices: %s' % self.\n num_gpus_per_node, self.stdout, 1)", True, 'import reframe.utility.sanity as sn\n'), ((115, 25, 118, 9), 'reframe.utility.sanity.extractall', 'sn.extractall', ({(116, 12, 116, 59): '"""^\\\\s*([^,]*),\\\\s*NID\\\\s*=\\\\s*\\\\S+\\\\s+Result = PASS"""', (117, 12, 117, 23): 'self.stdout', (117, 25, 117, 26): '1'}, {}), "('^\\\\s*([^,]*),\\\\s*NID\\\\s*=\\\\s*\\\\S+\\\\s+Result = PASS', self.\n stdout, 1)", True, 'import reframe.utility.sanity as sn\n')] |
msgoff/sympy | sympy/series/tests/test_demidovich.py | 1e7daef7514902f5e89718fa957b7b36c6669a10 | from sympy import (
limit,
Symbol,
oo,
sqrt,
Rational,
log,
exp,
cos,
sin,
tan,
pi,
asin,
together,
root,
S,
)
# Numbers listed with the tests refer to problem numbers in the book
# "Anti-demidovich, problemas resueltos, Ed. URSS"
x = Symbol("x")
def test_leadterm():
assert (3 + 2 * x ** (log(3) / log(2) - 1)).leadterm(x) == (3, 0)
def root3(x):
return root(x, 3)
def root4(x):
return root(x, 4)
def test_Limits_simple_0():
assert limit((2 ** (x + 1) + 3 ** (x + 1)) / (2 ** x + 3 ** x), x, oo) == 3 # 175
def test_Limits_simple_1():
assert limit((x + 1) * (x + 2) * (x + 3) / x ** 3, x, oo) == 1 # 172
assert limit(sqrt(x + 1) - sqrt(x), x, oo) == 0 # 179
assert (
limit((2 * x - 3) * (3 * x + 5) * (4 * x - 6) / (3 * x ** 3 + x - 1), x, oo)
== 8
) # Primjer 1
assert limit(x / root3(x ** 3 + 10), x, oo) == 1 # Primjer 2
assert limit((x + 1) ** 2 / (x ** 2 + 1), x, oo) == 1 # 181
def test_Limits_simple_2():
assert limit(1000 * x / (x ** 2 - 1), x, oo) == 0 # 182
assert limit((x ** 2 - 5 * x + 1) / (3 * x + 7), x, oo) is oo # 183
assert limit((2 * x ** 2 - x + 3) / (x ** 3 - 8 * x + 5), x, oo) == 0 # 184
assert limit((2 * x ** 2 - 3 * x - 4) / sqrt(x ** 4 + 1), x, oo) == 2 # 186
assert limit((2 * x + 3) / (x + root3(x)), x, oo) == 2 # 187
assert limit(x ** 2 / (10 + x * sqrt(x)), x, oo) is oo # 188
assert limit(root3(x ** 2 + 1) / (x + 1), x, oo) == 0 # 189
assert limit(sqrt(x) / sqrt(x + sqrt(x + sqrt(x))), x, oo) == 1 # 190
def test_Limits_simple_3a():
a = Symbol("a")
# issue 3513
assert together(limit((x ** 2 - (a + 1) * x + a) / (x ** 3 - a ** 3), x, a)) == (
a - 1
) / (
3 * a ** 2
) # 196
def test_Limits_simple_3b():
h = Symbol("h")
assert limit(((x + h) ** 3 - x ** 3) / h, h, 0) == 3 * x ** 2 # 197
assert limit((1 / (1 - x) - 3 / (1 - x ** 3)), x, 1) == -1 # 198
assert (
limit((sqrt(1 + x) - 1) / (root3(1 + x) - 1), x, 0) == Rational(3) / 2
) # Primer 4
assert limit((sqrt(x) - 1) / (x - 1), x, 1) == Rational(1) / 2 # 199
assert limit((sqrt(x) - 8) / (root3(x) - 4), x, 64) == 3 # 200
assert limit((root3(x) - 1) / (root4(x) - 1), x, 1) == Rational(4) / 3 # 201
assert (
limit((root3(x ** 2) - 2 * root3(x) + 1) / (x - 1) ** 2, x, 1)
== Rational(1) / 9
) # 202
def test_Limits_simple_4a():
a = Symbol("a")
assert limit((sqrt(x) - sqrt(a)) / (x - a), x, a) == 1 / (2 * sqrt(a)) # Primer 5
assert limit((sqrt(x) - 1) / (root3(x) - 1), x, 1) == Rational(3, 2) # 205
assert limit((sqrt(1 + x) - sqrt(1 - x)) / x, x, 0) == 1 # 207
assert limit(sqrt(x ** 2 - 5 * x + 6) - x, x, oo) == Rational(-5, 2) # 213
def test_limits_simple_4aa():
assert limit(x * (sqrt(x ** 2 + 1) - x), x, oo) == Rational(1) / 2 # 214
def test_Limits_simple_4b():
# issue 3511
assert limit(x - root3(x ** 3 - 1), x, oo) == 0 # 215
def test_Limits_simple_4c():
assert limit(log(1 + exp(x)) / x, x, -oo) == 0 # 267a
assert limit(log(1 + exp(x)) / x, x, oo) == 1 # 267b
def test_bounded():
assert limit(sin(x) / x, x, oo) == 0 # 216b
assert limit(x * sin(1 / x), x, 0) == 0 # 227a
def test_f1a():
# issue 3508:
assert limit((sin(2 * x) / x) ** (1 + x), x, 0) == 2 # Primer 7
def test_f1a2():
# issue 3509:
assert limit(((x - 1) / (x + 1)) ** x, x, oo) == exp(-2) # Primer 9
def test_f1b():
m = Symbol("m")
n = Symbol("n")
h = Symbol("h")
a = Symbol("a")
assert limit(sin(x) / x, x, 2) == sin(2) / 2 # 216a
assert limit(sin(3 * x) / x, x, 0) == 3 # 217
assert limit(sin(5 * x) / sin(2 * x), x, 0) == Rational(5, 2) # 218
assert limit(sin(pi * x) / sin(3 * pi * x), x, 0) == Rational(1, 3) # 219
assert limit(x * sin(pi / x), x, oo) == pi # 220
assert limit((1 - cos(x)) / x ** 2, x, 0) == S.Half # 221
assert limit(x * sin(1 / x), x, oo) == 1 # 227b
assert limit((cos(m * x) - cos(n * x)) / x ** 2, x, 0) == (
(n ** 2 - m ** 2) / 2
) # 232
assert limit((tan(x) - sin(x)) / x ** 3, x, 0) == S.Half # 233
assert limit((x - sin(2 * x)) / (x + sin(3 * x)), x, 0) == -Rational(1, 4) # 237
assert limit((1 - sqrt(cos(x))) / x ** 2, x, 0) == Rational(1, 4) # 239
assert limit((sqrt(1 + sin(x)) - sqrt(1 - sin(x))) / x, x, 0) == 1 # 240
assert limit((1 + h / x) ** x, x, oo) == exp(h) # Primer 9
assert limit((sin(x) - sin(a)) / (x - a), x, a) == cos(a) # 222, *176
assert limit((cos(x) - cos(a)) / (x - a), x, a) == -sin(a) # 223
assert limit((sin(x + h) - sin(x)) / h, h, 0) == cos(x) # 225
def test_f2a():
assert limit(((x + 1) / (2 * x + 1)) ** (x ** 2), x, oo) == 0 # Primer 8
def test_f2():
assert limit((sqrt(cos(x)) - root3(cos(x))) / (sin(x) ** 2), x, 0) == -Rational(
1, 12
) # *184
def test_f3():
a = Symbol("a")
# issue 3504
assert limit(asin(a * x) / x, x, 0) == a
| [((22, 4, 22, 15), 'sympy.Symbol', 'Symbol', ({(22, 11, 22, 14): '"""x"""'}, {}), "('x')", False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((30, 11, 30, 21), 'sympy.root', 'root', ({(30, 16, 30, 17): 'x', (30, 19, 30, 20): '(3)'}, {}), '(x, 3)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((34, 11, 34, 21), 'sympy.root', 'root', ({(34, 16, 34, 17): 'x', (34, 19, 34, 20): '(4)'}, {}), '(x, 4)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((64, 8, 64, 19), 'sympy.Symbol', 'Symbol', ({(64, 15, 64, 18): '"""a"""'}, {}), "('a')", False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((74, 8, 74, 19), 'sympy.Symbol', 'Symbol', ({(74, 15, 74, 18): '"""h"""'}, {}), "('h')", False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((90, 8, 90, 19), 'sympy.Symbol', 'Symbol', ({(90, 15, 90, 18): '"""a"""'}, {}), "('a')", False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((127, 8, 127, 19), 'sympy.Symbol', 'Symbol', ({(127, 15, 127, 18): '"""m"""'}, {}), "('m')", False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((128, 8, 128, 19), 'sympy.Symbol', 'Symbol', ({(128, 15, 128, 18): '"""n"""'}, {}), "('n')", False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((129, 8, 129, 19), 'sympy.Symbol', 'Symbol', ({(129, 15, 129, 18): '"""h"""'}, {}), "('h')", False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((130, 8, 130, 19), 'sympy.Symbol', 'Symbol', ({(130, 15, 130, 18): '"""a"""'}, {}), "('a')", False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((163, 8, 163, 19), 'sympy.Symbol', 'Symbol', ({(163, 15, 163, 18): '"""a"""'}, {}), "('a')", False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((38, 11, 38, 74), 'sympy.limit', 'limit', ({(38, 17, 38, 66): '((2 ** (x + 1) + 3 ** (x + 1)) / (2 ** x + 3 ** x))', (38, 68, 38, 69): 'x', (38, 71, 38, 73): 'oo'}, {}), '((2 ** (x + 1) + 3 ** (x + 1)) / (2 ** x + 3 ** x), x, oo)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((42, 11, 42, 61), 'sympy.limit', 'limit', ({(42, 17, 42, 53): '((x + 1) * (x + 2) * (x + 3) / x ** 3)', (42, 55, 42, 56): 'x', (42, 58, 42, 60): 'oo'}, {}), '((x + 1) * (x + 2) * (x + 3) / x ** 3, x, oo)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((45, 8, 45, 84), 'sympy.limit', 'limit', ({(45, 14, 45, 76): '((2 * x - 3) * (3 * x + 5) * (4 * x - 6) / (3 * x ** 3 + x - 1))', (45, 78, 45, 79): 'x', (45, 81, 45, 83): 'oo'}, {}), '((2 * x - 3) * (3 * x + 5) * (4 * x - 6) / (3 * x ** 3 + x - 1), x, oo)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((49, 11, 49, 52), 'sympy.limit', 'limit', ({(49, 17, 49, 44): '((x + 1) ** 2 / (x ** 2 + 1))', (49, 46, 49, 47): 'x', (49, 49, 49, 51): 'oo'}, 
{}), '((x + 1) ** 2 / (x ** 2 + 1), x, oo)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((53, 11, 53, 48), 'sympy.limit', 'limit', ({(53, 17, 53, 40): '(1000 * x / (x ** 2 - 1))', (53, 42, 53, 43): 'x', (53, 45, 53, 47): 'oo'}, {}), '(1000 * x / (x ** 2 - 1), x, oo)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((54, 11, 54, 59), 'sympy.limit', 'limit', ({(54, 17, 54, 51): '((x ** 2 - 5 * x + 1) / (3 * x + 7))', (54, 53, 54, 54): 'x', (54, 56, 54, 58): 'oo'}, {}), '((x ** 2 - 5 * x + 1) / (3 * x + 7), x, oo)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((55, 11, 55, 68), 'sympy.limit', 'limit', ({(55, 17, 55, 60): '((2 * x ** 2 - x + 3) / (x ** 3 - 8 * x + 5))', (55, 62, 55, 63): 'x', (55, 65, 55, 67): 'oo'}, {}), '((2 * x ** 2 - x + 3) / (x ** 3 - 8 * x + 5), x, oo)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((75, 11, 75, 51), 'sympy.limit', 'limit', ({(75, 17, 75, 44): '(((x + h) ** 3 - x ** 3) / h)', (75, 46, 75, 47): 'h', (75, 49, 75, 50): '(0)'}, {}), '(((x + h) ** 3 - x ** 3) / h, h, 0)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((76, 11, 76, 56), 'sympy.limit', 'limit', ({(76, 18, 76, 48): '(1 / (1 - x) - 3 / (1 - x ** 3))', (76, 51, 76, 52): 'x', (76, 54, 76, 55): '(1)'}, {}), '(1 / (1 - x) - 3 / (1 - x ** 3), x, 1)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((92, 58, 92, 72), 'sympy.Rational', 'Rational', ({(92, 67, 92, 68): '(3)', (92, 70, 92, 71): '(2)'}, {}), '(3, 2)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((94, 57, 94, 72), 'sympy.Rational', 'Rational', ({(94, 66, 94, 68): '(-5)', (94, 70, 94, 71): '(2)'}, {}), '(-5, 2)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((123, 11, 123, 49), 'sympy.limit', 'limit', ({(123, 17, 123, 41): '(((x - 1) / (x + 1)) ** x)', (123, 43, 123, 44): 'x', (123, 46, 123, 48): 'oo'}, {}), '(((x - 1) / (x + 1)) ** x, x, oo)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((123, 53, 123, 60), 'sympy.exp', 'exp', ({(123, 57, 123, 59): '(-2)'}, {}), '(-2)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((133, 51, 133, 65), 'sympy.Rational', 'Rational', ({(133, 60, 133, 61): '(5)', (133, 63, 133, 64): '(2)'}, {}), '(5, 2)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((134, 57, 134, 71), 'sympy.Rational', 'Rational', ({(134, 66, 134, 67): '(1)', (134, 69, 134, 70): '(3)'}, {}), '(1, 3)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((143, 55, 143, 69), 'sympy.Rational', 'Rational', ({(143, 64, 143, 65): '(1)', (143, 67, 143, 68): '(4)'}, {}), '(1, 4)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((146, 11, 146, 41), 'sympy.limit', 'limit', ({(146, 17, 146, 33): '((1 + h / x) ** x)', 
(146, 35, 146, 36): 'x', (146, 38, 146, 40): 'oo'}, {}), '((1 + h / x) ** x, x, oo)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((146, 45, 146, 51), 'sympy.exp', 'exp', ({(146, 49, 146, 50): 'h'}, {}), '(h)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((147, 55, 147, 61), 'sympy.cos', 'cos', ({(147, 59, 147, 60): 'a'}, {}), '(a)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((149, 53, 149, 59), 'sympy.cos', 'cos', ({(149, 57, 149, 58): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((153, 11, 153, 60), 'sympy.limit', 'limit', ({(153, 17, 153, 52): '(((x + 1) / (2 * x + 1)) ** x ** 2)', (153, 54, 153, 55): 'x', (153, 57, 153, 59): 'oo'}, {}), '(((x + 1) / (2 * x + 1)) ** x ** 2, x, oo)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((66, 20, 66, 79), 'sympy.limit', 'limit', ({(66, 26, 66, 72): '((x ** 2 - (a + 1) * x + a) / (x ** 3 - a ** 3))', (66, 74, 66, 75): 'x', (66, 77, 66, 78): 'a'}, {}), '((x ** 2 - (a + 1) * x + a) / (x ** 3 - a ** 3), x, a)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((78, 63, 78, 74), 'sympy.Rational', 'Rational', ({(78, 72, 78, 73): '(3)'}, {}), '(3)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((80, 51, 80, 62), 'sympy.Rational', 'Rational', ({(80, 60, 80, 61): '(1)'}, {}), '(1)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((82, 59, 82, 70), 'sympy.Rational', 'Rational', ({(82, 68, 82, 69): '(4)'}, {}), '(4)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((85, 11, 85, 22), 'sympy.Rational', 'Rational', ({(85, 20, 85, 21): '(1)'}, {}), '(1)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((98, 55, 98, 66), 'sympy.Rational', 'Rational', ({(98, 64, 98, 65): '(1)'}, {}), '(1)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((131, 38, 131, 44), 'sympy.sin', 'sin', ({(131, 42, 131, 43): '(2)'}, {}), '(2)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((142, 64, 142, 78), 'sympy.Rational', 'Rational', ({(142, 73, 142, 74): '(1)', (142, 76, 142, 77): '(4)'}, {}), '(1, 4)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((148, 56, 148, 62), 'sympy.sin', 'sin', ({(148, 60, 148, 61): 'a'}, {}), '(a)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((157, 75, 159, 5), 'sympy.Rational', 'Rational', ({(158, 8, 158, 9): '(1)', (158, 11, 158, 13): '(12)'}, {}), '(1, 12)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((43, 17, 43, 28), 'sympy.sqrt', 'sqrt', ({(43, 22, 43, 27): '(x + 1)'}, {}), '(x + 1)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, 
log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((43, 31, 43, 38), 'sympy.sqrt', 'sqrt', ({(43, 36, 43, 37): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((56, 44, 56, 60), 'sympy.sqrt', 'sqrt', ({(56, 49, 56, 59): '(x ** 4 + 1)'}, {}), '(x ** 4 + 1)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((60, 17, 60, 24), 'sympy.sqrt', 'sqrt', ({(60, 22, 60, 23): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((91, 66, 91, 73), 'sympy.sqrt', 'sqrt', ({(91, 71, 91, 72): 'a'}, {}), '(a)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((94, 17, 94, 41), 'sympy.sqrt', 'sqrt', ({(94, 22, 94, 40): '(x ** 2 - 5 * x + 6)'}, {}), '(x ** 2 - 5 * x + 6)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((112, 17, 112, 23), 'sympy.sin', 'sin', ({(112, 21, 112, 22): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((113, 21, 113, 31), 'sympy.sin', 'sin', ({(113, 25, 113, 30): '(1 / x)'}, {}), '(1 / x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((131, 17, 131, 23), 'sympy.sin', 'sin', ({(131, 21, 131, 22): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((132, 17, 132, 27), 'sympy.sin', 'sin', ({(132, 21, 132, 26): '(3 * x)'}, {}), '(3 * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((133, 17, 133, 27), 'sympy.sin', 'sin', ({(133, 21, 133, 26): '(5 * x)'}, {}), '(5 * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((133, 30, 133, 40), 'sympy.sin', 'sin', ({(133, 34, 133, 39): '(2 * x)'}, {}), '(2 * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((134, 17, 134, 28), 'sympy.sin', 'sin', ({(134, 21, 134, 27): '(pi * x)'}, {}), '(pi * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((134, 31, 134, 46), 'sympy.sin', 'sin', ({(134, 35, 134, 45): '(3 * pi * x)'}, {}), '(3 * pi * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((135, 21, 135, 32), 'sympy.sin', 'sin', ({(135, 25, 135, 31): '(pi / x)'}, {}), '(pi / x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((137, 21, 137, 31), 'sympy.sin', 'sin', ({(137, 25, 137, 30): '(1 / x)'}, {}), '(1 / x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((165, 17, 165, 28), 'sympy.asin', 'asin', ({(165, 22, 165, 27): '(a * x)'}, {}), '(a * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((78, 15, 78, 26), 'sympy.sqrt', 'sqrt', ({(78, 20, 78, 25): '(1 + x)'}, {}), '(1 + x)', False, 'from sympy import limit, Symbol, oo, sqrt, 
Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((80, 18, 80, 25), 'sympy.sqrt', 'sqrt', ({(80, 23, 80, 24): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((81, 18, 81, 25), 'sympy.sqrt', 'sqrt', ({(81, 23, 81, 24): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((91, 18, 91, 25), 'sympy.sqrt', 'sqrt', ({(91, 23, 91, 24): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((91, 28, 91, 35), 'sympy.sqrt', 'sqrt', ({(91, 33, 91, 34): 'a'}, {}), '(a)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((92, 18, 92, 25), 'sympy.sqrt', 'sqrt', ({(92, 23, 92, 24): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((93, 18, 93, 29), 'sympy.sqrt', 'sqrt', ({(93, 23, 93, 28): '(1 + x)'}, {}), '(1 + x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((93, 32, 93, 43), 'sympy.sqrt', 'sqrt', ({(93, 37, 93, 42): '(1 - x)'}, {}), '(1 - x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((98, 22, 98, 38), 'sympy.sqrt', 'sqrt', ({(98, 27, 98, 37): '(x ** 2 + 1)'}, {}), '(x ** 2 + 1)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((118, 18, 118, 28), 'sympy.sin', 'sin', ({(118, 22, 118, 27): '(2 * x)'}, {}), '(2 * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((136, 22, 136, 28), 'sympy.cos', 'cos', ({(136, 26, 136, 27): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((138, 18, 138, 28), 'sympy.cos', 'cos', ({(138, 22, 138, 27): '(m * x)'}, {}), '(m * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((138, 31, 138, 41), 'sympy.cos', 'cos', ({(138, 35, 138, 40): '(n * x)'}, {}), '(n * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((141, 18, 141, 24), 'sympy.tan', 'tan', ({(141, 22, 141, 23): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((141, 27, 141, 33), 'sympy.sin', 'sin', ({(141, 31, 141, 32): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((142, 22, 142, 32), 'sympy.sin', 'sin', ({(142, 26, 142, 31): '(2 * x)'}, {}), '(2 * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((142, 41, 142, 51), 'sympy.sin', 'sin', ({(142, 45, 142, 50): '(3 * x)'}, {}), '(3 * x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((147, 18, 147, 24), 'sympy.sin', 'sin', ({(147, 22, 147, 23): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((147, 27, 
147, 33), 'sympy.sin', 'sin', ({(147, 31, 147, 32): 'a'}, {}), '(a)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((148, 18, 148, 24), 'sympy.cos', 'cos', ({(148, 22, 148, 23): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((148, 27, 148, 33), 'sympy.cos', 'cos', ({(148, 31, 148, 32): 'a'}, {}), '(a)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((149, 18, 149, 28), 'sympy.sin', 'sin', ({(149, 22, 149, 27): '(x + h)'}, {}), '(x + h)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((149, 31, 149, 37), 'sympy.sin', 'sin', ({(149, 35, 149, 36): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((157, 51, 157, 57), 'sympy.sin', 'sin', ({(157, 55, 157, 56): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((58, 36, 58, 43), 'sympy.sqrt', 'sqrt', ({(58, 41, 58, 42): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((107, 25, 107, 31), 'sympy.exp', 'exp', ({(107, 29, 107, 30): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((108, 25, 108, 31), 'sympy.exp', 'exp', ({(108, 29, 108, 30): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((143, 27, 143, 33), 'sympy.cos', 'cos', ({(143, 31, 143, 32): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((157, 23, 157, 29), 'sympy.cos', 'cos', ({(157, 27, 157, 28): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((157, 39, 157, 45), 'sympy.cos', 'cos', ({(157, 43, 157, 44): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((144, 27, 144, 33), 'sympy.sin', 'sin', ({(144, 31, 144, 32): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((144, 46, 144, 52), 'sympy.sin', 'sin', ({(144, 50, 144, 51): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((60, 45, 60, 52), 'sympy.sqrt', 'sqrt', ({(60, 50, 60, 51): 'x'}, {}), '(x)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((26, 26, 26, 32), 'sympy.log', 'log', ({(26, 30, 26, 31): '(3)'}, {}), '(3)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n'), ((26, 35, 26, 41), 'sympy.log', 'log', ({(26, 39, 26, 40): '(2)'}, {}), '(2)', False, 'from sympy import limit, Symbol, oo, sqrt, Rational, log, exp, cos, sin, tan, pi, asin, together, root, S\n')] |
jfhbrook/notion-tools | notion/ctx.py | dd7c7fb07f98deaf5bba236aa5b4ea3d09ff0f3d | from notion.client import NotionClient
from notion.settings import Settings
class Context:
def __init__(self):
self.settings = Settings.from_file()
self._client = None
def get_client(self):
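        # Lazily build the NotionClient on first use and cache it for later calls.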
if not self._client:
self.settings.validate()
self._client = NotionClient(token_v2=self.settings.token, monitor=False)
return self._client
def update_settings(self, **kwargs):
self.settings = self.settings.update(**kwargs)
| [((7, 24, 7, 44), 'notion.settings.Settings.from_file', 'Settings.from_file', ({}, {}), '()', False, 'from notion.settings import Settings\n'), ((13, 27, 13, 84), 'notion.client.NotionClient', 'NotionClient', (), '', False, 'from notion.client import NotionClient\n')] |
rgooler/bootstrap-pip | setup.py | 34eaa648c81e3f8213b97cd33bda23b50743122a | #!/usr/bin/env python
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
install_requires = []
# install_requires = ['requests >= 2.1.0']
# For SNI support in Python 2, must install the following packages
# if sys.version_info[0] == 2:
# install_requires.append('pyOpenSSL >= 0.14')
# install_requires.append('ndg-httpsclient >= 0.3.3')
# install_requires.append('pyasn1 >= 0.1.7')
setup(
name='mymodule',
packages=['mymodule'],
version='0.1',
description='Desc',
long_description=(read('README.rst') + '\n\n' +
read('HISTORY.rst') + '\n\n' +
read('AUTHORS.rst')),
url='http://github.com/rgooler/bootstrap-pip/',
license='MIT',
author='Ryan Gooler',
author_email='[email protected]',
py_modules=['mymodule'],
install_requires=install_requires,
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| [((11, 14, 11, 34), 'os.path.join', 'os.path.join', ({(11, 27, 11, 33): '*paths'}, {}), '(*paths)', False, 'import os\n')] |
carefree0910/carefree-data | cfdata/tabular/converters/__init__.py | ae0f4ea5724b4efd5d76f2a9d420acf3322c1d19 | from .base import *
from .string import *
from .categorical import *
from .numerical import *
__all__ = ["Converter", "converter_dict"]
| [] |
BronWang/first_github | hello_world.py | 9cdd40458014a448a5121268ebca907e3cba1eee | def hello_world():
"""打印Hello world"""
message = 'hello world'
print(message.title())
hello_world()
| [] |
plasroom46/DesignPattern.Sample | Python/Samples/Observer/UtObserver.py | 86c05c5ae356cb01f3d075f248c45da3e6534d07 | import unittest
from Observers import Observer, ObserverMailServer, ObserverPbx
from Subjects import Subject, SubjectEflow
class UtVisitor(unittest.TestCase):
def test_observer(self):
# Create observers
pbx = ObserverPbx()
ms = ObserverMailServer()
# Create subject
subject = SubjectEflow()
subject.attach(pbx)
subject.attach(ms)
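        # Both observers are now registered and will be notified by the subject.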
        # Notify observers when JB is on leave of absence
subject.notify("JB", "Hachi")
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| [((26, 4, 26, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((11, 14, 11, 27), 'Observers.ObserverPbx', 'ObserverPbx', ({}, {}), '()', False, 'from Observers import Observer, ObserverMailServer, ObserverPbx\n'), ((12, 13, 12, 33), 'Observers.ObserverMailServer', 'ObserverMailServer', ({}, {}), '()', False, 'from Observers import Observer, ObserverMailServer, ObserverPbx\n'), ((15, 18, 15, 32), 'Subjects.SubjectEflow', 'SubjectEflow', ({}, {}), '()', False, 'from Subjects import Subject, SubjectEflow\n')] |
Relintai/pandemonium_engine | modules/voxelman/config.py | 3de05db75a396b497f145411f71eb363572b38ae |
def can_build(env, platform):
return True
def configure(env):
pass
def get_doc_classes():
return [
"WorldArea",
"VoxelLight",
"VoxelLightNode",
"VoxelLevelGenerator",
"VoxelLevelGeneratorFlat",
"VoxelSurfaceMerger",
"VoxelSurfaceSimple",
"VoxelSurface",
"VoxelLibraryMerger",
"VoxelLibrarySimple",
"VoxelLibrary",
"VoxelLibraryMergerPCM",
"VoxelMaterialCache",
"VoxelMaterialCachePCM",
"VoxelCubePoints",
"VoxelMesherCubic",
"VoxelMeshData",
"MarchingCubesCellData",
"VoxelMesherMarchingCubes",
"VoxelMesher",
"EnvironmentData",
"VoxelChunk",
"VoxelChunkDefault",
"VoxelStructure",
"BlockVoxelStructure",
"VoxelWorld",
"VoxelMesherBlocky",
"VoxelWorldBlocky",
"VoxelChunkBlocky",
"VoxelMesherLiquidBlocky",
"VoxelWorldMarchingCubes",
"VoxelChunkMarchingCubes",
"VoxelMesherCubic",
"VoxelWorldCubic",
"VoxelChunkCubic",
"VoxelMesherDefault",
"VoxelWorldDefault",
"VoxelJob",
"VoxelTerrainJob",
"VoxelLightJob",
"VoxelPropJob",
"VoxelMesherJobStep",
]
def get_doc_path():
return "doc_classes"
| [] |
JonoRicci/Advent-Of-Code | Python/2021/day_04/day_04.py | 1c092410d6ece195f4689788af4b1091acf10fbb | """
Day 04
"""
from logger import logger
def main() -> None:
"""
Import the puzzle input, process and display the results.
"""
puzzle_input = import_list()
logger.debug(puzzle_input)
final_score = play_bingo(puzzle_input)
for result in final_score:
logger.info(f"The final score is: {result}.")
def import_list() -> list:
"""
Import the puzzle input and return a list.
:return: Puzzle input text file as list
:rtype: list
"""
file = open("puzzle-input", "r")
string_list = file.read().splitlines()
file.close()
return string_list
def play_bingo(bingo_cards: list) -> list:
"""
Extract winning numbers, bingo boards from input.
Make a separate 2D list tracking wins.
For each winning number, check every board row and column for a match.
Add matches to the 2D list tracking wins.
Once done, check 2D list for winning columns / rows.
Add winning boards to new list along with winning number.
Multiply to get score.
:param bingo_cards: puzzle input where each line is a string
:return: First and last winning board score
:rtype: list
"""
winning_numbers = [int(x) for x in bingo_cards[0].split(",")]
logger.debug(f" Winning numbers: {winning_numbers}")
single_board = []
all_boards = []
final_score_list = []
# Get Bingo Boards
for line in range(len(bingo_cards)):
if "," not in bingo_cards[line]:
row = [int(x) for x in bingo_cards[line].split()]
if row:
logger.debug(row)
single_board.append(row)
elif single_board:
all_boards.append(single_board)
single_board = []
# Set up separate 2D list tracking matches to winning numbers.
unmarked_tracker = []
for board in all_boards:
assert len(board) == 5 and len(board[0]) == 5
unmarked_tracker.append([[False for _ in range(5)] for _ in range(5)])
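    # unmarked_tracker[i][row][column] becomes True once that cell of board i is drawn.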
# Set up list to track winning boards.
winning_board = [False for _ in range(len(all_boards))]
for number in winning_numbers:
for index, board in enumerate(all_boards):
logger.debug(f"Checking board: {index} for {number}")
# Check for winning numbers.
for row in range(5):
for column in range(5):
if board[row][column] == number:
logger.debug(f"{unmarked_tracker[index][row][column]} "
f"is True.")
unmarked_tracker[index][row][column] = True
# Check for 5 in a row.
won = False
for row in range(5):
ok = True
for column in range(5):
if not unmarked_tracker[index][row][column]:
ok = False
if ok:
won = True
# Check for 5 in a column.
for column in range(5):
ok = True
for row in range(5):
if not unmarked_tracker[index][row][column]:
ok = False
if ok:
won = True
# Check for each winning board.
if won and not winning_board[index]:
winning_board[index] = True
winning_boards_count = len([j for j in range(len(all_boards))
if winning_board[j]])
# If first or last board.
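                # (i.e. the first and the last board to win; their scores are
                # the two results returned, per the docstring above)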
if winning_boards_count == 1 or winning_boards_count == \
len(all_boards):
# Calculate all unmarked.
unmarked = 0
for row in range(5):
for column in range(5):
if not unmarked_tracker[index][row][column]:
unmarked += board[row][column]
final_score_list.append(unmarked * number)
logger.debug(f"The final score is: {final_score_list[-1]}, "
f"which is {unmarked} * {number}.")
return final_score_list
if __name__ == "__main__":
main()
| [((14, 4, 14, 30), 'logger.logger.debug', 'logger.debug', ({(14, 17, 14, 29): 'puzzle_input'}, {}), '(puzzle_input)', False, 'from logger import logger\n'), ((49, 4, 49, 56), 'logger.logger.debug', 'logger.debug', ({(49, 17, 49, 55): 'f""" Winning numbers: {winning_numbers}"""'}, {}), "(f' Winning numbers: {winning_numbers}')", False, 'from logger import logger\n'), ((17, 8, 17, 53), 'logger.logger.info', 'logger.info', ({(17, 20, 17, 52): 'f"""The final score is: {result}."""'}, {}), "(f'The final score is: {result}.')", False, 'from logger import logger\n'), ((77, 12, 77, 65), 'logger.logger.debug', 'logger.debug', ({(77, 25, 77, 64): 'f"""Checking board: {index} for {number}"""'}, {}), "(f'Checking board: {index} for {number}')", False, 'from logger import logger\n'), ((60, 16, 60, 33), 'logger.logger.debug', 'logger.debug', ({(60, 29, 60, 32): 'row'}, {}), '(row)', False, 'from logger import logger\n'), ((123, 20, 124, 68), 'logger.logger.debug', 'logger.debug', ({(123, 33, 124, 67): 'f"""The final score is: {final_score_list[-1]}, which is {unmarked} * {number}."""'}, {}), "(\n f'The final score is: {final_score_list[-1]}, which is {unmarked} * {number}.'\n )", False, 'from logger import logger\n'), ((83, 24, 84, 49), 'logger.logger.debug', 'logger.debug', ({(83, 37, 84, 48): 'f"""{unmarked_tracker[index][row][column]} is True."""'}, {}), "(f'{unmarked_tracker[index][row][column]} is True.')", False, 'from logger import logger\n')] |
HPI-Information-Systems/TimeEval | timeeval_experiments/algorithms/eif.py | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | from durations import Duration
from typing import Any, Dict, Optional
from timeeval import Algorithm, TrainingType, InputDimensionality
from timeeval.adapters import DockerAdapter
from timeeval.params import ParameterConfig
_eif_parameters: Dict[str, Dict[str, Any]] = {
"extension_level": {
"defaultValue": None,
"description": "Extension level 0 resembles standard isolation forest. If unspecified (`None`), then `extension_level=X.shape[1] - 1`.",
"name": "extension_level",
"type": "int"
},
"limit": {
"defaultValue": None,
"description": "The maximum allowed tree depth. This is by default set to average length of unsucessful search in a binary tree.",
"name": "limit",
"type": "int"
},
"max_samples": {
"defaultValue": None,
"description": "The number of samples to draw from X to train each base estimator: `max_samples * X.shape[0]`. If unspecified (`None`), then `max_samples=min(256, X.shape[0])`.",
"name": "max_samples",
"type": "float"
},
"n_trees": {
"defaultValue": 200,
"description": "The number of decision trees (base estimators) in the forest (ensemble).",
"name": "n_trees",
"type": "int"
},
"random_state": {
"defaultValue": 42,
"description": "Seed for random number generation.",
"name": "random_state",
"type": "int"
}
}
def eif(params: ParameterConfig = None, skip_pull: bool = False, timeout: Optional[Duration] = None) -> Algorithm:
return Algorithm(
name="Extended Isolation Forest (EIF)",
main=DockerAdapter(
image_name="registry.gitlab.hpi.de/akita/i/eif",
skip_pull=skip_pull,
timeout=timeout,
group_privileges="akita",
),
preprocess=None,
postprocess=None,
param_schema=_eif_parameters,
param_config=params or ParameterConfig.defaults(),
data_as_file=True,
training_type=TrainingType.UNSUPERVISED,
input_dimensionality=InputDimensionality("multivariate")
)
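# Minimal call sketch (illustrative; assumes the Docker image is reachable):
#   algorithm = eif(skip_pull=True)  # default hyperparameters, default timeout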
| [((46, 13, 51, 9), 'timeeval.adapters.DockerAdapter', 'DockerAdapter', (), '', False, 'from timeeval.adapters import DockerAdapter\n'), ((58, 29, 58, 64), 'timeeval.InputDimensionality', 'InputDimensionality', ({(58, 49, 58, 63): '"""multivariate"""'}, {}), "('multivariate')", False, 'from timeeval import Algorithm, TrainingType, InputDimensionality\n'), ((55, 31, 55, 57), 'timeeval.params.ParameterConfig.defaults', 'ParameterConfig.defaults', ({}, {}), '()', False, 'from timeeval.params import ParameterConfig\n')] |
KEHANG/deepchem | deepchem/models/tf_new_models/graph_models.py | 367bea14cab47b1093bf106e0c196bb02d55c755 | """
Convenience classes for assembling graph models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import tensorflow as tf
from deepchem.nn.layers import GraphGather
from deepchem.models.tf_new_models.graph_topology import GraphTopology
class SequentialGraph(object):
"""An analog of Keras Sequential class for Graph data.
Like the Sequential class from Keras, but automatically passes topology
placeholders from GraphTopology to each graph layer (from layers) added
  to the network. Non-graph layers don't get the extra placeholders.
"""
def __init__(self, n_feat):
"""
Parameters
----------
n_feat: int
Number of features per atom.
"""
self.graph = tf.Graph()
with self.graph.as_default():
self.graph_topology = GraphTopology(n_feat)
self.output = self.graph_topology.get_atom_features_placeholder()
# Keep track of the layers
self.layers = []
def add(self, layer):
"""Adds a new layer to model."""
with self.graph.as_default():
############################################# DEBUG
#print("start - add()")
#print("self.output")
#print(self.output)
############################################# DEBUG
# For graphical layers, add connectivity placeholders
if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
if (len(self.layers) > 0 and hasattr(self.layers[-1], "__name__")):
assert self.layers[-1].__name__ != "GraphGather", \
'Cannot use GraphConv or GraphGather layers after a GraphGather'
self.output = layer([self.output] +
self.graph_topology.get_topology_placeholders())
else:
self.output = layer(self.output)
############################################# DEBUG
#print("end- add()")
#print("self.output")
#print(self.output)
############################################# DEBUG
# Add layer to the layer list
self.layers.append(layer)
def get_graph_topology(self):
return self.graph_topology
def get_num_output_features(self):
"""Gets the output shape of the featurization layers of the network"""
return self.layers[-1].output_shape[1]
def return_outputs(self):
return self.output
def return_inputs(self):
return self.graph_topology.get_input_placeholders()
def get_layer(self, layer_id):
return self.layers[layer_id]
class SequentialSupportGraph(object):
"""An analog of Keras Sequential model for test/support models."""
def __init__(self, n_feat):
"""
Parameters
----------
n_feat: int
Number of atomic features.
"""
self.graph = tf.Graph()
with self.graph.as_default():
# Create graph topology and x
self.test_graph_topology = GraphTopology(n_feat, name='test')
self.support_graph_topology = GraphTopology(n_feat, name='support')
self.test = self.test_graph_topology.get_atom_features_placeholder()
self.support = self.support_graph_topology.get_atom_features_placeholder()
# Keep track of the layers
self.layers = []
# Whether or not we have used the GraphGather layer yet
self.bool_pre_gather = True
def add(self, layer):
"""Adds a layer to both test/support stacks.
Note that the layer transformation is performed independently on the
test/support tensors.
"""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
assert self.bool_pre_gather, "Cannot apply graphical layers after gather."
self.test = layer([self.test] + self.test_graph_topology.topology)
self.support = layer([self.support] +
self.support_graph_topology.topology)
else:
self.test = layer(self.test)
self.support = layer(self.support)
if type(layer).__name__ == 'GraphGather':
self.bool_pre_gather = False # Set flag to stop adding topology
def add_test(self, layer):
"""Adds a layer to test."""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']:
self.test = layer([self.test] + self.test_graph_topology.topology)
else:
self.test = layer(self.test)
def add_support(self, layer):
"""Adds a layer to support."""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']:
self.support = layer([self.support] +
self.support_graph_topology.topology)
else:
self.support = layer(self.support)
def join(self, layer):
"""Joins test and support to a two input two output layer"""
with self.graph.as_default():
self.layers.append(layer)
self.test, self.support = layer([self.test, self.support])
def get_test_output(self):
return self.test
def get_support_output(self):
return self.support
def return_outputs(self):
return [self.test] + [self.support]
def return_inputs(self):
return (self.test_graph_topology.get_inputs() +
self.support_graph_topology.get_inputs())
| [((32, 17, 32, 27), 'tensorflow.Graph', 'tf.Graph', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((93, 17, 93, 27), 'tensorflow.Graph', 'tf.Graph', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((34, 28, 34, 49), 'deepchem.models.tf_new_models.graph_topology.GraphTopology', 'GraphTopology', ({(34, 42, 34, 48): 'n_feat'}, {}), '(n_feat)', False, 'from deepchem.models.tf_new_models.graph_topology import GraphTopology\n'), ((96, 33, 96, 67), 'deepchem.models.tf_new_models.graph_topology.GraphTopology', 'GraphTopology', (), '', False, 'from deepchem.models.tf_new_models.graph_topology import GraphTopology\n'), ((97, 36, 97, 73), 'deepchem.models.tf_new_models.graph_topology.GraphTopology', 'GraphTopology', (), '', False, 'from deepchem.models.tf_new_models.graph_topology import GraphTopology\n')] |
IIKovalenko/python-sdk | yandex/cloud/access/access_pb2.py | 980e2c5d848eadb42799132b35a9f58ab7b27157 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/access/access.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/access/access.proto',
package='yandex.cloud.access',
syntax='proto3',
serialized_options=_b('Z>github.com/yandex-cloud/go-genproto/yandex/cloud/access;access'),
serialized_pb=_b('\n yandex/cloud/access/access.proto\x12\x13yandex.cloud.access\x1a\x1dyandex/cloud/validation.proto\"-\n\x07Subject\x12\x14\n\x02id\x18\x01 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x0c\n\x04type\x18\x02 \x01(\t\"_\n\rAccessBinding\x12\x19\n\x07role_id\x18\x01 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x33\n\x07subject\x18\x02 \x01(\x0b\x32\x1c.yandex.cloud.access.SubjectB\x04\xe8\xc7\x31\x01\"|\n\x19ListAccessBindingsRequest\x12!\n\x0bresource_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06<=1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"r\n\x1aListAccessBindingsResponse\x12;\n\x0f\x61\x63\x63\x65ss_bindings\x18\x01 \x03(\x0b\x32\".yandex.cloud.access.AccessBinding\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\x80\x01\n\x18SetAccessBindingsRequest\x12!\n\x0bresource_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x41\n\x0f\x61\x63\x63\x65ss_bindings\x18\x02 \x03(\x0b\x32\".yandex.cloud.access.AccessBindingB\x04\xe8\xc7\x31\x01\"0\n\x19SetAccessBindingsMetadata\x12\x13\n\x0bresource_id\x18\x01 \x01(\t\"\x8e\x01\n\x1bUpdateAccessBindingsRequest\x12!\n\x0bresource_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12L\n\x15\x61\x63\x63\x65ss_binding_deltas\x18\x02 \x03(\x0b\x32\'.yandex.cloud.access.AccessBindingDeltaB\x04\xe8\xc7\x31\x01\"3\n\x1cUpdateAccessBindingsMetadata\x12\x13\n\x0bresource_id\x18\x01 \x01(\t\"\x96\x01\n\x12\x41\x63\x63\x65ssBindingDelta\x12>\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32(.yandex.cloud.access.AccessBindingActionB\x04\xe8\xc7\x31\x01\x12@\n\x0e\x61\x63\x63\x65ss_binding\x18\x02 \x01(\x0b\x32\".yandex.cloud.access.AccessBindingB\x04\xe8\xc7\x31\x01*Q\n\x13\x41\x63\x63\x65ssBindingAction\x12%\n!ACCESS_BINDING_ACTION_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41\x44\x44\x10\x01\x12\n\n\x06REMOVE\x10\x02\x42@Z>github.com/yandex-cloud/go-genproto/yandex/cloud/access;accessb\x06proto3')
,
dependencies=[yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,])
_ACCESSBINDINGACTION = _descriptor.EnumDescriptor(
name='AccessBindingAction',
full_name='yandex.cloud.access.AccessBindingAction',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ACCESS_BINDING_ACTION_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADD', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REMOVE', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1006,
serialized_end=1087,
)
_sym_db.RegisterEnumDescriptor(_ACCESSBINDINGACTION)
AccessBindingAction = enum_type_wrapper.EnumTypeWrapper(_ACCESSBINDINGACTION)
ACCESS_BINDING_ACTION_UNSPECIFIED = 0
ADD = 1
REMOVE = 2
_SUBJECT = _descriptor.Descriptor(
name='Subject',
full_name='yandex.cloud.access.Subject',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='yandex.cloud.access.Subject.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='yandex.cloud.access.Subject.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=133,
)
_ACCESSBINDING = _descriptor.Descriptor(
name='AccessBinding',
full_name='yandex.cloud.access.AccessBinding',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='role_id', full_name='yandex.cloud.access.AccessBinding.role_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subject', full_name='yandex.cloud.access.AccessBinding.subject', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=230,
)
_LISTACCESSBINDINGSREQUEST = _descriptor.Descriptor(
name='ListAccessBindingsRequest',
full_name='yandex.cloud.access.ListAccessBindingsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='yandex.cloud.access.ListAccessBindingsRequest.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='yandex.cloud.access.ListAccessBindingsRequest.page_size', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\372\3071\006<=1000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_token', full_name='yandex.cloud.access.ListAccessBindingsRequest.page_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\005<=100'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=232,
serialized_end=356,
)
_LISTACCESSBINDINGSRESPONSE = _descriptor.Descriptor(
name='ListAccessBindingsResponse',
full_name='yandex.cloud.access.ListAccessBindingsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='access_bindings', full_name='yandex.cloud.access.ListAccessBindingsResponse.access_bindings', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='yandex.cloud.access.ListAccessBindingsResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=358,
serialized_end=472,
)
_SETACCESSBINDINGSREQUEST = _descriptor.Descriptor(
name='SetAccessBindingsRequest',
full_name='yandex.cloud.access.SetAccessBindingsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='yandex.cloud.access.SetAccessBindingsRequest.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_bindings', full_name='yandex.cloud.access.SetAccessBindingsRequest.access_bindings', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=475,
serialized_end=603,
)
_SETACCESSBINDINGSMETADATA = _descriptor.Descriptor(
name='SetAccessBindingsMetadata',
full_name='yandex.cloud.access.SetAccessBindingsMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='yandex.cloud.access.SetAccessBindingsMetadata.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=605,
serialized_end=653,
)
_UPDATEACCESSBINDINGSREQUEST = _descriptor.Descriptor(
name='UpdateAccessBindingsRequest',
full_name='yandex.cloud.access.UpdateAccessBindingsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='yandex.cloud.access.UpdateAccessBindingsRequest.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_binding_deltas', full_name='yandex.cloud.access.UpdateAccessBindingsRequest.access_binding_deltas', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=656,
serialized_end=798,
)
_UPDATEACCESSBINDINGSMETADATA = _descriptor.Descriptor(
name='UpdateAccessBindingsMetadata',
full_name='yandex.cloud.access.UpdateAccessBindingsMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='yandex.cloud.access.UpdateAccessBindingsMetadata.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=800,
serialized_end=851,
)
_ACCESSBINDINGDELTA = _descriptor.Descriptor(
name='AccessBindingDelta',
full_name='yandex.cloud.access.AccessBindingDelta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='action', full_name='yandex.cloud.access.AccessBindingDelta.action', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_binding', full_name='yandex.cloud.access.AccessBindingDelta.access_binding', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=854,
serialized_end=1004,
)
_ACCESSBINDING.fields_by_name['subject'].message_type = _SUBJECT
_LISTACCESSBINDINGSRESPONSE.fields_by_name['access_bindings'].message_type = _ACCESSBINDING
_SETACCESSBINDINGSREQUEST.fields_by_name['access_bindings'].message_type = _ACCESSBINDING
_UPDATEACCESSBINDINGSREQUEST.fields_by_name['access_binding_deltas'].message_type = _ACCESSBINDINGDELTA
_ACCESSBINDINGDELTA.fields_by_name['action'].enum_type = _ACCESSBINDINGACTION
_ACCESSBINDINGDELTA.fields_by_name['access_binding'].message_type = _ACCESSBINDING
DESCRIPTOR.message_types_by_name['Subject'] = _SUBJECT
DESCRIPTOR.message_types_by_name['AccessBinding'] = _ACCESSBINDING
DESCRIPTOR.message_types_by_name['ListAccessBindingsRequest'] = _LISTACCESSBINDINGSREQUEST
DESCRIPTOR.message_types_by_name['ListAccessBindingsResponse'] = _LISTACCESSBINDINGSRESPONSE
DESCRIPTOR.message_types_by_name['SetAccessBindingsRequest'] = _SETACCESSBINDINGSREQUEST
DESCRIPTOR.message_types_by_name['SetAccessBindingsMetadata'] = _SETACCESSBINDINGSMETADATA
DESCRIPTOR.message_types_by_name['UpdateAccessBindingsRequest'] = _UPDATEACCESSBINDINGSREQUEST
DESCRIPTOR.message_types_by_name['UpdateAccessBindingsMetadata'] = _UPDATEACCESSBINDINGSMETADATA
DESCRIPTOR.message_types_by_name['AccessBindingDelta'] = _ACCESSBINDINGDELTA
DESCRIPTOR.enum_types_by_name['AccessBindingAction'] = _ACCESSBINDINGACTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Subject = _reflection.GeneratedProtocolMessageType('Subject', (_message.Message,), dict(
DESCRIPTOR = _SUBJECT,
__module__ = 'yandex.cloud.access.access_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.access.Subject)
))
_sym_db.RegisterMessage(Subject)
AccessBinding = _reflection.GeneratedProtocolMessageType('AccessBinding', (_message.Message,), dict(
DESCRIPTOR = _ACCESSBINDING,
__module__ = 'yandex.cloud.access.access_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.access.AccessBinding)
))
_sym_db.RegisterMessage(AccessBinding)
ListAccessBindingsRequest = _reflection.GeneratedProtocolMessageType('ListAccessBindingsRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTACCESSBINDINGSREQUEST,
__module__ = 'yandex.cloud.access.access_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.access.ListAccessBindingsRequest)
))
_sym_db.RegisterMessage(ListAccessBindingsRequest)
ListAccessBindingsResponse = _reflection.GeneratedProtocolMessageType('ListAccessBindingsResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTACCESSBINDINGSRESPONSE,
__module__ = 'yandex.cloud.access.access_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.access.ListAccessBindingsResponse)
))
_sym_db.RegisterMessage(ListAccessBindingsResponse)
SetAccessBindingsRequest = _reflection.GeneratedProtocolMessageType('SetAccessBindingsRequest', (_message.Message,), dict(
DESCRIPTOR = _SETACCESSBINDINGSREQUEST,
__module__ = 'yandex.cloud.access.access_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.access.SetAccessBindingsRequest)
))
_sym_db.RegisterMessage(SetAccessBindingsRequest)
SetAccessBindingsMetadata = _reflection.GeneratedProtocolMessageType('SetAccessBindingsMetadata', (_message.Message,), dict(
DESCRIPTOR = _SETACCESSBINDINGSMETADATA,
__module__ = 'yandex.cloud.access.access_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.access.SetAccessBindingsMetadata)
))
_sym_db.RegisterMessage(SetAccessBindingsMetadata)
UpdateAccessBindingsRequest = _reflection.GeneratedProtocolMessageType('UpdateAccessBindingsRequest', (_message.Message,), dict(
DESCRIPTOR = _UPDATEACCESSBINDINGSREQUEST,
__module__ = 'yandex.cloud.access.access_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.access.UpdateAccessBindingsRequest)
))
_sym_db.RegisterMessage(UpdateAccessBindingsRequest)
UpdateAccessBindingsMetadata = _reflection.GeneratedProtocolMessageType('UpdateAccessBindingsMetadata', (_message.Message,), dict(
DESCRIPTOR = _UPDATEACCESSBINDINGSMETADATA,
__module__ = 'yandex.cloud.access.access_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.access.UpdateAccessBindingsMetadata)
))
_sym_db.RegisterMessage(UpdateAccessBindingsMetadata)
AccessBindingDelta = _reflection.GeneratedProtocolMessageType('AccessBindingDelta', (_message.Message,), dict(
DESCRIPTOR = _ACCESSBINDINGDELTA,
__module__ = 'yandex.cloud.access.access_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.access.AccessBindingDelta)
))
_sym_db.RegisterMessage(AccessBindingDelta)
DESCRIPTOR._options = None
_SUBJECT.fields_by_name['id']._options = None
_ACCESSBINDING.fields_by_name['role_id']._options = None
_ACCESSBINDING.fields_by_name['subject']._options = None
_LISTACCESSBINDINGSREQUEST.fields_by_name['resource_id']._options = None
_LISTACCESSBINDINGSREQUEST.fields_by_name['page_size']._options = None
_LISTACCESSBINDINGSREQUEST.fields_by_name['page_token']._options = None
_SETACCESSBINDINGSREQUEST.fields_by_name['resource_id']._options = None
_SETACCESSBINDINGSREQUEST.fields_by_name['access_bindings']._options = None
_UPDATEACCESSBINDINGSREQUEST.fields_by_name['resource_id']._options = None
_UPDATEACCESSBINDINGSREQUEST.fields_by_name['access_binding_deltas']._options = None
_ACCESSBINDINGDELTA.fields_by_name['action']._options = None
_ACCESSBINDINGDELTA.fields_by_name['access_binding']._options = None
# @@protoc_insertion_point(module_scope)
| [((13, 10, 13, 36), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ({}, {}), '()', True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((54, 22, 54, 77), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', ({(54, 56, 54, 76): '_ACCESSBINDINGACTION'}, {}), '(_ACCESSBINDINGACTION)', False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((34, 4, 37, 16), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((38, 4, 41, 16), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((42, 4, 45, 16), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((189, 4, 195, 47), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n')] |
SyafiqTermizi/questionbank | questionbank/users/urls.py | 33e58db1a1610a85bd30a85d2f52e819bc27058b | from django.urls import path
from .views import (
UserListView, UserUpdateView, UserProfileView, UserDeleteView,
AcceptInvitationView, SpecialtyListView, SpecialtyCreateView, SpecialtyUpdateView,
SpecialtyDeleteView
)
app_name = 'users'
urlpatterns = [
path('', UserListView.as_view(), name='list'),
path('<int:pk>/', UserUpdateView.as_view(), name='update'),
path('<int:pk>/delete/', UserDeleteView.as_view(), name='delete'),
path('profile/', UserProfileView.as_view(), name='profile'),
path(
'invite/<str:token>/', AcceptInvitationView.as_view(),
name='accept_invite'
),
path('specialties/', SpecialtyListView.as_view(), name='specialty_list'),
path('specialties/create/', SpecialtyCreateView.as_view(), name='specialty_create'),
path('specialties/<int:pk>/update/', SpecialtyUpdateView.as_view(), name='specialty_update'),
path('specialties/<int:pk>/delete/', SpecialtyDeleteView.as_view(), name='specialty_delete')
]
| [] |
Zoufalc/qiskit-machine-learning | qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py | aae3941214cd9667a53b643f229d11d0bff32c60 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Neural network regressor """
from typing import Union
import numpy as np
from qiskit.algorithms.optimizers import Optimizer
from ...exceptions import QiskitMachineLearningError
from ...neural_networks import NeuralNetwork
from ...utils.loss_functions import (Loss, L1Loss, L2Loss, CrossEntropyLoss,
CrossEntropySigmoidLoss)
class NeuralNetworkRegressor:
""" Quantum neural network regressor"""
def __init__(self, neural_network: NeuralNetwork,
loss: Union[str, Loss] = 'l2',
optimizer: Optimizer = None,
warm_start: bool = False):
"""
Args:
            neural_network: An instance of a quantum neural network. If the neural network has a
one-dimensional output, i.e., `neural_network.output_shape=(1,)`, then it is
expected to return values in [-1, +1] and it can only be used for binary
classification. If the output is multi-dimensional, it is assumed that the result
is a probability distribution, i.e., that the entries are non-negative and sum up
to one. Then there are two options, either one-hot encoding or not. In case of
                one-hot encoding, each probability vector resulting from the neural network is considered
as one sample and the loss function is applied to the whole vector. Otherwise, each
entry of the probability vector is considered as an individual sample and the loss
function is applied to the index and weighted with the corresponding probability.
loss: A target loss function to be used in training. Default is `l2`, i.e. L2 loss.
Can be given either as a string for 'l1', 'l2', 'cross_entropy',
'cross_entropy_sigmoid', or as a loss function implementing the Loss interface.
optimizer: An instance of an optimizer to be used in training.
warm_start: Use weights from previous fit to start next fit.
Raises:
QiskitMachineLearningError: unknown loss, invalid neural network
"""
self._neural_network = neural_network
if len(neural_network.output_shape) > 1:
raise QiskitMachineLearningError('Invalid neural network output shape!')
if isinstance(loss, Loss):
self._loss = loss
else:
if loss.lower() == 'l1':
self._loss = L1Loss()
elif loss.lower() == 'l2':
self._loss = L2Loss()
elif loss.lower() == 'cross_entropy':
self._loss = CrossEntropyLoss()
elif loss.lower() == 'cross_entropy_sigmoid':
self._loss = CrossEntropySigmoidLoss()
else:
raise QiskitMachineLearningError(f'Unknown loss {loss}!')
self._optimizer = optimizer
self._warm_start = warm_start
self._fit_result = None
@property
def neural_network(self):
""" Returns the underlying neural network."""
return self._neural_network
@property
def loss(self):
""" Returns the underlying neural network."""
return self._loss
@property
def warm_start(self) -> bool:
""" Returns the warm start flag."""
return self._warm_start
@warm_start.setter
def warm_start(self, warm_start: bool) -> None:
""" Sets the warm start flag."""
self._warm_start = warm_start
def fit(self, X: np.ndarray, y: np.ndarray): # pylint: disable=invalid-name
"""
Fit the model to data matrix X and target(s) y.
Args:
X: The input data.
y: The target values.
Returns:
            self: returns a trained regressor.
Raises:
QiskitMachineLearningError: In case of invalid data (e.g. incompatible with network)
"""
if self._neural_network.output_shape == (1,):
# TODO: we should add some reasonable compatibility checks and raise meaningful errors.
def objective(w):
predict = self._neural_network.forward(X, w)
target = np.array(y).reshape(predict.shape)
value = np.sum(self._loss(predict, target))
return value
def objective_grad(w):
# TODO should store output from forward pass (implement loss interface?)
# TODO: need to be able to turn off input grads if not needed.
output = self._neural_network.forward(X, w)
_, weights_grad = self._neural_network.backward(X, w)
grad = np.zeros((1, self._neural_network.num_weights))
for i in range(len(X)):
grad += self._loss.gradient(output[i][0], y[i]) * weights_grad[i]
return grad
else:
def objective(w):
val = 0.0
probs = self._neural_network.forward(X, w)
for i in range(len(X)):
for y_predict, prob in enumerate(probs[i]):
val += prob * self._loss(y_predict, y[i])
return val
def objective_grad(w):
num_classes = self._neural_network.output_shape[0]
grad = np.zeros((1, self._neural_network.num_weights))
for x, y_target in zip(X, y):
# TODO: do batch eval
_, weight_prob_grad = self._neural_network.backward(x, w)
for i in range(num_classes):
grad += weight_prob_grad[
0, i, :].reshape(grad.shape) * self._loss(i, y_target)
return grad
if self._warm_start and self._fit_result is not None:
initial_point = self._fit_result[0]
else:
initial_point = np.random.rand(self._neural_network.num_weights)
self._fit_result = self._optimizer.optimize(self._neural_network.num_weights, objective,
objective_grad, initial_point=initial_point)
return self
def predict(self, X: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name
"""
        Predict using the network specified for the regression.
Args:
X: The input data.
Raises:
QiskitMachineLearningError: Model needs to be fit to some training data first
Returns:
The predicted values.
"""
if self._fit_result is None:
raise QiskitMachineLearningError('Model needs to be fit to some training data first!')
# TODO: proper handling of batching
return self._neural_network.forward(X, self._fit_result[0])
def score(self, X: np.ndarray, y: np.ndarray) -> int: # pylint: disable=invalid-name
"""
        Return R-squared on the given test data and target values.
Args:
X: Test samples.
y: True target values given `X`.
Raises:
QiskitMachineLearningError: Model needs to be fit to some training data first
Returns:
R-squared value.
"""
if self._fit_result is None:
raise QiskitMachineLearningError('Model needs to be fit to some training data first!')
predict = self.predict(X)
# Compute R2 for score
ss_res = sum(map(lambda k: (k[0] - k[1]) ** 2, zip(y, predict)))
ss_tot = sum([(k - np.mean(y)) ** 2 for k in y])
score = 1 - (ss_res / ss_tot)
if len(np.array(score).shape) > 0:
return score[0]
else:
return score
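# Minimal usage sketch (illustrative, not part of the original module): `qnn` stands in
# for an already-constructed NeuralNetwork instance, and COBYLA is just one possible optimizer.
#
#     from qiskit.algorithms.optimizers import COBYLA
#     regressor = NeuralNetworkRegressor(neural_network=qnn, loss='l2', optimizer=COBYLA())
#     regressor.fit(X_train, y_train)         # X_train: samples, y_train: targets
#     r2 = regressor.score(X_test, y_test)    # R-squared on held-out data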
| [((157, 28, 157, 76), 'numpy.random.rand', 'np.random.rand', ({(157, 43, 157, 75): 'self._neural_network.num_weights'}, {}), '(self._neural_network.num_weights)', True, 'import numpy as np\n'), ((127, 23, 127, 70), 'numpy.zeros', 'np.zeros', ({(127, 32, 127, 69): '(1, self._neural_network.num_weights)'}, {}), '((1, self._neural_network.num_weights))', True, 'import numpy as np\n'), ((145, 23, 145, 70), 'numpy.zeros', 'np.zeros', ({(145, 32, 145, 69): '(1, self._neural_network.num_weights)'}, {}), '((1, self._neural_network.num_weights))', True, 'import numpy as np\n'), ((202, 15, 202, 30), 'numpy.array', 'np.array', ({(202, 24, 202, 29): 'score'}, {}), '(score)', True, 'import numpy as np\n'), ((116, 25, 116, 36), 'numpy.array', 'np.array', ({(116, 34, 116, 35): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((200, 27, 200, 37), 'numpy.mean', 'np.mean', ({(200, 35, 200, 36): 'y'}, {}), '(y)', True, 'import numpy as np\n')] |
fbob/mplFOAM | residuals.py | 90c9a970ba9975ce115ef5a66eb22fc463b54003 | #!/usr/bin/env python
# encoding: utf-8
import sys
import getopt
import re
import os
import pylab as plt
import numpy as np
# Define the variables for which the residuals will be plotted
variables = ["Ux", "Uy", "T", "p_rgh", "k", "epsilon"]
# Get the arguments of the script
def usage():
print("Usage: residuals.py -l logfile\nPlot the residuals versus Time/Iteration")
try:
options, args = getopt.getopt(sys.argv[1:], 'l:h', ['help', 'logfile='])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in options:
if opt in ("-l", "--logfile"):
log_file = arg
elif opt in ("-h", "--help"):
usage()
sys.exit(1)
# Get the lines of the logfile 'log_file'
lines = open(log_file, "r" ).readlines()
# Get the time and continuity values
time = [] # Time(s) or iterations counter
continuity = [] # Continuity values
for line in lines:
if re.search(r"^Time = ", line): # Search for string 'Time' at the begining of the line in file
start = 'Time = '
value = line.split(start)[1] # Take the Time value as the string just after start
time.append(np.float(value)) # Transform the string in a float value
elif re.search(r"continuity errors :", line): # Search for string 'continuity' in the lines of file 'log_file'
start = 'sum local = '
end = ', global'
value = line.split(start)[1].split(end)[0] # Take the continuity value as string between start and end
continuity.append(np.float(value)) # Transform the string in a float value
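# Illustrative examples of the log lines matched above (numbers are made up):
#   "Time = 0.005"                                                     -> value appended to time
#   "time step continuity errors : sum local = 1.2e-07, global = ..."  -> value appended to continuity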
# Get the residual values for each variable
for variable in variables:
data = []
for line in lines:
if re.search(r"Solving for " + variable, line):# Search for string variable in line of file 'log_file'
start = 'Final residual = '
end = ', No Iterations'
value = line.split(start)[1].split(end)[0]
data.append(np.float(value))
plt.plot(np.array(time),np.array(data), label=variable) # Plot the residual values of variable
plt.plot(np.array(time),np.array(continuity), label="Continuity") # Plot the continuity values
# Plot
plt.title("Residuals plot:\n * logfile: " + log_file + "\n * case dir: " + os.getcwd().split('/')[-1], loc='left')
plt.xlabel("Time(s)/Iterations")
plt.ylabel("Residuals (Log Scale)")
plt.yscale('log')
plt.legend()
plt.grid()
plt.show()
| [((63, 0, 63, 32), 'pylab.xlabel', 'plt.xlabel', ({(63, 11, 63, 31): '"""Time(s)/Iterations"""'}, {}), "('Time(s)/Iterations')", True, 'import pylab as plt\n'), ((64, 0, 64, 35), 'pylab.ylabel', 'plt.ylabel', ({(64, 11, 64, 34): '"""Residuals (Log Scale)"""'}, {}), "('Residuals (Log Scale)')", True, 'import pylab as plt\n'), ((65, 0, 65, 17), 'pylab.yscale', 'plt.yscale', ({(65, 11, 65, 16): '"""log"""'}, {}), "('log')", True, 'import pylab as plt\n'), ((66, 0, 66, 12), 'pylab.legend', 'plt.legend', ({}, {}), '()', True, 'import pylab as plt\n'), ((67, 0, 67, 10), 'pylab.grid', 'plt.grid', ({}, {}), '()', True, 'import pylab as plt\n'), ((68, 0, 68, 10), 'pylab.show', 'plt.show', ({}, {}), '()', True, 'import pylab as plt\n'), ((19, 20, 19, 76), 'getopt.getopt', 'getopt.getopt', ({(19, 34, 19, 46): 'sys.argv[1:]', (19, 48, 19, 53): '"""l:h"""', (19, 55, 19, 75): "['help', 'logfile=']"}, {}), "(sys.argv[1:], 'l:h', ['help', 'logfile='])", False, 'import getopt\n'), ((38, 7, 38, 35), 're.search', 're.search', ({(38, 17, 38, 28): '"""^Time = """', (38, 30, 38, 34): 'line'}, {}), "('^Time = ', line)", False, 'import re\n'), ((59, 9, 59, 23), 'numpy.array', 'np.array', ({(59, 18, 59, 22): 'time'}, {}), '(time)', True, 'import numpy as np\n'), ((59, 24, 59, 44), 'numpy.array', 'np.array', ({(59, 33, 59, 43): 'continuity'}, {}), '(continuity)', True, 'import numpy as np\n'), ((22, 4, 22, 15), 'sys.exit', 'sys.exit', ({(22, 13, 22, 14): '(2)'}, {}), '(2)', False, 'import sys\n'), ((42, 9, 42, 48), 're.search', 're.search', ({(42, 19, 42, 41): '"""continuity errors :"""', (42, 43, 42, 47): 'line'}, {}), "('continuity errors :', line)", False, 'import re\n'), ((52, 11, 52, 54), 're.search', 're.search', ({(52, 21, 52, 47): "('Solving for ' + variable)", (52, 49, 52, 53): 'line'}, {}), "('Solving for ' + variable, line)", False, 'import re\n'), ((57, 13, 57, 27), 'numpy.array', 'np.array', ({(57, 22, 57, 26): 'time'}, {}), '(time)', True, 'import numpy as np\n'), ((57, 28, 57, 42), 'numpy.array', 'np.array', ({(57, 37, 57, 41): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((29, 8, 29, 19), 'sys.exit', 'sys.exit', ({(29, 17, 29, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((41, 20, 41, 35), 'numpy.float', 'np.float', ({(41, 29, 41, 34): 'value'}, {}), '(value)', True, 'import numpy as np\n'), ((46, 26, 46, 41), 'numpy.float', 'np.float', ({(46, 35, 46, 40): 'value'}, {}), '(value)', True, 'import numpy as np\n'), ((56, 24, 56, 39), 'numpy.float', 'np.float', ({(56, 33, 56, 38): 'value'}, {}), '(value)', True, 'import numpy as np\n'), ((62, 75, 62, 86), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n')] |
empiricalstateofmind/personal_website | content_generator/vitae.py | cb361f9fd6bd1b1dc8dd39c87cc003d847ae4a2c | # Generate the vitae.json file used to populate the Vitae section of the website.
import pandas as pd
import re
from datetime import datetime
from collections import defaultdict
import json
# Publications
def create_publications(filepath):
publications = pd.read_excel(filepath, sheet_name='publications', endcoding='utf-8')
publications = publications.fillna('')
publication_store = defaultdict(list)
for ix, pub in publications.iterrows():
date = pub.publication_date.strftime('%Y')
entry = {'title': pub.title,
'authors': pub.authors,
'arxiv': pub.arxiv_link,
'abstract':pub.abstract,
'date': date}
if pub.journal_link != '':
entry['link'] = pub.journal_link
if pub.journal != '':
entry['journal'] = pub.journal
publication_store[pub.type].append(entry)
return publication_store
def create_conferences(filepath):
conferences = pd.read_excel(filepath, sheet_name='conferences', endcoding='utf-8')
conferences = conferences.fillna('')
categories = [('invited', 'Invited Talks \& Posters'),
('contributed', 'Contributed Talks \& Posters'),
('attended', 'Attended'),
('school', 'Schools')]
conference_store = {}
for key, subtitle in categories:
data = conferences[conferences.type == key]
collection = []
if len(data) > 0:
for ix, conf in data.iterrows():
if conf.include=='no': continue
date = conf.timestamp.strftime('%b. %Y')
if key in ['attended', 'school']:
contribution = 'Attendee'
else:
contribution = "{} {}".format(conf.type.capitalize(), conf.medium.capitalize())
entry = {'title':conf.title,
'location':conf.location,
'date':date,
'contribution': contribution,
}
if conf.link != '':
entry['link'] = conf.link
if (conf.presentation_title != '') & (conf.presentation_authors != ''):
entry['presentation_authors'] = conf.presentation_authors
entry['presentation_title'] = conf.presentation_title
collection.append(entry)
conference_store[key] = collection
return conference_store
def create_teaching(filepath):
teaching = pd.read_excel(filepath, sheet_name='teaching', endcoding='utf-8')
teaching = teaching.fillna('')
teaching_store = []
for ix, teach in teaching.sort_values(by='type').iterrows():
if teach['type'] == 'supervision':
entry = {
'date': teach.date,
'project_award': teach.program,
'title': teach.title,
'student': teach.student_name,
'institution': teach.location
}
teaching_store.append(entry)
return teaching_store
def create_reviewing(filepath):
reviewing = pd.read_excel(filepath, sheet_name='journals', endcoding='utf-8')
reviewing = reviewing.fillna('')
review_store = []
for ix, review in reviewing.iterrows():
entry = {'name': review.journal_name,
'short_name': review.journal_shortname}
review_store.append(entry)
return review_store
if __name__ == "__main__":
# FILEPATH = "D:/Dropbox/projects/personal_cv/vitae.xlsx" # We can pass this as an argument later
FILEPATH = "../../../Projects/personal_cv/vitae.xlsx"
vitae = {'publications':create_publications(FILEPATH),
'conferences':create_conferences(FILEPATH),
'teaching':create_teaching(FILEPATH),
'reviewing':create_reviewing(FILEPATH)}
with open('../app/mod_home/static/vitae.json', 'w') as file:
json.dump(vitae, file, sort_keys=True, indent=4)
with open('../app/static/vitae.json', 'w') as file:
json.dump(vitae, file, sort_keys=True, indent=4) | [((13, 19, 13, 88), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((16, 24, 16, 41), 'collections.defaultdict', 'defaultdict', ({(16, 36, 16, 40): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((37, 18, 37, 86), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((81, 15, 81, 80), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((102, 16, 102, 81), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((125, 8, 125, 56), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((127, 8, 127, 56), 'json.dump', 'json.dump', (), '', False, 'import json\n')] |
tomekr/cement | cement/ext/ext_generate.py | fece8629c48bcd598fd61d8aa7457a5df4c4f831 | """
Cement generate extension module.
"""
import re
import os
import inspect
import yaml
import shutil
from .. import Controller, minimal_logger, shell
from ..utils.version import VERSION, get_version
LOG = minimal_logger(__name__)
class GenerateTemplateAbstractBase(Controller):
class Meta:
pass
def _generate(self, source, dest):
msg = 'Generating %s %s in %s' % (
self.app._meta.label, self._meta.label, dest
)
self.app.log.info(msg)
data = {}
# builtin vars
maj_min = float('%s.%s' % (VERSION[0], VERSION[1]))
data['cement'] = {}
data['cement']['version'] = get_version()
data['cement']['major_version'] = VERSION[0]
data['cement']['minor_version'] = VERSION[1]
data['cement']['major_minor_version'] = maj_min
f = open(os.path.join(source, '.generate.yml'))
yaml_load = yaml.full_load if hasattr(yaml, 'full_load') else yaml.load
g_config = yaml_load(f)
f.close()
vars = g_config.get('variables', {})
exclude_list = g_config.get('exclude', [])
ignore_list = g_config.get('ignore', [])
# default ignore the .generate.yml config
g_config_yml = r'^(.*)[\/\\\\]%s[\/\\\\]\.generate\.yml$' % \
self._meta.label
ignore_list.append(g_config_yml)
var_defaults = {
'name': None,
'prompt': None,
'validate': None,
'case': None,
'default': None,
}
for defined_var in vars:
var = var_defaults.copy()
var.update(defined_var)
for key in ['name', 'prompt']:
assert var[key] is not None, \
"Required generate config key missing: %s" % key
val = None
if var['default'] is not None and self.app.pargs.defaults:
val = var['default']
elif var['default'] is not None:
default_text = ' [%s]' % var['default']
else:
default_text = '' # pragma: nocover
if val is None:
class MyPrompt(shell.Prompt):
class Meta:
text = "%s%s:" % (var['prompt'], default_text)
default = var.get('default', None)
p = MyPrompt()
val = p.prompt() # pragma: nocover
if var['case'] in ['lower', 'upper', 'title']:
val = getattr(val, var['case'])()
elif var['case'] is not None:
self.app.log.warning(
"Invalid configuration for variable " +
"'%s': " % var['name'] +
"case must be one of lower, upper, or title."
)
if var['validate'] is not None:
assert re.match(var['validate'], val), \
"Invalid Response (must match: '%s')" % var['validate']
data[var['name']] = val
try:
self.app.template.copy(source, dest, data,
force=self.app.pargs.force,
ignore=ignore_list,
exclude=exclude_list)
except AssertionError as e:
if re.match('(.*)already exists(.*)', e.args[0]):
raise AssertionError(e.args[0] + ' (try: --force)')
else:
raise # pragma: nocover
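    # Illustrative .generate.yml consumed by _generate() above. The keys are the ones read
    # here (variables / exclude / ignore, plus name / prompt / validate / case / default per
    # variable); the concrete values are hypothetical:
    #
    #   variables:
    #     - name: app_label
    #       prompt: "Application label"
    #       default: myapp
    #       case: lower
    #       validate: "^[a-z_]+$"
    #   exclude:
    #     - "^(.*)docs(.*)$"
    #   ignore:
    #     - "^(.*)tmp(.*)$"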
def _clone(self, source, dest):
msg = 'Cloning %s %s template to %s' % (
self.app._meta.label, self._meta.label, dest
)
self.app.log.info(msg)
if os.path.exists(dest) and self.app.pargs.force is True:
shutil.rmtree(dest)
elif os.path.exists(dest):
msg = "Destination path already exists: %s (try: --force)" % dest
raise AssertionError(msg)
shutil.copytree(source, dest)
def _default(self):
source = self._meta.source_path
dest = self.app.pargs.dest
if self.app.pargs.clone is True:
self._clone(source, dest)
else:
self._generate(source, dest)
def setup_template_items(app):
template_dirs = []
template_items = []
# look in app template dirs
for path in app._meta.template_dirs:
subpath = os.path.join(path, 'generate')
if os.path.exists(subpath) and subpath not in template_dirs:
template_dirs.append(subpath)
# use app template module, find it's path on filesystem
if app._meta.template_module is not None:
mod_parts = app._meta.template_module.split('.')
mod = mod_parts.pop()
try:
mod = app.__import__(mod, from_module='.'.join(mod_parts))
mod_path = os.path.dirname(inspect.getfile(mod))
subpath = os.path.join(mod_path, 'generate')
if os.path.exists(subpath) and subpath not in template_dirs:
template_dirs.append(subpath)
# FIXME: not exactly sure how to test for this so not covering
except AttributeError: # pragma: nocover
msg = 'unable to load template module' + \
'%s from %s' % (mod, '.'.join(mod_parts)) # pragma: nocover
app.log.debug(msg) # pragma: nocover
for path in template_dirs:
for item in os.listdir(path):
if item not in template_items:
template_items.append(item)
class GenerateTemplate(GenerateTemplateAbstractBase):
class Meta:
label = item
stacked_on = 'generate'
stacked_type = 'nested'
help = 'generate %s from template' % item
arguments = [
# ------------------------------------------------------
(['dest'],
{'help': 'destination directory path'}),
# ------------------------------------------------------
(['-f', '--force'],
{'help': 'force operation if destination exists',
'dest': 'force',
'action': 'store_true'}),
# ------------------------------------------------------
(['-D', '--defaults'],
{'help': 'use all default variable values',
'dest': 'defaults',
'action': 'store_true'}),
# ------------------------------------------------------
(['--clone'],
{'help': 'clone this template to destination path',
'dest': 'clone',
'action': 'store_true'}),
]
source_path = os.path.join(path, item)
app.handler.register(GenerateTemplate)
class Generate(Controller):
class Meta:
label = 'generate'
stacked_on = 'base'
stacked_type = 'nested'
config_section = 'generate'
def _setup(self, app):
super(Generate, self)._setup(app)
def _default(self):
self._parser.print_help()
def load(app):
app.handler.register(Generate)
app.hook.register('pre_run', setup_template_items)
| [((121, 8, 121, 37), 'shutil.copytree', 'shutil.copytree', ({(121, 24, 121, 30): 'source', (121, 32, 121, 36): 'dest'}, {}), '(source, dest)', False, 'import shutil\n'), ((139, 18, 139, 48), 'os.path.join', 'os.path.join', ({(139, 31, 139, 35): 'path', (139, 37, 139, 47): '"""generate"""'}, {}), "(path, 'generate')", False, 'import os\n'), ((162, 20, 162, 36), 'os.listdir', 'os.listdir', ({(162, 31, 162, 35): 'path'}, {}), '(path)', False, 'import os\n'), ((35, 17, 35, 54), 'os.path.join', 'os.path.join', ({(35, 30, 35, 36): 'source', (35, 38, 35, 53): '""".generate.yml"""'}, {}), "(source, '.generate.yml')", False, 'import os\n'), ((115, 11, 115, 31), 'os.path.exists', 'os.path.exists', ({(115, 26, 115, 30): 'dest'}, {}), '(dest)', False, 'import os\n'), ((116, 12, 116, 31), 'shutil.rmtree', 'shutil.rmtree', ({(116, 26, 116, 30): 'dest'}, {}), '(dest)', False, 'import shutil\n'), ((117, 13, 117, 33), 'os.path.exists', 'os.path.exists', ({(117, 28, 117, 32): 'dest'}, {}), '(dest)', False, 'import os\n'), ((140, 11, 140, 34), 'os.path.exists', 'os.path.exists', ({(140, 26, 140, 33): 'subpath'}, {}), '(subpath)', False, 'import os\n'), ((150, 22, 150, 56), 'os.path.join', 'os.path.join', ({(150, 35, 150, 43): 'mod_path', (150, 45, 150, 55): '"""generate"""'}, {}), "(mod_path, 'generate')", False, 'import os\n'), ((93, 23, 93, 53), 're.match', 're.match', ({(93, 32, 93, 47): "var['validate']", (93, 49, 93, 52): 'val'}, {}), "(var['validate'], val)", False, 'import re\n'), ((104, 15, 104, 60), 're.match', 're.match', ({(104, 24, 104, 48): '"""(.*)already exists(.*)"""', (104, 50, 104, 59): 'e.args[0]'}, {}), "('(.*)already exists(.*)', e.args[0])", False, 'import re\n'), ((149, 39, 149, 59), 'inspect.getfile', 'inspect.getfile', ({(149, 55, 149, 58): 'mod'}, {}), '(mod)', False, 'import inspect\n'), ((152, 15, 152, 38), 'os.path.exists', 'os.path.exists', ({(152, 30, 152, 37): 'subpath'}, {}), '(subpath)', False, 'import os\n'), ((192, 34, 192, 58), 'os.path.join', 'os.path.join', ({(192, 47, 192, 51): 'path', (192, 53, 192, 57): 'item'}, {}), '(path, item)', False, 'import os\n')] |
Kvoti/ditto | ditto/core/__init__.py | eb4efb241e54bf679222d14afeb71d9d5441c122 | from . import forms
from . import views
ADMIN_ROLE = "Administrator"
MEMBER_ROLE = "Member"
GUEST_ROLE = "Guest"
DEFAULT_ROLES = [ADMIN_ROLE, MEMBER_ROLE, GUEST_ROLE]
| [] |
salwator/training_stats | training_stats/hrm.py | 3f3bacbaa01e90e8658cf5b66bede42a37e3fb6e | from .gpxfile import get_hr_measurements
from .utils import interpolate
from operator import itemgetter
def __calculate_moving_sums(points, window):
""" Calculates hr moving sums of the window len """
time, hrs = zip(*points)
moving_sum = sum(hrs[0:window])
sums = [(time[0], moving_sum)]
for i, t in enumerate(time[1:-1 * window]):
moving_sum += hrs[i + window] - hrs[i]
sums.append((t, moving_sum))
return sums
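# Worked example (made-up numbers): with window=3 and hrs [100, 102, 104, 106, 108, 110]
# the sums are 306 (=100+102+104), then 312 and 318, each paired with the time stamp at
# which its window starts.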
def calculate_lactate_threshold(hrdata):
""" Given list of (time, hr), returns lactate threshold and selected data"""
test_period = 60 * 30 # test time
measured_period = 60 * 20 # measured period in seconds
hrs = interpolate(hrdata)
time_stamp, max_sum = max(__calculate_moving_sums(hrs, test_period),
key=itemgetter(1))
# your lactate threshold is average of last 20 in 30 minutes of tempo run
start_measure = time_stamp + (test_period - measured_period)
stop_measure = start_measure + measured_period
measured_time, measured_hrs = zip(*hrs[start_measure:stop_measure])
lactate_thr = round(sum(measured_hrs) / measured_period)
return (lactate_thr, measured_time, measured_hrs)
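# Illustrative use (the file name is hypothetical; get_hr_measurements is imported above
# and is assumed to yield a list of (time, hr) pairs):
#
#     hr_points = get_hr_measurements('tempo_run.gpx')
#     lt_hr, lt_times, lt_hrs = calculate_lactate_threshold(hr_points)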
| [((23, 34, 23, 47), 'operator.itemgetter', 'itemgetter', ({(23, 45, 23, 46): '1'}, {}), '(1)', False, 'from operator import itemgetter\n')] |
mozilla-releng/staging-mozilla-vpn-client | scripts/utils/import_languages.py | f31d3762a607ccf2d7c6a016f7b800305fbf0113 | #! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import xml.etree.ElementTree as ET
import os
import sys
import shutil
import atexit
import subprocess
# Use the project root as the working directory
prevdir = os.getcwd()
workdir = os.path.join(os.path.dirname(__file__), '..', '..')
os.chdir(workdir)
atexit.register(os.chdir, prevdir)
# Include only locales above this threshold (e.g. 70%) in production
l10n_threshold = 0.70
parser = argparse.ArgumentParser()
parser.add_argument(
'-m', '--macos', default=False, action="store_true", dest="ismacos",
help='Include the MacOS bundle data')
parser.add_argument(
'-q', '--qt_path', default=None, dest="qtpath",
help='The QT binary path. If not set, we try to guess.')
args = parser.parse_args()
stepnum = 1
def title(text):
global stepnum
print(f"\033[96m\033[1mStep {stepnum}\033[0m: \033[97m{text}\033[0m")
stepnum = stepnum+1
# Step 0
title("Find the Qt localization tools...")
def qtquery(qmake, propname):
try:
qtquery = os.popen(f'{qmake} -query {propname}')
qtpath = qtquery.read().strip()
if len(qtpath) > 0:
return qtpath
finally:
pass
return None
qtbinpath = args.qtpath
if qtbinpath is None:
qtbinpath = qtquery('qmake', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake6', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake5', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake-qt5', 'QT_INSTALL_BINS')
if qtbinpath is None:
print('Unable to locate qmake tool.')
sys.exit(1)
if not os.path.isdir(qtbinpath):
print(f"QT path is not a diretory: {qtbinpath}")
sys.exit(1)
lupdate = os.path.join(qtbinpath, 'lupdate')
lconvert = os.path.join(qtbinpath, 'lconvert')
lrelease = os.path.join(qtbinpath, 'lrelease')
# Step 0
# Let's update the i18n repo
os.system(f"git submodule init")
os.system(f"git submodule update --remote --depth 1 i18n")
# Step 1
# Go through the i18n repo, check each XLIFF file and take
# note which locale is complete above the minimum threshold.
# Adds path of .xliff and .ts to l10n_files.
title("Validate the XLIFF file...")
l10n_files = []
for locale in os.listdir('i18n'):
# Skip non folders
if not os.path.isdir(os.path.join('i18n', locale)):
continue
# Skip hidden folders
if locale.startswith('.'):
continue
xliff_path = os.path.join('i18n', locale, 'mozillavpn.xliff')
# If it's the source locale (en), ignore parsing for completeness and
# add it to the list.
if locale == 'en':
print(f'OK\t- en added (reference locale)')
l10n_files.append({
'locale': 'en',
'ts': os.path.join('translations', 'generated', 'mozillavpn_en.ts'),
'xliff': xliff_path
})
continue
tree = ET.parse(xliff_path)
root = tree.getroot()
sources = 0
translations = 0
for element in root.iter('{urn:oasis:names:tc:xliff:document:1.2}source'):
sources += 1
for element in root.iter('{urn:oasis:names:tc:xliff:document:1.2}target'):
translations += 1
completeness = translations/(sources*1.0)
# Ignore locale with less than 70% of completeness
if completeness < l10n_threshold:
print(f'KO\t- {locale} is translated at {round(completeness*100, 2)}%, at least {l10n_threshold*100}% is needed')
continue # Not enough translations next file please
print(f'OK\t- {locale} added ({round(completeness*100, 2)}% translated)')
l10n_files.append({
'locale': locale,
'ts': os.path.join('translations', 'generated', f'mozillavpn_{locale}.ts'),
'xliff': xliff_path
})
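# The completeness check above is plain arithmetic: translated <target> elements divided by
# <source> elements, e.g. (made-up numbers) 850 / 1000 = 0.85, which clears the 0.70 threshold.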
# Step 2
title("Create folders and localization files for the languages...")
for file in l10n_files:
locdirectory = os.path.join('translations', 'generated', file['locale'])
os.makedirs(locdirectory, exist_ok=True)
locversion = os.path.join(locdirectory, f'locversion.plist')
with open(locversion, 'w') as locversion_file:
locversion_file.write(f"""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\"
\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">
<plist version=\"1.0\">
<dict>
<key>LprojCompatibleVersion</key>
<string>123</string>
<key>LprojLocale</key>
<string>{file['locale']}</string>
<key>LprojRevisionLevel</key>
<string>1</string>
<key>LprojVersion</key>
<string>123</string>
</dict>
</plist>""")
with open(os.path.join('translations', 'generated', 'macos.pri'), 'w') as macospri:
macospri.write('### AUTOGENERATED! DO NOT EDIT!! ###\n')
for file in l10n_files:
macospri.write(f"LANGUAGES_FILES_{file['locale']}.files += $$PWD/{file['locale']}/locversion.plist\n")
macospri.write(f"LANGUAGES_FILES_{file['locale']}.path = Contents/Resources/{file['locale']}.lproj\n")
macospri.write(f"QMAKE_BUNDLE_DATA += LANGUAGES_FILES_{file['locale']}\n\n")
# Step 3
title("Write resource file to import the locales that are ready...")
with open('translations/generated/translations.qrc', 'w') as qrcfile:
qrcfile.write('<!-- AUTOGENERATED! DO NOT EDIT!! -->\n')
qrcfile.write('<RCC>\n')
qrcfile.write(' <qresource prefix="/i18n">\n')
for file in l10n_files:
qrcfile.write(f' <file>mozillavpn_{file["locale"]}.qm</file>\n')
qrcfile.write(' </qresource>\n')
qrcfile.write('</RCC>\n')
# Step 4
title("Generate the Js/C++ string definitions...")
try:
subprocess.call([sys.executable, os.path.join('scripts', 'utils', 'generate_strings.py'),
'-o', os.path.join('translations', 'generated'),
os.path.join('translations', 'strings.yaml')])
except Exception as e:
print("generate_strings.py failed. Try with:\n\tpip3 install -r requirements.txt --user")
print(e)
exit(1)
# Build a dummy project to glob together everything that might contain strings.
title("Scanning for new strings...")
def scan_sources(projfile, dirpath):
projfile.write(f"HEADERS += $$files({dirpath}/*.h, true)\n")
projfile.write(f"SOURCES += $$files({dirpath}/*.cpp, true)\n")
projfile.write(f"RESOURCES += $$files({dirpath}/*.qrc, true)\n\n")
with open('translations/generated/dummy.pro', 'w') as dummyproj:
dummyproj.write('### AUTOGENERATED! DO NOT EDIT!! ###\n')
dummyproj.write(f"HEADERS += l18nstrings.h\n")
dummyproj.write(f"SOURCES += l18nstrings_p.cpp\n")
dummyproj.write(f"SOURCES += ../l18nstrings.cpp\n\n")
for l10n_file in l10n_files:
dummyproj.write(f"TRANSLATIONS += {os.path.basename(l10n_file['ts'])}\n")
dummyproj.write("\n")
scan_sources(dummyproj, '../../src')
scan_sources(dummyproj, '../../nebula')
# Step 5
title("Generate translation resources...")
for l10n_file in l10n_files:
os.system(f"{lconvert} -if xlf -i {l10n_file['xliff']} -o {l10n_file['ts']}")
os.system(f"{lupdate} translations/generated/dummy.pro")
for l10n_file in l10n_files:
os.system(f"{lrelease} -idbased {l10n_file['ts']}")
print(f'Imported {len(l10n_files)} locales')
git = os.popen(f'git submodule status i18n')
git_commit_hash = git.read().strip().replace("+","").split(' ')[0]
print(f'Current commit: https://github.com/mozilla-l10n/mozilla-vpn-client-l10n/commit/{git_commit_hash}')
| [((15, 10, 15, 21), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((17, 0, 17, 17), 'os.chdir', 'os.chdir', ({(17, 9, 17, 16): 'workdir'}, {}), '(workdir)', False, 'import os\n'), ((18, 0, 18, 34), 'atexit.register', 'atexit.register', ({(18, 16, 18, 24): 'os.chdir', (18, 26, 18, 33): 'prevdir'}, {}), '(os.chdir, prevdir)', False, 'import atexit\n'), ((23, 9, 23, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((67, 10, 67, 44), 'os.path.join', 'os.path.join', ({(67, 23, 67, 32): 'qtbinpath', (67, 34, 67, 43): '"""lupdate"""'}, {}), "(qtbinpath, 'lupdate')", False, 'import os\n'), ((68, 11, 68, 46), 'os.path.join', 'os.path.join', ({(68, 24, 68, 33): 'qtbinpath', (68, 35, 68, 45): '"""lconvert"""'}, {}), "(qtbinpath, 'lconvert')", False, 'import os\n'), ((69, 11, 69, 46), 'os.path.join', 'os.path.join', ({(69, 24, 69, 33): 'qtbinpath', (69, 35, 69, 45): '"""lrelease"""'}, {}), "(qtbinpath, 'lrelease')", False, 'import os\n'), ((73, 0, 73, 32), 'os.system', 'os.system', ({(73, 10, 73, 31): 'f"""git submodule init"""'}, {}), "(f'git submodule init')", False, 'import os\n'), ((74, 0, 74, 58), 'os.system', 'os.system', ({(74, 10, 74, 57): 'f"""git submodule update --remote --depth 1 i18n"""'}, {}), "(f'git submodule update --remote --depth 1 i18n')", False, 'import os\n'), ((82, 14, 82, 32), 'os.listdir', 'os.listdir', ({(82, 25, 82, 31): '"""i18n"""'}, {}), "('i18n')", False, 'import os\n'), ((204, 0, 204, 56), 'os.system', 'os.system', ({(204, 10, 204, 55): 'f"""{lupdate} translations/generated/dummy.pro"""'}, {}), "(f'{lupdate} translations/generated/dummy.pro')", False, 'import os\n'), ((210, 6, 210, 44), 'os.popen', 'os.popen', ({(210, 15, 210, 43): 'f"""git submodule status i18n"""'}, {}), "(f'git submodule status i18n')", False, 'import os\n'), ((16, 23, 16, 48), 'os.path.dirname', 'os.path.dirname', ({(16, 39, 16, 47): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((61, 4, 61, 15), 'sys.exit', 'sys.exit', ({(61, 13, 61, 14): '(1)'}, {}), '(1)', False, 'import sys\n'), ((63, 7, 63, 31), 'os.path.isdir', 'os.path.isdir', ({(63, 21, 63, 30): 'qtbinpath'}, {}), '(qtbinpath)', False, 'import os\n'), ((65, 4, 65, 15), 'sys.exit', 'sys.exit', ({(65, 13, 65, 14): '(1)'}, {}), '(1)', False, 'import sys\n'), ((91, 17, 91, 65), 'os.path.join', 'os.path.join', ({(91, 30, 91, 36): '"""i18n"""', (91, 38, 91, 44): 'locale', (91, 46, 91, 64): '"""mozillavpn.xliff"""'}, {}), "('i18n', locale, 'mozillavpn.xliff')", False, 'import os\n'), ((104, 11, 104, 31), 'xml.etree.ElementTree.parse', 'ET.parse', ({(104, 20, 104, 30): 'xliff_path'}, {}), '(xliff_path)', True, 'import xml.etree.ElementTree as ET\n'), ((132, 19, 132, 76), 'os.path.join', 'os.path.join', ({(132, 32, 132, 46): '"""translations"""', (132, 48, 132, 59): '"""generated"""', (132, 61, 132, 75): "file['locale']"}, {}), "('translations', 'generated', file['locale'])", False, 'import os\n'), ((133, 4, 133, 44), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((134, 17, 134, 64), 'os.path.join', 'os.path.join', ({(134, 30, 134, 42): 'locdirectory', (134, 44, 134, 63): 'f"""locversion.plist"""'}, {}), "(locdirectory, f'locversion.plist')", False, 'import os\n'), ((203, 4, 203, 81), 'os.system', 'os.system', ({(203, 14, 203, 80): 'f"""{lconvert} -if xlf -i {l10n_file[\'xliff\']} -o {l10n_file[\'ts\']}"""'}, {}), '(f"{lconvert} -if xlf -i {l10n_file[\'xliff\']} -o {l10n_file[\'ts\']}")', False, 'import os\n'), ((206, 4, 206, 55), 'os.system', 'os.system', ({(206, 14, 206, 54): 'f"""{lrelease} -idbased {l10n_file[\'ts\']}"""'}, {}), '(f"{lrelease} -idbased {l10n_file[\'ts\']}")', False, 'import os\n'), ((42, 18, 42, 56), 'os.popen', 'os.popen', ({(42, 27, 42, 55): 'f"""{qmake} -query {propname}"""'}, {}), "(f'{qmake} -query {propname}')", False, 'import os\n'), ((152, 10, 152, 64), 'os.path.join', 'os.path.join', ({(152, 23, 152, 37): '"""translations"""', (152, 39, 152, 50): '"""generated"""', (152, 52, 152, 63): '"""macos.pri"""'}, {}), "('translations', 'generated', 'macos.pri')", False, 'import os\n'), ((84, 25, 84, 53), 'os.path.join', 'os.path.join', ({(84, 38, 84, 44): '"""i18n"""', (84, 46, 84, 52): 'locale'}, {}), "('i18n', locale)", False, 'import os\n'), ((125, 14, 125, 82), 'os.path.join', 'os.path.join', ({(125, 27, 125, 41): '"""translations"""', (125, 43, 125, 54): '"""generated"""', (125, 56, 125, 81): 'f"""mozillavpn_{locale}.ts"""'}, {}), "('translations', 'generated', f'mozillavpn_{locale}.ts')", False, 'import os\n'), ((173, 37, 173, 92), 'os.path.join', 'os.path.join', ({(173, 50, 173, 59): '"""scripts"""', (173, 61, 173, 68): '"""utils"""', (173, 70, 173, 91): '"""generate_strings.py"""'}, {}), "('scripts', 'utils', 'generate_strings.py')", False, 'import os\n'), ((174, 27, 174, 68), 'os.path.join', 'os.path.join', ({(174, 40, 174, 54): '"""translations"""', (174, 56, 174, 67): '"""generated"""'}, {}), "('translations', 'generated')", False, 'import os\n'), ((175, 21, 175, 65), 'os.path.join', 'os.path.join', ({(175, 34, 175, 48): '"""translations"""', (175, 50, 175, 64): '"""strings.yaml"""'}, {}), "('translations', 'strings.yaml')", False, 'import os\n'), ((99, 18, 99, 79), 'os.path.join', 'os.path.join', ({(99, 31, 99, 45): '"""translations"""', (99, 47, 99, 58): '"""generated"""', (99, 60, 99, 78): '"""mozillavpn_en.ts"""'}, {}), "('translations', 'generated', 'mozillavpn_en.ts')", False, 'import os\n'), ((194, 43, 194, 76), 'os.path.basename', 'os.path.basename', ({(194, 60, 194, 75): "l10n_file['ts']"}, {}), "(l10n_file['ts'])", False, 'import os\n')]
Velgaster/Discord-User-Vote | cogs/filter.py | 4aacc0bf01a11b948fa5355a3775ef8c7ae9751e | from discord.ext import commands
import discord
def setup(client):
client.add_cog(KeyWordFilter(client))
class KeyWordFilter(commands.Cog):
def __init__(self, client):
self.client = client
self.log_ch = self.client.get_channel(int(self.client.SETTINGS.LOG_CHANNEL))
@commands.Cog.listener()
async def on_message(self, msg):
if any(x in msg.content.split() for x in self.client.SETTINGS.BLACKLIST):
ctx = await self.client.get_context(msg)
await self.event_log(ctx, msg, "A blacklisted phrase was used!")
await msg.delete()
async def event_log(self, ctx, msg, event):
embed = discord.Embed()
embed.colour = discord.Colour.red()
embed.title = event
embed.add_field(name='User', value=msg.author, inline=True)
embed.add_field(name='Channel', value=msg.channel.name, inline=True)
embed.add_field(name='Message', value=f"> {msg.content}", inline=False)
await self.log_ch.send(embed=embed)
| [((14, 5, 14, 28), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((22, 16, 22, 31), 'discord.Embed', 'discord.Embed', ({}, {}), '()', False, 'import discord\n'), ((23, 23, 23, 43), 'discord.Colour.red', 'discord.Colour.red', ({}, {}), '()', False, 'import discord\n')] |
sai-krishna-msk/KickAssist | api/app.py | 7fb256e3ef4beff231332f6491ebb975f3fe4b43 | from ml_model.model import KickModel
import numpy as np
import pandas as pd
import eli5
import joblib
import flask
from flask import Flask, render_template, request, jsonify
app = Flask(__name__)
model_oh = joblib.load('ml_model/estimators/model_oh.sav')
model_hel = joblib.load('ml_model/estimators/model_hel.sav')
encoder_oh = joblib.load('ml_model/estimators/encoder_oh.sav')
encoder_hel = joblib.load('ml_model/estimators/encoder_hel.sav')
encoder_label = joblib.load('ml_model/estimators/encoder_label.sav')
def get_predict(launch_date , deadline_date , goal , subcategory , category , currency , country , description, rewards):
pred_dict={
"launched_at":launch_date,
"deadline":deadline_date,
"goal":int(goal),
"sub_category":subcategory,
"category":category,
"currency":currency,
"location_country":country,
"blurb":description,
"rewards":[]
}
try:
for reward in rewards.split(","):
pred_dict["rewards"].append(int(reward))
except Exception as e:
raise Exception(f"Error sanatizing rewards with {e} error")
return pred_dict
@app.route('/predict/<launch_date>/<deadline_date>/<goal>/<subcategory>/<category>/<currency>/<country>/<description>/<rewards>')
def GetURL(launch_date , deadline_date , goal , subcategory , category , currency , country , description, rewards):
pred_dict = get_predict(launch_date , deadline_date , goal , subcategory , category , currency , country , description, rewards)
obj = KickModel(model_oh , model_hel , encoder_oh , encoder_hel , encoder_label)
obj.load_data(pred_dict)
obj.pred()
oh_pred = float(obj.pred_oh[0][1])
hel_pred = float(obj.pred_hel[0][1])
response = {
"prediction_oh":oh_pred,
"prediction_hel":hel_pred,
"prediction_oh_df":obj.pred_oh_intr.to_dict(),
"prediction_hel_intr":obj.pred_hel_intr.to_dict()
}
return response
if __name__=="__main__":
app.run(debug =True) | [((9, 6, 9, 21), 'flask.Flask', 'Flask', ({(9, 12, 9, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, render_template, request, jsonify\n'), ((11, 11, 11, 58), 'joblib.load', 'joblib.load', ({(11, 23, 11, 57): '"""ml_model/estimators/model_oh.sav"""'}, {}), "('ml_model/estimators/model_oh.sav')", False, 'import joblib\n'), ((12, 12, 12, 60), 'joblib.load', 'joblib.load', ({(12, 24, 12, 59): '"""ml_model/estimators/model_hel.sav"""'}, {}), "('ml_model/estimators/model_hel.sav')", False, 'import joblib\n'), ((13, 13, 13, 62), 'joblib.load', 'joblib.load', ({(13, 25, 13, 61): '"""ml_model/estimators/encoder_oh.sav"""'}, {}), "('ml_model/estimators/encoder_oh.sav')", False, 'import joblib\n'), ((14, 14, 14, 64), 'joblib.load', 'joblib.load', ({(14, 26, 14, 63): '"""ml_model/estimators/encoder_hel.sav"""'}, {}), "('ml_model/estimators/encoder_hel.sav')", False, 'import joblib\n'), ((15, 16, 15, 68), 'joblib.load', 'joblib.load', ({(15, 28, 15, 67): '"""ml_model/estimators/encoder_label.sav"""'}, {}), "('ml_model/estimators/encoder_label.sav')", False, 'import joblib\n'), ((42, 10, 42, 84), 'ml_model.model.KickModel', 'KickModel', ({(42, 20, 42, 28): 'model_oh', (42, 31, 42, 40): 'model_hel', (42, 43, 42, 53): 'encoder_oh', (42, 56, 42, 67): 'encoder_hel', (42, 70, 42, 83): 'encoder_label'}, {}), '(model_oh, model_hel, encoder_oh, encoder_hel, encoder_label)', False, 'from ml_model.model import KickModel\n')] |
littleK0i/SnowDDL | snowddl/resolver/primary_key.py | b24cb3676e41fec8876d61a101ba242e7272a18f | from snowddl.blueprint import PrimaryKeyBlueprint
from snowddl.resolver.abc_schema_object_resolver import AbstractSchemaObjectResolver, ResolveResult, ObjectType
class PrimaryKeyResolver(AbstractSchemaObjectResolver):
def get_object_type(self) -> ObjectType:
return ObjectType.PRIMARY_KEY
def get_existing_objects_in_schema(self, schema: dict):
existing_objects = {}
constraints_by_name = {}
cur = self.engine.execute_meta("SHOW PRIMARY KEYS IN SCHEMA {database:i}.{schema:i}", {
"database": schema['database'],
"schema": schema['schema'],
})
for r in cur:
if r['constraint_name'] not in constraints_by_name:
constraints_by_name[r['constraint_name']] = {
"database": r['database_name'],
"schema": r['schema_name'],
"table": r['table_name'],
"columns": {r['key_sequence']: r['column_name']}
}
else:
constraints_by_name[r['constraint_name']]['columns'][r['key_sequence']] = r['column_name']
for c in constraints_by_name.values():
columns_list = [c['columns'][k] for k in sorted(c['columns'])]
full_name = f"{c['database']}.{c['schema']}.{c['table']}({','.join(columns_list)})"
existing_objects[full_name] = {
"database": c['database'],
"schema": c['schema'],
"table": c['table'],
"columns": columns_list,
}
return existing_objects
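    # Keys of existing_objects follow the full_name format built above, e.g. with
    # hypothetical identifiers: "MY_DB.MY_SCHEMA.MY_TABLE(ID,CREATED_AT)".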
def get_blueprints(self):
return self.config.get_blueprints_by_type(PrimaryKeyBlueprint)
def create_object(self, bp: PrimaryKeyBlueprint):
self.engine.execute_safe_ddl("ALTER TABLE {table_name:i} ADD PRIMARY KEY ({columns:i})", {
"table_name": bp.table_name,
"columns": bp.columns,
})
return ResolveResult.CREATE
def compare_object(self, bp: PrimaryKeyBlueprint, row: dict):
if [str(c) for c in bp.columns] == row['columns']:
return ResolveResult.NOCHANGE
self.engine.execute_safe_ddl("ALTER TABLE {table_name:i} DROP PRIMARY KEY", {
"table_name": bp.table_name,
})
self.engine.execute_safe_ddl("ALTER TABLE {table_name:i} ADD PRIMARY KEY ({columns:i})", {
"table_name": bp.table_name,
"columns": bp.columns,
})
return ResolveResult.ALTER
def drop_object(self, row: dict):
self.engine.execute_safe_ddl("ALTER TABLE {database:i}.{schema:i}.{table:i} DROP PRIMARY KEY", {
"database": row['database'],
"schema": row['schema'],
"table": row['table'],
})
return ResolveResult.DROP
| [] |
tetov/ITA19 | modules/module0/02_datastructures_and_geometry/datastructures_2b.py | 1af68a8885caf83acd98f4136d0286539ccbe63b | import os
import compas
from compas.datastructures import Mesh
from compas_rhino.artists import MeshArtist
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, 'data')
FILE = os.path.join(DATA, 'faces.obj')
mesh = Mesh.from_obj(FILE)
artist = MeshArtist(mesh, layer="Mesh")
artist.draw_vertices(
color={key: (255, 0, 0) for key in mesh.vertices_on_boundary()})
artist.draw_vertexlabels(
text={key: str(mesh.vertex_degree(key)) for key in mesh.vertices()})
artist.draw_edges(
keys=list(mesh.edges_on_boundary()),
color=(255, 0, 0))
artist.draw_faces(
color={key: (150, 255, 150) for key in mesh.faces() if not mesh.is_face_on_boundary(key)})
| [((7, 7, 7, 32), 'os.path.dirname', 'os.path.dirname', ({(7, 23, 7, 31): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((8, 7, 8, 33), 'os.path.join', 'os.path.join', ({(8, 20, 8, 24): 'HERE', (8, 26, 8, 32): '"""data"""'}, {}), "(HERE, 'data')", False, 'import os\n'), ((9, 7, 9, 38), 'os.path.join', 'os.path.join', ({(9, 20, 9, 24): 'DATA', (9, 26, 9, 37): '"""faces.obj"""'}, {}), "(DATA, 'faces.obj')", False, 'import os\n'), ((11, 7, 11, 26), 'compas.datastructures.Mesh.from_obj', 'Mesh.from_obj', ({(11, 21, 11, 25): 'FILE'}, {}), '(FILE)', False, 'from compas.datastructures import Mesh\n'), ((13, 9, 13, 39), 'compas_rhino.artists.MeshArtist', 'MeshArtist', (), '', False, 'from compas_rhino.artists import MeshArtist\n')] |
tankishev/Python | OOP/Exercises/First_steps_in_OOP_Exercises/8_pokemon/project/pokemon.py | 60e511fc901f136b88c681f77f209fe2f8c46447 | # The Pokemon class should receive a name (string) and health (int) upon initialization.
# It should also have a method called pokemon_details that returns the information about the pokemon:
# "{pokemon_name} with health {pokemon_health}"
class Pokemon:
def __init__(self, name: str, health: int) -> None:
self.name = name
self.health = health
def pokemon_details(self) -> str:
return f"{self.name} with health {self.health}"
| [] |
ONSdigital/ons_utils | tests/test_pandas.py | 5ff0952c174984deb601af8ad4c21f26c7b24623 | """Tests for the pandas helpers in the pd_helpers.py module."""
import pytest
from pandas.testing import assert_frame_equal
from tests.conftest import create_dataframe
from ons_utils.pandas import *
def test_nested_dict_to_df():
"""Test for nested_dict_to_df."""
input_d = {
'bones': {
'femur': {'tendons': 24},
'humerus': {'tendons': 14},
},
'muscles': {
'gluteus_maximus': {'tendons': 18},
},
'cars': 7,
}
actual = nested_dict_to_df(
input_d,
columns=['number'],
level_names=('a', 'b', 'c'),
)
expected = create_dataframe([
('a', 'b', 'c', 'number'),
('bones', 'femur', 'tendons', 24),
('bones', 'humerus', 'tendons', 14),
('cars', None, None, 7),
('muscles', 'gluteus_maximus', 'tendons', 18),
])
assert_frame_equal(
# Sort values as dict order not preserved.
actual.sort_values(['a', 'b']),
# Set index because function returns a MultiIndex.
expected.set_index(['a', 'b', 'c'])
)
class TestStacker:
"""Group of tests for Stacker."""
@pytest.mark.skip(reason="test shell")
def test_Stacker(self):
"""Test for Stacker."""
pass
@pytest.mark.skip(reason="test shell")
def test_convert_level_to_datetime():
"""Test for this."""
pass
class TestMultiIndexSlicer:
"""Group of tests for MultiIndexSlicer."""
@pytest.mark.skip(reason="test shell")
def test_MultiIndexSlicer(self):
"""Test for MultiIndexSlicer."""
pass
@pytest.mark.skip(reason="test shell")
def test_get_index_level_values():
"""Test for this."""
pass
@pytest.mark.skip(reason="test shell")
def test_shifted_within_year_apply():
"""Test for this."""
pass
@pytest.mark.skip(reason="test shell")
def test_shifted_within_year_ffill():
"""Test for this."""
pass
| [((54, 1, 54, 38), 'pytest.mark.skip', 'pytest.mark.skip', (), '', False, 'import pytest\n'), ((69, 1, 69, 38), 'pytest.mark.skip', 'pytest.mark.skip', (), '', False, 'import pytest\n'), ((75, 1, 75, 38), 'pytest.mark.skip', 'pytest.mark.skip', (), '', False, 'import pytest\n'), ((81, 1, 81, 38), 'pytest.mark.skip', 'pytest.mark.skip', (), '', False, 'import pytest\n'), ((29, 15, 35, 6), 'tests.conftest.create_dataframe', 'create_dataframe', ({(29, 32, 35, 5): "[('a', 'b', 'c', 'number'), ('bones', 'femur', 'tendons', 24), ('bones',\n 'humerus', 'tendons', 14), ('cars', None, None, 7), ('muscles',\n 'gluteus_maximus', 'tendons', 18)]"}, {}), "([('a', 'b', 'c', 'number'), ('bones', 'femur', 'tendons', \n 24), ('bones', 'humerus', 'tendons', 14), ('cars', None, None, 7), (\n 'muscles', 'gluteus_maximus', 'tendons', 18)])", False, 'from tests.conftest import create_dataframe\n'), ((48, 5, 48, 42), 'pytest.mark.skip', 'pytest.mark.skip', (), '', False, 'import pytest\n'), ((63, 5, 63, 42), 'pytest.mark.skip', 'pytest.mark.skip', (), '', False, 'import pytest\n')] |
seanfisk/lsf-ibutils | lsf_ibutils/ibsub/__init__.py | a22c738376d656ab38f4bfa3572d4693288098cb | """:mod:`lsf_ibutils.ibsub` -- Interactive batch submission utility
"""
| [] |
PaperDevil/pyconfigger | build/lib/configger/fishes/__init__.py | 75c6e3f74e6e70d8ec9565397e2be9ae8815d44e | import os
splited_path = os.path.realpath(__file__).split('\\')[:-1]
fish_path = '\\'.join(splited_path)
fish_json_name = "fish.json"
fish_json_path = os.path.join(fish_path, fish_json_name)
| [((7, 17, 7, 56), 'os.path.join', 'os.path.join', ({(7, 30, 7, 39): 'fish_path', (7, 41, 7, 55): 'fish_json_name'}, {}), '(fish_path, fish_json_name)', False, 'import os\n'), ((3, 15, 3, 41), 'os.path.realpath', 'os.path.realpath', ({(3, 32, 3, 40): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
IntuitionEngineeringTeam/RedBlackPy | setup.py | 99630408153bea7494415c402eb2d9881f3168ee | #
# Created by Soldoskikh Kirill.
# Copyright 2018 Intuition. All rights reserved.
#
import os
import platform
from setuptools import setup
from setuptools.command.build_ext import build_ext
from distutils.extension import Extension
from Cython.Build import cythonize
from rbp_setup_tools.code_generation import generate_from_cython_src
from rbp_setup_tools.types import TYPES
if platform.system() == 'Darwin':
compile_opts = [ '-std=c++11',
'-mmacosx-version-min={:}'.format( platform.mac_ver()[0] ),
'-Ofast' ]
elif platform.system() == 'Linux':
compile_opts = [ '-std=c++11',
'-Ofast' ]
elif platform.system() == 'Windows':
compile_opts = [ '-std=c++11',
'-Ofast' ]
else:
raise EnvironmentError( 'Not supported platform: {plat}'.format(plat=platform.system()) )
#--------------------------------------------------------------------------------------------
# Generate cython code for all supporting types
#--------------------------------------------------------------------------------------------
src_1 = './redblackpy/cython_source/__dtype_tree_processing.pxi'
src_2 = './redblackpy/cython_source/__tree_series_dtype.pxi'
src_3 = './redblackpy/cython_source/__interpolation.pxi'
src_4 = './redblackpy/cython_source/__arithmetic.pxi'
src_1 = open(src_1, 'r')
src_2 = open(src_2, 'r')
src_3 = open(src_3, 'r')
src_4 = open(src_4, 'r')
output_1 = open('./redblackpy/cython_source/dtype_tree_processing.pxi', 'w')
output_2 = open('./redblackpy/cython_source/tree_series_dtype.pxi', 'w')
output_3 = open('./redblackpy/cython_source/interpolation.pxi', 'w')
output_4 = open('./redblackpy/cython_source/arithmetic.pxi', 'w')
generate_from_cython_src(src_1, output_1, TYPES[:-1], 0)
generate_from_cython_src(src_2, output_2, TYPES, 14)
generate_from_cython_src(src_3, output_3, TYPES, 0)
generate_from_cython_src(src_4, output_4, TYPES, 0)
src_1.close()
src_2.close()
src_3.close()
src_4.close()
output_1.close()
output_2.close()
output_3.close()
output_4.close()
#--------------------------------------------------------------------------------------------
ext_modules=[ Extension( "redblackpy.series.tree_series",
sources=["redblackpy/series/tree_series.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'],
depends=[ 'core/tree/tree.hpp',
'core/tree/rb_tree.tpp'
'core/tree/rb_node.tpp',
'core/tree/rb_node_valued.tpp',
'core/trees_iterator/iterator.hpp',
'core/trees_iterator/iterator.tpp' ], ),
Extension( "redblackpy.series.series_iterator",
sources=["redblackpy/series/series_iterator.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'],
depends=[ 'core/tree/tree.hpp',
'core/tree/rb_tree.tpp'
'core/tree/rb_node.tpp',
'core/tree/rb_node_valued.tpp',
'core/trees_iterator/iterator.hpp',
'core/trees_iterator/iterator.tpp' ], ),
Extension( "redblackpy.benchmark.timer",
sources=["redblackpy/benchmark/timer.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'] ) ]
setup( name='redblackpy',
ext_modules = cythonize(ext_modules),
version='0.1.3.0',
author='Solodskikh Kirill',
author_email='[email protected]',
maintainer='Intuition',
maintainer_email='[email protected]',
install_requires=['cython'],
description='Data structures based on red-black trees.',
url='https://intuitionengineeringteam.github.io/RedBlackPy/',
download_url='https://github.com/IntuitionEngineeringTeam/RedBlackPy/archive/master.zip',
zip_safe=False,
packages=[ 'redblackpy', 'redblackpy.series',
'redblackpy.benchmark', 'redblackpy.tree_cython_api'],
package_data={'redblackpy.series': ['*.pxd']},
include_package_data=True,
license='Apache License 2.0',
long_description='RedBlackPy is a light Python library that provides data structures \
aimed to fast insertion, removal and self sorting to manipulating ordered data in efficient way.\
The core part of the library had been written on C++ and then was wrapped in Cython. \
Hope that many would find the primary data structures of this library very handy in working \
with time series. One of the main feature of this structures is an access by arbitrary \
key using interpolation, what makes processing of multiple non synchronized time series very simple.\
All data structures based on red black trees.',
classifiers = [ 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3' ] )
| [((52, 0, 52, 56), 'rbp_setup_tools.code_generation.generate_from_cython_src', 'generate_from_cython_src', ({(52, 25, 52, 30): 'src_1', (52, 32, 52, 40): 'output_1', (52, 42, 52, 52): 'TYPES[:-1]', (52, 54, 52, 55): '(0)'}, {}), '(src_1, output_1, TYPES[:-1], 0)', False, 'from rbp_setup_tools.code_generation import generate_from_cython_src\n'), ((53, 0, 53, 52), 'rbp_setup_tools.code_generation.generate_from_cython_src', 'generate_from_cython_src', ({(53, 25, 53, 30): 'src_2', (53, 32, 53, 40): 'output_2', (53, 42, 53, 47): 'TYPES', (53, 49, 53, 51): '(14)'}, {}), '(src_2, output_2, TYPES, 14)', False, 'from rbp_setup_tools.code_generation import generate_from_cython_src\n'), ((54, 0, 54, 51), 'rbp_setup_tools.code_generation.generate_from_cython_src', 'generate_from_cython_src', ({(54, 25, 54, 30): 'src_3', (54, 32, 54, 40): 'output_3', (54, 42, 54, 47): 'TYPES', (54, 49, 54, 50): '(0)'}, {}), '(src_3, output_3, TYPES, 0)', False, 'from rbp_setup_tools.code_generation import generate_from_cython_src\n'), ((55, 0, 55, 51), 'rbp_setup_tools.code_generation.generate_from_cython_src', 'generate_from_cython_src', ({(55, 25, 55, 30): 'src_4', (55, 32, 55, 40): 'output_4', (55, 42, 55, 47): 'TYPES', (55, 49, 55, 50): '(0)'}, {}), '(src_4, output_4, TYPES, 0)', False, 'from rbp_setup_tools.code_generation import generate_from_cython_src\n'), ((16, 3, 16, 20), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((68, 14, 79, 74), 'distutils.extension.Extension', 'Extension', (), '', False, 'from distutils.extension import Extension\n'), ((81, 14, 92, 74), 'distutils.extension.Extension', 'Extension', (), '', False, 'from distutils.extension import Extension\n'), ((94, 14, 99, 56), 'distutils.extension.Extension', 'Extension', (), '', False, 'from distutils.extension import Extension\n'), ((22, 5, 22, 22), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((102, 21, 102, 43), 'Cython.Build.cythonize', 'cythonize', ({(102, 31, 102, 42): 'ext_modules'}, {}), '(ext_modules)', False, 'from Cython.Build import cythonize\n'), ((27, 5, 27, 22), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((19, 56, 19, 74), 'platform.mac_ver', 'platform.mac_ver', ({}, {}), '()', False, 'import platform\n'), ((33, 73, 33, 90), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n')] |
kishan2064/hashpy1 | source/accounts/views.py | 2f8c6fddb07e80e9a7b37a5632ed8ab8bf68d264 | from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME, get_user_model
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import PasswordResetView as BasePasswordResetView, SuccessURLAllowedHostsMixin
from django.shortcuts import get_object_or_404, resolve_url
from django.utils.crypto import get_random_string
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.utils.translation import gettext_lazy as _
from django.views.generic import RedirectView
from django.views.generic.edit import FormView
from django.conf import settings
from .utils import (
get_login_form, send_activation_email, get_password_reset_form, send_reset_password_email,
send_activation_change_email, is_username_disabled, get_resend_ac_form
)
from .forms import SignUpForm, ProfileEditForm, ChangeEmailForm
from .models import Activation
UserModel = get_user_model()
class SuccessRedirectView(SuccessURLAllowedHostsMixin, FormView):
redirect_field_name = REDIRECT_FIELD_NAME
def get_success_url(self):
url = self.get_redirect_url()
return url or resolve_url(settings.LOGIN_REDIRECT_URL)
def get_redirect_url(self):
redirect_to = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name, '')
)
url_is_safe = is_safe_url(
url=redirect_to,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
return redirect_to if url_is_safe else ''
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
class SignInView(SuccessRedirectView):
template_name = 'accounts/login.html'
form_class = get_login_form()
success_url = '/'
@method_decorator(sensitive_post_parameters('password'))
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
# Sets a test cookie to make sure the user has cookies enabled
request.session.set_test_cookie()
return super(SignInView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
# If the test cookie worked, go ahead and
# delete it since its no longer needed
if self.request.session.test_cookie_worked():
self.request.session.delete_test_cookie()
login(self.request, form.get_user())
return super(SignInView, self).form_valid(form)
class SignUpView(FormView):
template_name = 'accounts/register.html'
form_class = SignUpForm
success_url = '/'
def form_valid(self, form):
user = form.save(commit=False)
if is_username_disabled():
# Set temporary username
user.username = get_random_string()
else:
user.username = form.cleaned_data.get('username')
if settings.ENABLE_USER_ACTIVATION:
user.is_active = False
user.save()
# Change the username to "user_ID" form
if is_username_disabled():
user.username = 'user_{}'.format(user.id)
user.save()
if settings.ENABLE_USER_ACTIVATION:
send_activation_email(self.request, user)
messages.add_message(self.request, messages.SUCCESS,
_('You are registered. To activate the account, follow the link sent to the mail.'))
else:
raw_password = form.cleaned_data.get('password1')
user = authenticate(username=user.username, password=raw_password)
login(self.request, user)
messages.add_message(self.request, messages.SUCCESS, _('You are successfully registered!'))
return super(SignUpView, self).form_valid(form)
class ActivateView(RedirectView):
permanent = False
query_string = True
pattern_name = 'index'
def get_redirect_url(self, *args, **kwargs):
assert 'code' in kwargs
act = get_object_or_404(Activation, code=kwargs['code'])
# Activate user's profile
user = act.user
user.is_active = True
user.save()
# Remove activation record, it is unneeded
act.delete()
messages.add_message(self.request, messages.SUCCESS, _('You have successfully activated your account!'))
login(self.request, user)
return super(ActivateView, self).get_redirect_url()
class ReSendActivationCodeView(SuccessRedirectView):
template_name = 'accounts/resend_activation_code.html'
form_class = get_resend_ac_form()
success_url = '/'
def form_valid(self, form):
user = form.get_user()
activation = user.activation_set.get()
activation.delete()
send_activation_email(self.request, user)
messages.add_message(self.request, messages.SUCCESS, _('A new activation code has been sent to your e-mail.'))
return super(ReSendActivationCodeView, self).form_valid(form)
class PasswordResetView(BasePasswordResetView):
form_class = get_password_reset_form()
def form_valid(self, form):
send_reset_password_email(self.request, form.get_user())
return super(PasswordResetView, self).form_valid(form)
class ProfileEditView(LoginRequiredMixin, FormView):
template_name = 'accounts/profile/edit.html'
form_class = ProfileEditForm
success_url = '/accounts/profile/edit/'
def get_initial(self):
initial = super(ProfileEditView, self).get_initial()
user = self.request.user
initial['first_name'] = user.first_name
initial['last_name'] = user.last_name
return initial
def form_valid(self, form):
user = self.request.user
user.first_name = form.cleaned_data.get('first_name')
user.last_name = form.cleaned_data.get('last_name')
user.save()
messages.add_message(self.request, messages.SUCCESS, _('Profile data has been successfully updated.'))
return super(ProfileEditView, self).form_valid(form)
class ChangeEmailView(LoginRequiredMixin, FormView):
template_name = 'accounts/profile/change_email.html'
form_class = ChangeEmailForm
success_url = '/accounts/change/email/'
def get_form_kwargs(self):
kwargs = super(ChangeEmailView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def get_initial(self):
initial = super(ChangeEmailView, self).get_initial()
user = self.request.user
initial['email'] = user.email
return initial
def form_valid(self, form):
user = self.request.user
email = form.cleaned_data.get('email')
email = email.lower()
if hasattr(settings, 'EMAIL_ACTIVATION_AFTER_CHANGING') and settings.EMAIL_ACTIVATION_AFTER_CHANGING:
send_activation_change_email(self.request, user, email)
messages.add_message(self.request, messages.SUCCESS,
_('To complete the change of mail, click on the link sent to it.'))
else:
user.email = email
user.save()
messages.add_message(self.request, messages.SUCCESS, _('Email successfully changed.'))
return super(ChangeEmailView, self).form_valid(form)
class ChangeEmailActivateView(RedirectView):
permanent = False
query_string = True
pattern_name = 'change_email'
def get_redirect_url(self, *args, **kwargs):
assert 'code' in kwargs
act = get_object_or_404(Activation, code=kwargs['code'])
# Change user's email
user = act.user
user.email = act.email
user.save()
# Remove activation record, it is unneeded
act.delete()
messages.add_message(self.request, messages.SUCCESS, _('You have successfully changed your email!'))
return super(ChangeEmailActivateView, self).get_redirect_url()
| [((24, 12, 24, 28), 'django.contrib.auth.get_user_model', 'get_user_model', ({}, {}), '()', False, 'from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME, get_user_model\n'), ((58, 5, 58, 35), 'django.utils.decorators.method_decorator', 'method_decorator', ({(58, 22, 58, 34): 'csrf_protect'}, {}), '(csrf_protect)', False, 'from django.utils.decorators import method_decorator\n'), ((59, 5, 59, 34), 'django.utils.decorators.method_decorator', 'method_decorator', ({(59, 22, 59, 33): 'never_cache'}, {}), '(never_cache)', False, 'from django.utils.decorators import method_decorator\n'), ((57, 22, 57, 59), 'django.views.decorators.debug.sensitive_post_parameters', 'sensitive_post_parameters', ({(57, 48, 57, 58): '"""password"""'}, {}), "('password')", False, 'from django.views.decorators.debug import sensitive_post_parameters\n'), ((125, 14, 125, 64), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (), '', False, 'from django.shortcuts import get_object_or_404, resolve_url\n'), ((136, 8, 136, 33), 'django.contrib.auth.login', 'login', ({(136, 14, 136, 26): 'self.request', (136, 28, 136, 32): 'user'}, {}), '(self.request, user)', False, 'from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME, get_user_model\n'), ((245, 14, 245, 64), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (), '', False, 'from django.shortcuts import get_object_or_404, resolve_url\n'), ((32, 22, 32, 62), 'django.shortcuts.resolve_url', 'resolve_url', ({(32, 34, 32, 61): 'settings.LOGIN_REDIRECT_URL'}, {}), '(settings.LOGIN_REDIRECT_URL)', False, 'from django.shortcuts import get_object_or_404, resolve_url\n'), ((87, 28, 87, 47), 'django.utils.crypto.get_random_string', 'get_random_string', ({}, {}), '()', False, 'from django.utils.crypto import get_random_string\n'), ((109, 19, 109, 78), 'django.contrib.auth.authenticate', 'authenticate', (), '', False, 'from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME, get_user_model\n'), ((110, 12, 110, 37), 'django.contrib.auth.login', 'login', ({(110, 18, 110, 30): 'self.request', (110, 32, 110, 36): 'user'}, {}), '(self.request, user)', False, 'from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME, get_user_model\n'), ((135, 61, 135, 111), 'django.utils.translation.gettext_lazy', '_', ({(135, 63, 135, 110): '"""You have successfully activated your account!"""'}, {}), "('You have successfully activated your account!')", True, 'from django.utils.translation import gettext_lazy as _\n'), ((154, 61, 154, 117), 'django.utils.translation.gettext_lazy', '_', ({(154, 63, 154, 116): '"""A new activation code has been sent to your e-mail."""'}, {}), "('A new activation code has been sent to your e-mail.')", True, 'from django.utils.translation import gettext_lazy as _\n'), ((191, 61, 191, 109), 'django.utils.translation.gettext_lazy', '_', ({(191, 63, 191, 108): '"""Profile data has been successfully updated."""'}, {}), "('Profile data has been successfully updated.')", True, 'from django.utils.translation import gettext_lazy as _\n'), ((255, 61, 255, 107), 'django.utils.translation.gettext_lazy', '_', ({(255, 63, 255, 106): '"""You have successfully changed your email!"""'}, {}), "('You have successfully changed your email!')", True, 'from django.utils.translation import gettext_lazy as _\n'), ((105, 33, 105, 116), 'django.utils.translation.gettext_lazy', '_', ({(105, 35, 105, 115): '"""You are registered. 
To activate the account, follow the link sent to the mail."""'}, {}), "('You are registered. To activate the account, follow the link sent to the mail.'\n )", True, 'from django.utils.translation import gettext_lazy as _\n'), ((112, 65, 112, 102), 'django.utils.translation.gettext_lazy', '_', ({(112, 67, 112, 101): '"""You are successfully registered!"""'}, {}), "('You are successfully registered!')", True, 'from django.utils.translation import gettext_lazy as _\n'), ((227, 33, 227, 99), 'django.utils.translation.gettext_lazy', '_', ({(227, 35, 227, 98): '"""To complete the change of mail, click on the link sent to it."""'}, {}), "('To complete the change of mail, click on the link sent to it.')", True, 'from django.utils.translation import gettext_lazy as _\n'), ((232, 65, 232, 97), 'django.utils.translation.gettext_lazy', '_', ({(232, 67, 232, 96): '"""Email successfully changed."""'}, {}), "('Email successfully changed.')", True, 'from django.utils.translation import gettext_lazy as _\n')] |
INDElab/conversationkg | conversationkg/kgs/writers.py | 8bfe09b0afb4954f633a9287f723c61dcd21ce46 | from ..conversations.corpus import Conversation
from ..conversations.emails import Email
from collections import Counter
import matplotlib
import pandas as pd
import json
class JSONWriter:
def __init__(self, kg):
self.kg = kg
self.entities = kg.entities()
self.triples = kg.triples
self.provenances = kg.provenances
def store(self, name, save_mapping=True):
with open(f"{name}.json", "w") as handle:
json.dump(self.translated, handle)
with open(f"{name}.provenances.json", "w") as handle:
json.dump(self.provenances, handle)
if save_mapping:
reversed_d = self.reverse_mapping(self.entity2ind)
json_d = {i:e.to_json() for i, e in reversed_d.items()}
with open(f"{name}.ind2entity.json", "w") as handle:
json.dump(json_d, handle)
reverse_d = self.reverse_mapping(self.pred2ind)
with open(f"{name}.ind2pred.json", "w") as handle:
json.dump(reverse_d, handle)
@classmethod
def restore(cls, name, load_mapping_of=None):
def get_class(cls_name):
for mod in conversations_modules:
try:
cls = getattr(mod, cls_name)
return cls
except AttributeError:
pass
raise AttributeError(f"{cls_name} could not be found in any of the modules!")
def json_to_entity(json_dict):
try:
json_dict["class"]
except KeyError:
print(json_dict.keys())
raise
cls_name = json_dict["class"]
cls = get_class(cls_name)
return cls.from_json(json_dict)
if load_mapping_of is None:
load_mapping_of = name
with open(f"{load_mapping_of}.ind2entity.json") as handle:
loaded_entity_mapping = {int(i): d for i, d in json.load(handle).items()}
ind2entity = {i:json_to_entity(d) for i, d in loaded_entity_mapping.items()}
ind2entity = {i: (Person(x) if type(x) is WholePerson else x)
for i, x in ind2entity.items()}
with open(f"{load_mapping_of}.ind2pred.json") as handle:
ind2pred = {int(i): d for i, d in json.load(handle).items()}
with open(f"{name}.json") as handle:
loaded = json.load(handle)
restored_triples = [(ind2entity[s],
ind2pred[p],
ind2entity[o]) for s, p, o in loaded]
with open(f"{name}.provenances.json") as handle:
provenances = json.load(handle)
kg = KG(restored_triples, provenances)
kg.translated = loaded
kg.entity2ind = kg.reverse_mapping(ind2entity)
kg.pred2ind = kg.reverse_mapping(ind2pred)
return kg
@staticmethod
def reverse_mapping(d):
rev_d = {}
for k, v in d.items():
if not v in rev_d:
rev_d[v] = k
else:
print("duplicate:", v)
if not type(v) is Person:
raise ValueError("Non-bijective mapping!")
return rev_d
class CSVWriter:
def __init__(self, kg):
self.kg = kg
self.entities = kg.entities()
self.triples = kg.triples
self.provenances = kg.provenances
def get_node_df(self):
records = []
sorted_ents = sorted(self.entities, key=lambda x: (str(type(x)), str(x)))
for i, e in enumerate(sorted_ents):
node_id = i # hash(e)
node_t = str(e)
node_type = type(e).__name__
node_u = f"icons/{node_type.lower()}.png"
type_ = "LinkChart" if i == 0 else "0"
if type(e) in {Conversation, Email}:
node_dtopic = e.topic.topic.index
node_dtopic_rate = round(e.topic.score, 5)
else:
node_dtopic = -1
node_dtopic_rate = 1.0
lat = lng = 0.0
records.append(
(
type_, node_type, node_id, node_u, node_t,
node_dtopic, node_dtopic_rate, lat, lng
)
)
return pd.DataFrame.from_records(records,
columns= ['type',
'node_type',
'node_id',
'node_u',
'node_t',
'node_dtopic',
'node_dtopic_rate',
'lat',
'lng']
)
def get_link_df(self):
link_types = {p for s, p, o in self.triples}
link_counts = Counter(self.triples)
colours = dict(zip(link_types, list(matplotlib.colors.cnames.values())))
sorted_ents = dict(zip(sorted(self.entities, key=str),
range(len(self.entities))))
records = []
for i, ((s, p, o), prov) in enumerate(zip(self.triples, self.provenances)):
linkId = i # hash((s, p, o)) # s.time.timestamp()
end1 = sorted_ents[s] # hash(s)
end2 = sorted_ents[o] # hash(o)
linkcount = link_counts[(s,p,o)]
linkcolor = colours[p]
linktype = p
itemID = prov
rec = [linkId,
end1,
end2,
linkcount,
linkcolor,
itemID,
linktype]
records.append(rec)
return pd.DataFrame.from_records(records,
columns=['linkId', 'end1', 'end2', 'linkcount', 'linkcolor', 'itemID', 'linktype'])
def to_csv(self, save_path):
node_df = self.get_node_df()
link_df = self.get_link_df()
node_df.to_csv(save_path + ".nodes.csv",
index=False)
link_df.to_csv(save_path + ".links.csv",
index=False)
from neo4j import GraphDatabase
class Neo4jWriter:
def __init__(self, kg):
self.kg = kg
def to_neo4j(self):
pass
def run(self, clear=True):
self.driver = GraphDatabase.driver("bolt://localhost:7687",
auth=("neo4j", "pwd"), encrypted=False)
if clear:
tx.run("""MATCH (x)
DETACH DELETE x""")
| [((157, 15, 167, 42), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (), '', True, 'import pandas as pd\n'), ((172, 22, 172, 43), 'collections.Counter', 'Counter', ({(172, 30, 172, 42): 'self.triples'}, {}), '(self.triples)', False, 'from collections import Counter\n'), ((198, 15, 199, 124), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (), '', True, 'import pandas as pd\n'), ((235, 22, 236, 77), 'neo4j.GraphDatabase.driver', 'GraphDatabase.driver', (), '', False, 'from neo4j import GraphDatabase\n'), ((24, 12, 24, 46), 'json.dump', 'json.dump', ({(24, 22, 24, 37): 'self.translated', (24, 39, 24, 45): 'handle'}, {}), '(self.translated, handle)', False, 'import json\n'), ((27, 12, 27, 47), 'json.dump', 'json.dump', ({(27, 22, 27, 38): 'self.provenances', (27, 40, 27, 46): 'handle'}, {}), '(self.provenances, handle)', False, 'import json\n'), ((80, 21, 80, 38), 'json.load', 'json.load', ({(80, 31, 80, 37): 'handle'}, {}), '(handle)', False, 'import json\n'), ((88, 26, 88, 43), 'json.load', 'json.load', ({(88, 36, 88, 42): 'handle'}, {}), '(handle)', False, 'import json\n'), ((34, 16, 34, 41), 'json.dump', 'json.dump', ({(34, 26, 34, 32): 'json_d', (34, 34, 34, 40): 'handle'}, {}), '(json_d, handle)', False, 'import json\n'), ((38, 16, 38, 44), 'json.dump', 'json.dump', ({(38, 26, 38, 35): 'reverse_d', (38, 37, 38, 43): 'handle'}, {}), '(reverse_d, handle)', False, 'import json\n'), ((173, 44, 173, 77), 'matplotlib.colors.cnames.values', 'matplotlib.colors.cnames.values', ({}, {}), '()', False, 'import matplotlib\n'), ((69, 59, 69, 76), 'json.load', 'json.load', ({(69, 69, 69, 75): 'handle'}, {}), '(handle)', False, 'import json\n'), ((76, 46, 76, 63), 'json.load', 'json.load', ({(76, 56, 76, 62): 'handle'}, {}), '(handle)', False, 'import json\n')] |
shikew/Handwriting-calculator | model-test.py | 5e0da9f8ceac6dcc815139c6855dfc6fb5af909f | import numpy as np
from PIL import Image
from keras.models import load_model
img_gray = Image.open('1002.png')
number = np.array(img_gray)
print(number.shape)
print('准备的图片的shape:',number.flatten().shape)
print('原number:',number)
number = number.astype('float32')
number = number/255 #归一化
number = number.flatten()
print('处理过后的number.shape:',number.shape)
model = load_model('mnist-dnn.h5')
# model.load_weights('mnist.model.best.hdf5')
# def recognize(photo_data):
# return clf.predict(photo_data)
print(model.predict_classes(np.array([number])))
#print('测试标签为:',test_target[8000]) | [((5, 11, 5, 33), 'PIL.Image.open', 'Image.open', ({(5, 22, 5, 32): '"""1002.png"""'}, {}), "('1002.png')", False, 'from PIL import Image\n'), ((6, 9, 6, 27), 'numpy.array', 'np.array', ({(6, 18, 6, 26): 'img_gray'}, {}), '(img_gray)', True, 'import numpy as np\n'), ((15, 8, 15, 34), 'keras.models.load_model', 'load_model', ({(15, 19, 15, 33): '"""mnist-dnn.h5"""'}, {}), "('mnist-dnn.h5')", False, 'from keras.models import load_model\n'), ((20, 28, 20, 46), 'numpy.array', 'np.array', ({(20, 37, 20, 45): '[number]'}, {}), '([number])', True, 'import numpy as np\n')] |
khrushjing/node-gdal-async | deps/libgdal/gyp-formats/ogr_mem.gyp | 6546b0c8690f2db677d5385b40b407523503b314 | {
"includes": [
"../common.gypi"
],
"targets": [
{
"target_name": "libgdal_ogr_mem_frmt",
"type": "static_library",
"sources": [
"../gdal/ogr/ogrsf_frmts/mem/ogrmemdatasource.cpp",
"../gdal/ogr/ogrsf_frmts/mem/ogrmemlayer.cpp",
"../gdal/ogr/ogrsf_frmts/mem/ogrmemdriver.cpp"
],
"include_dirs": [
"../gdal/ogr/ogrsf_frmts/mem"
]
}
]
}
| [] |
nantille/iblviewer | iblviewer/volume.py | a5dad67e8f4b99a535297ba0803caf07b1107ca1 | from dataclasses import dataclass, field
from typing import Mapping, List, Any
from datetime import datetime
import logging
import pandas as pd
import glob
import numpy as np
import logging
import os
from collections import OrderedDict
import nrrd
import vtk
import vedo
from vtk.util.numpy_support import numpy_to_vtk
from iblviewer.collection import Collection
import iblviewer.objects as obj
import iblviewer.utils as utils
@dataclass
class VolumeModel:
RAW = 'raw'
SEGMENTED = 'segmented'
NORMALIZED_SUFFIX = '_norm'
DATA_TYPE = {RAW:0, SEGMENTED:1}
PREFIX = 'Volume'
__count = 0
def unique_name():
VolumeModel.__count += 1
return f'{VolumeModel.PREFIX}_{VolumeModel.__count}'
name: str = field(default_factory=unique_name)
file_path: str = None
scalars: Collection = field(default_factory=Collection)
axes: List = field(default_factory=lambda: [1, 1, 1])
data_min: float = None
data_max: float = None
data_map_step: float = 1.0
data: np.ndarray = None
data_type: str = RAW
resolution: int = 1
# Default units are microns.
units: float = 1e-06
base_color_map: Any = None
# At IBL, volume mappings are used from ibllib: ibllib.atlas.regions.mappings
mapping_name: str = None
lateralized: bool = False
# Mapping function. If None, the volume will be given as it is.
mapping: Any = None
luts: Collection = field(default_factory=Collection)
slicers: Collection = field(default_factory=Collection)
isosurfaces: Collection = field(default_factory=Collection)
interactive_subsampling: bool = True
volume_visible: bool = True
slices_visible: bool = True
transpose_shape: Any = None
    dimensions: np.ndarray = field(default_factory=lambda: np.zeros(3).astype(float))
    center: np.ndarray = field(default_factory=lambda: np.zeros(3).astype(float))
def compute_size(self):
"""
Compute volume size
"""
if self.data is None:
return
self.dimensions = np.array(self.data.shape)[:3]
if self.resolution is None:
return
self.resolution = int(self.resolution) # TODO: move this to constructor or init
self.dimensions *= self.resolution
self.center = np.ones(3) * self.resolution / 2 + self.dimensions / 2
def compute_range(self, force=False):
"""
Compute min and max range in the volume
:return: Min and max values
"""
if self.data_min is not None and self.data_max is not None and not force:
return self.data_min, self.data_max
self.data_min = np.min(self.data)
self.data_max = np.max(self.data)
#print('Volume min-max', self.data_min, self.data_max)
return self.data_min, self.data_max
def guess_volume_type(self):
"""
Infer the volume type when it was not specified by the user.
        We assume here that a volume whose values lie between -1 and 1 is a raw (non-segmented) one.
"""
if self.data_type is None:
if self.data_min is None or self.data_max is None:
self.compute_range()
if self.data_min >= -1 and self.data_max <= 1:
guess = VolumeModel.RAW
else:
guess = VolumeModel.SEGMENTED
self.data_type = guess
def is_segmented(self, auto_guess=True):
"""
Get whether current volume/image is segmented
:return: Boolean
"""
if self.data_type is None and auto_guess:
self.guess_volume_type()
return self.data_type == VolumeModel.SEGMENTED
def read_volume(self, file_path):
"""
Read local volume. Downloads the file first if it's remote.
:param file_path: Volume path
:return: 3D array
"""
if file_path.startswith('http') or file_path.startswith('ftp'):
downloaded_temp_file_path = vedo.download(file_path, verbose=False)
if file_path.endswith('nrrd'):
data, header = nrrd.read(downloaded_temp_file_path)
else:
data = vedo.loadImageData(downloaded_temp_file_path)
else:
if file_path.endswith('nrrd'):
data, header = nrrd.read(file_path, index_order='C')
else:
data = vedo.loadImageData(file_path)
return data
def load_volume(self, file_path, remap_scalars=False, mapping=None, make_current=True):
"""
Load a volume data file. Supports NRRD and many other formats thanks to vedo/VTK
:param file_path: Volume file path. Could support other file types easily.
        :param remap_scalars: Whether scalar values in the volume are replaced by
        their row id from the given mapping. This is necessary in the case of segmented
        volumes whose region ids are discontinuous.
:param mapping: Pandas Series or a Dictionary
:param make_current: Set the volume data as the current one
:return: 3D array
"""
data = None
if not remap_scalars or mapping is None:
            data = self.read_volume(file_path)
else:
time = datetime.now()
new_file_path = utils.change_file_name(file_path, None, None, VolumeModel.NORMALIZED_SUFFIX)
if os.path.exists(new_file_path):
                data = self.read_volume(new_file_path)
else:
                data = self.read_volume(file_path)
data, mapping = self.remap_slow(data, mapping, new_file_path)
logging.info('Remapped scalar values in: ' + str(utils.time_diff(time)) + 's')
'''
if volume is not None:
logging.info('Opened atlas ' + new_file_path + ' in ' + str(utils.time_diff(time)) + 's')
min_value, max_value = np.amin(data), np.amax(data)
logging.info('Min max scalar values in volume ' + str(min_value) + ' -> ' + str(max_value))
else:
logging.error('Failed to open atlas ' + new_file_path)
'''
if make_current and data is not None:
self.data = data
return data, mapping
def transpose(self, shape=None):
"""
Transpose the volume for visualization in VTK
:param shape: The new shape. If None, will default to self.transpose_shape
"""
if shape is None:
shape = self.transpose_shape
if shape is None:
return
self.data = np.transpose(self.data, shape)
def remap_slow(self, data, mapping=None, write_path=None):
"""
Reassign volume values (slow on large volumes!) so that they're continuous
:param data: Volume ndarray
:param write_path: Where the modified volume will be stored
(to spare going through this method next time)
:param mapping: Pandas Series or a Dictionary that maps raw volume scalars to new ones
:return: Modified volume data
"""
logging.info('\nBuilding appropriate volume from Allen data source...')
#volume = np.vectorize(self.f)(data)
labels = np.sort(np.unique(data))
num_labels = len(labels)
if mapping is None:
mapping = pd.Series(labels)
logging.info('Num regions labeled in volume ' + str(num_labels) + ' from ' + str(mapping.size) + ' in atlas')
logging.info('Reassigning ' + str(num_labels) + ' scalar values...')
for iter_id in range(num_labels):
label = labels[iter_id]
ids = mapping.index[mapping == label].to_list()
if len(ids) < 1:
continue
# On a large volume, this takes a long time
data[data == label] = ids[0]
if num_labels > 10000 and iter_id % 10 == 0:
                logging.info(' Progress: ' + str(int(iter_id / num_labels * 100)) + '%')
if write_path is not None:
logging.info('Saving volume data under ' + write_path)
nrrd.write(write_path, data, index_order='C')
return data, mapping
def build_lut(self, scalar_map=None, scalar_range=None, color_map=None,
alpha_map=None, zero_is_transparent=True,
noise_amount=0.0, nan_rgba=None, make_active=True):
"""
Build a look-up table (LUT, sometimes known as transfer function) for the volume
        :param scalar_map: A 2D list whose first column holds values from the volume itself and whose
        second column holds the scalar values that correspond to each region
:param scalar_range: Min and max values in a list
:param color_map: Color map name to apply
:param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that
says how transparent a scalar value should be
:param zero_is_transparent: Whether zero values are made transparent, True by default
:param noise_amount: Whether a noise value is applied on the colors
:param nan_rgba: Color and transparency (RGBA) to assign to invalid (out of range or None) scalar values
:param make_active: Whether this one is made active (you still have to update the views after that)
:return: LUTModel
"""
lut_model = LUTModel()
lut_model.build(scalar_map, scalar_range, color_map, alpha_map,
zero_is_transparent, noise_amount, nan_rgba)
self.luts.store(lut_model, set_current=make_active)
return lut_model
def blend_maps(map1, map2, time, total_time):
"""
Blend color maps
"""
weight1 = max(0.0, total_time - time)
weight2 = max(0.0, time)
return map1 * weight1 + map2 * weight2
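
# Minimal usage sketch for VolumeModel (the file path and scalar values below are
# assumptions, not values required by this module). Wrapped in a function so that
# importing this module does not trigger any file I/O.
def _example_volume_model_usage():
    model = VolumeModel(resolution=25)
    data, mapping = model.load_volume('/path/to/segmented_volume.nrrd')
    model.compute_size()
    # Map each region id found in the volume to an arbitrary scalar value
    scalar_map = {region_id: float(region_id % 10) for region_id in range(1, 100)}
    lut = model.build_lut(scalar_map=scalar_map, scalar_range=[0.0, 10.0],
                          color_map='viridis', zero_is_transparent=True)
    return model, lut
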
class Volume(vedo.Volume):
"""
Overwriting of vedo.Volume constructor that is ill-designed as
it transposes the given numpy array without us knowing about it,
not giving us the option to choose about that.
"""
def __init__(self,
inputobj=None,
c='RdBu_r',
alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
alphaGradient=None,
alphaUnit=1,
mode=0,
shade=False,
spacing=None,
dims=None,
origin=None,
mapper='smart'):
vtk.vtkVolume.__init__(self)
vedo.BaseGrid.__init__(self)
self.axes = [1, 1, 1]
###################
if isinstance(inputobj, str):
if "https://" in inputobj:
from vedo.io import download
inputobj = download(inputobj, verbose=False) # fpath
elif os.path.isfile(inputobj):
pass
else:
inputobj = sorted(glob.glob(inputobj))
###################
if 'gpu' in mapper:
self._mapper = vtk.vtkGPUVolumeRayCastMapper()
elif 'opengl_gpu' in mapper:
self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
elif 'smart' in mapper:
self._mapper = vtk.vtkSmartVolumeMapper()
elif 'fixed' in mapper:
self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
elif isinstance(mapper, vtk.vtkMapper):
self._mapper = mapper
else:
print("Error unknown mapper type", [mapper])
raise RuntimeError()
self.SetMapper(self._mapper)
###################
inputtype = str(type(inputobj))
#colors.printc('Volume inputtype', inputtype)
if inputobj is None:
img = vtk.vtkImageData()
elif vedo.utils.isSequence(inputobj):
if isinstance(inputobj[0], str): # scan sequence of BMP files
ima = vtk.vtkImageAppend()
ima.SetAppendAxis(2)
pb = vedo.utils.ProgressBar(0, len(inputobj))
for i in pb.range():
f = inputobj[i]
picr = vtk.vtkBMPReader()
picr.SetFileName(f)
picr.Update()
mgf = vtk.vtkImageMagnitude()
mgf.SetInputData(picr.GetOutput())
mgf.Update()
ima.AddInputData(mgf.GetOutput())
pb.print('loading...')
ima.Update()
img = ima.GetOutput()
else:
if "ndarray" not in inputtype:
inputobj = np.array(inputobj)
if len(inputobj.shape)==1:
                    varr = vedo.numpy2vtk(inputobj, dtype=float)
else:
# ------------------------------ Nasty lines commented here
#if len(inputobj.shape)>2:
#inputobj = np.transpose(inputobj, axes=[2, 1, 0])
                    varr = vedo.numpy2vtk(inputobj.ravel(order='F'), dtype=float)
varr.SetName('input_scalars')
img = vtk.vtkImageData()
if dims is not None:
img.SetDimensions(dims)
else:
if len(inputobj.shape)==1:
vedo.colors.printc("Error: must set dimensions (dims keyword) in Volume.", c='r')
raise RuntimeError()
img.SetDimensions(inputobj.shape)
img.GetPointData().SetScalars(varr)
#to convert rgb to numpy
# img_scalar = data.GetPointData().GetScalars()
# dims = data.GetDimensions()
# n_comp = img_scalar.GetNumberOfComponents()
# temp = utils.vtk2numpy(img_scalar)
# numpy_data = temp.reshape(dims[1],dims[0],n_comp)
# numpy_data = numpy_data.transpose(0,1,2)
# numpy_data = np.flipud(numpy_data)
elif "ImageData" in inputtype:
img = inputobj
elif isinstance(inputobj, vedo.Volume):
img = inputobj.GetMapper().GetInput()
elif "UniformGrid" in inputtype:
img = inputobj
elif hasattr(inputobj, "GetOutput"): # passing vtk object, try extract imagdedata
if hasattr(inputobj, "Update"):
inputobj.Update()
img = inputobj.GetOutput()
elif isinstance(inputobj, str):
from vedo.io import loadImageData, download
if "https://" in inputobj:
inputobj = download(inputobj, verbose=False)
img = loadImageData(inputobj)
else:
vedo.colors.printc("Volume(): cannot understand input type:\n", inputtype, c='r')
return
if dims is not None:
img.SetDimensions(dims)
if origin is not None:
img.SetOrigin(origin) ### DIFFERENT from volume.origin()!
if spacing is not None:
img.SetSpacing(spacing)
self._data = img
self._mapper.SetInputData(img)
self.mode(mode).color(c).alpha(alpha).alphaGradient(alphaGradient)
self.GetProperty().SetShade(True)
self.GetProperty().SetInterpolationType(1)
self.GetProperty().SetScalarOpacityUnitDistance(alphaUnit)
# remember stuff:
self._mode = mode
self._color = c
self._alpha = alpha
self._alphaGrad = alphaGradient
self._alphaUnit = alphaUnit
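
# Minimal sketch of the Volume wrapper above (array shape and spacing are arbitrary
# assumptions). Unlike the parent vedo.Volume constructor, the input array is used
# as-is, without implicit transposition.
def _example_volume_wrapper_usage():
    data = np.random.rand(50, 60, 70).astype(np.float32)
    volume = Volume(data, spacing=[25, 25, 25], mapper='smart')
    return volume
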
@dataclass
class LUTModel:
"""
This class might look slightly convoluted but it's actually simple.
We use double mapping here in order to enable live/interactive visualization
of volumetric data. Instead of replacing values in a 3D volume, we only replace
the colors in the 1D LUT list.
    The point is that it's too slow to update the volume data itself, like a segmented
volume with custom values. Instead, we map such custom values to a 1D
array (our LUT) that maps colors to raw volume values.
This is much faster in terms of rendering and it enables interactive visualization.
The scalar_lut is the original LUT for the given scalars (custom values)
and the mapped_lut is the LUT assigned to the surfaces (like slices)
that have copied data from the volume. The volume is given color_map
and alpha_map through vedo methods.
You might say "ok for double mapping, it's the only way for interactive
rendering of a volume, but what about color_map and mapped_lut? Aren't
they the same?". The answer is: they're the same but VTK does not accept
a vtkLookupTable for a volume. Instead, it wants a vtkColorTransferFunction
and a vtkPiecewiseFunction for alpha. There's no way around it.
The color_map will be computed as a vtkColorTransferFunction and
the alpha_map as the vtkPiecewiseFunction.
"""
name: str = NotImplementedError
color_map_function: Any = None
scalar_map: np.ndarray = None
scalar_min: float = 0.0
scalar_max: float = 1.0
scalar_lut: vtk.vtkLookupTable = None
mapped_lut: vtk.vtkLookupTable = None
color_map: np.ndarray = None
alpha_map: np.ndarray = None
base_color_map: np.ndarray = None
def build(self, scalar_map=None, scalar_range=None, color_map=None,
alpha_map=None, zero_is_transparent=True,
noise_amount=0.0, nan_rgba=None):
"""
Build several look-up tables (LUT, sometimes known as transfer function) for the volume.
This is where double-mapping occurs for segmented volumes that have values from 0 to n where
each value defines a sub-volume or region. If we want to assign values (say from another model)
to these regions, we'd have to change the volume values and it would be too slow iterating over
each voxel in 3D. Instead we define colors that represent these values and assign them to
segmented regions in a 1D list.
        :param scalar_map: A 2D list whose first column holds values from the volume itself and whose
        second column holds the scalar values that correspond to each region
:param scalar_range: Min and max values in a list
:param color_map: Color map name to apply
:param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that
says how transparent a scalar value should be
:param zero_is_transparent: Whether zero values are made transparent, True by default
:param noise_amount: Whether a noise value is applied on the colors
:param nan_rgba: Color and alpha values to assign to invalid (out of range or None) scalar values
:return: LUTModel
"""
if color_map is None:
return
if nan_rgba is None:
nan_rgba = [0.0, 0.0, 0.0, 0.0]
if self.base_color_map is None:
self.base_color_map = color_map
colors = []
alphas = []
lut = vtk.vtkLookupTable()
scalar_lut = vtk.vtkLookupTable()
# Use the number of values in the volume
num_steps = len(self.base_color_map) if self.base_color_map is not None else len(color_map)
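        # NOTE: the value computed above is overridden below with a fixed table size,
        # presumably matching the number of regions in the target atlas mapping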
num_steps = 2655
s_min = 0
s_max = num_steps
if scalar_map is None:
if color_map is None and self.base_color_map is not None:
color_map = self.base_color_map
loop = range(num_steps)
noise = None
if isinstance(noise_amount, float) and noise_amount > 0:
noise = np.random.rand(num_steps) * noise_amount - noise_amount / 2
# Vedo works with nested lists:
# [region_id, [r, g, b]] for color, and [region_id, a] for alpha
if scalar_map is None:
# Standard volume that is not segmented
lut.SetRange(s_min, s_max)
lut.SetNumberOfTableValues(num_steps)
scalar_lut.SetRange(s_min, s_max)
scalar_lut.SetNumberOfTableValues(num_steps)
for r_id in loop:
color = vedo.colors.getColor(color_map[r_id])
color = np.array(color)
if noise is not None:
color = color + noise[r_id]
color = np.maximum(color, 0.0)
color = np.minimum(color, 1.0)
colors.append([r_id, color])
alpha = 1.0 if alpha_map is None else alpha_map[r_id]
if r_id == 0 and zero_is_transparent:
alpha = 0.0
alphas.append([r_id, alpha])
lut.SetTableValue(r_id, *color, alpha)
scalar_lut.SetTableValue(r_id, *color, alpha)
#scalar_map[r_id] = color_map[r_id]
else:
# Segmented volume
s_min, s_max = scalar_range
lut.SetRange(0, num_steps)
lut.SetNumberOfTableValues(num_steps)
color = None
for r_id in range(num_steps):
try:
value = scalar_map[r_id]
except Exception:
value = None
if value is None:# or s_min > value or s_max < value:
color = nan_rgba[:3]
alpha = nan_rgba[3]
else:
color = vedo.colorMap(value, color_map, s_min, s_max)
alpha = 1.0 if alpha_map is None else alpha_map[r_id]
if value == 0 and zero_is_transparent:
alpha = 0.0
colors.append([r_id, color])
alphas.append([r_id, alpha])
lut.SetTableValue(r_id, *color, alpha)
# Real scalar LUT, mainly as a reference for the user
# Here the colors resulting from the given scalar min to max
# are assigned to segmented values in the volume
mock_values = np.linspace(s_min, s_max, num_steps)
scalar_lut.SetRange(s_min, s_max)
scalar_lut.SetNumberOfTableValues(len(mock_values))
for r_id in range(len(mock_values)):
color = list(vedo.colorMap(mock_values[r_id], color_map, s_min, s_max))
alpha = 0.0 if mock_values[r_id] == 0 and zero_is_transparent else 1.0
                scalar_lut.SetTableValue(r_id, *color, alpha)
lut.Build()
scalar_lut.Build()
# Just to avoid confusion: the user can give a string as a color map, like 'viridis'
# but the real color map object is stored in self.color_map. The name 'viridis'
# is stored under self.color_map_function (if needed later on)
self.color_map_function = color_map
self.color_map = colors
self.alpha_map = alphas
self.scalar_map = scalar_map
self.mapped_lut = lut
self.scalar_lut = scalar_lut
def get_sorted_scalars(self):
"""
Get a numpy 2D array of key-value pairs sorted by value
:return: 2D array
"""
sorted_scalars = np.zeros((len(self.scalar_map), 2))
values = list(self.scalar_map.values())
keys = list(self.scalar_map.keys())
sorted_scalars[:, 0] = keys
sorted_scalars[:, 1] = values
sorted_mask = sorted_scalars[:, 1].argsort()
sorted_scalars = sorted_scalars[sorted_mask]
return sorted_scalars
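
# Minimal sketch of building a LUTModel on its own (region count and scalar values
# are assumptions). In practice this is usually done through VolumeModel.build_lut().
# The mapped_lut is meant for slices/surfaces while scalar_lut keeps the original
# scalar-range colors as a reference.
def _example_lut_model_usage():
    lut_model = LUTModel()
    scalar_map = {region_id: region_id * 0.5 for region_id in range(1, 50)}
    lut_model.build(scalar_map=scalar_map, scalar_range=[0.0, 25.0],
                    color_map='viridis', zero_is_transparent=True)
    return lut_model.mapped_lut, lut_model.scalar_lut
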
class VolumeController():
"""
Wrapper class that handles both the volume and its slices
"""
def __init__(self, plot, model, initialize=True, clipping=True, slicer_box=True,
center_on_edges=False, alpha_unit_upper_offset=0.0, add_to_scene=True):
"""
Constructor
:param plot: Plot instance
:param model: VolumeModel instance
        :param initialize: Whether initialization is performed right away
:param clipping: Whether clipping is enabled at init time
:param slicer_box: Whether the slicer box is enabled at init
        :param center_on_edges: Whether the volume is offset by half a voxel or not
:param alpha_unit_upper_offset: The offset to apply to alpha unit computation.
If greater than 0, the volume will be less opaque
:param add_to_scene: Whether the volume is added to scene after init
"""
self.plot = plot
self.model = model
self.actor = None
self.picker = None
self.scalars = None
self.mask = None
self.bounding_mesh = None
self.alpha_unit_upper_offset = alpha_unit_upper_offset
self.alpha_factor = 0.001 # * self.model.resolution
self.clipping_planes = None
self.enable_volume_clipping = True
self.clipping_axes = []
self.slicers = OrderedDict()
self.slicers_selectable = False
self.scalar_bar = None
if initialize:
self.initialize(clipping, slicer_box, center_on_edges, add_to_scene)
#msg = 'Volume abs center', self.volume_center, 'position', np.array(self.volume_actor.pos())
#logging.info(msg)
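
    # Hypothetical usage sketch (the plot and model instances are assumed to come
    # from the rest of the application):
    #   controller = VolumeController(plot, volume_model)
    #   controller.set_opacity(0.8)
    #   controller.set_slices_visibility(True)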
def get_related_actors(self):
"""
Get all 3D actors related to this view (for registering it in the application)
:return: List of VTK objects
"""
actors = []
for slicer_id in self.slicers:
actor = self.slicers[slicer_id].actor
if actor is not None:
actors.append(actor)
for iso_id in self.model.isosurfaces:
actors.append(self.model.isosurfaces[iso_id])
actors.append(self.actor)
return actors
def initialize(self, clipping=True, slicer_box=True, center_on_edges=False, add_to_scene=True):
"""
Set the volume actor for visualization in VTK
:param clipping: Whether clipping is enabled
:param slicer_box: Whether the slicer box mode is enabled (6 clipping planes)
:param center_on_edges: Whether the volume's center is aligned to its edges
rather than the voxel center
:param add_to_scene: Whether the object is added to the scene
"""
self.build_actor(center_on_edges, add_to_scene)
self.initialize_picker()
if slicer_box:
self.initialize_slicer_box()
self.initialize_clipping_planes()
self.set_volume_clipping(clipping)
self.set_color_map()
'''
if use_mask:
self.mask = self.actor.clone()
self.mask.threshold(1, replace=1, replaceOut=0)
self.actor.mapper().SetMaskTypeToBinary()
self.actor.mapper().SetMaskInput(self.mask)
'''
def set_volume_visibility(self, on=True):
"""
Set volume visibility
:param on: Visibility boolean
"""
if self.actor is not None:
self.actor.SetVisibility(on)
def set_slices_visibility(self, on=True):
"""
Set the visibility of slices
:param on: Visibility boolean
"""
for slicer_id in self.slicers:
slicer_view = self.slicers.get(slicer_id)
slicer_view.actor.SetVisibility(on)
def get_slices_opacity(self):
"""
        Get the opacity of slices (should be the same value for all slices).
        A mean is computed over all slice alphas, just in case they differ
:return: Alpha value
"""
value = 0
num_values = 0
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.actor is not None:
slice_alpha = slicer.actor.GetProperty().GetOpacity()
if slice_alpha is None:
continue
value += slice_alpha
num_values += 1
if num_values == 0 or value == 0:
return None
return value / num_values
def set_slices_opacity(self, value):
"""
Set the opacity of slices
:param value: Alpha value
"""
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.actor is not None:
slicer.actor.alpha(value)
def get_opacity(self):
"""
Get the relative opacity unit
:return: Float
"""
return self.get_relative_opacity_unit()
def get_relative_opacity_unit(self):
"""
Get the alpha unit relative value
:return: Float
"""
alpha_unit = self.actor.alphaUnit()
r = self.model.resolution
# Inverse function of set_opacity_unit()
value = 1.1 - (alpha_unit / r)**0.5
return value
def set_opacity(self, value):
"""
Set the opacity of the volume like in set_opacity_unit()
:param value: Opacity value between 0.0 and 1.0
:return: Resulting alpha unit
"""
self.set_opacity_unit(value)
def set_opacity_unit(self, value):
"""
Set the opacity of the volume by modifying its alpha unit (a VTK thing).
The alpha unit defines how much a voxel is transparent to incoming ray.
This method normalizes the range between 0.0 and 1.0 as it depends
on the resolution of the volume
:param value: Opacity value between 0.0 and 1.0
:return: Resulting alpha unit
"""
r = self.model.resolution
# 1 is chosen and not 1.0 because when value == 1.0, that would
# mean that the volume is fully opaque and this yields artifacts with VTK
alpha_unit = (1 + self.alpha_unit_upper_offset - value)**2 * r
# vedo calls it "alpha" unit, vtk "opacity" unit. same-same!
self.actor.alphaUnit(alpha_unit)
return alpha_unit
def get_spacing(self):
"""
Get the spacing/resolution of the volume
"""
res = self.model.resolution
spacing = None
if isinstance(res, int) or isinstance(res, float):
spacing = np.array([res]*3)
elif len(res) == 3:
spacing = res
else:
raise ValueError(f'Given volume resolution {self.model.resolution} is invalid')
return spacing
    def build_actor(self, center_on_edges=False, add_to_scene=True):
"""
Set the volume actor for visualization in VTK
:param center_on_edges: Whether alignment by one voxel is applied
:param add_to_scene: Whether the object is added to the scene
"""
spacing = self.get_spacing()
self.actor = Volume(self.model.data, spacing=spacing, mapper='smart')
self.scalars = self.actor._data.GetPointData().GetScalars()
self.actor.name = self.model.name
self.actor.shade(False)
self.actor.mode(0)
self.actor.pickable(True)
self.set_interactive_subsampling(False)
if center_on_edges:
            # Moving the volume by one voxel. This is possibly due to the use of custom spacing.
self.actor.pos(self.actor.pos() + spacing)
center = np.array(self.actor.pos()) + self.actor.center()
if np.linalg.norm(center - self.model.center) > 0:
#print('Adjusting volume center from', self.model.center, 'to', center)
self.model.center = center
self.set_opacity_unit(0.9)
self.actor.jittering(True)
#self.actor._mapper.AutoAdjustSampleDistancesOn()
#self.actor._mapper.SetBlendModeToAverageIntensity()
#self.actor._mapper.SetSampleDistance(100)
if add_to_scene:
self.plot.add(self.actor, render=False)
def set_position(self, position):
"""
Set the position of the volume
"""
self.actor.pos(position)
# TODO: we're entering in unstable things when we move the volume
# because there is not yet a guaranteed support for updating the slices
# with the correct position
self.reset_clipping_planes()
def mirror_volume(self, axes):
"""
Mirror the volume on given axes
        :param axes: A list of axes (either 0, 1, 2 or 'x', 'y', 'z') on which
the volume will be mirrored. Optional
"""
if axes is None or self.actor is None:
return
axes_str = ['x', 'y', 'z']
for axis in axes:
if isinstance(axis, int) and 0 <= axis <= 2:
axis = axes_str[axis]
if isinstance(axis, str) and len(axis) == 1:
self.actor.mirror(axis=axis.lower())
def initialize_picker(self, opacity_iso_value=0.0001):
"""
Initialize the volume picker
:param opacity_iso_value: Threshold that defines at what accumulated
opacity the picker hits the volume. In the case of a segmented volume,
        you want to keep this value very low, like the default one.
"""
# As per C++ doc https://vtk.org/Wiki/VTK/Examples/Cxx/VTKConcepts/Scalars
# https://stackoverflow.com/questions/35378796/vtk-value-at-x-y-z-point
picker = vtk.vtkVolumePicker()
picker.PickCroppingPlanesOn()
picker.UseVolumeGradientOpacityOff()
picker.SetTolerance(opacity_iso_value)
# A low OpacityIsoValue is necessary in the case of segmented volumes
picker.SetVolumeOpacityIsovalue(opacity_iso_value)
picker.AddPickList(self.actor)
picker.PickFromListOn()
self.picker = picker
def initialize_slicer_box(self):
"""
Initialize 6 slicing planes as a box.
"""
for axis_id in range(6):
slicer_model = SlicerModel(axis=axis_id)
slicer_model.align_to_axis(axis_id, self.model.dimensions)
self.model.slicers.store(slicer_model)
# It's important in this case to have standalone=False
self.slicers[axis_id] = SlicerView(self.plot, self, slicer_model, standalone=False)
def update_slicer(self, slicer_id, value=None, normal=None):
"""
Update a given slicer with the given value
:param slicer_id: SlicerView id
:param value: Value or 3D point
:param normal: Normal
"""
slicer_view = self.slicers.get(slicer_id)
if slicer_view is None:
return
# This is an important part where the slicing plane is itself sliced by other planes
slicer_model = slicer_view.model
slicer_model.clipping_planes = self.get_clipping_planes(slicer_model.axis)
# Use given value (or point) and normal to guide the below code
result = slicer_model.update(value, normal)
if not result:
return
# Update slicing image
slicer_view.update()
def initialize_clipping_planes(self):
"""
Initialize X, Y and Z clipping planes with two planes per axis
for positive and negative slicing
"""
self.clipping_planes = vtk.vtkPlaneCollection()
slicer_models = self.model.slicers
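        # One clipping plane is registered per slicer; the slicer box defines six
        # of them, i.e. two per axis (positive and negative sides)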
for slicer_id in slicer_models:
self.clipping_planes.AddItem(vtk.vtkPlane())
self.reset_clipping_planes()
return
def get_clipping_planes(self, except_axis=None):
"""
Get the current clipping planes except the ones on the given axis
:param except_axis: Axis id to ignore. If None, all clipping planes will be returned
:return: vtkPlaneCollection
"""
if not isinstance(except_axis, int):
return self.clipping_planes
exceptions = [except_axis * 2, except_axis * 2 + 1]
planes = vtk.vtkPlaneCollection()
for plane_id in range(self.clipping_planes.GetNumberOfItems()):
if plane_id in exceptions:
continue
plane = self.clipping_planes.GetItem(plane_id)
planes.AddItem(plane)
return planes
def reset_clipping_planes(self):
"""
Reset clipping planes
"""
slicer_models = self.model.slicers
for slicer_id in slicer_models:
slicer_model = slicer_models[slicer_id]
plane_id = slicer_model.get_box_plane_id()
plane = self.clipping_planes.GetItem(plane_id)
plane.SetOrigin(slicer_model.origin + self.actor.pos())
plane.SetNormal(slicer_model.normal)
def clip_on_axis(self, position=None, axis=None, normal=None):
"""
Apply clipping on a single axis
:param position: Position
:param axis: Clipping axis, defaults to 0 (X axis)
        :param normal: Clipping normal; a negative component on the given axis selects the negative-side plane of that pair
"""
axis_offset = 0
# This should already be sorted in the model but in case it isn't, we double check here
if normal is not None and normal[axis] < 0:
# This means that the given axis has two
# clipping planes and we take the negative one
axis_offset += 1
#position = self.model.dimensions - position
axis_storage_id = axis * 2 + axis_offset
plane = self.clipping_planes.GetItem(axis_storage_id)
plane.SetOrigin(position)
plane.SetNormal(normal)
def set_volume_clipping(self, on=None):
"""
Set volume clipping on or off.
:param on: Whether clipping is enabled or disabled. If None, then
the state is toggled.
"""
if on is None:
self.enable_volume_clipping = not self.enable_volume_clipping
else:
self.enable_volume_clipping = on
if self.enable_volume_clipping:
self.actor.mapper().SetClippingPlanes(self.clipping_planes)
else:
self.actor.mapper().SetClippingPlanes(None)
def clip_to_bounds(self, bounds):
"""
Clip the volume and move the slicing planes according to 6 boundary points
:param bounds: Six values in a list (xmin, xmax, ymin, ymax, zmin, zmax)
"""
planes = vtk.vtkPlanes()
planes.SetBounds(bounds)
# Normals are reversed with the above code
# so we fix that here with flip_normals=True
self.set_clipping_planes(planes, flip_normals=True)
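# Illustrative sketch (not from the original code): clipping the rendering to a
# sub-box of the volume, assuming `controller` is an initialized instance of this
# class and the bounds are expressed in world coordinates.
#   controller.set_volume_clipping(True)
#   controller.clip_to_bounds([0, 200, 0, 150, 50, 100])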
def box_widget_update(self, widget=None, event=None):
"""
Clip the volume with the current box widget
:param widget: vtkBoxCutter
:param event: vtkEvent
"""
if widget is None:
return
planes = vtk.vtkPlanes()
widget.GetPlanes(planes)
self.set_clipping_planes(planes)
def set_clipping_planes(self, planes, flip_normals=False):
"""
Clip the volume and move the slicing planes according to the given planes
:param planes: vtkPlanes
:param flip_normals: Whether the plane normals should be flipped before being applied
"""
vtk_n = planes.GetNormals()
vtk_pts = planes.GetPoints()
num_pts = vtk_pts.GetNumberOfPoints()
for plane_id in range(num_pts):
normal = vtk_n.GetTuple(plane_id)
origin = vtk_pts.GetPoint(plane_id)
plane = self.clipping_planes.GetItem(plane_id)
current_origin = np.array(plane.GetOrigin())
# We don't need to check the normal because
# we prevent box cutter rotation in our case
if np.linalg.norm(current_origin - origin) < 0.1:
continue
plane.SetOrigin(origin)
if flip_normals:
normal = np.array(normal)*-1
plane.SetNormal(normal)
self.update_slicer(plane_id, origin, normal)
self.clipping_planes.Modified()
self.actor.GetMapper().Update()
def set_alpha_map(self, alpha_map, alpha_factor=None):
"""
Set alpha map to the volume view
:param alpha_map: 2D list of scalar values and alpha values
:param alpha_factor: Alpha factor
"""
if alpha_map is None:
if self.model.luts.current is None:
return
alpha_map = self.model.luts.current.alpha_map
if alpha_factor is None:
alpha_factor = self.alpha_factor
if len(np.array(alpha_map).shape) > 1:
volume_alpha_map = np.ones_like(alpha_map).astype(float)
volume_alpha_map[:] = alpha_map[:]
volume_alpha_map[:, 1] *= alpha_factor
self.actor.alpha(volume_alpha_map)
else:
self.actor.alpha(np.array(alpha_map) * alpha_factor)
def set_color_map(self, color_map=None, alpha_map=None):
"""
Set the color and alpha map to the view objects
:param color_map: Nested list of scalar values and rgb colors
like [[0, [0.0, 0.0, 0.0]], [8, [0.5, 0.8, 0.3]], ...]
:param alpha_map: 2D list of scalar values and alpha values
"""
lut = self.model.luts.current
if color_map is None and lut is not None:
color_map = lut.color_map
if alpha_map is None and lut is not None:
alpha_map = lut.alpha_map
if color_map is None:
return
self.actor.cmap(color_map)
self.set_alpha_map(alpha_map)
if lut is not None:
for surface in self.model.isosurfaces:
surface._mapper.SetLookupTable(lut.opaque_lut)
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
slicer.apply_lut(lut.mapped_lut)
else:
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
slicer.set_color_map(color_map, alpha_map)
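# Illustrative sketch (hypothetical values): the expected map layout is a list of
# [scalar, rgb] pairs for colors and [scalar, alpha] pairs for opacity, with
# `controller` standing for an initialized instance of this class.
#   color_map = [[0, [0.0, 0.0, 0.0]], [8, [0.5, 0.8, 0.3]], [42, [1.0, 1.0, 0.2]]]
#   alpha_map = [[0, 0.0], [8, 0.6], [42, 1.0]]
#   controller.set_color_map(color_map, alpha_map)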
def disable_shading(self):
"""
Disable volume shading
"""
volumeProperty = self.actor.GetProperty()
volumeProperty.ShadeOff()
self.actor.SetProperty(volumeProperty)
def enable_shading(self, ambient=0.6, diffuse=0.8, specular=0.9):
"""
Enable volume shading
TODO: See if this method is useful
"""
volumeProperty = self.actor.GetProperty()
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
volumeProperty.SetAmbient(ambient)
volumeProperty.SetDiffuse(diffuse)
volumeProperty.SetSpecular(specular)
volumeProperty.SetScalarOpacityUnitDistance(1)
self.actor.SetProperty(volumeProperty)
def toggle_slices_visibility(self):
"""
Toggle slices visibility
"""
self.model.slices_visible = not self.model.slices_visible
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
# update_slicer() expects the slicer id (dictionary key), not the SlicerView instance
self.update_slicer(slicer_id)
if slicer.actor is not None:
slicer.actor.SetVisibility(self.model.slices_visible)
def toggle_hollow(self):
"""
Toggle hollow mode for volume rendering. This is intended
to work only on segmented (annotated) volumes.
"""
volume_property = self.actor.GetProperty()
# Note the double negative in the VTK API: DisableGradientOpacityOff() actually enables gradient opacity
disabled = bool(volume_property.GetDisableGradientOpacity())
if disabled:
volume_property.DisableGradientOpacityOff()
alpha_gradient = vtk.vtkPiecewiseFunction()
alpha_gradient.AddPoint(0, 0.0)
alpha_gradient.AddPoint(1, 0.75)
alpha_gradient.AddPoint(2, 1.0)
volume_property.SetGradientOpacity(alpha_gradient)
else:
volume_property.DisableGradientOpacityOn()
return not disabled
def get_value_from_xyz(self, position, normal_step=None, avoid_values=0, cast_to_int=True, none_as_zero=False):
"""
Get a scalar value from the volume at the given XYZ coordinates, optionally offset by a normal step,
that is the probing normal multiplied by the distance to travel further into the volume
before picking a value. On a volume with non-uniform transparency, the "surface point" often lies
at the boundary between transparent parts (say, where 0 is transparent) and more opaque ones,
so the probe has to go deeper into the volume to find the value you are actually after.
:param position: 3D array
:param normal_step: A normal vector multiplied by the lookup distance, in case the raw position yields
bad or unwanted results
:param avoid_values: Value or list of values to avoid if possible
:param cast_to_int: Whether the value should be cast to integer
:param none_as_zero: Whether a None (not found) value is returned as 0
:return: Scalar value
"""
if isinstance(avoid_values, int) or isinstance(avoid_values, float):
avoid_values = [avoid_values]
# TODO: see if this is faster? To be tested
# ijk_result = [0.0, 0.0, 0.0]
# volume_actor._data.TransformPhysicalPointToContinuousIndex(xyz, ijk_result)
# volume_actor._data.GetPoint(ijk_result)
pt_id = self.actor._data.FindPoint(*position)
valid_id = 0 < pt_id < self.scalars.GetNumberOfValues()
value = self.scalars.GetValue(pt_id) if valid_id else None
if not valid_id or (value in avoid_values):
if normal_step is not None:
position += normal_step
pt_id = self.actor._data.FindPoint(*position)
valid_id = 0 < pt_id < self.scalars.GetNumberOfValues()
value = self.scalars.GetValue(pt_id) if valid_id else None
if cast_to_int and value is not None:
value = int(value)
if value is None and none_as_zero:
value = 0
return value
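# Illustrative sketch (assumed names): probing a single value at a 3D point,
# stepping a few voxels along the probing normal to skip transparent voxels.
#   step = normal * controller.model.resolution * 4
#   value = controller.get_value_from_xyz(point, normal_step=step, none_as_zero=True)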
def raycast(self, origin, screen_position):
"""
Shorthand for pick() method
"""
return self.pick(origin, screen_position)
def pick(self, origin, screen_position):
"""
Find the nearest intersection, even on a sliced volume, with the ray formed
by an origin and a screen-space position (given by VTK when you click on an actor)
:param origin: Origin of the vector
:param screen_position: 2D position on screen. This is given by vtk events like MouseRelease
:return: The nearest position and its related value queried in the volume image
"""
self.picker.Pick(*screen_position[:2], 0, self.plot.renderer)
position = np.array(self.picker.GetPickPosition())
ray = position - origin
distance = np.linalg.norm(ray)
normal = ray / distance
# Go half a voxel further to make sure we don't hit "void"
vol_position = position # + normal * self.model.resolution / 2
probe_position = position + normal * self.model.resolution * 10
closest_dist = distance
slice_position = None
# See if the line hits any of the slicers (that are image planes)
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.got_slice:
hits = slicer.actor.intersectWithLine(origin, probe_position)
if len(hits) != 1:
continue
new_dist = np.linalg.norm(position - hits[0])
if new_dist < closest_dist and new_dist < self.model.resolution * 2:
closest_dist = new_dist
slice_position = hits[0]
if slice_position is None:
position = vol_position
else:
position = slice_position
value = self.get_value_from_xyz(position, normal * self.model.resolution * 4)
return position, value
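# Illustrative sketch (assumed vedo event fields): typical use from a click
# callback, with the camera position as the ray origin and the 2D screen
# coordinates provided by the event.
#   def on_click(event):
#       origin = controller.plot.camera.GetPosition()
#       position, value = controller.pick(origin, event.picked2d)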
def add_probe(self, origin, destination, resolution=40, radius=10, color_map=None,
screen_space=True, min_v=None, max_v=None, add_to_scene=True):
"""
Add a series of points along a line probe
:param origin: Probe origin
:param destination: Probe destination point
:param resolution: Number of (equidistant) points that will be probed along that line
:param radius: Radius of the points
:param color_map: Scalars color map
:param screen_space: Whether the points are screen space or spheres
:param min_v: Min scalar value
:param max_v: Max scalar value
:param add_to_scene: Whether the new probe is added to scene
:return: Points
"""
if color_map is None:
color_map = self.model.luts.current.color_map
positions, values = self.probe(origin, destination, resolution)
points_obj = obj.Points(positions, values=values, radius=radius, screen_space=screen_space,
color_map=color_map, min_v=min_v, max_v=max_v)
points_obj.origin = origin
points_obj.destination = destination
# Dynamic properties assignment
points_obj.target = self.actor
points_obj.target_controller = self
if add_to_scene:
self.plot.add(points_obj)
return points_obj
def update_probe(self, origin, destination, points_obj):
"""
Update a probe with given start and end points
:param origin: Start point
:param destination: End point
:param points_obj: Points object
"""
resolution = points_obj._polydata.GetPoints().GetNumberOfPoints()
positions, values = self.probe(origin, destination, resolution)
points_obj.update_data(positions, values)
def probe(self, origin, destination, resolution=40):
"""
Probe a volume with a line
:param origin: Origin of the line probe
:param destination: Destination of the line probe
:param resolution: Number of point samples along the probe
:return: Positions and values
"""
origin = np.array(origin)
destination = np.array(destination)
distance = np.linalg.norm(destination - origin)
ray = destination - origin
ray_norm = ray / distance
step = distance / resolution
positions = [origin + ray_norm * p_id * step for p_id in range(resolution)]
values = np.array([self.get_value_from_xyz(point, none_as_zero=True) for point in positions])
return positions, values
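# Illustrative sketch: sampling values along a straight line through the volume
# (placeholder points, `controller` is an initialized instance of this class).
#   positions, values = controller.probe([0, 0, 0], [1000, 0, 0], resolution=40)
#   # `values` can then be plotted as a 1D intensity profile along the probe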
def set_interactive_subsampling(self, on=False):
"""
Set volume subsampling on or off.
This is enabled by default in VTK and we disable it by default in IBLViewer
:param on: Whether volume subsampling in interactive mode is on or off
"""
#self.plot.window.SetDesiredUpdateRate(0)
#self.actor._mapper.SetInteractiveUpdateRate(0)
self.model.interactive_subsampling = on
self.actor._mapper.SetAutoAdjustSampleDistances(on)
if on:
self.actor._mapper.InteractiveAdjustSampleDistancesOn()
else:
self.actor._mapper.InteractiveAdjustSampleDistancesOff()
def isosurface(self, label, exceptions=[0], force_rebuild=False, set_current=True, to_int=True, split_meshes=True):
"""
Creates a surface mesh (isosurface) of a segmented/labelled volume for the given value.
Unlike general isosurfacing, this method extracts only the surface mesh of the
desired region/label/segmentation, not of all values from 0 to label.
:param label: Label (scalar) value found in the volume
:param exceptions: If the label is found in the exceptions list, isosurfacing will not occur
:param force_rebuild: Whether rebuilding is forced in case we find an existing mesh for the given label
:param set_current: Whether the label is set as the current one in the model
:param to_int: Whether the label is cast to integer
:param split_meshes: Whether we split meshes when multiple ones are found
:return: A list of all manifold meshes for the given label
"""
if label is None or label in exceptions:
return
if to_int:
label = int(label)
existing_meshes = self.model.isosurfaces.get(label)
if existing_meshes is not None and not force_rebuild:
return existing_meshes
lut = self.model.luts.current
simple_lut = vtk.vtkLookupTable()
simple_lut.SetNumberOfColors(1)
simple_lut.SetTableRange(0, 1)
simple_lut.SetScaleToLinear()
simple_lut.SetTableValue(0, 0, 0, 0, 0)
simple_lut.SetTableValue(1, *lut.mapped_lut.GetTableValue(label))
simple_lut.Build()
# Generate object boundaries from labelled volume
discrete = vtk.vtkDiscreteMarchingCubes()
discrete.SetInputData(self.actor.imagedata())
discrete.GenerateValues(1, label, label)
smoothing_iterations = 15
pass_band = 0.001
feature_angle = 120.0
smoother = vtk.vtkWindowedSincPolyDataFilter()
smoother.SetInputConnection(discrete.GetOutputPort())
smoother.SetNumberOfIterations(smoothing_iterations)
smoother.BoundarySmoothingOff()
smoother.FeatureEdgeSmoothingOff()
smoother.SetFeatureAngle(feature_angle)
smoother.SetPassBand(pass_band)
smoother.NonManifoldSmoothingOn()
smoother.NormalizeCoordinatesOn()
smoother.Update()
self.model.isosurfaces[label] = []
#splitter = vtk.vtkExtractPolyDataGeometry()
if split_meshes:
splitter = vtk.vtkPolyDataConnectivityFilter()
splitter.SetInputConnection(smoother.GetOutputPort())
splitter.SetExtractionModeToAllRegions()
splitter.ColorRegionsOn()
splitter.Update()
for region_id in range(splitter.GetNumberOfExtractedRegions()):
#splitter.AddSpecifiedRegion(region_id)
#splitter.Update()
#poly = vtk.vtkPolyData()
#poly.ShallowCopy(splitter.GetOutput())
threshold = vtk.vtkThreshold()
threshold.SetInputConnection(splitter.GetOutputPort())
threshold.ThresholdBetween(region_id, region_id)
threshold.Update()
actor = vedo.Mesh(threshold.GetOutput())
#actor._mapper.SetScalarRange(min_value, lut.scalar_max)
#actor._mapper.SetUseLookupTableScalarRange(True)
actor._mapper.SetLookupTable(simple_lut)
actor._mapper.ScalarVisibilityOn()
actor.name = 'Isosurface_' + str(label)
self.model.isosurfaces[label].append(actor)
#actor.cmap(lut.scalar_lut, np.ones(poly.GetNumberOfVerts())*label)
else:
poly = smoother.GetOutput()
actor = vedo.Mesh(poly)
actor._mapper.SetLookupTable(simple_lut)
actor._mapper.ScalarVisibilityOn()
actor.name = 'Isosurface_' + str(label)
self.model.isosurfaces[label].append(actor)
'''
pdnorm = vtk.vtkPolyDataNormals()
pdnorm.SetInputData(smoother.GetOutput())
pdnorm.ComputePointNormalsOn()
pdnorm.ComputeCellNormalsOn()
pdnorm.FlipNormalsOff()
pdnorm.ConsistencyOn()
pdnorm.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(smoother.GetOutputPort())
mapper.SetLookupTable(lut.scalar_lut)
mapper.SetScalarRange(min_value, lut.scalar_max)
'''
if set_current:
self.model.isosurfaces.set_current(label)
return self.model.isosurfaces[label]
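# Illustrative sketch (assuming a segmented volume is loaded): extracting the
# surface meshes of one hypothetical label value and adding them to the plot.
#   meshes = controller.isosurface(315, split_meshes=True)
#   for mesh in meshes:
#       controller.plot.add(mesh)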
@dataclass
class SlicerModel:
PREFIX = '[Slicer]_'
MIN_SLAB_THICKNESS = 1.0 #um
__count = 0
def unique_name():
SlicerModel.__count += 1
return f'{SlicerModel.PREFIX}_{SlicerModel.__count}'
name: str = field(default_factory=unique_name)
# 0, 1 or 2. See the normal for axis orientation
axis: int = None
value: float = 0.0
bounds: np.ndarray = None
#thickness: float = 0.0
origin: np.ndarray = np.array([0.0, 0.0, 0.0])
normal: np.ndarray = np.array([1.0, 0.0, 0.0])
clipping_planes: vtk.vtkPlaneCollection = None
def get_box_plane_id(self):
"""
Get the plane id
:return: Int
"""
if self.axis is None:
return
offset = 0 if self.normal[self.axis] < 0 else 1
return self.axis * 2 + offset
def get_axis_aligned_info(self, vtk_axis):
"""
VTK stores box clipping planes in the order:
-X to +X: 0, 1
-Y to +Y: 2, 3
-Z to +Z: 4, 5
This method retrieves what is the XYZ axis (0, 1 or 2)
and its orientation sign
:return: Int axis and float orientation
"""
orientation = -1.0 if vtk_axis % 2 == 0 else 1.0
axis = (vtk_axis - vtk_axis % 2) // 2
return axis, orientation
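# Worked example of the ordering above: vtk_axis 3 lies on the +Y side, so
# get_axis_aligned_info(3) returns (1, 1.0), while vtk_axis 4 (-Z side)
# returns (2, -1.0).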
def align_to_axis(self, axis, dimensions=None):
"""
Set the axis of the slicer
:param axis: See parameter vtk_axis in SlicerModel.get_axis_aligned_info()
:param dimensions: Dimensions of the volume
"""
if not isinstance(axis, int):
return
normal = np.zeros(3).astype(float)
xyz_axis, orientation = self.get_axis_aligned_info(axis)
normal[xyz_axis] = orientation
self.axis = xyz_axis
if dimensions is not None and orientation < 0:
self.origin = np.zeros(3)
self.origin[xyz_axis] = dimensions[xyz_axis]
self.normal = normal
def flip_normal(self):
"""
Flip the normal of the slicer
"""
self.normal *= -1.0
self.check_normal()
# The axis index itself does not change when the normal is flipped;
# the plane side is derived from the normal sign in get_box_plane_id()
def check_normal(self):
"""
Check if the normal is axis-aligned.
If not, the axis is set to None.
"""
zeros = self.normal == 0
# Fewer than two zero components means the normal is no longer axis-aligned
if len(self.normal[zeros]) < 2:
self.axis = None
def update(self, value=None, normal=None, axis=None):
"""
Update slicer
:param value: Origin of the slicing plane
:param normal: Normal of the slicing plane
:param axis: Axis, if the plane is axis-aligned
:return: True if model changed, False if it didn't
"""
if not(isinstance(value, int) or isinstance(value, float)):
if normal is None:
normal = self.normal
if normal is None:
return False
if normal[1] == 0 and normal[2] == 0:
axis = 0 #if normal[0] > 0 else 1
elif normal[0] == 0 and normal[2] == 0:
axis = 1 #if normal[1] > 0 else 1
elif normal[0] == 0 and normal[1] == 0:
axis = 2 #if normal[2] > 0 else 1
if axis is not None:
value = value[axis]
if axis is None:
axis = self.axis
if self.value == value:
return False
if axis is not None:
self.value = value
self.origin = np.array(normal) * value
else:
self.value = None
self.origin = value
self.normal = normal
self.axis = axis
return True
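# Illustrative sketch (not part of the original code): creating a slicer model
# aligned to the VTK -Y plane of a hypothetical volume of dimensions [100, 200, 300].
#   model = SlicerModel()
#   model.align_to_axis(2, dimensions=[100, 200, 300])
#   # model.normal is now [0, -1, 0] and model.origin is [0, 200, 0]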
class SlicerView():
slices = {}
def __init__(self, plot, volume_view, slicer_model, standalone=True):
"""
Constructor
:param plot: Plot instance
:param volume_view: VolumeView instance
:param slicer_model: SlicerModel instance
:param standalone: Whether the slice is a standalone actor that
can be clicked. Set this to False if you want to use transparency;
the trade-off is that, due to a VTK bug, you won't be able to
click on the actor anymore and will need another way of detecting
where the user clicked. See more in initialize_mapper()
"""
self.plot = plot
self.volume_view = volume_view
self.model = slicer_model
self.actor = None
self.filter = None
self.reslice = None
self.slice_type = -1
self.depth_peeling_enabled = None
self.standalone = standalone
self.got_slice = False
self.color_map = None
self.alpha_map = None
self.initialize()
def initialize(self, render=False):
"""
Initialize the slicer object
"""
if self.filter is None:
self.filter = vtk.vtkImageDataGeometryFilter()
if self.actor is None:
self.actor = vedo.Mesh(self.filter.GetOutput())
# Adding empty actor so that it's updated later on
self.plot.add(self.actor, render=render)
self.actor.lighting('off')
self.actor.name = self.model.name
self.initialize_mapper()
def initialize_mapper(self):
"""
Initialize the object mapper
"""
mapper = self.actor._mapper
mapper.SetScalarModeToUsePointData() #SetScalarModeToUsePointFieldData
mapper.SetColorModeToMapScalars()
mapper.ScalarVisibilityOn()
# We operate on static volumes thanks to the double LUT mapping implemented here
mapper.SetStatic(True)
# Without using scalar range, the mapping will be off
mapper.SetUseLookupTableScalarRange(True)
# We prevent this actor from being pickable as a result of the bug described below
# when we want to use transparency on the slice.
self.actor.pickable(self.standalone)
if self.standalone:
# There is a bug in VTK 9 that prevents clicking on transparent objects
# as reported on vedo's tracker https://github.com/marcomusy/vedo/issues/291
# The "Force opaque fix" below should be gone with the next VTK update hopefully.
# In the meantime, we use this.
# TODO: remove this when this bug is fixed in VTK
self.actor.ForceOpaqueOn()
else:
# We bypass the transparent selection bug when a VolumeView has multiple slicers
# like in box mode because the click detection occurs on the volume and we perform
# an additional test to see if a slicer yields a nearby result. If it does,
# the result is like clicking on the slice and we get transparency for free.
pass
# Make sure we have depth peeling activated, otherwise transparency with volumes
# will look weird and in the wrong order
self.plot.renderer.UseDepthPeelingOn()
self.plot.renderer.UseDepthPeelingForVolumesOn()
segmented = self.volume_view.model.is_segmented()
if segmented:
# This very line below will mess up the entire slice coloring if:
# - you have a segmented volume and this is set to True
# - you have a non-segmented (like raw MRI, CT) volume and this is set to False
mapper.SetInterpolateScalarsBeforeMapping(not segmented)
mapper.Update()
def set_color_map(self, color_map, alpha_map=None):
"""
Set a color map to the slice
:param color_map: Color map, can be a string, a list of colors or more.
See vedo documentation.
:param alpha_map: 2D list of scalar values and alpha values (optional)
"""
self.color_map = color_map
if alpha_map is not None:
self.alpha_map = alpha_map
if self.got_slice and color_map is not None:
self.actor.cmap(self.color_map, alpha=self.alpha_map)
def set_slice_type(self, slice_type):
"""
Set the slice type. 0 for axial, 1 for free slicing
:param slice_type: Int value
"""
if slice_type == 0 and self.slice_type != slice_type:
self.slice_type = slice_type
self.filter.SetInputData(self.volume_view.actor.imagedata())
elif slice_type == 1 and self.slice_type != slice_type:
self.slice_type = slice_type
self.filter.SetInputData(self.reslice.GetOutput())
def slice_on_normal(self, origin, normal):
"""
Slice a volume with a plane oriented by the given normal.
This allows slicing in all directions.
:param origin: Origin of the slicing plane
:param normal: Normal of the slicing plane
:return: Mesh object with the slice as an image texture
"""
'''
mapper = vtk.vtkImageResliceMapper()
mapper.SetInputData(self.volume_view.actor._data)
mapper.SliceFacesCameraOff()
mapper.SliceAtFocalPointOff()
mapper.JumpToNearestSliceOn()
mapper.SetImageSampleFactor(2)
mapper.BorderOn()
mapper.BackgroundOff()
mapper.UpdateInformation()
mapper.GetSlicePlane().SetOrigin(*origin)
mapper.GetSlicePlane().SetNormal(*normal)
mapper.GetSlicePlane().Modified()
mapper.Modified()
mapper.Update()
self.actor = vtk.vtkImageSlice()
self.actor.SetMapper(mapper)
prop = vtk.vtkImageProperty()
if True:
prop.SetInterpolationTypeToLinear()
else:
prop.SetInterpolationTypeToNearest()
self.actor.SetProperty(prop)
return
'''
if self.reslice is None:
reslice = vtk.vtkImageReslice()
reslice.SetInputData(self.volume_view.actor._data)
#reslice.SetInputData(image)
reslice.SetOutputDimensionality(2)
reslice.SetAutoCropOutput(False)
#reslice.SetInterpolationModeToLinear()
reslice.SetInterpolationModeToNearestNeighbor()
reslice.SetSlabNumberOfSlices(1)
reslice.SetOutputSpacing(self.volume_view.get_spacing())
reslice.ReleaseDataFlagOn()
self.reslice = reslice
self.set_slice_type(1)
M, T = utils.get_transformation_matrix(origin, normal)
self.reslice.SetResliceAxes(M)
self.reslice.Update()
self.filter.Update()
if self.actor is None:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
else:
self.actor._update(self.filter.GetOutput())
self.initialize_mapper()
self.actor.SetOrientation(T.GetOrientation())
self.actor.SetPosition(origin)
self.got_slice = True
return self.actor
def x_slice(self, i):
"""
Extract the slice at index `i` of volume along x-axis.
:param i: I index
"""
self.set_slice_type(0)
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if i <= 1 or i > nx - 1:
return False
self.filter.SetExtent(i, i, 0, ny, 0, nz)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def y_slice(self, j):
"""
Extract the slice at index `j` of volume along y-axis.
:param j: J index
"""
self.set_slice_type(0)
#nx, ny, nz = self.volume_view.model.dimensions / resolution
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if j <= 1 or j > ny - 1:
return False
self.filter.SetExtent(0, nx, j, j, 0, nz)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def z_slice(self, k):
"""
Extract the slice at index `k` of volume along z-axis.
:param k: K index
"""
self.set_slice_type(0)
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if k <= 1 or k > nz - 1:
return False
self.filter.SetExtent(0, nx, 0, ny, k, k)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def slice_on_axis(self, value=None, normal=None, axis=None, use_reslice=False):
"""
Slice on standard X, Y or Z axis
:param value: Value on the given axis
:param normal: Axis normal, can be either +1.0 or -1.0 along that axis
:param axis: Axis integer, 0 for X, 1 for Y, 2 for Z
:param use_reslice: if True, this enables vtkImageReslice which is useful when
the normal is not aligned to either X, Y or Z. If you use it on an axis-aligned
normal, some color inaccuracies will appear if you don't tweak the vtkImageResliceMapper.
This is why the default is False.
:return: Result boolean, whether the slice occurred or not
"""
resolution = self.volume_view.model.resolution
volume_dimensions = self.volume_view.model.dimensions
'''
if normal[axis] < 0:
if value > 0:
# Make value consistent with given normal.
value *= normal[axis]
value = volume_dimensions[axis] + value
'''
in_volume_slice = int(value) // resolution
if use_reslice:
self.slice_on_normal(normal * value, normal)
return
if axis == 0:
result = self.x_slice(in_volume_slice)
elif axis == 1:
result = self.y_slice(in_volume_slice)
elif axis == 2:
result = self.z_slice(in_volume_slice)
return result
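# Illustrative sketch (assumed numbers, `slicer` is a SlicerView instance): with a
# volume resolution of 25 um per voxel, slicing the X axis at value 500 selects
# voxel slice 500 // 25 = 20.
#   slicer.slice_on_axis(value=500, normal=np.array([1.0, 0.0, 0.0]), axis=0)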
def update(self):
"""
Update slice object according to data in the model
"""
had_slice = self.got_slice
result = True
if isinstance(self.model.axis, int) and 0 <= self.model.axis <= 2:
result = self.slice_on_axis(self.model.value, self.model.normal, self.model.axis)
else:
self.slice_on_normal(self.model.origin, self.model.normal)
if not result:
self.plot.remove(self.actor)
self.got_slice = False
return
#self.actor.pos(*(self.volume_view.actor.pos()-self.actor.pos()))
lut = self.volume_view.model.luts.current
if lut is not None:
'''
This is VTK for you...a mesh can use a vtkLookupTable for RGBA mapping
BUT volumes require vtkColorTransferFunction (RGB) and vtkPiecewiseFunction (alpha)
So we have to put a color map, alpha map and a vtkLookupTable
built from both maps in a LUTModel.
Alternatively, we could update the LUT with alpha values but it's a pain.
ctf = self.volume_view.actor.GetProperty().GetRGBTransferFunction()
lut = vedo.utils.ctf2lut(self.volume_view.actor)
otf = self.volume_view.actor.GetProperty().GetScalarOpacity
# using "ctf" would work only for colors, not for transparency!
self.apply_lut(ctf)
'''
self.apply_lut(lut.mapped_lut)
else:
if self.alpha_map is None:
self.actor.cmap(self.color_map)
else:
self.actor.cmap(self.color_map, alpha=self.alpha_map)
if self.model.clipping_planes is not None:
self.actor.mapper().SetClippingPlanes(self.model.clipping_planes)
if not had_slice:
self.plot.add(self.actor, render=True)
def apply_lut(self, lut=None):
"""
Apply a LUT to the slice actor
:param lut: vtkLookupTable
"""
if self.actor is None or lut is None:
return
mapper = self.actor._mapper
mapper.SetLookupTable(lut) | [((36, 16, 36, 50), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((39, 26, 39, 59), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((40, 17, 40, 57), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((57, 23, 57, 56), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((58, 26, 58, 59), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((59, 30, 59, 63), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((1374, 16, 1374, 50), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((1380, 25, 1380, 50), 'numpy.array', 'np.array', ({(1380, 34, 1380, 49): '[0.0, 0.0, 0.0]'}, {}), '([0.0, 0.0, 0.0])', True, 'import numpy as np\n'), ((1381, 25, 1381, 50), 'numpy.array', 'np.array', ({(1381, 34, 1381, 49): '[1.0, 0.0, 0.0]'}, {}), '([1.0, 0.0, 0.0])', True, 'import numpy as np\n'), ((89, 24, 89, 41), 'numpy.min', 'np.min', ({(89, 31, 89, 40): 'self.data'}, {}), '(self.data)', True, 'import numpy as np\n'), ((90, 24, 90, 41), 'numpy.max', 'np.max', ({(90, 31, 90, 40): 'self.data'}, {}), '(self.data)', True, 'import numpy as np\n'), ((180, 20, 180, 50), 'numpy.transpose', 'np.transpose', ({(180, 33, 180, 42): 'self.data', (180, 44, 180, 49): 'shape'}, {}), '(self.data, shape)', True, 'import numpy as np\n'), ((191, 8, 191, 79), 'logging.info', 'logging.info', ({(191, 21, 191, 78): '"""\nBuilding appropriate volume from Allen data source..."""'}, {}), '("""\nBuilding appropriate volume from Allen data source...""")', False, 'import logging\n'), ((267, 8, 267, 36), 'vtk.vtkVolume.__init__', 'vtk.vtkVolume.__init__', ({(267, 31, 267, 35): 'self'}, {}), '(self)', False, 'import vtk\n'), ((268, 8, 268, 36), 'vedo.BaseGrid.__init__', 'vedo.BaseGrid.__init__', ({(268, 31, 268, 35): 'self'}, {}), '(self)', False, 'import vedo\n'), ((477, 14, 477, 34), 'vtk.vtkLookupTable', 'vtk.vtkLookupTable', ({}, {}), '()', False, 'import vtk\n'), ((478, 21, 478, 41), 'vtk.vtkLookupTable', 'vtk.vtkLookupTable', ({}, {}), '()', False, 'import vtk\n'), ((614, 23, 614, 36), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((834, 17, 834, 38), 'vtk.vtkVolumePicker', 'vtk.vtkVolumePicker', ({}, {}), '()', False, 'import vtk\n'), ((883, 31, 883, 55), 'vtk.vtkPlaneCollection', 'vtk.vtkPlaneCollection', ({}, {}), '()', False, 'import vtk\n'), ((899, 17, 899, 41), 'vtk.vtkPlaneCollection', 'vtk.vtkPlaneCollection', ({}, {}), '()', False, 'import vtk\n'), ((958, 17, 958, 32), 'vtk.vtkPlanes', 'vtk.vtkPlanes', ({}, {}), '()', False, 'import vtk\n'), ((972, 17, 972, 32), 'vtk.vtkPlanes', 'vtk.vtkPlanes', ({}, {}), '()', False, 'import vtk\n'), ((1153, 19, 1153, 38), 'numpy.linalg.norm', 'np.linalg.norm', ({(1153, 34, 1153, 37): 'ray'}, {}), '(ray)', True, 'import numpy as np\n'), ((1201, 21, 1202, 78), 'iblviewer.objects.Points', 'obj.Points', (), '', True, 'import iblviewer.objects as obj\n'), ((1233, 17, 1233, 33), 'numpy.array', 'np.array', ({(1233, 26, 1233, 32): 'origin'}, {}), '(origin)', True, 'import numpy as np\n'), ((1234, 22, 1234, 43), 'numpy.array', 'np.array', ({(1234, 31, 1234, 42): 'destination'}, {}), '(destination)', True, 'import numpy as np\n'), ((1235, 19, 1235, 55), 'numpy.linalg.norm', 'np.linalg.norm', ({(1235, 34, 1235, 54): 
'destination - origin'}, {}), '(destination - origin)', True, 'import numpy as np\n'), ((1280, 21, 1280, 41), 'vtk.vtkLookupTable', 'vtk.vtkLookupTable', ({}, {}), '()', False, 'import vtk\n'), ((1289, 19, 1289, 49), 'vtk.vtkDiscreteMarchingCubes', 'vtk.vtkDiscreteMarchingCubes', ({}, {}), '()', False, 'import vtk\n'), ((1297, 19, 1297, 54), 'vtk.vtkWindowedSincPolyDataFilter', 'vtk.vtkWindowedSincPolyDataFilter', ({}, {}), '()', False, 'import vtk\n'), ((1643, 15, 1643, 62), 'iblviewer.utils.get_transformation_matrix', 'utils.get_transformation_matrix', ({(1643, 47, 1643, 53): 'origin', (1643, 55, 1643, 61): 'normal'}, {}), '(origin, normal)', True, 'import iblviewer.utils as utils\n'), ((66, 29, 66, 40), 'numpy.zeros', 'np.zeros', ({(66, 38, 66, 39): '(3)'}, {}), '(3)', True, 'import numpy as np\n'), ((67, 25, 67, 36), 'numpy.zeros', 'np.zeros', ({(67, 34, 67, 35): '(3)'}, {}), '(3)', True, 'import numpy as np\n'), ((75, 26, 75, 51), 'numpy.array', 'np.array', ({(75, 35, 75, 50): 'self.data.shape'}, {}), '(self.data.shape)', True, 'import numpy as np\n'), ((124, 40, 124, 79), 'vedo.download', 'vedo.download', (), '', False, 'import vedo\n'), ((151, 19, 151, 33), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((152, 28, 152, 104), 'iblviewer.utils.change_file_name', 'utils.change_file_name', ({(152, 51, 152, 60): 'file_path', (152, 62, 152, 66): 'None', (152, 68, 152, 72): 'None', (152, 74, 152, 103): 'VolumeModel.NORMALIZED_SUFFIX'}, {}), '(file_path, None, None, VolumeModel.NORMALIZED_SUFFIX)', True, 'import iblviewer.utils as utils\n'), ((153, 15, 153, 44), 'os.path.exists', 'os.path.exists', ({(153, 30, 153, 43): 'new_file_path'}, {}), '(new_file_path)', False, 'import os\n'), ((193, 25, 193, 40), 'numpy.unique', 'np.unique', ({(193, 35, 193, 39): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((196, 22, 196, 39), 'pandas.Series', 'pd.Series', ({(196, 32, 196, 38): 'labels'}, {}), '(labels)', True, 'import pandas as pd\n'), ((210, 12, 210, 66), 'logging.info', 'logging.info', ({(210, 25, 210, 65): "('Saving volume data under ' + write_path)"}, {}), "('Saving volume data under ' + write_path)", False, 'import logging\n'), ((211, 12, 211, 57), 'nrrd.write', 'nrrd.write', (), '', False, 'import nrrd\n'), ((285, 27, 285, 58), 'vtk.vtkGPUVolumeRayCastMapper', 'vtk.vtkGPUVolumeRayCastMapper', ({}, {}), '()', False, 'import vtk\n'), ((304, 18, 304, 36), 'vtk.vtkImageData', 'vtk.vtkImageData', ({}, {}), '()', False, 'import vtk\n'), ((306, 13, 306, 44), 'vedo.utils.isSequence', 'vedo.utils.isSequence', ({(306, 35, 306, 43): 'inputobj'}, {}), '(inputobj)', False, 'import vedo\n'), ((544, 26, 544, 62), 'numpy.linspace', 'np.linspace', ({(544, 38, 544, 43): 's_min', (544, 45, 544, 50): 's_max', (544, 52, 544, 61): 'num_steps'}, {}), '(s_min, s_max, num_steps)', True, 'import numpy as np\n'), ((761, 22, 761, 39), 'numpy.array', 'np.array', ({(761, 31, 761, 38): '[res] * 3'}, {}), '([res] * 3)', True, 'import numpy as np\n'), ((1092, 29, 1092, 55), 'vtk.vtkPiecewiseFunction', 'vtk.vtkPiecewiseFunction', ({}, {}), '()', False, 'import vtk\n'), ((1311, 23, 1311, 58), 'vtk.vtkPolyDataConnectivityFilter', 'vtk.vtkPolyDataConnectivityFilter', ({}, {}), '()', False, 'import vtk\n'), ((1338, 20, 1338, 35), 'vedo.Mesh', 'vedo.Mesh', ({(1338, 30, 1338, 34): 'poly'}, {}), '(poly)', False, 'import vedo\n'), ((1421, 26, 1421, 37), 'numpy.zeros', 'np.zeros', ({(1421, 35, 1421, 36): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((1516, 26, 1516, 
58), 'vtk.vtkImageDataGeometryFilter', 'vtk.vtkImageDataGeometryFilter', ({}, {}), '()', False, 'import vtk\n'), ((1630, 22, 1630, 43), 'vtk.vtkImageReslice', 'vtk.vtkImageReslice', ({}, {}), '()', False, 'import vtk\n'), ((126, 31, 126, 67), 'nrrd.read', 'nrrd.read', ({(126, 41, 126, 66): 'downloaded_temp_file_path'}, {}), '(downloaded_temp_file_path)', False, 'import nrrd\n'), ((128, 23, 128, 68), 'vedo.loadImageData', 'vedo.loadImageData', ({(128, 42, 128, 67): 'downloaded_temp_file_path'}, {}), '(downloaded_temp_file_path)', False, 'import vedo\n'), ((131, 31, 131, 68), 'nrrd.read', 'nrrd.read', (), '', False, 'import nrrd\n'), ((133, 23, 133, 52), 'vedo.loadImageData', 'vedo.loadImageData', ({(133, 42, 133, 51): 'file_path'}, {}), '(file_path)', False, 'import vedo\n'), ((277, 27, 277, 60), 'vedo.io.download', 'download', (), '', False, 'from vedo.io import loadImageData, download\n'), ((278, 17, 278, 41), 'os.path.isfile', 'os.path.isfile', ({(278, 32, 278, 40): 'inputobj'}, {}), '(inputobj)', False, 'import os\n'), ((287, 27, 287, 64), 'vtk.vtkOpenGLGPUVolumeRayCastMapper', 'vtk.vtkOpenGLGPUVolumeRayCastMapper', ({}, {}), '()', False, 'import vtk\n'), ((504, 24, 504, 61), 'vedo.colors.getColor', 'vedo.colors.getColor', ({(504, 45, 504, 60): 'color_map[r_id]'}, {}), '(color_map[r_id])', False, 'import vedo\n'), ((505, 24, 505, 39), 'numpy.array', 'np.array', ({(505, 33, 505, 38): 'color'}, {}), '(color)', True, 'import numpy as np\n'), ((787, 15, 787, 57), 'numpy.linalg.norm', 'np.linalg.norm', ({(787, 30, 787, 56): '(center - self.model.center)'}, {}), '(center - self.model.center)', True, 'import numpy as np\n'), ((886, 41, 886, 55), 'vtk.vtkPlane', 'vtk.vtkPlane', ({}, {}), '()', False, 'import vtk\n'), ((991, 15, 991, 54), 'numpy.linalg.norm', 'np.linalg.norm', ({(991, 30, 991, 53): '(current_origin - origin)'}, {}), '(current_origin - origin)', True, 'import numpy as np\n'), ((1169, 27, 1169, 61), 'numpy.linalg.norm', 'np.linalg.norm', ({(1169, 42, 1169, 60): 'position - hits[0]'}, {}), '(position - hits[0])', True, 'import numpy as np\n'), ((1324, 28, 1324, 46), 'vtk.vtkThreshold', 'vtk.vtkThreshold', ({}, {}), '()', False, 'import vtk\n'), ((1416, 17, 1416, 28), 'numpy.zeros', 'np.zeros', ({(1416, 26, 1416, 27): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((1470, 26, 1470, 42), 'numpy.array', 'np.array', ({(1470, 35, 1470, 41): 'normal'}, {}), '(normal)', True, 'import numpy as np\n'), ((80, 22, 80, 32), 'numpy.ones', 'np.ones', ({(80, 30, 80, 31): '(3)'}, {}), '(3)', True, 'import numpy as np\n'), ((289, 27, 289, 53), 'vtk.vtkSmartVolumeMapper', 'vtk.vtkSmartVolumeMapper', ({}, {}), '()', False, 'import vtk\n'), ((309, 22, 309, 42), 'vtk.vtkImageAppend', 'vtk.vtkImageAppend', ({}, {}), '()', False, 'import vtk\n'), ((338, 22, 338, 40), 'vtk.vtkImageData', 'vtk.vtkImageData', ({}, {}), '()', False, 'import vtk\n'), ((492, 20, 492, 45), 'numpy.random.rand', 'np.random.rand', ({(492, 35, 492, 44): 'num_steps'}, {}), '(num_steps)', True, 'import numpy as np\n'), ((508, 28, 508, 50), 'numpy.maximum', 'np.maximum', ({(508, 39, 508, 44): 'color', (508, 46, 508, 49): '0.0'}, {}), '(color, 0.0)', True, 'import numpy as np\n'), ((509, 28, 509, 50), 'numpy.minimum', 'np.minimum', ({(509, 39, 509, 44): 'color', (509, 46, 509, 49): '1.0'}, {}), '(color, 1.0)', True, 'import numpy as np\n'), ((533, 28, 533, 73), 'vedo.colorMap', 'vedo.colorMap', ({(533, 42, 533, 47): 'value', (533, 49, 533, 58): 'color_map', (533, 60, 533, 65): 's_min', (533, 67, 533, 72): 's_max'}, {}), '(value, 
color_map, s_min, s_max)', False, 'import vedo\n'), ((548, 29, 548, 86), 'vedo.colorMap', 'vedo.colorMap', ({(548, 43, 548, 60): 'mock_values[r_id]', (548, 62, 548, 71): 'color_map', (548, 73, 548, 78): 's_min', (548, 80, 548, 85): 's_max'}, {}), '(mock_values[r_id], color_map, s_min, s_max)', False, 'import vedo\n'), ((995, 25, 995, 41), 'numpy.array', 'np.array', ({(995, 34, 995, 40): 'normal'}, {}), '(normal)', True, 'import numpy as np\n'), ((1013, 15, 1013, 34), 'numpy.array', 'np.array', ({(1013, 24, 1013, 33): 'alpha_map'}, {}), '(alpha_map)', True, 'import numpy as np\n'), ((1014, 31, 1014, 54), 'numpy.ones_like', 'np.ones_like', ({(1014, 44, 1014, 53): 'alpha_map'}, {}), '(alpha_map)', True, 'import numpy as np\n'), ((1019, 29, 1019, 48), 'numpy.array', 'np.array', ({(1019, 38, 1019, 47): 'alpha_map'}, {}), '(alpha_map)', True, 'import numpy as np\n'), ((281, 34, 281, 53), 'glob.glob', 'glob.glob', ({(281, 44, 281, 52): 'inputobj'}, {}), '(inputobj)', False, 'import glob\n'), ((291, 27, 291, 65), 'vtk.vtkFixedPointVolumeRayCastMapper', 'vtk.vtkFixedPointVolumeRayCastMapper', ({}, {}), '()', False, 'import vtk\n'), ((314, 27, 314, 45), 'vtk.vtkBMPReader', 'vtk.vtkBMPReader', ({}, {}), '()', False, 'import vtk\n'), ((317, 26, 317, 49), 'vtk.vtkImageMagnitude', 'vtk.vtkImageMagnitude', ({}, {}), '()', False, 'import vtk\n'), ((327, 31, 327, 49), 'numpy.array', 'np.array', ({(327, 40, 327, 48): 'inputobj'}, {}), '(inputobj)', True, 'import numpy as np\n'), ((330, 27, 330, 67), 'vedo.numpy2vtk', 'vedo.numpy2vtk', (), '', False, 'import vedo\n'), ((343, 24, 343, 105), 'vedo.colors.printc', 'vedo.colors.printc', (), '', False, 'import vedo\n'), ((158, 65, 158, 86), 'iblviewer.utils.time_diff', 'utils.time_diff', ({(158, 81, 158, 85): 'time'}, {}), '(time)', True, 'import iblviewer.utils as utils\n'), ((375, 18, 375, 41), 'vedo.io.loadImageData', 'loadImageData', ({(375, 32, 375, 40): 'inputobj'}, {}), '(inputobj)', False, 'from vedo.io import loadImageData, download\n'), ((378, 12, 378, 93), 'vedo.colors.printc', 'vedo.colors.printc', (), '', False, 'import vedo\n'), ((374, 27, 374, 60), 'vedo.io.download', 'download', (), '', False, 'from vedo.io import loadImageData, download\n')] |
LaudateCorpus1/ml-cread | modeling/dataset.py | b5d5aa87faa0ddad0b41b6b0672395a8bf6147ae | #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2021 Apple Inc. All Rights Reserved.
#
'''
Dataset file
'''
import sys
import time
import json
import copy
from itertools import chain
from tqdm import tqdm, trange
import torch
from torch.utils.data import DataLoader, RandomSampler
SPECIAL_TOKENS = {
"bos_token": "<BOS>",
"eos_token": "<EOS>",
"pad_token": "<PAD>",
"sep_token": "<SEP>",
"additional_special_tokens": ["<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"]
}
SPECIAL_TOKENS_VALUES = ["<BOS>", "<EOS>", "<PAD>", "<SEP>", "<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"]
class Dataset(torch.utils.data.Dataset):
def __init__(self, args, tokenizer, data_type, generation, data_size):
assert data_type in ['train', 'dev', 'test']
self.args = args
self.data_size = data_size
self.tokenizer = tokenizer
self.data_type = data_type
self.generation = generation
self._get_special_token_ids()
self._create_examples()
def _get_special_token_ids(self):
self.SPECIAL_TOKENS = SPECIAL_TOKENS
self.SPECIAL_TOKENS_VALUES = SPECIAL_TOKENS_VALUES
self.bos_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["bos_token"])
self.eos_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["eos_token"])
self.pad_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["pad_token"])
self.sep_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["sep_token"])
# mention detection vocab
self.mc_cl2idx = {'<N>': 0, '<M>': 1, '</M>': 2} # <N>: none, <M>: start of mention, "</M>": end of mention
self.mc_idx2cl = {v: k for k, v in self.mc_cl2idx.items()}
def prepare_reference_label(self, word_label_index, wordId2tokenId, input_ids):
'''
Record the indexes of the start/end of mentions and references in the input utterance.
This info will be used as the attention signal in the reference resolution step.
'''
reconstruct_sentence = self.tokenizer.convert_ids_to_tokens(input_ids)
reconstruct_sentence = [token.replace('Ġ', '') for token in reconstruct_sentence]
token_label_index = []
for start_end_link in word_label_index:
for link_meta in start_end_link:
attention_word_idx, mention_word_idx = link_meta['attention_idx'], link_meta['mention_idx']
if link_meta['mention_type'] == 'start':
attention_token_idx = wordId2tokenId[attention_word_idx][0]
else: # end
attention_token_idx = wordId2tokenId[attention_word_idx][-1]
for mention_token_idx in wordId2tokenId[mention_word_idx]:
link = {}
link['mention_token_idx'] = mention_token_idx
link['attention_token_idx'] = attention_token_idx
assert reconstruct_sentence[mention_token_idx] in link_meta['mention_word']
assert reconstruct_sentence[attention_token_idx] in link_meta['attention_word']
token_label_index.append(link)
return token_label_index
def prepare_binary_label(self, input_ids, wordId2tokenId, binary_rewrite, curr_end_token_idx):
''' Only the token at the start of rewriting receives the binary signal '''
binary_label = [-100] * len(input_ids)
assert isinstance(binary_rewrite, bool)
if binary_rewrite == True:
binary_label[curr_end_token_idx] = 1 # rewrite
else:
binary_label[curr_end_token_idx] = 0 # not rewrite
return binary_label
def prepare_mention_label(self, input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx):
'''
Get label indexes for mention detection.
Only the tokens of the current utterance receive a signal; everywhere else gets -100.
'''
mention_label = [-100] * len(input_ids)
curr_start_idx = wordId2tokenId[curr_start_idx][0]
curr_end_idx = wordId2tokenId[curr_end_idx-1][-1] + 1
# assign class <N> (none) everywhere in the current utterance first
mention_label[curr_start_idx: curr_end_idx] = [ self.mc_cl2idx['<N>'] ] * (curr_end_idx-curr_start_idx)
for start_end_link in word_label_index: # iterate over links in one example
for link_meta in start_end_link: # iterate over start and end of a link
idx = link_meta['mention_idx']
if link_meta['mention_type'] == 'start': # align class <M> (start of mention)
for idx in wordId2tokenId[idx]:
mention_label[idx] = self.mc_cl2idx['<M>']
else: # align class </M> (end of mention)
idx = wordId2tokenId[idx][-1]
mention_label[idx] = self.mc_cl2idx['</M>']
return mention_label, curr_start_idx, curr_end_idx
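# Illustrative sketch (hypothetical indexes): for a 10-token input whose current
# utterance spans token positions 6..9 and contains a two-token mention at
# positions 7-8, the produced labels would look like
#   mention_label = [-100, -100, -100, -100, -100, -100, 0, 1, 2, 0]
# i.e. -100 outside the current utterance, <N>=0 by default inside it, and
# <M>=1 / </M>=2 marking the start and end of the mention.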
def _check_label_index(self, whole_input, links):
''' sanity check for index correctness '''
seq = whole_input.split()
for link in links:
for start_or_end in link:
for word_type in ['mention', 'attention']:
assert seq[start_or_end['{}_idx'.format(word_type)]] == start_or_end['{}_word'.format(word_type)]
def _create_examples(self):
if self.data_type == 'train':
data_file = self.args.train_file
elif self.data_type == 'dev':
data_file = self.args.dev_file
else:
data_file = self.args.test_file
with open(data_file) as f:
data = json.load(f)
self.examples = []
for example_num, example in enumerate(tqdm(data, disable=self.args.disable_display)):
if self.data_size != -1 and example_num == self.data_size:
break
# get data
context = example['dialogue context'] # context, list of str
curr_utt = example['current utterance'] # current utterance, str
rewt_utt = example['rewrite utterance'] # rewrite utterance, str
word_label_index = example['link index'] # index of mention/reference span
binary_rewrite = example['rewrite happen'] # binary label for rewrite or not, bool
# prepare input sequence to model
whole_input = copy.deepcopy(context)
whole_input.append(curr_utt)
curr_start_idx = sum([len(s.split()) for s in context]) # the (word) start idx of current utt
curr_end_idx = curr_start_idx + len(curr_utt.split())
whole_input = " ".join(whole_input)
self._check_label_index(whole_input, word_label_index)
input_ids, wordId2tokenId, tokenId2wordId = self.tokenize_with_map(whole_input)
if rewt_utt == "":
rewt_utt_ids = []
else:
rewt_utt_ids = self.tokenizer(rewt_utt)['input_ids'] # list
target_utt_ids = rewt_utt_ids
target_utt_len = len(target_utt_ids)
if not self.generation:
# input seq: CTX <CUR> current utterance <SEP> rewritten utterance <EOS>
input_ids = input_ids + [self.sep_id] + target_utt_ids + [self.eos_id]
# mention detection signal
mention_label, curr_start_token_idx, curr_end_token_idx = \
self.prepare_mention_label(input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx)
# reference resolution signal
reference_label_index = self.prepare_reference_label(word_label_index, wordId2tokenId, input_ids)
# binary classification of rewriting signal
binary_label = self.prepare_binary_label(input_ids, wordId2tokenId, binary_rewrite, curr_end_token_idx)
# rewriting singal
ignore_len = len(input_ids) - target_utt_len - 1 # eos_id
label_ids = [-100] * ignore_len + target_utt_ids + [self.eos_id]
assert len(input_ids) == len(label_ids)
else: # generation
# <sep> is given at first step during decoding
input_ids = input_ids
label_ids = None
mention_label, curr_start_token_idx, curr_end_token_idx = \
self.prepare_mention_label(input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx)
reference_label_index = self.prepare_reference_label(word_label_index, wordId2tokenId, input_ids)
binary_label = None
self.examples.append({
'input_ids': input_ids, # list of ids
'label_ids': label_ids, # list of ids
'mention_label_ids': mention_label,
'curr_start_token_idx': curr_start_token_idx,
'curr_end_token_idx': curr_end_token_idx,
'reference_label': reference_label_index,
'wordId2tokenId': wordId2tokenId,
'tokenId2wordId': tokenId2wordId,
'context': context,
'curr_utt': curr_utt,
'whole_input': whole_input,
'rewt_utt': rewt_utt,
'example_id': example['example index'],
'spk': example['speaker'],
'coref_label': word_label_index,
'binary_label_ids': binary_label,
'binary_rewrite': binary_rewrite
})
print('Data Statistics: {} -> {} examples'.format(self.data_type, len(self.examples)))
def _pad(self, sentences, pad_id):
'''
sentences: a list of list with ids
'''
max_len = max((map(len, sentences)))
attention_mask = []
sentences_pad = []
for sent in sentences:
pad_len = max_len - len(sent)
sentences_pad.append( sent + [pad_id]*pad_len )
attention_mask.append( [1]*len(sent) + [0]*pad_len)
return sentences_pad, attention_mask
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return self.examples[index]
def collate_fn(self, batch):
input_ids = [example['input_ids'] for example in batch]
input_ids, attention_mask = self._pad(input_ids, self.pad_id)
input_ids, attention_mask = torch.tensor(input_ids).long().to(self.args.device), torch.tensor(attention_mask).long().to(self.args.device)
if not self.generation:
label_ids = [example['label_ids'] for example in batch]
label_ids, _ = self._pad(label_ids, -100)
label_ids = torch.tensor(label_ids).long().to(self.args.device)
mention_label_ids = [example['mention_label_ids'] for example in batch]
mention_label_ids, _ = self._pad(mention_label_ids, -100)
mention_label_ids = torch.tensor(mention_label_ids).long().to(self.args.device)
binary_label_ids = [example['binary_label_ids'] for example in batch]
binary_label_ids, _ = self._pad(binary_label_ids, -100)
binary_label_ids = torch.tensor(binary_label_ids).long().to(self.args.device)
else:
label_ids = None
mention_label_ids = [example['mention_label_ids'] for example in batch]
mention_label_ids, _ = self._pad(mention_label_ids, -100)
mention_label_ids = torch.tensor(mention_label_ids).long().to(self.args.device)
binary_label_ids = None
token_type_ids = None # TODO: not sure whether this has any effect on GPT-2
# record info
context = [example['context'] for example in batch]
curr_utt = [example['curr_utt'] for example in batch]
rewt_utt = [example['rewt_utt'] for example in batch]
example_ids = [example['example_id'] for example in batch] # record the example idx in batch
curr_start_token_idx = [example['curr_start_token_idx'] for example in batch]
curr_end_token_idx = [example['curr_end_token_idx'] for example in batch]
reference_label = [example['reference_label'] for example in batch]
wordId2tokenId = [example['wordId2tokenId'] for example in batch]
tokenId2wordId = [example['tokenId2wordId'] for example in batch]
whole_input = [example['whole_input'] for example in batch]
spk = [example['spk'] for example in batch]
coref_label = [example['coref_label'] for example in batch]
binary_rewrite = [example['binary_rewrite'] for example in batch]
return {'input_ids': input_ids, 'attention_mask': attention_mask, \
'token_type_ids': token_type_ids, 'label_ids': label_ids, \
'context': context, 'curr_utt': curr_utt, 'rewt_utt': rewt_utt, \
'example_ids': example_ids, 'spk': spk, 'mention_label_ids': mention_label_ids, \
'curr_start_token_idx': curr_start_token_idx, 'curr_end_token_idx': curr_end_token_idx, \
'reference_label': reference_label, 'wordId2tokenId': wordId2tokenId, \
'tokenId2wordId': tokenId2wordId, 'whole_input': whole_input, \
'coref_label': coref_label, 'binary_label_ids': binary_label_ids, \
'binary_rewrite': binary_rewrite}
def tokenize_with_map(self, sentence):
'''
Build the mapping of indexes before/after the tokenizer to handle BPE
Input:
sentence: a natural sentence, str
Returns:
wordId2tokenId, a 1-to-many map
tokenId2wordId, a many-to-1 map
'''
assert isinstance(sentence, str)
token_ids = self.tokenizer(sentence)['input_ids']
reconstruct_sentence = self.tokenizer.convert_ids_to_tokens(token_ids)
reconstruct_sentence = [token.replace('Ġ', '') for token in reconstruct_sentence]
sentence = sentence.split()
wordId2tokenId = {}
tokenId = 0
for wordId, word in enumerate(sentence):
wordId2tokenId[wordId] = []
token = ""
while word != token:
wordId2tokenId[wordId].append(tokenId)
token += reconstruct_sentence[tokenId]
tokenId += 1
tokenId2wordId = {}
for wordId, tokenIds in wordId2tokenId.items():
for tokenId in tokenIds:
assert tokenId not in tokenId2wordId
tokenId2wordId[tokenId] = wordId
assert len(wordId2tokenId) == len(sentence)
assert len(tokenId2wordId) == len(reconstruct_sentence)
return token_ids, wordId2tokenId, tokenId2wordId
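# Illustrative sketch (hypothetical tokenization): if the tokenizer splits the
# first word of a three-word sentence into two BPE pieces, the returned maps are
#   wordId2tokenId = {0: [0, 1], 1: [2], 2: [3]}
#   tokenId2wordId = {0: 0, 1: 0, 2: 1, 3: 2}
# so span indexes computed on whitespace words can be projected onto token positions.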
if __name__ == '__main__':
pass
| [((137, 10, 137, 22), 'json.load', 'json.load', ({(137, 20, 137, 21): 'f'}, {}), '(f)', False, 'import json\n'), ((140, 40, 140, 85), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm, trange\n'), ((152, 17, 152, 39), 'copy.deepcopy', 'copy.deepcopy', ({(152, 31, 152, 38): 'context'}, {}), '(context)', False, 'import copy\n'), ((244, 30, 244, 53), 'torch.tensor', 'torch.tensor', ({(244, 43, 244, 52): 'input_ids'}, {}), '(input_ids)', False, 'import torch\n'), ((244, 83, 244, 111), 'torch.tensor', 'torch.tensor', ({(244, 96, 244, 110): 'attention_mask'}, {}), '(attention_mask)', False, 'import torch\n'), ((249, 15, 249, 38), 'torch.tensor', 'torch.tensor', ({(249, 28, 249, 37): 'label_ids'}, {}), '(label_ids)', False, 'import torch\n'), ((252, 23, 252, 54), 'torch.tensor', 'torch.tensor', ({(252, 36, 252, 53): 'mention_label_ids'}, {}), '(mention_label_ids)', False, 'import torch\n'), ((255, 22, 255, 52), 'torch.tensor', 'torch.tensor', ({(255, 35, 255, 51): 'binary_label_ids'}, {}), '(binary_label_ids)', False, 'import torch\n'), ((260, 23, 260, 54), 'torch.tensor', 'torch.tensor', ({(260, 36, 260, 53): 'mention_label_ids'}, {}), '(mention_label_ids)', False, 'import torch\n')] |
schuster-rainer/hy | hy/lex/lexer.py | d969ed63d67c4a9070fd41a8fbff35da845e0619 | # Copyright (c) 2013 Nicolas Dandrimont <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from rply import LexerGenerator
lg = LexerGenerator()
# A regexp for something that should end a quoting/unquoting operator
# i.e. a space or a closing brace/paren/curly
end_quote = r'(?![\s\)\]\}])'
lg.add('LPAREN', r'\(')
lg.add('RPAREN', r'\)')
lg.add('LBRACKET', r'\[')
lg.add('RBRACKET', r'\]')
lg.add('LCURLY', r'\{')
lg.add('RCURLY', r'\}')
lg.add('HLCURLY', r'#\{')
lg.add('QUOTE', r'\'%s' % end_quote)
lg.add('QUASIQUOTE', r'`%s' % end_quote)
lg.add('UNQUOTESPLICE', r'~@%s' % end_quote)
lg.add('UNQUOTE', r'~%s' % end_quote)
lg.add('HASHBANG', r'#!.*[^\r\n]')
lg.add('HASHREADER', r'#[^{]')
# A regexp which matches incomplete strings, used to support
# multi-line strings in the interpreter
partial_string = r'''(?x)
(?:u|r|ur|ru)? # prefix
" # start string
(?:
| [^"\\] # non-quote or backslash
| \\(.|\n) # or escaped single character or newline
| \\x[0-9a-fA-F]{2} # or escaped raw character
| \\u[0-9a-fA-F]{4} # or unicode escape
| \\U[0-9a-fA-F]{8} # or long unicode escape
)* # one or more times
'''
lg.add('STRING', r'%s"' % partial_string)
lg.add('PARTIAL_STRING', partial_string)
lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+')
lg.ignore(r';.*(?=\r|\n|$)')
lg.ignore(r'\s+')
lexer = lg.build()
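# Minimal usage sketch (not part of the original module), assuming the standard
# rply token API (gettokentype/getstr):
#   for token in lexer.lex('(print "hello")'):
#       print(token.gettokentype(), token.getstr())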
| [((24, 5, 24, 21), 'rply.LexerGenerator', 'LexerGenerator', ({}, {}), '()', False, 'from rply import LexerGenerator\n')] |
solideveloper/afs-210 | week6/shuffle.py | 2ba0bb7c7617cd3169907458f657696a6987689d | # Python provides a built-in method called random.shuffle that will shuffle the list data type. Do not use this.
# For this assignment, you are to create your own shuffle algorithm that will take as input a sorted list and randomly shuffle the items before returning the list. Try to make your algorithm as efficient as possible.
# Add a comment to your code stating what the time complexity of your algorithm is and why.
# Display list before and after shuffle. Call your shuffle function multiple times, each time on the original sorted list to show the random order of the list items.
data = [7, 20, 26, 31, 40, 51, 55, 63, 74, 81]
ndata = len(data)
import random
def shuffleAlgorithm(data, ndata):
for i in range(ndata-1, 0, -1):
r = random.randint(0, i)
data[i], data[r] = data[r], data[i]
return data
print(data)
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
# Fisher-Yates algorithm - O(n) time complexity because each index is visited exactly once.
# The list is shuffled in place (constant extra space) rather than creating a copy of it to shuffle.
# swapping the last item with a random -not previously selected- item and repeating until all items in list have been selected | [((12, 12, 12, 32), 'random.randint', 'random.randint', ({(12, 27, 12, 28): '0', (12, 30, 12, 31): 'i'}, {}), '(0, i)', False, 'import random\n')] |
pr3sto/workbox | workbox/workbox/lib/helpers.py | 558147a1a387dcfbe03be0fbc366d1d793364da6 | # -*- coding: utf-8 -*-
"""Template Helpers used in workbox"""
import logging
import socket
from datetime import datetime
from markupsafe import Markup
import psutil
import tg
log = logging.getLogger(__name__)
def current_year():
""" Return current year. """
now = datetime.now()
return now.strftime('%Y')
def is_docker_enabled():
""" Detect if docker service is started. """
for proc in psutil.process_iter():
if 'docker' in proc.name():
return True
return False
def get_server_load_value():
""" Get server load value. """
return psutil.virtual_memory().percent
def get_free_port():
""" Find and returns free port number. """
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind(("", 0))
free_port = soc.getsockname()[1]
soc.close()
return free_port
def get_vagrantfiles_base_folder():
""" Return base folder for vagrantfiles. """
return tg.config.get('workbox.vagrantfiles.basefolder')
def get_hostname():
""" Return hostname. """
return tg.config.get('workbox.hostname')
try:
from webhelpers2 import date, html, number, misc, text
except SyntaxError:
log.error("WebHelpers2 helpers not available with this Python Version")
| [((11, 6, 11, 33), 'logging.getLogger', 'logging.getLogger', ({(11, 24, 11, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((17, 10, 17, 24), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((23, 16, 23, 37), 'psutil.process_iter', 'psutil.process_iter', ({}, {}), '()', False, 'import psutil\n'), ((36, 10, 36, 59), 'socket.socket', 'socket.socket', ({(36, 24, 36, 38): 'socket.AF_INET', (36, 40, 36, 58): 'socket.SOCK_STREAM'}, {}), '(socket.AF_INET, socket.SOCK_STREAM)', False, 'import socket\n'), ((45, 11, 45, 59), 'tg.config.get', 'tg.config.get', ({(45, 25, 45, 58): '"""workbox.vagrantfiles.basefolder"""'}, {}), "('workbox.vagrantfiles.basefolder')", False, 'import tg\n'), ((50, 11, 50, 44), 'tg.config.get', 'tg.config.get', ({(50, 25, 50, 43): '"""workbox.hostname"""'}, {}), "('workbox.hostname')", False, 'import tg\n'), ((31, 11, 31, 34), 'psutil.virtual_memory', 'psutil.virtual_memory', ({}, {}), '()', False, 'import psutil\n')] |
IshitaTakeshi/Tadataka | tadataka/dataset/new_tsukuba.py | 852c7afb904503005e51884408e1492ef0be836f | import csv
import os
from pathlib import Path
from xml.etree import ElementTree as ET
from tqdm import tqdm
from scipy.spatial.transform import Rotation
from skimage.io import imread
import numpy as np
from tadataka.camera import CameraModel, CameraParameters, FOV
from tadataka.dataset.frame import Frame
from tadataka.dataset.base import BaseDataset
from tadataka.pose import Pose
def load_depth(path):
tree = ET.parse(path)
root = tree.getroot()
rows_node, cols_node, dt_node, data_node = root[0]
height, width = int(rows_node.text), int(cols_node.text)
depth_text = data_node.text
depth_text = depth_text.replace('\n', '').strip()
depth_map = np.fromstring(depth_text, sep=' ')
return depth_map.reshape(height, width)
def generate_cache(src_dir, cache_dir, src_extension, loader):
def generate_(subdir):
os.makedirs(str(Path(cache_dir, subdir)))
print(f"Generating cache from {subdir}")
paths = Path(src_dir, subdir).glob("*" + src_extension)
for path in tqdm(list(paths)):
filename = path.name.replace(src_extension, ".npy")
cache_path = Path(cache_dir, subdir, filename)
array = loader(path)
np.save(str(cache_path), array)
generate_("left")
generate_("right")
def generate_image_cache(image_dir, cache_dir):
print("Generating image cache")
generate_cache(image_dir, cache_dir, ".png", imread)
def generate_depth_cache(depth_dir, cache_dir):
print("Generating depth cache")
generate_cache(depth_dir, cache_dir, ".xml", load_depth)
def align_coordinate_system(positions, euler_angles):
# Camera coordinate system and world coordinate system are not aligned
#
    # Usually the camera coordinate system is represented as
    # x: right y: down z: forward
    # however, in 'camera_track.txt' it is written as
    # x: right y: up z: backward
#
# This means the camera coordinate system is
# rotated 180 degrees around the x-axis from the world coordinate system
# rotate 180 degrees around the x-axis
R = Rotation.from_rotvec([np.pi, 0, 0]).as_matrix()
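    # The resulting matrix is diag(1, -1, -1): the y and z axes are flipped.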
positions = np.dot(R, positions.T).T
# Reverse rotations around y and z because axes are flipped
# (rot_x, rot_y, rot_z) <- (rot_x, -rot_y, -rot_z)
euler_angles[:, 1:3] = -euler_angles[:, 1:3]
return positions, euler_angles
def load_poses(pose_path):
poses = np.loadtxt(pose_path, delimiter=',')
positions, euler_angles = poses[:, 0:3], poses[:, 3:6]
positions, euler_angles = align_coordinate_system(positions, euler_angles)
rotations = Rotation.from_euler('xyz', euler_angles, degrees=True)
return rotations, positions
def discard_alpha(image):
return image[:, :, 0:3]
def calc_baseline_offset(rotation, baseline_length):
local_offset = np.array([baseline_length, 0, 0])
R = rotation.as_matrix()
return np.dot(R, local_offset)
# TODO download and set dataset_root automatically
class NewTsukubaDataset(BaseDataset):
def __init__(self, dataset_root, condition="daylight"):
self.camera_model = CameraModel(
CameraParameters(focal_length=[615, 615], offset=[320, 240]),
distortion_model=None
)
groundtruth_dir = Path(dataset_root, "groundtruth")
illumination_dir = Path(dataset_root, "illumination")
pose_path = Path(groundtruth_dir, "camera_track.txt")
self.baseline_length = 10.0
self.rotations, self.positions = load_poses(pose_path)
depth_dir = Path(groundtruth_dir, "depth_maps")
depth_cache_dir = Path(groundtruth_dir, "depth_cache")
if not depth_cache_dir.exists():
generate_depth_cache(depth_dir, depth_cache_dir)
self.depth_L_paths = sorted(Path(depth_cache_dir, "left").glob("*.npy"))
self.depth_R_paths = sorted(Path(depth_cache_dir, "right").glob("*.npy"))
image_dir = Path(illumination_dir, condition)
image_cache_dir = Path(illumination_dir, condition + "_cache")
if not image_cache_dir.exists():
generate_image_cache(image_dir, image_cache_dir)
self.image_L_paths = sorted(Path(image_cache_dir, "left").glob("*.npy"))
self.image_R_paths = sorted(Path(image_cache_dir, "right").glob("*.npy"))
assert((len(self.depth_L_paths) == len(self.depth_R_paths) ==
len(self.image_L_paths) == len(self.image_R_paths) ==
len(self.rotations) == len(self.positions)))
for i in range(len(self.positions)):
DL = self.depth_L_paths[i].name
DR = self.depth_R_paths[i].name
IL = self.image_L_paths[i].name
IR = self.image_R_paths[i].name
assert(DL[-8:] == DR[-8:] == IL[-8:] == IR[-8:])
def __len__(self):
return len(self.positions)
def load(self, index):
image_l = np.load(self.image_L_paths[index])
image_r = np.load(self.image_R_paths[index])
image_l = discard_alpha(image_l)
image_r = discard_alpha(image_r)
depth_l = np.load(self.depth_L_paths[index])
depth_r = np.load(self.depth_R_paths[index])
position_center = self.positions[index]
rotation = self.rotations[index]
offset = calc_baseline_offset(rotation, self.baseline_length)
pose_wl = Pose(rotation, position_center - offset / 2.0)
pose_wr = Pose(rotation, position_center + offset / 2.0)
return (
Frame(self.camera_model, pose_wl, image_l, depth_l),
Frame(self.camera_model, pose_wr, image_r, depth_r)
)
| [((18, 11, 18, 25), 'xml.etree.ElementTree.parse', 'ET.parse', ({(18, 20, 18, 24): 'path'}, {}), '(path)', True, 'from xml.etree import ElementTree as ET\n'), ((25, 16, 25, 50), 'numpy.fromstring', 'np.fromstring', (), '', True, 'import numpy as np\n'), ((78, 12, 78, 48), 'numpy.loadtxt', 'np.loadtxt', (), '', True, 'import numpy as np\n'), ((81, 16, 81, 70), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (), '', False, 'from scipy.spatial.transform import Rotation\n'), ((90, 19, 90, 52), 'numpy.array', 'np.array', ({(90, 28, 90, 51): '[baseline_length, 0, 0]'}, {}), '([baseline_length, 0, 0])', True, 'import numpy as np\n'), ((92, 11, 92, 34), 'numpy.dot', 'np.dot', ({(92, 18, 92, 19): 'R', (92, 21, 92, 33): 'local_offset'}, {}), '(R, local_offset)', True, 'import numpy as np\n'), ((69, 16, 69, 38), 'numpy.dot', 'np.dot', ({(69, 23, 69, 24): 'R', (69, 26, 69, 37): 'positions.T'}, {}), '(R, positions.T)', True, 'import numpy as np\n'), ((102, 26, 102, 59), 'pathlib.Path', 'Path', ({(102, 31, 102, 43): 'dataset_root', (102, 45, 102, 58): '"""groundtruth"""'}, {}), "(dataset_root, 'groundtruth')", False, 'from pathlib import Path\n'), ((103, 27, 103, 61), 'pathlib.Path', 'Path', ({(103, 32, 103, 44): 'dataset_root', (103, 46, 103, 60): '"""illumination"""'}, {}), "(dataset_root, 'illumination')", False, 'from pathlib import Path\n'), ((105, 20, 105, 61), 'pathlib.Path', 'Path', ({(105, 25, 105, 40): 'groundtruth_dir', (105, 42, 105, 60): '"""camera_track.txt"""'}, {}), "(groundtruth_dir, 'camera_track.txt')", False, 'from pathlib import Path\n'), ((110, 20, 110, 55), 'pathlib.Path', 'Path', ({(110, 25, 110, 40): 'groundtruth_dir', (110, 42, 110, 54): '"""depth_maps"""'}, {}), "(groundtruth_dir, 'depth_maps')", False, 'from pathlib import Path\n'), ((111, 26, 111, 62), 'pathlib.Path', 'Path', ({(111, 31, 111, 46): 'groundtruth_dir', (111, 48, 111, 61): '"""depth_cache"""'}, {}), "(groundtruth_dir, 'depth_cache')", False, 'from pathlib import Path\n'), ((119, 20, 119, 53), 'pathlib.Path', 'Path', ({(119, 25, 119, 41): 'illumination_dir', (119, 43, 119, 52): 'condition'}, {}), '(illumination_dir, condition)', False, 'from pathlib import Path\n'), ((120, 26, 120, 70), 'pathlib.Path', 'Path', ({(120, 31, 120, 47): 'illumination_dir', (120, 49, 120, 69): "condition + '_cache'"}, {}), "(illumination_dir, condition + '_cache')", False, 'from pathlib import Path\n'), ((144, 18, 144, 52), 'numpy.load', 'np.load', ({(144, 26, 144, 51): 'self.image_L_paths[index]'}, {}), '(self.image_L_paths[index])', True, 'import numpy as np\n'), ((145, 18, 145, 52), 'numpy.load', 'np.load', ({(145, 26, 145, 51): 'self.image_R_paths[index]'}, {}), '(self.image_R_paths[index])', True, 'import numpy as np\n'), ((150, 18, 150, 52), 'numpy.load', 'np.load', ({(150, 26, 150, 51): 'self.depth_L_paths[index]'}, {}), '(self.depth_L_paths[index])', True, 'import numpy as np\n'), ((151, 18, 151, 52), 'numpy.load', 'np.load', ({(151, 26, 151, 51): 'self.depth_R_paths[index]'}, {}), '(self.depth_R_paths[index])', True, 'import numpy as np\n'), ((157, 18, 157, 64), 'tadataka.pose.Pose', 'Pose', ({(157, 23, 157, 31): 'rotation', (157, 33, 157, 63): 'position_center - offset / 2.0'}, {}), '(rotation, position_center - offset / 2.0)', False, 'from tadataka.pose import Pose\n'), ((158, 18, 158, 64), 'tadataka.pose.Pose', 'Pose', ({(158, 23, 158, 31): 'rotation', (158, 33, 158, 63): 'position_center + offset / 2.0'}, {}), '(rotation, position_center + offset / 2.0)', False, 'from tadataka.pose import Pose\n'), 
((38, 25, 38, 58), 'pathlib.Path', 'Path', ({(38, 30, 38, 39): 'cache_dir', (38, 41, 38, 47): 'subdir', (38, 49, 38, 57): 'filename'}, {}), '(cache_dir, subdir, filename)', False, 'from pathlib import Path\n'), ((68, 8, 68, 43), 'scipy.spatial.transform.Rotation.from_rotvec', 'Rotation.from_rotvec', ({(68, 29, 68, 42): '[np.pi, 0, 0]'}, {}), '([np.pi, 0, 0])', False, 'from scipy.spatial.transform import Rotation\n'), ((99, 12, 99, 72), 'tadataka.camera.CameraParameters', 'CameraParameters', (), '', False, 'from tadataka.camera import CameraModel, CameraParameters, FOV\n'), ((160, 12, 160, 63), 'tadataka.dataset.frame.Frame', 'Frame', ({(160, 18, 160, 35): 'self.camera_model', (160, 37, 160, 44): 'pose_wl', (160, 46, 160, 53): 'image_l', (160, 55, 160, 62): 'depth_l'}, {}), '(self.camera_model, pose_wl, image_l, depth_l)', False, 'from tadataka.dataset.frame import Frame\n'), ((161, 12, 161, 63), 'tadataka.dataset.frame.Frame', 'Frame', ({(161, 18, 161, 35): 'self.camera_model', (161, 37, 161, 44): 'pose_wr', (161, 46, 161, 53): 'image_r', (161, 55, 161, 62): 'depth_r'}, {}), '(self.camera_model, pose_wr, image_r, depth_r)', False, 'from tadataka.dataset.frame import Frame\n'), ((31, 24, 31, 47), 'pathlib.Path', 'Path', ({(31, 29, 31, 38): 'cache_dir', (31, 40, 31, 46): 'subdir'}, {}), '(cache_dir, subdir)', False, 'from pathlib import Path\n'), ((35, 16, 35, 37), 'pathlib.Path', 'Path', ({(35, 21, 35, 28): 'src_dir', (35, 30, 35, 36): 'subdir'}, {}), '(src_dir, subdir)', False, 'from pathlib import Path\n'), ((116, 36, 116, 65), 'pathlib.Path', 'Path', ({(116, 41, 116, 56): 'depth_cache_dir', (116, 58, 116, 64): '"""left"""'}, {}), "(depth_cache_dir, 'left')", False, 'from pathlib import Path\n'), ((117, 36, 117, 66), 'pathlib.Path', 'Path', ({(117, 41, 117, 56): 'depth_cache_dir', (117, 58, 117, 65): '"""right"""'}, {}), "(depth_cache_dir, 'right')", False, 'from pathlib import Path\n'), ((125, 36, 125, 65), 'pathlib.Path', 'Path', ({(125, 41, 125, 56): 'image_cache_dir', (125, 58, 125, 64): '"""left"""'}, {}), "(image_cache_dir, 'left')", False, 'from pathlib import Path\n'), ((126, 36, 126, 66), 'pathlib.Path', 'Path', ({(126, 41, 126, 56): 'image_cache_dir', (126, 58, 126, 65): '"""right"""'}, {}), "(image_cache_dir, 'right')", False, 'from pathlib import Path\n')] |
enamrik/krogon | krogon/maybe.py | a41a10ed346b7198509929ed9ba1e9fcf778dc78 | from typing import Callable, TypeVar, Union, Tuple
from krogon.infix import Infix
A = TypeVar('A')
B = TypeVar('B')
E = TypeVar('E')
Maybe = Union[Tuple['just', A], Tuple['nothing']]
def just(value=None):
return "just", value
def nothing():
return "nothing", None
def from_value(value) -> Maybe[B]:
return _cast_to_maybe(value)
def from_value_or_default(value, default) -> Maybe[B]:
return from_maybe(
_cast_to_maybe(value),
dict(if_just=lambda x: just(x),
if_nothing=lambda: _cast_to_maybe(default)))
@Infix
def then(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
if maybe[0] == "just":
return _cast_to_maybe(func(maybe[1]))
elif maybe[0] == "nothing":
return maybe
@Infix
def catch_nothing(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
if maybe[0] == "nothing":
return _cast_to_maybe(func())
elif maybe[0] == "just":
return maybe
@Infix
def map(maybe: Maybe[A], mapper: Callable[[A], B]) -> Maybe[B]:
if maybe[0] == "just":
return just(mapper(maybe[1]))
elif maybe[0] == "nothing":
return maybe
@Infix
def value_or_default(maybe: Maybe[A], default_value: B):
return maybe | from_maybe | (dict(if_just=lambda x: x, if_nothing=lambda: default_value))
@Infix
def from_maybe(maybe: Maybe[A], dict_args: dict) -> B:
if_just: Callable = dict_args['if_just']
if_nothing: Callable = dict_args['if_nothing']
if maybe[0] == "just" and if_just is not None:
return if_just(maybe[1])
elif maybe[0] == "nothing" and if_nothing is not None:
return if_nothing()
else:
raise Exception('Invalid Maybe: {}, {}'.format(maybe, dict_args))
def _cast_to_maybe(result):
if result is None:
return nothing()
if isinstance(result, tuple) and len(result) == 2:
maybe_type, value = result
if maybe_type == "just" or maybe_type == "nothing":
return result
return just(result)
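# Illustrative usage of the Infix-style pipeline (not part of the original module):
#     just(3) | then | (lambda x: just(x + 1)) | value_or_default | 0   # -> 4
#     nothing() | map | (lambda x: x * 2) | value_or_default | 0        # -> 0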
| [((4, 4, 4, 16), 'typing.TypeVar', 'TypeVar', ({(4, 12, 4, 15): '"""A"""'}, {}), "('A')", False, 'from typing import Callable, TypeVar, Union, Tuple\n'), ((5, 4, 5, 16), 'typing.TypeVar', 'TypeVar', ({(5, 12, 5, 15): '"""B"""'}, {}), "('B')", False, 'from typing import Callable, TypeVar, Union, Tuple\n'), ((6, 4, 6, 16), 'typing.TypeVar', 'TypeVar', ({(6, 12, 6, 15): '"""E"""'}, {}), "('E')", False, 'from typing import Callable, TypeVar, Union, Tuple\n')] |
EbersonDias/html-css | Python (desafios)/desafio 009.py | b05ec122dc7649656bcfce92dc92ded127bbb2cf | # Desafio 009
# Write a program that reads any integer
# and displays its multiplication table on the screen.
n = int(input('digite um numero. '))
r1 = n * 1
r2 = (n * 2)
r3 = (n * 3)
r4 = (n * 4)
r5 = (n * 5)
r6 = (n * 6)
r7 = (n * 7)
r8 = (n * 8)
r9 = (n * 9)
r10 = (n * 10)
print('A Tabuada de {} é'.format(n))
print ('{} x 1 = {}'.format(n,r1))
print ('{} x 2 = {}'.format(n,r2))
print ('{} x 3 = {}'.format(n,r3))
print ('{} x 4 = {}'.format(n,r4))
print ('{} x 5 = {}'.format(n,r5))
print ('{} x 6 = {}'.format(n,r6))
print ('{} x 7 = {}'.format(n,r7))
print ('{} x 8 = {}'.format(n,r8))
print ('{} x 9 = {}'.format(n,r9))
print ('{} x 10 = {}'.format(n,r10))
# Another way of doing it
n = int(input('Quanto é a Tabuada de '))
print('A Tabuada de {} é'.format(n))
print('-'*12)
print ('{} x {:2} = {}'.format(n, 1, n*1))
print ('{} x {:2} = {}'.format(n, 2, n*2))
print ('{} x {:2} = {}'.format(n, 3, n*3))
print ('{} x {:2} = {}'.format(n, 4, n*4))
print ('{} x {:2} = {}'.format(n, 5, n*5))
print ('{} x {:2} = {}'.format(n, 6, n*6))
print ('{} x {:2} = {}'.format(n, 7, n*7))
print ('{} x {:2} = {}'.format(n, 8, n*8))
print ('{} x {:2} = {}'.format(n, 9, n*9))
print ('{} x {:2} = {}'.format(n, 10, n*10))
print('-'*12) | [] |
supercatex/TelloEdu | tools/__init__.py | 8f434dbc9866be3025cb119175c40f1d2d7fb5f3 | from tools.TelloEdu import TelloEdu
from tools.Controller import *
from tools.SocketObject import SocketClient | [] |
congnt95/neutron | neutron/agent/ovsdb/native/helpers.py | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from neutron.conf.agent import ovs_conf as agent_ovs_conf
from neutron.conf.plugins.ml2.drivers import ovs_conf as ml2_ovs_conf
from neutron.privileged.agent.ovsdb.native import helpers as priv_helpers
agent_ovs_conf.register_ovs_agent_opts(cfg.CONF)
ml2_ovs_conf.register_ovs_opts(cfg=cfg.CONF)
enable_connection_uri = functools.partial(
priv_helpers.enable_connection_uri,
log_fail_as_error=False, check_exit_code=False,
timeout=cfg.CONF.OVS.ovsdb_timeout,
inactivity_probe=cfg.CONF.OVS.of_inactivity_probe * 1000)
| [((24, 0, 24, 48), 'neutron.conf.agent.ovs_conf.register_ovs_agent_opts', 'agent_ovs_conf.register_ovs_agent_opts', ({(24, 39, 24, 47): 'cfg.CONF'}, {}), '(cfg.CONF)', True, 'from neutron.conf.agent import ovs_conf as agent_ovs_conf\n'), ((25, 0, 25, 44), 'neutron.conf.plugins.ml2.drivers.ovs_conf.register_ovs_opts', 'ml2_ovs_conf.register_ovs_opts', (), '', True, 'from neutron.conf.plugins.ml2.drivers import ovs_conf as ml2_ovs_conf\n'), ((27, 24, 31, 61), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n')] |
kira607/config_loader | conlo/serializer/json_serializer.py | 024f33d48fee1635dfa9ed286f84bb96f22c134a | import json
from .base_serializer import BaseSerializer
class JsonSerializer(BaseSerializer):
'''Json serializer.'''
def _serialize(self, data: dict, **kwargs) -> str:
return json.dumps(data)
def _deserialize(self, data: str, **kwargs) -> dict:
return json.loads(data)
| [((10, 15, 10, 31), 'json.dumps', 'json.dumps', ({(10, 26, 10, 30): 'data'}, {}), '(data)', False, 'import json\n'), ((13, 15, 13, 31), 'json.loads', 'json.loads', ({(13, 26, 13, 30): 'data'}, {}), '(data)', False, 'import json\n')] |
AlBan52/API_weather | console_weather.py | 86779a2da622ad7a4537070e5c28a04235415161 | import requests
locations = ['Лондон', 'Шереметьево', 'Череповец']
payload = {'mnTq': '', 'lang': 'ru'}
for location in locations:
response = requests.get(f'http://wttr.in/{location}', params=payload)
response.raise_for_status()
print(response.text)
| [((7, 15, 7, 73), 'requests.get', 'requests.get', (), '', False, 'import requests\n')] |
Torniojaws/vortech-backend | migrations/versions/576712576c48_added_model_for_photo_comments.py | f775a97eeae089fa720088d86fe92d40bc5d65bc | """Added model for photo comments
Revision ID: 576712576c48
Revises: 75bb906df167
Create Date: 2018-03-30 02:06:22.877079
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '576712576c48'
down_revision = '75bb906df167'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('CommentsPhotos',
sa.Column('CommentID', sa.Integer(), nullable=False),
sa.Column('PhotoID', sa.Integer(), nullable=False),
sa.Column('Comment', sa.Text(), nullable=False),
sa.Column('UserID', sa.Integer(), nullable=False),
sa.Column('Created', sa.DateTime(), nullable=True),
sa.Column('Updated', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['PhotoID'], ['Photos.PhotoID'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['UserID'], ['Users.UserID'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('CommentID')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('CommentsPhotos')
# ### end Alembic commands ###
| [((37, 4, 37, 35), 'alembic.op.drop_table', 'op.drop_table', ({(37, 18, 37, 34): '"""CommentsPhotos"""'}, {}), "('CommentsPhotos')", False, 'from alembic import op\n'), ((28, 4, 28, 80), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (), '', True, 'import sqlalchemy as sa\n'), ((29, 4, 29, 77), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (), '', True, 'import sqlalchemy as sa\n'), ((30, 4, 30, 40), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', ({(30, 28, 30, 39): '"""CommentID"""'}, {}), "('CommentID')", True, 'import sqlalchemy as sa\n'), ((22, 27, 22, 39), 'sqlalchemy.Integer', 'sa.Integer', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((23, 25, 23, 37), 'sqlalchemy.Integer', 'sa.Integer', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((24, 25, 24, 34), 'sqlalchemy.Text', 'sa.Text', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((25, 24, 25, 36), 'sqlalchemy.Integer', 'sa.Integer', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((26, 25, 26, 38), 'sqlalchemy.DateTime', 'sa.DateTime', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((27, 25, 27, 38), 'sqlalchemy.DateTime', 'sa.DateTime', ({}, {}), '()', True, 'import sqlalchemy as sa\n')] |
m3sserschmitt/basic-http | __init__.py | bc09a888b44a11154e2cc9bfaf46fc9fd3a79b82 | import basic_http.session
basic_http.session.LIB_VERSION = 'v0.0.4-beta'
basic_http.session.DEFAULT_AGENT = 'basic-http version ' + basic_http.session.LIB_VERSION
| [] |
truthiswill/usaspending-api | usaspending_api/etl/helpers.py | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | from datetime import datetime
import warnings
import logging
from django.db.models import Q, Case, Value, When
from django.core.cache import caches, CacheKeyWarning
import django.apps
from usaspending_api.references.models import Agency, Location, RefCountryCode
from usaspending_api.references.helpers import canonicalize_location_dict
from usaspending_api.submissions.models import SubmissionAttributes
from usaspending_api.data.daims_maps import daims_maps
warnings.simplefilter("ignore", CacheKeyWarning)
def clear_caches():
for cache_name in ('default', 'locations', 'awards'):
caches[cache_name].clear()
def cleanse_values(row):
"""
Remove textual quirks from CSV values.
"""
row = {k: v.strip() for (k, v) in row.items()}
row = {k: (None if v.lower() == 'null' else v) for (k, v) in row.items()}
return row
def convert_date(date):
if date == "":
return None
return datetime.strptime(date, '%m/%d/%Y').strftime('%Y-%m-%d')
def get_subtier_agency_dict():
"""Returns a dictionary with key = subtier agency code and value = agency id."""
# there's no unique constraint on subtier_code, so the order by below ensures that in the case of duplicate subtier
# codes, the dictionary we return will reflect the most recently updated one
agencies = Agency.objects.all().values(
'id',
'subtier_agency__subtier_code').order_by('subtier_agency__update_date')
subtier_agency_dict = {
a['subtier_agency__subtier_code']: a['id'] for a in agencies
}
return subtier_agency_dict
def fetch_country_code(vendor_country_code):
code_str = up2colon(vendor_country_code)
if code_str == "":
return None
country_code = RefCountryCode.objects.filter(Q(country_code=code_str) | Q(country_name__iexact=code_str)).first()
if not country_code:
# We don't have an exact match on the name or the code, so we need to
# chain filter on the name
query_set = RefCountryCode.objects
for word in code_str.split():
query_set = query_set.filter(country_name__icontains=word)
country_code = query_set.first()
return country_code
location_cache = caches['locations']
def get_or_create_location(row, mapper):
location_dict = mapper(row)
# Country-specific adjustments
if location_dict["location_country_code"] == "USA":
# Apparently zip codes are optional...
if location_dict["location_zip"]:
location_dict.update(
zip5=location_dict["location_zip"][:5],
zip_last4=location_dict["location_zip"][5:])
location_dict.pop("location_zip")
else:
location_dict.update(
foreign_postal_code=location_dict.pop("location_zip", None),
foreign_province=location_dict.pop("state_code", None))
if "city_name" in location_dict:
location_dict['foreign_city_name'] = location_dict.pop("city_name")
location_dict = canonicalize_location_dict(location_dict)
location_tup = tuple(location_dict.items())
location = location_cache.get(location_tup)
if location:
return location
location = Location.objects.filter(**location_dict).first()
if not location:
location = Location.objects.create(**location_dict)
location_cache.set(location_tup, location)
return location
def up2colon(input_string):
'Takes the part of a string before `:`, if any.'
if input_string:
return input_string.split(':')[0].strip()
return ''
def parse_numeric_value(string):
try:
return float(string)
except Exception:
return None
def get_fiscal_quarter(fiscal_reporting_period):
"""
Return the fiscal quarter.
Note: the reporting period being passed should already be in "federal fiscal format",
where period 1 = Oct. and period 12 = Sept.
"""
if fiscal_reporting_period in [1, 2, 3]:
return 1
elif fiscal_reporting_period in [4, 5, 6]:
return 2
elif fiscal_reporting_period in [7, 8, 9]:
return 3
elif fiscal_reporting_period in [10, 11, 12]:
return 4
def get_previous_submission(cgac_code, fiscal_year, fiscal_period):
"""
For the specified CGAC (e.g., department/top-tier agency) and specified fiscal year and quarter, return the
previous submission within the same fiscal year.
"""
previous_submission = SubmissionAttributes.objects \
.filter(
cgac_code=cgac_code,
reporting_fiscal_year=fiscal_year,
reporting_fiscal_period__lt=fiscal_period,
quarter_format_flag=True) \
.order_by('-reporting_fiscal_period') \
.first()
return previous_submission
def update_model_description_fields():
"""
This method searches through every model Django has registered, checks if it
belongs to a list of apps we should update, and updates all fields with
'_description' at the end with their relevant information.
Dictionaries for DAIMS definitions should be stored in:
usaspending_api/data/daims_maps.py
Each map should be <field_name>_map for discoverability.
If there are conflicting maps (i.e., two models use type_description, but
different enumerations) prepend the map name with the model name and a dot.
For examples of these situations, see the documentation in daims_maps.py
"""
logger = logging.getLogger('console')
# This is a list of apps whose models will be checked for description fields
updatable_apps = [
"accounts",
"awards",
"common",
"financial_activities",
"references",
"submissions"
]
# This iterates over every model that Django has registered
for model in django.apps.apps.get_models():
# This checks the app_label of the model, and thus we can skip it if it is not in one of our updatable_apps.
# Thus, we'll skip any django admin apps, like auth, corsheaders, etc.
if model._meta.app_label not in updatable_apps:
continue
if model.__name__[:10] == "Historical":
continue
model_fields = [f.name for f in model._meta.get_fields()]
# This supports multi-case DAIMS
# We must filter on the model level rather than add them to the when clauses, because if there is a FK in the
# when clause Django is not guaranteed to join on that table properly.
#
# This is an array of tuples of the following format
# (Q object of filter, field_names -> case objects map for this filter)
#
# It is initialized with a blank filter and empty list, which is where default updates are stored
model_filtered_update_case_map = [(Q(), {})]
desc_fields = [field for field in model_fields if field.split('_')[-1] ==
"description"[:len(field.split('_')[-1])]]
non_desc_fields = [field for field in model_fields if field not in desc_fields]
desc_fields_mapping = {}
for desc_field in desc_fields:
actual_field_short = "_".join(desc_field.split('_')[:-1])
actual_field = None
for field in non_desc_fields:
if actual_field_short == field:
actual_field = field
elif actual_field_short == field[:len(actual_field_short)]:
actual_field = field
desc_fields_mapping[desc_field] = actual_field
# Loop through each of the models fields to construct a case for each applicable field
for field in model_fields:
# We're looking for field names ending in _description
split_name = field.split("_")
# If the last element in our split name isn't description, skip it
if len(split_name) == 1 or split_name[-1] != "description"[:len(split_name[-1])]:
continue
source_field = "_".join(split_name[:-1])
destination_field = field
# This is the map name, prefixed by model name for when there are non-unique description fields
source_field = desc_fields_mapping[field] if field in desc_fields_mapping else source_field
model_map_name = "{}.{}_map".format(model.__name__, source_field)
map_name = "{}_map".format(source_field)
# This stores a direct reference to the enumeration mapping
code_map = None
# Validate we have the source field
if source_field not in model_fields:
logger.debug("Tried to update '{}' on model '{}', but source field '{}' does not exist.".
format(destination_field, model.__name__, source_field))
continue
# Validate we have a map
# Prefer model_map_name over map_name
if model_map_name in daims_maps.keys():
code_map = daims_maps[model_map_name]
elif map_name in daims_maps.keys():
code_map = daims_maps[map_name]
else:
logger.warn("Tried to update '{}' on model '{}', but neither map '{}' nor '{}' exists.".
format(destination_field, model.__name__, model_map_name, map_name))
continue
# Cases start from 1
case_number = 1
case_name = "case_1"
case_map = "case_1_map"
while case_name in code_map.keys():
case_object = create_case(code_map[case_map], source_field)
# Construct a Q filter for this case
case_filter = Q(**code_map[case_name])
# See if we already have a tuple for this filter
case_tuple = [x for x in model_filtered_update_case_map if x[0] == case_filter]
if len(case_tuple) == 0:
# We don't, so create the tuple
temp_case_dict = {}
temp_case_dict[field] = case_object
model_filtered_update_case_map.append((case_filter, temp_case_dict))
else:
# We do, so just add our case object to that dictionary
case_tuple[0][1][field] = case_object
# Check for the next case
case_number += 1
case_name = "case_{}".format(case_number)
case_map = "case_{}_map".format(case_number)
# If our case number is still 1, then we didn't have any cases. Therefore, we perform the default
if case_number == 1:
case_object = create_case(code_map, source_field)
# Grab the first tuple, which has no filters
case_tuple = model_filtered_update_case_map[0]
# Add it to our dictionary
case_tuple[1][field] = case_object
for filter_tuple in model_filtered_update_case_map:
# For each filter tuple, check if the dictionary has any entries
if len(filter_tuple[1].keys()) > 0:
print("Updating model {}\n FILTERS:\n {}\n FIELDS:\n {}".
format(model.__name__, str(filter_tuple[0]), "\n ".join(filter_tuple[1].keys())))
try:
model.objects.filter(filter_tuple[0]).update(**filter_tuple[1])
except django.db.utils.ProgrammingError as e:
logger.warn(str(e))
logger.warn("(OK if invoked from a migration, when the table may not yet have been created)")
# Utility method for update_model_description_fields, creates the Case object
def create_case(code_map, source_field):
when_list = []
default = None
for code in code_map.keys():
when_args = {}
when_args[source_field] = code
when_args["then"] = Value(code_map[code])
# If our code is blank, change the comparison to ""
if code == "_BLANK":
when_args[source_field] = Value("")
# We handle the default case later
if code == "_DEFAULT":
default = Value(code_map[code])
continue
# Append a new when to our when-list
when_list.append(When(**when_args))
return Case(*when_list, default=default)
| [((14, 0, 14, 48), 'warnings.simplefilter', 'warnings.simplefilter', ({(14, 22, 14, 30): '"""ignore"""', (14, 32, 14, 47): 'CacheKeyWarning'}, {}), "('ignore', CacheKeyWarning)", False, 'import warnings\n'), ((91, 20, 91, 61), 'usaspending_api.references.helpers.canonicalize_location_dict', 'canonicalize_location_dict', ({(91, 47, 91, 60): 'location_dict'}, {}), '(location_dict)', False, 'from usaspending_api.references.helpers import canonicalize_location_dict\n'), ((168, 13, 168, 41), 'logging.getLogger', 'logging.getLogger', ({(168, 31, 168, 40): '"""console"""'}, {}), "('console')", False, 'import logging\n'), ((321, 11, 321, 44), 'django.db.models.Case', 'Case', (), '', False, 'from django.db.models import Q, Case, Value, When\n'), ((100, 19, 100, 59), 'usaspending_api.references.models.Location.objects.create', 'Location.objects.create', ({}, {}), '(**location_dict)', False, 'from usaspending_api.references.models import Agency, Location, RefCountryCode\n'), ((307, 28, 307, 49), 'django.db.models.Value', 'Value', ({(307, 34, 307, 48): 'code_map[code]'}, {}), '(code_map[code])', False, 'from django.db.models import Q, Case, Value, When\n'), ((34, 11, 34, 46), 'datetime.datetime.strptime', 'datetime.strptime', ({(34, 29, 34, 33): 'date', (34, 35, 34, 45): '"""%m/%d/%Y"""'}, {}), "(date, '%m/%d/%Y')", False, 'from datetime import datetime\n'), ((98, 15, 98, 55), 'usaspending_api.references.models.Location.objects.filter', 'Location.objects.filter', ({}, {}), '(**location_dict)', False, 'from usaspending_api.references.models import Agency, Location, RefCountryCode\n'), ((311, 38, 311, 47), 'django.db.models.Value', 'Value', ({(311, 44, 311, 46): '""""""'}, {}), "('')", False, 'from django.db.models import Q, Case, Value, When\n'), ((315, 22, 315, 43), 'django.db.models.Value', 'Value', ({(315, 28, 315, 42): 'code_map[code]'}, {}), '(code_map[code])', False, 'from django.db.models import Q, Case, Value, When\n'), ((319, 25, 319, 42), 'django.db.models.When', 'When', ({}, {}), '(**when_args)', False, 'from django.db.models import Q, Case, Value, When\n'), ((200, 43, 200, 46), 'django.db.models.Q', 'Q', ({}, {}), '()', False, 'from django.db.models import Q, Case, Value, When\n'), ((243, 33, 243, 50), 'usaspending_api.data.daims_maps.daims_maps.keys', 'daims_maps.keys', ({}, {}), '()', False, 'from usaspending_api.data.daims_maps import daims_maps\n'), ((259, 30, 259, 54), 'django.db.models.Q', 'Q', ({}, {}), '(**code_map[case_name])', False, 'from django.db.models import Q, Case, Value, When\n'), ((41, 15, 41, 35), 'usaspending_api.references.models.Agency.objects.all', 'Agency.objects.all', ({}, {}), '()', False, 'from usaspending_api.references.models import Agency, Location, RefCountryCode\n'), ((55, 49, 55, 73), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q, Case, Value, When\n'), ((55, 76, 55, 108), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q, Case, Value, When\n'), ((141, 26, 146, 37), 'usaspending_api.submissions.models.SubmissionAttributes.objects.filter', 'SubmissionAttributes.objects.filter', (), '', False, 'from usaspending_api.submissions.models import SubmissionAttributes\n'), ((245, 29, 245, 46), 'usaspending_api.data.daims_maps.daims_maps.keys', 'daims_maps.keys', ({}, {}), '()', False, 'from usaspending_api.data.daims_maps import daims_maps\n')] |
DevAerial/flask-api-template | {{cookiecutter.project_name}}/{{cookiecutter.app_name}}/extensions.py | 6d3f745f2dacb793c4bdc6aaaceb86eb472efe55 | from flask_marshmallow import Marshmallow{% if cookiecutter.use_celery == 'yes'%}
from celery import Celery
celery = Celery(){% endif %}
ma = Marshmallow()
| [] |
young-astronomer/vlpy | code/mapplot.py | 7fd434d307a7cc3593f84a7c6c2f4a4a86865afe | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 11:11:56 2020
This program is used to plot a polarization map from a VLBI FITS image.
You should specify the input FITS image by -i or --infile,
output file by -o or --outfile,
contour levs by -l or --levs
contour base by -c or --cmul
polarization parameters by -p or --pol: "icut pcut inc scale"
plot window by -w or --win
restore beam position by -b or --bpos
figsize by -f or --figsize
Installation:
1. copy file
chmod a+x mapplot.py
cp mapplot.py ~/myapp
2. set environment parameters
Add the following line to ~/.bashrc
export PATH=$PATH:/home/usename/myapp
source ~/.bashrc
Running like this:
mapplot.py -w <win> -f <figsize> -n <normalize> <infile> <cmul>
	mapplot.py -i <input file list> -o <out.pdf> -c <cmul> -w <win> -p <pol>
Examples:
1. mapplot.py -i cta102.fits -o cta102-color.pdf -c 1.8e-3 -w '18 -8 -20 6' -f '7 6' -n 'power 0.5'
2. mapplot.py -w '18 -8 -20 6' -f '4.0 6' -n 'power 0.5' cta102.fits 1.8e-3
https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html
https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.colors.Normalize.html#matplotlib.colors.Normalize
@author: Li, Xiaofeng
Shanghai Astronomical Observatory, Chinese Academy of Sciences
E-mail: [email protected]; [email protected]
"""
import sys
import getopt
from astropy.io import fits
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.colors as mcolors
def add_beam(ax, win, h, bpos=None, pad=2.0):
if bpos==None :
x = win[0] - pad * h['bmaj']*3.6E6
y = win[2] + pad * h['bmaj']*3.6E6
bpos = (x, y)
bmaj = h['bmaj'] * 3.6E6
bmin = h['bmin'] * 3.6E6
bpa = 90 - h['bpa']
e = Ellipse(bpos, bmaj, bmin, angle=bpa, ec='k', facecolor='gray')
ax.add_artist(e)
def annotate(ax, notefile=''):
if notefile != '':
tab = Table.read(notefile, format='csv')
for t in tab:
ax.text(t['x'], t['y'], t['text'])
# ax.annotate('%s' % h['object'], xy=(0.125,0.91), xycoords='figure fraction')
# ax.annotate('%.1f GHz' % (h['crval3']/1.0E9), xy=(0.83, 0.91), xycoords='figure fraction')
def cut_cmap(cmap, N_cut=0):
# cmap = mcolors.Colormap(cmap)
cmap = plt.get_cmap(cmap)
x = np.arange(N_cut, 256) / 256.0
color_index = cmap(x)
cmap = mcolors.ListedColormap(color_index)
return cmap
def get_normalize(args, vmin=0.0, vmax=1.0):
if args == '':
norm = mcolors.Normalize(vmin, vmax)
args = args.split(' ')
name = args[0]
if name == 'linear':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.Normalize(vmin, vmax, True)
elif name == 'power':
if len(args)==1:
gamma = 0.5
if len(args)==2:
gamma = float(args[1])
elif len(args)==4:
gamma, vmin, vmax = np.array(args[1:], dtype='f4')
if gamma < 1.0 and vmin < 0.0:
vmin = 0.0
norm = mcolors.PowerNorm(gamma, vmin, vmax, True)
elif name == 'log':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.LogNorm(vmin, vmax)
elif name == 'symlog':
if len(args)==2:
linthresh = float(args[1])
linscale = 1.0
elif len(args)==3:
linthresh, linscale = np.array(args[1:], dtype='f4')
elif len(args)==5:
linthresh, linscale, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.SymLogNorm(linthresh, linscale, vmin, vmax)
elif name == 'twoslope':
if len(args)==2:
vcenter = float(args[1])
elif len(args)==4:
vcenter, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.TwoSlopeNorm(vcenter, vmin, vmax)
return norm
def add_annotation(ax, infile=''):
if infile == '':
return
with open(infile, 'r') as f:
for line in f.readlines():
row = line.split(',')
row = [col.strip() for col in row]
typ = row[0]
args = row[1:]
if typ == 'text':
x, y, text = args
x, y = float(x), float(y)
ax.text(x, y, text)
elif typ == 'arrow':
x1, y1, x2, y2 = np.array(args, dtype='f4')
ax.annotate("", xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'annotation':
x1, y1, x2, y2 = np.array(args[:-1], dtype='f4')
text = args[-1]
ax.annotate(text, xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'ellipse':
x, y, majax, minax, pa = np.array(args, dtype='f4')
e = Ellipse((x,y), majax, minax, angle=pa, lw=0.5, fc='none', ec='k', ls='-')
ax.add_artist(e)
def set_axis(ax, w):
ax.set_aspect('equal')
ax.set_xlabel('Relative R.A. (mas)')
ax.set_ylabel('Relative Dec. (mas)')
ax.set_xlim(w[0],w[1])
ax.set_ylim(w[2],w[3])
ax.tick_params(which='both', direction='in', length=6, right=True, top=True)
ax.tick_params(which='minor',length=4)
ax.minorticks_on()
def word2pix(w, h):
if w == None:
W = [0, h['naxis1'], 0, h['naxis2']]
else:
x0, x1, y0, y1 = w
X0 = h['crpix1'] + x0/(h['cdelt1']*3.6E6)
Y0 = h['crpix2'] + y0/(h['cdelt2']*3.6E6)
X1 = h['crpix1'] + x1/(h['cdelt1']*3.6E6)
Y1 = h['crpix2'] + y1/(h['cdelt2']*3.6E6)
W = [int(X0), int(X1), int(Y0), int(Y1)]
return W
def pix2word(W, h):
if W == None:
W = [0, h['naxis1'], 0, h['naxis2']]
X0, X1, Y0, Y1 = W
x0 = h['cdelt1']*3.6E6 * (X0-h['crpix1'])
y0 = h['cdelt2']*3.6E6 * (Y0-h['crpix2'])
x1 = h['cdelt1']*3.6E6 * (X1-h['crpix1'])
y1 = h['cdelt2']*3.6E6 * (Y1-h['crpix2'])
w = [x0, x1, y0, y1]
return w
def savefig(outfile, dpi=100):
if outfile.lower().endswith('.pdf') :
plt.savefig(outfile)
elif outfile.lower().endswith('.jpg') or outfile.lower().endswith('.jpeg'):
plt.savefig(outfile, dpi=dpi)
elif outfile.lower().endswith('.png'):
plt.savefig(outfile, dpi=dpi)
def mapplot(infile, cmul, outfile='', win=None, levs=None, bpos=None,
figsize=None, dpi=100, annotationfile='', cmap='', N_cut=0,
norm='', fraction=0.05):
hdul = fits.open(infile)
h = hdul[0].header
# img = hdul[0].data[0, 0, :, :]
if levs==None:
levs = cmul*np.array([-1,1,2,4,8,16,32,64,128,256,512,1024,2048,4096])
# print(win)
if figsize == None :
figsize = (6, 6)
if win == None:
win = pix2word(None, h)
W = word2pix(None, h)
else:
W = word2pix(win, h)
img = hdul[0].data[0, 0, W[2]:W[3], W[0]:W[1]]
if cmap == '':
cmap = 'rainbow'
cmap = cut_cmap(cmap, N_cut)
vmin, vmax = np.min(img), np.max(img)
if norm == '':
norm = 'linear %.3f %.3f' % (vmin, vmax)
norm = get_normalize(norm, vmin, vmax)
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
set_axis(ax, win)
add_beam(ax, win, h, bpos=bpos)
add_annotation(ax, annotationfile)
ax.contour(img, levs, extent=win,
linewidths=0.5, colors='k')
pcm = ax.imshow(img, extent=win, origin='lower',
interpolation='none', cmap=cmap, norm=norm)
cbar = fig.colorbar(pcm, ax=ax, fraction=fraction)
# cbar.ax.minorticks_off()
cbar.ax.tick_params('both',direction='in',right=True,top=True,which='both')
cbar.ax.tick_params(axis='y', labelrotation=90)
fig.tight_layout(pad=0.5)
if outfile != '':
savefig(outfile, dpi)
hdul.close()
def myhelp():
print('Help: mapplot.py -w "18 -8 -20 6" -f "7 6" -n "power 0.5" <cta102.fits> <1.8e-3>')
print(' or: mapplot.py -i cta102.fits -o cta102.png -w "18 -8 -20 6" -f "7 6" -n "power 0.5"')
def main(argv):
# infile = r'3c66a-calib/circe-beam.fits'
infile = ''
outfile = ''
annotationfile = ''
cmul = ''
win = None
levs = None
bpos = None
figsize = None
dpi = 100
colormap = ''
N_cut = 0
norm = ''
fraction = 0.05
try:
opts, args = getopt.getopt(argv, "hi:c:o:w:l:b:f:d:a:n:N:",
['help', 'infile=', 'cmul=', 'outfile=', 'win=',
'bpos=', 'figsize=', 'dpi=', 'annotatefile=', 'levs=', 'colormap=',
'N_cut=', 'norm=', 'fraction='])
except getopt.GetoptError:
myhelp()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
myhelp()
elif opt in ('-i', '--infile'):
infile = arg
elif opt in ('-c', '--cmul'):
cmul = arg
elif opt in ('-o', '--outfile'):
outfile = arg
elif opt in ('-w', '--win'):
win = arg
elif opt in ('-l', '--levs'):
levs = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-b', '--bpos'):
bpos = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-f', '--figsize'):
figsize = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-d', '--dpi'):
dpi = int(arg)
elif opt in ('-a', '--annotatefile'):
annotationfile = arg
elif opt in ('--colormap', ):
colormap = arg
elif opt in ('-N', '--N_cut'):
N_cut = int(arg)
elif opt in ('-n', '--norm'):
norm = arg
elif opt in ('--fraction',):
fraction = float(arg)
if infile=='' and len(args)==2:
infile, cmul = args
if infile=='' and len(args)==3:
infile, outfile, cmul = args
if infile=='' and len(args)==4:
infile, outfile, cmul, win = args
if outfile == '':
outfile = infile.split('.')[0] + '.pdf'
cmul = float(cmul)
if type(win) == str:
win = np.array(win.split(), dtype=np.float64).tolist()
mapplot(infile, cmul, outfile=outfile, win=win, levs=levs, bpos=bpos,
figsize=figsize, dpi=dpi, annotationfile=annotationfile,
cmap=colormap, N_cut=N_cut, norm=norm, fraction=fraction)
if __name__ == '__main__' :
main(sys.argv[1:]) | [((60, 5, 60, 67), 'matplotlib.patches.Ellipse', 'Ellipse', (), '', False, 'from matplotlib.patches import Ellipse\n'), ((73, 8, 73, 26), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(73, 21, 73, 25): 'cmap'}, {}), '(cmap)', True, 'import matplotlib.pyplot as plt\n'), ((76, 8, 76, 43), 'matplotlib.colors.ListedColormap', 'mcolors.ListedColormap', ({(76, 31, 76, 42): 'color_index'}, {}), '(color_index)', True, 'import matplotlib.colors as mcolors\n'), ((191, 8, 191, 25), 'astropy.io.fits.open', 'fits.open', ({(191, 18, 191, 24): 'infile'}, {}), '(infile)', False, 'from astropy.io import fits\n'), ((213, 11, 213, 25), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((65, 8, 65, 42), 'astropy.table.Table.read', 'Table.read', (), '', False, 'from astropy.table import Table\n'), ((74, 5, 74, 26), 'numpy.arange', 'np.arange', ({(74, 15, 74, 20): 'N_cut', (74, 22, 74, 25): '(256)'}, {}), '(N_cut, 256)', True, 'import numpy as np\n'), ((81, 9, 81, 38), 'matplotlib.colors.Normalize', 'mcolors.Normalize', ({(81, 27, 81, 31): 'vmin', (81, 33, 81, 37): 'vmax'}, {}), '(vmin, vmax)', True, 'import matplotlib.colors as mcolors\n'), ((87, 9, 87, 44), 'matplotlib.colors.Normalize', 'mcolors.Normalize', ({(87, 27, 87, 31): 'vmin', (87, 33, 87, 37): 'vmax', (87, 39, 87, 43): 'True'}, {}), '(vmin, vmax, True)', True, 'import matplotlib.colors as mcolors\n'), ((182, 2, 182, 22), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(182, 14, 182, 21): 'outfile'}, {}), '(outfile)', True, 'import matplotlib.pyplot as plt\n'), ((209, 14, 209, 25), 'numpy.min', 'np.min', ({(209, 21, 209, 24): 'img'}, {}), '(img)', True, 'import numpy as np\n'), ((209, 27, 209, 38), 'numpy.max', 'np.max', ({(209, 34, 209, 37): 'img'}, {}), '(img)', True, 'import numpy as np\n'), ((253, 15, 256, 35), 'getopt.getopt', 'getopt.getopt', ({(253, 29, 253, 33): 'argv', (253, 35, 253, 60): '"""hi:c:o:w:l:b:f:d:a:n:N:"""', (254, 8, 256, 34): "['help', 'infile=', 'cmul=', 'outfile=', 'win=', 'bpos=', 'figsize=',\n 'dpi=', 'annotatefile=', 'levs=', 'colormap=', 'N_cut=', 'norm=',\n 'fraction=']"}, {}), "(argv, 'hi:c:o:w:l:b:f:d:a:n:N:', ['help', 'infile=', 'cmul=',\n 'outfile=', 'win=', 'bpos=', 'figsize=', 'dpi=', 'annotatefile=',\n 'levs=', 'colormap=', 'N_cut=', 'norm=', 'fraction='])", False, 'import getopt\n'), ((86, 16, 86, 46), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((97, 9, 97, 51), 'matplotlib.colors.PowerNorm', 'mcolors.PowerNorm', ({(97, 27, 97, 32): 'gamma', (97, 34, 97, 38): 'vmin', (97, 40, 97, 44): 'vmax', (97, 46, 97, 50): 'True'}, {}), '(gamma, vmin, vmax, True)', True, 'import matplotlib.colors as mcolors\n'), ((184, 2, 184, 31), 'matplotlib.pyplot.savefig', 'plt.savefig', (), '', True, 'import matplotlib.pyplot as plt\n'), ((196, 14, 196, 72), 'numpy.array', 'np.array', ({(196, 23, 196, 71): '[-1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]'}, {}), '([-1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096])', True, 'import numpy as np\n'), ((259, 2, 259, 13), 'sys.exit', 'sys.exit', ({(259, 11, 259, 12): '(2)'}, {}), '(2)', False, 'import sys\n'), ((101, 9, 101, 36), 'matplotlib.colors.LogNorm', 'mcolors.LogNorm', ({(101, 25, 101, 29): 'vmin', (101, 31, 101, 35): 'vmax'}, {}), '(vmin, vmax)', True, 'import matplotlib.colors as mcolors\n'), ((186, 2, 186, 31), 'matplotlib.pyplot.savefig', 'plt.savefig', (), '', True, 'import matplotlib.pyplot as plt\n'), ((94, 23, 94, 53), 'numpy.array', 'np.array', (), '', 
True, 'import numpy as np\n'), ((100, 16, 100, 46), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((110, 9, 110, 60), 'matplotlib.colors.SymLogNorm', 'mcolors.SymLogNorm', ({(110, 28, 110, 37): 'linthresh', (110, 39, 110, 47): 'linscale', (110, 49, 110, 53): 'vmin', (110, 55, 110, 59): 'vmax'}, {}), '(linthresh, linscale, vmin, vmax)', True, 'import matplotlib.colors as mcolors\n'), ((134, 21, 134, 47), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((116, 9, 116, 50), 'matplotlib.colors.TwoSlopeNorm', 'mcolors.TwoSlopeNorm', ({(116, 30, 116, 37): 'vcenter', (116, 39, 116, 43): 'vmin', (116, 45, 116, 49): 'vmax'}, {}), '(vcenter, vmin, vmax)', True, 'import matplotlib.colors as mcolors\n'), ((138, 21, 138, 52), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((107, 25, 107, 55), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((143, 29, 143, 55), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((144, 8, 144, 81), 'matplotlib.patches.Ellipse', 'Ellipse', (), '', False, 'from matplotlib.patches import Ellipse\n'), ((109, 37, 109, 67), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((115, 25, 115, 55), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n')] |
pizhi/umbrella | umbrella/api/v1/router.py | 95027e6e11a6c8df2ab5f7c202b0c1d2183f839a | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from umbrella.api.v1 import api
from umbrella.common import wsgi
class API(wsgi.Router):
"""WSGI router for Glance v1 API requests."""
def __init__(self, mapper):
api_resource = api.create_resource()
mapper.connect("/",
controller=api_resource,
action="index")
mapper.connect("/images",
controller=api_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect("/images/{id}",
controller=api_resource,
action="show",
conditions=dict(method=["GET"]))
mapper.connect("/net/{instance_uuid}",
controller=api_resource,
action="get_net_sample",
conditions=dict(method=["GET"]))
mapper.connect("/cpu/{instance_uuid}",
controller=api_resource,
action="get_cpu_sample",
conditions=dict(method=["GET"]))
mapper.connect("/disk/{instance_uuid}",
controller=api_resource,
action="get_disk_sample",
conditions=dict(method=["GET"]))
mapper.connect("/mem/{instance_uuid}",
controller=api_resource,
action="get_mem_sample",
conditions=dict(method=["GET"]))
super(API, self).__init__(mapper)
| [((27, 23, 27, 44), 'umbrella.api.v1.api.create_resource', 'api.create_resource', ({}, {}), '()', False, 'from umbrella.api.v1 import api\n')] |
butala/TomograPy | exemples/test_thomson_simu.py | a1da41f1e0b7406a1b770e56428789c54175de20 | #!/usr/bin/env python
import time
import numpy as np
import tomograpy
import lo
# object
obj = tomograpy.centered_cubic_map(10, 64)
obj[:] = tomograpy.phantom.shepp_logan(obj.shape)
# data
radius = 200
a = tomograpy.fov(obj, radius)
data = tomograpy.centered_stack(a, 128, n_images=60, radius=radius, max_lon=np.pi)
# model
kwargs = {"pb":"pb", "obj_rmin":1.5, "data_rmin":1.5}
P, D, obj_mask, data_mask = tomograpy.models.thomson(data, obj, u=.5, **kwargs)
# projection
t = time.time()
data[:] = (P * obj.ravel()).reshape(data.shape)
print("projection time : " + str(time.time() - t))
# data
# backprojection
t = time.time()
x0 = P.T * data.ravel()
bpj = x0.reshape(obj.shape)
print("backprojection time : " + str(time.time() - t))
# inversion using scipy.sparse.linalg
t = time.time()
sol = lo.acg(P, data.ravel(), D, 1e-3 * np.ones(3), maxiter=100, tol=1e-8)
sol = sol.reshape(obj.shape)
print("inversion time : " + str(time.time() - t))
| [((7, 6, 7, 42), 'tomograpy.centered_cubic_map', 'tomograpy.centered_cubic_map', ({(7, 35, 7, 37): '10', (7, 39, 7, 41): '64'}, {}), '(10, 64)', False, 'import tomograpy\n'), ((8, 9, 8, 49), 'tomograpy.phantom.shepp_logan', 'tomograpy.phantom.shepp_logan', ({(8, 39, 8, 48): 'obj.shape'}, {}), '(obj.shape)', False, 'import tomograpy\n'), ((11, 4, 11, 30), 'tomograpy.fov', 'tomograpy.fov', ({(11, 18, 11, 21): 'obj', (11, 23, 11, 29): 'radius'}, {}), '(obj, radius)', False, 'import tomograpy\n'), ((12, 7, 12, 82), 'tomograpy.centered_stack', 'tomograpy.centered_stack', (), '', False, 'import tomograpy\n'), ((15, 28, 15, 79), 'tomograpy.models.thomson', 'tomograpy.models.thomson', (), '', False, 'import tomograpy\n'), ((17, 4, 17, 15), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((22, 4, 22, 15), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((27, 4, 27, 15), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((28, 40, 28, 50), 'numpy.ones', 'np.ones', ({(28, 48, 28, 49): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((19, 33, 19, 44), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((25, 37, 25, 48), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((30, 32, 30, 43), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
ulope/geopy | geopy/geocoders/google.py | 605d0d84137a93949ad03820fa31dc2dab77f089 | import logging
from urllib import urlencode
from urllib2 import urlopen
import simplejson
import xml
from xml.parsers.expat import ExpatError
from geopy.geocoders.base import Geocoder
from geopy import Point, Location, util
class Google(Geocoder):
"""Geocoder using the Google Maps API."""
def __init__(self, api_key=None, domain='maps.google.com',
resource='maps/geo', format_string='%s', output_format='kml'):
"""Initialize a customized Google geocoder with location-specific
address information and your Google Maps API key.
``api_key`` should be a valid Google Maps API key. It is required for
the 'maps/geo' resource to work.
        ``domain`` should be the Google Maps domain to connect to. The default
        is 'maps.google.com', but if you're geocoding addresses in the UK (for
example), you may want to set it to 'maps.google.co.uk'.
``resource`` is the HTTP resource to give the query parameter.
'maps/geo' is the HTTP geocoder and is a documented API resource.
'maps' is the actual Google Maps interface and its use for just
geocoding is undocumented. Anything else probably won't work.
``format_string`` is a string containing '%s' where the string to
geocode should be interpolated before querying the geocoder.
For example: '%s, Mountain View, CA'. The default is just '%s'.
``output_format`` can be 'json', 'xml', 'kml', 'csv', or 'js' and will
control the output format of Google's response. The default is 'kml'
since it is supported by both the 'maps' and 'maps/geo' resources. The
'js' format is the most likely to break since it parses Google's
JavaScript, which could change. However, it currently returns the best
results for restricted geocoder areas such as the UK.
"""
self.api_key = api_key
self.domain = domain
self.resource = resource
self.format_string = format_string
self.output_format = output_format
@property
def url(self):
domain = self.domain.strip('/')
resource = self.resource.strip('/')
return "http://%(domain)s/%(resource)s?%%s" % locals()
def geocode(self, string, exactly_one=True, language_code=None,
sensor=False, viewport_center=None, viewport_span=None):
params = {'q': self.format_string % string,
'output': self.output_format.lower(),
'sensor': str(sensor).lower(),
}
if language_code:
params.update({'gl': language_code})
if viewport_center and viewport_span:
params.update({
'll': viewport_center,
'spn': viewport_span,
})
if self.resource.rstrip('/').endswith('geo'):
# An API key is only required for the HTTP geocoder.
params['key'] = self.api_key
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one)
def reverse(self, coord, exactly_one=True):
(lat,lng) = coord
params = {'q': self.format_string % lat+','+self.format_string % lng,
'output': self.output_format.lower()
}
if self.resource.rstrip('/').endswith('geo'):
# An API key is only required for the HTTP geocoder.
params['key'] = self.api_key
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one, reverse=True)
def geocode_url(self, url, exactly_one=True, reverse=False):
logging.getLogger().info("Fetching %s..." % url)
page = urlopen(url)
dispatch = getattr(self, 'parse_' + self.output_format)
return dispatch(page, exactly_one, reverse)
def parse_xml(self, page, exactly_one=True, reverse=False):
"""Parse a location name, latitude, and longitude from an XML response.
"""
if not isinstance(page, basestring):
page = util.decode_page(page)
try:
doc = xml.dom.minidom.parseString(page)
except ExpatError:
places = []
else:
places = doc.getElementsByTagName('Placemark')
if (exactly_one and len(places) != 1) and (not reverse):
raise ValueError("Didn't find exactly one placemark! " \
"(Found %d.)" % len(places))
def parse_place(place):
location = util.get_first_text(place, ['address', 'name']) or None
points = place.getElementsByTagName('Point')
point = points and points[0] or None
coords = util.get_first_text(point, 'coordinates') or None
if coords:
longitude, latitude = [float(f) for f in coords.split(',')[:2]]
else:
latitude = longitude = None
_, (latitude, longitude) = self.geocode(location)
return (location, (latitude, longitude))
if exactly_one:
return parse_place(places[0])
else:
return (parse_place(place) for place in places)
def parse_csv(self, page, exactly_one=True, reverse=False):
raise NotImplementedError
def parse_kml(self, page, exactly_one=True, reverse=False):
return self.parse_xml(page, exactly_one, reverse)
def parse_json(self, page, exactly_one=True, reverse=False):
if not isinstance(page, basestring):
page = util.decode_page(page)
json = simplejson.loads(page)
places = json.get('Placemark', [])
if (exactly_one and len(places) != 1) and (not reverse):
raise ValueError("Didn't find exactly one placemark! " \
"(Found %d.)" % len(places))
def parse_place(place):
location = place.get('address')
longitude, latitude = place['Point']['coordinates'][:2]
# Add support for pulling out the canonical name
locality = place.get('AddressDetails',{}).get('Country',{}).get('AdministrativeArea',{}).get('Locality',{}).get('LocalityName')
administrative = place.get('AddressDetails',{}).get('Country',{}).get('AdministrativeArea',{}).get('AdministrativeAreaName')
return util.RichResult((location, (latitude, longitude)), locality=locality, administrative=administrative)
if exactly_one:
return parse_place(places[0])
else:
return (parse_place(place) for place in places)
def parse_js(self, page, exactly_one=True, reverse=False):
"""This parses JavaScript returned by queries the actual Google Maps
interface and could thus break easily. However, this is desirable if
the HTTP geocoder doesn't work for addresses in your country (the
UK, for example).
"""
if not isinstance(page, basestring):
page = util.decode_page(page)
LATITUDE = r"[\s,]lat:\s*(?P<latitude>-?\d+\.\d+)"
LONGITUDE = r"[\s,]lng:\s*(?P<longitude>-?\d+\.\d+)"
LOCATION = r"[\s,]laddr:\s*'(?P<location>.*?)(?<!\\)',"
ADDRESS = r"(?P<address>.*?)(?:(?: \(.*?@)|$)"
MARKER = '.*?'.join([LATITUDE, LONGITUDE, LOCATION])
MARKERS = r"{markers: (?P<markers>\[.*?\]),\s*polylines:"
def parse_marker(marker):
latitude, longitude, location = marker
location = re.match(ADDRESS, location).group('address')
latitude, longitude = float(latitude), float(longitude)
return (location, (latitude, longitude))
match = re.search(MARKERS, page)
markers = match and match.group('markers') or ''
markers = re.findall(MARKER, markers)
if exactly_one:
if len(markers) != 1 and (not reverse):
raise ValueError("Didn't find exactly one marker! " \
"(Found %d.)" % len(markers))
marker = markers[0]
return parse_marker(marker)
else:
return (parse_marker(marker) for marker in markers)
| [] |
hiankun/py_sandbox | interactive_grabcut/repo/drag2draw.py | 6623edd0c8ab17641e1ce09fba7da34c4865fc4f | # source: https://www.youtube.com/watch?v=U0sVp1xLiyo
from tkinter import *
def paint(event):
color = 'red'
x1, y1 = (event.x-1), (event.y-1)
x2, y2 = (event.x+1), (event.y+1)
c.create_oval(x1,y1,x2,y2,fill=color,outline=color)
master = Tk()
c = Canvas(master, width=600, height=400, bg='white')
c.pack(expand=True, fill=BOTH)
c.bind('<B1-Motion>', paint)
master.mainloop()
| [] |
lin483/Funny-Nations | migrations/20220114_03_Heqaz-insert-default-serverinfo.py | 2bb1cd23a3d5f1e4a4854c73ac27f62c98127ef6 | """
insert default serverInfo
"""
from yoyo import step
__depends__ = {'20220114_02_lHBKM-new-table-serverinfo'}
steps = [
step("INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);")
]
| [((10, 4, 10, 65), 'yoyo.step', 'step', ({(10, 9, 10, 64): '"""INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);"""'}, {}), "('INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);')", False, 'from yoyo import step\n')] |
slawqo/python-neutronclient | neutronclient/osc/v2/vpnaas/ipsec_site_connection.py | ee08644c5f2424a40c70010dcf0fa2ad84809bfc | # Copyright 2017 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from osc_lib.cli import format_columns
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.utils import columns as column_util
from oslo_log import log as logging
from neutronclient._i18n import _
from neutronclient.common import utils as nc_utils
from neutronclient.osc import utils as osc_utils
from neutronclient.osc.v2.vpnaas import utils as vpn_utils
LOG = logging.getLogger(__name__)
_formatters = {
'peer_cidrs': format_columns.ListColumn
}
_attr_map = (
('id', 'ID', column_util.LIST_BOTH),
('name', 'Name', column_util.LIST_BOTH),
('peer_address', 'Peer Address', column_util.LIST_BOTH),
('auth_mode', 'Authentication Algorithm', column_util.LIST_BOTH),
('status', 'Status', column_util.LIST_BOTH),
('tenant_id', 'Project', column_util.LIST_LONG_ONLY),
('peer_cidrs', 'Peer CIDRs', column_util.LIST_LONG_ONLY),
('vpnservice_id', 'VPN Service', column_util.LIST_LONG_ONLY),
('ipsecpolicy_id', 'IPSec Policy', column_util.LIST_LONG_ONLY),
('ikepolicy_id', 'IKE Policy', column_util.LIST_LONG_ONLY),
('mtu', 'MTU', column_util.LIST_LONG_ONLY),
('initiator', 'Initiator', column_util.LIST_LONG_ONLY),
('admin_state_up', 'State', column_util.LIST_LONG_ONLY),
('description', 'Description', column_util.LIST_LONG_ONLY),
('psk', 'Pre-shared Key', column_util.LIST_LONG_ONLY),
('route_mode', 'Route Mode', column_util.LIST_LONG_ONLY),
('local_id', 'Local ID', column_util.LIST_LONG_ONLY),
('peer_id', 'Peer ID', column_util.LIST_LONG_ONLY),
('local_ep_group_id', 'Local Endpoint Group ID',
column_util.LIST_LONG_ONLY),
('peer_ep_group_id', 'Peer Endpoint Group ID', column_util.LIST_LONG_ONLY),
)
def _convert_to_lowercase(string):
return string.lower()
def _get_common_parser(parser, is_create=True):
parser.add_argument(
'--description',
metavar='<description>',
help=_('Description for the connection'))
parser.add_argument(
'--dpd',
metavar="action=ACTION,interval=INTERVAL,timeout=TIMEOUT",
type=nc_utils.str2dict_type(
optional_keys=['action', 'interval', 'timeout']),
help=vpn_utils.dpd_help("IPsec connection"))
parser.add_argument(
'--mtu',
help=_('MTU size for the connection'))
parser.add_argument(
'--initiator',
choices=['bi-directional', 'response-only'],
type=_convert_to_lowercase,
help=_('Initiator state'))
peer_group = parser.add_mutually_exclusive_group()
peer_group.add_argument(
'--peer-cidr',
dest='peer_cidrs',
help=_('Remote subnet(s) in CIDR format. '
'Cannot be specified when using endpoint groups. Only '
'applicable, if subnet provided for VPN service.')
)
peer_group.add_argument(
'--local-endpoint-group',
help=_('Local endpoint group (name or ID) with subnet(s) '
'for IPsec connection')
)
parser.add_argument(
'--peer-endpoint-group',
help=_('Peer endpoint group (name or ID) with CIDR(s) for '
'IPSec connection'))
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
help=_("Enable IPSec site connection")
)
admin_group.add_argument(
'--disable',
action='store_true',
help=_("Disable IPSec site connection")
)
parser.add_argument(
'--local-id',
help=_('An ID to be used instead of the external IP '
'address for a virtual router'))
return parser
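# A hedged illustration of how the --dpd option defined above is parsed:
# str2dict_type turns "action=hold,interval=30,timeout=120" into
# {'action': 'hold', 'interval': '30', 'timeout': '120'} (values are kept as
# strings), and _get_common_attrs() below then runs the result through
# vpn_utils.validate_dpd_dict(). The action/interval/timeout values here are
# examples only, not defaults.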
def _get_common_attrs(client_manager, parsed_args, is_create=True):
attrs = {}
if is_create:
if 'project' in parsed_args and parsed_args.project is not None:
attrs['tenant_id'] = osc_utils.find_project(
client_manager.identity,
parsed_args.project,
parsed_args.project_domain,
).id
if parsed_args.description:
attrs['description'] = str(parsed_args.description)
if parsed_args.mtu:
attrs['mtu'] = parsed_args.mtu
if parsed_args.enable:
attrs['admin_state_up'] = True
if parsed_args.disable:
attrs['admin_state_up'] = False
if parsed_args.initiator:
attrs['initiator'] = parsed_args.initiator
if parsed_args.dpd:
vpn_utils.validate_dpd_dict(parsed_args.dpd)
attrs['dpd'] = parsed_args.dpd
if parsed_args.local_endpoint_group:
_local_epg = client_manager.neutronclient.find_resource(
'endpoint_group',
parsed_args.local_endpoint_group,
cmd_resource='endpoint_group')['id']
attrs['local_ep_group_id'] = _local_epg
if parsed_args.peer_endpoint_group:
_peer_epg = client_manager.neutronclient.find_resource(
'endpoint_group',
parsed_args.peer_endpoint_group,
cmd_resource='endpoint_group')['id']
attrs['peer_ep_group_id'] = _peer_epg
if parsed_args.peer_cidrs:
attrs['peer_cidrs'] = parsed_args.peer_cidrs
if parsed_args.local_id:
attrs['local_id'] = parsed_args.local_id
return attrs
class CreateIPsecSiteConnection(command.ShowOne):
_description = _("Create an IPsec site connection")
def get_parser(self, prog_name):
parser = super(CreateIPsecSiteConnection, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--peer-id',
required=True,
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN'))
parser.add_argument(
'--peer-address',
required=True,
help=_('Peer gateway public IPv4/IPv6 address or FQDN'))
parser.add_argument(
'--psk',
required=True,
help=_('Pre-shared key string.'))
parser.add_argument(
'--vpnservice',
metavar='VPNSERVICE',
required=True,
help=_('VPN service instance associated with this '
'connection (name or ID)'))
parser.add_argument(
'--ikepolicy',
metavar='IKEPOLICY',
required=True,
help=_('IKE policy associated with this connection (name or ID)'))
parser.add_argument(
'--ipsecpolicy',
metavar='IPSECPOLICY',
required=True,
help=_('IPsec policy associated with this connection '
'(name or ID)'))
parser.add_argument(
'name',
metavar='<name>',
help=_('Set friendly name for the connection'))
osc_utils.add_project_owner_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager, parsed_args)
if parsed_args.vpnservice:
_vpnservice_id = client.find_resource(
'vpnservice',
parsed_args.vpnservice,
cmd_resource='vpnservice')['id']
attrs['vpnservice_id'] = _vpnservice_id
if parsed_args.ikepolicy:
_ikepolicy_id = client.find_resource(
'ikepolicy',
parsed_args.ikepolicy,
cmd_resource='ikepolicy')['id']
attrs['ikepolicy_id'] = _ikepolicy_id
if parsed_args.ipsecpolicy:
_ipsecpolicy_id = client.find_resource(
'ipsecpolicy',
parsed_args.ipsecpolicy,
cmd_resource='ipsecpolicy')['id']
attrs['ipsecpolicy_id'] = _ipsecpolicy_id
if parsed_args.peer_id:
attrs['peer_id'] = parsed_args.peer_id
if parsed_args.peer_address:
attrs['peer_address'] = parsed_args.peer_address
if parsed_args.psk:
attrs['psk'] = parsed_args.psk
if parsed_args.name:
attrs['name'] = parsed_args.name
if (bool(parsed_args.local_endpoint_group) !=
bool(parsed_args.peer_endpoint_group)):
message = _("You must specify both local and peer endpoint "
"groups")
raise exceptions.CommandError(message)
if not parsed_args.peer_cidrs and not parsed_args.local_endpoint_group:
message = _("You must specify endpoint groups or peer CIDR(s)")
raise exceptions.CommandError(message)
obj = client.create_ipsec_site_connection(
{'ipsec_site_connection': attrs})['ipsec_site_connection']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns, formatters=_formatters)
return display_columns, data
class DeleteIPsecSiteConnection(command.Command):
_description = _("Delete IPsec site connection(s)")
def get_parser(self, prog_name):
parser = super(DeleteIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
nargs='+',
help=_('IPsec site connection to delete (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
result = 0
for ipsec_conn in parsed_args.ipsec_site_connection:
try:
ipsec_con_id = client.find_resource(
'ipsec_site_connection',
ipsec_conn,
cmd_resource='ipsec_site_connection')['id']
client.delete_ipsec_site_connection(ipsec_con_id)
except Exception as e:
result += 1
LOG.error(_("Failed to delete IPsec site connection with "
"name or ID '%(ipsec_site_conn)s': %(e)s"),
{'ipsec_site_conn': ipsec_conn, 'e': e})
if result > 0:
total = len(parsed_args.ipsec_site_connection)
msg = (_("%(result)s of %(total)s IPsec site connection failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListIPsecSiteConnection(command.Lister):
_description = _("List IPsec site connections "
"that belong to a given project")
def get_parser(self, prog_name):
parser = super(ListIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_("List additional fields in output")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
obj = client.list_ipsec_site_connections()['ipsec_site_connections']
headers, columns = column_util.get_column_definitions(
_attr_map, long_listing=parsed_args.long)
return (headers, (utils.get_dict_properties(
s, columns, formatters=_formatters) for s in obj))
class SetIPsecSiteConnection(command.Command):
_description = _("Set IPsec site connection properties")
def get_parser(self, prog_name):
parser = super(SetIPsecSiteConnection, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--peer-id',
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN'))
parser.add_argument(
'--peer-address',
help=_('Peer gateway public IPv4/IPv6 address or FQDN'))
parser.add_argument(
'--name',
metavar='<name>',
help=_('Set friendly name for the connection'))
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
help=_('IPsec site connection to set (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager,
parsed_args, is_create=False)
if parsed_args.peer_id:
attrs['peer_id'] = parsed_args.peer_id
if parsed_args.peer_address:
attrs['peer_address'] = parsed_args.peer_address
if parsed_args.name:
attrs['name'] = parsed_args.name
ipsec_conn_id = client.find_resource(
'ipsec_site_connection', parsed_args.ipsec_site_connection,
cmd_resource='ipsec_site_connection')['id']
try:
client.update_ipsec_site_connection(
ipsec_conn_id,
{'ipsec_site_connection': attrs})
except Exception as e:
msg = (_("Failed to set IPsec site "
"connection '%(ipsec_conn)s': %(e)s")
% {'ipsec_conn': parsed_args.ipsec_site_connection, 'e': e})
raise exceptions.CommandError(msg)
class ShowIPsecSiteConnection(command.ShowOne):
_description = _("Show information of a given IPsec site connection")
def get_parser(self, prog_name):
parser = super(ShowIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
help=_('IPsec site connection to display (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
ipsec_site_id = client.find_resource(
'ipsec_site_connection', parsed_args.ipsec_site_connection,
cmd_resource='ipsec_site_connection')['id']
obj = client.show_ipsec_site_connection(
ipsec_site_id)['ipsec_site_connection']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns, formatters=_formatters)
return (display_columns, data)
| [((30, 6, 30, 33), 'oslo_log.log.getLogger', 'logging.getLogger', ({(30, 24, 30, 32): '__name__'}, {}), '(__name__)', True, 'from oslo_log import log as logging\n'), ((163, 19, 163, 55), 'neutronclient._i18n._', '_', ({(163, 21, 163, 54): '"""Create an IPsec site connection"""'}, {}), "('Create an IPsec site connection')", False, 'from neutronclient._i18n import _\n'), ((250, 19, 250, 55), 'neutronclient._i18n._', '_', ({(250, 21, 250, 54): '"""Delete IPsec site connection(s)"""'}, {}), "('Delete IPsec site connection(s)')", False, 'from neutronclient._i18n import _\n'), ((285, 19, 286, 54), 'neutronclient._i18n._', '_', ({(285, 21, 286, 53): '"""List IPsec site connections that belong to a given project"""'}, {}), "('List IPsec site connections that belong to a given project')", False, 'from neutronclient._i18n import _\n'), ((308, 19, 308, 60), 'neutronclient._i18n._', '_', ({(308, 21, 308, 59): '"""Set IPsec site connection properties"""'}, {}), "('Set IPsec site connection properties')", False, 'from neutronclient._i18n import _\n'), ((355, 19, 355, 73), 'neutronclient._i18n._', '_', ({(355, 21, 355, 72): '"""Show information of a given IPsec site connection"""'}, {}), "('Show information of a given IPsec site connection')", False, 'from neutronclient._i18n import _\n'), ((141, 8, 141, 52), 'neutronclient.osc.v2.vpnaas.utils.validate_dpd_dict', 'vpn_utils.validate_dpd_dict', ({(141, 36, 141, 51): 'parsed_args.dpd'}, {}), '(parsed_args.dpd)', True, 'from neutronclient.osc.v2.vpnaas import utils as vpn_utils\n'), ((202, 8, 202, 60), 'neutronclient.osc.utils.add_project_owner_option_to_parser', 'osc_utils.add_project_owner_option_to_parser', ({(202, 53, 202, 59): 'parser'}, {}), '(parser)', True, 'from neutronclient.osc import utils as osc_utils\n'), ((244, 35, 244, 74), 'osc_lib.utils.columns.get_columns', 'column_util.get_columns', ({(244, 59, 244, 62): 'obj', (244, 64, 244, 73): '_attr_map'}, {}), '(obj, _attr_map)', True, 'from osc_lib.utils import columns as column_util\n'), ((245, 15, 245, 78), 'osc_lib.utils.get_dict_properties', 'utils.get_dict_properties', (), '', False, 'from osc_lib import utils\n'), ((301, 27, 302, 53), 'osc_lib.utils.columns.get_column_definitions', 'column_util.get_column_definitions', (), '', True, 'from osc_lib.utils import columns as column_util\n'), ((372, 35, 372, 74), 'osc_lib.utils.columns.get_columns', 'column_util.get_columns', ({(372, 59, 372, 62): 'obj', (372, 64, 372, 73): '_attr_map'}, {}), '(obj, _attr_map)', True, 'from osc_lib.utils import columns as column_util\n'), ((373, 15, 373, 78), 'osc_lib.utils.get_dict_properties', 'utils.get_dict_properties', (), '', False, 'from osc_lib import utils\n'), ((71, 13, 71, 48), 'neutronclient._i18n._', '_', ({(71, 15, 71, 47): '"""Description for the connection"""'}, {}), "('Description for the connection')", False, 'from neutronclient._i18n import _\n'), ((75, 13, 76, 60), 'neutronclient.common.utils.str2dict_type', 'nc_utils.str2dict_type', (), '', True, 'from neutronclient.common import utils as nc_utils\n'), ((77, 13, 77, 51), 'neutronclient.osc.v2.vpnaas.utils.dpd_help', 'vpn_utils.dpd_help', ({(77, 32, 77, 50): '"""IPsec connection"""'}, {}), "('IPsec connection')", True, 'from neutronclient.osc.v2.vpnaas import utils as vpn_utils\n'), ((80, 13, 80, 45), 'neutronclient._i18n._', '_', ({(80, 15, 80, 44): '"""MTU size for the connection"""'}, {}), "('MTU size for the connection')", False, 'from neutronclient._i18n import _\n'), ((85, 13, 85, 33), 'neutronclient._i18n._', '_', ({(85, 15, 85, 32): 
'"""Initiator state"""'}, {}), "('Initiator state')", False, 'from neutronclient._i18n import _\n'), ((90, 13, 92, 65), 'neutronclient._i18n._', '_', ({(90, 15, 92, 64): '"""Remote subnet(s) in CIDR format. Cannot be specified when using endpoint groups. Only applicable, if subnet provided for VPN service."""'}, {}), "('Remote subnet(s) in CIDR format. Cannot be specified when using endpoint groups. Only applicable, if subnet provided for VPN service.'\n )", False, 'from neutronclient._i18n import _\n'), ((96, 13, 97, 38), 'neutronclient._i18n._', '_', ({(96, 15, 97, 37): '"""Local endpoint group (name or ID) with subnet(s) for IPsec connection"""'}, {}), "('Local endpoint group (name or ID) with subnet(s) for IPsec connection')", False, 'from neutronclient._i18n import _\n'), ((101, 13, 102, 34), 'neutronclient._i18n._', '_', ({(101, 15, 102, 33): '"""Peer endpoint group (name or ID) with CIDR(s) for IPSec connection"""'}, {}), "('Peer endpoint group (name or ID) with CIDR(s) for IPSec connection')", False, 'from neutronclient._i18n import _\n'), ((107, 13, 107, 46), 'neutronclient._i18n._', '_', ({(107, 15, 107, 45): '"""Enable IPSec site connection"""'}, {}), "('Enable IPSec site connection')", False, 'from neutronclient._i18n import _\n'), ((112, 13, 112, 47), 'neutronclient._i18n._', '_', ({(112, 15, 112, 46): '"""Disable IPSec site connection"""'}, {}), "('Disable IPSec site connection')", False, 'from neutronclient._i18n import _\n'), ((116, 13, 117, 46), 'neutronclient._i18n._', '_', ({(116, 15, 117, 45): '"""An ID to be used instead of the external IP address for a virtual router"""'}, {}), "('An ID to be used instead of the external IP address for a virtual router')", False, 'from neutronclient._i18n import _\n'), ((236, 22, 237, 33), 'neutronclient._i18n._', '_', ({(236, 24, 237, 32): '"""You must specify both local and peer endpoint groups"""'}, {}), "('You must specify both local and peer endpoint groups')", False, 'from neutronclient._i18n import _\n'), ((238, 18, 238, 50), 'osc_lib.exceptions.CommandError', 'exceptions.CommandError', ({(238, 42, 238, 49): 'message'}, {}), '(message)', False, 'from osc_lib import exceptions\n'), ((240, 22, 240, 75), 'neutronclient._i18n._', '_', ({(240, 24, 240, 74): '"""You must specify endpoint groups or peer CIDR(s)"""'}, {}), "('You must specify endpoint groups or peer CIDR(s)')", False, 'from neutronclient._i18n import _\n'), ((241, 18, 241, 50), 'osc_lib.exceptions.CommandError', 'exceptions.CommandError', ({(241, 42, 241, 49): 'message'}, {}), '(message)', False, 'from osc_lib import exceptions\n'), ((281, 18, 281, 46), 'osc_lib.exceptions.CommandError', 'exceptions.CommandError', ({(281, 42, 281, 45): 'msg'}, {}), '(msg)', False, 'from osc_lib import exceptions\n'), ((125, 33, 129, 13), 'neutronclient.osc.utils.find_project', 'osc_utils.find_project', ({(126, 16, 126, 39): 'client_manager.identity', (127, 16, 127, 35): 'parsed_args.project', (128, 16, 128, 42): 'parsed_args.project_domain'}, {}), '(client_manager.identity, parsed_args.project,\n parsed_args.project_domain)', True, 'from neutronclient.osc import utils as osc_utils\n'), ((171, 17, 172, 72), 'neutronclient._i18n._', '_', ({(171, 19, 172, 71): '"""Peer router identity for authentication. Can be IPv4/IPv6 address, e-mail address, key id, or FQDN"""'}, {}), "('Peer router identity for authentication. 
Can be IPv4/IPv6 address, e-mail address, key id, or FQDN'\n )", False, 'from neutronclient._i18n import _\n'), ((176, 17, 176, 67), 'neutronclient._i18n._', '_', ({(176, 19, 176, 66): '"""Peer gateway public IPv4/IPv6 address or FQDN"""'}, {}), "('Peer gateway public IPv4/IPv6 address or FQDN')", False, 'from neutronclient._i18n import _\n'), ((180, 17, 180, 44), 'neutronclient._i18n._', '_', ({(180, 19, 180, 43): '"""Pre-shared key string."""'}, {}), "('Pre-shared key string.')", False, 'from neutronclient._i18n import _\n'), ((185, 17, 186, 45), 'neutronclient._i18n._', '_', ({(185, 19, 186, 44): '"""VPN service instance associated with this connection (name or ID)"""'}, {}), "('VPN service instance associated with this connection (name or ID)')", False, 'from neutronclient._i18n import _\n'), ((191, 17, 191, 77), 'neutronclient._i18n._', '_', ({(191, 19, 191, 76): '"""IKE policy associated with this connection (name or ID)"""'}, {}), "('IKE policy associated with this connection (name or ID)')", False, 'from neutronclient._i18n import _\n'), ((196, 17, 197, 34), 'neutronclient._i18n._', '_', ({(196, 19, 197, 33): '"""IPsec policy associated with this connection (name or ID)"""'}, {}), "('IPsec policy associated with this connection (name or ID)')", False, 'from neutronclient._i18n import _\n'), ((201, 17, 201, 58), 'neutronclient._i18n._', '_', ({(201, 19, 201, 57): '"""Set friendly name for the connection"""'}, {}), "('Set friendly name for the connection')", False, 'from neutronclient._i18n import _\n'), ((258, 17, 258, 66), 'neutronclient._i18n._', '_', ({(258, 19, 258, 65): '"""IPsec site connection to delete (name or ID)"""'}, {}), "('IPsec site connection to delete (name or ID)')", False, 'from neutronclient._i18n import _\n'), ((279, 19, 280, 34), 'neutronclient._i18n._', '_', ({(279, 21, 280, 33): '"""%(result)s of %(total)s IPsec site connection failed to delete."""'}, {}), "('%(result)s of %(total)s IPsec site connection failed to delete.')", False, 'from neutronclient._i18n import _\n'), ((294, 17, 294, 54), 'neutronclient._i18n._', '_', ({(294, 19, 294, 53): '"""List additional fields in output"""'}, {}), "('List additional fields in output')", False, 'from neutronclient._i18n import _\n'), ((303, 26, 304, 47), 'osc_lib.utils.get_dict_properties', 'utils.get_dict_properties', (), '', False, 'from osc_lib import utils\n'), ((315, 17, 316, 72), 'neutronclient._i18n._', '_', ({(315, 19, 316, 71): '"""Peer router identity for authentication. Can be IPv4/IPv6 address, e-mail address, key id, or FQDN"""'}, {}), "('Peer router identity for authentication. 
Can be IPv4/IPv6 address, e-mail address, key id, or FQDN'\n )", False, 'from neutronclient._i18n import _\n'), ((319, 17, 319, 67), 'neutronclient._i18n._', '_', ({(319, 19, 319, 66): '"""Peer gateway public IPv4/IPv6 address or FQDN"""'}, {}), "('Peer gateway public IPv4/IPv6 address or FQDN')", False, 'from neutronclient._i18n import _\n'), ((323, 17, 323, 58), 'neutronclient._i18n._', '_', ({(323, 19, 323, 57): '"""Set friendly name for the connection"""'}, {}), "('Set friendly name for the connection')", False, 'from neutronclient._i18n import _\n'), ((327, 17, 327, 63), 'neutronclient._i18n._', '_', ({(327, 19, 327, 62): '"""IPsec site connection to set (name or ID)"""'}, {}), "('IPsec site connection to set (name or ID)')", False, 'from neutronclient._i18n import _\n'), ((351, 18, 351, 46), 'osc_lib.exceptions.CommandError', 'exceptions.CommandError', ({(351, 42, 351, 45): 'msg'}, {}), '(msg)', False, 'from osc_lib import exceptions\n'), ((362, 17, 362, 67), 'neutronclient._i18n._', '_', ({(362, 19, 362, 66): '"""IPsec site connection to display (name or ID)"""'}, {}), "('IPsec site connection to display (name or ID)')", False, 'from neutronclient._i18n import _\n'), ((348, 19, 349, 58), 'neutronclient._i18n._', '_', ({(348, 21, 349, 57): '"""Failed to set IPsec site connection \'%(ipsec_conn)s\': %(e)s"""'}, {}), '("Failed to set IPsec site connection \'%(ipsec_conn)s\': %(e)s")', False, 'from neutronclient._i18n import _\n'), ((273, 26, 274, 70), 'neutronclient._i18n._', '_', ({(273, 28, 274, 69): '"""Failed to delete IPsec site connection with name or ID \'%(ipsec_site_conn)s\': %(e)s"""'}, {}), '("Failed to delete IPsec site connection with name or ID \'%(ipsec_site_conn)s\': %(e)s"\n )', False, 'from neutronclient._i18n import _\n')] |
AlaricGilbert/ArknightsAutoHelper | Arknights/flags.py | 9e2db0c4e0d1be30856df731ab192da396121d94 | TINY_WAIT = 1
SMALL_WAIT = 3
MEDIUM_WAIT = 5
BIG_WAIT = 10
SECURITY_WAIT = 15
BATTLE_FINISH_DETECT = 12
BATTLE_NONE_DETECT_TIME = 90
BATTLE_END_SIGNAL_MAX_EXECUTE_TIME = 15
# Offsets for key actions
FLAGS_START_BATTLE_BIAS = (50, 25)
FLAGS_ENSURE_TEAM_INFO_BIAS = (25, 50)
# Square offsets
FLAGS_CLICK_BIAS_TINY = (3, 3)
FLAGS_CLICK_BIAS_SMALL = (5, 5)
FLAGS_CLICK_BIAS_MEDIUM = (10, 10)
FLAGS_CLICK_BIAS_BIG = (15, 15)
FLAGS_CLICK_BIAS_HUGE = (30, 30)
# Swipe offsets
# Offsets used for left/right swipes, i.e. offsets applied to the initial coordinate point
FLAGS_SWIPE_BIAS_TO_LEFT = ((1, 1), (1, 1))
FLAGS_SWIPE_BIAS_TO_RIGHT = ((1, 1), (1, 1))
| [] |
Conky5/elasticsearch-py | elasticsearch/client/shutdown.py | 93543a7fee51c0da6e898c9155bdb5f965c5bb53 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class ShutdownClient(NamespacedClient):
@query_params()
def delete_node(self, node_id, params=None, headers=None):
"""
Removes a node from the shutdown list
`<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg node_id: The node id of node to be removed from the
shutdown state
"""
if node_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'node_id'.")
return self.transport.perform_request(
"DELETE",
_make_path("_nodes", node_id, "shutdown"),
params=params,
headers=headers,
)
@query_params()
def get_node(self, node_id=None, params=None, headers=None):
"""
Retrieve status of a node or nodes that are currently marked as shutting down
`<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg node_id: Which node for which to retrieve the shutdown
status
"""
return self.transport.perform_request(
"GET",
_make_path("_nodes", node_id, "shutdown"),
params=params,
headers=headers,
)
@query_params()
def put_node(self, node_id, body, params=None, headers=None):
"""
Adds a node to be shut down
`<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg node_id: The node id of node to be shut down
:arg body: The shutdown type definition to register
"""
for param in (node_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path("_nodes", node_id, "shutdown"),
params=params,
headers=headers,
body=body,
)
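    # A usage sketch (not from this file; the node id is a placeholder and the
    # body shape follows the documented node shutdown API, shown here as an
    # assumption rather than something defined in this module):
    #
    #   client.shutdown.put_node(
    #       node_id="node-1",
    #       body={"type": "restart", "reason": "maintenance window"},
    #   )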
| [] |
RechercheTech/sushi-chef-arvind-gupta-toys | sushichef.py | 2b381d8942c16ed16b4a44d8fc020fe0a81a18c0 | #!/usr/bin/env python
import os
import requests
import re
import shutil
from arvind import ArvindVideo, ArvindLanguage, YOUTUBE_CACHE_DIR
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from ricecooker.chefs import SushiChef
from ricecooker.classes.files import YouTubeVideoFile
from ricecooker.classes.licenses import get_license
from ricecooker.classes.nodes import VideoNode, TopicNode
ARVIND = "Arvind Gupta Toys"
ARVIND_URL = "http://www.arvindguptatoys.com/films.html"
ROOT_DIR_PATH = os.getcwd()
DOWNLOADS_PATH = os.path.join(ROOT_DIR_PATH, "downloads")
DOWNLOADS_VIDEOS_PATH = os.path.join(DOWNLOADS_PATH, "videos/")
SKIP_VIDEOS_PATH = os.path.join(ROOT_DIR_PATH, "skip_videos.txt")
# These are the languages that has no sub topics on its videos.
SINGLE_TOPIC_LANGUAGES = [
"bhojpuri; bajpuri; bhojapuri", # actual lang_obj.name in le-utils
"bhojpuri", # future-proofing for upcoming lang_obj.name changes
"nepali",
"malayalam",
"telugu",
"bengali",
"odiya",
"punjabi",
"marwari; marwadi", # actual lang_obj.name in le-utils
"marwari", # future-proofing for upcoming lang_obj.name changes
"assamese",
"urdu",
"spanish",
"chinese",
"indonesian",
"sci_edu",
"science/educational",
]
# List of multiple languages on its topics
MULTI_LANGUAGE_TOPIC = ["russian", "french",]
# This are the estimate total count of arvind gupta toys language contents
TOTAL_ARVIND_LANG = 23
SINGLE_TOPIC = "single"
STANDARD_TOPIC = "standard"
MULTI_LANGUAGE = "multi"
YOUTUBE_DOMAINS = ["youtu.be", "youtube.com"]
DEBUG_MODE = True # Print extra debug info durig the chef run (disable in prod)
def clean_video_title(title, lang_obj):
# Remove redundant and misleading words in the video title
clean_title = title
try:
if title != None:
clean_str = title.replace("-", " ").replace("MB", "").replace("|", "")
clean_uplang = clean_str.replace(lang_obj.name.upper(), "")
clean_lowlang = clean_uplang.replace(lang_obj.name.lower(), "")
clean_caplang = clean_lowlang.replace(lang_obj.name.capitalize() , "")
clean_format = clean_caplang.replace(".avi", "").replace(".wmv", "").strip()
clean_extra_spaces = re.sub(" +", " ",clean_format)
is_int = clean_extra_spaces[-2:]
if is_int.isdigit():
clean_extra_spaces = clean_extra_spaces.replace(is_int, "")
clean_title = clean_extra_spaces
print("Cleaned video title ====> ", clean_title)
except Exception as e:
print('Error cleaning this video title: ', clean_title)
return clean_title
def include_video_topic(topic_node, video_data, lang_obj):
# Include video details to the parent topic node
video_id = video_data.uid
video_source_id = 'arvind-video-{0}'.format(video_id)
video_node = VideoNode(
source_id=video_source_id,
title=clean_video_title(video_data.title, lang_obj),
description=video_data.description,
author=ARVIND,
thumbnail=video_data.thumbnail,
license=get_license("CC BY-NC", copyright_holder=ARVIND),
files=[
YouTubeVideoFile(
youtube_id=video_id,
language=video_data.language,
high_resolution=False,
)
])
topic_node.add_child(video_node)
def save_skip_videos(video, topic, lang_obj):
# Compile skip videos into text file
if not os.path.exists(SKIP_VIDEOS_PATH):
open(SKIP_VIDEOS_PATH,"w+")
text_file = open(SKIP_VIDEOS_PATH, "a")
video_info = video.language + " - " + topic + " - " + video.url + " - " + video.license + "\n"
text_file.write(video_info)
text_file.close()
def download_video_topics(data, topic, topic_node, lang_obj):
"""
Scrape, collect, and download the videos and their thumbnails.
"""
video_source_ids = []
for vinfo in data[topic]:
try:
video = ArvindVideo(
url=vinfo['video_url'],
title=vinfo['video_title'],
language=lang_obj.code)
if video.download_info():
if video.license_common:
video_source_id = 'arvind-video-{0}'.format(video.uid)
if video_source_id not in video_source_ids:
include_video_topic(topic_node, video, lang_obj)
video_source_ids.append(video_source_id)
else:
print('Skipping duplicate video: ' + str(vinfo['video_url']))
else:
save_skip_videos(video, topic, lang_obj)
else:
save_skip_videos(video, topic, lang_obj)
except Exception as e:
print('Error downloading this video:', e)
def generate_child_topics(arvind_contents, main_topic, lang_obj, topic_type):
# Create a topic for each languages
data = arvind_contents[lang_obj.name]
for topic_index in data:
topic_name = topic_index
if topic_type == STANDARD_TOPIC:
source_id = 'arvind-child-topic-{0}'.format(topic_name)
topic_node = TopicNode(title=topic_name, source_id=source_id)
download_video_topics(data, topic_name, topic_node, lang_obj)
main_topic.add_child(topic_node)
if topic_type == SINGLE_TOPIC:
download_video_topics(data, topic_name, main_topic, lang_obj)
return main_topic
def create_language_data(lang_data, lang_obj):
"""
Process the list of elements in `lang_data` to extract video links.
"""
topic_contents = {}
initial_topics = []
prev_topic = ""
first_count = 1
total_loop = len(lang_data)
lang_name = lang_obj.name.lower()
for item in lang_data:
total_loop -= 1
if isinstance(item, NavigableString) or item.name == 'br':
continue # skip whitespace and <br/> tags
try:
title = item.text.rstrip().strip()
video_link = ""
try:
video_a_tag = item.find('a')
if video_a_tag:
video_link = video_a_tag.get("href") # for videos
else:
video_link = "" # for headings
topic_details = {}
if any(ytd in video_link for ytd in YOUTUBE_DOMAINS):
if lang_name in MULTI_LANGUAGE_TOPIC:
current_lang = title.split()[0].lower()
if first_count == 1:
first_count = 0
prev_topic = current_lang
topic_details['video_url'] = video_link
topic_details['video_title'] = title
if lang_name in MULTI_LANGUAGE_TOPIC:
if prev_topic != current_lang:
topic_contents[prev_topic] = initial_topics
initial_topics = []
prev_topic = current_lang
initial_topics.append(topic_details)
except Exception as e:
print('>> passing on', e)
pass
if first_count == 1:
if ":" in title:
first_count = 0
prev_topic = title.replace(":", "").strip()
if video_link == "":
if ":" in title:
topic_contents[prev_topic] = initial_topics
prev_topic = title.replace(":", "").strip()
initial_topics = []
except Exception as e:
print('>>> passing on', e)
pass
# This wasn't working (last topic in each standard language was missing) ...
# if total_loop == 0:
# topic_contents[prev_topic] = initial_topics
# ... so changed to this:
topic_contents[prev_topic] = initial_topics
return topic_contents
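# A sketch of the mapping create_language_data() builds (topic names, URLs and
# titles below are illustrative, not scraped values):
#
#   {'Some topic heading': [{'video_url': 'https://youtu.be/<id>',
#                            'video_title': 'Some video title'},
#                           ...],
#    ...}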
def scrape_arvind_page():
url = ARVIND_URL
response = requests.get(url)
page = BeautifulSoup(response.text, 'html5lib')
content_divs = page.body.div
list_divs = list(content_divs.children)
languages_div_start = 5
languages_list = list(list_divs[languages_div_start].children)
return languages_list
def get_language_details(lang_name):
video_lang = ArvindLanguage(name=lang_name)
if video_lang.get_lang_obj():
return video_lang
return None
def create_language_topic():
arvind_languages = scrape_arvind_page()
main_topic_list = []
if os.path.exists(SKIP_VIDEOS_PATH):
os.remove(SKIP_VIDEOS_PATH)
loop_max = TOTAL_ARVIND_LANG
language_next_int = 7
loop_couter = 0
while (loop_couter != loop_max):
try:
lang_name = arvind_languages[language_next_int].get('id')
lang_obj = get_language_details(lang_name.lower())
if lang_obj != None:
lang_name = lang_obj.name
lang_name_lower = lang_name.lower()
print('== Processing ', lang_name, '='*60)
language_source_id = 'arvind-parent-topic-{0}'.format(lang_name_lower)
# print('language_source_id =', language_source_id)
get_language_data = list(arvind_languages[language_next_int])
# print('len(get_language_data) = ', len(get_language_data))
data_contents = { lang_name: create_language_data(get_language_data, lang_obj) }
# print('len(data_contents[lang_name])', len(data_contents[lang_name]))
language_topic = TopicNode(title=lang_name.capitalize(), source_id=language_source_id)
if lang_name_lower not in SINGLE_TOPIC_LANGUAGES and lang_name_lower not in MULTI_LANGUAGE_TOPIC:
print("=======> This Language is in standard format", lang_name)
topic_type = STANDARD_TOPIC
generate_child_topics(data_contents, language_topic, lang_obj, topic_type)
main_topic_list.append(language_topic)
print("=====>finished", lang_name)
if lang_name_lower in SINGLE_TOPIC_LANGUAGES:
print("=====> This Language is in single topic format ", lang_name)
topic_type = SINGLE_TOPIC
generate_child_topics(data_contents, language_topic, lang_obj, topic_type)
main_topic_list.append(language_topic)
print("=====>finished", lang_name)
if lang_name_lower in MULTI_LANGUAGE_TOPIC:
print("=====> This Language is in multiple langauage topic format ", lang_name)
lang_data = create_language_data(get_language_data, lang_obj)
for lang in lang_data:
current_lang = get_language_details(lang.lower())
if current_lang != None:
parent_source_id = 'arvind-parent-topic-{0}'.format(current_lang.name)
parent_topic = TopicNode(title=lang.capitalize(), source_id=parent_source_id)
data_dic = {current_lang.name: {"": lang_data[lang]}}
topic_type = SINGLE_TOPIC
generate_child_topics(data_dic, parent_topic, current_lang, topic_type)
main_topic_list.append(parent_topic)
print("=====>finished ", lang)
except Exception as e:
print("===> error getting language topics: ", e)
# raise(e)
language_next_int += 4
loop_couter += 1
return main_topic_list
class ArvindChef(SushiChef):
channel_info = {
"CHANNEL_TITLE": "Arvind Gupta Toys",
"CHANNEL_SOURCE_DOMAIN": "arvindguptatoys.com",
"CHANNEL_SOURCE_ID": "toys-from-trash",
"CHANNEL_LANGUAGE": "mul",
"CHANNEL_THUMBNAIL": 'chefdata/arvind_gupta_thumbnail.png',
"CHANNEL_DESCRIPTION": "Math and science activities through low-cost " \
"materials all in the form of videos to provide various pathways for children to explore" \
" and deepen their understanding of concepts in low-resource contexts around the world." \
" Valuable resource library for teachers to incorporate in their lessons, for parents to" \
" work with children at home using readily available, simple, and low-cost materials.",
}
def pre_run(self, args, options):
"""This function will get called by ricecooker before the chef runs."""
if args['update']:
# delete video info .json files cached in chefdata/youtubecache/
print('Deleting vinfo .json files in {}'.format(YOUTUBE_CACHE_DIR))
if os.path.exists(YOUTUBE_CACHE_DIR):
shutil.rmtree(YOUTUBE_CACHE_DIR)
os.makedirs(YOUTUBE_CACHE_DIR)
def construct_channel(self, **kwargs):
channel = self.get_channel(**kwargs)
languages_topic = create_language_topic()
for lang_topic in languages_topic:
channel.add_child(lang_topic)
return channel
if __name__ == "__main__":
"""
Run this script on the command line using:
python sushichef.py -v --reset --token=YOURTOKENHERE9139139f3a23232
"""
chef = ArvindChef()
chef.main()
| [((24, 16, 24, 27), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((25, 17, 25, 57), 'os.path.join', 'os.path.join', ({(25, 30, 25, 43): 'ROOT_DIR_PATH', (25, 45, 25, 56): '"""downloads"""'}, {}), "(ROOT_DIR_PATH, 'downloads')", False, 'import os\n'), ((26, 24, 26, 63), 'os.path.join', 'os.path.join', ({(26, 37, 26, 51): 'DOWNLOADS_PATH', (26, 53, 26, 62): '"""videos/"""'}, {}), "(DOWNLOADS_PATH, 'videos/')", False, 'import os\n'), ((28, 19, 28, 65), 'os.path.join', 'os.path.join', ({(28, 32, 28, 45): 'ROOT_DIR_PATH', (28, 47, 28, 64): '"""skip_videos.txt"""'}, {}), "(ROOT_DIR_PATH, 'skip_videos.txt')", False, 'import os\n'), ((248, 15, 248, 32), 'requests.get', 'requests.get', ({(248, 28, 248, 31): 'url'}, {}), '(url)', False, 'import requests\n'), ((249, 11, 249, 51), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(249, 25, 249, 38): 'response.text', (249, 40, 249, 50): '"""html5lib"""'}, {}), "(response.text, 'html5lib')", False, 'from bs4 import BeautifulSoup\n'), ((257, 17, 257, 47), 'arvind.ArvindLanguage', 'ArvindLanguage', (), '', False, 'from arvind import ArvindVideo, ArvindLanguage, YOUTUBE_CACHE_DIR\n'), ((267, 7, 267, 39), 'os.path.exists', 'os.path.exists', ({(267, 22, 267, 38): 'SKIP_VIDEOS_PATH'}, {}), '(SKIP_VIDEOS_PATH)', False, 'import os\n'), ((115, 11, 115, 43), 'os.path.exists', 'os.path.exists', ({(115, 26, 115, 42): 'SKIP_VIDEOS_PATH'}, {}), '(SKIP_VIDEOS_PATH)', False, 'import os\n'), ((268, 8, 268, 35), 'os.remove', 'os.remove', ({(268, 18, 268, 34): 'SKIP_VIDEOS_PATH'}, {}), '(SKIP_VIDEOS_PATH)', False, 'import os\n'), ((79, 33, 79, 63), 're.sub', 're.sub', ({(79, 40, 79, 44): '""" +"""', (79, 46, 79, 49): '""" """', (79, 50, 79, 62): 'clean_format'}, {}), "(' +', ' ', clean_format)", False, 'import re\n'), ((102, 16, 102, 64), 'ricecooker.classes.licenses.get_license', 'get_license', (), '', False, 'from ricecooker.classes.licenses import get_license\n'), ((130, 20, 133, 39), 'arvind.ArvindVideo', 'ArvindVideo', (), '', False, 'from arvind import ArvindVideo, ArvindLanguage, YOUTUBE_CACHE_DIR\n'), ((160, 25, 160, 73), 'ricecooker.classes.nodes.TopicNode', 'TopicNode', (), '', False, 'from ricecooker.classes.nodes import VideoNode, TopicNode\n'), ((346, 15, 346, 48), 'os.path.exists', 'os.path.exists', ({(346, 30, 346, 47): 'YOUTUBE_CACHE_DIR'}, {}), '(YOUTUBE_CACHE_DIR)', False, 'import os\n'), ((348, 12, 348, 42), 'os.makedirs', 'os.makedirs', ({(348, 24, 348, 41): 'YOUTUBE_CACHE_DIR'}, {}), '(YOUTUBE_CACHE_DIR)', False, 'import os\n'), ((104, 12, 108, 13), 'ricecooker.classes.files.YouTubeVideoFile', 'YouTubeVideoFile', (), '', False, 'from ricecooker.classes.files import YouTubeVideoFile\n'), ((347, 16, 347, 48), 'shutil.rmtree', 'shutil.rmtree', ({(347, 30, 347, 47): 'YOUTUBE_CACHE_DIR'}, {}), '(YOUTUBE_CACHE_DIR)', False, 'import shutil\n')] |
lndba/apasa_backend | api/views/domain.py | e0bb96e22a22f6e2a5a2826f225388113473e7e2 | from rest_framework.viewsets import ModelViewSet,GenericViewSet
from rest_framework.response import Response
from api.serializers.domain import *
from api.pagination.page import MyPageNumberPagination
from api.models import *
class MDomainListViewSet(ModelViewSet):
queryset = MasterDomainName.objects.all().order_by('id')
pagination_class = MyPageNumberPagination
serializer_class = MDomainListSerializers
class DnsListViewSet(GenericViewSet):
def list(self, request, *args, **kwargs):
res = {"count": 0, 'results': None}
domain_id = request.query_params.get('domain')
dns_list = Dns.objects.all().filter(master_domain_name=domain_id)
dns_count = Dns.objects.all().filter(master_domain_name=domain_id).count()
page = MyPageNumberPagination()
page_dns_list = page.paginate_queryset(dns_list,request,self)
ser = DnsListSerializers(instance=page_dns_list,many=True)
res['results'] = ser.data
res['count'] = dns_count
return Response(res)
class DnsUpdataViewSet(ModelViewSet):
queryset = Dns.objects.all().order_by('id')
serializer_class = DnsUpdataSerializers
| [((21, 15, 21, 39), 'api.pagination.page.MyPageNumberPagination', 'MyPageNumberPagination', ({}, {}), '()', False, 'from api.pagination.page import MyPageNumberPagination\n'), ((26, 15, 26, 28), 'rest_framework.response.Response', 'Response', ({(26, 24, 26, 27): 'res'}, {}), '(res)', False, 'from rest_framework.response import Response\n')] |
yuenliou/leetcode | 90-subsets-ii.py | e8a1c6cae6547cbcb6e8494be6df685f3e7c837c | #!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
from typing import List
class Solution:
def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
"""
        Solution write-up: https://leetcode-cn.com/problems/subsets/solution/c-zong-jie-liao-hui-su-wen-ti-lei-xing-dai-ni-gao-/
"""
def backtrack(start, path):
            # Termination condition: none; every path is recorded as a subset
            res.append(path[:])
            for i in range(start, len(nums)):
                # Skip a number equal to the previous one to avoid duplicate subsets
                if i > start and nums[i] == nums[i - 1]: continue
                # Make a choice
                path.append(nums[i])
                # Descend into the next level of decisions
                backtrack(i + 1, path)
                # Undo the choice
                path.pop()
res = []
nums.sort()
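        # Sorting puts equal numbers next to each other so the duplicate check
        # `nums[i] == nums[i - 1]` inside backtrack() can prune repeated subsets.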
backtrack( 0, [])
return res
def main():
param = [1,2,2]
solution = Solution()
ret = solution.subsetsWithDup(param)
print(ret)
'''90. Subsets II
Given an integer array nums that may contain duplicate elements, return all possible subsets (the power set).
Note: the solution set must not contain duplicate subsets.
Example:
Input: [1,2,2]
Output:
[
[2],
[1],
[1,2,2],
[2,2],
[1,2],
[]
]
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/subsets-ii
Copyright belongs to LeetCode-CN (领扣网络). Contact them for authorization before commercial reproduction; credit the source for non-commercial reproduction.
'''
if __name__ == '__main__':
main()
| [] |
climberwb/bert-pli | tools/output_tool.py | 0e6eda7a23b7502c86eab4c0d889fad1bbb57155 | import json
from .accuracy_tool import gen_micro_macro_result
def null_output_function(data, config, *args, **params):
return ""
def basic_output_function(data, config, *args, **params):
which = config.get("output", "output_value").replace(" ", "").split(",")
temp = gen_micro_macro_result(data)
result = {}
for name in which:
result[name] = temp[name]
return json.dumps(result, sort_keys=True)
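# A hedged example of the config entry this reads (the metric names listed in
# output_value must be keys produced by gen_micro_macro_result; the names
# shown here are placeholders only):
#
#   [output]
#   output_value = micro_precision, macro_f1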
| [((17, 11, 17, 45), 'json.dumps', 'json.dumps', (), '', False, 'import json\n')] |
Vjrx/airship-drydock | python/drydock_provisioner/ingester/plugins/deckhand.py | 315fb9864e6d55a66d5266f76c160be55d22c98b | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This data ingester will consume YAML site topology documents."""
import yaml
import logging
import jsonschema
import os
import pkg_resources
import copy
import hashlib
import drydock_provisioner.objects.fields as hd_fields
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from drydock_provisioner import error as errors
from drydock_provisioner import objects
from drydock_provisioner.ingester.plugins import IngesterPlugin
cache_opts = {
'cache.type': 'memory',
'expire': 1800,
}
cache = CacheManager(**parse_cache_config_options(cache_opts))
class DeckhandIngester(IngesterPlugin):
def __init__(self):
super().__init__()
self.logger = logging.getLogger('drydock.ingester.deckhand')
self.load_schemas()
def get_name(self):
return "deckhand"
def ingest_data(self, **kwargs):
"""Parse and save design data.
:param content: String of valid Deckhand YAML
:returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects
"""
def local_parse():
return self.parse_docs(kwargs.get('content'))
if 'content' in kwargs:
try:
# Hash the input to use as the cache key. This is not a security
# related hash, so use cheap and fast MD5
hv = hashlib.md5(kwargs.get('content', b'')).hexdigest()
local_cache = cache.get_cache('parsed_docs')
results = local_cache.get(key=hv, createfunc=local_parse)
parse_status, models = results
except Exception as ex:
self.logger.debug("Error parsing design - hash %s", hv, exc_info=ex)
raise ex
else:
raise ValueError('Missing parameter "content"')
return parse_status, models
def parse_docs(self, doc_blob):
"""Translate a YAML string into the internal Drydock model.
Returns a tuple of a objects.TaskStatus instance to summarize all
document processing and a list of models yielded by successful processing
:param doc_blob: bytes representing a utf-8 encoded YAML string
"""
models = []
yaml_string = doc_blob.decode()
self.logger.debug("yamlingester:parse_docs - Parsing YAML string.")
try:
parsed_data = yaml.safe_load_all(yaml_string)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
raise errors.IngesterError(
"Error parsing YAML at (l:%s, c:%s): %s" %
(mark.line + 1, mark.column + 1, err))
else:
raise errors.IngesterError("Error parsing YAML: %s" % (err))
# tracking processing status to provide a complete summary of issues
ps = objects.Validation()
ps.set_status(hd_fields.ValidationResult.Success)
for d in parsed_data:
try:
(schema_ns, doc_kind, doc_version) = d.get('schema',
'').split('/')
except ValueError as ex:
self.logger.error(
"Error with document structure.", exc_info=ex)
self.logger.debug("Error document\n%s" % yaml.dump(d))
continue
if schema_ns == 'drydock':
try:
doc_ref = objects.DocumentReference(
doc_type=hd_fields.DocumentType.Deckhand,
doc_schema=d.get('schema'),
doc_name=d.get('metadata', {}).get('name', 'Unknown'))
doc_errors = self.validate_drydock_document(d)
if len(doc_errors) > 0:
for e in doc_errors:
ps.add_detail_msg(
objects.ValidationMessage(
msg="%s:%s schema validation error: %s" %
(doc_kind, doc_version, e),
name="DD001",
docs=[doc_ref],
error=True,
level=hd_fields.MessageLevels.ERROR,
diagnostic=
"Invalid input file - see Drydock Troubleshooting Guide for DD001"
))
ps.set_status(hd_fields.ActionResult.Failure)
continue
model = self.process_drydock_document(d)
model.doc_ref = doc_ref
models.append(model)
except errors.IngesterError as ie:
msg = "Error processing document: %s" % str(ie)
self.logger.warning(msg)
ps.add_detail_msg(
objects.ValidationMessage(
msg=msg,
name="DD000",
error=True,
level=hd_fields.MessageLevels.ERROR,
docs=[doc_ref],
diagnostic="Exception during document processing "
"- see Drydock Troubleshooting Guide "
"for DD000"))
ps.set_status(hd_fields.ActionResult.Failure)
except Exception as ex:
msg = "Unexpected error processing document: %s" % str(ex)
self.logger.error(msg, exc_info=True)
ps.add_detail_msg(
objects.ValidationMessage(
msg=msg,
name="DD000",
error=True,
level=hd_fields.MessageLevels.ERROR,
docs=[doc_ref],
diagnostic="Unexpected exception during document "
"processing - see Drydock Troubleshooting "
"Guide for DD000"))
ps.set_status(hd_fields.ActionResult.Failure)
return (ps, models)
def process_drydock_document(self, doc):
"""Process a parsed YAML document.
:param doc: The dictionary from parsing the YAML document
"""
(schema_ns, kind, version) = doc.get('schema', '').split('/')
if version == 'v1':
doc_processor = DeckhandIngester.v1_doc_handlers.get(kind, None)
else:
doc_processor = None
if doc_processor is None:
raise errors.IngesterError(
"Invalid document - Kind %s and Version %s" % (kind, version))
metadata = doc.get('metadata', {})
doc_name = metadata.get('name')
return doc_processor(self, doc_name, doc.get('data', {}))
def validate_drydock_document(self, doc):
"""Validate a parsed document via jsonschema.
If a schema for a document Kind is not available, the document is
considered valid. Schema is chosen by the doc['kind'] field.
Returns a empty list for valid documents, otherwise returns a list
of all found errors
:param doc: dictionary of the parsed document.
"""
schemaname = doc.get('schema', '')
(schema_ns, doc_kind, doc_version) = schemaname.split('/')
errors_found = []
if doc_version == 'v1':
if schemaname in self.v1_doc_schemas:
validator = jsonschema.Draft4Validator(
self.v1_doc_schemas.get(schemaname))
for error in validator.iter_errors(doc.get('data', [])):
errors_found.append(error.message)
return errors_found
def process_drydock_region(self, name, data):
"""Process the data/spec section of a Region document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Site()
# Need to add validation logic, we'll assume the input is
# valid for now
model.name = name
model.status = hd_fields.SiteStatus.Unknown
model.source = hd_fields.ModelSource.Designed
model.tag_definitions = objects.NodeTagDefinitionList()
tag_defs = data.get('tag_definitions', [])
for t in tag_defs:
tag_model = objects.NodeTagDefinition()
tag_model.tag = t.get('tag', '')
tag_model.type = t.get('definition_type', '')
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError(
'Unknown definition_type in '
                    'tag_definition instance: %s' % (tag_model.type))
model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', [])
model.authorized_keys = [k for k in auth_keys]
repos = data.get('repositories', None)
if repos:
model.repositories = self.process_drydock_region_repo_list(repos)
return model
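    # A hedged sketch of the data section this handler reads (keys mirror the
    # lookups above; the concrete values are invented for illustration):
    #
    #   tag_definitions:
    #     - tag: some_tag
    #       definition_type: lshw_xpath
    #       definition: //some/xpath/expression
    #   authorized_keys:
    #     - "ssh-rsa AAAA... user@host"
    #   repositories:
    #     remove_unlisted: true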
def process_drydock_region_repo_list(self, data):
"""Process a package repository list.
:param data: The data from the ``repositories`` key in a Region document
"""
model = objects.RepositoryList()
for k, v in data.items():
if k == 'remove_unlisted':
model.remove_unlisted = v
else:
model.append(objects.Repository(name=k, **v))
return model
def process_drydock_rack(self, name, data):
"""Process the data/spec section of a Rack document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Rack()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.tor_switches = objects.TorSwitchList()
tors = data.get('tor_switches', {})
for k, v in tors.items():
tor = objects.TorSwitch()
tor.switch_name = k
tor.mgmt_ip = v.get('mgmt_ip', None)
tor.sdn_api_uri = v.get('sdn_api_url', None)
model.tor_switches.append(tor)
model.location = copy.deepcopy(data.get('location', {}))
model.local_networks = [n for n in data.get('local_networks', [])]
return model
def process_drydock_networklink(self, name, data):
"""Process the data/spec section of a NetworkLink document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.NetworkLink()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
bonding = data.get('bonding', {})
model.bonding_mode = bonding.get(
'mode', hd_fields.NetworkLinkBondingMode.Disabled)
if model.bonding_mode in \
(hd_fields.NetworkLinkBondingMode.LACP,
hd_fields.NetworkLinkBondingMode.RoundRobin,
hd_fields.NetworkLinkBondingMode.Standby):
model.bonding_mon_rate = bonding.get('mon_rate', '100')
model.bonding_up_delay = bonding.get('up_delay', '200')
model.bonding_down_delay = bonding.get('down_delay', '200')
if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP:
model.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
model.bonding_peer_rate = bonding.get('peer_rate', 'fast')
model.mtu = data.get('mtu', None)
model.linkspeed = data.get('linkspeed', None)
trunking = data.get('trunking', {})
model.trunk_mode = trunking.get(
'mode', hd_fields.NetworkLinkTrunkingMode.Disabled)
model.native_network = trunking.get('default_network', None)
model.allowed_networks = data.get('allowed_networks', None)
return model
def process_drydock_network(self, name, data):
"""Process the data/spec section of a Network document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Network()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
model.cidr = data.get('cidr', None)
model.vlan_id = data.get('vlan', None)
model.mtu = data.get('mtu', None)
model.routedomain = data.get('routedomain', None)
dns = data.get('dns', {})
model.dns_domain = dns.get('domain', 'local')
model.dns_servers = dns.get('servers', None)
ranges = data.get('ranges', [])
model.ranges = []
for r in ranges:
model.ranges.append({
'type': r.get('type', None),
'start': r.get('start', None),
'end': r.get('end', None),
})
routes = data.get('routes', [])
model.routes = []
for r in routes:
model.routes.append({
'subnet': r.get('subnet', None),
'gateway': r.get('gateway', None),
'metric': r.get('metric', None),
'routedomain': r.get('routedomain', None),
})
dhcp_relay = data.get('dhcp_relay', None)
if dhcp_relay is not None:
model.dhcp_relay_self_ip = dhcp_relay.get('self_ip', None)
model.dhcp_relay_upstream_target = dhcp_relay.get(
'upstream_target', None)
return model
def process_drydock_hwprofile(self, name, data):
"""Process the data/spec section of a HardwareProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HardwareProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
model.vendor = data.get('vendor', None)
model.generation = data.get('generation', None)
model.hw_version = data.get('hw_version', None)
model.bios_version = data.get('bios_version', None)
model.boot_mode = data.get('boot_mode', None)
model.bootstrap_protocol = data.get('bootstrap_protocol', None)
model.pxe_interface = data.get('pxe_interface', None)
model.devices = objects.HardwareDeviceAliasList()
device_aliases = data.get('device_aliases', {})
for d, v in device_aliases.items():
dev_model = objects.HardwareDeviceAlias()
dev_model.source = hd_fields.ModelSource.Designed
dev_model.alias = d
dev_model.bus_type = v.get('bus_type', None)
dev_model.dev_type = v.get('dev_type', None)
dev_model.address = v.get('address', None)
model.devices.append(dev_model)
model.cpu_sets = data.get('cpu_sets', None) or dict()
model.hugepages_confs = objects.HugepagesConfList()
for c, d in data.get('hugepages', {}).items():
conf = objects.HugepagesConf(
name=c, size=d.get('size'), count=d.get('count'))
model.hugepages_confs.append(conf)
return model
def process_drydock_hostprofile(self, name, data):
"""Process the data/spec section of a HostProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HostProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
return model
def process_drydock_bootaction(self, name, data):
"""Process the data/spec section of a BootAction document.
:param name: the document name attribute
        :param data: the dictionary of the parsed data/spec section
"""
model = objects.BootAction()
model.name = name
model.source = hd_fields.ModelSource.Designed
assets = data.get('assets')
model.asset_list = objects.BootActionAssetList()
for a in assets:
ba = self.process_bootaction_asset(a)
model.asset_list.append(ba)
node_filter = data.get('node_filter', None)
if node_filter is not None:
nfs = self.process_bootaction_nodefilter(node_filter)
model.node_filter = nfs
model.signaling = data.get('signaling', None)
return model
def process_bootaction_asset(self, asset_dict):
"""Process a dictionary representing a BootAction Data Asset.
:param asset_dict: dictionary representing the bootaction asset
"""
model = objects.BootActionAsset(**asset_dict)
return model
def process_bootaction_nodefilter(self, nf):
"""Process a dictionary representing a BootAction NodeFilter Set.
:param nf: dictionary representing the bootaction nodefilter set.
"""
model = objects.NodeFilterSet()
model.filter_set_type = nf.get('filter_set_type', None)
model.filter_set = []
        for filter_def in nf.get('filter_set', []):
            nf_model = objects.NodeFilter(**filter_def)
model.filter_set.append(nf_model)
return model
def process_drydock_node(self, name, data):
"""Process the data/spec section of a BaremetalNode document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.BaremetalNode()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
node_metadata = data.get('metadata', {})
model.boot_mac = node_metadata.get('boot_mac', None)
addresses = data.get('addressing', [])
if len(addresses) == 0:
raise errors.IngesterError('BaremetalNode needs at least'
' 1 assigned address')
model.addressing = objects.IpAddressAssignmentList()
for a in addresses:
assignment = objects.IpAddressAssignment()
address = a.get('address', '')
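            # 'dhcp' means the address is assigned dynamically; any other
            # non-empty value is treated as a static assignment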
if address == 'dhcp':
assignment.type = 'dhcp'
assignment.address = None
assignment.network = a.get('network')
model.addressing.append(assignment)
elif address != '':
assignment.type = 'static'
assignment.address = a.get('address')
assignment.network = a.get('network')
model.addressing.append(assignment)
else:
                self.logger.error("Invalid address assignment %s on Node %s" %
                                  (address, name))
return model
def process_host_common_fields(self, data, model):
"""Process fields common to the host-based documents.
Update the provided model with the values of fields common
to BaremetalNode and HostProfile documents.
:param data: dictionary from YAML parsing of the document data/spec section
:param model: instance of objects.HostProfile or objects.BaremetalNode to update
"""
model.parent_profile = data.get('host_profile', None)
model.hardware_profile = data.get('hardware_profile', None)
oob = data.get('oob', {})
model.oob_parameters = {}
for k, v in oob.items():
if k == 'type':
model.oob_type = oob.get('type', None)
else:
model.oob_parameters[k] = v
(model.storage_devices,
model.volume_groups) = self.process_node_storage(
data.get('storage', {}))
interfaces = data.get('interfaces', {})
model.interfaces = objects.HostInterfaceList()
for k, v in interfaces.items():
int_model = objects.HostInterface()
# A null value indicates this interface should be removed
# from any parent profiles
if v is None:
int_model.device_name = '!' + k
continue
int_model.device_name = k
int_model.network_link = v.get('device_link', None)
int_model.hardware_slaves = []
slaves = v.get('slaves', [])
for s in slaves:
int_model.hardware_slaves.append(s)
int_model.networks = []
networks = v.get('networks', [])
for n in networks:
int_model.networks.append(n)
if 'sriov' in v:
int_model.sriov = True
int_model.vf_count = v.get('sriov', {}).get('vf_count', 0)
int_model.trustedmode = v.get('sriov', {}).get(
'trustedmode', False)
model.interfaces.append(int_model)
platform = data.get('platform', {})
model.image = platform.get('image', None)
model.kernel = platform.get('kernel', None)
model.kernel_params = {}
for k, v in platform.get('kernel_params', {}).items():
model.kernel_params[k] = v
model.primary_network = data.get('primary_network', None)
node_metadata = data.get('metadata', {})
metadata_tags = node_metadata.get('tags', [])
model.tags = metadata_tags
owner_data = node_metadata.get('owner_data', {})
model.owner_data = {}
for k, v in owner_data.items():
model.owner_data[k] = v
model.rack = node_metadata.get('rack', None)
return model
def process_node_storage(self, storage):
"""Process the storage data for a node-based document.
        Return a tuple of two lists: the first is a StorageDeviceList, the
second is a VolumeGroupList.
:param storage: dictionary of the storage section of a document
"""
phys_devs = storage.get('physical_devices', {})
storage_devices = objects.HostStorageDeviceList()
for k, v in phys_devs.items():
sd = objects.HostStorageDevice(name=k)
sd.source = hd_fields.ModelSource.Designed
if 'labels' in v:
sd.labels = v.get('labels').copy()
if 'volume_group' in v:
vg = v.get('volume_group')
sd.volume_group = vg
elif 'partitions' in v:
sd.partitions = objects.HostPartitionList()
for vv in v.get('partitions', []):
part_model = objects.HostPartition()
part_model.name = vv.get('name')
part_model.source = hd_fields.ModelSource.Designed
part_model.part_uuid = vv.get('part_uuid', None)
part_model.size = vv.get('size', None)
if 'labels' in vv:
part_model.labels = vv.get('labels').copy()
if 'volume_group' in vv:
                        part_model.volume_group = vv.get('volume_group')
elif 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
part_model.mountpoint = fs_info.get('mountpoint', None)
part_model.fstype = fs_info.get('fstype', 'ext4')
part_model.mount_options = fs_info.get(
'mount_options', 'defaults')
part_model.fs_uuid = fs_info.get('fs_uuid', None)
part_model.fs_label = fs_info.get('fs_label', None)
sd.partitions.append(part_model)
storage_devices.append(sd)
volume_groups = objects.HostVolumeGroupList()
vol_groups = storage.get('volume_groups', {})
for k, v in vol_groups.items():
vg = objects.HostVolumeGroup(name=k)
vg.vg_uuid = v.get('vg_uuid', None)
vg.logical_volumes = objects.HostVolumeList()
volume_groups.append(vg)
for vv in v.get('logical_volumes', []):
lv = objects.HostVolume(name=vv.get('name'))
lv.size = vv.get('size', None)
lv.lv_uuid = vv.get('lv_uuid', None)
if 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
lv.mountpoint = fs_info.get('mountpoint', None)
lv.fstype = fs_info.get('fstype', 'ext4')
lv.mount_options = fs_info.get('mount_options', 'defaults')
lv.fs_uuid = fs_info.get('fs_uuid', None)
lv.fs_label = fs_info.get('fs_label', None)
vg.logical_volumes.append(lv)
return (storage_devices, volume_groups)
def load_schemas(self):
self.v1_doc_schemas = dict()
schema_dir = self._get_schema_dir()
for schema_file in os.listdir(schema_dir):
f = open(os.path.join(schema_dir, schema_file), 'r')
for schema in yaml.safe_load_all(f):
schema_for = schema['metadata']['name']
if schema_for in self.v1_doc_schemas:
self.logger.warning(
"Duplicate document schemas found for document kind %s."
% schema_for)
self.logger.debug(
"Loaded schema for document kind %s." % schema_for)
self.v1_doc_schemas[schema_for] = schema.get('data')
f.close()
def _get_schema_dir(self):
return pkg_resources.resource_filename('drydock_provisioner',
'schemas')
# Mapping of handlers for different document kinds
v1_doc_handlers = {
'Region': process_drydock_region,
'Rack': process_drydock_rack,
'NetworkLink': process_drydock_networklink,
'Network': process_drydock_network,
'HardwareProfile': process_drydock_hwprofile,
'HostProfile': process_drydock_hostprofile,
'BaremetalNode': process_drydock_node,
'BootAction': process_drydock_bootaction,
}
| [((38, 23, 38, 61), 'beaker.util.parse_cache_config_options', 'parse_cache_config_options', ({(38, 50, 38, 60): 'cache_opts'}, {}), '(cache_opts)', False, 'from beaker.util import parse_cache_config_options\n'), ((43, 22, 43, 68), 'logging.getLogger', 'logging.getLogger', ({(43, 40, 43, 67): '"""drydock.ingester.deckhand"""'}, {}), "('drydock.ingester.deckhand')", False, 'import logging\n'), ((99, 13, 99, 33), 'drydock_provisioner.objects.Validation', 'objects.Validation', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((214, 16, 214, 30), 'drydock_provisioner.objects.Site', 'objects.Site', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((222, 32, 222, 63), 'drydock_provisioner.objects.NodeTagDefinitionList', 'objects.NodeTagDefinitionList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((254, 16, 254, 40), 'drydock_provisioner.objects.RepositoryList', 'objects.RepositoryList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((270, 16, 270, 30), 'drydock_provisioner.objects.Rack', 'objects.Rack', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((275, 29, 275, 52), 'drydock_provisioner.objects.TorSwitchList', 'objects.TorSwitchList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((297, 16, 297, 37), 'drydock_provisioner.objects.NetworkLink', 'objects.NetworkLink', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((339, 16, 339, 33), 'drydock_provisioner.objects.Network', 'objects.Network', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((391, 16, 391, 41), 'drydock_provisioner.objects.HardwareProfile', 'objects.HardwareProfile', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((404, 24, 404, 57), 'drydock_provisioner.objects.HardwareDeviceAliasList', 'objects.HardwareDeviceAliasList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((419, 32, 419, 59), 'drydock_provisioner.objects.HugepagesConfList', 'objects.HugepagesConfList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((434, 16, 434, 37), 'drydock_provisioner.objects.HostProfile', 'objects.HostProfile', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((448, 16, 448, 36), 'drydock_provisioner.objects.BootAction', 'objects.BootAction', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((454, 27, 454, 56), 'drydock_provisioner.objects.BootActionAssetList', 'objects.BootActionAssetList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((474, 16, 474, 53), 'drydock_provisioner.objects.BootActionAsset', 'objects.BootActionAsset', ({}, {}), '(**asset_dict)', False, 'from drydock_provisioner import objects\n'), ((482, 16, 482, 39), 'drydock_provisioner.objects.NodeFilterSet', 'objects.NodeFilterSet', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((498, 16, 498, 39), 'drydock_provisioner.objects.BaremetalNode', 'objects.BaremetalNode', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((513, 27, 513, 60), 'drydock_provisioner.objects.IpAddressAssignmentList', 'objects.IpAddressAssignmentList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((563, 27, 563, 54), 'drydock_provisioner.objects.HostInterfaceList', 'objects.HostInterfaceList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((633, 26, 633, 57), 'drydock_provisioner.objects.HostStorageDeviceList', 
'objects.HostStorageDeviceList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((672, 24, 672, 53), 'drydock_provisioner.objects.HostVolumeGroupList', 'objects.HostVolumeGroupList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((700, 27, 700, 49), 'os.listdir', 'os.listdir', ({(700, 38, 700, 48): 'schema_dir'}, {}), '(schema_dir)', False, 'import os\n'), ((714, 15, 715, 57), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', ({(714, 47, 714, 68): '"""drydock_provisioner"""', (715, 47, 715, 56): '"""schemas"""'}, {}), "('drydock_provisioner', 'schemas')", False, 'import pkg_resources\n'), ((88, 26, 88, 57), 'yaml.safe_load_all', 'yaml.safe_load_all', ({(88, 45, 88, 56): 'yaml_string'}, {}), '(yaml_string)', False, 'import yaml\n'), ((177, 18, 178, 78), 'drydock_provisioner.error.IngesterError', 'errors.IngesterError', ({(178, 16, 178, 77): "('Invalid document - Kind %s and Version %s' % (kind, version))"}, {}), "('Invalid document - Kind %s and Version %s' % (kind,\n version))", True, 'from drydock_provisioner import error as errors\n'), ((227, 24, 227, 51), 'drydock_provisioner.objects.NodeTagDefinition', 'objects.NodeTagDefinition', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((279, 18, 279, 37), 'drydock_provisioner.objects.TorSwitch', 'objects.TorSwitch', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((409, 24, 409, 53), 'drydock_provisioner.objects.HardwareDeviceAlias', 'objects.HardwareDeviceAlias', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((487, 23, 487, 47), 'drydock_provisioner.objects.NodeFilter', 'objects.NodeFilter', ({}, {}), '(**nf)', False, 'from drydock_provisioner import objects\n'), ((510, 18, 511, 61), 'drydock_provisioner.error.IngesterError', 'errors.IngesterError', ({(510, 39, 511, 60): '"""BaremetalNode needs at least 1 assigned address"""'}, {}), "('BaremetalNode needs at least 1 assigned address')", True, 'from drydock_provisioner import error as errors\n'), ((516, 25, 516, 54), 'drydock_provisioner.objects.IpAddressAssignment', 'objects.IpAddressAssignment', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((566, 24, 566, 47), 'drydock_provisioner.objects.HostInterface', 'objects.HostInterface', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((636, 17, 636, 50), 'drydock_provisioner.objects.HostStorageDevice', 'objects.HostStorageDevice', (), '', False, 'from drydock_provisioner import objects\n'), ((676, 17, 676, 48), 'drydock_provisioner.objects.HostVolumeGroup', 'objects.HostVolumeGroup', (), '', False, 'from drydock_provisioner import objects\n'), ((678, 33, 678, 57), 'drydock_provisioner.objects.HostVolumeList', 'objects.HostVolumeList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((702, 26, 702, 47), 'yaml.safe_load_all', 'yaml.safe_load_all', ({(702, 45, 702, 46): 'f'}, {}), '(f)', False, 'import yaml\n'), ((233, 22, 235, 72), 'drydock_provisioner.error.IngesterError', 'errors.IngesterError', ({(234, 20, 235, 71): "('Unknown definition_type in tag_definition instance: %s' % t.definition_type)"}, {}), "(\n 'Unknown definition_type in tag_definition instance: %s' % t.\n definition_type)", True, 'from drydock_provisioner import error as errors\n'), ((701, 21, 701, 58), 'os.path.join', 'os.path.join', ({(701, 34, 701, 44): 'schema_dir', (701, 46, 701, 57): 'schema_file'}, {}), '(schema_dir, schema_file)', False, 'import os\n'), ((92, 22, 94, 58), 
'drydock_provisioner.error.IngesterError', 'errors.IngesterError', ({(93, 20, 94, 57): "('Error parsing YAML at (l:%s, c:%s): %s' % (mark.line + 1, mark.column + 1,\n err))"}, {}), "('Error parsing YAML at (l:%s, c:%s): %s' % (mark.line +\n 1, mark.column + 1, err))", True, 'from drydock_provisioner import error as errors\n'), ((96, 22, 96, 76), 'drydock_provisioner.error.IngesterError', 'errors.IngesterError', ({(96, 43, 96, 75): "('Error parsing YAML: %s' % err)"}, {}), "('Error parsing YAML: %s' % err)", True, 'from drydock_provisioner import error as errors\n'), ((260, 29, 260, 60), 'drydock_provisioner.objects.Repository', 'objects.Repository', (), '', False, 'from drydock_provisioner import objects\n'), ((646, 32, 646, 59), 'drydock_provisioner.objects.HostPartitionList', 'objects.HostPartitionList', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((648, 33, 648, 56), 'drydock_provisioner.objects.HostPartition', 'objects.HostPartition', ({}, {}), '()', False, 'from drydock_provisioner import objects\n'), ((108, 57, 108, 69), 'yaml.dump', 'yaml.dump', ({(108, 67, 108, 68): 'd'}, {}), '(d)', False, 'import yaml\n'), ((139, 24, 147, 40), 'drydock_provisioner.objects.ValidationMessage', 'objects.ValidationMessage', (), '', False, 'from drydock_provisioner import objects\n'), ((153, 24, 161, 46), 'drydock_provisioner.objects.ValidationMessage', 'objects.ValidationMessage', (), '', False, 'from drydock_provisioner import objects\n'), ((120, 32, 129, 33), 'drydock_provisioner.objects.ValidationMessage', 'objects.ValidationMessage', (), '', False, 'from drydock_provisioner import objects\n')] |
nreplogle/ros2-migration-tools | porting_tools/package_xml_porter.py | 8e422731dea52df19da6de780319a17516f60f7c | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
""" Contains a class and method for porting a package.xml file from catkin to ament"""
import xml.etree.ElementTree as etree
from .constants import CatkinToAmentMigration, PACKAGE_XML_ELEMENT_ORDER
from .utils import get_functions_with
def new_element(tag, text="", tail="\n", attrib=None):
""" Helper function to make creating an element with a text and tail easier """
if not attrib:
attrib = {}
element = etree.Element(tag, attrib=attrib)
element.text = text
element.tail = tail
return element
def tag_order(tag):
""" Returns integer to order tags """
if tag in PACKAGE_XML_ELEMENT_ORDER:
return PACKAGE_XML_ELEMENT_ORDER.index(tag)
return float("inf")
class PackageXMLPorter:
"""A class for porting a package.xml file from catkin to ament"""
@staticmethod
def port(tree, extra_rules=[]):
"""
Ports package.xml from catkin to ament
Arguments:
tree - the xml tree representing the package.xml file (output of etree.parse("package.xml"))
extra_rules - a list of functions to apply to the xml tree
Returns:
The new xml tree
"""
# Pulls out all methods in this class with name starting with "rule"
rules = get_functions_with(criteria=lambda name: name.startswith("rule"),
from_class=PackageXMLPorter)
package_root = tree.getroot()
for rule in rules + extra_rules:
rule(package_root)
# Make sure there's a final newline
package_root.tail = "\n"
# Reorder the elements
package_root[:] = sorted(list(package_root), key=lambda elem: tag_order(elem.tag))
# Correct indentation
PackageXMLPorter.indent_tree(elem=package_root, level=0)
#########################
# RULES #
#########################
@staticmethod
def rule_set_format(package_root):
# ROS 2 supports formats 2,3
package_root.set("format", "3")
@staticmethod
def rule_set_build_tool(package_root):
for elem in package_root.findall("buildtool_depend"):
if elem.text and elem.text.strip() == "catkin":
package_root.remove(elem)
package_root.append(new_element(tag="buildtool_depend", text="ament_cmake"))
@staticmethod
def rule_set_client_library(package_root):
for elem in list(package_root):
if elem.text and elem.text.strip() in CatkinToAmentMigration.CLIENT_CONVERSION:
elem.text = CatkinToAmentMigration.CLIENT_CONVERSION[elem.text.strip()]
@staticmethod
def rule_add_export_build_type(package_root):
build_elem = new_element(tag="build_type", text="ament_cmake", tail="\n ")
export_elem = new_element(tag="export", text="\n ")
export_elem.append(build_elem)
package_root.append(export_elem)
@staticmethod
def rule_set_run_to_exec_depend(package_root):
for elem in package_root.findall("run_depend"):
elem.tag = "exec_depend"
@staticmethod
def rule_set_depend_to_run_exec(package_root):
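        # A <depend> element implies both build and exec dependencies in
        # package.xml format 2+, so keep a build_depend and append a matching
        # exec_depend.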
for elem in package_root.findall("depend"):
elem.tag = "build_depend"
package_root.append(new_element(tag="exec_depend", text=elem.text, attrib=elem.attrib))
@staticmethod
def rule_update_message_gen_dependency(package_root):
message_generation_used = False
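        # catkin's message_generation/message_runtime are superseded by the
        # rosidl generator/runtime packages and builtin_interfaces in ROS 2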
for elem in list(package_root):
if elem.text and elem.text == "message_generation" or elem.text == "message_runtime":
package_root.remove(elem)
message_generation_used = True
if message_generation_used:
package_root.append(new_element(tag="buildtool_depend", text="rosidl_default_generators"))
package_root.append(new_element(tag="build_depend", text="builtin_interfaces"))
package_root.append(new_element(tag="exec_depend", text="builtin_interfaces"))
package_root.append(new_element(tag="exec_depend", text="rosidl_default_runtime"))
package_root.append(new_element(tag="member_of_group", text="rosidl_interface_packages"))
#########################
# HELPERS #
#########################
@staticmethod
def indent_tree(elem, level):
if len(elem) > 0: # element has children
if elem.text is None or len(elem.text) == 0:
elem.text = "\n" + (" "*(level+1)) # sets the indent for the children
list(elem)[-1].tail = "\n" + " "*level
for child in list(elem)[:-1]:
child.tail = "\n" + (" "*(level+1))
PackageXMLPorter.indent_tree(elem=child, level=level+1)
if __name__ == '__main__':
tree = etree.parse("package.xml")
PackageXMLPorter.port(tree=tree)
tree.write("updated_package.xml", encoding="utf-8", xml_declaration=True)
| [((25, 14, 25, 47), 'xml.etree.ElementTree.Element', 'etree.Element', (), '', True, 'import xml.etree.ElementTree as etree\n'), ((138, 11, 138, 37), 'xml.etree.ElementTree.parse', 'etree.parse', ({(138, 23, 138, 36): '"""package.xml"""'}, {}), "('package.xml')", True, 'import xml.etree.ElementTree as etree\n')] |
giuseppe/quay | endpoints/api/permission_models_interface.py | a1b7e4b51974edfe86f66788621011eef2667e6a | import sys
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from six import add_metaclass
class SaveException(Exception):
def __init__(self, other):
self.traceback = sys.exc_info()
super(SaveException, self).__init__(str(other))
class DeleteException(Exception):
def __init__(self, other):
self.traceback = sys.exc_info()
super(DeleteException, self).__init__(str(other))
class Role(namedtuple("Role", ["role_name"])):
def to_dict(self):
return {
"role": self.role_name,
}
class UserPermission(
namedtuple(
"UserPermission",
[
"role_name",
"username",
"is_robot",
"avatar",
"is_org_member",
"has_org",
],
)
):
def to_dict(self):
perm_dict = {
"role": self.role_name,
"name": self.username,
"is_robot": self.is_robot,
"avatar": self.avatar,
}
if self.has_org:
perm_dict["is_org_member"] = self.is_org_member
return perm_dict
class RobotPermission(
namedtuple(
"RobotPermission",
[
"role_name",
"username",
"is_robot",
"is_org_member",
],
)
):
def to_dict(self, user=None, team=None, org_members=None):
return {
"role": self.role_name,
"name": self.username,
"is_robot": True,
"is_org_member": self.is_org_member,
}
class TeamPermission(
namedtuple(
"TeamPermission",
[
"role_name",
"team_name",
"avatar",
],
)
):
def to_dict(self):
return {
"role": self.role_name,
"name": self.team_name,
"avatar": self.avatar,
}
@add_metaclass(ABCMeta)
class PermissionDataInterface(object):
"""
Data interface used by permissions API.
"""
@abstractmethod
def get_repo_permissions_by_user(self, namespace_name, repository_name):
"""
Args:
namespace_name: string
repository_name: string
Returns:
list(UserPermission)
"""
@abstractmethod
def get_repo_roles(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
list(Role) or None
"""
@abstractmethod
def get_repo_permission_for_user(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
UserPermission
"""
@abstractmethod
def set_repo_permission_for_user(self, username, namespace_name, repository_name, role_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
role_name: string
Returns:
UserPermission
Raises:
SaveException
"""
@abstractmethod
def delete_repo_permission_for_user(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
void
Raises:
DeleteException
"""
@abstractmethod
def get_repo_permissions_by_team(self, namespace_name, repository_name):
"""
Args:
namespace_name: string
repository_name: string
Returns:
list(TeamPermission)
"""
@abstractmethod
def get_repo_role_for_team(self, team_name, namespace_name, repository_name):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
Returns:
Role
"""
@abstractmethod
def set_repo_permission_for_team(self, team_name, namespace_name, repository_name, permission):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
permission: string
Returns:
TeamPermission
Raises:
SaveException
"""
@abstractmethod
def delete_repo_permission_for_team(self, team_name, namespace_name, repository_name):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
Returns:
TeamPermission
Raises:
DeleteException
"""
| [((20, 11, 20, 44), 'collections.namedtuple', 'namedtuple', ({(20, 22, 20, 28): '"""Role"""', (20, 30, 20, 43): "['role_name']"}, {}), "('Role', ['role_name'])", False, 'from collections import namedtuple\n'), ((28, 4, 38, 5), 'collections.namedtuple', 'namedtuple', ({(29, 8, 29, 24): '"""UserPermission"""', (30, 8, 37, 9): "['role_name', 'username', 'is_robot', 'avatar', 'is_org_member', 'has_org']"}, {}), "('UserPermission', ['role_name', 'username', 'is_robot', 'avatar',\n 'is_org_member', 'has_org'])", False, 'from collections import namedtuple\n'), ((53, 4, 61, 5), 'collections.namedtuple', 'namedtuple', ({(54, 8, 54, 25): '"""RobotPermission"""', (55, 8, 60, 9): "['role_name', 'username', 'is_robot', 'is_org_member']"}, {}), "('RobotPermission', ['role_name', 'username', 'is_robot',\n 'is_org_member'])", False, 'from collections import namedtuple\n'), ((73, 4, 80, 5), 'collections.namedtuple', 'namedtuple', ({(74, 8, 74, 24): '"""TeamPermission"""', (75, 8, 79, 9): "['role_name', 'team_name', 'avatar']"}, {}), "('TeamPermission', ['role_name', 'team_name', 'avatar'])", False, 'from collections import namedtuple\n'), ((90, 1, 90, 23), 'six.add_metaclass', 'add_metaclass', ({(90, 15, 90, 22): 'ABCMeta'}, {}), '(ABCMeta)', False, 'from six import add_metaclass\n'), ((10, 25, 10, 39), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((16, 25, 16, 39), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n')] |