repo_name (string, 7-94 chars) | repo_path (string, 4-237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10-680k chars) | apis (string, 2-680k chars)
---|---|---|---|---|
INSRapperswil/nornir-web | backend/api/management/commands/create_testdb.py | 458e6b24bc373197044b4b7b5da74f16f93a9459 | """
Setup DB with example data for tests
"""
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User, Group
from django.core.management.base import BaseCommand
from api import models
class Command(BaseCommand):
help = 'Setup DB with example data for tests'
def handle(self, *args, **options):
print('---- Creating Users ----')
User.objects.get_or_create(username='thomastest', password=make_password('imatestin'))
thomas = User.objects.get(username='thomastest')
User.objects.get_or_create(username='norbert', password=make_password('netzwerk'))
norbert = User.objects.get(username='norbert')
User.objects.get_or_create(username='stefan', password=make_password('helldesk'))
stefan = User.objects.get(username='stefan')
superuser = Group.objects.get(name='superuser')
superuser.user_set.add(thomas)
netadmin = Group.objects.get(name='netadmin')
netadmin.user_set.add(norbert)
support = Group.objects.get(name='support')
support.user_set.add(stefan)
print('---- Creating Inventory ----')
models.Inventory.objects.create(name='Example', hosts_file='web_nornir/nornir_config/example_config/hosts.yaml',
groups_file='web_nornir/nornir_config/example_config/groups.yaml', type=1)
models.Inventory.objects.create(name='INS Lab', hosts_file='web_nornir/nornir_config/inslab_config/hosts.yaml',
groups_file='web_nornir/nornir_config/inslab_config/groups.yaml', type=1)
print('---- Creating Job Templates ----')
models.JobTemplate.objects.create(name='hello_world', description='This prints a hello world',
file_name='hello_world.py', created_by_id=1)
models.JobTemplate.objects.create(name='Get CDP Neighbors', description='Lists all CDP neighbors',
file_name='get_cdp_neighbors.py', created_by_id=1)
models.JobTemplate.objects.create(name='Get Interfaces',
description='Gets brief information about all interfaces, sh ip int br',
file_name='get_interfaces.py', created_by_id=1)
models.JobTemplate.objects.create(name='Ping Device',
description='Pings a chosen network device and reports if reachable',
file_name='ping.py', variables=['target'], created_by_id=1)
models.JobTemplate.objects.create(name='Get Configuration', description='Gets all configuration from device',
file_name='get_configuration.py', created_by_id=1)
print('---- Creating Tasks ----')
models.Task.objects.create(name='Get Hello World', created_by_id=1, template_id=1, inventory_id=1)
models.Task.objects.create(name='Get CDP neighbors of INS lab', created_by_id=2, template_id=2, inventory_id=2)
models.Task.objects.create(name='Get interfaces of INS lab', created_by_id=2, template_id=3, inventory_id=2)
print('---- ALL DONE!! ----')
| [((525, 564), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': '"""thomastest"""'}), "(username='thomastest')\n", (541, 564), False, 'from django.contrib.auth.models import User, Group\n'), ((678, 714), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': '"""norbert"""'}), "(username='norbert')\n", (694, 714), False, 'from django.contrib.auth.models import User, Group\n'), ((826, 861), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': '"""stefan"""'}), "(username='stefan')\n", (842, 861), False, 'from django.contrib.auth.models import User, Group\n'), ((885, 920), 'django.contrib.auth.models.Group.objects.get', 'Group.objects.get', ([], {'name': '"""superuser"""'}), "(name='superuser')\n", (902, 920), False, 'from django.contrib.auth.models import User, Group\n'), ((983, 1017), 'django.contrib.auth.models.Group.objects.get', 'Group.objects.get', ([], {'name': '"""netadmin"""'}), "(name='netadmin')\n", (1000, 1017), False, 'from django.contrib.auth.models import User, Group\n'), ((1079, 1112), 'django.contrib.auth.models.Group.objects.get', 'Group.objects.get', ([], {'name': '"""support"""'}), "(name='support')\n", (1096, 1112), False, 'from django.contrib.auth.models import User, Group\n'), ((1215, 1412), 'api.models.Inventory.objects.create', 'models.Inventory.objects.create', ([], {'name': '"""Example"""', 'hosts_file': '"""web_nornir/nornir_config/example_config/hosts.yaml"""', 'groups_file': '"""web_nornir/nornir_config/example_config/groups.yaml"""', 'type': '(1)'}), "(name='Example', hosts_file=\n 'web_nornir/nornir_config/example_config/hosts.yaml', groups_file=\n 'web_nornir/nornir_config/example_config/groups.yaml', type=1)\n", (1246, 1412), False, 'from api import models\n'), ((1453, 1648), 'api.models.Inventory.objects.create', 'models.Inventory.objects.create', ([], {'name': '"""INS Lab"""', 'hosts_file': '"""web_nornir/nornir_config/inslab_config/hosts.yaml"""', 'groups_file': '"""web_nornir/nornir_config/inslab_config/groups.yaml"""', 'type': '(1)'}), "(name='INS Lab', hosts_file=\n 'web_nornir/nornir_config/inslab_config/hosts.yaml', groups_file=\n 'web_nornir/nornir_config/inslab_config/groups.yaml', type=1)\n", (1484, 1648), False, 'from api import models\n'), ((1748, 1892), 'api.models.JobTemplate.objects.create', 'models.JobTemplate.objects.create', ([], {'name': '"""hello_world"""', 'description': '"""This prints a hello world"""', 'file_name': '"""hello_world.py"""', 'created_by_id': '(1)'}), "(name='hello_world', description=\n 'This prints a hello world', file_name='hello_world.py', created_by_id=1)\n", (1781, 1892), False, 'from api import models\n'), ((1940, 2098), 'api.models.JobTemplate.objects.create', 'models.JobTemplate.objects.create', ([], {'name': '"""Get CDP Neighbors"""', 'description': '"""Lists all CDP neighbors"""', 'file_name': '"""get_cdp_neighbors.py"""', 'created_by_id': '(1)'}), "(name='Get CDP Neighbors', description=\n 'Lists all CDP neighbors', file_name='get_cdp_neighbors.py',\n created_by_id=1)\n", (1973, 2098), False, 'from api import models\n'), ((2142, 2329), 'api.models.JobTemplate.objects.create', 'models.JobTemplate.objects.create', ([], {'name': '"""Get Interfaces"""', 'description': '"""Gets brief information about all interfaces, sh ip int br"""', 'file_name': '"""get_interfaces.py"""', 'created_by_id': '(1)'}), "(name='Get Interfaces', description=\n 'Gets brief information about all interfaces, sh ip int br', file_name=\n 
'get_interfaces.py', created_by_id=1)\n", (2175, 2329), False, 'from api import models\n'), ((2415, 2608), 'api.models.JobTemplate.objects.create', 'models.JobTemplate.objects.create', ([], {'name': '"""Ping Device"""', 'description': '"""Pings a chosen network device and reports if reachable"""', 'file_name': '"""ping.py"""', 'variables': "['target']", 'created_by_id': '(1)'}), "(name='Ping Device', description=\n 'Pings a chosen network device and reports if reachable', file_name=\n 'ping.py', variables=['target'], created_by_id=1)\n", (2448, 2608), False, 'from api import models\n'), ((2694, 2863), 'api.models.JobTemplate.objects.create', 'models.JobTemplate.objects.create', ([], {'name': '"""Get Configuration"""', 'description': '"""Gets all configuration from device"""', 'file_name': '"""get_configuration.py"""', 'created_by_id': '(1)'}), "(name='Get Configuration', description=\n 'Gets all configuration from device', file_name='get_configuration.py',\n created_by_id=1)\n", (2727, 2863), False, 'from api import models\n'), ((2958, 3060), 'api.models.Task.objects.create', 'models.Task.objects.create', ([], {'name': '"""Get Hello World"""', 'created_by_id': '(1)', 'template_id': '(1)', 'inventory_id': '(1)'}), "(name='Get Hello World', created_by_id=1,\n template_id=1, inventory_id=1)\n", (2984, 3060), False, 'from api import models\n'), ((3066, 3181), 'api.models.Task.objects.create', 'models.Task.objects.create', ([], {'name': '"""Get CDP neighbors of INS lab"""', 'created_by_id': '(2)', 'template_id': '(2)', 'inventory_id': '(2)'}), "(name='Get CDP neighbors of INS lab',\n created_by_id=2, template_id=2, inventory_id=2)\n", (3092, 3181), False, 'from api import models\n'), ((3187, 3300), 'api.models.Task.objects.create', 'models.Task.objects.create', ([], {'name': '"""Get interfaces of INS lab"""', 'created_by_id': '(2)', 'template_id': '(3)', 'inventory_id': '(2)'}), "(name='Get interfaces of INS lab', created_by_id=\n 2, template_id=3, inventory_id=2)\n", (3213, 3300), False, 'from api import models\n'), ((479, 505), 'django.contrib.auth.hashers.make_password', 'make_password', (['"""imatestin"""'], {}), "('imatestin')\n", (492, 505), False, 'from django.contrib.auth.hashers import make_password\n'), ((632, 657), 'django.contrib.auth.hashers.make_password', 'make_password', (['"""netzwerk"""'], {}), "('netzwerk')\n", (645, 657), False, 'from django.contrib.auth.hashers import make_password\n'), ((781, 806), 'django.contrib.auth.hashers.make_password', 'make_password', (['"""helldesk"""'], {}), "('helldesk')\n", (794, 806), False, 'from django.contrib.auth.hashers import make_password\n')] |
charles-l/pyinfra | pyinfra/facts/util/distro.py | 1992d98ff31d41404427dbb3cc6095a7bebd4052 | from __future__ import absolute_import, unicode_literals
import os
import distro
def get_distro_info(root_dir):
# We point _UNIXCONFDIR to root_dir
old_value = distro._UNIXCONFDIR
distro._UNIXCONFDIR = os.path.join(root_dir, 'etc')
obj = distro.LinuxDistribution(include_lsb=False, include_uname=False)
# NOTE: The parsing of LinuxDistribution distro information is done in a lazy way.
# This will force the parsing to happen before we restore the old value of _UNIXCONFDIR.
_ = obj.info()
distro._UNIXCONFDIR = old_value
return obj
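# Illustrative usage (an assumption, not part of the original file): calling
# get_distro_info('/mnt/target') points the distro module at /mnt/target/etc
# (e.g. os-release) instead of the host's /etc, so an expression like
# get_distro_info('/mnt/target').info() could return distro details for a
# chroot or mounted image; later callers are unaffected because _UNIXCONFDIR
# is restored before returning.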
| [((218, 247), 'os.path.join', 'os.path.join', (['root_dir', '"""etc"""'], {}), "(root_dir, 'etc')\n", (230, 247), False, 'import os\n'), ((259, 323), 'distro.LinuxDistribution', 'distro.LinuxDistribution', ([], {'include_lsb': '(False)', 'include_uname': '(False)'}), '(include_lsb=False, include_uname=False)\n', (283, 323), False, 'import distro\n')] |
salabogdan/python-client | appium/webdriver/common/multi_action.py | 66208fdbbc8f0a8b0e90376b404135b57e797fa5 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The Selenium team implemented something like the Multi Action API in the form of
# "action chains" (https://code.google.com/p/selenium/source/browse/py/selenium/webdriver/common/action_chains.py).
# These do not quite work for this situation, and do not allow for ad hoc action
# chaining as the spec requires.
import copy
from typing import TYPE_CHECKING, Dict, List, Optional, TypeVar, Union
from appium.webdriver.mobilecommand import MobileCommand as Command
if TYPE_CHECKING:
from appium.webdriver.common.touch_action import TouchAction
from appium.webdriver.webdriver import WebDriver
from appium.webdriver.webelement import WebElement
T = TypeVar('T', bound='MultiAction')
class MultiAction:
def __init__(self, driver: 'WebDriver', element: Optional['WebElement'] = None) -> None:
self._driver = driver
self._element = element
self._touch_actions: List['TouchAction'] = []
def add(self, *touch_actions: 'TouchAction') -> None:
"""Add TouchAction objects to the MultiAction, to be performed later.
Args:
touch_actions: one or more TouchAction objects describing a chain of actions to be performed by one finger
Usage:
| a1 = TouchAction(driver)
| a1.press(el1).move_to(el2).release()
| a2 = TouchAction(driver)
| a2.press(el2).move_to(el1).release()
| MultiAction(driver).add(a1, a2)
Returns:
`MultiAction`: Self instance
"""
for touch_action in touch_actions:
if self._touch_actions is None:
self._touch_actions = []
self._touch_actions.append(copy.copy(touch_action))
def perform(self: T) -> T:
"""Perform the actions stored in the object.
Usage:
| a1 = TouchAction(driver)
| a1.press(el1).move_to(el2).release()
| a2 = TouchAction(driver)
| a2.press(el2).move_to(el1).release()
| MultiAction(driver).add(a1, a2).perform()
Returns:
`MultiAction`: Self instance
"""
self._driver.execute(Command.MULTI_ACTION, self.json_wire_gestures)
# clean up and be ready for the next batch
self._touch_actions = []
return self
@property
def json_wire_gestures(self) -> Dict[str, Union[List, str]]:
actions = []
for action in self._touch_actions:
actions.append(action.json_wire_gestures)
if self._element is not None:
return {'actions': actions, 'elementId': self._element.id}
return {'actions': actions}
| [((1232, 1265), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': '"""MultiAction"""'}), "('T', bound='MultiAction')\n", (1239, 1265), False, 'from typing import TYPE_CHECKING, Dict, List, Optional, TypeVar, Union\n'), ((2248, 2271), 'copy.copy', 'copy.copy', (['touch_action'], {}), '(touch_action)\n', (2257, 2271), False, 'import copy\n')] |
JonasFrey96/PLR2 | src/visu/visualizer.py | a0498e6ff283a27c6db11b3d57d3b3100026f069 | import numpy as np
import sys
import os
from PIL import Image
from visu.helper_functions import save_image
from scipy.spatial.transform import Rotation as R
from helper import re_quat
import copy
import torch
import numpy as np
import k3d
class Visualizer():
def __init__(self, p_visu, writer=None):
if p_visu[-1] != '/':
p_visu = p_visu + '/'
self.p_visu = p_visu
self.writer = writer
if not os.path.exists(self.p_visu):
os.makedirs(self.p_visu)
def plot_estimated_pose(self, tag, epoch, img, points, trans=[[0, 0, 0]], rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], cam_cx=0, cam_cy=0, cam_fx=0, cam_fy=0, store=False, jupyter=False, w=2):
"""
tag := tensorboard tag
        epoch := tensorboard epoch
        store := true -> stores the image to the standard path
        path := if not None, creates the path and stores to path/tag.png
        img := original_image, [width, height, RGB]
        points := points of the object model [length, x, y, z]
trans: [1,3]
rot: [3,3]
"""
img_d = copy.deepcopy(img)
points = np.dot(points, rot_mat.T)
points = np.add(points, trans[0, :])
for i in range(0, points.shape[0]):
p_x = points[i, 0]
p_y = points[i, 1]
p_z = points[i, 2]
u = int(((p_x / p_z) * cam_fx) + cam_cx)
v = int(((p_y / p_z) * cam_fy) + cam_cy)
try:
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
except:
#print("out of bounce")
pass
if jupyter:
display(Image.fromarray(img_d))
if store:
#store_ar = (img_d* 255).round().astype(np.uint8)
#print("IMAGE D:" ,img_d,img_d.shape )
save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu)
if self.writer is not None:
self.writer.add_image(tag, img_d.astype(
np.uint8), global_step=epoch, dataformats='HWC')
def plot_bounding_box(self, tag, epoch, img, rmin=0, rmax=0, cmin=0, cmax=0, str_width=2, store=False, jupyter=False, b=None):
"""
tag := tensorboard tag
        epoch := tensorboard epoch
        store := true -> stores the image to the standard path
        path := if not None, creates the path and stores to path/tag.png
        img := original_image, [width, height, RGB]
"""
if isinstance(b, dict):
rmin = b['rmin']
rmax = b['rmax']
cmin = b['cmin']
cmax = b['cmax']
# ToDo check Input data
img_d = np.array(copy.deepcopy(img))
c = [0, 0, 255]
rmin_mi = max(0, rmin - str_width)
rmin_ma = min(img_d.shape[0], rmin + str_width)
rmax_mi = max(0, rmax - str_width)
rmax_ma = min(img_d.shape[0], rmax + str_width)
cmin_mi = max(0, cmin - str_width)
cmin_ma = min(img_d.shape[1], cmin + str_width)
cmax_mi = max(0, cmax - str_width)
cmax_ma = min(img_d.shape[1], cmax + str_width)
img_d[rmin_mi:rmin_ma, cmin:cmax, :] = c
img_d[rmax_mi:rmax_ma, cmin:cmax, :] = c
img_d[rmin:rmax, cmin_mi:cmin_ma, :] = c
img_d[rmin:rmax, cmax_mi:cmax_ma, :] = c
print("STORE", store)
img_d = img_d.astype(np.uint8)
if store:
#store_ar = (img_d* 255).round().astype(np.uint8)
save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu)
if jupyter:
display(Image.fromarray(img_d))
if self.writer is not None:
self.writer.add_image(tag, img_d.astype(
np.uint8), global_step=epoch, dataformats='HWC')
def plot_pcd(x, point_size=0.005, c='g'):
"""
x: point_nr,3
"""
if c == 'b':
k = 245
elif c == 'g':
k = 25811000
elif c == 'r':
k = 11801000
elif c == 'black':
k = 2580
else:
k = 2580
colors = np.ones(x.shape[0]) * k
plot = k3d.plot(name='points')
plt_points = k3d.points(x, colors.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
def plot_two_pcd(x, y, point_size=0.005, c1='g', c2='r'):
if c1 == 'b':
k = 245
elif c1 == 'g':
k = 25811000
elif c1 == 'r':
k = 11801000
elif c1 == 'black':
k = 2580
else:
k = 2580
if c2 == 'b':
k2 = 245
elif c2 == 'g':
k2 = 25811000
elif c2 == 'r':
k2 = 11801000
elif c2 == 'black':
k2 = 2580
else:
k2 = 2580
col1 = np.ones(x.shape[0]) * k
col2 = np.ones(y.shape[0]) * k2
plot = k3d.plot(name='points')
plt_points = k3d.points(x, col1.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points = k3d.points(y, col2.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
class SequenceVisualizer():
def __init__(self, seq_data, images_path, output_path=None):
self.seq_data = seq_data
self.images_path = images_path
self.output_path = output_path
def plot_points_on_image(self, seq_no, frame_no, jupyter=False, store=False, pose_type='filtered'):
seq_data = self.seq_data
images_path = self.images_path
output_path = self.output_path
frame = seq_data[seq_no][frame_no]
unique_desig = frame['dl_dict']['unique_desig'][0]
if pose_type == 'ground_truth':
# ground truth
t = frame['dl_dict']['gt_trans'].reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['dl_dict']['gt_rot_wxyz'][0]), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
elif pose_type == 'filtered':
# filter pred
t = np.array(frame['filter_pred']['t']).reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['filter_pred']['r_wxyz']), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
elif pose_type == 'final_pred_obs':
# final pred
t = np.array(frame['final_pred_obs']['t']).reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['final_pred_obs']['r_wxyz']), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
else:
raise Exception('Pose type not implemented.')
w = 2
if type(unique_desig) != str:
im = np.array(Image.open(
images_path + unique_desig[0] + '-color.png')) # ycb
else:
im = np.array(Image.open(
images_path + unique_desig + '.png')) # laval
img_d = copy.deepcopy(im)
dl_dict = frame['dl_dict']
points = copy.deepcopy(
seq_data[seq_no][0]['dl_dict']['model_points'][0, :, :])
points = np.dot(points, rot.T)
points = np.add(points, t[0, :])
cam_cx = dl_dict['cam_cal'][0][0]
cam_cy = dl_dict['cam_cal'][0][1]
cam_fx = dl_dict['cam_cal'][0][2]
cam_fy = dl_dict['cam_cal'][0][3]
for i in range(0, points.shape[0]):
p_x = points[i, 0]
p_y = points[i, 1]
p_z = points[i, 2]
u = int(((p_x / p_z) * cam_fx) + cam_cx)
v = int(((p_y / p_z) * cam_fy) + cam_cy)
try:
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
except:
#print("out of bounds")
pass
img_disp = Image.fromarray(img_d)
if jupyter:
display(img_disp)
if store:
outpath = output_path + \
'{}_{}_{}.png'.format(pose_type, seq_no, frame_no)
img_disp.save(outpath, "PNG", compress_level=1)
print("Saved image to {}".format(outpath))
def save_sequence(self, seq_no, pose_type='filtered', name=''):
for fn in range(len(self.seq_data)):
self.plot_points_on_image(seq_no, fn, False, True, pose_type)
if name:
video_name = '{}_{}_{}'.format(name, pose_type, seq_no)
else:
video_name = '{}_{}'.format(pose_type, seq_no)
cmd = "cd {} && ffmpeg -r 10 -i ./filtered_{}_%d.png -vcodec mpeg4 -y {}.mp4".format(
self.output_path, seq_no, video_name)
os.system(cmd)
| [((4150, 4173), 'k3d.plot', 'k3d.plot', ([], {'name': '"""points"""'}), "(name='points')\n", (4158, 4173), False, 'import k3d\n'), ((4842, 4865), 'k3d.plot', 'k3d.plot', ([], {'name': '"""points"""'}), "(name='points')\n", (4850, 4865), False, 'import k3d\n'), ((1091, 1109), 'copy.deepcopy', 'copy.deepcopy', (['img'], {}), '(img)\n', (1104, 1109), False, 'import copy\n'), ((1127, 1152), 'numpy.dot', 'np.dot', (['points', 'rot_mat.T'], {}), '(points, rot_mat.T)\n', (1133, 1152), True, 'import numpy as np\n'), ((1170, 1199), 'numpy.add', 'np.add', (['points', 'trans[(0), :]'], {}), '(points, trans[(0), :])\n', (1176, 1199), True, 'import numpy as np\n'), ((4115, 4134), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), '(x.shape[0])\n', (4122, 4134), True, 'import numpy as np\n'), ((4771, 4790), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), '(x.shape[0])\n', (4778, 4790), True, 'import numpy as np\n'), ((4806, 4825), 'numpy.ones', 'np.ones', (['y.shape[0]'], {}), '(y.shape[0])\n', (4813, 4825), True, 'import numpy as np\n'), ((6871, 6888), 'copy.deepcopy', 'copy.deepcopy', (['im'], {}), '(im)\n', (6884, 6888), False, 'import copy\n'), ((6942, 7014), 'copy.deepcopy', 'copy.deepcopy', (["seq_data[seq_no][0]['dl_dict']['model_points'][(0), :, :]"], {}), "(seq_data[seq_no][0]['dl_dict']['model_points'][(0), :, :])\n", (6955, 7014), False, 'import copy\n'), ((7043, 7064), 'numpy.dot', 'np.dot', (['points', 'rot.T'], {}), '(points, rot.T)\n', (7049, 7064), True, 'import numpy as np\n'), ((7082, 7107), 'numpy.add', 'np.add', (['points', 't[(0), :]'], {}), '(points, t[(0), :])\n', (7088, 7107), True, 'import numpy as np\n'), ((7827, 7849), 'PIL.Image.fromarray', 'Image.fromarray', (['img_d'], {}), '(img_d)\n', (7842, 7849), False, 'from PIL import Image\n'), ((8637, 8651), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (8646, 8651), False, 'import os\n'), ((444, 471), 'os.path.exists', 'os.path.exists', (['self.p_visu'], {}), '(self.p_visu)\n', (458, 471), False, 'import os\n'), ((485, 509), 'os.makedirs', 'os.makedirs', (['self.p_visu'], {}), '(self.p_visu)\n', (496, 509), False, 'import os\n'), ((2763, 2781), 'copy.deepcopy', 'copy.deepcopy', (['img'], {}), '(img)\n', (2776, 2781), False, 'import copy\n'), ((1771, 1793), 'PIL.Image.fromarray', 'Image.fromarray', (['img_d'], {}), '(img_d)\n', (1786, 1793), False, 'from PIL import Image\n'), ((3666, 3688), 'PIL.Image.fromarray', 'Image.fromarray', (['img_d'], {}), '(img_d)\n', (3681, 3688), False, 'from PIL import Image\n'), ((5798, 5847), 'copy.deepcopy', 'copy.deepcopy', (["frame['dl_dict']['gt_rot_wxyz'][0]"], {}), "(frame['dl_dict']['gt_rot_wxyz'][0])\n", (5811, 5847), False, 'import copy\n'), ((6658, 6714), 'PIL.Image.open', 'Image.open', (["(images_path + unique_desig[0] + '-color.png')"], {}), "(images_path + unique_desig[0] + '-color.png')\n", (6668, 6714), False, 'from PIL import Image\n'), ((6780, 6827), 'PIL.Image.open', 'Image.open', (["(images_path + unique_desig + '.png')"], {}), "(images_path + unique_desig + '.png')\n", (6790, 6827), False, 'from PIL import Image\n'), ((5892, 5913), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['rot_quat'], {}), '(rot_quat)\n', (5903, 5913), True, 'from scipy.spatial.transform import Rotation as R\n'), ((6087, 6132), 'copy.deepcopy', 'copy.deepcopy', (["frame['filter_pred']['r_wxyz']"], {}), "(frame['filter_pred']['r_wxyz'])\n", (6100, 6132), False, 'import copy\n'), ((6006, 6041), 'numpy.array', 'np.array', (["frame['filter_pred']['t']"], {}), "(frame['filter_pred']['t'])\n", 
(6014, 6041), True, 'import numpy as np\n'), ((6177, 6198), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['rot_quat'], {}), '(rot_quat)\n', (6188, 6198), True, 'from scipy.spatial.transform import Rotation as R\n'), ((6380, 6428), 'copy.deepcopy', 'copy.deepcopy', (["frame['final_pred_obs']['r_wxyz']"], {}), "(frame['final_pred_obs']['r_wxyz'])\n", (6393, 6428), False, 'import copy\n'), ((6296, 6334), 'numpy.array', 'np.array', (["frame['final_pred_obs']['t']"], {}), "(frame['final_pred_obs']['t'])\n", (6304, 6334), True, 'import numpy as np\n'), ((6473, 6494), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['rot_quat'], {}), '(rot_quat)\n', (6484, 6494), True, 'from scipy.spatial.transform import Rotation as R\n')] |
FreesiaLikesPomelo/-offer | leetCode_Q37_serializeTree.py | 14ac73cb46d13c7f5bbc294329a14f3c5995bc7a | '''
Interview Question 37. Serialize a Binary Tree
Implement two functions to serialize and deserialize a binary tree, respectively.
Example:
You can serialize the following binary tree:
1
/ \
2 3
/ \
4 5
as "[1,2,3,null,null,4,5]"
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Runtime: 240 ms, beats 22.75% of Python3 submissions
# Memory usage: 31 MB, beats 100.00% of Python3 submissions
class Codec:
def __init__(self):
self.tree = []
self.temp = []
self.flag = 1 # non-None element appears again add self.temp to self.tree
def traByLayer(self, tree: List[TreeNode]):
if tree==[]:
return
else:
temp = tree.pop(0)
if temp!=None:
self.tree+=self.temp
self.temp = []
self.tree.append(temp.val)
tree.append(temp.left)
tree.append(temp.right)
else:
self.temp.append(None)
#print("trabylary",self.tree)
self.traByLayer(tree)
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if root==None:
return ''
tree = [root]
self.traByLayer(tree)
print(str(self.tree))
return str(self.tree)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
#data = '[1, 2, 3, 1, 3, 2, 4]'
if data=='':
return None
start = 0
end = 0
tree = []
for i in range(len(data)):
if data[i]==',' or data[i]==']':
start = end+1
end = i
if data[start:end]!=' None':
#print(start,end,data[start:end])
tree.append(int(data[start:end]))
else:
tree.append(None)
#print("Tree",tree,"then build the Tree")
root = TreeNode(tree.pop(0))
self.buildTreeByList([root],tree)
return root
def buildTreeByList(self,r:List[TreeNode], data: List[int]):
if r==[] or data==[]:
return
root = r.pop(0)
datalen = len(data)
if datalen==0:
return
elif datalen<=2:
#print("root",root.val,"tree",data,"datalen",datalen)
temp = data.pop(0)
if temp!=None:
root.left = TreeNode(temp)
r.append(root.left)
if data!=[]:
temp = data.pop(0)
if temp!=None:
root.right = TreeNode(temp)
r.append(root.right)
return
else:
#print("root",root.val,"tree",data,"datalen",datalen)
temp = data.pop(0)
if temp!=None:
root.left = TreeNode(temp)
r.append(root.left)
temp = data.pop(0)
if temp!=None:
root.right = TreeNode(temp)
r.append(root.right)
self.buildTreeByList(r,data)
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
| [] |
maiamcc/ipuz | ipuz/puzzlekinds/__init__.py | fbe6f663b28ad42754622bf2d3bbe59a26be2615 | from .acrostic import IPUZ_ACROSTIC_VALIDATORS
from .answer import IPUZ_ANSWER_VALIDATORS
from .block import IPUZ_BLOCK_VALIDATORS
from .crossword import IPUZ_CROSSWORD_VALIDATORS
from .fill import IPUZ_FILL_VALIDATORS
from .sudoku import IPUZ_SUDOKU_VALIDATORS
from .wordsearch import IPUZ_WORDSEARCH_VALIDATORS
IPUZ_PUZZLEKINDS = {
"http://ipuz.org/acrostic": {
"mandatory": (
"puzzle",
),
"validators": {
1: IPUZ_ACROSTIC_VALIDATORS,
},
},
"http://ipuz.org/answer": {
"mandatory": (),
"validators": {
1: IPUZ_ANSWER_VALIDATORS,
},
},
"http://ipuz.org/block": {
"mandatory": (
"dimensions",
),
"validators": {
1: IPUZ_BLOCK_VALIDATORS,
},
},
"http://ipuz.org/crossword": {
"mandatory": (
"dimensions",
"puzzle",
),
"validators": {
1: IPUZ_CROSSWORD_VALIDATORS,
},
},
"http://ipuz.org/fill": {
"mandatory": (),
"validators": {
1: IPUZ_FILL_VALIDATORS,
},
},
"http://ipuz.org/sudoku": {
"mandatory": (
"puzzle",
),
"validators": {
1: IPUZ_SUDOKU_VALIDATORS,
},
},
"http://ipuz.org/wordsearch": {
"mandatory": (
"dimensions",
),
"validators": {
1: IPUZ_WORDSEARCH_VALIDATORS,
},
},
}
| [] |
MrQubo/CTFd | CTFd/api/v1/users.py | 5c8ffff1412ea91ad6cf87135cb3d175a1223544 | from flask import session, request, abort
from flask_restplus import Namespace, Resource
from CTFd.models import (
db,
Users,
Solves,
Awards,
Tracking,
Unlocks,
Submissions,
Notifications,
)
from CTFd.utils.decorators import authed_only, admins_only, ratelimit
from CTFd.cache import clear_standings
from CTFd.utils.user import get_current_user, is_admin
from CTFd.utils.decorators.visibility import (
check_account_visibility,
check_score_visibility,
)
from CTFd.schemas.submissions import SubmissionSchema
from CTFd.schemas.awards import AwardSchema
from CTFd.schemas.users import UserSchema
users_namespace = Namespace("users", description="Endpoint to retrieve Users")
@users_namespace.route("")
class UserList(Resource):
@check_account_visibility
def get(self):
users = Users.query.filter_by(banned=False, hidden=False)
response = UserSchema(view="user", many=True).dump(users)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@admins_only
def post(self):
req = request.get_json()
schema = UserSchema("admin")
response = schema.load(req)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.add(response.data)
db.session.commit()
if request.args.get("notify"):
name = response.data.name
password = req.get("password")
clear_standings()
response = schema.dump(response.data)
return {"success": True, "data": response.data}
@users_namespace.route("/<int:user_id>")
@users_namespace.param("user_id", "User ID")
class UserPublic(Resource):
@check_account_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
response = UserSchema(view=session.get("type", "user")).dump(user)
if response.errors:
return {"success": False, "errors": response.errors}, 400
response.data["place"] = user.place
response.data["score"] = user.score
return {"success": True, "data": response.data}
@admins_only
def patch(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
data = request.get_json()
data["id"] = user_id
schema = UserSchema(view="admin", instance=user, partial=True)
response = schema.load(data)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.commit()
response = schema.dump(response.data)
db.session.close()
clear_standings()
return {"success": True, "data": response}
@admins_only
def delete(self, user_id):
Notifications.query.filter_by(user_id=user_id).delete()
Awards.query.filter_by(user_id=user_id).delete()
Unlocks.query.filter_by(user_id=user_id).delete()
Submissions.query.filter_by(user_id=user_id).delete()
Solves.query.filter_by(user_id=user_id).delete()
Tracking.query.filter_by(user_id=user_id).delete()
Users.query.filter_by(id=user_id).delete()
db.session.commit()
db.session.close()
clear_standings()
return {"success": True}
@users_namespace.route("/me")
class UserPrivate(Resource):
@authed_only
def get(self):
user = get_current_user()
response = UserSchema("self").dump(user).data
response["place"] = user.place
response["score"] = user.score
return {"success": True, "data": response}
@authed_only
def patch(self):
user = get_current_user()
data = request.get_json()
schema = UserSchema(view="self", instance=user, partial=True)
response = schema.load(data)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.commit()
response = schema.dump(response.data)
db.session.close()
clear_standings()
return {"success": True, "data": response.data}
@users_namespace.route("/me/solves")
class UserPrivateSolves(Resource):
@authed_only
def get(self):
user = get_current_user()
solves = user.get_solves(admin=True)
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(solves)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@users_namespace.route("/me/fails")
class UserPrivateFails(Resource):
@authed_only
def get(self):
user = get_current_user()
fails = user.get_fails(admin=True)
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(fails)
if response.errors:
return {"success": False, "errors": response.errors}, 400
if is_admin():
data = response.data
else:
data = []
count = len(response.data)
return {"success": True, "data": data, "meta": {"count": count}}
@users_namespace.route("/me/awards")
@users_namespace.param("user_id", "User ID")
class UserPrivateAwards(Resource):
@authed_only
def get(self):
user = get_current_user()
awards = user.get_awards(admin=True)
view = "user" if not is_admin() else "admin"
response = AwardSchema(view=view, many=True).dump(awards)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@users_namespace.route("/<user_id>/solves")
@users_namespace.param("user_id", "User ID")
class UserPublicSolves(Resource):
@check_account_visibility
@check_score_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
solves = user.get_solves(admin=is_admin())
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(solves)
if response.errors:
return {"success": False, "errors": response.errors}, 400
# return {"success": True, "data": response.data}
return {"success": True, "data": None}
@users_namespace.route("/<user_id>/fails")
@users_namespace.param("user_id", "User ID")
class UserPublicFails(Resource):
@check_account_visibility
@check_score_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
fails = user.get_fails(admin=is_admin())
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(fails)
if response.errors:
return {"success": False, "errors": response.errors}, 400
if is_admin():
data = response.data
else:
data = []
count = len(response.data)
# return {"success": True, "data": data, "meta": {"count": count}}
return {"success": True, "data": None, "meta": {"count": None}}
@users_namespace.route("/<user_id>/awards")
@users_namespace.param("user_id", "User ID or 'me'")
class UserPublicAwards(Resource):
@check_account_visibility
@check_score_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
awards = user.get_awards(admin=is_admin())
view = "user" if not is_admin() else "admin"
response = AwardSchema(view=view, many=True).dump(awards)
if response.errors:
return {"success": False, "errors": response.errors}, 400
# return {"success": True, "data": response.data}
return {"success": True, "data": None}
| [((655, 715), 'flask_restplus.Namespace', 'Namespace', (['"""users"""'], {'description': '"""Endpoint to retrieve Users"""'}), "('users', description='Endpoint to retrieve Users')\n", (664, 715), False, 'from flask_restplus import Namespace, Resource\n'), ((836, 885), 'CTFd.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'banned': '(False)', 'hidden': '(False)'}), '(banned=False, hidden=False)\n', (857, 885), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((1160, 1178), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1176, 1178), False, 'from flask import session, request, abort\n'), ((1196, 1215), 'CTFd.schemas.users.UserSchema', 'UserSchema', (['"""admin"""'], {}), "('admin')\n", (1206, 1215), False, 'from CTFd.schemas.users import UserSchema\n'), ((1360, 1389), 'CTFd.models.db.session.add', 'db.session.add', (['response.data'], {}), '(response.data)\n', (1374, 1389), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((1398, 1417), 'CTFd.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1415, 1417), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((1430, 1456), 'flask.request.args.get', 'request.args.get', (['"""notify"""'], {}), "('notify')\n", (1446, 1456), False, 'from flask import session, request, abort\n'), ((1548, 1565), 'CTFd.cache.clear_standings', 'clear_standings', ([], {}), '()\n', (1563, 1565), False, 'from CTFd.cache import clear_standings\n'), ((2445, 2463), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2461, 2463), False, 'from flask import session, request, abort\n'), ((2510, 2563), 'CTFd.schemas.users.UserSchema', 'UserSchema', ([], {'view': '"""admin"""', 'instance': 'user', 'partial': '(True)'}), "(view='admin', instance=user, partial=True)\n", (2520, 2563), False, 'from CTFd.schemas.users import UserSchema\n'), ((2708, 2727), 'CTFd.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2725, 2727), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((2784, 2802), 'CTFd.models.db.session.close', 'db.session.close', ([], {}), '()\n', (2800, 2802), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((2812, 2829), 'CTFd.cache.clear_standings', 'clear_standings', ([], {}), '()\n', (2827, 2829), False, 'from CTFd.cache import clear_standings\n'), ((3347, 3366), 'CTFd.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3364, 3366), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((3375, 3393), 'CTFd.models.db.session.close', 'db.session.close', ([], {}), '()\n', (3391, 3393), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((3403, 3420), 'CTFd.cache.clear_standings', 'clear_standings', ([], {}), '()\n', (3418, 3420), False, 'from CTFd.cache import clear_standings\n'), ((3567, 3585), 'CTFd.utils.user.get_current_user', 'get_current_user', ([], {}), '()\n', (3583, 3585), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((3823, 3841), 'CTFd.utils.user.get_current_user', 'get_current_user', ([], {}), '()\n', (3839, 3841), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((3857, 3875), 'flask.request.get_json', 
'request.get_json', ([], {}), '()\n', (3873, 3875), False, 'from flask import session, request, abort\n'), ((3893, 3945), 'CTFd.schemas.users.UserSchema', 'UserSchema', ([], {'view': '"""self"""', 'instance': 'user', 'partial': '(True)'}), "(view='self', instance=user, partial=True)\n", (3903, 3945), False, 'from CTFd.schemas.users import UserSchema\n'), ((4090, 4109), 'CTFd.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4107, 4109), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((4165, 4183), 'CTFd.models.db.session.close', 'db.session.close', ([], {}), '()\n', (4181, 4183), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((4193, 4210), 'CTFd.cache.clear_standings', 'clear_standings', ([], {}), '()\n', (4208, 4210), False, 'from CTFd.cache import clear_standings\n'), ((4393, 4411), 'CTFd.utils.user.get_current_user', 'get_current_user', ([], {}), '()\n', (4409, 4411), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((4861, 4879), 'CTFd.utils.user.get_current_user', 'get_current_user', ([], {}), '()\n', (4877, 4879), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((5157, 5167), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (5165, 5167), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((5517, 5535), 'CTFd.utils.user.get_current_user', 'get_current_user', ([], {}), '()\n', (5533, 5535), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((7249, 7259), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (7257, 7259), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((1986, 1996), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (1991, 1996), False, 'from flask import session, request, abort\n'), ((6210, 6220), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (6215, 6220), False, 'from flask import session, request, abort\n'), ((6955, 6965), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (6960, 6965), False, 'from flask import session, request, abort\n'), ((7875, 7885), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (7880, 7885), False, 'from flask import session, request, abort\n'), ((905, 939), 'CTFd.schemas.users.UserSchema', 'UserSchema', ([], {'view': '"""user"""', 'many': '(True)'}), "(view='user', many=True)\n", (915, 939), False, 'from CTFd.schemas.users import UserSchema\n'), ((1859, 1892), 'CTFd.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'id': 'user_id'}), '(id=user_id)\n', (1880, 1892), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((1953, 1963), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (1961, 1963), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((2381, 2414), 'CTFd.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'id': 'user_id'}), '(id=user_id)\n', (2402, 2414), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((2939, 2985), 'CTFd.models.Notifications.query.filter_by', 'Notifications.query.filter_by', ([], {'user_id': 'user_id'}), '(user_id=user_id)\n', (2968, 2985), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((3003, 3042), 'CTFd.models.Awards.query.filter_by', 'Awards.query.filter_by', ([], {'user_id': 'user_id'}), '(user_id=user_id)\n', (3025, 
3042), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((3060, 3100), 'CTFd.models.Unlocks.query.filter_by', 'Unlocks.query.filter_by', ([], {'user_id': 'user_id'}), '(user_id=user_id)\n', (3083, 3100), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((3118, 3162), 'CTFd.models.Submissions.query.filter_by', 'Submissions.query.filter_by', ([], {'user_id': 'user_id'}), '(user_id=user_id)\n', (3145, 3162), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((3180, 3219), 'CTFd.models.Solves.query.filter_by', 'Solves.query.filter_by', ([], {'user_id': 'user_id'}), '(user_id=user_id)\n', (3202, 3219), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((3237, 3278), 'CTFd.models.Tracking.query.filter_by', 'Tracking.query.filter_by', ([], {'user_id': 'user_id'}), '(user_id=user_id)\n', (3261, 3278), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((3296, 3329), 'CTFd.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'id': 'user_id'}), '(id=user_id)\n', (3317, 3329), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((4487, 4497), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (4495, 4497), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((4530, 4568), 'CTFd.schemas.submissions.SubmissionSchema', 'SubmissionSchema', ([], {'view': 'view', 'many': '(True)'}), '(view=view, many=True)\n', (4546, 4568), False, 'from CTFd.schemas.submissions import SubmissionSchema\n'), ((4953, 4963), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (4961, 4963), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((4996, 5034), 'CTFd.schemas.submissions.SubmissionSchema', 'SubmissionSchema', ([], {'view': 'view', 'many': '(True)'}), '(view=view, many=True)\n', (5012, 5034), False, 'from CTFd.schemas.submissions import SubmissionSchema\n'), ((5611, 5621), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (5619, 5621), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((5654, 5687), 'CTFd.schemas.awards.AwardSchema', 'AwardSchema', ([], {'view': 'view', 'many': '(True)'}), '(view=view, many=True)\n', (5665, 5687), False, 'from CTFd.schemas.awards import AwardSchema\n'), ((6083, 6116), 'CTFd.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'id': 'user_id'}), '(id=user_id)\n', (6104, 6116), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((6177, 6187), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (6185, 6187), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((6261, 6271), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (6269, 6271), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((6303, 6313), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (6311, 6313), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((6346, 6384), 'CTFd.schemas.submissions.SubmissionSchema', 'SubmissionSchema', ([], {'view': 'view', 'many': '(True)'}), '(view=view, many=True)\n', (6362, 6384), False, 'from CTFd.schemas.submissions import SubmissionSchema\n'), ((6828, 6861), 'CTFd.models.Users.query.filter_by', 'Users.query.filter_by', 
([], {'id': 'user_id'}), '(id=user_id)\n', (6849, 6861), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((6922, 6932), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (6930, 6932), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((7003, 7013), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (7011, 7013), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((7045, 7055), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (7053, 7055), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((7088, 7126), 'CTFd.schemas.submissions.SubmissionSchema', 'SubmissionSchema', ([], {'view': 'view', 'many': '(True)'}), '(view=view, many=True)\n', (7104, 7126), False, 'from CTFd.schemas.submissions import SubmissionSchema\n'), ((7748, 7781), 'CTFd.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'id': 'user_id'}), '(id=user_id)\n', (7769, 7781), False, 'from CTFd.models import db, Users, Solves, Awards, Tracking, Unlocks, Submissions, Notifications\n'), ((7842, 7852), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (7850, 7852), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((7925, 7935), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (7933, 7935), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((7967, 7977), 'CTFd.utils.user.is_admin', 'is_admin', ([], {}), '()\n', (7975, 7977), False, 'from CTFd.utils.user import get_current_user, is_admin\n'), ((8010, 8043), 'CTFd.schemas.awards.AwardSchema', 'AwardSchema', ([], {'view': 'view', 'many': '(True)'}), '(view=view, many=True)\n', (8021, 8043), False, 'from CTFd.schemas.awards import AwardSchema\n'), ((3605, 3623), 'CTFd.schemas.users.UserSchema', 'UserSchema', (['"""self"""'], {}), "('self')\n", (3615, 3623), False, 'from CTFd.schemas.users import UserSchema\n'), ((2033, 2060), 'flask.session.get', 'session.get', (['"""type"""', '"""user"""'], {}), "('type', 'user')\n", (2044, 2060), False, 'from flask import session, request, abort\n')] |
emilhe/dash-extensions-docs | getting_started/pages.py | f44edba1c955242fc503185954ea5f3be69eb122 | import dash_labs as dl
from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input
def page_name(i: int):
return f"page{i}"
def make_page(i: int):
page = DashBlueprint()
page.layout = html.Div([html.H2(f"Page {i}"), html.Button('Click me!', id='btn'), html.Div(id='log')])
@page.callback(Output('log', 'children'), Input('btn', 'n_clicks'))
def on_click(n_clicks):
return f"Hello world {n_clicks} from page {i}!"
return page
app = DashProxy(prevent_initial_callbacks=True, plugins=[dl.plugins.pages])
# Register a few pages.
n_pages = 5
for i in range(n_pages):
page = make_page(i)
page.register(app, page_name(i), prefix=str(i))
# Setup main app layout.
app_shell = [html.H1("App shell"), dl.plugins.page_container]
navigation = html.Ul([html.Li(html.A(page_name(i), href=page_name(i))) for i in range(n_pages)])
app.layout = html.Div(app_shell + [navigation], style=dict(display="block"))
if __name__ == '__main__':
app.run_server() | [((489, 558), 'dash_extensions.enrich.DashProxy', 'DashProxy', ([], {'prevent_initial_callbacks': '(True)', 'plugins': '[dl.plugins.pages]'}), '(prevent_initial_callbacks=True, plugins=[dl.plugins.pages])\n', (498, 558), False, 'from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input\n'), ((185, 200), 'dash_extensions.enrich.DashBlueprint', 'DashBlueprint', ([], {}), '()\n', (198, 200), False, 'from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input\n'), ((734, 754), 'dash_extensions.enrich.html.H1', 'html.H1', (['"""App shell"""'], {}), "('App shell')\n", (741, 754), False, 'from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input\n'), ((328, 353), 'dash_extensions.enrich.Output', 'Output', (['"""log"""', '"""children"""'], {}), "('log', 'children')\n", (334, 353), False, 'from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input\n'), ((355, 379), 'dash_extensions.enrich.Input', 'Input', (['"""btn"""', '"""n_clicks"""'], {}), "('btn', 'n_clicks')\n", (360, 379), False, 'from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input\n'), ((229, 249), 'dash_extensions.enrich.html.H2', 'html.H2', (['f"""Page {i}"""'], {}), "(f'Page {i}')\n", (236, 249), False, 'from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input\n'), ((251, 285), 'dash_extensions.enrich.html.Button', 'html.Button', (['"""Click me!"""'], {'id': '"""btn"""'}), "('Click me!', id='btn')\n", (262, 285), False, 'from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input\n'), ((287, 305), 'dash_extensions.enrich.html.Div', 'html.Div', ([], {'id': '"""log"""'}), "(id='log')\n", (295, 305), False, 'from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input\n')] |
vkmc/zaqar-websocket | zaqar/transport/wsgi/v2_0/homedoc.py | a93c460a28e541b5cc8b425d5fb4d69e78ab9f4b | # Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
# NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03
JSON_HOME = {
'resources': {
# -----------------------------------------------------------------
# Queues
# -----------------------------------------------------------------
'rel/queues': {
'href-template': '/v2/queues{?marker,limit,detailed}',
'href-vars': {
'marker': 'param/marker',
'limit': 'param/queue_limit',
'detailed': 'param/detailed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/queue': {
'href-template': '/v2/queues/{queue_name}',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['PUT', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
'rel/queue_stats': {
'href-template': '/v2/queues/{queue_name}/stats',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Messages
# -----------------------------------------------------------------
'rel/messages': {
'href-template': ('/v2/queues/{queue_name}/messages'
'{?marker,limit,echo,include_claimed}'),
'href-vars': {
'queue_name': 'param/queue_name',
'marker': 'param/marker',
'limit': 'param/messages_limit',
'echo': 'param/echo',
'include_claimed': 'param/include_claimed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post_messages': {
'href-template': '/v2/queues/{queue_name}/messages',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json'],
},
},
'rel/messages_delete': {
'href-template': '/v2/queues/{queue_name}/messages{?ids,pop}',
'href-vars': {
'queue_name': 'param/queue_name',
'ids': 'param/ids',
'pop': 'param/pop'
},
'hints': {
'allow': [
'DELETE'
],
'formats': {
'application/json': {}
}
}
},
'rel/message_delete': {
'href-template': '/v2/queues/{queue_name}/messages/{message_id}{?claim}', # noqa
'href-vars': {
'queue_name': 'param/queue_name',
'message_id': 'param/message_id',
'claim': 'param/claim_id'
},
'hints': {
'allow': [
'DELETE'
],
'formats': {
'application/json': {}
}
}
},
# -----------------------------------------------------------------
# Claims
# -----------------------------------------------------------------
'rel/claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post_claim': {
'href-template': '/v2/queues/{queue_name}/claims{?limit}',
'href-vars': {
'queue_name': 'param/queue_name',
'limit': 'param/claim_limit',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
'rel/patch_claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['PATCH'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
'rel/delete_claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['DELETE'],
'formats': {
'application/json': {},
},
},
},
}
}
ADMIN_RESOURCES = {
# -----------------------------------------------------------------
# Pools
# -----------------------------------------------------------------
'rel/pools': {
'href-template': '/v2/pools{?detailed,limit,marker}',
'href-vars': {
'detailed': 'param/detailed',
'limit': 'param/pool_limit',
'marker': 'param/marker',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/pool': {
'href-template': '/v2/pools/{pool_name}',
'href-vars': {
'pool_name': 'param/pool_name',
},
'hints': {
'allow': ['GET', 'PUT', 'PATCH', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Flavors
# -----------------------------------------------------------------
'rel/flavors': {
'href-template': '/v2/flavors{?detailed,limit,marker}',
'href-vars': {
'detailed': 'param/detailed',
'limit': 'param/flavor_limit',
'marker': 'param/marker',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/flavor': {
'href-template': '/v2/flavors/{flavor_name}',
'href-vars': {
'flavor_name': 'param/flavor_name',
},
'hints': {
'allow': ['GET', 'PUT', 'PATCH', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Health
# -----------------------------------------------------------------
'rel/health': {
'href': '/v2/health',
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
}
class Resource(object):
def __init__(self, conf):
if conf.admin_mode:
JSON_HOME['resources'].update(ADMIN_RESOURCES)
document = json.dumps(JSON_HOME, ensure_ascii=False, indent=4)
self.document_utf8 = document.encode('utf-8')
def on_get(self, req, resp, project_id):
resp.data = self.document_utf8
resp.content_type = 'application/json-home'
resp.cache_control = ['max-age=86400']
# status defaults to 200
| [((8466, 8517), 'json.dumps', 'json.dumps', (['JSON_HOME'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(JSON_HOME, ensure_ascii=False, indent=4)\n', (8476, 8517), False, 'import json\n')] |
vertexproject/synapse | synapse/models/infotech.py | 9712e2aee63914441c59ce6cfc060fe06a2e5920 | import asyncio
import logging
import synapse.exc as s_exc
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lib.version as s_version
logger = logging.getLogger(__name__)
class Cpe23Str(s_types.Str):
'''
CPE 2.3 Formatted String
https://nvlpubs.nist.gov/nistpubs/Legacy/IR/nistir7695.pdf
(Section 6.2)
cpe:2.3: part : vendor : product : version : update : edition :
language : sw_edition : target_sw : target_hw : other
* = "any"
- = N/A
'''
def __init__(self, modl, name, info, opts):
opts['lower'] = True
s_types.Str.__init__(self, modl, name, info, opts)
def _splitCpe23(self, text):
part = ''
parts = []
genr = iter(text)
try:
while True:
c = next(genr)
if c == '\\':
c += next(genr)
if c == ':':
parts.append(part)
part = ''
continue
part += c
except StopIteration:
parts.append(part)
return parts
def _normPyStr(self, valu):
if not valu.startswith('cpe:2.3:'):
mesg = 'CPE 2.3 string is expected to start with "cpe:2.3:"'
raise s_exc.BadTypeValu(valu=valu, mesg=mesg)
text, info = s_types.Str._normPyStr(self, valu)
parts = self._splitCpe23(text)
if len(parts) != 13:
mesg = f'CPE 2.3 string has {len(parts)} parts, expected 13.'
raise s_exc.BadTypeValu(valu=valu, mesg=mesg)
subs = {
'part': parts[2],
'vendor': parts[3],
'product': parts[4],
'version': parts[5],
'update': parts[6],
'edition': parts[7],
'language': parts[8],
'sw_edition': parts[9],
'target_sw': parts[10],
'target_hw': parts[11],
'other': parts[12],
}
return ':'.join(parts), {'subs': subs}
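# Illustrative example (hypothetical CPE string; the outcome follows the
# _normPyStr logic above): a CPE 2.3 formatted string splits into 13
# colon-separated fields, e.g.
#
#   'cpe:2.3:a:microsoft:internet_explorer:8.0.6001:beta:*:*:*:*:*:*'
#   -> subs: {'part': 'a', 'vendor': 'microsoft',
#             'product': 'internet_explorer', 'version': '8.0.6001',
#             'update': 'beta', 'edition': '*', 'language': '*',
#             'sw_edition': '*', 'target_sw': '*', 'target_hw': '*',
#             'other': '*'}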
class SemVer(s_types.Int):
'''
Provides support for parsing a semantic version string into its component
parts. This normalizes a version string into an integer to allow version
ordering. Prerelease information is disregarded for integer comparison
    purposes, as we cannot map an arbitrary pre-release version into an integer
    value.
Major, minor and patch levels are represented as integers, with a max
width of 20 bits. The comparable integer value representing the semver
is the bitwise concatenation of the major, minor and patch levels.
Prerelease and build information will be parsed out and available as
strings if that information is present.
'''
def postTypeInit(self):
s_types.Int.postTypeInit(self)
self.setNormFunc(str, self._normPyStr)
self.setNormFunc(int, self._normPyInt)
def _normPyStr(self, valu):
valu = valu.strip()
if not valu:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='No text left after stripping whitespace')
subs = s_version.parseSemver(valu)
if subs is None:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Unable to parse string as a semver.')
valu = s_version.packVersion(subs.get('major'), subs.get('minor'), subs.get('patch'))
return valu, {'subs': subs}
def _normPyInt(self, valu):
if valu < 0:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Cannot norm a negative integer as a semver.')
if valu > s_version.mask60:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Cannot norm a integer larger than 1152921504606846975 as a semver.')
major, minor, patch = s_version.unpackVersion(valu)
valu = s_version.packVersion(major, minor, patch)
subs = {'major': major,
'minor': minor,
'patch': patch}
return valu, {'subs': subs}
def repr(self, valu):
major, minor, patch = s_version.unpackVersion(valu)
valu = s_version.fmtVersion(major, minor, patch)
return valu
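# Illustrative example (values follow the 20-bit-per-field packing described in
# the SemVer docstring; the packing itself is implemented in synapse.lib.version):
#
#   '2.4.1'        -> major=2, minor=4, patch=1
#   packed integer -> (2 << 40) | (4 << 20) | 1 == 2199027449857
#   '2.4.1-rc1+b5' -> same packed integer; the 'pre' and 'build' strings are
#                     returned in the subs but do not affect ordering.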
loglevels = (
(10, 'debug'),
(20, 'info'),
(30, 'notice'),
(40, 'warning'),
(50, 'err'),
(60, 'crit'),
(70, 'alert'),
(80, 'emerg'),
)
class ItModule(s_module.CoreModule):
async def initCoreModule(self):
self.model.form('it:dev:str').onAdd(self._onFormItDevStr)
self.model.form('it:dev:pipe').onAdd(self._onFormMakeDevStr)
self.model.form('it:dev:mutex').onAdd(self._onFormMakeDevStr)
self.model.form('it:dev:regkey').onAdd(self._onFormMakeDevStr)
self.model.prop('it:prod:softver:arch').onSet(self._onPropSoftverArch)
self.model.prop('it:prod:softver:vers').onSet(self._onPropSoftverVers)
self.model.prop('it:prod:softver:software').onSet(self._onPropSoftverSoft)
def bruteVersionStr(self, valu):
'''
Brute force the version out of a string.
Args:
valu (str): String to attempt to get version information for.
Notes:
This first attempts to parse strings using the it:semver normalization
before attempting to extract version parts out of the string.
Returns:
int, dict: The system normalized version integer and a subs dictionary.
'''
try:
valu, info = self.core.model.type('it:semver').norm(valu)
subs = info.get('subs')
return valu, subs
except s_exc.BadTypeValu:
# Try doing version part extraction by noming through the string
subs = s_version.parseVersionParts(valu)
if subs is None:
raise s_exc.BadTypeValu(valu=valu, name='bruteVersionStr',
mesg='Unable to brute force version parts out of the string')
if subs:
valu = s_version.packVersion(subs.get('major'),
subs.get('minor', 0),
subs.get('patch', 0))
return valu, subs
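    # Illustrative examples (hypothetical inputs; outcomes follow the logic above):
    #
    #   self.bruteVersionStr('1.2.3')
    #       -> parsed by the it:semver type; subs carry major=1, minor=2, patch=3.
    #
    #   self.bruteVersionStr('version 1.2')
    #       -> it:semver norm fails, so parseVersionParts() is used and the
    #          recovered parts are re-packed (patch defaults to 0).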
async def _onFormItDevStr(self, node):
await node.set('norm', node.ndef[1])
async def _onFormMakeDevStr(self, node):
pprop = node.ndef[1]
await node.snap.addNode('it:dev:str', pprop)
async def _onPropSoftverSoft(self, node, oldv):
# Check to see if name is available and set it if possible
prop = node.get('software')
if prop:
opts = {'vars': {'soft': prop}}
nodes = await node.snap.nodes('it:prod:soft=$soft', opts=opts)
if nodes:
name = nodes[0].get('name')
if name:
await node.set('software:name', name)
async def _onPropSoftverArch(self, node, oldv):
# make it:dev:str for arch
prop = node.get('arch')
if prop:
await node.snap.addNode('it:dev:str', prop)
async def _onPropSoftverVers(self, node, oldv):
        # Set vers:norm to the normalized version value
prop = node.get('vers')
if not prop:
return
await node.set('vers:norm', prop)
# Make it:dev:str from version str
await node.snap.addNode('it:dev:str', prop)
# form the semver properly or bruteforce parts
try:
valu, subs = self.bruteVersionStr(prop)
await node.set('semver', valu)
for k, v in subs.items():
await node.set(f'semver:{k}', v)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception:
logger.exception('Failed to brute force version string [%s]', prop)
def getModelDefs(self):
modl = {
'ctors': (
('it:semver', 'synapse.models.infotech.SemVer', {}, {
'doc': 'Semantic Version type.',
}),
('it:sec:cpe', 'synapse.models.infotech.Cpe23Str', {}, {
                    'doc': 'A NIST CPE 2.3 Formatted String.',
}),
),
'types': (
('it:hostname', ('str', {'strip': True, 'lower': True}), {
'doc': 'The name of a host or system.',
}),
('it:host', ('guid', {}), {
'doc': 'A GUID that represents a host or system.'
}),
('it:log:event', ('guid', {}), {
'doc': 'A GUID representing an individual log event.',
'interfaces': ('it:host:activity',),
}),
('it:network', ('guid', {}), {
'doc': 'A GUID that represents a logical network.'
}),
('it:domain', ('guid', {}), {
'doc': 'A logical boundary of authentication and configuration such as a windows domain.'
}),
('it:account', ('guid', {}), {
'doc': 'A GUID that represents an account on a host or network.'
}),
('it:group', ('guid', {}), {
'doc': 'A GUID that represents a group on a host or network.'
}),
('it:logon', ('guid', {}), {
'doc': 'A GUID that represents an individual logon/logoff event.'
}),
('it:hosturl', ('comp', {'fields': (('host', 'it:host'), ('url', 'inet:url'))}), {
'doc': 'A url hosted on or served by a host or system.',
}),
('it:sec:cve', ('str', {'lower': True, 'regex': r'(?i)^CVE-[0-9]{4}-[0-9]{4,}$'}), {
'doc': 'A vulnerability as designated by a Common Vulnerabilities and Exposures (CVE) number.',
'ex': 'cve-2012-0158'
}),
('it:sec:cwe', ('str', {'regex': r'^CWE-[0-9]{1,8}$'}), {
'doc': 'NIST NVD Common Weaknesses Enumeration Specification',
'ex': 'CWE-120',
}),
('it:mitre:attack:status', ('str', {'enums': 'current,deprecated,withdrawn'}), {
'doc': 'A Mitre ATT&CK element status.',
'ex': 'current',
}),
('it:mitre:attack:group', ('str', {'regex': r'^G[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Group ID.',
'ex': 'G0100',
}),
('it:mitre:attack:tactic', ('str', {'regex': r'^TA[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Tactic ID.',
'ex': 'TA0040',
}),
                ('it:mitre:attack:technique', ('str', {'regex': r'^T[0-9]{4}(\.[0-9]{3})?$'}), {
'doc': 'A Mitre ATT&CK Technique ID.',
'ex': 'T1548',
}),
('it:mitre:attack:mitigation', ('str', {'regex': r'^M[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Mitigation ID.',
'ex': 'M1036',
}),
('it:mitre:attack:software', ('str', {'regex': r'^S[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Software ID.',
'ex': 'S0154',
}),
('it:dev:str', ('str', {}), {
'doc': 'A developer-selected string.'
}),
('it:dev:pipe', ('str', {}), {
'doc': 'A string representing a named pipe.',
}),
('it:dev:mutex', ('str', {}), {
'doc': 'A string representing a mutex.',
}),
('it:dev:int', ('int', {}), {
'doc': 'A developer selected integer constant.',
}),
('it:dev:regkey', ('str', {}), {
'doc': 'A Windows registry key.',
'ex': 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run',
}),
('it:dev:regval', ('guid', {}), {
'doc': 'A Windows registry key/value pair.',
}),
('it:prod:soft', ('guid', {}), {
                    'doc': 'An arbitrary, unversioned software product.',
}),
('it:adid', ('str', {'lower': True, 'strip': True}), {
'doc': 'An advertising identification string.'}),
('it:os:windows:sid', ('str', {'regex': r'^S-1-[0-59]-\d{2}-\d{8,10}-\d{8,10}-\d{8,10}-[1-9]\d{3}$'}), {
'doc': 'A Microsoft Windows Security Identifier.',
'ex': 'S-1-5-21-1220945662-1202665555-839525555-5555',
}),
('it:os:ios:idfa', ('it:adid', {}), {
'doc': 'An iOS advertising identification string.'}),
('it:os:android:aaid', ('it:adid', {}), {
'doc': 'An android advertising identification string.'}),
('it:os:android:perm', ('str', {}), {
'doc': 'An android permission string.'}),
('it:os:android:intent', ('str', {}), {
'doc': 'An android intent string.'}),
('it:os:android:reqperm', ('comp', {'fields': (
('app', 'it:prod:soft'),
('perm', 'it:os:android:perm'))}), {
'doc': 'The given software requests the android permission.'}),
('it:os:android:ilisten', ('comp', {'fields': (
('app', 'it:prod:soft'),
('intent', 'it:os:android:intent'))}), {
'doc': 'The given software listens for an android intent.'}),
('it:os:android:ibroadcast', ('comp', {'fields': (
('app', 'it:prod:soft'),
('intent', 'it:os:android:intent')
)}), {
'doc': 'The given software broadcasts the given Android intent.'}),
('it:prod:softver', ('guid', {}), {
'doc': 'A specific version of a software product.'}),
('it:prod:softfile', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('file', 'file:bytes'))}), {
'doc': 'A file is distributed by a specific software version.'}),
('it:prod:softlib', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('lib', 'it:prod:softver'))}), {
'doc': 'A software version contains a library software version.'}),
('it:prod:softos', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('os', 'it:prod:softver'))}), {
'doc': 'The software version is known to be compatible with the given os software version.'}),
('it:hostsoft', ('comp', {'fields': (('host', 'it:host'), ('softver', 'it:prod:softver'))}), {
'doc': 'A version of a software product which is present on a given host.',
}),
('it:av:sig', ('comp', {'fields': (('soft', 'it:prod:soft'), ('name', ('str', {'lower': True})))}), {
'doc': 'A signature name within the namespace of an antivirus engine name.'
}),
('it:av:filehit', ('comp', {'fields': (('file', 'file:bytes'), ('sig', 'it:av:sig'))}), {
'doc': 'A file that triggered an alert on a specific antivirus signature.',
}),
('it:av:prochit', ('guid', {}), {
'doc': 'An instance of a process triggering an alert on a specific antivirus signature.'
}),
('it:auth:passwdhash', ('guid', {}), {
'doc': 'An instance of a password hash.',
}),
('it:exec:proc', ('guid', {}), {
'doc': 'A process executing on a host. May be an actual (e.g., endpoint) or virtual (e.g., malware sandbox) host.',
}),
('it:exec:thread', ('guid', {}), {
'doc': 'A thread executing in a process.',
}),
('it:exec:loadlib', ('guid', {}), {
'doc': 'A library load event in a process.',
}),
('it:exec:mmap', ('guid', {}), {
'doc': 'A memory mapped segment located in a process.',
}),
('it:cmd', ('str', {'strip': True}), {
'doc': 'A unique command-line string.',
'ex': 'foo.exe --dostuff bar',
}),
('it:exec:mutex', ('guid', {}), {
'doc': 'A mutex created by a process at runtime.',
}),
('it:exec:pipe', ('guid', {}), {
'doc': 'A named pipe created by a process at runtime.',
}),
('it:exec:url', ('guid', {}), {
'doc': 'An instance of a host requesting a URL.',
}),
('it:exec:bind', ('guid', {}), {
'doc': 'An instance of a host binding a listening port.',
}),
('it:fs:file', ('guid', {}), {
'doc': 'A file on a host.'
}),
('it:exec:file:add', ('guid', {}), {
'doc': 'An instance of a host adding a file to a filesystem.',
}),
('it:exec:file:del', ('guid', {}), {
'doc': 'An instance of a host deleting a file from a filesystem.',
}),
('it:exec:file:read', ('guid', {}), {
'doc': 'An instance of a host reading a file from a filesystem.',
}),
('it:exec:file:write', ('guid', {}), {
'doc': 'An instance of a host writing a file to a filesystem.',
}),
('it:exec:reg:get', ('guid', {}), {
'doc': 'An instance of a host getting a registry key.',
}),
('it:exec:reg:set', ('guid', {}), {
'doc': 'An instance of a host creating or setting a registry key.',
}),
('it:exec:reg:del', ('guid', {}), {
'doc': 'An instance of a host deleting a registry key.',
}),
('it:app:yara:rule', ('guid', {}), {
'doc': 'A YARA rule unique identifier.',
}),
('it:app:yara:match', ('comp', {'fields': (('rule', 'it:app:yara:rule'), ('file', 'file:bytes'))}), {
'doc': 'A YARA rule match to a file.',
}),
('it:app:yara:procmatch', ('guid', {}), {
'doc': 'An instance of a YARA rule match to a process.',
}),
('it:app:snort:rule', ('guid', {}), {
'doc': 'A snort rule unique identifier.',
}),
('it:app:snort:hit', ('guid', {}), {
'doc': 'An instance of a snort rule hit.',
}),
('it:reveng:function', ('guid', {}), {
'doc': 'A function inside an executable.',
}),
('it:reveng:filefunc', ('comp', {'fields': (('file', 'file:bytes'), ('function', 'it:reveng:function'))}), {
'doc': 'An instance of a function in an executable.',
}),
('it:reveng:funcstr', ('comp', {'fields': (('function', 'it:reveng:function'), ('string', 'str'))}), {
'deprecated': True,
'doc': 'A reference to a string inside a function.',
}),
('it:reveng:impfunc', ('str', {'lower': 1}), {
'doc': 'A function from an imported library.',
}),
),
'interfaces': (
('it:host:activity', {
'props': (
('exe', ('file:bytes', {}), {
'doc': 'The executable file which caused the activity.'}),
('proc', ('it:exec:proc', {}), {
'doc': 'The host process which caused the activity.'}),
('thread', ('it:exec:thread', {}), {
'doc': 'The host thread which caused the activity.'}),
('host', ('it:host', {}), {
'doc': 'The host on which the activity occurred.'}),
('time', ('time', {}), {
'doc': 'The time that the activity started.'}),
),
}),
),
'forms': (
('it:hostname', {}, ()),
('it:host', {}, (
('name', ('it:hostname', {}), {
'doc': 'The name of the host or system.',
}),
('desc', ('str', {}), {
'doc': 'A free-form description of the host.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain that the host is a member of.',
}),
('ipv4', ('inet:ipv4', {}), {
'doc': 'The last known ipv4 address for the host.'
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The last known location for the host.'
}),
('place', ('geo:place', {}), {
'doc': 'The place where the host resides.',
}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the node.',
}),
('os', ('it:prod:softver', {}), {
'doc': 'The operating system of the host.'
}),
('manu', ('str', {}), {
'doc': 'The manufacturer of the host.',
}),
('model', ('str', {}), {
'doc': 'The product model of the host.',
}),
('serial', ('str', {}), {
'doc': 'The serial number of the host.',
}),
('operator', ('ps:contact', {}), {
'doc': 'The operator of the host.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that operates the given host.',
}),
)),
('it:log:event', {}, (
('mesg', ('str', {}), {
                        'doc': 'The log message text.',
}),
('severity', ('int', {'enums': loglevels}), {
'doc': 'A log level integer that increases with severity.',
}),
('data', ('data', {}), {
'doc': 'A raw JSON record of the log event.',
}),
)),
('it:domain', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the domain.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the domain.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that operates the given domain.',
}),
)),
('it:network', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the network.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the network.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that owns/operates the network.',
}),
('net4', ('inet:net4', {}), {
'doc': 'The optional contiguous IPv4 address range of this network.',
}),
('net6', ('inet:net6', {}), {
'doc': 'The optional contiguous IPv6 address range of this network.',
}),
)),
('it:account', {}, (
('user', ('inet:user', {}), {
                        'doc': 'The username associated with the account.',
}),
('contact', ('ps:contact', {}), {
'doc': 'Additional contact information associated with this account.',
}),
('host', ('it:host', {}), {
'doc': 'The host where the account is registered.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain where the account is registered.',
}),
('posix:uid', ('int', {}), {
'doc': 'The user ID of the account.',
'ex': '1001',
}),
('posix:gid', ('int', {}), {
'doc': 'The primary group ID of the account.',
'ex': '1001',
}),
('posix:gecos', ('int', {}), {
'doc': 'The GECOS field for the POSIX account.',
}),
('posix:home', ('file:path', {}), {
'doc': "The path to the POSIX account's home directory.",
'ex': '/home/visi',
}),
('posix:shell', ('file:path', {}), {
'doc': "The path to the POSIX account's default shell.",
'ex': '/bin/bash',
}),
('windows:sid', ('it:os:windows:sid', {}), {
'doc': 'The Microsoft Windows Security Identifier of the account.',
}),
('groups', ('array', {'type': 'it:group'}), {
'doc': 'An array of groups that the account is a member of.',
}),
)),
('it:group', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the group.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the group.',
}),
('host', ('it:host', {}), {
'doc': 'The host where the group is registered.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain where the group is registered.',
}),
('groups', ('array', {'type': 'it:group'}), {
'doc': 'Groups that are a member of this group.',
}),
('posix:gid', ('int', {}), {
'doc': 'The primary group ID of the account.',
'ex': '1001',
}),
('windows:sid', ('it:os:windows:sid', {}), {
'doc': 'The Microsoft Windows Security Identifier of the group.',
}),
)),
('it:logon', {}, (
('time', ('time', {}), {
                        'doc': 'The time the logon occurred.',
}),
('success', ('bool', {}), {
'doc': 'Set to false to indicate an unsuccessful logon attempt.',
}),
('logoff:time', ('time', {}), {
'doc': 'The time the logon session ended.',
}),
('host', ('it:host', {}), {
'doc': 'The host that the account logged in to.',
}),
('account', ('it:account', {}), {
'doc': 'The account that logged in.',
}),
('creds', ('auth:creds', {}), {
'doc': 'The credentials that were used for the logon.',
}),
('duration', ('duration', {}), {
'doc': 'The duration of the logon session.',
}),
('client:host', ('it:host', {}), {
'doc': 'The host where the logon originated.',
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 where the logon originated.',
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 where the logon originated.',
}),
)),
('it:hosturl', {}, (
('host', ('it:host', {}), {
'ro': True,
'doc': 'Host serving a url.',
}),
('url', ('inet:url', {}), {
'ro': True,
'doc': 'URL available on the host.',
}),
)),
('it:dev:str', {}, (
('norm', ('str', {'lower': True}), {
'doc': 'Lower case normalized version of the it:dev:str.',
}),
)),
('it:sec:cve', {}, (
('desc', ('str', {}), {
'doc': 'A free-form description of the CVE vulnerability.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A URL linking this CVE to a full description.',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the CVE ID.',
}),
)),
('it:sec:cpe', {}, (
('part', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "part" field from the CPE 2.3 string.'}),
('vendor', ('ou:name', {}), {
'ro': True,
'doc': 'The "vendor" field from the CPE 2.3 string.'}),
('product', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "product" field from the CPE 2.3 string.'}),
('version', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "version" field from the CPE 2.3 string.'}),
('update', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "update" field from the CPE 2.3 string.'}),
('edition', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "edition" field from the CPE 2.3 string.'}),
('language', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "language" field from the CPE 2.3 string.'}),
('sw_edition', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "sw_edition" field from the CPE 2.3 string.'}),
('target_sw', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "target_sw" field from the CPE 2.3 string.'}),
('target_hw', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "target_hw" field from the CPE 2.3 string.'}),
('other', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "other" field from the CPE 2.3 string.'}),
)),
('it:sec:cwe', {}, (
('name', ('str', {}), {
                        'doc': 'The CWE name field.',
'ex': 'Buffer Copy without Checking Size of Input (Classic Buffer Overflow)',
}),
('desc', ('str', {}), {
'doc': 'The CWE description field.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A URL linking this CWE to a full description.',
}),
('parents', ('array', {'type': 'it:sec:cwe',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ChildOf CWE Relationships.'
}),
)),
('it:mitre:attack:group', {}, (
('org', ('ou:org', {}), {
'doc': 'Used to map an ATT&CK group to a synapse ou:org.',
}),
('name', ('ou:name', {}), {
'doc': 'The primary name for the ATT&CK group.',
}),
('names', ('array', {'type': 'ou:name', 'uniq': True, 'sorted': True}), {
'doc': 'An array of alternate names for the ATT&CK group.',
}),
('desc', ('str', {}), {
'doc': 'A description of the ATT&CK group.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK group.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK group ID.',
'ex': 'cno.mitre.g0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK group.',
}),
('techniques', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK technique IDs used by the group.',
}),
('software', ('array', {'type': 'it:mitre:attack:software',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK software IDs used by the group.',
}),
)),
('it:mitre:attack:tactic', {}, (
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK tactic.',
}),
('desc', ('str', {}), {
'doc': 'A description of the ATT&CK tactic.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK tactic.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK tactic.',
'ex': 'cno.mitre.ta0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK tactic.',
}),
)),
('it:mitre:attack:technique', {}, (
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK technique.',
}),
('status', ('it:mitre:attack:status', {}), {
'doc': 'The status of this ATT&CK technique.',
}),
('isnow', ('it:mitre:attack:technique', {}), {
'doc': 'If deprecated, this field may contain the current value for the technique.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK technique.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK technique.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK technique.',
'ex': 'cno.mitre.t0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK technique.',
}),
('parent', ('it:mitre:attack:technique', {}), {
'doc': 'The parent ATT&CK technique on this sub-technique.',
}),
('tactics', ('array', {'type': 'it:mitre:attack:tactic',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK tactics that include this technique.',
}),
)),
('it:mitre:attack:software', {}, (
('software', ('it:prod:soft', {}), {
'doc': 'Used to map an ATT&CK software to a synapse it:prod:soft.',
}),
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK software.',
}),
('names', ('array', {'type': 'str', 'uniq': True, 'sorted': True}), {
'doc': 'Associated names for the ATT&CK software.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK software.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK software.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK software.',
'ex': 'cno.mitre.s0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK software.',
}),
('techniques', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of techniques used by the software.',
}),
)),
('it:mitre:attack:mitigation', {}, (
# TODO map to an eventual risk:mitigation
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK mitigation.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK mitigation.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK mitigation.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK mitigation.',
'ex': 'cno.mitre.m0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK mitigation.',
}),
('addresses', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK technique IDs addressed by the mitigation.',
}),
)),
('it:dev:int', {}, ()),
('it:dev:pipe', {}, ()),
('it:dev:mutex', {}, ()),
('it:dev:regkey', {}, ()),
('it:dev:regval', {}, (
('key', ('it:dev:regkey', {}), {
'doc': 'The Windows registry key.',
}),
('str', ('it:dev:str', {}), {
'doc': 'The value of the registry key, if the value is a string.',
}),
('int', ('it:dev:int', {}), {
'doc': 'The value of the registry key, if the value is an integer.',
}),
('bytes', ('file:bytes', {}), {
'doc': 'The file representing the value of the registry key, if the value is binary data.',
}),
)),
('it:prod:soft', {}, (
('name', ('str', {'lower': True, 'strip': True}), {
'doc': 'Name of the software.',
}),
('names', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'Observed/variant names for this software.',
}),
('desc', ('str', {}), {
'doc': 'A description of the software.',
'disp': {'hint': 'text'},
}),
('desc:short', ('str', {'lower': True}), {
'doc': 'A short description of the software.',
}),
('cpe', ('it:sec:cpe', {}), {
'doc': 'The NIST CPE 2.3 string specifying this software.',
}),
('author', ('ps:contact', {}), {
'doc': 'The contact information of the org or person who authored the software.',
}),
('author:org', ('ou:org', {}), {
'deprecated': True,
'doc': 'Organization which authored the software.',
}),
('author:acct', ('inet:web:acct', {}), {
'deprecated': True,
'doc': 'Web account of the software author.',
}),
('author:email', ('inet:email', {}), {
'deprecated': True,
                        'doc': 'Email address of the software author.',
}),
('author:person', ('ps:person', {}), {
'deprecated': True,
'doc': 'Person who authored the software.',
}),
('url', ('inet:url', {}), {
'doc': 'URL relevant for the software.',
}),
('isos', ('bool', {}), {
'doc': 'Set to True if the software is an operating system.'}),
('islib', ('bool', {}), {
'doc': 'Set to True if the software is a library.'}),
)),
('it:adid', {}, ()),
('it:os:ios:idfa', {}, ()),
('it:os:android:aaid', {}, ()),
('it:os:android:perm', {}, ()),
('it:os:android:intent', {}, ()),
('it:os:android:reqperm', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The android app which requests the permission.'}),
('perm', ('it:os:android:perm', {}), {'ro': True,
'doc': 'The android permission requested by the app.'}),
)),
('it:prod:softos', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software which can run on the operating system.'}),
('os', ('it:prod:softver', {}), {'ro': True,
'doc': 'The operating system which the software can run on.'}),
)),
('it:os:android:ilisten', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The app software which listens for the android intent.'}),
('intent', ('it:os:android:intent', {}), {'ro': True,
'doc': 'The android intent which is listened for by the app.'}),
)),
('it:os:android:ibroadcast', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The app software which broadcasts the android intent.'}),
('intent', ('it:os:android:intent', {}), {'ro': True,
'doc': 'The android intent which is broadcast by the app.'}),
)),
('it:prod:softver', {}, (
('software', ('it:prod:soft', {}), {
'doc': 'Software associated with this version instance.',
}),
('software:name', ('str', {'lower': True, 'strip': True}), {
'doc': 'The name of the software at a particular version.',
}),
('names', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'Observed/variant names for this software version.',
}),
('cpe', ('it:sec:cpe', {}), {
                        'doc': 'The NIST CPE 2.3 string specifying this software version.',
}),
('cves', ('array', {'type': 'it:sec:cve', 'uniq': True, 'sorted': True}), {
'doc': 'A list of CVEs that apply to this software version.',
}),
('vers', ('it:dev:str', {}), {
'doc': 'Version string associated with this version instance.',
}),
('vers:norm', ('str', {'lower': True}), {
'doc': 'Normalized version of the version string.',
}),
('arch', ('it:dev:str', {}), {
'doc': 'Software architecture.',
}),
('released', ('time', {}), {
'doc': 'Timestamp for when this version of the software was released.',
}),
('semver', ('it:semver', {}), {
'doc': 'System normalized semantic version number.',
}),
('semver:major', ('int', {}), {
'doc': 'Version major number.',
}),
('semver:minor', ('int', {}), {
'doc': 'Version minor number.',
}),
('semver:patch', ('int', {}), {
'doc': 'Version patch number.',
}),
('semver:pre', ('str', {}), {
'doc': 'Semver prerelease string.',
}),
('semver:build', ('str', {}), {
'doc': 'Semver build string.',
}),
('url', ('inet:url', {}), {
'doc': 'URL where a specific version of the software is available from.',
}),
)),
('it:prod:softlib', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software version that contains the library.'}),
('lib', ('it:prod:softver', {}), {'ro': True,
'doc': 'The library software version.'}),
)),
('it:prod:softfile', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software which distributes the file.'}),
('file', ('file:bytes', {}), {'ro': True,
'doc': 'The file distributed by the software.'}),
('path', ('file:path', {}), {
'doc': 'The default installation path of the file.'}),
)),
('it:hostsoft', {}, (
('host', ('it:host', {}), {'ro': True,
'doc': 'Host with the software.'}),
('softver', ('it:prod:softver', {}), {'ro': True,
'doc': 'Software on the host.'})
)),
('it:av:sig', {}, (
('soft', ('it:prod:soft', {}), {
'ro': True,
'doc': 'The anti-virus product which contains the signature.',
}),
('name', ('str', {'lower': True}), {
'ro': True,
'doc': 'The signature name.'
}),
('desc', ('str', {}), {
'doc': 'A free-form description of the signature.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A reference URL for information about the signature.',
})
)),
('it:av:filehit', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that triggered the signature hit.',
}),
('sig', ('it:av:sig', {}), {
'ro': True,
'doc': 'The signature that the file triggered on.'
}),
('sig:name', ('str', {'lower': True}), {
'ro': True,
'doc': 'The signature name.',
}),
('sig:soft', ('it:prod:soft', {}), {
'ro': True,
'doc': 'The anti-virus product which contains the signature.',
}),
)),
('it:av:prochit', {}, (
('proc', ('it:exec:proc', {}), {
                        'doc': 'The process that triggered the signature hit.',
}),
('sig', ('it:av:sig', {}), {
                        'doc': 'The signature that the process triggered on.'
}),
('time', ('time', {}), {
'doc': 'The time that the AV engine detected the signature.'
}),
)),
('it:auth:passwdhash', {}, (
('salt', ('hex', {}), {
'doc': 'The (optional) hex encoded salt value used to calculate the password hash.',
}),
('hash:md5', ('hash:md5', {}), {
'doc': 'The MD5 password hash value.',
}),
('hash:sha1', ('hash:sha1', {}), {
'doc': 'The SHA1 password hash value.',
}),
('hash:sha256', ('hash:sha256', {}), {
'doc': 'The SHA256 password hash value.',
}),
('hash:sha512', ('hash:sha512', {}), {
'doc': 'The SHA512 password hash value.',
}),
('hash:lm', ('hash:lm', {}), {
'doc': 'The LM password hash value.',
}),
('hash:ntlm', ('hash:ntlm', {}), {
'doc': 'The NTLM password hash value.',
}),
('passwd', ('inet:passwd', {}), {
'doc': 'The (optional) clear text password for this password hash.',
}),
)),
('it:cmd', {}, ()),
('it:exec:proc', {}, (
('host', ('it:host', {}), {
'doc': 'The host that executed the process. May be an actual or a virtual / notional host.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The file considered the "main" executable for the process. For example, rundll32.exe may be considered the "main" executable for DLLs loaded by that program.',
}),
('cmd', ('it:cmd', {}), {
'doc': 'The command string used to launch the process, including any command line parameters.',
'disp': {'hint': 'text'},
}),
('pid', ('int', {}), {
'doc': 'The process ID.',
}),
('time', ('time', {}), {
'doc': 'The start time for the process.',
}),
('exited', ('time', {}), {
'doc': 'The time the process exited.',
}),
('exitcode', ('int', {}), {
'doc': 'The exit code for the process.',
}),
('user', ('inet:user', {}), {
'doc': 'The user name of the process owner.',
}),
('path', ('file:path', {}), {
'doc': 'The path to the executable of the process.',
}),
('src:exe', ('file:path', {}), {
'doc': 'The path to the executable which started the process.',
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'The process which created the process.'
}),
('killedby', ('it:exec:proc', {}), {
'doc': 'The process which killed this process.',
}),
)),
('it:exec:thread', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process which contains the thread.',
}),
('created', ('time', {}), {
'doc': 'The time the thread was created.',
}),
('exited', ('time', {}), {
'doc': 'The time the thread exited.',
}),
('exitcode', ('int', {}), {
'doc': 'The exit code or return value for the thread.',
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'An external process which created the thread.',
}),
('src:thread', ('it:exec:thread', {}), {
'doc': 'The thread which created this thread.',
}),
)),
('it:exec:loadlib', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process where the library was loaded.',
}),
('va', ('int', {}), {
'doc': 'The base memory address where the library was loaded in the process.',
}),
('loaded', ('time', {}), {
'doc': 'The time the library was loaded.',
}),
('unloaded', ('time', {}), {
'doc': 'The time the library was unloaded.',
}),
('path', ('file:path', {}), {
'doc': 'The path that the library was loaded from.',
}),
('file', ('file:bytes', {}), {
'doc': 'The library file that was loaded.',
}),
)),
('it:exec:mmap', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process where the memory was mapped.',
}),
('va', ('int', {}), {
'doc': 'The base memory address where the map was created in the process.',
}),
('size', ('int', {}), {
'doc': 'The size of the memory map in bytes.',
}),
('perms:read', ('bool', {}), {
'doc': 'True if the mmap is mapped with read permissions.',
}),
('perms:write', ('bool', {}), {
'doc': 'True if the mmap is mapped with write permissions.',
}),
('perms:execute', ('bool', {}), {
'doc': 'True if the mmap is mapped with execute permissions.',
}),
('created', ('time', {}), {
'doc': 'The time the memory map was created.',
}),
('deleted', ('time', {}), {
'doc': 'The time the memory map was deleted.',
}),
('path', ('file:path', {}), {
'doc': 'The file path if the mmap is a mapped view of a file.',
}),
('hash:sha256', ('hash:sha256', {}), {
'doc': 'A SHA256 hash of the memory map. Bytes may optionally be present in the axon.',
}),
)),
('it:exec:mutex', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the mutex.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the mutex. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the mutex. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the mutex was created.',
}),
('name', ('it:dev:mutex', {}), {
'doc': 'The mutex string.',
}),
)),
('it:exec:pipe', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the named pipe.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the named pipe. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the named pipe. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the named pipe was created.',
}),
('name', ('it:dev:pipe', {}), {
'doc': 'The named pipe string.',
}),
)),
('it:exec:url', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that requested the URL.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that requested the URL. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that requested the URL. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the URL was requested.',
}),
('url', ('inet:url', {}), {
'doc': 'The URL that was requested.',
}),
('client', ('inet:client', {}), {
'doc': 'The address of the client during the URL retrieval.'
}),
('client:ipv4', ('inet:ipv4', {}), {
                        'doc': 'The IPv4 address of the client during the URL retrieval.'
}),
('client:ipv6', ('inet:ipv6', {}), {
                        'doc': 'The IPv6 address of the client during the URL retrieval.'
}),
('client:port', ('inet:port', {}), {
                        'doc': 'The client port during the URL retrieval.'
}),
)),
('it:exec:bind', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that bound the listening port.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that bound the listening port. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that bound the listening port. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the port was bound.',
}),
('server', ('inet:server', {}), {
'doc': 'The inet:addr of the server when binding the port.'
}),
('server:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address specified to bind().'
}),
('server:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address specified to bind().'
}),
('server:port', ('inet:port', {}), {
'doc': 'The bound (listening) TCP port.'
}),
)),
('it:fs:file', {}, (
('host', ('it:host', {}), {
'doc': 'The host containing the file.',
}),
('path', ('file:path', {}), {
'doc': 'The path for the file.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file on the host.',
}),
('ctime', ('time', {}), {
'doc': 'The file creation time.',
}),
('mtime', ('time', {}), {
'doc': 'The file modification time.',
}),
('atime', ('time', {}), {
'doc': 'The file access time.',
}),
('user', ('inet:user', {}), {
'doc': 'The owner of the file.',
}),
('group', ('inet:user', {}), {
'doc': 'The group owner of the file.',
}),
)),
('it:exec:file:add', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the new file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the new file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the new file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was created.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was created.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was created.',
}),
)),
('it:exec:file:del', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that deleted the file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that deleted the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that deleted the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was deleted.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was deleted.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was deleted.',
}),
)),
('it:exec:file:read', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that read the file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that read the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that read the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was read.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was read.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was read.',
}),
)),
('it:exec:file:write', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that wrote to / modified the existing file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that wrote to the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that wrote to the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was written to/modified.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was written to/modified.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was modified.',
}),
)),
('it:exec:reg:get', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that read the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that read the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that read the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the registry was read.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was read.',
}),
)),
('it:exec:reg:set', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that wrote to the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that wrote to the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that wrote to the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the registry was written to.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was written to.',
}),
)),
('it:exec:reg:del', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that deleted data from the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that deleted data from the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that deleted data from the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the data from the registry was deleted.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was deleted.',
}),
)),
('it:app:snort:rule', {}, (
('text', ('str', {}), {
'doc': 'The snort rule text.',
'disp': {'hint': 'text'},
}),
('name', ('str', {}), {
'doc': 'The name of the snort rule.'}),
('version', ('it:semver', {}), {
'doc': 'The current version of the rule.'}),
)),
('it:app:snort:hit', {}, (
('rule', ('it:app:snort:rule', {}), {
                        'doc': 'The snort rule that matched the flow.'}),
('flow', ('inet:flow', {}), {
'doc': 'The inet:flow that matched the snort rule.'}),
('src', ('inet:addr', {}), {
                        'doc': 'The source address of the flow that caused the hit.'}),
('src:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address of the flow that caused the hit.'}),
('src:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address of the flow that caused the hit.'}),
('src:port', ('inet:port', {}), {
'doc': 'The source port of the flow that caused the hit.'}),
('dst', ('inet:addr', {}), {
                        'doc': 'The destination address of the flow that caused the hit.'}),
('dst:ipv4', ('inet:ipv4', {}), {
'doc': 'The destination IPv4 address of the flow that caused the hit.'}),
('dst:ipv6', ('inet:ipv6', {}), {
                        'doc': 'The destination IPv6 address of the flow that caused the hit.'}),
('dst:port', ('inet:port', {}), {
'doc': 'The destination port of the flow that caused the hit.'}),
('time', ('time', {}), {
'doc': 'The time of the network flow that caused the hit.'}),
('sensor', ('it:host', {}), {
'doc': 'The sensor host node that produced the hit.'}),
('version', ('it:semver', {}), {
'doc': 'The version of the rule at the time of match.'}),
)),
('it:app:yara:rule', {}, (
('text', ('str', {}), {
'doc': 'The YARA rule text.',
'disp': {'hint': 'text'},
}),
('name', ('str', {}), {
'doc': 'The name of the YARA rule.'}),
('author', ('ps:contact', {}), {
'doc': 'Contact info for the author of the YARA rule.'}),
('version', ('it:semver', {}), {
'doc': 'The current version of the rule.'}),
('enabled', ('bool', {}), {
'doc': 'The rule enabled status to be used for YARA evaluation engines.'}),
)),
('it:app:yara:match', {}, (
('rule', ('it:app:yara:rule', {}), {
'ro': True,
'doc': 'The YARA rule that matched the file.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that matched the YARA rule.'}),
('version', ('it:semver', {}), {
'doc': 'The most recent version of the rule evaluated as a match.'}),
)),
('it:app:yara:procmatch', {}, (
('rule', ('it:app:yara:rule', {}), {
                        'doc': 'The YARA rule that matched the process.'}),
('proc', ('it:exec:proc', {}), {
'doc': 'The process that matched the YARA rule.'}),
('time', ('time', {}), {
'doc': 'The time that the YARA engine matched the process to the rule.'}),
('version', ('it:semver', {}), {
'doc': 'The most recent version of the rule evaluated as a match.'}),
)),
('it:reveng:function', {}, (
('name', ('str', {}), {
'doc': 'The name of the function.'}),
('description', ('str', {}), {
'doc': 'Notes concerning the function.'}),
('impcalls', ('array', {'type': 'it:reveng:impfunc'}), {
'doc': 'Calls to imported library functions within the scope of the function.',
}),
('strings', ('array', {'type': 'it:dev:str', 'uniq': True}), {
'doc': 'An array of strings referenced within the function.',
}),
)),
('it:reveng:filefunc', {}, (
('function', ('it:reveng:function', {}), {
'ro': True,
'doc': 'The guid matching the function.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that contains the function.'}),
('va', ('int', {}), {
'doc': 'The virtual address of the first codeblock of the function.'}),
('rank', ('int', {}), {
'doc': 'The function rank score used to evaluate if it exhibits interesting behavior.'}),
('complexity', ('int', {}), {
'doc': 'The complexity of the function.'}),
('funccalls', ('array', {'type': 'it:reveng:filefunc'}), {
'doc': 'Other function calls within the scope of the function.',
}),
)),
('it:reveng:funcstr', {}, (
('function', ('it:reveng:function', {}), {
'ro': True,
'doc': 'The guid matching the function.'}),
('string', ('str', {}), {
'ro': True,
'doc': 'The string that the function references.'}),
)),
('it:reveng:impfunc', {}, ()),
),
}
name = 'it'
return ((name, modl), )
| [((183, 210), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (200, 210), False, 'import logging\n'), ((612, 662), 'synapse.lib.types.Str.__init__', 's_types.Str.__init__', (['self', 'modl', 'name', 'info', 'opts'], {}), '(self, modl, name, info, opts)\n', (632, 662), True, 'import synapse.lib.types as s_types\n'), ((1368, 1402), 'synapse.lib.types.Str._normPyStr', 's_types.Str._normPyStr', (['self', 'valu'], {}), '(self, valu)\n', (1390, 1402), True, 'import synapse.lib.types as s_types\n'), ((2787, 2817), 'synapse.lib.types.Int.postTypeInit', 's_types.Int.postTypeInit', (['self'], {}), '(self)\n', (2811, 2817), True, 'import synapse.lib.types as s_types\n'), ((3157, 3184), 'synapse.lib.version.parseSemver', 's_version.parseSemver', (['valu'], {}), '(valu)\n', (3178, 3184), True, 'import synapse.lib.version as s_version\n'), ((3928, 3957), 'synapse.lib.version.unpackVersion', 's_version.unpackVersion', (['valu'], {}), '(valu)\n', (3951, 3957), True, 'import synapse.lib.version as s_version\n'), ((3973, 4015), 'synapse.lib.version.packVersion', 's_version.packVersion', (['major', 'minor', 'patch'], {}), '(major, minor, patch)\n', (3994, 4015), True, 'import synapse.lib.version as s_version\n'), ((4205, 4234), 'synapse.lib.version.unpackVersion', 's_version.unpackVersion', (['valu'], {}), '(valu)\n', (4228, 4234), True, 'import synapse.lib.version as s_version\n'), ((4250, 4291), 'synapse.lib.version.fmtVersion', 's_version.fmtVersion', (['major', 'minor', 'patch'], {}), '(major, minor, patch)\n', (4270, 4291), True, 'import synapse.lib.version as s_version\n'), ((1306, 1345), 'synapse.exc.BadTypeValu', 's_exc.BadTypeValu', ([], {'valu': 'valu', 'mesg': 'mesg'}), '(valu=valu, mesg=mesg)\n', (1323, 1345), True, 'import synapse.exc as s_exc\n'), ((1564, 1603), 'synapse.exc.BadTypeValu', 's_exc.BadTypeValu', ([], {'valu': 'valu', 'mesg': 'mesg'}), '(valu=valu, mesg=mesg)\n', (1581, 1603), True, 'import synapse.exc as s_exc\n'), ((3012, 3109), 'synapse.exc.BadTypeValu', 's_exc.BadTypeValu', ([], {'valu': 'valu', 'name': 'self.name', 'mesg': '"""No text left after stripping whitespace"""'}), "(valu=valu, name=self.name, mesg=\n 'No text left after stripping whitespace')\n", (3029, 3109), True, 'import synapse.exc as s_exc\n'), ((3228, 3321), 'synapse.exc.BadTypeValu', 's_exc.BadTypeValu', ([], {'valu': 'valu', 'name': 'self.name', 'mesg': '"""Unable to parse string as a semver."""'}), "(valu=valu, name=self.name, mesg=\n 'Unable to parse string as a semver.')\n", (3245, 3321), True, 'import synapse.exc as s_exc\n'), ((3555, 3656), 'synapse.exc.BadTypeValu', 's_exc.BadTypeValu', ([], {'valu': 'valu', 'name': 'self.name', 'mesg': '"""Cannot norm a negative integer as a semver."""'}), "(valu=valu, name=self.name, mesg=\n 'Cannot norm a negative integer as a semver.')\n", (3572, 3656), True, 'import synapse.exc as s_exc\n'), ((3742, 3866), 'synapse.exc.BadTypeValu', 's_exc.BadTypeValu', ([], {'valu': 'valu', 'name': 'self.name', 'mesg': '"""Cannot norm a integer larger than 1152921504606846975 as a semver."""'}), "(valu=valu, name=self.name, mesg=\n 'Cannot norm a integer larger than 1152921504606846975 as a semver.')\n", (3759, 3866), True, 'import synapse.exc as s_exc\n'), ((5825, 5858), 'synapse.lib.version.parseVersionParts', 's_version.parseVersionParts', (['valu'], {}), '(valu)\n', (5852, 5858), True, 'import synapse.lib.version as s_version\n'), ((5910, 6029), 'synapse.exc.BadTypeValu', 's_exc.BadTypeValu', ([], {'valu': 'valu', 'name': '"""bruteVersionStr"""', 
'mesg': '"""Unable to brute force version parts out of the string"""'}), "(valu=valu, name='bruteVersionStr', mesg=\n 'Unable to brute force version parts out of the string')\n", (5927, 6029), True, 'import synapse.exc as s_exc\n')] |
bciar/ppp-web | test/test.py | 1afe39a3c8d2197595ad0e2610c612db210cd62e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit tests."""
import os
import unittest
from copy import copy
from webui.app import create_app
class TestRoutes(unittest.TestCase):
"""Test routes."""
ignore_routes = ('/static/<path:filename>',)
ignore_end_patterns = ('>',)
def setUp(self):
"""Set up: Put Flask app in test mode."""
app = create_app()
self.initial_app = copy(app)
app.testing = True
self.app = app.test_client()
@staticmethod
def valid_route(route):
"""Validate route.
Args:
route (str): Route url pattern.
Returns:
bool: True if valid, else False.
"""
if route in TestRoutes.ignore_routes \
or route.endswith(TestRoutes.ignore_end_patterns):
return False
return True
def test_routes(self):
"""Smoke test routes to ensure no runtime errors.."""
routes = [route.rule for route in self.initial_app.url_map.iter_rules()
if self.valid_route(route.rule)]
for route in routes:
self.app.get(route)
if __name__ == '__main__':
from test.utils.doctest_unittest_runner import doctest_unittest_runner
TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + '/'
doctest_unittest_runner(test_dir=TEST_DIR, relative_path_to_root='../',
package_names=['webui', 'test'])
| [((1314, 1422), 'test.utils.doctest_unittest_runner.doctest_unittest_runner', 'doctest_unittest_runner', ([], {'test_dir': 'TEST_DIR', 'relative_path_to_root': '"""../"""', 'package_names': "['webui', 'test']"}), "(test_dir=TEST_DIR, relative_path_to_root='../',\n package_names=['webui', 'test'])\n", (1337, 1422), False, 'from test.utils.doctest_unittest_runner import doctest_unittest_runner\n'), ((378, 390), 'webui.app.create_app', 'create_app', ([], {}), '()\n', (388, 390), False, 'from webui.app import create_app\n'), ((418, 427), 'copy.copy', 'copy', (['app'], {}), '(app)\n', (422, 427), False, 'from copy import copy\n'), ((1276, 1302), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1292, 1302), False, 'import os\n')] |
Justin-Fisher/webots | tests/sources/test_clang_format.py | 8a39e8e4390612919a8d82c7815aa914f4c079a4 | #!/usr/bin/env python
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the C, C++ and shader source code is compliant with ClangFormat."""
import unittest
import difflib
import os
import subprocess
from io import open
from distutils.spawn import find_executable
class TestClangFormat(unittest.TestCase):
"""Unit test for ClangFormat compliance."""
def setUp(self):
"""Set up called before each test."""
self.WEBOTS_HOME = os.environ['WEBOTS_HOME']
def _runClangFormat(self, f):
"""Run clang format on 'f' file."""
return subprocess.check_output(['clang-format', '-style=file', f])
def test_clang_format_is_correctly_installed(self):
"""Test ClangFormat is correctly installed."""
self.assertTrue(
find_executable('clang-format') is not None,
msg='ClangFormat is not installed on this computer.'
)
clangFormatConfigFile = self.WEBOTS_HOME + os.sep + '.clang-format'
self.assertTrue(
os.path.exists(clangFormatConfigFile),
msg=clangFormatConfigFile + ' not found.'
)
def test_sources_are_clang_format_compliant(self):
"""Test that sources are ClangFormat compliant."""
directories = [
'include/controller',
'projects',
'resources/projects',
'resources/wren/shaders',
'tests',
'include/wren',
'src/controller/c',
'src/controller/cpp',
'src/license/sign',
'src/webots',
'src/wren'
]
skippedPaths = [
'projects/default/controllers/ros/include',
'projects/robots/gctronic/e-puck/transfer',
'projects/robots/mobsya/thymio/controllers/thymio2_aseba/aseba',
'projects/robots/mobsya/thymio/libraries/dashel',
'projects/robots/mobsya/thymio/libraries/dashel-src',
'projects/robots/robotis/darwin-op/libraries/libssh',
'projects/robots/robotis/darwin-op/libraries/libzip',
'projects/robots/robotis/darwin-op/libraries/robotis-op2/robotis',
'projects/robots/robotis/darwin-op/remote_control/libjpeg-turbo',
'projects/vehicles/controllers/ros_automobile/include',
'src/webots/external'
]
skippedFiles = [
'projects/robots/robotis/darwin-op/plugins/remote_controls/robotis-op2_tcpip/stb_image.h'
]
skippedDirectories = [
'build',
'python',
'java'
]
extensions = ['c', 'h', 'cpp', 'hpp', 'cc', 'hh', 'c++', 'h++', 'vert', 'frag']
modified_files = os.path.join(self.WEBOTS_HOME, 'tests', 'sources', 'modified_files.txt')
sources = []
if os.path.isfile(modified_files):
with open(modified_files, 'r') as file:
for line in file:
line = line.strip()
extension = os.path.splitext(line)[1][1:].lower()
if extension not in extensions:
continue
found = False
for directory in directories:
if line.startswith(directory):
found = True
break
if not found:
continue
found = False
for directory in skippedPaths + skippedFiles:
if line.startswith(directory):
found = True
break
if found:
continue
for directory in skippedDirectories:
currentDirectories = line.split(os.sep)
if directory in currentDirectories:
found = True
if found:
continue
sources.append(line.replace('/', os.sep))
else:
for directory in directories:
path = self.WEBOTS_HOME + os.sep + directory.replace('/', os.sep)
for rootPath, dirNames, fileNames in os.walk(path):
shouldContinue = False
for path in skippedPaths:
if rootPath.startswith(self.WEBOTS_HOME + os.sep + path.replace('/', os.sep)):
shouldContinue = True
break
for directory in skippedDirectories:
currentDirectories = rootPath.replace(self.WEBOTS_HOME, '').split(os.sep)
if directory in currentDirectories:
shouldContinue = True
break
if shouldContinue:
continue
for fileName in fileNames:
extension = os.path.splitext(fileName)[1][1:].lower()
if extension not in extensions:
continue
path = os.path.normpath(os.path.join(rootPath, fileName))
skipFile = False
for file in skippedFiles:
if os.path.normpath((self.WEBOTS_HOME + os.sep + file.replace('/', os.sep))) == path:
skipFile = True
break
if not skipFile:
sources.append(path)
curdir = os.getcwd()
os.chdir(self.WEBOTS_HOME)
for source in sources:
diff = ''
with open(source, encoding='utf8') as file:
try:
for line in difflib.context_diff(self._runClangFormat(source).decode('utf-8').splitlines(),
file.read().splitlines()):
diff += line + '\n'
except UnicodeDecodeError:
self.assertTrue(False, msg='utf-8 decode problem in %s' % source)
self.assertTrue(
len(diff) == 0,
msg='Source file "%s" is not compliant with ClangFormat:\n\nDIFF:%s' % (source, diff)
)
os.chdir(curdir)
if __name__ == '__main__':
unittest.main()
| [((6922, 6937), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6935, 6937), False, 'import unittest\n'), ((1123, 1182), 'subprocess.check_output', 'subprocess.check_output', (["['clang-format', '-style=file', f]"], {}), "(['clang-format', '-style=file', f])\n", (1146, 1182), False, 'import subprocess\n'), ((3239, 3311), 'os.path.join', 'os.path.join', (['self.WEBOTS_HOME', '"""tests"""', '"""sources"""', '"""modified_files.txt"""'], {}), "(self.WEBOTS_HOME, 'tests', 'sources', 'modified_files.txt')\n", (3251, 3311), False, 'import os\n'), ((3344, 3374), 'os.path.isfile', 'os.path.isfile', (['modified_files'], {}), '(modified_files)\n', (3358, 3374), False, 'import os\n'), ((6129, 6140), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6138, 6140), False, 'import os\n'), ((6149, 6175), 'os.chdir', 'os.chdir', (['self.WEBOTS_HOME'], {}), '(self.WEBOTS_HOME)\n', (6157, 6175), False, 'import os\n'), ((6872, 6888), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (6880, 6888), False, 'import os\n'), ((1565, 1602), 'os.path.exists', 'os.path.exists', (['clangFormatConfigFile'], {}), '(clangFormatConfigFile)\n', (1579, 1602), False, 'import os\n'), ((1332, 1363), 'distutils.spawn.find_executable', 'find_executable', (['"""clang-format"""'], {}), "('clang-format')\n", (1347, 1363), False, 'from distutils.spawn import find_executable\n'), ((3393, 3418), 'io.open', 'open', (['modified_files', '"""r"""'], {}), "(modified_files, 'r')\n", (3397, 3418), False, 'from io import open\n'), ((4769, 4782), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (4776, 4782), False, 'import os\n'), ((6246, 6275), 'io.open', 'open', (['source'], {'encoding': '"""utf8"""'}), "(source, encoding='utf8')\n", (6250, 6275), False, 'from io import open\n'), ((5697, 5729), 'os.path.join', 'os.path.join', (['rootPath', 'fileName'], {}), '(rootPath, fileName)\n', (5709, 5729), False, 'import os\n'), ((3534, 3556), 'os.path.splitext', 'os.path.splitext', (['line'], {}), '(line)\n', (3550, 3556), False, 'import os\n'), ((5514, 5540), 'os.path.splitext', 'os.path.splitext', (['fileName'], {}), '(fileName)\n', (5530, 5540), False, 'import os\n')] |
sanketsaurav/clusterfuzz | src/python/tests/core/system/shell_test.py | 9f7efba7781614d50cdc6ab136b9bcf19607731c | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""shell tests."""
import mock
import os
import unittest
from pyfakefs import fake_filesystem_unittest
from system import environment
from system import shell
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
class RemoveEmptyFilesTest(fake_filesystem_unittest.TestCase):
"""Tests for remove_empty_files."""
def setUp(self):
# FIXME: Add support for Windows.
if not environment.is_posix():
self.skipTest('Process tests are only applicable for posix platforms.')
test_utils.set_up_pyfakefs(self)
def test_remove(self):
"""Test remove."""
self.fs.CreateFile('/test/aa/bb.txt', contents='s')
self.fs.CreateFile('/test/aa/cc.txt', contents='')
self.fs.CreateFile('/test/aa/aa/dd.txt', contents='s')
self.fs.CreateFile('/test/aa/aa/aa.txt', contents='')
shell.remove_empty_files('/test')
self.assertTrue(os.path.exists('/test/aa/bb.txt'))
self.assertTrue(os.path.exists('/test/aa/aa/dd.txt'))
self.assertFalse(os.path.exists('/test/aa/cc.txt'))
self.assertFalse(os.path.exists('/test/aa/aa/aa.txt'))
def test_ignore_file(self):
self.fs.CreateFile('/test/aa/cc.txt', contents='')
shell.remove_empty_files('/test/aa/cc.txt')
self.assertTrue(os.path.exists('/test/aa/cc.txt'))
@mock.patch('os.remove', autospec=True)
def test_exception(self, mock_remove):
# bypass pyfakefs's os.remove.
os.remove = mock_remove
mock_remove.side_effect = OSError()
self.fs.CreateFile('/test/aa/cc.txt', contents='')
shell.remove_empty_files('/test')
self.assertTrue(os.path.exists('/test/aa/cc.txt'))
class RemoveDirectoryTest(unittest.TestCase):
"""Tests for remove_directory."""
def setUp(self):
test_helpers.patch(self, [
'os.chmod',
'os.mkdir',
'os.path.exists',
'os.system',
'system.environment.platform',
'metrics.logs.log_error',
'metrics.logs.log_warn',
'shutil.rmtree',
])
def _test_remove_os_specific(self, platform, recreate, raise_mkdir_error):
"""Helper for testing removing dir with os-specific command."""
self.mock.platform.return_value = platform
self.mock.exists.side_effect = [True, False, False]
if raise_mkdir_error:
self.mock.mkdir.side_effect = OSError()
result = shell.remove_directory('dir', recreate=recreate)
if recreate:
self.assertEqual(not raise_mkdir_error, result)
else:
self.assertTrue(result)
self.mock.rmtree.assert_has_calls([])
if recreate:
self.mock.mkdir.assert_has_calls([mock.call('dir')])
else:
self.mock.mkdir.assert_has_calls([])
def test_remove_os_specific_windows(self):
"""Test remove with os-specific command on windows."""
self._test_remove_os_specific('WINDOWS', True, False)
self.mock.system.assert_has_calls([mock.call('rd /s /q "dir" > nul 2>&1')])
def test_remove_os_specific_non_windows(self):
"""Test remove with os-specific command on non-windows."""
self._test_remove_os_specific('LINUX', True, False)
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
def test_remove_without_recreate(self):
"""Test remove without recreate."""
self._test_remove_os_specific('LINUX', False, True)
def test_remove_with_mkdir_error(self):
"""Test remove when mkdir errors."""
self._test_remove_os_specific('LINUX', True, True)
def test_remove_shutil_success(self):
"""Test remove with shutil."""
self.mock.exists.side_effect = [True, True, False]
self.assertTrue(shell.remove_directory('dir'))
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
def test_remove_shutil_failure(self):
"""Test remove with shutil but fails."""
self.mock.exists.side_effect = [True, True, True]
self.assertFalse(shell.remove_directory('dir'))
self.mock.log_error.assert_has_calls(
[mock.call('Failed to clear directory dir.')])
self.assertEqual(0, self.mock.log_warn.call_count)
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
def test_remove_shutil_failure_ignore_errors(self):
self.mock.exists.side_effect = [True, True, True]
self.assertFalse(shell.remove_directory('dir', ignore_errors=True))
self.mock.log_warn.assert_has_calls(
[mock.call('Failed to clear directory dir.')])
self.assertEqual(0, self.mock.log_error.call_count)
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
def test_remove_shutil_onerror(self):
"""Test shutil invoking onerror."""
self.mock.exists.side_effect = [True, True, False]
self.assertTrue(shell.remove_directory('dir'))
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
onerror = self.mock.rmtree.call_args[1]['onerror']
fake_fn = mock.MagicMock()
fake_fn.side_effect = OSError()
onerror(fake_fn, 'dir/child', ImportError())
self.mock.chmod.assert_has_calls([mock.call('dir/child', 0o750)])
fake_fn.assert_has_calls([mock.call('dir/child')])
class GetDirectoryFileCount(fake_filesystem_unittest.TestCase):
"""Tests for get_directory_file_count."""
def setUp(self):
test_utils.set_up_pyfakefs(self)
def test(self):
"""Test get_directory_file_count."""
self.fs.CreateFile('/test/aa/bb.txt', contents='abc')
self.fs.CreateFile('/test/aa/cc.txt', contents='def')
self.fs.CreateFile('/test/aa/aa/aa.txt', contents='ghi')
self.fs.CreateFile('/test/aa/aa/dd.txt', contents='t')
self.assertEqual(shell.get_directory_file_count('/test/aa'), 4)
class GetDirectorySizeTest(fake_filesystem_unittest.TestCase):
"""Tests for get_directory_size."""
def setUp(self):
test_utils.set_up_pyfakefs(self)
def test(self):
"""Test get_directory_size."""
self.fs.CreateFile('/test/aa/bb.txt', contents='abc')
self.fs.CreateFile('/test/aa/cc.txt', contents='def')
self.fs.CreateFile('/test/aa/aa/aa.txt', contents='ghi')
self.fs.CreateFile('/test/aa/aa/dd.txt', contents='t')
self.assertEqual(shell.get_directory_size('/test/aa'), 10)
class WhichTest(fake_filesystem_unittest.TestCase):
"""Tests for which (shutil.which)."""
def setUp(self):
# FIXME: Add support for Windows.
if not environment.is_posix():
self.skipTest('Which test is only supported on posix platforms.')
def test(self):
self.assertEqual('/bin/ls', shell.which('ls'))
class ClearSystemTempDirectoryTest(fake_filesystem_unittest.TestCase):
"""Tests for clear_system_temp_directory."""
def setUp(self):
test_helpers.patch(self, [
'tempfile.gettempdir',
])
self.mock.gettempdir.return_value = '/tmp'
test_utils.set_up_pyfakefs(self)
def test(self):
"""Test clear_system_temp_directory works as expected."""
self.fs.CreateFile('/tmp/aa/bb.txt', contents='abc')
self.fs.CreateFile('/tmp/cc/dd/ee.txt', contents='def')
self.fs.CreateDirectory('/tmp/ff/gg')
self.fs.CreateDirectory('/tmp/hh')
self.fs.CreateDirectory('/unrelated')
self.fs.CreateFile('/unrelated/zz.txt', contents='zzz')
os.symlink('/unrelated/zz.txt', '/tmp/hh/gg.txt')
os.symlink('/unrelated', '/tmp/ii')
shell.clear_system_temp_directory()
self.assertTrue(os.path.exists('/tmp'))
self.assertTrue(os.path.exists('/unrelated'))
self.assertEqual(shell.get_directory_file_count('/tmp'), 0)
self.assertEqual(shell.get_directory_file_count('/unrelated'), 1)
self.assertFalse(os.path.exists('/tmp/aa/bb.txt'))
self.assertFalse(os.path.exists('/tmp/cc/dd/ee.txt'))
self.assertFalse(os.path.exists('/tmp/ff/gg'))
self.assertFalse(os.path.exists('/tmp/hh'))
class GetExecuteCommand(unittest.TestCase):
"""Test that the correct commands to run files are returned."""
def call_and_assert_helper(self, expected_command, file_to_execute):
"""Call get_execute_command on |file_to_execute| and assert result equal to
|expected_command|."""
self.assertEqual(expected_command,
shell.get_execute_command(file_to_execute))
def test_standard_script(self):
"""Test correct command returned for python script."""
script_name = 'script.py'
expected_command = 'python %s' % script_name
self.call_and_assert_helper(expected_command, script_name)
def test_java(self):
"""Test correct launch command returned for Java class."""
script_name = 'javaclassfile.class'
expected_command = 'java javaclassfile'
self.call_and_assert_helper(expected_command, script_name)
def test_binary(self):
"""Test correct launch command returned for a binary (executable) file."""
executable_name = 'executable'
self.call_and_assert_helper(executable_name, executable_name)
executable_name += '.exe'
self.call_and_assert_helper(executable_name, executable_name)
class GetInterpreter(object):
"""Test that the correct interpreters to execute a file are returned."""
def get_interpreted_file_test(self):
"""Test correct interpreter is returned for a file that needs one."""
self.assertEqual('python', shell.get_interpreter('run.py'))
def get_non_interpreter_file_test(self):
"""Test that None is returned for a file that doesn't need one. We don't
want empty string since this is easier to than None. """
self.assertIsNone(shell.get_interpreter('executable'))
| [((1877, 1915), 'mock.patch', 'mock.patch', (['"""os.remove"""'], {'autospec': '(True)'}), "('os.remove', autospec=True)\n", (1887, 1915), False, 'import mock\n'), ((1106, 1138), 'tests.test_libs.test_utils.set_up_pyfakefs', 'test_utils.set_up_pyfakefs', (['self'], {}), '(self)\n', (1132, 1138), False, 'from tests.test_libs import test_utils\n'), ((1421, 1454), 'system.shell.remove_empty_files', 'shell.remove_empty_files', (['"""/test"""'], {}), "('/test')\n", (1445, 1454), False, 'from system import shell\n'), ((1774, 1817), 'system.shell.remove_empty_files', 'shell.remove_empty_files', (['"""/test/aa/cc.txt"""'], {}), "('/test/aa/cc.txt')\n", (1798, 1817), False, 'from system import shell\n'), ((2120, 2153), 'system.shell.remove_empty_files', 'shell.remove_empty_files', (['"""/test"""'], {}), "('/test')\n", (2144, 2153), False, 'from system import shell\n'), ((2317, 2505), 'tests.test_libs.helpers.patch', 'test_helpers.patch', (['self', "['os.chmod', 'os.mkdir', 'os.path.exists', 'os.system',\n 'system.environment.platform', 'metrics.logs.log_error',\n 'metrics.logs.log_warn', 'shutil.rmtree']"], {}), "(self, ['os.chmod', 'os.mkdir', 'os.path.exists',\n 'os.system', 'system.environment.platform', 'metrics.logs.log_error',\n 'metrics.logs.log_warn', 'shutil.rmtree'])\n", (2335, 2505), True, 'from tests.test_libs import helpers as test_helpers\n'), ((2904, 2952), 'system.shell.remove_directory', 'shell.remove_directory', (['"""dir"""'], {'recreate': 'recreate'}), "('dir', recreate=recreate)\n", (2926, 2952), False, 'from system import shell\n'), ((5812, 5828), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (5826, 5828), False, 'import mock\n'), ((6175, 6207), 'tests.test_libs.test_utils.set_up_pyfakefs', 'test_utils.set_up_pyfakefs', (['self'], {}), '(self)\n', (6201, 6207), False, 'from tests.test_libs import test_utils\n'), ((6700, 6732), 'tests.test_libs.test_utils.set_up_pyfakefs', 'test_utils.set_up_pyfakefs', (['self'], {}), '(self)\n', (6726, 6732), False, 'from tests.test_libs import test_utils\n'), ((7560, 7609), 'tests.test_libs.helpers.patch', 'test_helpers.patch', (['self', "['tempfile.gettempdir']"], {}), "(self, ['tempfile.gettempdir'])\n", (7578, 7609), True, 'from tests.test_libs import helpers as test_helpers\n'), ((7677, 7709), 'tests.test_libs.test_utils.set_up_pyfakefs', 'test_utils.set_up_pyfakefs', (['self'], {}), '(self)\n', (7703, 7709), False, 'from tests.test_libs import test_utils\n'), ((8095, 8144), 'os.symlink', 'os.symlink', (['"""/unrelated/zz.txt"""', '"""/tmp/hh/gg.txt"""'], {}), "('/unrelated/zz.txt', '/tmp/hh/gg.txt')\n", (8105, 8144), False, 'import os\n'), ((8149, 8184), 'os.symlink', 'os.symlink', (['"""/unrelated"""', '"""/tmp/ii"""'], {}), "('/unrelated', '/tmp/ii')\n", (8159, 8184), False, 'import os\n'), ((8190, 8225), 'system.shell.clear_system_temp_directory', 'shell.clear_system_temp_directory', ([], {}), '()\n', (8223, 8225), False, 'from system import shell\n'), ((999, 1021), 'system.environment.is_posix', 'environment.is_posix', ([], {}), '()\n', (1019, 1021), False, 'from system import environment\n'), ((1476, 1509), 'os.path.exists', 'os.path.exists', (['"""/test/aa/bb.txt"""'], {}), "('/test/aa/bb.txt')\n", (1490, 1509), False, 'import os\n'), ((1531, 1567), 'os.path.exists', 'os.path.exists', (['"""/test/aa/aa/dd.txt"""'], {}), "('/test/aa/aa/dd.txt')\n", (1545, 1567), False, 'import os\n'), ((1590, 1623), 'os.path.exists', 'os.path.exists', (['"""/test/aa/cc.txt"""'], {}), "('/test/aa/cc.txt')\n", (1604, 1623), False, 'import 
os\n'), ((1646, 1682), 'os.path.exists', 'os.path.exists', (['"""/test/aa/aa/aa.txt"""'], {}), "('/test/aa/aa/aa.txt')\n", (1660, 1682), False, 'import os\n'), ((1838, 1871), 'os.path.exists', 'os.path.exists', (['"""/test/aa/cc.txt"""'], {}), "('/test/aa/cc.txt')\n", (1852, 1871), False, 'import os\n'), ((2174, 2207), 'os.path.exists', 'os.path.exists', (['"""/test/aa/cc.txt"""'], {}), "('/test/aa/cc.txt')\n", (2188, 2207), False, 'import os\n'), ((4171, 4200), 'system.shell.remove_directory', 'shell.remove_directory', (['"""dir"""'], {}), "('dir')\n", (4193, 4200), False, 'from system import shell\n'), ((4532, 4561), 'system.shell.remove_directory', 'shell.remove_directory', (['"""dir"""'], {}), "('dir')\n", (4554, 4561), False, 'from system import shell\n'), ((5014, 5063), 'system.shell.remove_directory', 'shell.remove_directory', (['"""dir"""'], {'ignore_errors': '(True)'}), "('dir', ignore_errors=True)\n", (5036, 5063), False, 'from system import shell\n'), ((5542, 5571), 'system.shell.remove_directory', 'shell.remove_directory', (['"""dir"""'], {}), "('dir')\n", (5564, 5571), False, 'from system import shell\n'), ((6526, 6568), 'system.shell.get_directory_file_count', 'shell.get_directory_file_count', (['"""/test/aa"""'], {}), "('/test/aa')\n", (6556, 6568), False, 'from system import shell\n'), ((7045, 7081), 'system.shell.get_directory_size', 'shell.get_directory_size', (['"""/test/aa"""'], {}), "('/test/aa')\n", (7069, 7081), False, 'from system import shell\n'), ((7250, 7272), 'system.environment.is_posix', 'environment.is_posix', ([], {}), '()\n', (7270, 7272), False, 'from system import environment\n'), ((7397, 7414), 'system.shell.which', 'shell.which', (['"""ls"""'], {}), "('ls')\n", (7408, 7414), False, 'from system import shell\n'), ((8247, 8269), 'os.path.exists', 'os.path.exists', (['"""/tmp"""'], {}), "('/tmp')\n", (8261, 8269), False, 'import os\n'), ((8291, 8319), 'os.path.exists', 'os.path.exists', (['"""/unrelated"""'], {}), "('/unrelated')\n", (8305, 8319), False, 'import os\n'), ((8342, 8380), 'system.shell.get_directory_file_count', 'shell.get_directory_file_count', (['"""/tmp"""'], {}), "('/tmp')\n", (8372, 8380), False, 'from system import shell\n'), ((8406, 8450), 'system.shell.get_directory_file_count', 'shell.get_directory_file_count', (['"""/unrelated"""'], {}), "('/unrelated')\n", (8436, 8450), False, 'from system import shell\n'), ((8476, 8508), 'os.path.exists', 'os.path.exists', (['"""/tmp/aa/bb.txt"""'], {}), "('/tmp/aa/bb.txt')\n", (8490, 8508), False, 'import os\n'), ((8531, 8566), 'os.path.exists', 'os.path.exists', (['"""/tmp/cc/dd/ee.txt"""'], {}), "('/tmp/cc/dd/ee.txt')\n", (8545, 8566), False, 'import os\n'), ((8589, 8617), 'os.path.exists', 'os.path.exists', (['"""/tmp/ff/gg"""'], {}), "('/tmp/ff/gg')\n", (8603, 8617), False, 'import os\n'), ((8640, 8665), 'os.path.exists', 'os.path.exists', (['"""/tmp/hh"""'], {}), "('/tmp/hh')\n", (8654, 8665), False, 'import os\n'), ((9018, 9060), 'system.shell.get_execute_command', 'shell.get_execute_command', (['file_to_execute'], {}), '(file_to_execute)\n', (9043, 9060), False, 'from system import shell\n'), ((10087, 10118), 'system.shell.get_interpreter', 'shell.get_interpreter', (['"""run.py"""'], {}), "('run.py')\n", (10108, 10118), False, 'from system import shell\n'), ((10324, 10359), 'system.shell.get_interpreter', 'shell.get_interpreter', (['"""executable"""'], {}), "('executable')\n", (10345, 10359), False, 'from system import shell\n'), ((3439, 3477), 'mock.call', 'mock.call', (['"""rd /s /q "dir" 
> nul 2>&1"""'], {}), '(\'rd /s /q "dir" > nul 2>&1\')\n', (3448, 3477), False, 'import mock\n'), ((3697, 3739), 'mock.call', 'mock.call', (['"""rm -rf "dir" > /dev/null 2>&1"""'], {}), '(\'rm -rf "dir" > /dev/null 2>&1\')\n', (3706, 3739), False, 'import mock\n'), ((4250, 4292), 'mock.call', 'mock.call', (['"""rm -rf "dir" > /dev/null 2>&1"""'], {}), '(\'rm -rf "dir" > /dev/null 2>&1\')\n', (4259, 4292), False, 'import mock\n'), ((4334, 4368), 'mock.call', 'mock.call', (['"""dir"""'], {'onerror': 'mock.ANY'}), "('dir', onerror=mock.ANY)\n", (4343, 4368), False, 'import mock\n'), ((4614, 4657), 'mock.call', 'mock.call', (['"""Failed to clear directory dir."""'], {}), "('Failed to clear directory dir.')\n", (4623, 4657), False, 'import mock\n'), ((4763, 4805), 'mock.call', 'mock.call', (['"""rm -rf "dir" > /dev/null 2>&1"""'], {}), '(\'rm -rf "dir" > /dev/null 2>&1\')\n', (4772, 4805), False, 'import mock\n'), ((4847, 4881), 'mock.call', 'mock.call', (['"""dir"""'], {'onerror': 'mock.ANY'}), "('dir', onerror=mock.ANY)\n", (4856, 4881), False, 'import mock\n'), ((5115, 5158), 'mock.call', 'mock.call', (['"""Failed to clear directory dir."""'], {}), "('Failed to clear directory dir.')\n", (5124, 5158), False, 'import mock\n'), ((5265, 5307), 'mock.call', 'mock.call', (['"""rm -rf "dir" > /dev/null 2>&1"""'], {}), '(\'rm -rf "dir" > /dev/null 2>&1\')\n', (5274, 5307), False, 'import mock\n'), ((5349, 5383), 'mock.call', 'mock.call', (['"""dir"""'], {'onerror': 'mock.ANY'}), "('dir', onerror=mock.ANY)\n", (5358, 5383), False, 'import mock\n'), ((5621, 5663), 'mock.call', 'mock.call', (['"""rm -rf "dir" > /dev/null 2>&1"""'], {}), '(\'rm -rf "dir" > /dev/null 2>&1\')\n', (5630, 5663), False, 'import mock\n'), ((5705, 5739), 'mock.call', 'mock.call', (['"""dir"""'], {'onerror': 'mock.ANY'}), "('dir', onerror=mock.ANY)\n", (5714, 5739), False, 'import mock\n'), ((5954, 5981), 'mock.call', 'mock.call', (['"""dir/child"""', '(488)'], {}), "('dir/child', 488)\n", (5963, 5981), False, 'import mock\n'), ((6016, 6038), 'mock.call', 'mock.call', (['"""dir/child"""'], {}), "('dir/child')\n", (6025, 6038), False, 'import mock\n'), ((3165, 3181), 'mock.call', 'mock.call', (['"""dir"""'], {}), "('dir')\n", (3174, 3181), False, 'import mock\n')] |
osamaqureshi/NLP-for-Urdu | Language Model/birnn/model.py | 864550dbf27244900c2be86e0bedcfb5bb519cb6 | import numpy as np
import tensorflow as tf
class Bidirectional(tf.keras.Model):
def __init__(self, units: int,
projection_units: int):
super(Bidirectional, self).__init__()
self.units = units
self.projection_units = projection_units
self.Layers = [tf.keras.layers.Bidirectional(tf.keras.layers.GRU(self.units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform',
name='birnn')),
tf.keras.layers.Dense(self.projection_units, name='projection')]
def call(self, inp):
out, _, _ = self.Layers[0](inp)
out = self.Layers[1](out)
return out
class BiRNN(tf.keras.Model):
    def __init__(self, units: int, projection_units: int, max_seq_length: int,
                 vocab_size: int, embedding_dim: int, embedding_matrix=None):
        super(BiRNN, self).__init__()
        self.units = units
        self.projection_units = projection_units
self.max_seq_length = max_seq_length
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.embeddings = tf.keras.layers.Embedding(self.vocab_size, self.embedding_dim,
weights = [embedding_matrix],
trainable=False, name='embeddings')
self.Layers = [Bidirectional(units=self.units, projection_units=self.projection_units),
tf.keras.layers.Add(),
Bidirectional(units=self.units, projection_units=self.projection_units),
tf.keras.layers.Dense(self.vocab_size, activation='softmax', name='softmax')]
def call(self, inp, predict=False):
inp = self.embeddings(inp)
out1 = self.Layers[0](inp)
out2 = self.Layers[1]([inp, out1])
out3 = self.Layers[2](out2)
if predict is False:
return out3
else:
out4 = self.Layers[3](out3)
return out4
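# Note: the Add layer in BiRNN.call sums the raw embeddings with the output of the
# first Bidirectional block, so the residual connection implicitly assumes
# projection_units == embedding_dim; otherwise tf.keras.layers.Add raises a shape mismatch.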
def loss_function(real, pred, loss_object):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
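# loss_function treats token id 0 as padding: those positions are masked out before
# averaging. Illustrative example (values are hypothetical): with real = [[5, 7, 0]]
# only the first two positions contribute to the mean loss.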
def mask_sequences(seq, t):
mask = np.zeros(seq.shape)
mask[:,:t] = 1
inp = tf.math.multiply(seq, mask)
mask[:,:t+1] = 1
tar = tf.math.multiply(seq, mask)
return inp, tar | [((2455, 2487), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'loss_.dtype'}), '(mask, dtype=loss_.dtype)\n', (2462, 2487), True, 'import tensorflow as tf\n'), ((2517, 2538), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_'], {}), '(loss_)\n', (2531, 2538), True, 'import tensorflow as tf\n'), ((2579, 2598), 'numpy.zeros', 'np.zeros', (['seq.shape'], {}), '(seq.shape)\n', (2587, 2598), True, 'import numpy as np\n'), ((2628, 2655), 'tensorflow.math.multiply', 'tf.math.multiply', (['seq', 'mask'], {}), '(seq, mask)\n', (2644, 2655), True, 'import tensorflow as tf\n'), ((2687, 2714), 'tensorflow.math.multiply', 'tf.math.multiply', (['seq', 'mask'], {}), '(seq, mask)\n', (2703, 2714), True, 'import tensorflow as tf\n'), ((1414, 1545), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['self.vocab_size', 'self.embedding_dim'], {'weights': '[embedding_matrix]', 'trainable': '(False)', 'name': '"""embeddings"""'}), "(self.vocab_size, self.embedding_dim, weights=[\n embedding_matrix], trainable=False, name='embeddings')\n", (1439, 1545), True, 'import tensorflow as tf\n'), ((2384, 2406), 'tensorflow.math.equal', 'tf.math.equal', (['real', '(0)'], {}), '(real, 0)\n', (2397, 2406), True, 'import tensorflow as tf\n'), ((782, 845), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.projection_units'], {'name': '"""projection"""'}), "(self.projection_units, name='projection')\n", (803, 845), True, 'import tensorflow as tf\n'), ((1767, 1788), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (1786, 1788), True, 'import tensorflow as tf\n'), ((1909, 1985), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.vocab_size'], {'activation': '"""softmax"""', 'name': '"""softmax"""'}), "(self.vocab_size, activation='softmax', name='softmax')\n", (1930, 1985), True, 'import tensorflow as tf\n'), ((333, 464), 'tensorflow.keras.layers.GRU', 'tf.keras.layers.GRU', (['self.units'], {'return_sequences': '(True)', 'return_state': '(True)', 'recurrent_initializer': '"""glorot_uniform"""', 'name': '"""birnn"""'}), "(self.units, return_sequences=True, return_state=True,\n recurrent_initializer='glorot_uniform', name='birnn')\n", (352, 464), True, 'import tensorflow as tf\n')] |
jamesliu/ray | python/ray/train/__init__.py | 11ab412db1fa3603a3006e8ed414e80dd1f11c0c | from ray.train.backend import BackendConfig
from ray.train.callbacks import TrainingCallback
from ray.train.checkpoint import CheckpointStrategy
from ray.train.session import (get_dataset_shard, local_rank, load_checkpoint,
report, save_checkpoint, world_rank, world_size)
from ray.train.trainer import Trainer, TrainingIterator
__all__ = [
"BackendConfig", "CheckpointStrategy", "get_dataset_shard",
"load_checkpoint", "local_rank", "report", "save_checkpoint",
"TrainingIterator", "TrainingCallback", "Trainer", "world_rank",
"world_size"
]
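# Typical usage sketch (uses only the names re-exported above; exact arguments may vary by version):
#   from ray.train import Trainer
#   trainer = Trainer(backend="torch", num_workers=2)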
| [] |
anastas11a/python_training | test/test_contact_in_group.py | 1daceddb193d92542f7f7313026a7e67af4d89bb | from model.contact import Contact
from model.group import Group
import random
def test_add_contact_in_group(app, db):
app.open_home_page()
    contact = db.get_contact_list()
    if len(contact) == 0:
        app.contact.create(Contact(firstname="test firstname changed"))
        contact = db.get_contact_list()
    group = db.get_group_list()
    if len(group) == 0:
        app.group.create(Group(name="test"))
        group = db.get_group_list()
contact_rand = random.choice(contact)
group_rand = random.choice(group)
app.contact.add_contact_to_group(contact_rand.id, group_rand.id)
l = db.get_contacts_in_group(Group(id=group_rand.id))
assert contact_rand in l
def test_del_contact_from_group(app, db):
app.open_home_page()
    contact = db.get_contact_list()
    if len(contact) == 0:
        app.contact.create(Contact(firstname="test firstname changed"))
        contact = db.get_contact_list()
    group = db.get_group_list()
    if len(group) == 0:
        app.group.create(Group(name="test"))
        group = db.get_group_list()
group_rand = random.choice(group)
app.contact.open_contacts_in_group(group_rand.id)
contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id))
    if len(contacts_in_group) == 0:
        app.contact.view_all_contacts()
        contact_rand = random.choice(contact)
        app.contact.add_contact_to_group(contact_rand.id, group_rand.id)
        app.contact.open_contacts_in_group(group_rand.id)
        contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id))
    app.contact.del_contact_from_group()
    l = db.get_contacts_in_group(Group(id=group_rand.id))
    assert len(l) == len(contacts_in_group) - 1
| [((400, 422), 'random.choice', 'random.choice', (['contact'], {}), '(contact)\n', (413, 422), False, 'import random\n'), ((440, 460), 'random.choice', 'random.choice', (['group'], {}), '(group)\n', (453, 460), False, 'import random\n'), ((940, 960), 'random.choice', 'random.choice', (['group'], {}), '(group)\n', (953, 960), False, 'import random\n'), ((563, 586), 'model.group.Group', 'Group', ([], {'id': 'group_rand.id'}), '(id=group_rand.id)\n', (568, 586), False, 'from model.group import Group\n'), ((1064, 1087), 'model.group.Group', 'Group', ([], {'id': 'group_rand.id'}), '(id=group_rand.id)\n', (1069, 1087), False, 'from model.group import Group\n'), ((1188, 1210), 'random.choice', 'random.choice', (['contact'], {}), '(contact)\n', (1201, 1210), False, 'import random\n'), ((1474, 1497), 'model.group.Group', 'Group', ([], {'id': 'group_rand.id'}), '(id=group_rand.id)\n', (1479, 1497), False, 'from model.group import Group\n'), ((233, 276), 'model.contact.Contact', 'Contact', ([], {'firstname': '"""test firstname changed"""'}), "(firstname='test firstname changed')\n", (240, 276), False, 'from model.contact import Contact\n'), ((361, 379), 'model.group.Group', 'Group', ([], {'name': '"""test"""'}), "(name='test')\n", (366, 379), False, 'from model.group import Group\n'), ((775, 818), 'model.contact.Contact', 'Contact', ([], {'firstname': '"""test firstname changed"""'}), "(firstname='test firstname changed')\n", (782, 818), False, 'from model.contact import Contact\n'), ((903, 921), 'model.group.Group', 'Group', ([], {'name': '"""test"""'}), "(name='test')\n", (908, 921), False, 'from model.group import Group\n'), ((1375, 1398), 'model.group.Group', 'Group', ([], {'id': 'group_rand.id'}), '(id=group_rand.id)\n', (1380, 1398), False, 'from model.group import Group\n')] |
LikeLion-CAU-9th/Django-fancy-coder | byurak/accounts/admin.py | 53c770f4c1891f9076bed8c89d0b942b77e67667 | from django.contrib import admin
from accounts.models import User, Profile, UserFollow
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = ['email', 'nickname']
list_display_links = ['email', 'nickname']
admin.site.register(Profile)
admin.site.register(UserFollow)
| [((90, 110), 'django.contrib.admin.register', 'admin.register', (['User'], {}), '(User)\n', (104, 110), False, 'from django.contrib import admin\n'), ((239, 267), 'django.contrib.admin.site.register', 'admin.site.register', (['Profile'], {}), '(Profile)\n', (258, 267), False, 'from django.contrib import admin\n'), ((268, 299), 'django.contrib.admin.site.register', 'admin.site.register', (['UserFollow'], {}), '(UserFollow)\n', (287, 299), False, 'from django.contrib import admin\n')] |
olmozavala/eoas-pyutils | viz_utils/eoa_viz.py | f552a512e250f8aa16e1f3ababf8b4644253918b | import os
from PIL import Image
import cv2
from os import listdir
from os.path import join
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import LogNorm
from io_utils.io_common import create_folder
from viz_utils.constants import PlotMode, BackgroundType
import pylab
import numpy as np
import cmocean
import shapely
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy
def select_colormap(field_name):
'''
    Based on the name of the field it chooses a colormap from cmocean
    Args:
        field_name: name of the field to be plotted (e.g. 'sst', 'ssh', 'salinity')
    Returns:
        a cmocean colormap matching the field, or None if no keyword matches
'''
if np.any([field_name.find(x) != -1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]):
# cmaps_fields.append(cmocean.cm.deep_r)
return cmocean.cm.curl
elif np.any([field_name.find(x) != -1 for x in ('temp', 'sst', 'temperature')]):
return cmocean.cm.thermal
elif np.any([field_name.find(x) != -1 for x in ('vorticity', 'vort')]):
return cmocean.cm.curl
elif np.any([field_name.find(x) != -1 for x in ('salin', 'sss', 'sal')]):
return cmocean.cm.haline
elif field_name.find('error') != -1:
return cmocean.cm.diff
elif field_name.find('binary') != -1:
return cmocean.cm.oxy
elif np.any([field_name.find(x) != -1 for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]):
return cmocean.cm.speed
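# Illustrative examples of the mapping above (derived from the branches, not executed here):
#   select_colormap('sst')      -> cmocean.cm.thermal
#   select_colormap('srfhgt')   -> cmocean.cm.curl
#   select_colormap('salinity') -> cmocean.cm.haline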
class EOAImageVisualizer:
"""This class makes plenty of plots assuming we are plotting Geospatial data (maps).
It is made to read xarrays, numpy arrays, and numpy arrays in dictionaries
    vizobj = EOAImageVisualizer(disp_images=True, output_folder='output',
lats=[lats],lons=[lons])
"""
_COLORS = ['y', 'r', 'c', 'b', 'g', 'w', 'k', 'y', 'r', 'c', 'b', 'g', 'w', 'k']
_figsize = 8
_font_size = 30
_units = ''
_max_imgs_per_row = 4
    _mincbar = np.nan  # User can set min and max colorbar values to 'force' the same color bar for all plots
    _maxcbar = np.nan
    _flip_data = True
    _eoas_pyutils_path = './eoas_pyutils'  # Path where the eoas_pyutils folder is stored with respect to the main project
_contourf = False # When plotting non-regular grids and need precision
_background = BackgroundType.BLUE_MARBLE_LR # Select the background to use
_auto_colormap = True # Selects the colormap based on the name of the field
_show_var_names = False # Includes the name of the field name in the titles
_additional_polygons = [] # MUST BE SHAPELY GEOMETRIES In case we want to include additional polygons in the plots (all of them)
# If you want to add a streamplot of a vector field. It must be a dictionary with keys x,y,u,v
# and optional density, color, cmap, arrowsize, arrowstyle, minlength
_vector_field = None
_norm = None # Use to normalize the colormap. For example with LogNorm
# vizobj = EOAImageVisualizer(disp_images=True, output_folder='output',
# lats=[lats],lons=[lons])
def __init__(self, disp_images=True, output_folder='output',
lats=[-90,90], lons =[-180,180],
projection=ccrs.PlateCarree(), **kwargs):
# All the arguments that are passed to the constructor of the class MUST have its name on it.
self._disp_images = disp_images
self._output_folder = output_folder
self._projection = projection
bbox = self.getExtent(lats, lons)
self._extent = bbox
self._lats = lats
self._lons = lons
self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2])
self._contour_labels = False
for arg_name, arg_value in kwargs.items():
self.__dict__["_" + arg_name] = arg_value
print(self.__dict__["_" + arg_name])
def __getattr__(self, attr):
'''Generic getter for all the properties of the class'''
return self.__dict__["_" + attr]
def __setattr__(self, attr, value):
'''Generic setter for all the properties of the class'''
self.__dict__["_" + attr] = value
def add_colorbar(self, fig, im, ax, show_color_bar, label=""):
# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html
if show_color_bar:
font_size_cbar = self._font_size * .5
# TODO how to make this automatic and works always
cbar = fig.colorbar(im, ax=ax, shrink=.7)
cbar.ax.tick_params(labelsize=font_size_cbar)
if label != "":
cbar.set_label(label, fontsize=font_size_cbar*1.2)
else:
cbar.set_label(self._units, fontsize=font_size_cbar*1.2)
def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None:
"""
Plots a 2D img for EOA data.
:param c_img: 2D array
:param ax: geoaxes
:return:
"""
c_ax = ax
if self._flip_data:
origin = 'lower'
else:
origin = 'upper'
if self._background == BackgroundType.CARTO_DEF:
c_ax.stock_img()
else:
if self._background == BackgroundType.BLUE_MARBLE_LR:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png'))
if self._background == BackgroundType.BLUE_MARBLE_HR:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg'))
if self._background == BackgroundType.TOPO:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png'))
if self._background == BackgroundType.BATHYMETRY:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg'))
c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree())
if mode == PlotMode.RASTER or mode == PlotMode.MERGED:
if self._contourf:
                im = c_ax.contourf(self._lons, self._lats, c_img, levels=255, cmap=cmap, extent=self._extent)
else:
if np.isnan(mincbar):
im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm)
else:
im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm)
if mode == PlotMode.CONTOUR or mode == PlotMode.MERGED:
c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons)))
if mode == PlotMode.CONTOUR:
im = c_ax.contour(c_img, extent=self._extent, transform=self._projection)
if mode == PlotMode.MERGED:
if self._contour_labels:
c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection)
else:
c_ax.contour(c_img, extent=self._extent, transform=self._projection)
if len(self._additional_polygons) > 0:
pol_lats = []
pol_lons = []
for c_polygon in self._additional_polygons:
if isinstance(c_polygon, shapely.geometry.linestring.LineString):
x,y = c_polygon.xy
elif isinstance(c_polygon, shapely.geometry.polygon.Polygon):
x, y = c_polygon.exterior.xy
pol_lats += y
pol_lons += x
c_ax.plot(x,y, transform=self._projection, c='r')
# Adds a threshold to the plot to see the polygons
c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons, 0.5))
if self._vector_field != None:
try:
u = self._vector_field['u']
v = self._vector_field['v']
x = self._vector_field['x']
y = self._vector_field['y']
vec_keys = self._vector_field.keys()
c = 'r'
density = 1
linewidth = 3
vec_cmap = cmocean.cm.solar
if 'color' in vec_keys:
c = self._vector_field['color']
if 'density' in vec_keys:
density = self._vector_field['density']
if 'linewidth' in vec_keys:
linewidth = self._vector_field['linewidth']
if 'cmap' in vec_keys:
vec_cmap = self._vector_field['cmap']
c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons)))
c_ax.streamplot(x, y, u, v, transform=self._projection, density=density, color=c,
cmap=vec_cmap, linewidth=linewidth)
except Exception as e:
print(F"Couldn't add vector field e:{e}")
gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--')
# gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'}
font_coords = {'size': self._font_size*.6}
gl.xlabel_style = font_coords
gl.ylabel_style = font_coords
gl.top_labels = False
gl.right_labels = False
return im
def get_proper_size(self, rows, cols):
"""
Obtains the proper size for a figure.
:param rows: how many rows will the figure have
        :param cols: how many cols will the figure have
        :param prop: proportion (w/h) to use; taken from self._fig_prop
:return:
"""
if rows == 1:
return self._figsize * cols * self._fig_prop, self._figsize
else:
return self._figsize * cols * self._fig_prop, self._figsize * rows
def _close_figure(self):
"""Depending on what is disp_images, the figures are displayed or just closed"""
if self._disp_images:
plt.show()
else:
plt.close()
def getExtent(self, lats, lons, expand_ext=0.0):
'''
        Obtains the bbox of the coordinates. If expand_ext is given, the bbox is expanded in all directions by that amount.
        Args:
            lats: list of latitudes
            lons: list of longitudes
            expand_ext: extra margin added on every side of the bbox
        Returns:
            bbox as (minLon, maxLon, minLat, maxLat)
'''
minLat = np.amin(lats) - expand_ext
maxLat = np.amax(lats) + expand_ext
minLon = np.amin(lons) - expand_ext
maxLon = np.amax(lons) + expand_ext
bbox = (minLon, maxLon, minLat, maxLat)
return bbox
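    # Example (illustrative values): getExtent([18, 32], [-98, -76], 0.5) returns
    # (-98.5, -75.5, 17.5, 32.5), i.e. (minLon, maxLon, minLat, maxLat).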
def xr_summary(self, ds):
""" Prints a summary of the netcdf (global attributes, variables, etc)
:param ds:
:return:
"""
print("\n========== Global attributes =========")
for name in ds.attrs:
print(F"{name} = {getattr(ds, name)}")
print("\n========== Dimensions =========")
for name in ds.dims:
print(F"{name}: {ds[name].shape}")
print("\n========== Coordinates =========")
for name in ds.coords:
print(F"{name}: {ds[name].shape}")
print("\n========== Variables =========")
for cur_variable_name in ds.variables:
cur_var = ds[cur_variable_name]
print(F"{cur_variable_name}: {cur_var.dims} {cur_var.shape}")
def nc_summary(self, ds):
""" Prints a summary of the netcdf (global attributes, variables, etc)
:param ds:
:return:
"""
print("\n========== Global attributes =========")
for name in ds.ncattrs():
print(F"{name} = {getattr(ds, name)}")
print("\n========== Variables =========")
netCDFvars = ds.variables
for cur_variable_name in netCDFvars.keys():
cur_var = ds.variables[cur_variable_name]
print(F"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}")
def add_roads(self, ax):
# Names come from: https://www.naturalearthdata.com/features/
# -- Add states
roads = cfeature.NaturalEarthFeature(
category='cultural',
name='roads',
scale='10m',
facecolor='none')
ax.add_feature(roads, edgecolor='black')
return ax
def add_states(self, ax):
# Names come from: https://www.naturalearthdata.com/features/
# -- Add states
states_provinces = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
scale='50m',
facecolor='none')
ax.add_feature(states_provinces, edgecolor='gray')
return ax
def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''):
'''
This function plots points in a map
:param bbox:
:return:
'''
if bbox is None:
bbox = (-180, 180, -90, 90)
if lats is None:
lats = self.lats
if lons is None:
lons = self.lons
fig, ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()})
        ax.set_extent(bbox)  # If we do not set this, it will crop it to the limits of the locations
ax.gridlines()
im = ax.scatter(lons, lats, s=s, c=c, cmap=cmap)
fig.colorbar(im, ax=ax, shrink=0.7)
ax.coastlines()
plt.title(title)
plt.show()
def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='',
file_name_prefix='', cmap=None, z_names = [],
show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):
"""
Plots multiple z_levels for multiple fields.
It uses rows for each depth, and columns for each variable
"""
create_folder(self._output_folder)
orig_cmap = cmap
# If the user do not requires any z-leve, then all are plotted
if len(z_levels) == 0:
z_levels = range(np_variables[var_names[0]].shape[0])
cols = np.min((self._max_imgs_per_row, len(var_names)))
if cols == len(var_names):
rows = len(z_levels)
else:
rows = int(len(z_levels) * np.ceil(len(var_names)/cols))
fig, _axs = plt.subplots(rows, cols,
figsize=self.get_proper_size(rows, cols),
subplot_kw={'projection': self._projection})
for c_zlevel, c_slice in enumerate(z_levels): # Iterates over the z-levels
# Verify the index of the z_levels are the original ones.
if len(z_names) != 0:
c_slice_txt = z_names[c_slice]
else:
c_slice_txt = c_slice
c_mincbar = np.nan
c_maxcbar = np.nan
for idx_var, c_var in enumerate(var_names): # Iterate over the fields
if rows*cols == 1: # Single figure
ax = _axs
else:
ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var]
# Here we chose the min and max colorbars for each field
if not(np.all(np.isnan(mincbar))):
if type(mincbar) is list:
c_mincbar = mincbar[idx_var]
else:
c_mincbar = mincbar
if not(np.all(np.isnan(maxcbar))):
if type(mincbar) is list:
c_maxcbar = maxcbar[idx_var]
else:
c_maxcbar = maxcbar
# By default we select the colorbar from the name of the variable
if self._auto_colormap and orig_cmap is None:
cmap = select_colormap(c_var)
else:
# If there is an array of colormaps we select the one for this field
if type(orig_cmap) is list:
cmap = orig_cmap[idx_var]
else:
# If it is just one cmap, then we use it for all the fields
cmap = orig_cmap
im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode,
mincbar=c_mincbar, maxcbar=c_maxcbar)
if self._show_var_names:
c_title = F'{var_names[idx_var]} {title}'
else:
c_title = F'{title}'
if len(z_levels) > 1:
c_title += F"Z - level: {c_slice_txt}"
ax.set_title(c_title, fontsize=self._font_size)
self.add_colorbar(fig, im, ax, show_color_bar)
plt.tight_layout(pad=.5)
file_name = F'{file_name_prefix}'
pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight')
self._close_figure()
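    # Minimal usage sketch (variable names 'ssh', 'lats', 'lons' are hypothetical,
    # assumed to be loaded elsewhere):
    #   viz = EOAImageVisualizer(output_folder='output', lats=lats, lons=lons)
    #   viz.plot_3d_data_npdict({'ssh': ssh}, ['ssh'], z_levels=[0], title='SSH',
    #                           file_name_prefix='ssh_example')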
def plot_2d_data_xr(self, np_variables:list, var_names:list, title='',
file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):
'''
Wrapper function to receive raw 2D numpy data. It calls the 'main' function for 3D plotting
:param np_variables:
:param var_names:
:param title:
:param file_name_prefix:
:param cmap:
:param flip_data:
:param rot_90:
:param show_color_bar:
:param plot_mode:
:param mincbar:
:param maxcbar:
:return:
'''
npdict_3d = {}
for i, field_name in enumerate(var_names):
npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0)
self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title,
file_name_prefix=file_name_prefix, cmap=cmap, z_names = [],
show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar)
def plot_2d_data_np(self, np_variables:list, var_names:list, title='',
file_name_prefix='', cmap=None, flip_data=False,
rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):
'''
Wrapper function to receive raw 2D numpy data. It calls the 'main' function for 3D plotting
:param np_variables: Numpy variables. They can be with shape [fields, x, y] or just a single field with shape [x,y]
:param var_names:
:param title:
:param file_name_prefix:
:param cmap:
:param flip_data:
:param rot_90:
:param show_color_bar:
:param plot_mode:
:param mincbar:
:param maxcbar:
:return:
'''
npdict_3d = {}
for i, field_name in enumerate(var_names):
if len(np_variables.shape) == 3:
c_np_data = np_variables[i, :, :]
else:
c_np_data = np_variables # Single field
if rot_90:
c_np_data = np.rot90(c_np_data)
if flip_data:
c_np_data = np.flip(np.flip(c_np_data), axis=1)
npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0)
self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title,
file_name_prefix=file_name_prefix, cmap=cmap, z_names = [],
show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar)
def make_video_from_images(self, input_folder, output_file, fps=24):
files = listdir(input_folder)
files.sort()
print(F"Generating video file: {output_file}")
out_video = -1
for i, file_name in enumerate(files[0:36]):
if i % 10 == 0:
print(F"Adding file # {i}: {file_name}")
c_file = join(input_folder, file_name)
im = Image.open(c_file)
np_im = np.asarray(im)[:, :, :3]
if i == 0:
video_size = (np_im.shape[1], np_im.shape[0])
out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True)
out_video.write(np_im[:, :, ::-1])
out_video.release()
cv2.destroyAllWindows()
print("Done! yeah babe!") | [((3165, 3183), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3181, 3183), True, 'import cartopy.crs as ccrs\n'), ((12073, 12171), 'cartopy.feature.NaturalEarthFeature', 'cfeature.NaturalEarthFeature', ([], {'category': '"""cultural"""', 'name': '"""roads"""', 'scale': '"""10m"""', 'facecolor': '"""none"""'}), "(category='cultural', name='roads', scale='10m',\n facecolor='none')\n", (12101, 12171), True, 'import cartopy.feature as cfeature\n'), ((12437, 12561), 'cartopy.feature.NaturalEarthFeature', 'cfeature.NaturalEarthFeature', ([], {'category': '"""cultural"""', 'name': '"""admin_1_states_provinces_lines"""', 'scale': '"""50m"""', 'facecolor': '"""none"""'}), "(category='cultural', name=\n 'admin_1_states_provinces_lines', scale='50m', facecolor='none')\n", (12465, 12561), True, 'import cartopy.feature as cfeature\n'), ((13453, 13469), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (13462, 13469), True, 'import matplotlib.pyplot as plt\n'), ((13478, 13488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13486, 13488), True, 'import matplotlib.pyplot as plt\n'), ((13914, 13948), 'io_utils.io_common.create_folder', 'create_folder', (['self._output_folder'], {}), '(self._output_folder)\n', (13927, 13948), False, 'from io_utils.io_common import create_folder\n'), ((16832, 16857), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.5)'}), '(pad=0.5)\n', (16848, 16857), True, 'import matplotlib.pyplot as plt\n'), ((19744, 19765), 'os.listdir', 'listdir', (['input_folder'], {}), '(input_folder)\n', (19751, 19765), False, 'from os import listdir\n'), ((20417, 20440), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (20438, 20440), False, 'import cv2\n'), ((9968, 9978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9976, 9978), True, 'import matplotlib.pyplot as plt\n'), ((10005, 10016), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10014, 10016), True, 'import matplotlib.pyplot as plt\n'), ((10333, 10346), 'numpy.amin', 'np.amin', (['lats'], {}), '(lats)\n', (10340, 10346), True, 'import numpy as np\n'), ((10377, 10390), 'numpy.amax', 'np.amax', (['lats'], {}), '(lats)\n', (10384, 10390), True, 'import numpy as np\n'), ((10421, 10434), 'numpy.amin', 'np.amin', (['lons'], {}), '(lons)\n', (10428, 10434), True, 'import numpy as np\n'), ((10465, 10478), 'numpy.amax', 'np.amax', (['lons'], {}), '(lons)\n', (10472, 10478), True, 'import numpy as np\n'), ((16921, 16966), 'os.path.join', 'join', (['self._output_folder', 'f"""{file_name}.png"""'], {}), "(self._output_folder, f'{file_name}.png')\n", (16925, 16966), False, 'from os.path import join\n'), ((17777, 17825), 'numpy.expand_dims', 'np.expand_dims', (['np_variables[field_name]'], {'axis': '(0)'}), '(np_variables[field_name], axis=0)\n', (17791, 17825), True, 'import numpy as np\n'), ((19343, 19376), 'numpy.expand_dims', 'np.expand_dims', (['c_np_data'], {'axis': '(0)'}), '(c_np_data, axis=0)\n', (19357, 19376), True, 'import numpy as np\n'), ((20024, 20053), 'os.path.join', 'join', (['input_folder', 'file_name'], {}), '(input_folder, file_name)\n', (20028, 20053), False, 'from os.path import join\n'), ((20071, 20089), 'PIL.Image.open', 'Image.open', (['c_file'], {}), '(c_file)\n', (20081, 20089), False, 'from PIL import Image\n'), ((6138, 6155), 'numpy.isnan', 'np.isnan', (['mincbar'], {}), '(mincbar)\n', (6146, 6155), True, 'import numpy as np\n'), ((19197, 19216), 'numpy.rot90', 'np.rot90', 
(['c_np_data'], {}), '(c_np_data)\n', (19205, 19216), True, 'import numpy as np\n'), ((20110, 20124), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (20120, 20124), True, 'import numpy as np\n'), ((5236, 5298), 'os.path.join', 'join', (['self._eoas_pyutils_path', '"""viz_utils/imgs/bluemarble.png"""'], {}), "(self._eoas_pyutils_path, 'viz_utils/imgs/bluemarble.png')\n", (5240, 5298), False, 'from os.path import join\n'), ((5398, 5470), 'os.path.join', 'join', (['self._eoas_pyutils_path', '"""viz_utils/imgs/bluemarble_5400x2700.jpg"""'], {}), "(self._eoas_pyutils_path, 'viz_utils/imgs/bluemarble_5400x2700.jpg')\n", (5402, 5470), False, 'from os.path import join\n'), ((5560, 5617), 'os.path.join', 'join', (['self._eoas_pyutils_path', '"""viz_utils/imgs/etopo.png"""'], {}), "(self._eoas_pyutils_path, 'viz_utils/imgs/etopo.png')\n", (5564, 5617), False, 'from os.path import join\n'), ((5713, 5785), 'os.path.join', 'join', (['self._eoas_pyutils_path', '"""viz_utils/imgs/bathymetry_3600x1800.jpg"""'], {}), "(self._eoas_pyutils_path, 'viz_utils/imgs/bathymetry_3600x1800.jpg')\n", (5717, 5785), False, 'from os.path import join\n'), ((5867, 5885), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5883, 5885), True, 'import cartopy.crs as ccrs\n'), ((13174, 13192), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (13190, 13192), True, 'import cartopy.crs as ccrs\n'), ((19279, 19297), 'numpy.flip', 'np.flip', (['c_np_data'], {}), '(c_np_data)\n', (19286, 19297), True, 'import numpy as np\n'), ((20277, 20308), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (20299, 20308), False, 'import cv2\n'), ((15278, 15295), 'numpy.isnan', 'np.isnan', (['mincbar'], {}), '(mincbar)\n', (15286, 15295), True, 'import numpy as np\n'), ((15498, 15515), 'numpy.isnan', 'np.isnan', (['maxcbar'], {}), '(maxcbar)\n', (15506, 15515), True, 'import numpy as np\n')] |
fcendra/PSPnet18 | ade20kScripts/setup.py | bc4f4292f4ddd09dba7076ca0b587c8f60dfa043 | from os import listdir
from os.path import isfile, join
from path import Path
import numpy as np
import cv2
# Dataset path
target_path = Path('target/')
annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath()
dataset = [ f for f in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))]
images = np.empty(len(dataset), dtype = object)
count = 1
# Iterate all Training Images
for n in range(0, len(dataset)):
# Read image
images[n] = cv2.imread(join(annotation_images_path,dataset[n]))
# Convert it to array
array = np.asarray(images[n],dtype=np.int8)
    # Where a pixel value is less than 1, change it to 255.
    # If it is >= 1, decrement it by 1.
arr = np.where(array < 1, 255, array -1)
    # Save the remapped mask to another file
if count < 10:
cv2.imwrite(target_path +'ADE_train_0000000'+ str(count) + ".png", arr)
elif count < 100 and count > 9:
cv2.imwrite(target_path +'ADE_train_000000'+ str(count) + ".png", arr)
elif count < 1000 and count > 99:
cv2.imwrite(target_path +'ADE_train_00000'+ str(count) + ".png", arr)
elif count < 10000 and count > 999:
cv2.imwrite(target_path +'ADE_train_0000'+ str(count) + ".png", arr)
else:
cv2.imwrite(target_path +'ADE_train_000'+ str(count) + ".png", arr)
print(str(count) + ".png is printed")
count += 1
| [((138, 153), 'path.Path', 'Path', (['"""target/"""'], {}), "('target/')\n", (142, 153), False, 'from path import Path\n'), ((577, 613), 'numpy.asarray', 'np.asarray', (['images[n]'], {'dtype': 'np.int8'}), '(images[n], dtype=np.int8)\n', (587, 613), True, 'import numpy as np\n'), ((734, 769), 'numpy.where', 'np.where', (['(array < 1)', '(255)', '(array - 1)'], {}), '(array < 1, 255, array - 1)\n', (742, 769), True, 'import numpy as np\n'), ((179, 223), 'path.Path', 'Path', (['"""dataset/ade20k/annotations/training/"""'], {}), "('dataset/ade20k/annotations/training/')\n", (183, 223), False, 'from path import Path\n'), ((257, 288), 'os.listdir', 'listdir', (['annotation_images_path'], {}), '(annotation_images_path)\n', (264, 288), False, 'from os import listdir\n'), ((497, 537), 'os.path.join', 'join', (['annotation_images_path', 'dataset[n]'], {}), '(annotation_images_path, dataset[n])\n', (501, 537), False, 'from os.path import isfile, join\n'), ((299, 330), 'os.path.join', 'join', (['annotation_images_path', 'f'], {}), '(annotation_images_path, f)\n', (303, 330), False, 'from os.path import isfile, join\n')] |
Mannan2812/azure-cli-extensions | src/mesh/azext_mesh/servicefabricmesh/mgmt/servicefabricmesh/models/__init__.py | e2b34efe23795f6db9c59100534a40f0813c3d95 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .available_operation_display import AvailableOperationDisplay
from .error_details_model import ErrorDetailsModel
from .error_error_model import ErrorErrorModel
from .error_model import ErrorModel, ErrorModelException
from .operation_result import OperationResult
from .provisioned_resource_properties import ProvisionedResourceProperties
from .proxy_resource import ProxyResource
from .managed_proxy_resource import ManagedProxyResource
from .resource import Resource
from .tracked_resource import TrackedResource
from .secret_resource_properties import SecretResourceProperties
from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties
from .secret_resource_properties_base import SecretResourcePropertiesBase
from .secret_resource_description import SecretResourceDescription
from .secret_value import SecretValue
from .secret_value_properties import SecretValueProperties
from .secret_value_resource_description import SecretValueResourceDescription
from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile
from .volume_properties import VolumeProperties
from .volume_reference import VolumeReference
from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters
from .application_scoped_volume import ApplicationScopedVolume
from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk
from .volume_resource_description import VolumeResourceDescription
from .network_resource_properties import NetworkResourceProperties
from .local_network_resource_properties import LocalNetworkResourceProperties
from .endpoint_ref import EndpointRef
from .network_ref import NetworkRef
from .network_resource_properties_base import NetworkResourcePropertiesBase
from .network_resource_description import NetworkResourceDescription
from .gateway_destination import GatewayDestination
from .tcp_config import TcpConfig
from .http_route_match_path import HttpRouteMatchPath
from .http_route_match_header import HttpRouteMatchHeader
from .http_route_match_rule import HttpRouteMatchRule
from .http_route_config import HttpRouteConfig
from .http_host_config import HttpHostConfig
from .http_config import HttpConfig
from .gateway_properties import GatewayProperties
from .gateway_resource_description import GatewayResourceDescription
from .image_registry_credential import ImageRegistryCredential
from .environment_variable import EnvironmentVariable
from .setting import Setting
from .container_label import ContainerLabel
from .endpoint_properties import EndpointProperties
from .resource_requests import ResourceRequests
from .resource_limits import ResourceLimits
from .resource_requirements import ResourceRequirements
from .diagnostics_ref import DiagnosticsRef
from .reliable_collections_ref import ReliableCollectionsRef
from .container_state import ContainerState
from .container_event import ContainerEvent
from .container_instance_view import ContainerInstanceView
from .container_code_package_properties import ContainerCodePackageProperties
from .auto_scaling_trigger import AutoScalingTrigger
from .auto_scaling_mechanism import AutoScalingMechanism
from .auto_scaling_policy import AutoScalingPolicy
from .service_resource_description import ServiceResourceDescription
from .diagnostics_sink_properties import DiagnosticsSinkProperties
from .diagnostics_description import DiagnosticsDescription
from .application_properties import ApplicationProperties
from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription
from .application_resource_description import ApplicationResourceDescription
from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism
from .auto_scaling_metric import AutoScalingMetric
from .auto_scaling_resource_metric import AutoScalingResourceMetric
from .service_properties import ServiceProperties
from .service_replica_properties import ServiceReplicaProperties
from .service_replica_description import ServiceReplicaDescription
from .average_load_scaling_trigger import AverageLoadScalingTrigger
from .container_logs import ContainerLogs
from .operation_result_paged import OperationResultPaged
from .secret_resource_description_paged import SecretResourceDescriptionPaged
from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged
from .volume_resource_description_paged import VolumeResourceDescriptionPaged
from .network_resource_description_paged import NetworkResourceDescriptionPaged
from .gateway_resource_description_paged import GatewayResourceDescriptionPaged
from .application_resource_description_paged import ApplicationResourceDescriptionPaged
from .service_resource_description_paged import ServiceResourceDescriptionPaged
from .service_replica_description_paged import ServiceReplicaDescriptionPaged
from .service_fabric_mesh_management_client_enums import (
ResourceStatus,
HealthState,
SecretKind,
VolumeProvider,
SizeTypes,
ApplicationScopedVolumeKind,
NetworkKind,
HeaderMatchType,
OperatingSystemType,
DiagnosticsSinkKind,
AutoScalingMechanismKind,
AutoScalingMetricKind,
AutoScalingResourceMetricName,
AutoScalingTriggerKind,
)
__all__ = [
'AvailableOperationDisplay',
'ErrorDetailsModel',
'ErrorErrorModel',
'ErrorModel', 'ErrorModelException',
'OperationResult',
'ProvisionedResourceProperties',
'ProxyResource',
'ManagedProxyResource',
'Resource',
'TrackedResource',
'SecretResourceProperties',
'InlinedValueSecretResourceProperties',
'SecretResourcePropertiesBase',
'SecretResourceDescription',
'SecretValue',
'SecretValueProperties',
'SecretValueResourceDescription',
'VolumeProviderParametersAzureFile',
'VolumeProperties',
'VolumeReference',
'ApplicationScopedVolumeCreationParameters',
'ApplicationScopedVolume',
'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk',
'VolumeResourceDescription',
'NetworkResourceProperties',
'LocalNetworkResourceProperties',
'EndpointRef',
'NetworkRef',
'NetworkResourcePropertiesBase',
'NetworkResourceDescription',
'GatewayDestination',
'TcpConfig',
'HttpRouteMatchPath',
'HttpRouteMatchHeader',
'HttpRouteMatchRule',
'HttpRouteConfig',
'HttpHostConfig',
'HttpConfig',
'GatewayProperties',
'GatewayResourceDescription',
'ImageRegistryCredential',
'EnvironmentVariable',
'Setting',
'ContainerLabel',
'EndpointProperties',
'ResourceRequests',
'ResourceLimits',
'ResourceRequirements',
'DiagnosticsRef',
'ReliableCollectionsRef',
'ContainerState',
'ContainerEvent',
'ContainerInstanceView',
'ContainerCodePackageProperties',
'AutoScalingTrigger',
'AutoScalingMechanism',
'AutoScalingPolicy',
'ServiceResourceDescription',
'DiagnosticsSinkProperties',
'DiagnosticsDescription',
'ApplicationProperties',
'AzureInternalMonitoringPipelineSinkDescription',
'ApplicationResourceDescription',
'AddRemoveReplicaScalingMechanism',
'AutoScalingMetric',
'AutoScalingResourceMetric',
'ServiceProperties',
'ServiceReplicaProperties',
'ServiceReplicaDescription',
'AverageLoadScalingTrigger',
'ContainerLogs',
'OperationResultPaged',
'SecretResourceDescriptionPaged',
'SecretValueResourceDescriptionPaged',
'VolumeResourceDescriptionPaged',
'NetworkResourceDescriptionPaged',
'GatewayResourceDescriptionPaged',
'ApplicationResourceDescriptionPaged',
'ServiceResourceDescriptionPaged',
'ServiceReplicaDescriptionPaged',
'ResourceStatus',
'HealthState',
'SecretKind',
'VolumeProvider',
'SizeTypes',
'ApplicationScopedVolumeKind',
'NetworkKind',
'HeaderMatchType',
'OperatingSystemType',
'DiagnosticsSinkKind',
'AutoScalingMechanismKind',
'AutoScalingMetricKind',
'AutoScalingResourceMetricName',
'AutoScalingTriggerKind',
]
| [] |
Scoppio/Rogue-EVE | Core/managers/InputPeripherals.py | a46f1faa9c7835e8c5838f6270fb5d75b349936b | import logging
from models.GenericObjects import Vector2
logger = logging.getLogger('Rogue-EVE')
class MouseController(object):
"""
Mouse controller needs the map, get over it
"""
def __init__(self, map=None, object_pool=None):
self.mouse_coord = (0, 0)
self.map = map
self.object_pool = object_pool
self.camera = None
def set_map(self, map):
self.map = map
def set_object_pool(self, object_pool):
self.object_pool = object_pool
def get_mouse_coord(self):
return self.mouse_coord
def set_mouse_coord(self, new_coord):
self.mouse_coord = new_coord
logger.debug("mouse position {}".format(self.mouse_coord))
def get_names_under_mouse(self):
# return a string with the names of all objects under the mouse
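        # Translate the on-screen mouse position into map coordinates using the camera offset.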
(x, y) = self.camera.camera_coord + Vector2(*self.mouse_coord)
# create a list with the names of all objects at the mouse's coordinates and in FOV
objects = self.object_pool.get_objects_as_list()
names = ""
if self.map and self.object_pool:
if objects and self.map:
names = [obj.name for obj in objects
if obj.coord.X == x and obj.coord.Y == y and (x,y) in self.map.get_visible_tiles()]
names = ', '.join(names) # join the names, separated by commas
else:
logger.warning("map or object pool not initialized!")
return names.capitalize()
| [((67, 97), 'logging.getLogger', 'logging.getLogger', (['"""Rogue-EVE"""'], {}), "('Rogue-EVE')\n", (84, 97), False, 'import logging\n'), ((871, 897), 'models.GenericObjects.Vector2', 'Vector2', (['*self.mouse_coord'], {}), '(*self.mouse_coord)\n', (878, 897), False, 'from models.GenericObjects import Vector2\n')] |
vromanuk/async_techniques | the_unsync/thesync.py | 7e1c6efcd4c81c322002eb3002d5bb929c5bc623 | from unsync import unsync
import asyncio
import datetime
import math
import aiohttp
import requests
def main():
t0 = datetime.datetime.now()
tasks = [
compute_some(),
compute_some(),
compute_some(),
download_some(),
download_some(),
download_some(),
download_some_more(),
download_some_more(),
wait_some(),
wait_some(),
wait_some(),
wait_some()]
[t.result() for t in tasks]
dt = datetime.datetime.now() - t0
print('Unsync version done in {:,.2f} seconds.'.format(dt.total_seconds()))
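# cpu_bound=True tells unsync to run this function in its own process rather than a thread,
# so the CPU-heavy loop below does not block the asynchronous downloads and waits.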
@unsync(cpu_bound=True)
def compute_some():
print('Computing...')
for _ in range(1, 10_000_000):
math.sqrt(25 ** 25 + .01)
@unsync()
async def download_some():
print('Downloading...')
url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2'
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session:
async with session.get(url) as resp:
resp.raise_for_status()
text = await resp.text()
print('Downloaded (more) {:,} characters'.format(len(text)))
@unsync()
def download_some_more():
print('Downloading more...')
url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled'
resp = requests.get(url)
resp.raise_for_status()
text = resp.text
print('Downloaded (more) {:,} characters'.format(len(text)))
@unsync()
async def wait_some():
print('Waiting...')
for _ in range(1, 1000):
await asyncio.sleep(.001)
if __name__ == '__main__':
main()
| [((606, 628), 'unsync.unsync', 'unsync', ([], {'cpu_bound': '(True)'}), '(cpu_bound=True)\n', (612, 628), False, 'from unsync import unsync\n'), ((747, 755), 'unsync.unsync', 'unsync', ([], {}), '()\n', (753, 755), False, 'from unsync import unsync\n'), ((1197, 1205), 'unsync.unsync', 'unsync', ([], {}), '()\n', (1203, 1205), False, 'from unsync import unsync\n'), ((1493, 1501), 'unsync.unsync', 'unsync', ([], {}), '()\n', (1499, 1501), False, 'from unsync import unsync\n'), ((123, 146), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (144, 146), False, 'import datetime\n'), ((1358, 1375), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1370, 1375), False, 'import requests\n'), ((494, 517), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (515, 517), False, 'import datetime\n'), ((718, 744), 'math.sqrt', 'math.sqrt', (['(25 ** 25 + 0.01)'], {}), '(25 ** 25 + 0.01)\n', (727, 744), False, 'import math\n'), ((1592, 1612), 'asyncio.sleep', 'asyncio.sleep', (['(0.001)'], {}), '(0.001)\n', (1605, 1612), False, 'import asyncio\n'), ((958, 989), 'aiohttp.TCPConnector', 'aiohttp.TCPConnector', ([], {'ssl': '(False)'}), '(ssl=False)\n', (978, 989), False, 'import aiohttp\n')] |
henriktao/pulumi-azure | sdk/python/pulumi_azure/desktopvirtualization/workspace.py | f1cbcf100b42b916da36d8fe28be3a159abaf022 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['WorkspaceArgs', 'Workspace']
@pulumi.input_type
class WorkspaceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Workspace resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
:param pulumi.Input[str] description: A description for the Virtual Desktop Workspace.
:param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace.
:param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if description is not None:
pulumi.set(__self__, "description", description)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
A friendly name for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _WorkspaceState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Workspace resources.
:param pulumi.Input[str] description: A description for the Virtual Desktop Workspace.
:param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace.
:param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
A friendly name for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Workspace(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a Virtual Desktop Workspace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.core.ResourceGroup("example", location="West Europe")
workspace = azure.desktopvirtualization.Workspace("workspace",
location=example.location,
resource_group_name=example.name,
friendly_name="FriendlyName",
description="A description of my workspace")
```
## Import
Virtual Desktop Workspaces can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description for the Virtual Desktop Workspace.
:param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace.
:param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WorkspaceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Virtual Desktop Workspace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.core.ResourceGroup("example", location="West Europe")
workspace = azure.desktopvirtualization.Workspace("workspace",
location=example.location,
resource_group_name=example.name,
friendly_name="FriendlyName",
description="A description of my workspace")
```
## Import
Virtual Desktop Workspaces can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace
```
:param str resource_name: The name of the resource.
:param WorkspaceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WorkspaceArgs.__new__(WorkspaceArgs)
__props__.__dict__["description"] = description
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
super(Workspace, __self__).__init__(
'azure:desktopvirtualization/workspace:Workspace',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Workspace':
"""
Get an existing Workspace resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description for the Virtual Desktop Workspace.
:param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace.
:param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _WorkspaceState.__new__(_WorkspaceState)
__props__.__dict__["description"] = description
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
return Workspace(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
A friendly name for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
| [((2341, 2380), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceGroupName"""'}), "(name='resourceGroupName')\n", (2354, 2380), False, 'import pulumi\n'), ((3215, 3249), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""friendlyName"""'}), "(name='friendlyName')\n", (3228, 3249), False, 'import pulumi\n'), ((7159, 7193), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""friendlyName"""'}), "(name='friendlyName')\n", (7172, 7193), False, 'import pulumi\n'), ((8349, 8388), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceGroupName"""'}), "(name='resourceGroupName')\n", (8362, 8388), False, 'import pulumi\n'), ((17848, 17882), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""friendlyName"""'}), "(name='friendlyName')\n", (17861, 17882), False, 'import pulumi\n'), ((18640, 18679), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceGroupName"""'}), "(name='resourceGroupName')\n", (18653, 18679), False, 'import pulumi\n'), ((1816, 1880), 'pulumi.set', 'pulumi.set', (['__self__', '"""resource_group_name"""', 'resource_group_name'], {}), "(__self__, 'resource_group_name', resource_group_name)\n", (1826, 1880), False, 'import pulumi\n'), ((2651, 2690), 'pulumi.get', 'pulumi.get', (['self', '"""resource_group_name"""'], {}), "(self, 'resource_group_name')\n", (2661, 2690), False, 'import pulumi\n'), ((2793, 2839), 'pulumi.set', 'pulumi.set', (['self', '"""resource_group_name"""', 'value'], {}), "(self, 'resource_group_name', value)\n", (2803, 2839), False, 'import pulumi\n'), ((3028, 3059), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (3038, 3059), False, 'import pulumi\n'), ((3156, 3194), 'pulumi.set', 'pulumi.set', (['self', '"""description"""', 'value'], {}), "(self, 'description', value)\n", (3166, 3194), False, 'import pulumi\n'), ((3408, 3441), 'pulumi.get', 'pulumi.get', (['self', '"""friendly_name"""'], {}), "(self, 'friendly_name')\n", (3418, 3441), False, 'import pulumi\n'), ((3542, 3582), 'pulumi.set', 'pulumi.set', (['self', '"""friendly_name"""', 'value'], {}), "(self, 'friendly_name', value)\n", (3552, 3582), False, 'import pulumi\n'), ((3853, 3881), 'pulumi.get', 'pulumi.get', (['self', '"""location"""'], {}), "(self, 'location')\n", (3863, 3881), False, 'import pulumi\n'), ((3972, 4007), 'pulumi.set', 'pulumi.set', (['self', '"""location"""', 'value'], {}), "(self, 'location', value)\n", (3982, 4007), False, 'import pulumi\n'), ((4246, 4270), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (4256, 4270), False, 'import pulumi\n'), ((4353, 4384), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (4363, 4384), False, 'import pulumi\n'), ((4590, 4614), 'pulumi.get', 'pulumi.get', (['self', '"""tags"""'], {}), "(self, 'tags')\n", (4600, 4614), False, 'import pulumi\n'), ((4725, 4756), 'pulumi.set', 'pulumi.set', (['self', '"""tags"""', 'value'], {}), "(self, 'tags', value)\n", (4735, 4756), False, 'import pulumi\n'), ((6972, 7003), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (6982, 7003), False, 'import pulumi\n'), ((7100, 7138), 'pulumi.set', 'pulumi.set', (['self', '"""description"""', 'value'], {}), "(self, 'description', value)\n", (7110, 7138), False, 'import pulumi\n'), ((7352, 7385), 'pulumi.get', 'pulumi.get', (['self', '"""friendly_name"""'], {}), "(self, 'friendly_name')\n", (7362, 7385), False, 'import pulumi\n'), ((7486, 7526), 'pulumi.set', 'pulumi.set', (['self', 
'"""friendly_name"""', 'value'], {}), "(self, 'friendly_name', value)\n", (7496, 7526), False, 'import pulumi\n'), ((7797, 7825), 'pulumi.get', 'pulumi.get', (['self', '"""location"""'], {}), "(self, 'location')\n", (7807, 7825), False, 'import pulumi\n'), ((7916, 7951), 'pulumi.set', 'pulumi.set', (['self', '"""location"""', 'value'], {}), "(self, 'location', value)\n", (7926, 7951), False, 'import pulumi\n'), ((8190, 8214), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (8200, 8214), False, 'import pulumi\n'), ((8297, 8328), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (8307, 8328), False, 'import pulumi\n'), ((8669, 8708), 'pulumi.get', 'pulumi.get', (['self', '"""resource_group_name"""'], {}), "(self, 'resource_group_name')\n", (8679, 8708), False, 'import pulumi\n'), ((8821, 8867), 'pulumi.set', 'pulumi.set', (['self', '"""resource_group_name"""', 'value'], {}), "(self, 'resource_group_name', value)\n", (8831, 8867), False, 'import pulumi\n'), ((9073, 9097), 'pulumi.get', 'pulumi.get', (['self', '"""tags"""'], {}), "(self, 'tags')\n", (9083, 9097), False, 'import pulumi\n'), ((9208, 9239), 'pulumi.set', 'pulumi.set', (['self', '"""tags"""', 'value'], {}), "(self, 'tags', value)\n", (9218, 9239), False, 'import pulumi\n'), ((17796, 17827), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (17806, 17827), False, 'import pulumi\n'), ((18042, 18075), 'pulumi.get', 'pulumi.get', (['self', '"""friendly_name"""'], {}), "(self, 'friendly_name')\n", (18052, 18075), False, 'import pulumi\n'), ((18337, 18365), 'pulumi.get', 'pulumi.get', (['self', '"""location"""'], {}), "(self, 'location')\n", (18347, 18365), False, 'import pulumi\n'), ((18595, 18619), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (18605, 18619), False, 'import pulumi\n'), ((18951, 18990), 'pulumi.get', 'pulumi.get', (['self', '"""resource_group_name"""'], {}), "(self, 'resource_group_name')\n", (18961, 18990), False, 'import pulumi\n'), ((19183, 19207), 'pulumi.get', 'pulumi.get', (['self', '"""tags"""'], {}), "(self, 'tags')\n", (19193, 19207), False, 'import pulumi\n'), ((1929, 1977), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (1939, 1977), False, 'import pulumi\n'), ((2028, 2080), 'pulumi.set', 'pulumi.set', (['__self__', '"""friendly_name"""', 'friendly_name'], {}), "(__self__, 'friendly_name', friendly_name)\n", (2038, 2080), False, 'import pulumi\n'), ((2126, 2168), 'pulumi.set', 'pulumi.set', (['__self__', '"""location"""', 'location'], {}), "(__self__, 'location', location)\n", (2136, 2168), False, 'import pulumi\n'), ((2210, 2244), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (2220, 2244), False, 'import pulumi\n'), ((2286, 2320), 'pulumi.set', 'pulumi.set', (['__self__', '"""tags"""', 'tags'], {}), "(__self__, 'tags', tags)\n", (2296, 2320), False, 'import pulumi\n'), ((6271, 6319), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (6281, 6319), False, 'import pulumi\n'), ((6370, 6422), 'pulumi.set', 'pulumi.set', (['__self__', '"""friendly_name"""', 'friendly_name'], {}), "(__self__, 'friendly_name', friendly_name)\n", (6380, 6422), False, 'import pulumi\n'), ((6468, 6510), 'pulumi.set', 'pulumi.set', (['__self__', '"""location"""', 'location'], {}), 
"(__self__, 'location', location)\n", (6478, 6510), False, 'import pulumi\n'), ((6552, 6586), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (6562, 6586), False, 'import pulumi\n'), ((6643, 6707), 'pulumi.set', 'pulumi.set', (['__self__', '"""resource_group_name"""', 'resource_group_name'], {}), "(__self__, 'resource_group_name', resource_group_name)\n", (6653, 6707), False, 'import pulumi\n'), ((6749, 6783), 'pulumi.set', 'pulumi.set', (['__self__', '"""tags"""', 'tags'], {}), "(__self__, 'tags', tags)\n", (6759, 6783), False, 'import pulumi\n'), ((14064, 14088), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (14086, 14088), False, 'import pulumi\n'), ((17119, 17148), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (17141, 17148), False, 'import pulumi\n')] |
betatim/jupyanno | jupyanno/sheets.py | 11fbb1825c8e6966260620758768e0e1fa5cecc9 | """Code for reading and writing results to google sheets"""
from bs4 import BeautifulSoup
import requests
import warnings
import json
import pandas as pd
from six.moves.urllib.parse import urlparse, parse_qs
from six.moves.urllib.request import urlopen
_CELLSET_ID = "AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4"
def get_task_sheet(in_task):
return get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID)
def get_sheet_as_df(base_url, kk, columns="A:AG"):
"""
    Fetches the sheet values and returns them as a pandas DataFrame,
    using the first row of the sheet as the column names.
    :return: pandas.DataFrame of the sheet contents
"""
try:
# TODO: we should probably get the whole sheet
all_vals = "{base_url}/{cols}?key={kk}".format(base_url=base_url,
cols=columns,
kk=kk)
t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[
'values']
frow = t_data.pop(0)
return pd.DataFrame([
dict([(key, '' if idx >= len(irow) else irow[idx])
for idx, key in enumerate(frow)]) for irow in
t_data])
except IOError as e:
warnings.warn(
'Sheet could not be accessed, check internet connectivity, \
proxies and permissions: {}'.format(
e))
return pd.DataFrame([{}])
def sheet_api_url(sheet_id):
return "https://sheets.googleapis.com/v4/spreadsheets/{id}/values".format(
id=sheet_id)
def get_questions(in_url):
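    # Scrape the form page and map each question's label to its entry.* field name.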
res = urlopen(in_url)
soup = BeautifulSoup(res.read(), 'html.parser')
def get_names(f):
return [v for k, v in f.attrs.items() if 'label' in k]
def get_name(f):
return get_names(f)[0] if len(
get_names(f)) > 0 else 'unknown'
all_questions = soup.form.findChildren(
attrs={'name': lambda x: x and x.startswith('entry.')})
return {get_name(q): q['name'] for q in all_questions}
def submit_response(form_url, cur_questions, verbose=False, **answers):
submit_url = form_url.replace('/viewform', '/formResponse')
form_data = {'draftResponse': [],
'pageHistory': 0}
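    # Pre-fill every entry.* field with an empty string so unanswered questions are still submitted.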
for v in cur_questions.values():
form_data[v] = ''
for k, v in answers.items():
if k in cur_questions:
form_data[cur_questions[k]] = v
else:
warnings.warn('Unknown Question: {}'.format(k), RuntimeWarning)
if verbose:
print(form_data)
user_agent = {'Referer': form_url,
'User-Agent': "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537\
.36 (KHTML, like Gecko) Chrome/28.0.1500.52 Safari/537.36"}
return requests.post(submit_url, data=form_data, headers=user_agent)
| [((1532, 1547), 'six.moves.urllib.request.urlopen', 'urlopen', (['in_url'], {}), '(in_url)\n', (1539, 1547), False, 'from six.moves.urllib.request import urlopen\n'), ((2681, 2742), 'requests.post', 'requests.post', (['submit_url'], {'data': 'form_data', 'headers': 'user_agent'}), '(submit_url, data=form_data, headers=user_agent)\n', (2694, 2742), False, 'import requests\n'), ((1343, 1361), 'pandas.DataFrame', 'pd.DataFrame', (['[{}]'], {}), '([{}])\n', (1355, 1361), True, 'import pandas as pd\n'), ((864, 881), 'six.moves.urllib.request.urlopen', 'urlopen', (['all_vals'], {}), '(all_vals)\n', (871, 881), False, 'from six.moves.urllib.request import urlopen\n')] |
zhou7rui/algorithm | sorting/python/max_heap.py | 9b5500ac3d8bdfd223bf9aec55e68675f2df7c59 | # -*- coding: utf-8 -*-
'''
Max heap implementation
98
/ \
96 84
/ \ / \
92 82 78 47
/ \ / \ / \ / \
33 26 51 85 50 15 44 60
/ \ / \ / \ / \ / \ / \ / \ / \
40 51 98 51 7 17 94 82 32 21 64 60 7 44 63 63
'''
import random
class Maxheap(object):
def __init__(self,cpacity,arr = None):
self.data = [None] * (cpacity + 1)
self.cpacity = cpacity
if arr is None:
self.count = 0
else:
for i in range(0,cpacity):
self.data[i + 1]= arr[i]
self.count = cpacity
            for i in range(self.count // 2, 0, -1):
self.__shifDown(i)
def size(self):
return self.count
def isEmpty(self):
return self.count == 0
def __shiftUp(self,k):
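        # Sift the element at index k upwards, swapping it with its parent (k // 2) while it is larger.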
while k > 1 and self.data[k] > self.data[int(k / 2)]:
self.data[k],self.data[int(k / 2)] = self.data[int(k / 2)], self.data[k]
k =int(k/2)
def insert(self,data):
self.data[self.count + 1] = data
self.count += 1
self.__shiftUp(self.count)
def __shifDown(self,k):
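        # Sift the element at index k downwards, swapping it with its larger child until the heap property holds.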
while k * 2 <= self.count:
j = k * 2
if self.count >= j + 1 and self.data[j + 1] > self.data[j]:
j += 1
if self.data[k] > self.data[j]:
break
self.data[k], self.data[j] = self.data[j],self.data[k]
k = j
def extractMax(self):
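        # Swap the root with the last element, shrink the heap, then sift the new root down.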
ret = self.data[1]
self.data[1], self.data[self.count] = self.data[self.count], self.data[1]
self.count -= 1
self.__shifDown(1)
return ret
if __name__ == '__main__':
N = 31
M = 100
heap = Maxheap(N)
for i in range(0,N):
k = random.randint(1, M)
heap.insert(k)
# arr = [random.randint(1,M) for i in range(N)]
# heap = Maxheap(len(arr),arr)
print(heap.size())
print(heap.data)
print(heap.extractMax())
| [((1986, 2006), 'random.randint', 'random.randint', (['(1)', 'M'], {}), '(1, M)\n', (2000, 2006), False, 'import random\n')] |
greipfrut/pdftohtml5canvas | ink2canvas/svg/Use.py | bd4b829a5fd02b503e6b32c268b265daa92e92e5 | from ink2canvas.svg.AbstractShape import AbstractShape
class Use(AbstractShape):
def drawClone(self):
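        # Resolve the element referenced by xlink:href and draw it in place of this <use> node.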
drawables = self.rootTree.getDrawable()
OriginName = self.getCloneId()
OriginObject = self.rootTree.searchElementById(OriginName,drawables)
OriginObject.runDraw()
def draw(self, isClip=False):
if self.hasTransform():
transMatrix = self.getTransform()
self.canvasContext.transform(*transMatrix)
self.drawClone()
def getCloneId(self):
return self.attr("href","xlink")[1:] | [] |
HanSooLim/DIL-Project | docs/source/tutorial/code/read_csv.py | 069fa7e35a2e1edfff30dc2540d9b87f5db95dde | import pandas
datas = pandas.read_csv("../../Sample/example_dataset.csv", index_col=0)
print(datas)
| [((23, 87), 'pandas.read_csv', 'pandas.read_csv', (['"""../../Sample/example_dataset.csv"""'], {'index_col': '(0)'}), "('../../Sample/example_dataset.csv', index_col=0)\n", (38, 87), False, 'import pandas\n')] |
rghose/lol3 | app.py | c902e61bd5d69c541b46c834a5183e4da8eec591 | from flask import *
app = Flask(__name__)
import botty
# ----------------------------------
@app.route("/", methods=['GET', 'POST'])
def hello():
if request.method == 'POST':
data = request.form["query"]
return render_template("index.html",data=data)
return render_template("main.html")
# -----------------------------------
# -----------------------------------
@app.route("/request", methods=['POST'])
def respond():
data = request.form["data"]
return botty.botty_get_response(data)
# -----------------------------------
if __name__ == "__main__":
app.debug = True
app.run(host="0.0.0.0")
| [((495, 525), 'botty.botty_get_response', 'botty.botty_get_response', (['data'], {}), '(data)\n', (519, 525), False, 'import botty\n')] |
metarom-quality/gooseberry | config.py | 544503c52edd360a53d09f69ea6b4a0645aa617a | #!/usr/bin/env python3
import os
DATABASE="/home/tomate/Warehouse/syte/meta.db"
XLSDIR = "/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/"
temp = [i for i in next(os.walk(XLSDIR))[2] if i.endswith("xlsx") or i.endswith("xls")]
flist = {}
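# Key each workbook by its leading formula code (the text before the first space, dash or dot),
# skipping temp (~), PR and FAB files.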
for i in temp:
name = i.split(" ")[0].split("-")[0].split(".")[0]
if name.startswith("~") or name.startswith("PR") or name.startswith("FAB"):
continue
else:
flist[name] = i
| [((174, 189), 'os.walk', 'os.walk', (['XLSDIR'], {}), '(XLSDIR)\n', (181, 189), False, 'import os\n')] |
markostrajkov/range-requests-proxy | setup.py | 74d4bfee93098854c7b9f723c03c2316e729f295 | #!/usr/bin/env python
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass into py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
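        # Import pytest only when the tests actually run, so setup.py loads without the test dependencies installed.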
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name='range-requests-proxy',
version='0.1',
description='Asynchronous HTTP proxy for HTTP Range Requests',
author='Marko Trajkov',
author_email='[email protected]',
cmdclass={'test': PyTest},
tests_require=['pytest>=2.8.0', 'mock==2.0.0'],
install_requires=['tornado==4.4.1', 'pycurl==7.43.0'],
packages=['rangerequestsproxy'],
license='BSD',
url='https://github.com/markostrajkov/range-requests-proxy',
)
| [((591, 1035), 'setuptools.setup', 'setup', ([], {'name': '"""range-requests-proxy"""', 'version': '"""0.1"""', 'description': '"""Asynchronous HTTP proxy for HTTP Range Requests"""', 'author': '"""Marko Trajkov"""', 'author_email': '"""[email protected]"""', 'cmdclass': "{'test': PyTest}", 'tests_require': "['pytest>=2.8.0', 'mock==2.0.0']", 'install_requires': "['tornado==4.4.1', 'pycurl==7.43.0']", 'packages': "['rangerequestsproxy']", 'license': '"""BSD"""', 'url': '"""https://github.com/markostrajkov/range-requests-proxy"""'}), "(name='range-requests-proxy', version='0.1', description=\n 'Asynchronous HTTP proxy for HTTP Range Requests', author=\n 'Marko Trajkov', author_email='[email protected]', cmdclass={\n 'test': PyTest}, tests_require=['pytest>=2.8.0', 'mock==2.0.0'],\n install_requires=['tornado==4.4.1', 'pycurl==7.43.0'], packages=[\n 'rangerequestsproxy'], license='BSD', url=\n 'https://github.com/markostrajkov/range-requests-proxy')\n", (596, 1035), False, 'from setuptools import setup\n'), ((268, 304), 'setuptools.command.test.test.initialize_options', 'TestCommand.initialize_options', (['self'], {}), '(self)\n', (298, 304), True, 'from setuptools.command.test import test as TestCommand\n'), ((376, 410), 'setuptools.command.test.test.finalize_options', 'TestCommand.finalize_options', (['self'], {}), '(self)\n', (404, 410), True, 'from setuptools.command.test import test as TestCommand\n'), ((535, 564), 'pytest.main', 'pytest.main', (['self.pytest_args'], {}), '(self.pytest_args)\n', (546, 564), False, 'import pytest\n'), ((573, 588), 'sys.exit', 'sys.exit', (['errno'], {}), '(errno)\n', (581, 588), False, 'import sys\n')] |
kmaehashi/pytorch-pfn-extras | tests/pytorch_pfn_extras_tests/onnx/test_load_model.py | 70b5db0dad8a8e342cc231e8a18c6f32ce250d1c | import os
import pytest
import torch
import pytorch_pfn_extras.onnx as tou
from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net
@pytest.mark.filterwarnings("ignore:Named tensors .* experimental:UserWarning")
def test_onnx_load_model():
model = Net()
outdir = "out/load_model_test"
tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir,
training=True, do_constant_folding=False)
tou.load_model(os.path.join(outdir, "model.onnx"))
@pytest.mark.filterwarnings("ignore:.*ONNX contains stripped .*:UserWarning")
def test_stripped_onnx_load_model():
model = Net()
outdir = "out/stripped_load_model_test"
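    # strip_large_tensor_data=True exports the ONNX file with large tensor payloads stripped,
    # which is why the "ONNX contains stripped" warning is ignored above.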
tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir,
strip_large_tensor_data=True, training=True,
do_constant_folding=False)
tou.load_model(os.path.join(outdir, "model.onnx"))
| [((153, 231), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Named tensors .* experimental:UserWarning"""'], {}), "('ignore:Named tensors .* experimental:UserWarning')\n", (179, 231), False, 'import pytest\n'), ((502, 578), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:.*ONNX contains stripped .*:UserWarning"""'], {}), "('ignore:.*ONNX contains stripped .*:UserWarning')\n", (528, 578), False, 'import pytest\n'), ((272, 277), 'tests.pytorch_pfn_extras_tests.onnx.test_export_testcase.Net', 'Net', ([], {}), '()\n', (275, 277), False, 'from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net\n'), ((628, 633), 'tests.pytorch_pfn_extras_tests.onnx.test_export_testcase.Net', 'Net', ([], {}), '()\n', (631, 633), False, 'from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net\n'), ((344, 368), 'torch.rand', 'torch.rand', (['(1)', '(1)', '(28)', '(28)'], {}), '(1, 1, 28, 28)\n', (354, 368), False, 'import torch\n'), ((463, 497), 'os.path.join', 'os.path.join', (['outdir', '"""model.onnx"""'], {}), "(outdir, 'model.onnx')\n", (475, 497), False, 'import os\n'), ((709, 733), 'torch.rand', 'torch.rand', (['(1)', '(1)', '(28)', '(28)'], {}), '(1, 1, 28, 28)\n', (719, 733), False, 'import torch\n'), ((882, 916), 'os.path.join', 'os.path.join', (['outdir', '"""model.onnx"""'], {}), "(outdir, 'model.onnx')\n", (894, 916), False, 'import os\n')] |
huzidabanzhang/Python | validate/v1/base.py | 7b304290e5be7db4bce253edb069a12dcbc3c998 | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
'''
@Description: Database validator
@Author: Zpp
@Date: 2020-05-28 13:44:29
@LastEditors: Zpp
@LastEditTime: 2020-05-28 14:02:02
'''
params = {
    # Fields to validate
'fields': {
'type': {
'name': '导出类型',
'type': 'int',
'between': [1, 2, 3],
'required': True
},
'document': {
'name': '数据库文件',
'type': 'file',
'required': True,
'msg': '请选择上传数据库文件'
},
'admin_id': {
'name': '管理员编号',
'type': 'str',
'required': True
},
'time': {
'name': '查询时间',
'type': 'str',
'required': True
}
},
    # Export the database
'Export': ['type'],
    # Import the database
'Import': ['document'],
    # Clear on home page login
'Login': ['admin_id', 'time']
}
| [] |
axbaretto/mxnet | example/speech_recognition/stt_layer_slice.py | 5f593885356ff6d14f5519fa18e79b944beb51cd | import mxnet as mx
def slice_symbol_to_seq_symobls(net, seq_len, axis=1, squeeze_axis=True):
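    # Split the input symbol into seq_len per-time-step outputs along `axis` and return them as a list.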
net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis)
hidden_all = []
for seq_index in range(seq_len):
hidden_all.append(net[seq_index])
net = hidden_all
return net
| [((105, 198), 'mxnet.sym.SliceChannel', 'mx.sym.SliceChannel', ([], {'data': 'net', 'num_outputs': 'seq_len', 'axis': 'axis', 'squeeze_axis': 'squeeze_axis'}), '(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=\n squeeze_axis)\n', (124, 198), True, 'import mxnet as mx\n')] |
fergalmoran/dss.api | api/auth.py | d1b9fb674b6dbaee9b46b9a3daa2027ab8d28073 | import datetime
import json
from calendar import timegm
from urllib.parse import parse_qsl
import requests
from allauth.socialaccount import models as aamodels
from requests_oauthlib import OAuth1
from rest_framework import parsers, renderers
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework_jwt.settings import api_settings
from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler
from dss import settings
from spa.models import UserProfile
from spa.models.socialaccountlink import SocialAccountLink
def _temp_reverse_user(uid, provider, access_token, access_token_secret, payload):
"""
    Do some magic here to find the user account and deprecate psa:
    1. Look for the account in SocialAccountLink.
    2. Fall back to allauth's SocialAccount and create the link if needed.
"""
user = None
try:
sa = SocialAccountLink.objects.get(social_id=uid)
sa.type = provider
sa.social_id = uid
sa.access_token = access_token
sa.access_token_secret = access_token_secret
sa.provider_data = payload
sa.save()
user = UserProfile.objects.get(id=sa.user.id)
except SocialAccountLink.DoesNotExist:
# try allauth
try:
aa = aamodels.SocialAccount.objects.get(uid=uid)
try:
user = UserProfile.objects.get(user__id=aa.user_id)
except UserProfile.DoesNotExist:
print('Need to create UserProfile')
# we got an allauth, create the SocialAccountLink
sa = SocialAccountLink()
sa.user = user
sa.social_id = aa.uid
sa.type = aa.provider
sa.access_token = access_token
sa.access_token_secret = access_token_secret
sa.provider_data = payload
sa.save()
except aamodels.SocialAccount.DoesNotExist:
print('Need to create social model')
return user if user else None
class SocialLoginHandler(APIView):
"""View to authenticate users through social media."""
permission_classes = (AllowAny,)
def post(self, request):
uid = None
backend = request.query_params.get('backend')
user = None
if backend in ['twitter']:
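            # Two-step OAuth1 flow: with an oauth_token/oauth_verifier we exchange them for an access
            # token; otherwise we obtain a new request token and hand it back to the client.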
request_token_url = 'https://api.twitter.com/oauth/request_token'
access_token_url = 'https://api.twitter.com/oauth/access_token'
access_token = ""
access_token_secret = ""
if request.data.get('oauth_token') and request.data.get('oauth_verifier'):
auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY,
client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET,
resource_owner_key=request.data.get('oauth_token'),
verifier=request.data.get('oauth_verifier'))
r = requests.post(access_token_url, auth=auth)
profile = dict(parse_qsl(r.text))
payload = json.dumps(profile)
uid = profile.get('user_id')
access_token = profile.get('oauth_token')
access_token_secret = profile.get('oauth_token_secret')
user = _temp_reverse_user(uid, 'twitter', access_token, access_token_secret, payload)
else:
oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY,
client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET,
callback_uri=settings.TWITTER_CALLBACK_URL)
r = requests.post(request_token_url, auth=oauth)
access_token = dict(parse_qsl(r.text))
return Response(access_token)
elif backend in ['facebook']:
access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token'
graph_api_url = 'https://graph.facebook.com/v2.3/me'
access_token = ""
access_token_secret = ""
params = {
'client_id': request.data.get('clientId'),
'redirect_uri': request.data.get('redirectUri'),
'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET,
'code': request.data.get('code')
}
# Step 1. Exchange authorization code for access token.
r = requests.get(access_token_url, params=params)
token = json.loads(r.text)
# Step 2. Retrieve information about the current user.
r = requests.get(graph_api_url, params=token)
profile = json.loads(r.text)
access_token = token.get('access_token')
uid = profile.get('id')
user = _temp_reverse_user(uid, 'facebook', access_token, access_token_secret, r.text)
elif backend in ['google']:
access_token_url = 'https://accounts.google.com/o/oauth2/token'
people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'
access_token = ""
access_token_secret = ""
payload = dict(client_id=request.data.get('clientId'),
redirect_uri=request.data.get('redirectUri'),
client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET,
code=request.data.get('code'),
grant_type='authorization_code')
# Step 1. Exchange authorization code for access token.
r = requests.post(access_token_url, data=payload)
token = json.loads(r.text)
headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])}
# Step 2. Retrieve information about the current user.
r = requests.get(people_api_url, headers=headers)
profile = json.loads(r.text)
uid = profile.get('sub')
user = _temp_reverse_user(uid, 'google', access_token, access_token_secret, r.text)
if uid is not None and user is not None:
if not user.user.is_active:
return Response({
'status': 'Unauthorized',
'message': 'User account disabled'
}, status=status.HTTP_401_UNAUTHORIZED)
payload = jwt_payload_handler(user.user)
if api_settings.JWT_ALLOW_REFRESH:
payload['orig_iat'] = timegm(
datetime.datetime.utcnow().utctimetuple()
)
response_data = {
'token': jwt_encode_handler(payload),
'session': user.get_session_id()
}
return Response(response_data)
return Response({
'status': 'Bad request',
'message': 'Authentication could not be performed with received data.'
}, status=status.HTTP_400_BAD_REQUEST)
class ObtainUser(APIView):
throttle_classes = ()
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = AuthTokenSerializer
model = Token
def post(self, request):
return self.get(request)
def get(self, request):
if request.user.is_authenticated():
return Response(
status=status.HTTP_200_OK, data={
'id': request.user.id,
'name': request.user.username,
'session': request.user.userprofile.get_session_id(),
'slug': request.user.userprofile.slug,
'userRole': 'user',
})
else:
return Response(status=status.HTTP_401_UNAUTHORIZED)
| [((1090, 1134), 'spa.models.socialaccountlink.SocialAccountLink.objects.get', 'SocialAccountLink.objects.get', ([], {'social_id': 'uid'}), '(social_id=uid)\n', (1119, 1134), False, 'from spa.models.socialaccountlink import SocialAccountLink\n'), ((1349, 1387), 'spa.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'id': 'sa.user.id'}), '(id=sa.user.id)\n', (1372, 1387), False, 'from spa.models import UserProfile\n'), ((6887, 7039), 'rest_framework.response.Response', 'Response', (["{'status': 'Bad request', 'message':\n 'Authentication could not be performed with received data.'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'status': 'Bad request', 'message':\n 'Authentication could not be performed with received data.'}, status=\n status.HTTP_400_BAD_REQUEST)\n", (6895, 7039), False, 'from rest_framework.response import Response\n'), ((6475, 6505), 'rest_framework_jwt.utils.jwt_payload_handler', 'jwt_payload_handler', (['user.user'], {}), '(user.user)\n', (6494, 6505), False, 'from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler\n'), ((6847, 6870), 'rest_framework.response.Response', 'Response', (['response_data'], {}), '(response_data)\n', (6855, 6870), False, 'from rest_framework.response import Response\n'), ((7954, 7999), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), '(status=status.HTTP_401_UNAUTHORIZED)\n', (7962, 7999), False, 'from rest_framework.response import Response\n'), ((1483, 1526), 'allauth.socialaccount.models.SocialAccount.objects.get', 'aamodels.SocialAccount.objects.get', ([], {'uid': 'uid'}), '(uid=uid)\n', (1517, 1526), True, 'from allauth.socialaccount import models as aamodels\n'), ((1788, 1807), 'spa.models.socialaccountlink.SocialAccountLink', 'SocialAccountLink', ([], {}), '()\n', (1805, 1807), False, 'from spa.models.socialaccountlink import SocialAccountLink\n'), ((3121, 3163), 'requests.post', 'requests.post', (['access_token_url'], {'auth': 'auth'}), '(access_token_url, auth=auth)\n', (3134, 3163), False, 'import requests\n'), ((3240, 3259), 'json.dumps', 'json.dumps', (['profile'], {}), '(profile)\n', (3250, 3259), False, 'import json\n'), ((3579, 3719), 'requests_oauthlib.OAuth1', 'OAuth1', (['settings.SOCIAL_AUTH_TWITTER_KEY'], {'client_secret': 'settings.SOCIAL_AUTH_TWITTER_SECRET', 'callback_uri': 'settings.TWITTER_CALLBACK_URL'}), '(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.\n SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL)\n', (3585, 3719), False, 'from requests_oauthlib import OAuth1\n'), ((3797, 3841), 'requests.post', 'requests.post', (['request_token_url'], {'auth': 'oauth'}), '(request_token_url, auth=oauth)\n', (3810, 3841), False, 'import requests\n'), ((3920, 3942), 'rest_framework.response.Response', 'Response', (['access_token'], {}), '(access_token)\n', (3928, 3942), False, 'from rest_framework.response import Response\n'), ((4564, 4609), 'requests.get', 'requests.get', (['access_token_url'], {'params': 'params'}), '(access_token_url, params=params)\n', (4576, 4609), False, 'import requests\n'), ((4630, 4648), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (4640, 4648), False, 'import json\n'), ((4733, 4774), 'requests.get', 'requests.get', (['graph_api_url'], {'params': 'token'}), '(graph_api_url, params=token)\n', (4745, 4774), False, 'import requests\n'), ((4797, 4815), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (4807, 4815), False, 'import json\n'), ((6284, 6397), 
'rest_framework.response.Response', 'Response', (["{'status': 'Unauthorized', 'message': 'User account disabled'}"], {'status': 'status.HTTP_401_UNAUTHORIZED'}), "({'status': 'Unauthorized', 'message': 'User account disabled'},\n status=status.HTTP_401_UNAUTHORIZED)\n", (6292, 6397), False, 'from rest_framework.response import Response\n'), ((6735, 6762), 'rest_framework_jwt.utils.jwt_encode_handler', 'jwt_encode_handler', (['payload'], {}), '(payload)\n', (6753, 6762), False, 'from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler\n'), ((1567, 1611), 'spa.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'user__id': 'aa.user_id'}), '(user__id=aa.user_id)\n', (1590, 1611), False, 'from spa.models import UserProfile\n'), ((3195, 3212), 'urllib.parse.parse_qsl', 'parse_qsl', (['r.text'], {}), '(r.text)\n', (3204, 3212), False, 'from urllib.parse import parse_qsl\n'), ((3878, 3895), 'urllib.parse.parse_qsl', 'parse_qsl', (['r.text'], {}), '(r.text)\n', (3887, 3895), False, 'from urllib.parse import parse_qsl\n'), ((5698, 5743), 'requests.post', 'requests.post', (['access_token_url'], {'data': 'payload'}), '(access_token_url, data=payload)\n', (5711, 5743), False, 'import requests\n'), ((5764, 5782), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (5774, 5782), False, 'import json\n'), ((5951, 5996), 'requests.get', 'requests.get', (['people_api_url'], {'headers': 'headers'}), '(people_api_url, headers=headers)\n', (5963, 5996), False, 'import requests\n'), ((6019, 6037), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (6029, 6037), False, 'import json\n'), ((6619, 6645), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (6643, 6645), False, 'import datetime\n')] |
aeturnum/bcgs | bcgs/disqus_objects.py | e5ae4c9f4cdd45b47615f00581dcc3792c281ea3 | import requests
import aiohttp
from constants import API_KEY
class User(object):
def __init__(self, author_info):
# "author": {
# "about": "",
# "avatar": {
# "cache": "//a.disquscdn.com/1519942534/images/noavatar92.png",
# "isCustom": false,
# "large": {
# "cache": "//a.disquscdn.com/1519942534/images/noavatar92.png",
# "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg"
# },
# "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg",
# "small": {
# "cache": "//a.disquscdn.com/1519942534/images/noavatar32.png",
# "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg"
# }
# },
# "disable3rdPartyTrackers": false,
# "id": "5472588",
# "isAnonymous": false,
# "isPowerContributor": false,
# "isPrimary": true,
# "isPrivate": true,
# "joinedAt": "2010-11-20T04:45:33",
# "location": "",
# "name": "felix1999",
# "profileUrl": "https://disqus.com/by/felix1999/",
# "signedUrl": "",
# "url": "",
# "username": "felix1999"
# },
self._basic_info = author_info
self._detailed_info = None
async def load(self):
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
user_info = await session.get(
'https://disqus.com/api/3.0/users/details.json',
params={'user': self.id, 'api_key': API_KEY}
)
detail_json = await user_info.json()
if detail_json['code'] != 0:
print(f'Problem with getting user details from user {self.id}')
print(detail_json)
self._detailed_info = detail_json['response']
def _get_detailed_info(self):
# https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=E8Uh5l5fHZ6gD8U3KycjAIAk46f68Zw7C6eW8WSjZvCLXebZ7p0r1yrYDrLilk2F
# {
# "code": 0,
# "response": {
# "about": "",
# "avatar": {
# "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551",
# "isCustom": true,
# "large": {
# "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551",
# "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg"
# },
# "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg",
# "small": {
# "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551",
# "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg"
# }
# },
# "disable3rdPartyTrackers": false,
# "id": "137780765",
# "isAnonymous": false,
# "isPowerContributor": false,
# "isPrimary": true,
# "isPrivate": false,
# "joinedAt": "2015-01-02T18:40:14",
# "location": "",
# "name": "Bob",
# "numFollowers": 2,
# "numFollowing": 0,
# "numForumsFollowing": 0,
# "numLikesReceived": 8967,
# "numPosts": 4147,
# "profileUrl": "https://disqus.com/by/disqus_FqhLpDGmTT/",
# "rep": 3.5297520000000002,
# "reputation": 3.5297520000000002,
# "reputationLabel": "High",
# "signedUrl": "",
# "url": "",
# "username": "disqus_FqhLpDGmTT"
# }
# }
print("WARNING: auto-loading user in async version of code!!!!")
details = requests.get(
'https://disqus.com/api/3.0/users/details.json',
{'user': self.id, 'api_key': API_KEY}
)
detail_json = details.json()
if detail_json['code'] != 0:
print(f'Problem with getting user details from user {self.id}')
print(detail_json)
self._detailed_info = detail_json['response']
@property
def anonymous(self):
return 'id' not in self._basic_info
@property
def private(self):
return self.anonymous or self._basic_info.get('isPrivate')
@property
def id(self):
if self.private:
return 'Private'
return self._basic_info.get('id', 'Anonymous')
@property
def name(self):
return self._basic_info.get('name')
@property
def username(self):
return self._basic_info.get('username')
@property
def location(self):
return self._basic_info.get('location')
@property
def joined_at(self):
return self._basic_info.get('joinedAt')
@property
def profile_url(self):
return self._basic_info.get('profileUrl')
@property
def total_posts(self):
if self._detailed_info is None:
self._get_detailed_info()
return self._detailed_info.get('numPosts')
@property
def total_likes(self):
if self._detailed_info is None:
self._get_detailed_info()
return self._detailed_info.get('numLikesReceived')
@property
def user_info_row(self):
return [
self.id,
self.name,
self.username,
self.total_posts,
self.total_likes,
self.location,
self.joined_at,
self.profile_url
]
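# Illustrative usage sketch (not part of the original module); the author dict below is a
# hypothetical minimal payload of the kind Disqus returns for a post's "author" field.
if __name__ == '__main__':
    example_author = {'id': '5472588', 'name': 'felix1999', 'username': 'felix1999'}
    u = User(example_author)
    # accessing total_posts/total_likes would trigger a network call to the Disqus details API
    print(u.id, u.name, u.username, u.private)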
| [((4374, 4479), 'requests.get', 'requests.get', (['"""https://disqus.com/api/3.0/users/details.json"""', "{'user': self.id, 'api_key': API_KEY}"], {}), "('https://disqus.com/api/3.0/users/details.json', {'user': self\n .id, 'api_key': API_KEY})\n", (4386, 4479), False, 'import requests\n'), ((1751, 1789), 'aiohttp.TCPConnector', 'aiohttp.TCPConnector', ([], {'verify_ssl': '(False)'}), '(verify_ssl=False)\n', (1771, 1789), False, 'import aiohttp\n')] |
LtGlahn/estimat_gulstripe | nvdbgeotricks.py | 8bb93d52131bdda9846810dbd6bac7f872377859 | """
A collection of helper functions that use the nvdbapiv3 functions to do useful things, e.g. saving geographic datasets
These helper functions require a working installation of geopandas, shapely and a number of other things that must
be installed separately. Historically some of these libraries could occasionally be fiddly to install, or have
version conflicts with each other, especially on Windows. That kind of trouble is (mostly) a thing of the past.
The recommendation is nevertheless to use an (ana)conda installation in a dedicated "environment". This is good code
hygiene and keeps mess to a minimum, and not least: any mess can easily be repaired by creating a new "environment"
without affecting your entire Python installation.
"""
import re
import pdb
from shapely import wkt
# from shapely.ops import unary_union
import pandas as pd
import geopandas as gpd
from datetime import datetime
import nvdbapiv3
from apiforbindelse import apiforbindelse
def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True):
"""
    Saves the NVDB road network and the specified object types to a geopackage
    ARGUMENTS
        objekttyper: List of the object types you want to save
    KEYWORDS
        mittfilter=None : Dictionary with a filter for the search object in nvdbapiv3.py, for example { 'kommune' : 5001 }
        The same filter is used for both the road network and the feature data
        vegnett=True : Bool, default=True. Whether or not to include road network data
        vegsegmenter=False : Bool, default=False. Whether to repeat the object split into road segments
        geometri=True : Bool, default=True. Whether to fetch geometry from the object's own geometry (if it exists)
        If you want to present road objects based on their placement along the road, use the combination
        vegsegmenter=True, geometri=False
    RETURNS
        None
"""
if not '.gpkg' in filnavn:
filnavn = filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg'
if not isinstance(objekttyper, list ):
objekttyper = [ objekttyper ]
for enObjTypeId in objekttyper:
enObjTypeId = int( enObjTypeId )
sok = nvdbapiv3.nvdbFagdata( enObjTypeId )
if mittfilter:
sok.filter( mittfilter )
stat = sok.statistikk()
objtypenavn = sok.objektTypeDef['navn']
print( 'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn )
lagnavn = 'type' + str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() )
rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri )
if len( rec ) > 0:
mindf = pd.DataFrame( rec )
            # Need a little trick here to avoid name collisions
kolonner = list( mindf.columns )
lowerkolonner = [ x.lower() for x in kolonner ]
# Duplicate element indices in list
# Using list comprehension + list slicing
# https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/
res = [idx for idx, val in enumerate(lowerkolonner) if val in lowerkolonner[:idx]]
for ii, dublett in enumerate( res):
mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_' + str( ii+1 ) }, inplace=True )
mindf['geometry'] = mindf['geometri'].apply( wkt.loads )
minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )
            # must drop the 'vegsegmenter' column if vegsegmenter=False was used
if 'vegsegmenter' in minGdf.columns:
minGdf.drop( 'vegsegmenter', 1, inplace=True)
minGdf.drop( 'geometri', 1, inplace=True)
minGdf.to_file( filnavn, layer=lagnavn, driver="GPKG")
else:
print( 'Ingen forekomster av', objtypenavn, 'for filter', mittfilter)
if vegnett:
veg = nvdbapiv3.nvdbVegnett()
if mittfilter:
junk = mittfilter.pop( 'egenskap', None)
junk = mittfilter.pop( 'overlapp', None)
veg.filter( mittfilter )
print( 'Henter vegnett')
rec = veg.to_records()
mindf = pd.DataFrame( rec)
mindf['geometry'] = mindf['geometri'].apply( wkt.loads )
mindf.drop( 'geometri', 1, inplace=True)
minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )
minGdf.to_file( filnavn, layer='vegnett', driver="GPKG")
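# Illustrative usage sketch (not part of the original file): dump speed limits (105) and traffic
# volumes (540) plus the road network for one municipality to a GeoPackage. The file name below
# is a hypothetical example; the 'kommune' value follows the docstring above.
def _eksempel_nvdb2gpkg():
    nvdb2gpkg([105, 540], filnavn='nvdbdata_kommune5001', mittfilter={'kommune': 5001})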
def dumpkontraktsomr( komr = [] ):
"""
    Dumps a few (hard-coded) contract areas
"""
if not komr:
komr = [ '9302 Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord' ]
komr = [ '9253 Agder elektro og veglys 2021-2024']
objliste = [ 540, # Trafikkmengde
105, # Fartsgrense
810, # Vinterdriftsklasse
482, # trafikkregistreringsstasjon
153, # Værstasjon
64, # Ferjeleie
39, # Rasteplass
48, # Fortau
199, # Trær
15, # Grasdekker
274, # Blomsterbeplanting
511, # Busker
300 , # Naturområde (ingen treff i Haugesund kontrakt)
517, # Artsrik vegkant
800, # Fremmede arter
67, # Tunnelløp
846, # Skredsikring, bremsekjegler
850 # Skredsikring, forbygning
]
objliste = []
for enkontrakt in komr:
filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt )
nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt })
def firefeltrapport( mittfilter={}):
"""
    Finds all four-lane roads in Norway, optionally within the given search criteria
    Uses the search object nvdbapiv3.nvdbVegnett from the library https://github.com/LtGlahn/nvdbapi-V3
    ARGUMENTS
        None
    KEYWORDS:
        mittfilter: Dictionary with search filters
    RETURNS
        geodataframe with the result
"""
v = nvdbapiv3.nvdbVegnett()
    # Adds a filter for only phase = V (existing road), as long as it does not conflict with another filter
if not 'vegsystemreferanse' in mittfilter.keys():
mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv'
if not 'kryssystem' in mittfilter.keys():
mittfilter['kryssystem'] = 'false'
if not 'sideanlegg' in mittfilter.keys():
mittfilter['sideanlegg'] = 'false'
v.filter( mittfilter )
    # Only driving traffic, only the top topology level, and not adskiltelop=MOT
v.filter( { 'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei' } )
data = []
vegsegment = v.nesteForekomst()
while vegsegment:
if sjekkfelt( vegsegment, felttype='firefelt'):
vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] )
vegsegment['geometri'] = vegsegment['geometri']['wkt']
vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform']
vegsegment['vegnr'] = vegsegment['vref'].split()[0]
vegsegment['vegkategori'] = vegsegment['vref'][0]
vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp']
data.append( vegsegment )
vegsegment = v.nesteForekomst()
if len( data ) > 1:
mindf = pd.DataFrame( data )
mindf['geometry'] = mindf['geometri'].apply( wkt.loads )
mindf.drop( 'geometri', 1, inplace=True)
mindf.drop( 'kontraktsområder', 1, inplace=True)
mindf.drop( 'riksvegruter', 1, inplace=True)
mindf.drop( 'href', 1, inplace=True)
mindf.drop( 'metadata', 1, inplace=True)
mindf.drop( 'kortform', 1, inplace=True)
mindf.drop( 'veglenkenummer', 1, inplace=True)
mindf.drop( 'segmentnummer', 1, inplace=True)
mindf.drop( 'startnode', 1, inplace=True)
mindf.drop( 'sluttnode', 1, inplace=True)
mindf.drop( 'referanse', 1, inplace=True)
mindf.drop( 'målemetode', 1, inplace=True)
mindf.drop( 'måledato', 1, inplace=True)
minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )
return minGdf
else:
return None
def sjekkfelt( vegsegment, felttype='firefelt' ):
"""
    Checks what kinds of lanes exist on a road segment
    ARGUMENTS:
        vegsegment - dictionary with data about a piece of the road network fetched from https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/
    KEYWORDS:
        felttype - what kind of lane type to check for. Possible values:
            firefelt (default). Assumes a four-lane road means lane numbers 1-4 are in use and are either ordinary lanes, transit (bus) lanes or reversible lanes
            (more variants will be added as needed)
    RETURNS
        boolean - True if the lanes are of the correct type
"""
svar = False
vr = 'vegsystemreferanse'
sr = 'strekning'
if felttype == 'firefelt':
if 'feltoversikt' in vegsegment.keys() and 'detaljnivå' in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljnivå']:
kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) )
if vr in vegsegment.keys():
if sr in vegsegment[vr] and 'adskilte_løp' in vegsegment[vr][sr]:
if vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and kjfelt.issuperset( { 1, 2, 3, 4}):
svar = True
                # The last clause here has occurred e.g. on Fv5724, a one-way tunnel at Oldenvatnet.
elif vegsegment[vr][sr]['adskilte_løp'] == 'Med' and len( kjfelt ) >= 2 and not kjfelt.issuperset( {1, 2} ):
svar = True
return svar
else:
raise NotImplementedError('Sjekkfelt: Sjekk for felt av type: ' + felttype + 'er ikke implementert (ennå)' )
def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]):
"""
    Returns a list of lane numbers filtered by what kind of lane code they have
    ARGUMENTS
        feltoversikt - List of lane codes for a road segment.
    KEYWORDS
        mittfilter=['vanlig', 'K', 'R' ] - List of codes for which kinds of lanes to count. See handbook v830
            Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for possible values, short version:
            'vanlig' - Completely ordinary lane, the lane number is given as an integer without any letters.
            'K' - transit (bus) lane
            'R' - reversible lane
            'S' - bicycle lane
            'H' - right-turn lane
            'V' - left-turn lane
            'B' - extra lane for toll collection
    RETURNS
        List of lane numbers where only the lanes specified by the mittfilter keyword are included
"""
data = [ ]
for felt in feltoversikt:
feltbokstav = re.findall( '[A-Za-z]', felt)
if feltbokstav:
feltbokstav = feltbokstav[0]
else:
feltbokstav = 'vanlig'
if feltbokstav in mittfilter:
feltnummer = int( re.split( '[A-Z]', felt)[0] )
data.append( feltnummer )
return data
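# Illustrative sketch (not from the original file): filtrerfeltoversikt keeps only the lane numbers
# whose codes match the filter; the example lane codes below are made up for illustration.
def _demo_filtrerfeltoversikt():
    # '1' and '2' are ordinary lanes, '3K' is a transit lane, '4S' (bicycle lane) is filtered out
    assert filtrerfeltoversikt(['1', '2', '3K', '4S']) == [1, 2, 3]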
| [((6144, 6167), 'nvdbapiv3.nvdbVegnett', 'nvdbapiv3.nvdbVegnett', ([], {}), '()\n', (6165, 6167), False, 'import nvdbapiv3\n'), ((2203, 2237), 'nvdbapiv3.nvdbFagdata', 'nvdbapiv3.nvdbFagdata', (['enObjTypeId'], {}), '(enObjTypeId)\n', (2224, 2237), False, 'import nvdbapiv3\n'), ((3956, 3979), 'nvdbapiv3.nvdbVegnett', 'nvdbapiv3.nvdbVegnett', ([], {}), '()\n', (3977, 3979), False, 'import nvdbapiv3\n'), ((4227, 4244), 'pandas.DataFrame', 'pd.DataFrame', (['rec'], {}), '(rec)\n', (4239, 4244), True, 'import pandas as pd\n'), ((4377, 4431), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['mindf'], {'geometry': '"""geometry"""', 'crs': '(5973)'}), "(mindf, geometry='geometry', crs=5973)\n", (4393, 4431), True, 'import geopandas as gpd\n'), ((5632, 5669), 'nvdbapiv3.esriSikkerTekst', 'nvdbapiv3.esriSikkerTekst', (['enkontrakt'], {}), '(enkontrakt)\n', (5657, 5669), False, 'import nvdbapiv3\n'), ((7514, 7532), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (7526, 7532), True, 'import pandas as pd\n'), ((8289, 8343), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['mindf'], {'geometry': '"""geometry"""', 'crs': '(5973)'}), "(mindf, geometry='geometry', crs=5973)\n", (8305, 8343), True, 'import geopandas as gpd\n'), ((11200, 11228), 're.findall', 're.findall', (['"""[A-Za-z]"""', 'felt'], {}), "('[A-Za-z]', felt)\n", (11210, 11228), False, 'import re\n'), ((2714, 2731), 'pandas.DataFrame', 'pd.DataFrame', (['rec'], {}), '(rec)\n', (2726, 2731), True, 'import pandas as pd\n'), ((3443, 3497), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['mindf'], {'geometry': '"""geometry"""', 'crs': '(5973)'}), "(mindf, geometry='geometry', crs=5973)\n", (3459, 3497), True, 'import geopandas as gpd\n'), ((11424, 11447), 're.split', 're.split', (['"""[A-Z]"""', 'felt'], {}), "('[A-Z]', felt)\n", (11432, 11447), False, 'import re\n'), ((1977, 1993), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1991, 1993), False, 'from datetime import datetime\n')] |
joetache4/project-euler | 019_CountingSundays.py | 82f9e25b414929d9f62d94905906ba2f57db7935 | """
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
ans: 171
"""
# set to day of week for 1 Jan 1901 (Tuesday)
dow = 2
def no_days(month, year):
if month in [0,2,4,6,7,9,11]:
return 31
elif month in [3,5,8,10]:
return 30
elif year % 400 == 0:
return 29
elif year % 100 == 0:
return 28
elif year % 4 == 0:
return 29
else:
return 28
sum = 0
for y in range(1901, 2001):
for m in range(0, 12):
if dow == 0:
sum += 1
dow = (dow + no_days(m, y)) % 7
print(sum) | [] |
aagaard/dbservice | setup.py | 47daadab307e6744ef151dd4e0aacff27dcda881 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Setup for the dbservice
"""
from setuptools import setup, find_packages
setup(
name='dbservice',
version='0.9',
description="Database service for storing meter data",
author="Søren Aagaard Mikkelsen",
author_email='[email protected]',
url='https://github.com/dbservice/dbservice',
packages=find_packages(),
package_data={'': ['static/*.*', 'templates/*.*']},
scripts=['manage.py'],
)
| [((371, 386), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (384, 386), False, 'from setuptools import setup, find_packages\n')] |
usegalaxy-no/usegalaxy | venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/facts/facts.py | 75dad095769fe918eb39677f2c887e681a747f3a | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the junos facts module.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class FactsArgs(object):
""" The arg spec for the junos facts module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
"gather_subset": dict(
default=["!config"], type="list", elements="str"
),
"config_format": dict(
default="text", choices=["xml", "text", "set", "json"]
),
"gather_network_resources": dict(type="list", elements="str"),
"available_network_resources": {"type": "bool", "default": False},
}
| [] |
ripry/umakaviewer | server/dbcls/api/resources/authenticate.py | e3df32313219d1b9d65edb6d180b2b4799d87e25 | from flask_restful import Resource, reqparse
from firebase_admin import auth as firebase_auth
from dbcls.models import User
parser = reqparse.RequestParser()
parser.add_argument('token', type=str, required=True, nullable=False)
class Authenticate(Resource):
def post(self):
try:
args = parser.parse_args()
decoded_token = firebase_auth.verify_id_token(args['token'])
except (ValueError, firebase_auth.AuthError) as e:
return {'message': f'{e}'}, 400
firebase_uid = decoded_token['uid']
user = User.query.filter_by(firebase_uid=firebase_uid).first()
if not user:
return {'message': 'user not found. You have to sign up.'}, 400
custom_token = firebase_auth.create_custom_token(firebase_uid)
return {
'custom_token': custom_token.decode(),
'display_name': user.display_name,
'contact_uri': user.contact_uri,
'roles': [role.role_type for role in user.user_roles],
}
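# Illustrative client-side sketch (not part of the original module): the resource expects a
# Firebase ID token in a 'token' form field. The URL path below is an assumption, since the
# actual route registration lives elsewhere in the project.
def _example_client_call(base_url, firebase_id_token):
    import requests
    r = requests.post(base_url + '/authenticate', data={'token': firebase_id_token})
    return r.json()  # -> {'custom_token': ..., 'display_name': ..., 'contact_uri': ..., 'roles': [...]}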
| [((136, 160), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (158, 160), False, 'from flask_restful import Resource, reqparse\n'), ((748, 795), 'firebase_admin.auth.create_custom_token', 'firebase_auth.create_custom_token', (['firebase_uid'], {}), '(firebase_uid)\n', (781, 795), True, 'from firebase_admin import auth as firebase_auth\n'), ((363, 407), 'firebase_admin.auth.verify_id_token', 'firebase_auth.verify_id_token', (["args['token']"], {}), "(args['token'])\n", (392, 407), True, 'from firebase_admin import auth as firebase_auth\n'), ((571, 618), 'dbcls.models.User.query.filter_by', 'User.query.filter_by', ([], {'firebase_uid': 'firebase_uid'}), '(firebase_uid=firebase_uid)\n', (591, 618), False, 'from dbcls.models import User\n')] |
Feiyi-Ding/2021A | GetJSONData_NLPParser.py | f599f0a21e05964fffce3dcf2d32ef70ddc3c75d | #Import required modules
import requests
import json
# Get json results for the required input
InputString = "kobe is a basketball player"
headers = {
'Content-type': 'application/json',
}
data = json.dumps({'text': InputString})
response = requests.post('http://66.76.242.198:9888/', data=data).json()
#Adding a test comment to check if the automatic git pull is working or not
#print(json.dumps(response, indent=4, sort_keys=True))
| [((275, 329), 'requests.post', 'requests.post', (['"""http://66.76.242.198:9888/"""'], {'data': 'data'}), "('http://66.76.242.198:9888/', data=data)\n", (288, 329), False, 'import requests\n')] |
Xtuden-com/language | language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sentencize the raw wikitext103."""
import tensorflow.compat.v1 as tf
app = tf.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
flags.DEFINE_string("wiki103_raw", None,
"Path to raw wikitext103 train corpus.")
flags.DEFINE_string("output_path", None,
"Path to output the processed dataset.")
FLAGS = flags.FLAGS
def main(_):
with open(FLAGS.wiki103_raw, "r") as f:
data = f.read().strip().split("\n")
data = [x.split(" . ") for x in data if x.strip() and x.strip()[0] != "="]
sentences = []
for para in data:
for sent in para:
sentences.append(sent + ".")
data = "\n".join(sentences)
data = data.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",")
data = data.replace(" \'", "\'").replace(" )", ")").replace("( ", "(")
data = data.replace(" ;", ";")
data = "\n".join([x for x in data.split("\n") if len(x.split()) > 3])
logging.info("length = %d", len(data.split("\n")))
with open(FLAGS.output_path, "w") as f:
f.write(data)
if __name__ == "__main__":
app.run(main)
| [] |
budacom/trading-bots | example_bots/any_to_any/__init__.py | 9ac362cc21ce185e7b974bf9bcc7480ff9c6b2aa | default_bot = 'example_bots.any_to_any.bot.AnyToAny'
| [] |
owenjones/CaBot | helpers.py | dd47c077b21cbcf52c0ffd2e30b47fb736a41ebc | from server import roles
def hasRole(member, roleID):
role = member.guild.get_role(roleID)
return role in member.roles
def gainedRole(before, after, roleID):
role = before.guild.get_role(roleID)
return (role not in before.roles) and (role in after.roles)
def isExplorer(ctx):
return hasRole(ctx.author, roles["explorer"])
def isNetwork(ctx):
return hasRole(ctx.author, roles["network"])
def isLeader(ctx):
return hasRole(ctx.author, roles["leader"])
def isAdmin(ctx):
return hasRole(ctx.author, roles["admin"])
def isBot(ctx):
return hasRole(ctx.author, roles["bot"])
class Colours:
DEFAULT = 0
AQUA = 1752220
GREEN = 3066993
BLUE = 3447003
PURPLE = 10181046
GOLD = 15844367
ORANGE = 15105570
RED = 15158332
GREY = 9807270
DARKER_GREY = 8359053
NAVY = 3426654
DARK_AQUA = 1146986
DARK_GREEN = 2067276
DARK_BLUE = 2123412
DARK_PURPLE = 7419530
DARK_GOLD = 12745742
DARK_ORANGE = 11027200
DARK_RED = 10038562
DARK_GREY = 9936031
LIGHT_GREY = 12370112
DARK_NAVY = 2899536
LUMINOUS_VIVID_PINK = 16580705
DARK_VIVID_PINK = 12320855
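# Illustrative sketch (not part of the original file): the predicates above are shaped as
# discord.py command checks, e.g. to restrict a command to admins. The command below is a
# hypothetical example.
#
# from discord.ext import commands
#
# @commands.command()
# @commands.check(isAdmin)
# async def admin_only(ctx):
#     await ctx.send('You are an admin.')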
| [] |
davcamer/amundsendatabuilder | databuilder/loader/file_system_neo4j_csv_loader.py | 1bd6cd5c30413640d4c377dc3c59c283e86347eb | import csv
import logging
import os
import shutil
from csv import DictWriter # noqa: F401
from pyhocon import ConfigTree, ConfigFactory # noqa: F401
from typing import Dict, Any # noqa: F401
from databuilder.job.base_job import Job
from databuilder.loader.base_loader import Loader
from databuilder.models.neo4j_csv_serde import NODE_LABEL, \
RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE
from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa: F401
from databuilder.utils.closer import Closer
LOGGER = logging.getLogger(__name__)
class FsNeo4jCSVLoader(Loader):
"""
Write node and relationship CSV file(s) that can be consumed by
Neo4jCsvPublisher.
It assumes that the record it consumes is instance of Neo4jCsvSerializable
"""
# Config keys
NODE_DIR_PATH = 'node_dir_path'
RELATION_DIR_PATH = 'relationship_dir_path'
FORCE_CREATE_DIR = 'force_create_directory'
SHOULD_DELETE_CREATED_DIR = 'delete_created_directories'
_DEFAULT_CONFIG = ConfigFactory.from_dict({
SHOULD_DELETE_CREATED_DIR: True,
FORCE_CREATE_DIR: False
})
def __init__(self):
# type: () -> None
self._node_file_mapping = {} # type: Dict[Any, DictWriter]
self._relation_file_mapping = {} # type: Dict[Any, DictWriter]
self._closer = Closer()
def init(self, conf):
# type: (ConfigTree) -> None
"""
        Initializes FsNeo4jCSVLoader by creating directories for node files
        and relationship files. Note that the directories defined in the
        configuration should not already exist unless FORCE_CREATE_DIR is set.
:param conf:
:return:
"""
conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG)
self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH)
self._relation_dir = \
conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH)
self._delete_created_dir = \
conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR)
self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR)
self._create_directory(self._node_dir)
self._create_directory(self._relation_dir)
def _create_directory(self, path):
# type: (str) -> None
"""
Validate directory does not exist, creates it, register deletion of
created directory function to Job.closer.
:param path:
:return:
"""
if os.path.exists(path):
if self._force_create_dir:
LOGGER.info('Directory exist. Deleting directory {}'.format(path))
shutil.rmtree(path)
else:
raise RuntimeError('Directory should not exist: {}'.format(path))
os.makedirs(path)
def _delete_dir():
# type: () -> None
if not self._delete_created_dir:
LOGGER.warn('Skip Deleting directory {}'.format(path))
return
LOGGER.info('Deleting directory {}'.format(path))
shutil.rmtree(path)
# Directory should be deleted after publish is finished
Job.closer.register(_delete_dir)
def load(self, csv_serializable):
# type: (Neo4jCsvSerializable) -> None
"""
Writes Neo4jCsvSerializable into CSV files.
There are multiple CSV files that this method writes.
        This is because there are not only nodes and relationships, but also
        multiple different node types and relationship types.
Common pattern for both nodes and relations:
        1. retrieve a csv row (a dict whose keys are the headers and whose
        values are the row values)
        2. use this dict to get an appropriate csv writer and write to it.
3. repeat 1 and 2
:param csv_serializable:
:return:
"""
node_dict = csv_serializable.next_node()
while node_dict:
key = (node_dict[NODE_LABEL], len(node_dict))
file_suffix = '{}_{}'.format(*key)
node_writer = self._get_writer(node_dict,
self._node_file_mapping,
key,
self._node_dir,
file_suffix)
node_writer.writerow(node_dict)
node_dict = csv_serializable.next_node()
relation_dict = csv_serializable.next_relation()
while relation_dict:
key2 = (relation_dict[RELATION_START_LABEL],
relation_dict[RELATION_END_LABEL],
relation_dict[RELATION_TYPE],
len(relation_dict))
file_suffix = '{}_{}_{}'.format(key2[0], key2[1], key2[2])
relation_writer = self._get_writer(relation_dict,
self._relation_file_mapping,
key2,
self._relation_dir,
file_suffix)
relation_writer.writerow(relation_dict)
relation_dict = csv_serializable.next_relation()
def _get_writer(self,
csv_record_dict, # type: Dict[str, Any]
file_mapping, # type: Dict[Any, DictWriter]
key, # type: Any
dir_path, # type: str
file_suffix # type: str
):
# type: (...) -> DictWriter
"""
Finds a writer based on csv record, key.
        If the writer does not exist, it creates a csv writer and updates the
        mapping.
:param csv_record_dict:
:param file_mapping:
:param key:
:param file_suffix:
:return:
"""
writer = file_mapping.get(key)
if writer:
return writer
LOGGER.info('Creating file for {}'.format(key))
file_out = open('{}/{}.csv'.format(dir_path, file_suffix), 'w')
def file_out_close():
# type: () -> None
LOGGER.info('Closing file IO {}'.format(file_out))
file_out.close()
self._closer.register(file_out_close)
writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(),
quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
file_mapping[key] = writer
return writer
def close(self):
# type: () -> None
"""
Any closeable callable registered in _closer, it will close.
:return:
"""
self._closer.close()
def get_scope(self):
# type: () -> str
return "loader.filesystem_csv_neo4j"
| [((544, 571), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (561, 571), False, 'import logging\n'), ((1026, 1113), 'pyhocon.ConfigFactory.from_dict', 'ConfigFactory.from_dict', (['{SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR: False}'], {}), '({SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR:\n False})\n', (1049, 1113), False, 'from pyhocon import ConfigTree, ConfigFactory\n'), ((1347, 1355), 'databuilder.utils.closer.Closer', 'Closer', ([], {}), '()\n', (1353, 1355), False, 'from databuilder.utils.closer import Closer\n'), ((2458, 2478), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2472, 2478), False, 'import os\n'), ((2747, 2764), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2758, 2764), False, 'import os\n'), ((3131, 3163), 'databuilder.job.base_job.Job.closer.register', 'Job.closer.register', (['_delete_dir'], {}), '(_delete_dir)\n', (3150, 3163), False, 'from databuilder.job.base_job import Job\n'), ((3038, 3057), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (3051, 3057), False, 'import shutil\n'), ((2618, 2637), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2631, 2637), False, 'import shutil\n')] |
pepsinal/python_doe_kspub | sample_program_04_02_knn.py | 65ae5c2d214f1a34fa242fee7d63453c81d56bfe | # -*- coding: utf-8 -*-
"""
@author: Hiromasa Kaneko
"""
import pandas as pd
from sklearn.neighbors import NearestNeighbors # k-NN
k_in_knn = 5 # k in k-NN
rate_of_training_samples_inside_ad = 0.96 # fraction of training samples that should fall inside the AD; used to set the AD threshold
dataset = pd.read_csv('resin.csv', index_col=0, header=0)
x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0)
# Split the data
y = dataset.iloc[:, 0] # objective (target) variable
x = dataset.iloc[:, 1:] # explanatory variables (features)
# Remove features whose standard deviation is 0
deleting_variables = x.columns[x.std() == 0]
x = x.drop(deleting_variables, axis=1)
x_prediction = x_prediction.drop(deleting_variables, axis=1)
# Autoscaling (standardization)
autoscaled_x = (x - x.mean()) / x.std()
autoscaled_x_prediction = (x_prediction - x.mean()) / x.std()
# AD (applicability domain) based on k-NN
ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # declare the AD model
ad_model.fit(autoscaled_x) # for a k-NN based AD this corresponds to storing the training x in the AD model
# In addition to each sample's distances to its k nearest neighbors, the neighbor index numbers are also returned, so two output variables are used
# For the training data each sample is included among its own k nearest neighbors and the zero self-distance must be excluded, so k_in_knn + 1 neighbors are requested
knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1)
knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # convert to a DataFrame
mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1),
                                          columns=['mean_of_knn_distance']) # mean of the k_in_knn distances excluding the sample itself
mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # save to csv; note that an existing file with the same name is overwritten
# Set the threshold so that rate_of_training_samples_inside_ad * 100 % of the training samples fall inside the AD
sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # sort by mean distance in ascending order
ad_threshold = sorted_mean_of_knn_distance_train.iloc[
round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1]
# For the training data, determine whether each sample is inside or outside the AD
inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold # TRUE only for samples inside the AD
inside_ad_flag_train.columns=['inside_ad_flag']
inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # save to csv; note that an existing file with the same name is overwritten
# Compute the k-NN distances for the prediction data
knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction)
knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # convert to a DataFrame
mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1),
                                               columns=['mean_of_knn_distance']) # mean of the k_in_knn distances
mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # save to csv; note that an existing file with the same name is overwritten
# For the prediction data, determine whether each sample is inside or outside the AD
inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold # TRUE only for samples inside the AD
inside_ad_flag_prediction.columns=['inside_ad_flag']
inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv') # save to csv; note that an existing file with the same name is overwritten
| [((267, 314), 'pandas.read_csv', 'pd.read_csv', (['"""resin.csv"""'], {'index_col': '(0)', 'header': '(0)'}), "('resin.csv', index_col=0, header=0)\n", (278, 314), True, 'import pandas as pd\n'), ((331, 389), 'pandas.read_csv', 'pd.read_csv', (['"""resin_prediction.csv"""'], {'index_col': '(0)', 'header': '(0)'}), "('resin_prediction.csv', index_col=0, header=0)\n", (342, 389), True, 'import pandas as pd\n'), ((783, 841), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'k_in_knn', 'metric': '"""euclidean"""'}), "(n_neighbors=k_in_knn, metric='euclidean')\n", (799, 841), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1211, 1269), 'pandas.DataFrame', 'pd.DataFrame', (['knn_distance_train'], {'index': 'autoscaled_x.index'}), '(knn_distance_train, index=autoscaled_x.index)\n', (1223, 1269), True, 'import pandas as pd\n'), ((2340, 2403), 'pandas.DataFrame', 'pd.DataFrame', (['knn_distance_prediction'], {'index': 'x_prediction.index'}), '(knn_distance_prediction, index=x_prediction.index)\n', (2352, 2403), True, 'import pandas as pd\n')] |
destinysky/nsh_sfc | topology.py | 290fa49df2880527e0b7844bf3bec4d55c4945a6 | #!/usr/bin/python
"""
"""
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch
#OVSLegacyKernelSwitch, UserSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import Link, TCLink
#conf_port=50000
conf_ip_1='10.0.0.254'
conf_mac_1='11:12:13:14:15:16'
def topology():
"Create a network."
net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch )
print "*** Creating nodes"
h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' )
h2 = net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' )
h3 = net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' )
h4 = net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' )
h5 = net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' )
s1 = net.addSwitch( 's1', listenPort=6671 )
s2 = net.addSwitch( 's2', listenPort=6672 )
s3 = net.addSwitch( 's3', listenPort=6673 )
s4 = net.addSwitch( 's4', listenPort=6674 )
s5 = net.addSwitch( 's5', listenPort=6675 )
c1 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633 )
print "*** Creating links"
net.addLink(s1, h1)
net.addLink(s2, h2)
net.addLink(s3, h3)
net.addLink(s4, h4)
net.addLink(s5, h5)
net.addLink(s1, s2)
net.addLink(s2, s3)
net.addLink(s3, s4)
net.addLink(s4, s5)
print "*** Starting network"
net.build()
h1.cmd('ip route add '+conf_ip_1+'/32 dev h1-eth0')
h1.cmd('sudo arp -i h1-eth0 -s '+conf_ip_1+' '+conf_mac_1)
h1.cmd('sysctl -w net.ipv4.ip_forward=1')
h1.cmd('python3 listen.py &')
h2.cmd('ip route add '+conf_ip_1+'/32 dev h2-eth0')
h2.cmd('sudo arp -i h2-eth0 -s '+conf_ip_1+' '+conf_mac_1)
h2.cmd('sysctl -w net.ipv4.ip_forward=1')
h2.cmd('python3 listen.py &')
h3.cmd('ip route add '+conf_ip_1+'/32 dev h3-eth0')
h3.cmd('sudo arp -i h3-eth0 -s '+conf_ip_1+' '+conf_mac_1)
h3.cmd('sysctl -w net.ipv4.ip_forward=1')
h3.cmd('python3 listen.py &')
h4.cmd('ip route add '+conf_ip_1+'/32 dev h4-eth0')
h4.cmd('sudo arp -i h4-eth0 -s '+conf_ip_1+' '+conf_mac_1)
h4.cmd('sysctl -w net.ipv4.ip_forward=1')
h4.cmd('python3 listen.py &')
h5.cmd('ip route add '+conf_ip_1+'/32 dev h5-eth0')
h5.cmd('sudo arp -i h5-eth0 -s '+conf_ip_1+' '+conf_mac_1)
h5.cmd('sysctl -w net.ipv4.ip_forward=1')
h5.cmd('python3 listen.py &')
c1.start()
s1.start( [c1] )
s2.start( [c1] )
s3.start( [c1] )
s4.start( [c1] )
s5.start( [c1] )
print "*** Running CLI"
CLI( net )
print "*** Stopping network"
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
topology() | [] |
gventuraagramonte/python | lampara/lamp.py | d96796c302f2f423a8e949f9c7d33a3bfabf8a0f |
#Definition of the class
#before starting a class, it is declared as follows
class Lamp:
_LAMPS = ['''
.
. | ,
\ ' /
` ,-. '
--- ( ) ---
\ /
_|=|_
|_____|
''',
'''
,-.
( )
\ /
_|=|_
|_____|
''']
    def __init__(self, is_turned_on): #instance method; __init__ is the constructor, i.e. the first thing that runs
self._is_turned_on = is_turned_on
def turn_on(self):
self._is_turned_on = True
self._display_image()
def turn_off(self):
self._is_turned_on = False
self._display_image()
def _display_image(self):
if self._is_turned_on:
print(self._LAMPS[0])
else:
print(self._LAMPS[1])
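#Illustrative usage sketch (not part of the original file): toggling the lamp prints the matching ASCII art
if __name__ == "__main__":
    lamp = Lamp(is_turned_on=False)
    lamp.turn_on()   # prints the lit lamp
    lamp.turn_off()  # prints the unlit lamp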
| [] |
sneumann/galaxy | lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py | f6011bab5b8adbabae4986a45849bb9158ffc8bb | """
Migration script to add 'ldda_id' column to the implicitly_converted_dataset_association table.
"""
from __future__ import print_function
import logging
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData
)
from galaxy.model.migrate.versions.util import (
add_column,
drop_column
)
log = logging.getLogger(__name__)
metadata = MetaData()
def upgrade(migrate_engine):
print(__doc__)
metadata.bind = migrate_engine
metadata.reflect()
# SQLAlchemy Migrate has a bug when adding a column with both a ForeignKey and a index in SQLite
if migrate_engine.name != 'sqlite':
c = Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True)
else:
c = Column("ldda_id", Integer, index=True, nullable=True)
add_column(c, 'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id')
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
drop_column('ldda_id', 'implicitly_converted_dataset_association', metadata)
| [((331, 358), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (348, 358), False, 'import logging\n'), ((370, 380), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (378, 380), False, 'from sqlalchemy import Column, ForeignKey, Integer, MetaData\n'), ((831, 957), 'galaxy.model.migrate.versions.util.add_column', 'add_column', (['c', '"""implicitly_converted_dataset_association"""', 'metadata'], {'index_name': '"""ix_implicitly_converted_ds_assoc_ldda_id"""'}), "(c, 'implicitly_converted_dataset_association', metadata,\n index_name='ix_implicitly_converted_ds_assoc_ldda_id')\n", (841, 957), False, 'from galaxy.model.migrate.versions.util import add_column, drop_column\n'), ((1050, 1126), 'galaxy.model.migrate.versions.util.drop_column', 'drop_column', (['"""ldda_id"""', '"""implicitly_converted_dataset_association"""', 'metadata'], {}), "('ldda_id', 'implicitly_converted_dataset_association', metadata)\n", (1061, 1126), False, 'from galaxy.model.migrate.versions.util import add_column, drop_column\n'), ((773, 826), 'sqlalchemy.Column', 'Column', (['"""ldda_id"""', 'Integer'], {'index': '(True)', 'nullable': '(True)'}), "('ldda_id', Integer, index=True, nullable=True)\n", (779, 826), False, 'from sqlalchemy import Column, ForeignKey, Integer, MetaData\n'), ((670, 722), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""library_dataset_dataset_association.id"""'], {}), "('library_dataset_dataset_association.id')\n", (680, 722), False, 'from sqlalchemy import Column, ForeignKey, Integer, MetaData\n')] |
tobiichiorigami1/csp | ds.py | e1f419869a0a1aa3e39aeb5888571267be5d80bd | votes_t_shape = [3, 0, 1, 2]
for i in range(6 - 4):
votes_t_shape += [i + 4]
print(votes_t_shape)
| [] |
TimDettmers/sched | scripts/adam/cc100_baselines.py | e16735f2c2eb6a51f5cf29ead534041574034e2e | import numpy as np
import itertools
import gpuscheduler
import argparse
import os
import uuid
import hashlib
import glob
import math
from itertools import product
from torch.optim.lr_scheduler import OneCycleLR
from os.path import join
parser = argparse.ArgumentParser(description='Compute script.')
parser.add_argument('--dry', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select a configuration.')
args = parser.parse_args()
gpus = 128
cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy --task language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus)
args2 = {}
name = 'blockwise5'
constraint = 'volta32gb'
# 1024 tokens * 8 update_freq * 56250 steps = 0.4608e9 tokens -> optimal batch size 3460
# model sizes: 1.92bn, 2.43bn, 1.41bn
logfolder = 'adam/cc100/{0}'.format(name)
ckp_name = logfolder
#time_hours = 24*2
cores_per_job = 5
mem = 56*(8 if gpus > 8 else gpus)
num_seeds = 1
seed_offset = 5
time_hours = 72
time_minutes = 0
#partition = 'learnlab,learnfair,scavenge'
partition = 'learnfair,learnlab'
#partition = 'learnfair'
#partition = 'uninterruptible'
change_dir = 'fairseq_private'
repo = 'fairseq_private'
exclude = ''
s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False)
fp16 = True
args3 = {}
args2['lr-scheduler'] = 'polynomial_decay'
args2['warmup-updates'] = 2000
args2['max-update'] = 56250
args2['total-num-update'] = 56250
#args2['lr-scheduler'] = 'cosine'
#args2['warmup-updates'] = 3000
#args2['max-update'] = 56250*4
args2['fp16-scale-window'] = 250
args2['clip-norm'] = 0.4
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1), (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#, (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]
#args3['adam8bits-offset'] = [1/512]
#args3['prob-quant'] = [False]
#args3['dist-scale'] = [1.0]
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3['decoder-embed-dim'] = [2048+256]
#args3['decoder-ffn-embed-dim'] = [8192+2048]
#args3['max-tokens'] = [3072]
#args3['update-freq'] = [2]
key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr')
#key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq')
args3[key] = []
#lrkey = ('lr', 'warmup-init-lr')
#args3[lrkey] = []
# 32-bit baseline
#args3['optimizer'] = ['adam']
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]
##args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048,8192,2))
#
#lr = 0.003239 + (-0.0001395*math.log(1.41e9))
#args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))
# adafactor
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)]
#args2['optimizer'] = 'adafactor'
#args2['beta1'] = 0.9
#args2['decay-rate'] = 0.999
##args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048+256,8192+2048,2))
##args3[key].append((2048,2688,10752,2))
#
#lr = 0.003239 + (-0.0001395*math.log(1.92e9))
#args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))
# 8-bit
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 1)]
args3['optimizer'] = ['adam']
args3[('use-bnb', 'optim-bits')] = [(True, 8)]
args3[('stable-emb', 'no-scale-embedding')] = [(True, True)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True, True), (False, False, False)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(False, False, False)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True, True, True, True)]
args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048,8192,8, 0.00045))
#args3[key].append((2048,2688,10752,2))
#args3['use-emb-norm'] = [True]
#lr = 0.003239 + (-0.0001395*math.log(2.43e9))
#args3[lrkey].append((lr, 0.0))
#args2['train-subset'] = 'train11'
args4 = []
args5 = {}
args6 = {}
rdm = np.random.RandomState(5345)
for key, value in args2.items():
cmd = cmd + ' --{0} {1}'.format(key, value)
args_prod = []
for key, values in args3.items():
if isinstance(key, tuple):
keyvalues = []
for tups in values:
arg = ''
for i, v in enumerate(tups):
if v is True: v = ''
if v is False: continue
if len(key[i]) == 0:
arg += '{0} '.format(v)
else:
arg += '--{0} {1} '.format(key[i], v)
keyvalues.append(arg)
elif isinstance(key, str):
keyvalues = []
for v in values:
if v is True: v = ''
if v is False:
keyvalues.append('')
else:
keyvalues.append(' --{0} {1}'.format(key, v))
args_prod.append(keyvalues)
if len(args_prod) >= 2:
args_prod = list(product(*args_prod))
else:
new_args = []
if len(args_prod) > 0:
for arg in args_prod[0]:
new_args.append([arg])
args_prod = new_args
jobs = []
if len(args4) == 0: args4.append('')
for seed in range(num_seeds):
seed = seed + seed_offset
for arg4 in args4:
if len(args_prod) == 0: args_prod.append(('', ''))
for i, values in enumerate(args_prod):
job_cmd = cmd + arg4
for val in values:
job_cmd += ' {0}' .format(val)
#job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)
if not fp16: job_cmd = job_cmd.replace('--fp16 ', ' ')
job_cmd = job_cmd + ' --seed {0}'.format(seed)
checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)
save_dir = ' --save-dir {0}'.format(checkpoint_dir)
job_cmd = job_cmd + save_dir
cmds = [job_cmd]
if rdm.rand(1) <= args.p:
jobs.append(job_cmd)
s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus)
if args.dry:
for i, job in enumerate(jobs):
print(i, job)
print('')
print('Total jobs', len(jobs))
print('Time hours: {0}'.format(time_hours))
print('GPUs: {0}'.format(gpus))
print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder)))
print('Jobs will be run on: {0}'.format(partition))
print('Run in folder: {0}'.format(change_dir))
if not args.dry:
s.run_jobs()
| [((247, 301), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute script."""'}), "(description='Compute script.')\n", (270, 301), False, 'import argparse\n'), ((1923, 2025), 'gpuscheduler.HyakScheduler', 'gpuscheduler.HyakScheduler', ([], {'verbose': 'args.verbose', 'account': '""""""', 'partition': 'partition', 'use_gres': '(False)'}), "(verbose=args.verbose, account='', partition=\n partition, use_gres=False)\n", (1949, 2025), False, 'import gpuscheduler\n'), ((5545, 5572), 'numpy.random.RandomState', 'np.random.RandomState', (['(5345)'], {}), '(5345)\n', (5566, 5572), True, 'import numpy as np\n'), ((6455, 6474), 'itertools.product', 'product', (['*args_prod'], {}), '(*args_prod)\n', (6462, 6474), False, 'from itertools import product\n'), ((8035, 8085), 'os.path.join', 'join', (['"""/private/home/timdettmers/logs/"""', 'logfolder'], {}), "('/private/home/timdettmers/logs/', logfolder)\n", (8039, 8085), False, 'from os.path import join\n')] |
hal0x2328/neo3-boa | boa3_test/test_sc/event_test/EventNep5Transfer.py | 6825a3533384cb01660773050719402a9703065b | from boa3.builtin import public
from boa3.builtin.contract import Nep5TransferEvent
transfer = Nep5TransferEvent
@public
def Main(from_addr: bytes, to_addr: bytes, amount: int):
transfer(from_addr, to_addr, amount)
| [] |
SchuylerGoodman/topicalguide | abtest/views.py | 7c26c8be8e1dddb7bf2be33ea9a7ba59034bf620 | # The Topical Guide
# Copyright 2010-2011 Brigham Young University
#
# This file is part of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>.
#
# The Topical Guide is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# The Topical Guide is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with the Topical Guide. If not, see <http://www.gnu.org/licenses/>.
#
# If you have inquiries regarding any further use of the Topical Guide, please
# contact the Copyright Licensing Office, Brigham Young University, 3760 HBLL,
# Provo, UT 84602, (801) 422-9339 or 422-3821, e-mail [email protected].
from __future__ import print_function
from django.shortcuts import render, redirect
from django.http import HttpResponse
import abtest
from abtest.settings import TEST_LIST
from visualize import root
# Create your views here.
def test(request, arg, *args, **kwargs):
if arg not in TEST_LIST:
print("Error! Unknown view should have been hit instead")
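    # Resolve the dotted view path configured in TEST_LIST and dispatch the request to it.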
package_list = TEST_LIST[arg]['VIEW_PACKAGE'].split('.')
view_package = package_list.pop()
package = ".".join(package_list)
view = getattr(__import__(package, fromlist=[view_package]), view_package)
return view(request, args, kwargs)
# This view is called when the given url does not match anything
def unknown(request, arg, *args, **kwargs):
# redirect to the root view
return redirect('/')
| [((1830, 1843), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (1838, 1843), False, 'from django.shortcuts import render, redirect\n')] |
sulantha2006/neurodocker | neurodocker/reprozip/tests/test_merge.py | d03fe865ae05fea2f7ce9a8b417717dae7bd640f | """Tests for merge.py."""
from __future__ import absolute_import, division, print_function
from glob import glob
import os
import tarfile
import tempfile
from neurodocker.docker import client
from neurodocker.reprozip.trace import ReproZipMinimizer
from neurodocker.reprozip.merge import merge_pack_files
def _create_packfile(commands, dir):
"""Create packfile from list `commands` in debian:stretch container."""
container = client.containers.run('debian:stretch', detach=True, tty=True,
security_opt=['seccomp:unconfined'])
try:
minimizer = ReproZipMinimizer(container.id, commands,
packfile_save_dir=dir)
packfile_path = minimizer.run()
except:
raise
finally:
container.stop()
container.remove()
return packfile_path
def test_merge_pack_files():
tmpdir = tempfile.mkdtemp()
cmd = ["du -sh /usr", "rm --help"]
packpath = _create_packfile(cmd, tmpdir)
new_name = "first-pack.rpz"
os.rename(packpath, os.path.join(tmpdir, new_name))
cmd = ["ls -l /", "grep --help"]
_create_packfile(cmd, tmpdir)
pattern = os.path.join(tmpdir, '*.rpz')
packfiles = glob(pattern)
assert packfiles, "packfiles not found"
outfile = os.path.join(tmpdir, 'merged.rpz')
merge_pack_files(outfile=outfile, packfiles=packfiles)
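    # Unpack the merged .rpz archive and check that binaries traced by both pack files are present.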
with tarfile.open(outfile) as tar:
tar.extractall(path=tmpdir)
datafile = os.path.join(tmpdir, 'DATA.tar.gz')
with tarfile.open(datafile) as tardata:
tardata.extractall(path=tmpdir)
usr_path = os.path.join(tmpdir, 'DATA', 'usr', 'bin')
assert os.path.isfile(os.path.join(usr_path, 'du'))
assert os.path.isfile(os.path.join(usr_path, 'grep'))
assert os.path.isfile(os.path.join(usr_path, 'ls'))
assert os.path.isfile(os.path.join(usr_path, 'rm'))
assert not os.path.isfile(os.path.join(usr_path, 'sed'))
assert not os.path.isfile(os.path.join(usr_path, 'tar'))
| [((439, 543), 'neurodocker.docker.client.containers.run', 'client.containers.run', (['"""debian:stretch"""'], {'detach': '(True)', 'tty': '(True)', 'security_opt': "['seccomp:unconfined']"}), "('debian:stretch', detach=True, tty=True, security_opt\n =['seccomp:unconfined'])\n", (460, 543), False, 'from neurodocker.docker import client\n'), ((909, 927), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (925, 927), False, 'import tempfile\n'), ((1188, 1217), 'os.path.join', 'os.path.join', (['tmpdir', '"""*.rpz"""'], {}), "(tmpdir, '*.rpz')\n", (1200, 1217), False, 'import os\n'), ((1234, 1247), 'glob.glob', 'glob', (['pattern'], {}), '(pattern)\n', (1238, 1247), False, 'from glob import glob\n'), ((1307, 1341), 'os.path.join', 'os.path.join', (['tmpdir', '"""merged.rpz"""'], {}), "(tmpdir, 'merged.rpz')\n", (1319, 1341), False, 'import os\n'), ((1346, 1400), 'neurodocker.reprozip.merge.merge_pack_files', 'merge_pack_files', ([], {'outfile': 'outfile', 'packfiles': 'packfiles'}), '(outfile=outfile, packfiles=packfiles)\n', (1362, 1400), False, 'from neurodocker.reprozip.merge import merge_pack_files\n'), ((606, 670), 'neurodocker.reprozip.trace.ReproZipMinimizer', 'ReproZipMinimizer', (['container.id', 'commands'], {'packfile_save_dir': 'dir'}), '(container.id, commands, packfile_save_dir=dir)\n', (623, 670), False, 'from neurodocker.reprozip.trace import ReproZipMinimizer\n'), ((1069, 1099), 'os.path.join', 'os.path.join', (['tmpdir', 'new_name'], {}), '(tmpdir, new_name)\n', (1081, 1099), False, 'import os\n'), ((1411, 1432), 'tarfile.open', 'tarfile.open', (['outfile'], {}), '(outfile)\n', (1423, 1432), False, 'import tarfile\n'), ((1496, 1531), 'os.path.join', 'os.path.join', (['tmpdir', '"""DATA.tar.gz"""'], {}), "(tmpdir, 'DATA.tar.gz')\n", (1508, 1531), False, 'import os\n'), ((1545, 1567), 'tarfile.open', 'tarfile.open', (['datafile'], {}), '(datafile)\n', (1557, 1567), False, 'import tarfile\n'), ((1647, 1689), 'os.path.join', 'os.path.join', (['tmpdir', '"""DATA"""', '"""usr"""', '"""bin"""'], {}), "(tmpdir, 'DATA', 'usr', 'bin')\n", (1659, 1689), False, 'import os\n'), ((1724, 1752), 'os.path.join', 'os.path.join', (['usr_path', '"""du"""'], {}), "(usr_path, 'du')\n", (1736, 1752), False, 'import os\n'), ((1788, 1818), 'os.path.join', 'os.path.join', (['usr_path', '"""grep"""'], {}), "(usr_path, 'grep')\n", (1800, 1818), False, 'import os\n'), ((1854, 1882), 'os.path.join', 'os.path.join', (['usr_path', '"""ls"""'], {}), "(usr_path, 'ls')\n", (1866, 1882), False, 'import os\n'), ((1918, 1946), 'os.path.join', 'os.path.join', (['usr_path', '"""rm"""'], {}), "(usr_path, 'rm')\n", (1930, 1946), False, 'import os\n'), ((1986, 2015), 'os.path.join', 'os.path.join', (['usr_path', '"""sed"""'], {}), "(usr_path, 'sed')\n", (1998, 2015), False, 'import os\n'), ((2055, 2084), 'os.path.join', 'os.path.join', (['usr_path', '"""tar"""'], {}), "(usr_path, 'tar')\n", (2067, 2084), False, 'import os\n')] |
dolang/build-kivy-linux | build/step-3-kivy-almost-manylinux/scripts/redirect_html5.py | bb3e6dce956659d94604b524aa6702e8c390e15a | """
HTML5 contexts.
:author: Dominik Lang
:license: MIT
"""
import contextlib
import io
import sys
__all__ = ['create_document', 'tag', 'write_link']
class create_document(contextlib.redirect_stdout):
"""Redirect output to an HTML5 document specified by new_target.
    An HTML document title can be specified, but should not consist of
whitespace only. Default is a dash.
For serialisation, an encoding is included and defaults to UTF-8.
Make sure the output (likely ``new_target``) uses the correct one.
Arguments are not checked for validity.
"""
def __init__(self, new_target, *, title='-', encoding='utf-8'):
super().__init__(new_target)
self._title = str(title)
self._encoding = encoding
def __enter__(self):
new_target = contextlib.redirect_stdout.__enter__(self)
html5 = ('<!DOCTYPE html>\n'
'<html>\n'
'<title>{}</title>\n'
'<meta charset="{}">'.format(self._title, self._encoding))
print(html5)
return new_target
@contextlib.contextmanager
def tag(name):
"""Enclose output in an HTML tag denoted by the name."""
print('<{}>'.format(name))
yield
print('</{}>'.format(name))
class LinkStringIO(io.StringIO):
def __init__(self):
super().__init__()
self._write_text = False # switch between link href="..." and text
def write(self, s):
if not s:
return
# else:
if s.isspace():
return super().write(s)
# else:
        if self._write_text:
            # second write of a pair: the link text, followed by the closing tag
            count = super().write(s)
            count += super().write('</a>')
        else:
            # first write of a pair: the href, wrapped in the opening anchor tag
            count = super().write('<a href="')
            count += super().write(s)
            count += super().write('">')
self._write_text = not self._write_text
return count
class write_link(contextlib.redirect_stdout):
"""Combine any two subsequent non-empty writes into an HTML link."""
def __init__(self):
super().__init__(LinkStringIO())
def __exit__(self, exctype, excinst, exctb):
super().__exit__(exctype, excinst, exctb)
with contextlib.closing(self._new_target):
self._new_target.seek(0)
sys.stdout.write(self._new_target.read())
| [((853, 895), 'contextlib.redirect_stdout.__enter__', 'contextlib.redirect_stdout.__enter__', (['self'], {}), '(self)\n', (889, 895), False, 'import contextlib\n'), ((2297, 2333), 'contextlib.closing', 'contextlib.closing', (['self._new_target'], {}), '(self._new_target)\n', (2315, 2333), False, 'import contextlib\n')] |
jzacsh/neuralnets-cmp464 | lab/hw03-part-i_nov14.py | de35bbba93b87446b231bf012a8de5acc7896a04 | """
Jonathan Zacsh's solution to homework #3, Nov 14, Part I
"""
# Per homework instructions, following lead from matlab example by professor:
# http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf
import sys
import tensorflow as tf
import tempfile
import os
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# not really doing interesting things in this lab, so just ignore optimization
class Differentiable:
""" encapsulation of a function and its derivative """
def __init__(self, label, f, d):
self.func = f
self.deriv = d
self.func.name = label
self.deriv.name = "%sDeriv" % label
# g(x) = x^4+2x-7 ; per matlab example
# g'(x) = 4x^3+2
fExFourth = Differentiable("fExFourth",
lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]),
lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2]))
tFofTwo = fExFourth.func(2)
tFofDerivTwo = fExFourth.deriv(2)
log_dir = tempfile.mkdtemp(prefix="hw3-nov14-parti")
print(log_dir)
with tf.Session() as sess:
writer = tf.summary.FileWriter(log_dir, sess.graph)
fOfTwo, fDerivOfTwo = results = sess.run([tFofTwo, tFofDerivTwo])
sys.stderr.write("results:\n\tf(2)=%s\n\tf'(2)=%s\n" % (fOfTwo, fDerivOfTwo))
# note: only needed when doing a *loop* of sess.run() calls, and want to see
# intermediary results per-loop.
#writer.add_summary(results)
writer.flush()
writer.close()
| [((969, 1011), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""hw3-nov14-parti"""'}), "(prefix='hw3-nov14-parti')\n", (985, 1011), False, 'import tempfile\n'), ((1032, 1044), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1042, 1044), True, 'import tensorflow as tf\n'), ((1067, 1109), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir', 'sess.graph'], {}), '(log_dir, sess.graph)\n', (1088, 1109), True, 'import tensorflow as tf\n'), ((1185, 1261), 'sys.stderr.write', 'sys.stderr.write', (['("""results:\n\tf(2)=%s\n\tf\'(2)=%s\n""" % (fOfTwo, fDerivOfTwo))'], {}), '("""results:\n\tf(2)=%s\n\tf\'(2)=%s\n""" % (fOfTwo, fDerivOfTwo))\n', (1201, 1261), False, 'import sys\n'), ((793, 805), 'tensorflow.pow', 'tf.pow', (['x', '(4)'], {}), '(x, 4)\n', (799, 805), True, 'import tensorflow as tf\n'), ((807, 824), 'tensorflow.multiply', 'tf.multiply', (['(2)', 'x'], {}), '(2, x)\n', (818, 824), True, 'import tensorflow as tf\n'), ((875, 887), 'tensorflow.pow', 'tf.pow', (['x', '(3)'], {}), '(x, 3)\n', (881, 887), True, 'import tensorflow as tf\n')] |
GChrysostomou/tasc | modules/experiments_bc/set_tp.py | d943de343d725b99fa1a1ad201b32a21e5970801 | import torch
import torch.nn as nn
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import *
from sklearn.metrics import precision_recall_fscore_support as prfs
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def degrading_model_perf(data, model, save_path, data_size, largest = True):
print("\n--- Degrading Model Performance \n")
modulo = round(len(data) / 10) + 1
model.embedding.weight.requires_grad_(True)
actual = []
results = {}
results["random"] = []
results["attention"]= []
results["gradient"] = []
results["grad_attention"] = []
results["grad*attention"] = []
_, _, lengths, _ = next(iter(data))
maximum = max(lengths)
if max(lengths) <= 10 :
maximum = max(lengths) - 1
elif max(lengths) > 10 :
maximum = 10
print(maximum)
grad_set = torch.zeros([data_size, maximum]).long().to(device)
att_set = torch.zeros([data_size, maximum]).long().to(device)
rand_set = torch.zeros([data_size, maximum]).long().to(device)
att_grad_set = torch.zeros([data_size, maximum]).long().to(device)
att_x_grad_set = torch.zeros([data_size, maximum]).long().to(device)
actual_set = torch.zeros([data_size, 1]).long().to(device)
docs = []
for batchi, (doc_id, sentences, lengths, labels) in enumerate(data):
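        # Run the model with gradients retained so both attention weights and embedding gradients are available.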
model.train()
torch.cuda.empty_cache()
model.zero_grad()
sentences, lengths, labels = sentences.to(device), lengths.to(device), labels.to(device)
yhat, weights_or = model(sentences, lengths, retain_gradient = True)
masking = yhat.max(-1)[1] == labels
if largest == False:
masking = yhat.max(-1)[1] != labels
yhat.max(-1)[0].sum().backward(retain_graph = True)
maxi = max(lengths)
doc_id = doc_id[masking]
yhat = yhat[masking]
sentences = sentences[masking]
labels = labels[masking]
lengths = lengths[masking]
weights_or = weights_or[masking]
docs.extend(doc_id)
g = model.embed.grad[masking]
weights_def_grad = model.weights.grad[masking]
max_lengths = max(max(lengths), maxi)
model_masks = model.masks[masking]
with torch.no_grad():
weights = weights_or.clone()
weight_mul_grad = weights_or * weights_def_grad
weight_mul_grad[model_masks[:,:max_lengths]] = float("-inf")
weights_def_grad_soft = weights_def_grad.clone()
weights_def_grad_soft[model_masks[:,:max_lengths]] = float("-inf")
em = model.embed[masking]
g1 = (g* em).sum(-1)[:,:max_lengths]
g1[model_masks[:,:max_lengths]] = float("-inf")
sentence_att = sentences.clone()[:,:max_lengths]
sentence_grad = sentences.clone()[:,:max_lengths]
sentence_rand = sentences.clone()[:,:max_lengths]
sentence_att_grad = sentences.clone()[:,:max_lengths]
sentence_att_mul_grad = sentences.clone()[:,:max_lengths]
g1[model_masks[:,:max_lengths]] = float("-inf")
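            # Rank tokens by gradient, attention, random, attention-gradient and attention*gradient importance.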
top_grad = torch.topk(g1, k = g1.size(1), largest = largest)[1]
top_att = torch.topk(weights, k = weights.size(1),
largest = largest)[1]
top_rand = torch.randn(top_att.shape)
top_rand = torch.topk(top_rand, k = weights.size(1),
largest = largest)[1]
top_att_grad = torch.topk(weights_def_grad_soft,
k = weights.size(1),
largest = largest)[1]
top_att_mul_grad = torch.topk(weight_mul_grad,
k = weights.size(1),
largest = largest)[1]
temp_pred = []
temp_act = []
temp_act.append(labels.cpu().data.numpy())
temp_pred.append(yhat.max(-1)[1].cpu().data.numpy())
model.eval()
actual_set[doc_id] = labels.unsqueeze(-1)
rand_set[doc_id, 0] = yhat.max(-1)[1]
att_set[doc_id, 0] = yhat.max(-1)[1]
grad_set[doc_id, 0] = yhat.max(-1)[1]
att_grad_set[doc_id, 0] = yhat.max(-1)[1]
att_x_grad_set[doc_id, 0] = yhat.max(-1)[1]
rows = torch.arange(sentences.size(0))
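        # Remove (zero out) the top-ranked tokens one at a time and record the model's predictions after each removal.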
for _j_ in range(1,maximum):
sentence_grad[rows, top_grad[:,_j_]] = 0
sentence_att[rows, top_att[:,_j_]] = 0
sentence_att_grad[rows, top_att_grad[:,_j_]] = 0
sentence_att_mul_grad[rows, top_att_mul_grad[:,_j_]] = 0
sentence_rand[rows, top_rand[:,_j_]] = 0
yhat_rand, _ = model(sentence_rand,lengths)
rand_set[doc_id, _j_] = yhat_rand.max(-1)[1]
yhat_att, _ = model(sentence_att,lengths)
att_set[doc_id, _j_] = yhat_att.max(-1)[1]
yhat_grad, _ = model(sentence_grad,lengths)
grad_set[doc_id, _j_] = yhat_grad.max(-1)[1]
yhat_att_grad, _ = model(sentence_att_grad,lengths)
att_grad_set[doc_id, _j_] = yhat_att_grad.max(-1)[1]
yhat_att_x_grad, _ = model(sentence_att_mul_grad,lengths)
att_x_grad_set[doc_id, _j_] = yhat_att_x_grad.max(-1)[1]
if batchi % modulo == 0 :
print("Remaining: ", len(data)- batchi)
docs = torch.LongTensor(docs)
rand_set = rand_set[docs]
att_set = att_set[docs]
grad_set = grad_set[docs]
att_grad_set = att_grad_set[docs]
att_x_grad_set = att_x_grad_set[docs]
actual_set = actual_set[docs]
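    # Compute macro-F1 at every deletion step for each ranking strategy.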
for _k_ in range(0,maximum):
actual = actual_set.flatten().cpu().data.numpy()
rand_pred = classification_report(actual,
rand_set[:,_k_].cpu().data.numpy(),
output_dict = True)["macro avg"]["f1-score"]
att_pred = classification_report(actual,
att_set[:,_k_].cpu().data.numpy(),
output_dict = True)["macro avg"]["f1-score"]
grad_pred = classification_report(actual,
grad_set[:,_k_].cpu().data.numpy(),
output_dict = True)["macro avg"]["f1-score"]
att_grad_pred = classification_report(actual,
att_grad_set[:,_k_].cpu().data.numpy(),
output_dict = True)["macro avg"]["f1-score"]
att_x_grad_pred = classification_report(actual,
att_x_grad_set[:,_k_].cpu().data.numpy(),
output_dict = True)["macro avg"]["f1-score"]
results["random"].append(rand_pred)
results["attention"].append(att_pred)
results["gradient"].append(grad_pred)
results["grad_attention"].append(att_grad_pred)
results["grad*attention"].append(att_x_grad_pred)
results = pd.DataFrame.from_dict(results)
results.plot(kind = "line", figsize = (18,10))
ordering = "ascending"
if largest:
ordering = "descending"
plt.savefig(save_path + "_correct_classified_" + ordering + ".png")
results.to_csv(save_path + "_correct_classified_" + ordering + ".csv")
| [((72, 93), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (86, 93), False, 'import matplotlib\n'), ((6334, 6356), 'torch.LongTensor', 'torch.LongTensor', (['docs'], {}), '(docs)\n', (6350, 6356), False, 'import torch\n'), ((8097, 8128), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['results'], {}), '(results)\n', (8119, 8128), True, 'import pandas as pd\n'), ((8288, 8355), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_path + '_correct_classified_' + ordering + '.png')"], {}), "(save_path + '_correct_classified_' + ordering + '.png')\n", (8299, 8355), True, 'import matplotlib.pyplot as plt\n'), ((279, 304), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (302, 304), False, 'import torch\n'), ((1540, 1564), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1562, 1564), False, 'import torch\n'), ((2533, 2548), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2546, 2548), False, 'import torch\n'), ((3750, 3776), 'torch.randn', 'torch.randn', (['top_att.shape'], {}), '(top_att.shape)\n', (3761, 3776), False, 'import torch\n'), ((1026, 1059), 'torch.zeros', 'torch.zeros', (['[data_size, maximum]'], {}), '([data_size, maximum])\n', (1037, 1059), False, 'import torch\n'), ((1092, 1125), 'torch.zeros', 'torch.zeros', (['[data_size, maximum]'], {}), '([data_size, maximum])\n', (1103, 1125), False, 'import torch\n'), ((1159, 1192), 'torch.zeros', 'torch.zeros', (['[data_size, maximum]'], {}), '([data_size, maximum])\n', (1170, 1192), False, 'import torch\n'), ((1230, 1263), 'torch.zeros', 'torch.zeros', (['[data_size, maximum]'], {}), '([data_size, maximum])\n', (1241, 1263), False, 'import torch\n'), ((1303, 1336), 'torch.zeros', 'torch.zeros', (['[data_size, maximum]'], {}), '([data_size, maximum])\n', (1314, 1336), False, 'import torch\n'), ((1372, 1399), 'torch.zeros', 'torch.zeros', (['[data_size, 1]'], {}), '([data_size, 1])\n', (1383, 1399), False, 'import torch\n')] |
mattmurch/helios-server | helios/tasks.py | c4f5409bbf7117fc561774208c07801b9ae61ff2 | """
Celery queued tasks for Helios
2010-08-01
[email protected]
"""
import copy
from celery import shared_task
from celery.utils.log import get_logger
import signals
from models import CastVote, Election, Voter, VoterFile
from view_utils import render_template_raw
@shared_task
def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs):
cast_vote = CastVote.objects.get(id=cast_vote_id)
result = cast_vote.verify_and_store()
voter = cast_vote.voter
election = voter.election
user = voter.get_user()
if result:
# send the signal
signals.vote_cast.send(sender=election, election=election, user=user, voter=voter, cast_vote=cast_vote)
if status_update_message and user.can_update_status():
user.update_status(status_update_message)
else:
logger = get_logger(cast_vote_verify_and_store.__name__)
logger.error("Failed to verify and store %d" % cast_vote_id)
@shared_task
def voters_email(election_id, subject_template, body_template, extra_vars={},
voter_constraints_include=None, voter_constraints_exclude=None):
"""
voter_constraints_include are conditions on including voters
voter_constraints_exclude are conditions on excluding voters
"""
election = Election.objects.get(id=election_id)
# select the right list of voters
voters = election.voter_set.all()
if voter_constraints_include:
voters = voters.filter(**voter_constraints_include)
if voter_constraints_exclude:
voters = voters.exclude(**voter_constraints_exclude)
for voter in voters:
single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars)
@shared_task
def voters_notify(election_id, notification_template, extra_vars={}):
election = Election.objects.get(id=election_id)
for voter in election.voter_set.all():
single_voter_notify.delay(voter.uuid, notification_template, extra_vars)
@shared_task
def single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}):
voter = Voter.objects.get(uuid=voter_uuid)
the_vars = copy.copy(extra_vars)
the_vars.update({'voter': voter})
subject = render_template_raw(None, subject_template, the_vars)
body = render_template_raw(None, body_template, the_vars)
voter.send_message(subject, body)
@shared_task
def single_voter_notify(voter_uuid, notification_template, extra_vars={}):
voter = Voter.objects.get(uuid=voter_uuid)
the_vars = copy.copy(extra_vars)
the_vars.update({'voter': voter})
notification = render_template_raw(None, notification_template, the_vars)
voter.send_notification(notification)
@shared_task
def election_compute_tally(election_id):
election = Election.objects.get(id=election_id)
election.compute_tally()
election_notify_admin.delay(election_id=election_id,
subject="encrypted tally computed",
body="""
The encrypted tally for election %s has been computed.
--
Helios
""" % election.name)
if election.has_helios_trustee():
tally_helios_decrypt.delay(election_id=election.id)
@shared_task
def tally_helios_decrypt(election_id):
election = Election.objects.get(id=election_id)
election.helios_trustee_decrypt()
election_notify_admin.delay(election_id=election_id,
subject='Helios Decrypt',
body="""
Helios has decrypted its portion of the tally
for election %s.
--
Helios
""" % election.name)
@shared_task
def voter_file_process(voter_file_id):
voter_file = VoterFile.objects.get(id=voter_file_id)
voter_file.process()
election_notify_admin.delay(election_id=voter_file.election.id,
subject='voter file processed',
body="""
Your voter file upload for election %s
has been processed.
%s voters have been created.
--
Helios
""" % (voter_file.election.name, voter_file.num_voters))
@shared_task
def election_notify_admin(election_id, subject, body):
election = Election.objects.get(id=election_id)
election.admin.send_message(subject, body)
| [((378, 415), 'models.CastVote.objects.get', 'CastVote.objects.get', ([], {'id': 'cast_vote_id'}), '(id=cast_vote_id)\n', (398, 415), False, 'from models import CastVote, Election, Voter, VoterFile\n'), ((1297, 1333), 'models.Election.objects.get', 'Election.objects.get', ([], {'id': 'election_id'}), '(id=election_id)\n', (1317, 1333), False, 'from models import CastVote, Election, Voter, VoterFile\n'), ((1816, 1852), 'models.Election.objects.get', 'Election.objects.get', ([], {'id': 'election_id'}), '(id=election_id)\n', (1836, 1852), False, 'from models import CastVote, Election, Voter, VoterFile\n'), ((2088, 2122), 'models.Voter.objects.get', 'Voter.objects.get', ([], {'uuid': 'voter_uuid'}), '(uuid=voter_uuid)\n', (2105, 2122), False, 'from models import CastVote, Election, Voter, VoterFile\n'), ((2139, 2160), 'copy.copy', 'copy.copy', (['extra_vars'], {}), '(extra_vars)\n', (2148, 2160), False, 'import copy\n'), ((2214, 2267), 'view_utils.render_template_raw', 'render_template_raw', (['None', 'subject_template', 'the_vars'], {}), '(None, subject_template, the_vars)\n', (2233, 2267), False, 'from view_utils import render_template_raw\n'), ((2279, 2329), 'view_utils.render_template_raw', 'render_template_raw', (['None', 'body_template', 'the_vars'], {}), '(None, body_template, the_vars)\n', (2298, 2329), False, 'from view_utils import render_template_raw\n'), ((2471, 2505), 'models.Voter.objects.get', 'Voter.objects.get', ([], {'uuid': 'voter_uuid'}), '(uuid=voter_uuid)\n', (2488, 2505), False, 'from models import CastVote, Election, Voter, VoterFile\n'), ((2522, 2543), 'copy.copy', 'copy.copy', (['extra_vars'], {}), '(extra_vars)\n', (2531, 2543), False, 'import copy\n'), ((2602, 2660), 'view_utils.render_template_raw', 'render_template_raw', (['None', 'notification_template', 'the_vars'], {}), '(None, notification_template, the_vars)\n', (2621, 2660), False, 'from view_utils import render_template_raw\n'), ((2775, 2811), 'models.Election.objects.get', 'Election.objects.get', ([], {'id': 'election_id'}), '(id=election_id)\n', (2795, 2811), False, 'from models import CastVote, Election, Voter, VoterFile\n'), ((3263, 3299), 'models.Election.objects.get', 'Election.objects.get', ([], {'id': 'election_id'}), '(id=election_id)\n', (3283, 3299), False, 'from models import CastVote, Election, Voter, VoterFile\n'), ((3660, 3699), 'models.VoterFile.objects.get', 'VoterFile.objects.get', ([], {'id': 'voter_file_id'}), '(id=voter_file_id)\n', (3681, 3699), False, 'from models import CastVote, Election, Voter, VoterFile\n'), ((4140, 4176), 'models.Election.objects.get', 'Election.objects.get', ([], {'id': 'election_id'}), '(id=election_id)\n', (4160, 4176), False, 'from models import CastVote, Election, Voter, VoterFile\n'), ((595, 703), 'signals.vote_cast.send', 'signals.vote_cast.send', ([], {'sender': 'election', 'election': 'election', 'user': 'user', 'voter': 'voter', 'cast_vote': 'cast_vote'}), '(sender=election, election=election, user=user, voter\n =voter, cast_vote=cast_vote)\n', (617, 703), False, 'import signals\n'), ((844, 891), 'celery.utils.log.get_logger', 'get_logger', (['cast_vote_verify_and_store.__name__'], {}), '(cast_vote_verify_and_store.__name__)\n', (854, 891), False, 'from celery.utils.log import get_logger\n')] |
AlanRosenthal/virtual-dealer | tests/conftest.py | 5c5689172b38b122a69e5ca244497646bf9d8fa8 | """
pytest fixtures
"""
import unittest.mock as mock
import pytest
import virtual_dealer.api
@pytest.fixture(name="client")
def fixture_client():
"""
Client test fixture for testing flask APIs
"""
return virtual_dealer.api.app.test_client()
@pytest.fixture(name="store")
def fixture_store():
"""
Mock for store::Store
"""
with mock.patch("virtual_dealer.api.store", autospec=True) as mock_store:
yield mock_store
@pytest.fixture(name="datastore")
def fixture_datastore():
"""
    Mock for Google's datastore APIs used by virtual_dealer.store
"""
with mock.patch("virtual_dealer.store.datastore", autospec=True) as mock_datastore:
yield mock_datastore
@pytest.fixture(name="datastore_key")
def fixture_datastore_key():
"""
Datastore Key Mock
"""
return mock.MagicMock()
@pytest.fixture(name="datastore_entity")
def fixture_datastore_entity():
"""
Datastore Entity Mock
"""
return mock.MagicMock()
| [((96, 125), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""client"""'}), "(name='client')\n", (110, 125), False, 'import pytest\n'), ((262, 290), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""store"""'}), "(name='store')\n", (276, 290), False, 'import pytest\n'), ((460, 492), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""datastore"""'}), "(name='datastore')\n", (474, 492), False, 'import pytest\n'), ((714, 750), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""datastore_key"""'}), "(name='datastore_key')\n", (728, 750), False, 'import pytest\n'), ((850, 889), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""datastore_entity"""'}), "(name='datastore_entity')\n", (864, 889), False, 'import pytest\n'), ((830, 846), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (844, 846), True, 'import unittest.mock as mock\n'), ((975, 991), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (989, 991), True, 'import unittest.mock as mock\n'), ((363, 416), 'unittest.mock.patch', 'mock.patch', (['"""virtual_dealer.api.store"""'], {'autospec': '(True)'}), "('virtual_dealer.api.store', autospec=True)\n", (373, 416), True, 'import unittest.mock as mock\n'), ((603, 662), 'unittest.mock.patch', 'mock.patch', (['"""virtual_dealer.store.datastore"""'], {'autospec': '(True)'}), "('virtual_dealer.store.datastore', autospec=True)\n", (613, 662), True, 'import unittest.mock as mock\n')] |
dslowikowski/commcare-hq | corehq/apps/fixtures/tests.py | ad8885cf8dab69dc85cb64f37aeaf06106124797 | from xml.etree import ElementTree
from casexml.apps.case.tests.util import check_xml_line_by_line
from casexml.apps.case.xml import V2
from corehq.apps.fixtures import fixturegenerators
from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \
FixtureItemField, FieldList
from corehq.apps.fixtures.views import update_tables
from corehq.apps.fixtures.exceptions import FixtureVersionError
from corehq.apps.users.models import CommCareUser
from django.test import TestCase
class FixtureDataTest(TestCase):
def setUp(self):
self.domain = 'qwerty'
self.tag = "district"
self.data_type = FixtureDataType(
domain=self.domain,
tag=self.tag,
name="Districts",
fields=[
FixtureTypeField(
field_name="state_name",
properties=[]
),
FixtureTypeField(
field_name="district_name",
properties=["lang"]
),
FixtureTypeField(
field_name="district_id",
properties=[]
)
],
item_attributes=[],
)
self.data_type.save()
self.data_item = FixtureDataItem(
domain=self.domain,
data_type_id=self.data_type.get_id,
fields= {
"state_name": FieldList(
field_list=[
FixtureItemField(
field_value="Delhi_state",
properties={}
)
]
),
"district_name": FieldList(
field_list=[
FixtureItemField(
field_value="Delhi_in_HIN",
properties={"lang": "hin"}
),
FixtureItemField(
field_value="Delhi_in_ENG",
properties={"lang": "eng"}
)
]
),
"district_id": FieldList(
field_list=[
FixtureItemField(
field_value="Delhi_id",
properties={}
)
]
)
},
item_attributes={},
)
self.data_item.save()
self.user = CommCareUser.create(self.domain, 'to_delete', '***')
self.fixture_ownership = FixtureOwnership(
domain=self.domain,
owner_id=self.user.get_id,
owner_type='user',
data_item_id=self.data_item.get_id
)
self.fixture_ownership.save()
def tearDown(self):
self.data_type.delete()
self.data_item.delete()
self.user.delete()
self.fixture_ownership.delete()
def test_xml(self):
check_xml_line_by_line(self, """
<district>
<state_name>Delhi_state</state_name>
<district_name lang="hin">Delhi_in_HIN</district_name>
<district_name lang="eng">Delhi_in_ENG</district_name>
<district_id>Delhi_id</district_id>
</district>
""", ElementTree.tostring(self.data_item.to_xml()))
def test_ownership(self):
self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False))
self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False))
fixture, = fixturegenerators.item_lists(self.user, V2)
check_xml_line_by_line(self, """
<fixture id="item-list:district" user_id="%s">
<district_list>
<district>
<state_name>Delhi_state</state_name>
<district_name lang="hin">Delhi_in_HIN</district_name>
<district_name lang="eng">Delhi_in_ENG</district_name>
<district_id>Delhi_id</district_id>
</district>
</district_list>
</fixture>
""" % self.user.user_id, ElementTree.tostring(fixture))
self.data_item.remove_user(self.user)
self.assertItemsEqual([], self.data_item.get_all_users())
self.fixture_ownership = self.data_item.add_user(self.user)
self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False))
def test_get_indexed_items(self):
with self.assertRaises(FixtureVersionError):
fixtures = FixtureDataItem.get_indexed_items(self.domain,
self.tag, 'state_name')
delhi_id = fixtures['Delhi_state']['district_id']
self.assertEqual(delhi_id, 'Delhi_id')
| [((2589, 2641), 'corehq.apps.users.models.CommCareUser.create', 'CommCareUser.create', (['self.domain', '"""to_delete"""', '"""***"""'], {}), "(self.domain, 'to_delete', '***')\n", (2608, 2641), 'from corehq.apps.users.models import CommCareUser\n'), ((2676, 2799), 'corehq.apps.fixtures.models.FixtureOwnership', 'FixtureOwnership', ([], {'domain': 'self.domain', 'owner_id': 'self.user.get_id', 'owner_type': '"""user"""', 'data_item_id': 'self.data_item.get_id'}), "(domain=self.domain, owner_id=self.user.get_id, owner_type=\n 'user', data_item_id=self.data_item.get_id)\n", (2692, 2799), 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, FixtureItemField, FieldList\n'), ((3689, 3732), 'corehq.apps.fixtures.fixturegenerators.item_lists', 'fixturegenerators.item_lists', (['self.user', 'V2'], {}), '(self.user, V2)\n', (3717, 3732), 'from corehq.apps.fixtures import fixturegenerators\n'), ((3529, 3575), 'corehq.apps.fixtures.models.FixtureDataItem.by_user', 'FixtureDataItem.by_user', (['self.user'], {'wrap': '(False)'}), '(self.user, wrap=False)\n', (3552, 3575), 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, FixtureItemField, FieldList\n'), ((4269, 4298), 'xml.etree.ElementTree.tostring', 'ElementTree.tostring', (['fixture'], {}), '(fixture)\n', (4289, 4298), 'from xml.etree import ElementTree\n'), ((4689, 4759), 'corehq.apps.fixtures.models.FixtureDataItem.get_indexed_items', 'FixtureDataItem.get_indexed_items', (['self.domain', 'self.tag', '"""state_name"""'], {}), "(self.domain, self.tag, 'state_name')\n", (4722, 4759), 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, FixtureItemField, FieldList\n'), ((814, 870), 'corehq.apps.fixtures.models.FixtureTypeField', 'FixtureTypeField', ([], {'field_name': '"""state_name"""', 'properties': '[]'}), "(field_name='state_name', properties=[])\n", (830, 870), 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, FixtureItemField, FieldList\n'), ((946, 1011), 'corehq.apps.fixtures.models.FixtureTypeField', 'FixtureTypeField', ([], {'field_name': '"""district_name"""', 'properties': "['lang']"}), "(field_name='district_name', properties=['lang'])\n", (962, 1011), 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, FixtureItemField, FieldList\n'), ((1087, 1144), 'corehq.apps.fixtures.models.FixtureTypeField', 'FixtureTypeField', ([], {'field_name': '"""district_id"""', 'properties': '[]'}), "(field_name='district_id', properties=[])\n", (1103, 1144), 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, FixtureItemField, FieldList\n'), ((1545, 1603), 'corehq.apps.fixtures.models.FixtureItemField', 'FixtureItemField', ([], {'field_value': '"""Delhi_state"""', 'properties': '{}'}), "(field_value='Delhi_state', properties={})\n", (1561, 1603), 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, FixtureItemField, FieldList\n'), ((1828, 1900), 'corehq.apps.fixtures.models.FixtureItemField', 'FixtureItemField', ([], {'field_value': '"""Delhi_in_HIN"""', 'properties': "{'lang': 'hin'}"}), "(field_value='Delhi_in_HIN', properties={'lang': 'hin'})\n", (1844, 1900), 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, FixtureItemField, FieldList\n'), ((2008, 2080), 'corehq.apps.fixtures.models.FixtureItemField', 'FixtureItemField', ([], {'field_value': '"""Delhi_in_ENG"""', 'properties': "{'lang': 'eng'}"}), "(field_value='Delhi_in_ENG', properties={'lang': 'eng'})\n", (2024, 2080), 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, FixtureItemField, FieldList\n'), ((2303, 2358), 'corehq.apps.fixtures.models.FixtureItemField', 'FixtureItemField', ([], {'field_value': '"""Delhi_id"""', 'properties': '{}'}), "(field_value='Delhi_id', properties={})\n", (2319, 2358), 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, FixtureItemField, FieldList\n')]
agarwalrounak/readthedocs.org | readthedocs/search/signals.py | 4911600c230809bd6fb3585d1903121db2928ad6 | # -*- coding: utf-8 -*-
"""We define custom Django signals to trigger before executing searches."""
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django_elasticsearch_dsl.apps import DEDConfig
from readthedocs.projects.models import HTMLFile, Project
from readthedocs.projects.signals import bulk_post_create, bulk_post_delete
from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es
@receiver(bulk_post_create, sender=HTMLFile)
def index_html_file(instance_list, **_):
"""Handle indexing from the build process."""
from readthedocs.search.documents import PageDocument
kwargs = {
'app_label': HTMLFile._meta.app_label,
'model_name': HTMLFile.__name__,
'document_class': str(PageDocument),
'objects_id': [obj.id for obj in instance_list],
}
# Do not index if autosync is disabled globally
if DEDConfig.autosync_enabled():
index_objects_to_es(**kwargs)
@receiver(bulk_post_delete, sender=HTMLFile)
def remove_html_file(instance_list, **_):
"""Remove deleted files from the build process."""
from readthedocs.search.documents import PageDocument
kwargs = {
'app_label': HTMLFile._meta.app_label,
'model_name': HTMLFile.__name__,
'document_class': str(PageDocument),
'objects_id': [obj.id for obj in instance_list],
}
# Do not index if autosync is disabled globally
if DEDConfig.autosync_enabled():
delete_objects_in_es(**kwargs)
@receiver(post_save, sender=Project)
def index_project_save(instance, *args, **kwargs):
"""
Save a Project instance based on the post_save signal.post_save.
This uses Celery to do it async, replacing how django-elasticsearch-dsl does
it.
"""
from readthedocs.search.documents import ProjectDocument
kwargs = {
'app_label': Project._meta.app_label,
'model_name': Project.__name__,
'document_class': str(ProjectDocument),
'objects_id': [instance.id],
}
# Do not index if autosync is disabled globally
if DEDConfig.autosync_enabled():
index_objects_to_es.delay(**kwargs)
@receiver(pre_delete, sender=Project)
def remove_project_delete(instance, *args, **kwargs):
from readthedocs.search.documents import ProjectDocument
kwargs = {
'app_label': Project._meta.app_label,
'model_name': Project.__name__,
'document_class': str(ProjectDocument),
'objects_id': [instance.id],
}
# Don't `delay` this because the objects will be deleted already
if DEDConfig.autosync_enabled():
delete_objects_in_es(**kwargs)
| [((466, 509), 'django.dispatch.receiver', 'receiver', (['bulk_post_create'], {'sender': 'HTMLFile'}), '(bulk_post_create, sender=HTMLFile)\n', (474, 509), False, 'from django.dispatch import receiver\n'), ((1001, 1044), 'django.dispatch.receiver', 'receiver', (['bulk_post_delete'], {'sender': 'HTMLFile'}), '(bulk_post_delete, sender=HTMLFile)\n', (1009, 1044), False, 'from django.dispatch import receiver\n'), ((1543, 1578), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Project'}), '(post_save, sender=Project)\n', (1551, 1578), False, 'from django.dispatch import receiver\n'), ((2195, 2231), 'django.dispatch.receiver', 'receiver', (['pre_delete'], {'sender': 'Project'}), '(pre_delete, sender=Project)\n', (2203, 2231), False, 'from django.dispatch import receiver\n'), ((930, 958), 'django_elasticsearch_dsl.apps.DEDConfig.autosync_enabled', 'DEDConfig.autosync_enabled', ([], {}), '()\n', (956, 958), False, 'from django_elasticsearch_dsl.apps import DEDConfig\n'), ((1471, 1499), 'django_elasticsearch_dsl.apps.DEDConfig.autosync_enabled', 'DEDConfig.autosync_enabled', ([], {}), '()\n', (1497, 1499), False, 'from django_elasticsearch_dsl.apps import DEDConfig\n'), ((2118, 2146), 'django_elasticsearch_dsl.apps.DEDConfig.autosync_enabled', 'DEDConfig.autosync_enabled', ([], {}), '()\n', (2144, 2146), False, 'from django_elasticsearch_dsl.apps import DEDConfig\n'), ((2616, 2644), 'django_elasticsearch_dsl.apps.DEDConfig.autosync_enabled', 'DEDConfig.autosync_enabled', ([], {}), '()\n', (2642, 2644), False, 'from django_elasticsearch_dsl.apps import DEDConfig\n'), ((968, 997), 'readthedocs.search.tasks.index_objects_to_es', 'index_objects_to_es', ([], {}), '(**kwargs)\n', (987, 997), False, 'from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es\n'), ((1509, 1539), 'readthedocs.search.tasks.delete_objects_in_es', 'delete_objects_in_es', ([], {}), '(**kwargs)\n', (1529, 1539), False, 'from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es\n'), ((2156, 2191), 'readthedocs.search.tasks.index_objects_to_es.delay', 'index_objects_to_es.delay', ([], {}), '(**kwargs)\n', (2181, 2191), False, 'from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es\n'), ((2654, 2684), 'readthedocs.search.tasks.delete_objects_in_es', 'delete_objects_in_es', ([], {}), '(**kwargs)\n', (2674, 2684), False, 'from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es\n')] |
kra-ts/falconpy | src/falconpy/_endpoint/_filevantage.py | c7c4ed93cb3b56cdfd86757f573fde57e4ccf857 | """Internal API endpoint constant library.
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
_filevantage_endpoints = [
[
"getChanges",
"GET",
"/filevantage/entities/changes/v2",
"Retrieve information on changes",
"filevantage",
[
{
"type": "array",
"items": {
"type": "string"
},
"collectionFormat": "multi",
"description": "Comma separated values of change ids",
"name": "ids",
"in": "query",
"required": True
}
]
],
[
"queryChanges",
"GET",
"/filevantage/queries/changes/v2",
"Returns one or more change IDs",
"filevantage",
[
{
"minimum": 0,
"type": "integer",
"description": "The first change index to return in the response. "
"If not provided it will default to '0'. "
"Use with the `limit` parameter to manage pagination of results.",
"name": "offset",
"in": "query"
},
{
"type": "integer",
"description": "The maximum number of changes to return in the response "
"(default: 100; max: 500). "
"Use with the `offset` parameter to manage pagination of results",
"name": "limit",
"in": "query"
},
{
"type": "string",
"description": "Sort changes using options like:\n\n"
"- `action_timestamp` (timestamp of the change occurrence) \n\n "
"Sort either `asc` (ascending) or `desc` (descending). "
"For example: `action_timestamp|asc`.\n"
"The full list of allowed sorting options can be reviewed in our API documentation.",
"name": "sort",
"in": "query"
},
{
"type": "string",
"description": "Filter changes using a query in Falcon Query Language (FQL). \n\n"
"Common filter options include:\n\n - `host.host_name`\n - `action_timestamp`\n\n "
"The full list of allowed filter parameters can be reviewed in our API documentation.",
"name": "filter",
"in": "query"
}
]
]
]
| [] |
JE-Chen/je_old_repo | TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/cli/progress_bars.py | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | import itertools
import sys
from signal import SIGINT, default_int_handler, signal
from typing import Any, Dict, List
from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar
from pip._vendor.progress.spinner import Spinner
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.logging import get_indentation
from pip._internal.utils.misc import format_size
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
def _select_progress_class(preferred, fallback):
# type: (Bar, Bar) -> Bar
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", ""),
getattr(preferred, "fill", ""),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
"".join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
_BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any
class InterruptibleMixin:
"""
Helper to ensure that self.finish() gets called on keyboard interrupt.
This allows downloads to be interrupted without leaving temporary state
(like hidden cursors) behind.
This class is similar to the progress library's existing SigIntMixin
helper, but as of version 1.2, that helper has the following problems:
1. It calls sys.exit().
2. It discards the existing SIGINT handler completely.
3. It leaves its own handler in place even after an uninterrupted finish,
which will have unexpected delayed effects if the user triggers an
unrelated keyboard interrupt some time after a progress-displaying
download has already completed, for example.
"""
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
"""
Save the original SIGINT handler for later.
"""
# https://github.com/python/mypy/issues/5887
super().__init__(*args, **kwargs) # type: ignore
self.original_handler = signal(SIGINT, self.handle_sigint)
# If signal() returns None, the previous handler was not installed from
# Python, and we cannot restore it. This probably should not happen,
# but if it does, we must restore something sensible instead, at least.
# The least bad option should be Python's default SIGINT handler, which
# just raises KeyboardInterrupt.
if self.original_handler is None:
self.original_handler = default_int_handler
def finish(self):
# type: () -> None
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super().finish() # type: ignore
signal(SIGINT, self.original_handler)
def handle_sigint(self, signum, frame): # type: ignore
"""
Call self.finish() before delegating to the original SIGINT handler.
This handler should only be in place while the progress display is
active.
"""
self.finish()
self.original_handler(signum, frame)
class SilentBar(Bar):
def update(self):
# type: () -> None
pass
class BlueEmojiBar(IncrementalBar):
suffix = "%(percent)d%%"
bar_prefix = " "
bar_suffix = " "
phases = ("\U0001F539", "\U0001F537", "\U0001F535")
class DownloadProgressMixin:
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
# https://github.com/python/mypy/issues/5887
super().__init__(*args, **kwargs) # type: ignore
self.message = (" " * (get_indentation() + 2)) + self.message # type: str
@property
def downloaded(self):
# type: () -> str
return format_size(self.index) # type: ignore
@property
def download_speed(self):
# type: () -> str
# Avoid zero division errors...
if self.avg == 0.0: # type: ignore
return "..."
return format_size(1 / self.avg) + "/s" # type: ignore
@property
def pretty_eta(self):
# type: () -> str
if self.eta: # type: ignore
return f"eta {self.eta_td}" # type: ignore
return ""
def iter(self, it): # type: ignore
for x in it:
yield x
# B305 is incorrectly raised here
# https://github.com/PyCQA/flake8-bugbear/issues/59
self.next(len(x)) # noqa: B305
self.finish()
class WindowsMixin:
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor: # type: ignore
self.hide_cursor = False
# https://github.com/python/mypy/issues/5887
super().__init__(*args, **kwargs) # type: ignore
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file) # type: ignore
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar):
pass
class DownloadSilentBar(BaseDownloadProgressBar, SilentBar):
pass
class DownloadBar(BaseDownloadProgressBar, Bar):
pass
class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar):
pass
class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar):
pass
class DownloadProgressSpinner(
WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner
):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
def next_phase(self):
# type: () -> str
if not hasattr(self, "_phaser"):
self._phaser = itertools.cycle(self.phases)
return next(self._phaser)
def update(self):
# type: () -> None
message = self.message % self
phase = self.next_phase()
suffix = self.suffix % self
line = "".join(
[
message,
" " if message else "",
phase,
" " if suffix else "",
suffix,
]
)
self.writeln(line)
BAR_TYPES = {
"off": (DownloadSilentBar, DownloadSilentBar),
"on": (DefaultDownloadProgressBar, DownloadProgressSpinner),
"ascii": (DownloadBar, DownloadProgressSpinner),
"pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner),
"emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner),
}
def DownloadProgressProvider(progress_bar, max=None): # type: ignore
if max is None or max == 0:
return BAR_TYPES[progress_bar][1]().iter
else:
return BAR_TYPES[progress_bar][0](max=max).iter
| [((2720, 2754), 'signal.signal', 'signal', (['SIGINT', 'self.handle_sigint'], {}), '(SIGINT, self.handle_sigint)\n', (2726, 2754), False, 'from signal import SIGINT, default_int_handler, signal\n'), ((3534, 3571), 'signal.signal', 'signal', (['SIGINT', 'self.original_handler'], {}), '(SIGINT, self.original_handler)\n', (3540, 3571), False, 'from signal import SIGINT, default_int_handler, signal\n'), ((4582, 4605), 'pip._internal.utils.misc.format_size', 'format_size', (['self.index'], {}), '(self.index)\n', (4593, 4605), False, 'from pip._internal.utils.misc import format_size\n'), ((4825, 4850), 'pip._internal.utils.misc.format_size', 'format_size', (['(1 / self.avg)'], {}), '(1 / self.avg)\n', (4836, 4850), False, 'from pip._internal.utils.misc import format_size\n'), ((6320, 6351), 'pip._vendor.colorama.AnsiToWin32', 'colorama.AnsiToWin32', (['self.file'], {}), '(self.file)\n', (6340, 6351), False, 'from pip._vendor import colorama\n'), ((7796, 7824), 'itertools.cycle', 'itertools.cycle', (['self.phases'], {}), '(self.phases)\n', (7811, 7824), False, 'import itertools\n'), ((4443, 4460), 'pip._internal.utils.logging.get_indentation', 'get_indentation', ([], {}), '()\n', (4458, 4460), False, 'from pip._internal.utils.logging import get_indentation\n')] |
kzbnb/numerical_bugs | scripts/study_case/ID_5/matchzoo/auto/tuner/tune.py | bc22e72bcc06df6ce7889a25e0aeed027bde910b | import typing
import numpy as np
import scripts.study_case.ID_5.matchzoo as mz
from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric
from .tuner import Tuner
def tune(
params: 'mz.ParamTable',
optimizer: str = 'adam',
trainloader: mz.dataloader.DataLoader = None,
validloader: mz.dataloader.DataLoader = None,
embedding: np.ndarray = None,
fit_kwargs: dict = None,
metric: typing.Union[str, BaseMetric] = None,
mode: str = 'maximize',
num_runs: int = 10,
verbose=1
):
"""
Tune model hyper-parameters.
A simple shorthand for using :class:`matchzoo.auto.Tuner`.
`model.params.hyper_space` reprensents the model's hyper-parameters
search space, which is the cross-product of individual hyper parameter's
hyper space. When a `Tuner` builds a model, for each hyper parameter in
`model.params`, if the hyper-parameter has a hyper-space, then a sample
will be taken in the space. However, if the hyper-parameter does not
have a hyper-space, then the default value of the hyper-parameter will
be used.
See `tutorials/model_tuning.ipynb` for a detailed walkthrough on usage.
:param params: A completed parameter table to tune. Usually `model.params`
of the desired model to tune. `params.completed()` should be `True`.
:param optimizer: Str or `Optimizer` class. Optimizer for optimizing model.
:param trainloader: Training data to use. Should be a `DataLoader`.
:param validloader: Testing data to use. Should be a `DataLoader`.
:param embedding: Embedding used by model.
:param fit_kwargs: Extra keyword arguments to pass to `fit`.
(default: `dict(epochs=10, verbose=0)`)
:param metric: Metric to tune upon. Must be one of the metrics in
`model.params['task'].metrics`. (default: the first metric in
`params.['task'].metrics`.
:param mode: Either `maximize` the metric or `minimize` the metric.
(default: 'maximize')
:param num_runs: Number of runs. Each run takes a sample in
`params.hyper_space` and build a model based on the sample.
(default: 10)
:param callbacks: A list of callbacks to handle. Handled sequentially
at every callback point.
:param verbose: Verbosity. (default: 1)
Example:
>>> import scripts.study_case.ID_5.matchzoo as mz
>>> import numpy as np
>>> train = mz.datasets.toy.load_data('train')
>>> valid = mz.datasets.toy.load_data('dev')
>>> prpr = mz.models.DenseBaseline.get_default_preprocessor()
>>> train = prpr.fit_transform(train, verbose=0)
>>> valid = prpr.transform(valid, verbose=0)
>>> trainset = mz.dataloader.Dataset(train)
>>> validset = mz.dataloader.Dataset(valid)
>>> padding = mz.models.DenseBaseline.get_default_padding_callback()
>>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding)
>>> validloader = mz.dataloader.DataLoader(validset, callback=padding)
>>> model = mz.models.DenseBaseline()
>>> model.params['task'] = mz.tasks.Ranking()
>>> optimizer = 'adam'
>>> embedding = np.random.uniform(-0.2, 0.2,
... (prpr.context['vocab_size'], 100))
>>> tuner = mz.auto.Tuner(
... params=model.params,
... optimizer=optimizer,
... trainloader=trainloader,
... validloader=validloader,
... embedding=embedding,
... num_runs=1,
... verbose=0
... )
>>> results = tuner.tune()
>>> sorted(results['best'].keys())
['#', 'params', 'sample', 'score']
"""
tuner = Tuner(
params=params,
optimizer=optimizer,
trainloader=trainloader,
validloader=validloader,
embedding=embedding,
fit_kwargs=fit_kwargs,
metric=metric,
mode=mode,
num_runs=num_runs,
verbose=verbose
)
return tuner.tune()
| [] |
maxgold/icml22 | libs/gym/tests/wrappers/test_pixel_observation.py | 49f026dd2314091639b52f5b8364a29e8000b738 | """Tests for the pixel observation wrapper."""
from typing import Optional
import pytest
import numpy as np
import gym
from gym import spaces
from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY
class FakeEnvironment(gym.Env):
def __init__(self):
self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32)
def render(self, width=32, height=32, *args, **kwargs):
del args
del kwargs
image_shape = (height, width, 3)
return np.zeros(image_shape, dtype=np.uint8)
def reset(self, seed: Optional[int] = None):
super().reset(seed=seed)
observation = self.observation_space.sample()
return observation
def step(self, action):
del action
observation = self.observation_space.sample()
reward, terminal, info = 0.0, False, {}
return observation, reward, terminal, info
class FakeArrayObservationEnvironment(FakeEnvironment):
def __init__(self, *args, **kwargs):
self.observation_space = spaces.Box(
shape=(2,), low=-1, high=1, dtype=np.float32
)
super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs)
class FakeDictObservationEnvironment(FakeEnvironment):
def __init__(self, *args, **kwargs):
self.observation_space = spaces.Dict(
{
"state": spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
)
super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs)
class TestPixelObservationWrapper(object):
@pytest.mark.parametrize("pixels_only", (True, False))
def test_dict_observation(self, pixels_only):
pixel_key = "rgb"
env = FakeDictObservationEnvironment()
# Make sure we are testing the right environment for the test.
observation_space = env.observation_space
assert isinstance(observation_space, spaces.Dict)
width, height = (320, 240)
# The wrapper should only add one observation.
wrapped_env = PixelObservationWrapper(
env,
pixel_keys=(pixel_key,),
pixels_only=pixels_only,
render_kwargs={pixel_key: {"width": width, "height": height}},
)
assert isinstance(wrapped_env.observation_space, spaces.Dict)
if pixels_only:
assert len(wrapped_env.observation_space.spaces) == 1
assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key]
else:
assert (
len(wrapped_env.observation_space.spaces)
== len(observation_space.spaces) + 1
)
expected_keys = list(observation_space.spaces.keys()) + [pixel_key]
assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys
# Check that the added space item is consistent with the added observation.
observation = wrapped_env.reset()
rgb_observation = observation[pixel_key]
assert rgb_observation.shape == (height, width, 3)
assert rgb_observation.dtype == np.uint8
@pytest.mark.parametrize("pixels_only", (True, False))
def test_single_array_observation(self, pixels_only):
pixel_key = "depth"
env = FakeArrayObservationEnvironment()
observation_space = env.observation_space
assert isinstance(observation_space, spaces.Box)
wrapped_env = PixelObservationWrapper(
env, pixel_keys=(pixel_key,), pixels_only=pixels_only
)
wrapped_env.observation_space = wrapped_env.observation_space
assert isinstance(wrapped_env.observation_space, spaces.Dict)
if pixels_only:
assert len(wrapped_env.observation_space.spaces) == 1
assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key]
else:
assert len(wrapped_env.observation_space.spaces) == 2
assert list(wrapped_env.observation_space.spaces.keys()) == [
STATE_KEY,
pixel_key,
]
observation = wrapped_env.reset()
depth_observation = observation[pixel_key]
assert depth_observation.shape == (32, 32, 3)
assert depth_observation.dtype == np.uint8
if not pixels_only:
assert isinstance(observation[STATE_KEY], np.ndarray)
| [((1604, 1657), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pixels_only"""', '(True, False)'], {}), "('pixels_only', (True, False))\n", (1627, 1657), False, 'import pytest\n'), ((3140, 3193), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pixels_only"""', '(True, False)'], {}), "('pixels_only', (True, False))\n", (3163, 3193), False, 'import pytest\n'), ((308, 364), 'gym.spaces.Box', 'spaces.Box', ([], {'shape': '(1,)', 'low': '(-1)', 'high': '(1)', 'dtype': 'np.float32'}), '(shape=(1,), low=-1, high=1, dtype=np.float32)\n', (318, 364), False, 'from gym import spaces\n'), ((518, 555), 'numpy.zeros', 'np.zeros', (['image_shape'], {'dtype': 'np.uint8'}), '(image_shape, dtype=np.uint8)\n', (526, 555), True, 'import numpy as np\n'), ((1053, 1109), 'gym.spaces.Box', 'spaces.Box', ([], {'shape': '(2,)', 'low': '(-1)', 'high': '(1)', 'dtype': 'np.float32'}), '(shape=(2,), low=-1, high=1, dtype=np.float32)\n', (1063, 1109), False, 'from gym import spaces\n'), ((2076, 2222), 'gym.wrappers.pixel_observation.PixelObservationWrapper', 'PixelObservationWrapper', (['env'], {'pixel_keys': '(pixel_key,)', 'pixels_only': 'pixels_only', 'render_kwargs': "{pixel_key: {'width': width, 'height': height}}"}), "(env, pixel_keys=(pixel_key,), pixels_only=\n pixels_only, render_kwargs={pixel_key: {'width': width, 'height': height}})\n", (2099, 2222), False, 'from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY\n'), ((3459, 3537), 'gym.wrappers.pixel_observation.PixelObservationWrapper', 'PixelObservationWrapper', (['env'], {'pixel_keys': '(pixel_key,)', 'pixels_only': 'pixels_only'}), '(env, pixel_keys=(pixel_key,), pixels_only=pixels_only)\n', (3482, 3537), False, 'from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY\n'), ((1394, 1450), 'gym.spaces.Box', 'spaces.Box', ([], {'shape': '(2,)', 'low': '(-1)', 'high': '(1)', 'dtype': 'np.float32'}), '(shape=(2,), low=-1, high=1, dtype=np.float32)\n', (1404, 1450), False, 'from gym import spaces\n')] |
MuAuan/Scipy-Swan | real_plot_fft_stft_impl.py | 2d79175e8fc2ab8179ea95e1b22918c29d88b7b5 | import pyaudio
import wave
from scipy.fftpack import fft, ifft
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import signal
from swan import pycwt
CHUNK = 1024
FORMAT = pyaudio.paInt16 # 16-bit integer samples
CHANNELS = 1 # 1: monaural, 2: stereo
RATE = 22100 # sampling rate in Hz (22.1 kHz; 44.1 kHz also common)
RECORD_SECONDS = 5 # record 5 seconds per take
WAVE_OUTPUT_FILENAME = "output2.wav"
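# Note: with CHUNK = 1024 frames per read at RATE = 22100 Hz, the capture loop
# below performs int(RATE / CHUNK * RECORD_SECONDS) ~= 107 reads per 5-second take.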
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
s=1
# initialize the figure
fig = plt.figure(figsize=(12, 10))
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
ax2.axis([0, 5, 200,20000])
ax2.set_yscale('log')
while True:
fig.delaxes(ax1)
fig.delaxes(ax3)
ax1 = fig.add_subplot(311)
ax3 = fig.add_subplot(313)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
wavfile = WAVE_OUTPUT_FILENAME
wr = wave.open(wavfile, "rb")
ch = CHANNELS #wr.getnchannels()
width = p.get_sample_size(FORMAT) #wr.getsampwidth()
fr = RATE #wr.getframerate()
fn = wr.getnframes()
fs = fn / fr
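    # note: despite the name, fs here is the clip duration in seconds (frames / frame rate), not a sampling frequency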
origin = wr.readframes(wr.getnframes())
data = origin[:fn]
wr.close()
sig = np.frombuffer(data, dtype="int16") /32768.0
    t = np.linspace(0, fs, int(fn / 2), endpoint=False)  # np.linspace needs an integer sample count
ax1.axis([0, 5, -0.0075,0.0075])
ax1.plot(t, sig)
nperseg = 256
f, t, Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg)
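    # signal.stft returns the frequency bins f, the segment times t and the complex
    # STFT matrix Zxx; np.abs(Zxx) below is the magnitude drawn on the spectrogram.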
ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv')
    freq = fft(sig, int(fn/2))
Pyy = np.sqrt(freq*freq.conj())*2/fn
f = np.arange(int(fn/2))
ax3.axis([200, 20000, 0,0.000075])
ax3.set_xscale('log')
ax3.plot(f,Pyy)
plt.pause(1)
plt.savefig('figure'+str(s)+'.png')
s += 1
| [((383, 400), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (398, 400), False, 'import pyaudio\n'), ((572, 600), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (582, 600), True, 'import matplotlib.pyplot as plt\n'), ((1057, 1094), 'wave.open', 'wave.open', (['WAVE_OUTPUT_FILENAME', '"""wb"""'], {}), "(WAVE_OUTPUT_FILENAME, 'wb')\n", (1066, 1094), False, 'import wave\n'), ((1295, 1319), 'wave.open', 'wave.open', (['wavfile', '"""rb"""'], {}), "(wavfile, 'rb')\n", (1304, 1319), False, 'import wave\n'), ((1641, 1683), 'numpy.linspace', 'np.linspace', (['(0)', 'fs', '(fn / 2)'], {'endpoint': '(False)'}), '(0, fs, fn / 2, endpoint=False)\n', (1652, 1683), True, 'import numpy as np\n'), ((1779, 1829), 'scipy.signal.stft', 'signal.stft', (['sig'], {'fs': '(fs * fn / 50)', 'nperseg': 'nperseg'}), '(sig, fs=fs * fn / 50, nperseg=nperseg)\n', (1790, 1829), False, 'from scipy import signal\n'), ((2071, 2083), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (2080, 2083), True, 'import matplotlib.pyplot as plt\n'), ((1588, 1622), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': '"""int16"""'}), "(data, dtype='int16')\n", (1601, 1622), True, 'import numpy as np\n'), ((1853, 1864), 'numpy.abs', 'np.abs', (['Zxx'], {}), '(Zxx)\n', (1859, 1864), True, 'import numpy as np\n')] |
gengxf0505/pxt | tests/pydecompile-test/baselines/events_in_code_blocks.py | eca93a0e0605e68adcfbebce778cc5912a10efcf | #/ <reference path="./testBlocks/mb.ts" />
def function_0():
basic.showNumber(7)
basic.forever(function_0) | [] |
l756302098/ros_practice | PID/PDControl.py | 4da8b4ddb25ada2e6f1adb3c0f8b34576aedf6b7 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import random
import numpy as np
import matplotlib.pyplot as plt
class Robot(object):
def __init__(self, length=20.0):
"""
        Creates robot and initializes location/orientation to 0, 0, 0.
"""
self.x = 0.0
self.y = 0.0
self.orientation = 0.0
        self.length = length
self.steering_noise = 0.0
self.distance_noise = 0.0
self.steering_drift = 0.0
def set(self, x,y, orientation):
"""
        Sets a robot coordinate.
"""
self.x = x
self.y = y
self.orientation = orientation % (2.0 * np.pi)
def set_noise(self, steering_noise, distance_noise):
"""
        Sets the noise parameters.
        """
        # makes it possible to change the noise parameters
        # this is often useful in particle filters
self.steering_noise = steering_noise
self.distance_noise = distance_noise
def set_steering_drift(self, drift):
"""
        Sets the systematic steering drift parameter
"""
self.steering_drift = drift
def move(self,steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0):
"""
        steering = front wheel steering angle, limited by max_steering_angle
        distance = total distance driven, must be non-negative
"""
        if steering > max_steering_angle:
            steering = max_steering_angle
        if steering < -max_steering_angle:
            steering = -max_steering_angle
        if distance < 0.0:
            distance = 0.0
# apply noise
        steering2 = random.gauss(steering, self.steering_noise)
        distance2 = random.gauss(distance, self.distance_noise)
        # apply steering drift
        steering2 += self.steering_drift
        # Execute motion
        turn = np.tan(steering2) * distance2 / self.length
        if abs(turn) < tolerance:
            # approximate by straight line motion
            self.x += distance2 * np.cos(self.orientation)
            self.y += distance2 * np.sin(self.orientation)
self.orientation = (self.orientation + turn) % (2.0 * np.pi)
else:
            # approximate bicycle model for motion
            radius = distance2 / turn
            cx = self.x - (np.sin(self.orientation) * radius)
            cy = self.y + (np.cos(self.orientation) * radius)
            self.orientation = (self.orientation + turn) % (2.0 * np.pi)
            self.x = cx + (np.sin(self.orientation) * radius)
            self.y = cy - (np.cos(self.orientation) * radius)
def __repr__(self):
        return '[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation)
def run_p(robot, tau, n=100, speed=1.0):
x_trajectory = []
y_trajectory = []
for i in range(n):
cte = robot.y
        steer = -tau * cte
robot.move(steer, speed)
x_trajectory.append(robot.x)
y_trajectory.append(robot.y)
return x_trajectory, y_trajectory
robot = Robot()
robot.set(0, 1, 0)
robot.set_noise(0.1,0.05)
def run(robot, tau_p, tau_d, n=100, speed=1.0):
x_trajectory = []
y_trajectory = []
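    # CTE is the signed distance from the robot to the straight reference line defined
    # by its starting pose; the PD law below adds a derivative term that damps the
    # oscillation a pure P controller (run_p above) shows. The time step is implicitly 1,
    # so diff_CTE is simply the change in CTE between consecutive iterations.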
    # steering = -tau_p * CTE - tau_d * diff_CTE
    crosstrack_error = []
crosstrack_error.append(0.0)
diff_CTE = 0.0
startX = robot.x
startY = robot.y
    startOrientation = robot.orientation
distance = 0.0
for i in range(n):
        steering = -tau_p * crosstrack_error[i] - tau_d * diff_CTE
        distance = speed
robot.move(steering, distance)
x_trajectory.append(robot.x)
y_trajectory.append(robot.y)
        # when on the original path, x = robot.x; calculate the corresponding y
x1 = robot.x
        y1 = startY + (x1 - startX) * np.tan(startOrientation)
        crosstrack = (robot.y - y1) * np.cos(startOrientation)
crosstrack_error.append(crosstrack)
        diff_CTE = crosstrack_error[i+1] - crosstrack_error[i]
print("{} [{}, {}] {}, {}".format(i,robot.x, robot.y,steering, crosstrack))
return x_trajectory, y_trajectory
x_trajectory, y_trajectory = run(robot, 0.1, 1.0)
n = len(x_trajectory)
fig, ax1 = plt.subplots(1, 1, figsize=(8, 8))
ax1.plot(x_trajectory, y_trajectory, 'g', label='PD controller')
ax1.plot(x_trajectory, np.zeros(n), 'r', label='reference')
plt.show()
| [((3974, 4008), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 8)'}), '(1, 1, figsize=(8, 8))\n', (3986, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4136, 4146), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4144, 4146), True, 'import matplotlib.pyplot as plt\n'), ((4098, 4109), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (4106, 4109), True, 'import numpy as np\n'), ((1552, 1595), 'random.gauss', 'random.gauss', (['steering', 'self.steering_noise'], {}), '(steering, self.steering_noise)\n', (1564, 1595), False, 'import random\n'), ((1613, 1656), 'random.gauss', 'random.gauss', (['distance', 'self.distance_noise'], {}), '(distance, self.distance_noise)\n', (1625, 1656), False, 'import random\n'), ((3626, 3650), 'numpy.cos', 'np.cos', (['startOrientation'], {}), '(startOrientation)\n', (3632, 3650), True, 'import numpy as np\n'), ((1755, 1772), 'numpy.tan', 'np.tan', (['steering2'], {}), '(steering2)\n', (1761, 1772), True, 'import numpy as np\n'), ((1906, 1930), 'numpy.cos', 'np.cos', (['self.orientation'], {}), '(self.orientation)\n', (1912, 1930), True, 'import numpy as np\n'), ((1960, 1984), 'numpy.sin', 'np.sin', (['self.orientation'], {}), '(self.orientation)\n', (1966, 1984), True, 'import numpy as np\n'), ((3566, 3590), 'numpy.tan', 'np.tan', (['startOrientation'], {}), '(startOrientation)\n', (3572, 3590), True, 'import numpy as np\n'), ((2167, 2191), 'numpy.sin', 'np.sin', (['self.orientation'], {}), '(self.orientation)\n', (2173, 2191), True, 'import numpy as np\n'), ((2224, 2248), 'numpy.cos', 'np.cos', (['self.orientation'], {}), '(self.orientation)\n', (2230, 2248), True, 'import numpy as np\n'), ((2350, 2374), 'numpy.sin', 'np.sin', (['self.orientation'], {}), '(self.orientation)\n', (2356, 2374), True, 'import numpy as np\n'), ((2407, 2431), 'numpy.cos', 'np.cos', (['self.orientation'], {}), '(self.orientation)\n', (2413, 2431), True, 'import numpy as np\n')] |
yoshitomo-matsubara/vision | torchvision/datasets/samplers/__init__.py | 03d11338f3faf94a0749549912593ddb8b70be17 | from .clip_sampler import DistributedSampler, UniformClipSampler, RandomClipSampler
__all__ = ("DistributedSampler", "UniformClipSampler", "RandomClipSampler")
| [] |
agustinhenze/mibs.snmplabs.com | pysnmp/HH3C-PPPOE-SERVER-MIB.py | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | #
# PySNMP MIB module HH3C-PPPOE-SERVER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-PPPOE-SERVER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:16:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
hh3cCommon, = mibBuilder.importSymbols("HH3C-OID-MIB", "hh3cCommon")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, Integer32, IpAddress, NotificationType, Unsigned32, iso, MibIdentifier, Counter64, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity, Bits, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Integer32", "IpAddress", "NotificationType", "Unsigned32", "iso", "MibIdentifier", "Counter64", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity", "Bits", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hh3cPPPoEServer = ModuleIdentity((1, 3, 6, 1, 4, 1, 25506, 2, 102))
hh3cPPPoEServer.setRevisions(('2009-05-06 00:00',))
if mibBuilder.loadTexts: hh3cPPPoEServer.setLastUpdated('200905060000Z')
if mibBuilder.loadTexts: hh3cPPPoEServer.setOrganization('Hangzhou H3C Technologies Co., Ltd.')
hh3cPPPoEServerObject = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1))
hh3cPPPoEServerMaxSessions = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerMaxSessions.setStatus('current')
hh3cPPPoEServerCurrSessions = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerCurrSessions.setStatus('current')
hh3cPPPoEServerAuthRequests = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerAuthRequests.setStatus('current')
hh3cPPPoEServerAuthSuccesses = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerAuthSuccesses.setStatus('current')
hh3cPPPoEServerAuthFailures = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerAuthFailures.setStatus('current')
hh3cPPPoESAbnormOffsThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffsThreshold.setStatus('current')
hh3cPPPoESAbnormOffPerThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffPerThreshold.setStatus('current')
hh3cPPPoESNormOffPerThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cPPPoESNormOffPerThreshold.setStatus('current')
hh3cPPPoEServerTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2))
hh3cPPPoeServerTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0))
hh3cPPPoESAbnormOffsAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 1))
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffsAlarm.setStatus('current')
hh3cPPPoESAbnormOffPerAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 2))
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffPerAlarm.setStatus('current')
hh3cPPPoESNormOffPerAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 3))
if mibBuilder.loadTexts: hh3cPPPoESNormOffPerAlarm.setStatus('current')
mibBuilder.exportSymbols("HH3C-PPPOE-SERVER-MIB", hh3cPPPoEServerMaxSessions=hh3cPPPoEServerMaxSessions, hh3cPPPoEServerObject=hh3cPPPoEServerObject, hh3cPPPoeServerTrapPrefix=hh3cPPPoeServerTrapPrefix, hh3cPPPoEServerAuthFailures=hh3cPPPoEServerAuthFailures, hh3cPPPoEServer=hh3cPPPoEServer, PYSNMP_MODULE_ID=hh3cPPPoEServer, hh3cPPPoESAbnormOffsAlarm=hh3cPPPoESAbnormOffsAlarm, hh3cPPPoEServerAuthRequests=hh3cPPPoEServerAuthRequests, hh3cPPPoEServerAuthSuccesses=hh3cPPPoEServerAuthSuccesses, hh3cPPPoESNormOffPerThreshold=hh3cPPPoESNormOffPerThreshold, hh3cPPPoEServerCurrSessions=hh3cPPPoEServerCurrSessions, hh3cPPPoEServerTraps=hh3cPPPoEServerTraps, hh3cPPPoESAbnormOffPerThreshold=hh3cPPPoESAbnormOffPerThreshold, hh3cPPPoESAbnormOffPerAlarm=hh3cPPPoESAbnormOffPerAlarm, hh3cPPPoESAbnormOffsThreshold=hh3cPPPoESAbnormOffsThreshold, hh3cPPPoESNormOffPerAlarm=hh3cPPPoESNormOffPerAlarm)
| [] |
suhaili99/python-share | Pyshare2019/02 - if + Nesteed if/Nesteed-IF.py | 6c65faaff722b8bd9e381650a6b277f56d1ae4c9 | name = input("enter the buyer's name = ")
alamat= input("Address = ")
NoTelp = input("Phone no. = ")
print("\n")
print("=================JAYA ABADI DEALER CAR PRICE INFORMATION===============")
print("Choose a car type :")
print("\t 1.Daihatsu ")
print("\t 2.Honda ")
print("\t 3.Toyota ")
print("")
pilihan = int(input("Choose the type of car you want to buy : "))
print("")
if (pilihan==1):
    print("<<<<<<<< Daihatsu car models >>>>>>>>>")
    print("\ta.Grand New Xenia")
    print("\tb.All New Terios")
    print("\tc.New Ayla")
    Pilih1 = input("Which one would you like to choose ?? = ")
    if(Pilih1 == "a"):
        print("The price of the Grand New Xenia is 183 million ")
    elif(Pilih1== "b"):
        print("The price of the All New Terios is 215 million")
    elif(Pilih1== "c"):
        print("The price of the New Ayla is 110 million")
    else:
        print("Undefined")
elif (pilihan==2):
    print("<<<<<<<< Honda car models >>>>>>>>>")
    print("\ta.Honda Brio Satya S")
    print("\tb.Honda Jazz ")
    print("\tc.Honda Mobilio ")
    pilih2 = input("Which one would you like to choose??")
    if(pilih2=="a"):
        print("The price of the Honda Brio Satya S is 131 million")
    elif(pilih2=="b"):
        print("The price of the Honda Jazz is 232 million")
    elif(pilih2=="c"):
        print("The price of the Honda Mobilio is 189 million")
    else:
        print("Undefined")
elif (pilihan==3):
    print("<<<<<<<< Toyota car models >>>>>>>>>")
    print("\ta.Alphard")
    print("\tb.Camry")
    print("\tc.Fortuner")
    pilih3 = input("Which one would you like to choose??")
    if (pilih3=="a"):
        print("The price of the Alphard is 870 million")
    elif (pilih3=="b"):
        print("The price of the Camry is 560 million")
    elif (pilih3=="c"):
        print("The price of the Fortuner is 492 million")
| [] |
wanghongsheng01/framework_enflame | oneflow/python/test/ops/test_l1loss.py | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import numpy as np
import oneflow.typing as tp
from test_util import GenArgList
import unittest
from collections import OrderedDict
from typing import Dict
import os
def _compare_l1loss_with_np(
input_shape, target_shape, device_type, machine_ids, device_counts
):
input = np.random.random(size=input_shape).astype(np.float32)
target = np.random.random(size=target_shape).astype(np.float32)
assert device_type in ["cpu", "gpu"]
func_config = flow.FunctionConfig()
flow.clear_default_session()
if device_type == "cpu":
flow.config.cpu_device_num(device_counts)
else:
flow.config.gpu_device_num(device_counts)
func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))
func_config.default_logical_view(flow.scope.consistent_view())
def np_l1loss(np_input, np_target):
np_l1 = np.abs(np_target - np_input)
np_l1_mean = np.mean(np_l1)
np_l1_sum = np.sum(np_l1)
np_l1_dict = {
"np_l1_loss": np_l1,
"np_l1_loss_mean": np_l1_mean,
"np_l1_loss_sum": np_l1_sum,
}
return np_l1_dict
def np_l1_loss_diff(np_input, np_target):
# Use numpy to compute diff
original_shape = np_target.shape
elemcnt = np_target.size
prediction = np_input.reshape(-1)
label = np_target.reshape(-1)
prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype)
for i in np.arange(elemcnt):
diff = prediction[i] - label[i]
prediction_grad[i] = np.sign(diff)
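        # d|prediction - label| / d(prediction) = sign(prediction - label); dividing by
        # elemcnt below gives the gradient that matches reduction="mean".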
grad_mean = prediction_grad.reshape(original_shape) / elemcnt
        # TODO: if you want to get the grad when the reduction = "sum", you can use the following code
# grad_sum = prediction_grad.reshape(original_shape)
grad_dict = {
"np_grad_mean": grad_mean,
}
return grad_dict
# Use Numpy to compute l1 loss
np_out_l1loss_dict = np_l1loss(input, target)
# Use Numpy to compute l1 grad
np_grad_dict = np_l1_loss_diff(input, target)
def assert_prediction_grad(blob: tp.Numpy):
# Evaluate the gradient. Here we only test the reduction type == "mean"
assert np.allclose(blob, np_grad_dict["np_grad_mean"])
@flow.global_function(type="train", function_config=func_config)
def oneflow_l1loss(
of_input: tp.Numpy.Placeholder(shape=input.shape),
of_target: tp.Numpy.Placeholder(shape=target.shape),
) -> Dict[str, tp.Numpy]:
with flow.scope.placement(device_type, "0:0"):
v = flow.get_variable(
shape=target.shape,
dtype=flow.float32,
initializer=flow.constant_initializer(0),
name="v",
)
x_var = of_input + v
# watch the diff
flow.watch_diff(x_var, assert_prediction_grad)
l1loss = flow.nn.L1Loss(x_var, of_target, reduction="none", name="of_l1loss")
l1loss_mean = flow.nn.L1Loss(
x_var, of_target, reduction="mean", name="of_l1loss_mean"
)
l1loss_sum = flow.nn.L1Loss(
x_var, of_target, reduction="sum", name="of_l1loss_sum"
)
with flow.scope.placement(device_type, "0:0"):
# We only test reduction="mean" diff
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
).minimize(l1loss_mean)
return {
"of_l1_loss": l1loss,
"of_l1_loss_mean": l1loss_mean,
"of_l1_loss_sum": l1loss_sum,
}
of_out_l1loss_dict = oneflow_l1loss(input, target)
assert np.allclose(
of_out_l1loss_dict["of_l1_loss"], np_out_l1loss_dict["np_l1_loss"]
)
assert np.allclose(
of_out_l1loss_dict["of_l1_loss_mean"][0], np_out_l1loss_dict["np_l1_loss_mean"]
)
assert np.allclose(
of_out_l1loss_dict["of_l1_loss_sum"][0], np_out_l1loss_dict["np_l1_loss_sum"]
)
def _gen_arg_dict(shape, device_type, machine_ids, device_counts):
# Generate a dict to pass parameter to test case
arg_dict = OrderedDict()
arg_dict["input_shape"] = [shape]
arg_dict["target_shape"] = [shape]
arg_dict["device_type"] = [device_type]
arg_dict["machine_ids"] = [machine_ids]
arg_dict["device_counts"] = [device_counts]
return arg_dict
@flow.unittest.skip_unless_1n1d()
class Testl1loss1n1d(flow.unittest.TestCase):
def test_l1loss_cpu(test_case):
arg_dict = _gen_arg_dict(
shape=(16, 3), device_type="cpu", machine_ids="0:0", device_counts=1
)
for arg in GenArgList(arg_dict):
_compare_l1loss_with_np(*arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_l1loss_gpu(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 16, 32), device_type="gpu", machine_ids="0:0", device_counts=1
)
for arg in GenArgList(arg_dict):
_compare_l1loss_with_np(*arg)
@flow.unittest.skip_unless_1n2d()
class Testl1loss1n2d(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_l1loss_gpu_1n2d(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 32, 16), device_type="gpu", machine_ids="0:0-1", device_counts=2
)
for arg in GenArgList(arg_dict):
_compare_l1loss_with_np(*arg)
if __name__ == "__main__":
unittest.main()
| [((5020, 5052), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (5050, 5052), True, 'import oneflow as flow\n'), ((5675, 5707), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (5705, 5707), True, 'import oneflow as flow\n'), ((1079, 1100), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1098, 1100), True, 'import oneflow as flow\n'), ((1106, 1134), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1132, 1134), True, 'import oneflow as flow\n'), ((2901, 2964), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2921, 2964), True, 'import oneflow as flow\n'), ((4305, 4384), 'numpy.allclose', 'np.allclose', (["of_out_l1loss_dict['of_l1_loss']", "np_out_l1loss_dict['np_l1_loss']"], {}), "(of_out_l1loss_dict['of_l1_loss'], np_out_l1loss_dict['np_l1_loss'])\n", (4316, 4384), True, 'import numpy as np\n'), ((4410, 4507), 'numpy.allclose', 'np.allclose', (["of_out_l1loss_dict['of_l1_loss_mean'][0]", "np_out_l1loss_dict['np_l1_loss_mean']"], {}), "(of_out_l1loss_dict['of_l1_loss_mean'][0], np_out_l1loss_dict[\n 'np_l1_loss_mean'])\n", (4421, 4507), True, 'import numpy as np\n'), ((4528, 4623), 'numpy.allclose', 'np.allclose', (["of_out_l1loss_dict['of_l1_loss_sum'][0]", "np_out_l1loss_dict['np_l1_loss_sum']"], {}), "(of_out_l1loss_dict['of_l1_loss_sum'][0], np_out_l1loss_dict[\n 'np_l1_loss_sum'])\n", (4539, 4623), True, 'import numpy as np\n'), ((4770, 4783), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4781, 4783), False, 'from collections import OrderedDict\n'), ((6122, 6137), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6135, 6137), False, 'import unittest\n'), ((1172, 1213), 'oneflow.config.cpu_device_num', 'flow.config.cpu_device_num', (['device_counts'], {}), '(device_counts)\n', (1198, 1213), True, 'import oneflow as flow\n'), ((1232, 1273), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['device_counts'], {}), '(device_counts)\n', (1258, 1273), True, 'import oneflow as flow\n'), ((1315, 1361), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', 'machine_ids'], {}), '(device_type, machine_ids)\n', (1335, 1361), True, 'import oneflow as flow\n'), ((1400, 1428), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1426, 1428), True, 'import oneflow as flow\n'), ((1487, 1515), 'numpy.abs', 'np.abs', (['(np_target - np_input)'], {}), '(np_target - np_input)\n', (1493, 1515), True, 'import numpy as np\n'), ((1537, 1551), 'numpy.mean', 'np.mean', (['np_l1'], {}), '(np_l1)\n', (1544, 1551), True, 'import numpy as np\n'), ((1572, 1585), 'numpy.sum', 'np.sum', (['np_l1'], {}), '(np_l1)\n', (1578, 1585), True, 'import numpy as np\n'), ((2090, 2108), 'numpy.arange', 'np.arange', (['elemcnt'], {}), '(elemcnt)\n', (2099, 2108), True, 'import numpy as np\n'), ((2847, 2894), 'numpy.allclose', 'np.allclose', (['blob', "np_grad_dict['np_grad_mean']"], {}), "(blob, np_grad_dict['np_grad_mean'])\n", (2858, 2894), True, 'import numpy as np\n'), ((3467, 3513), 'oneflow.watch_diff', 'flow.watch_diff', (['x_var', 'assert_prediction_grad'], {}), '(x_var, assert_prediction_grad)\n', (3482, 3513), True, 'import oneflow as flow\n'), ((3532, 3600), 'oneflow.nn.L1Loss', 'flow.nn.L1Loss', (['x_var', 'of_target'], {'reduction': '"""none"""', 'name': '"""of_l1loss"""'}), "(x_var, of_target, 
reduction='none', name='of_l1loss')\n", (3546, 3600), True, 'import oneflow as flow\n'), ((3623, 3696), 'oneflow.nn.L1Loss', 'flow.nn.L1Loss', (['x_var', 'of_target'], {'reduction': '"""mean"""', 'name': '"""of_l1loss_mean"""'}), "(x_var, of_target, reduction='mean', name='of_l1loss_mean')\n", (3637, 3696), True, 'import oneflow as flow\n'), ((3740, 3811), 'oneflow.nn.L1Loss', 'flow.nn.L1Loss', (['x_var', 'of_target'], {'reduction': '"""sum"""', 'name': '"""of_l1loss_sum"""'}), "(x_var, of_target, reduction='sum', name='of_l1loss_sum')\n", (3754, 3811), True, 'import oneflow as flow\n'), ((5279, 5299), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5289, 5299), False, 'from test_util import GenArgList\n'), ((5608, 5628), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5618, 5628), False, 'from test_util import GenArgList\n'), ((5365, 5399), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (5374, 5399), False, 'import os\n'), ((6025, 6045), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6035, 6045), False, 'from test_util import GenArgList\n'), ((5775, 5809), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (5784, 5809), False, 'import os\n'), ((896, 930), 'numpy.random.random', 'np.random.random', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (912, 930), True, 'import numpy as np\n'), ((963, 998), 'numpy.random.random', 'np.random.random', ([], {'size': 'target_shape'}), '(size=target_shape)\n', (979, 998), True, 'import numpy as np\n'), ((2187, 2200), 'numpy.sign', 'np.sign', (['diff'], {}), '(diff)\n', (2194, 2200), True, 'import numpy as np\n'), ((3007, 3046), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': 'input.shape'}), '(shape=input.shape)\n', (3027, 3046), True, 'import oneflow.typing as tp\n'), ((3067, 3107), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': 'target.shape'}), '(shape=target.shape)\n', (3087, 3107), True, 'import oneflow.typing as tp\n'), ((3153, 3193), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (3173, 3193), True, 'import oneflow as flow\n'), ((3848, 3888), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (3868, 3888), True, 'import oneflow as flow\n'), ((2027, 2044), 'numpy.zeros', 'np.zeros', (['elemcnt'], {}), '(elemcnt)\n', (2035, 2044), True, 'import numpy as np\n'), ((3330, 3358), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (3355, 3358), True, 'import oneflow as flow\n'), ((3987, 4041), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (4028, 4041), True, 'import oneflow as flow\n')] |
Dog-Egg/dida | tests/test_schema.py | 17fd8dce0fe198e65effb48816a2339802234974 | import unittest
import datetime
from dida import schemas, triggers
from marshmallow import ValidationError
class TestTriggerSchema(unittest.TestCase):
def test_dump_trigger(self):
result = schemas.TriggerSchema().dump(triggers.IntervalTrigger())
print('IntervalTrigger dump:', result)
result = schemas.TriggerSchema().dump(triggers.DateTrigger())
print('DateTrigger dump:', result)
def test_load_trigger(self):
self.assertRaises(ValidationError, schemas.TriggerSchema().load, {"type": "unknown"})
obj = schemas.TriggerSchema().load({'type': "interval"})
self.assertIsInstance(obj, triggers.IntervalTrigger)
obj = schemas.TriggerSchema().load({'type': 'date', "params": {'run_date': "2020-01-01 00:00:00"}})
self.assertEqual(obj.run_date, datetime.datetime(2020, 1, 1).astimezone())
| [((233, 259), 'dida.triggers.IntervalTrigger', 'triggers.IntervalTrigger', ([], {}), '()\n', (257, 259), False, 'from dida import schemas, triggers\n'), ((355, 377), 'dida.triggers.DateTrigger', 'triggers.DateTrigger', ([], {}), '()\n', (375, 377), False, 'from dida import schemas, triggers\n'), ((204, 227), 'dida.schemas.TriggerSchema', 'schemas.TriggerSchema', ([], {}), '()\n', (225, 227), False, 'from dida import schemas, triggers\n'), ((326, 349), 'dida.schemas.TriggerSchema', 'schemas.TriggerSchema', ([], {}), '()\n', (347, 349), False, 'from dida import schemas, triggers\n'), ((499, 522), 'dida.schemas.TriggerSchema', 'schemas.TriggerSchema', ([], {}), '()\n', (520, 522), False, 'from dida import schemas, triggers\n'), ((565, 588), 'dida.schemas.TriggerSchema', 'schemas.TriggerSchema', ([], {}), '()\n', (586, 588), False, 'from dida import schemas, triggers\n'), ((692, 715), 'dida.schemas.TriggerSchema', 'schemas.TriggerSchema', ([], {}), '()\n', (713, 715), False, 'from dida import schemas, triggers\n'), ((825, 854), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (842, 854), False, 'import datetime\n')] |
Sunbird-Ed/evolve-api | apps/content/views.py | 371b39422839762e32401340456c13858cb8e1e9 | from django.shortcuts import render
from rest_framework import status
from rest_framework.generics import (
    ListAPIView,
    ListCreateAPIView,
    RetrieveUpdateAPIView,)
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import permission_classes
from apps.configuration.models import Book
from apps.hardspot.models import HardSpot
from .models import Content,ContentContributors
from .serializers import (
ContentListSerializer,
BookNestedSerializer,
BookListSerializer,
ContentStatusListSerializer,
SectionKeywordSerializer,
SubSectionKeywordSerializer,
SectionKeywordsSerializer,
ChapterKeywordsSerializer,
SubSectionKeywordsSerializer,
KeywordSerializer,
ContentContributorSerializer,
ApprovedContentSerializer,
ContentStatusSerializer,
HardSpotCreateSerializer,
ContentContributorsSerializer,
SubSubSectionKeywordsSerializer,
ContentStatusSerializerFileFormat,
)
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from rest_framework.parsers import MultiPartParser
from apps.dataupload.models import (Chapter,
Section,
SubSection,
ChapterKeyword,
SectionKeyword,
SubSectionKeyword,
SubSubSectionKeyword,
)
import json
import pandas as pd
from evolve import settings
from azure.storage.blob import (
BlockBlobService,
ContainerPermissions
)
from datetime import datetime, timedelta
import os
import itertools
from django.db.models import Q
import threading
account_name = settings.AZURE_ACCOUNT_NAME
account_key = settings.AZURE_ACCOUNT_KEY
CONTAINER_NAME= settings.AZURE_CONTAINER
block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key)
class ContentList(ListCreateAPIView):
queryset = Content.objects.all()
serializer_class = KeywordSerializer
parser_classes = (MultiPartParser,)
def get(self, request):
try:
queryset = self.get_queryset()
serializer = ContentStatusListSerializer(queryset, many=True)
context = {"success": True, "message": "Chapter List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Chapter list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def post(self, request,format=None):
try:
serializer = ContentListSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
context = {"success": True, "message": "Created Successful", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
context = {"success": False, "message": "Invalid Input Data to create content"}
return Response(context, status=status.HTTP_400_BAD_REQUEST)
except Exception as error:
context = {'success': "false", 'message': 'Failed to create content.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@permission_classes((IsAuthenticated,))
class ContentRetrieveUpdate(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = ContentListSerializer
    def get(self, request, pk, format=None):
        try:
            queryset = self.get_object()
            serializer = ContentListSerializer(queryset)
            context = {"success": True, "message": "Content Detail","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get content list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def put(self, request, pk, format=None):
try:
try:
content_list = self.get_object()
except Exception as error:
context = {'success': "false", 'message': 'content Id does not exist.'}
return Response(context, status=status.HTTP_404_NOT_FOUND)
serializer = ContentListSerializer(content_list, data=request.data, context={"user":request.user}, partial=True)
if serializer.is_valid():
serializer.save()
context = {"success": True, "message": "Updation Successful","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
context = {"success": False, "message": "Updation Failed"}
return Response(context, status=status.HTTP_400_BAD_REQUEST)
except Exception as error:
context = {'success': "false", 'message': 'Failed To Update content Details.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class BookNestedList(ListAPIView):
queryset = Book.objects.all()
serializer_class = BookNestedSerializer
def get(self, request):
try:
subject = request.query_params.get('subject', None)
if subject is not None:
queryset=self.get_queryset().filter(subject__id=subject, content_only=True)
else:
queryset = self.get_queryset().filter(content_only=True)
serializer = BookNestedSerializer(queryset, many=True)
context = {"success": True, "message": "Conetent List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class BookListView(ListAPIView):
queryset = Book.objects.all()
serializer_class = BookListSerializer
def get(self, request):
try:
subject = request.query_params.get('subject', None)
if subject is not None:
queryset=self.get_queryset().filter(subject__id=subject)
else:
queryset = self.get_queryset()
serializer = BookListSerializer(queryset, many=True)
context = {"success": True, "message": "Content List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Conetent list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentApprovedList(ListAPIView):
queryset = Content.objects.all()
serializer_class = KeywordSerializer
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section',None)
if chapter_id is not None:
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True)
elif section_id is not None:
queryset = self.get_queryset().filter(section__id=section_id, approved=True)
elif sub_section_id is not None:
queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True)
elif sub_sub_section_id is not None:
queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True)
else:
queryset = self.get_queryset().filter(approved=True)
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content Approved List", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentPendingList(ListAPIView):
queryset = Content.objects.all()
serializer_class = KeywordSerializer
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section',None)
if chapter_id is not None:
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None)
elif section_id is not None:
queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None)
elif sub_section_id is not None:
queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None)
elif sub_sub_section_id is not None:
queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None)
else:
queryset = self.get_queryset().filter(approved=False, approved_by=None)
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content Pending List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Pending list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentStatusList(ListCreateAPIView):
queryset = Content.objects.all()
serializer_class = ContentListSerializer
def get(self, request):
try:
if request.query_params.get('chapter', None) is not None:
queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None))
            elif request.query_params.get('section', None) is not None:
                queryset=self.get_queryset().filter(section_id=request.query_params.get('section', None))
            elif request.query_params.get('sub_section', None) is not None:
                queryset=self.get_queryset().filter(sub_section_id=request.query_params.get('sub_section', None))
else:
queryset = self.get_queryset()
serializer = ContentListSerializer(queryset, many=True)
context = {"success": True, "message": "Content Status List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Status list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentRejectedList(ListAPIView):
queryset = Content.objects.all()
serializer_class = ContentListSerializer
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section',None)
if chapter_id is not None:
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None)
elif section_id is not None:
queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None)
elif sub_section_id is not None:
queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None)
elif sub_sub_section_id is not None:
queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved = False).exclude(approved_by=None)
else:
queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None)
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content Rejected List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Rejected list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class Keywords(ListAPIView):
queryset = Content.objects.all()
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section', None)
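            # pick exactly one keyword queryset, preferring chapter over section over
            # sub-section over sub-sub-section; with no filter, fall back to all Content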
if chapter_id is not None:
queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id)
serializer = ChapterKeywordsSerializer(queryset, many=True)
elif section_id is not None:
queryset = SectionKeyword.objects.filter(section__id = section_id)
serializer = SectionKeywordsSerializer(queryset, many=True)
elif sub_section_id is not None:
queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id)
serializer = SubSectionKeywordsSerializer(queryset, many=True)
elif sub_sub_section_id is not None:
queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id)
serializer = SubSubSectionKeywordsSerializer(queryset, many=True)
else:
queryset = self.get_queryset()
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentContributorCreateView(ListCreateAPIView):
queryset = ContentContributors.objects.all()
serializer_class = ContentContributorSerializer
def post(self, request):
try:
queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first()
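            # reuse an existing contributor matched on first name + last name + mobile,
            # back-filling the email only when the stored one is empty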
if queryset is not None:
if str(queryset.email) == "" and request.data['email'] is not None:
ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email'])
queryset.refresh_from_db()
serializer = ContentContributorSerializer(queryset)
context = {"success": True, "message": "Successful", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
else:
serializer = ContentContributorSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
context = {"success": True, "message": "Successful", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
context = {"success": False, "message": "Invalid Input Data to create Pesonal details"}
return Response(context, status=status.HTTP_400_BAD_REQUEST)
except Exception as error:
context = {'success': "false", 'message': 'Failed to Personal Details.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@permission_classes((IsAuthenticated,))
class ApprovedContentDownloadView(ListAPIView):
queryset = Book.objects.all()
def get(self, request):
try:
final_list = []
import os
from shutil import copyfile
book = request.query_params.get('book', None)
chapters=Chapter.objects.filter(book_id=book).order_by('id')
serializer = ApprovedContentSerializer(chapters, many=True)
for data in serializer.data:
for d in data['chapter']:
final_list.append(d)
repeat_list=['Content Name','Content Link/Video Link','Content Rating (By Reviewer)','Comment (By Reviewer)', 'linked_keywords']
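            # the five per-content columns are repeated 5 times (itertools.repeat below),
            # so one CSV row can carry up to five approved contents for a textbook unit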
data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5)))))
exists = os.path.isfile('ApprovedContent.csv')
path = settings.MEDIA_ROOT + '/files/'
if exists:
os.remove('ApprovedContent.csv')
data_frame.to_csv(path + 'ApprovedContent.csv', encoding="utf-8-sig", index=False)
context = {"success": True, "message": "Activity List", "data": 'media/files/ApprovedContent.csv'}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentStatusDownloadView(RetrieveUpdateAPIView):
queryset = HardSpot.objects.all()
serializer_class = HardSpotCreateSerializer
def get(self, request):
try:
final_list = []
import os
from shutil import copyfile
book_id = request.query_params.get('book', None)
book_name=""
if book_id is not None:
book_name=Book.objects.get(id=book_id)
chapters=Chapter.objects.filter(book__id=book_id).order_by('id')
serializer = ContentStatusSerializer(chapters, many=True)
for data in serializer.data:
for d in data['chapter']:
final_list.append(d)
data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots'])
exists = os.path.isfile('{}_contentstatus.csv'.format(book_name))
path = settings.MEDIA_ROOT + '/files/'
if exists:
os.remove('{}_contentstatus.csv'.format(book_name))
# data_frame.to_excel(path + 'contentstatus.xlsx')
data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding="utf-8-sig", index=False)
context = {"success": True, "message": "Activity List","data": 'media/files/{}_contentstatus.csv'.format(book_name)}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@permission_classes((IsAuthenticated,))
class ContentContributorsDownloadView(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = HardSpotCreateSerializer
def get(self, request):
try:
final_list = []
import os
from shutil import copyfile
state_id = request.query_params.get('state', None)
if state_id is not None:
queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id) | Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct()
else:
queryset = self.get_queryset()
serializer = ContentContributorsSerializer(queryset, many=True)
res_list = []
for i in range(len(serializer.data)):
if serializer.data[i] not in serializer.data[i + 1:]:
res_list.append(serializer.data[i])
for data in res_list:
for d in res_list:
final_list.append(d)
data_frame = pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates()
exists = os.path.isfile('content_contributers.csv')
path = settings.MEDIA_ROOT + '/files/'
if exists:
os.remove('content_contributers.csv')
# data_frame.to_excel(path + 'content_contributers.xlsx')
data_frame.to_csv(path + 'content_contributers.csv', encoding="utf-8-sig", index=False)
context = {"success": True, "message": "Activity List","data": 'media/files/content_contributers.csv'}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = { 'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class GetSASView(ListAPIView):
def get(self,request):
try:
sas_url = block_blob_service.generate_container_shared_access_signature(
CONTAINER_NAME,
ContainerPermissions.WRITE,
datetime.utcnow() + timedelta(hours=1),
)
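            # the SAS token grants WRITE access on the container for one hour and is
            # meant to be appended as a query string to base_url when uploading blobs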
base_url=account_name+".blob.core.windows.net/"+CONTAINER_NAME
context = {"success": True, "message": "url link", "token":sas_url,"base_url":base_url}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class GetSasDownloadView(ListAPIView):
def get(self,request):
from evolve import settings
accountName = settings.AZURE_ACCOUNT_NAME
accountKey = settings.AZURE_ACCOUNT_KEY
containerName= settings.AZURE_CONTAINER
try:
blobService = BlockBlobService(account_name=accountName, account_key=accountKey)
sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10))
context = {"success": True, "token":sas_token}
return Response(context, status=status.HTTP_200_OK)
except:
return None
class ContentListUrlUpdate(ListAPIView):
queryset = Content.objects.all()
serializer_class = ContentStatusSerializer
def get(self, request):
try:
queryset = self.get_queryset().filter(approved=True)
serializer = ContentStatusSerializerFileFormat(queryset, many=True)
context = {"success": True, "message": "OtherContent Approved List", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentListUrlPutRequest(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = ContentStatusSerializer
def post(self, request):
try:
datalist = request.data
print(datalist)
for data in datalist:
print(data)
Content.objects.filter(pk=data['content_id']).update(video=data['video'])
context = {"success": True, "message": "update successfull"}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = ContentStatusSerializer
def post(self, request):
try:
datalist = request.data
print(datalist)
for data in datalist:
Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database'])
context = {"success": True, "message": "update successfull"}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class BackupContent(ListAPIView):
queryset = Book.objects.all()
def get(self,request):
try:
t = threading.Thread(target=self.index, args=(), kwargs={})
t.setDaemon(True)
t.start()
context = {"success": True, "message": "Activity List", "data": 'media/files/BackupContent.csv'}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
            context = {'success': "false", 'message': 'Failed to get Activity list.', "error": str(error)}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def index(self):
final_list,final = [],[]
queryset = Content.objects.filter(approved=True)
for i in queryset:
try:
if i.video is not None :
final=[i.id,i.video]
final_list.append(final)
except Exception as e:
pass
path = settings.MEDIA_ROOT + '/files/'
data_frame = pd.DataFrame(final_list , columns=['id','url'])
data_frame.to_csv(path+ 'BackupContent.csv', encoding="utf-8-sig", index=False)
| [((1819, 1887), 'azure.storage.blob.BlockBlobService', 'BlockBlobService', ([], {'account_name': 'account_name', 'account_key': 'account_key'}), '(account_name=account_name, account_key=account_key)\n', (1835, 1887), False, 'from azure.storage.blob import BlockBlobService, ContainerPermissions\n'), ((3304, 3342), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAuthenticated,)'], {}), '((IsAuthenticated,))\n', (3322, 3342), False, 'from rest_framework.decorators import permission_classes\n'), ((16341, 16379), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAuthenticated,)'], {}), '((IsAuthenticated,))\n', (16359, 16379), False, 'from rest_framework.decorators import permission_classes\n'), ((19848, 19886), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAuthenticated,)'], {}), '((IsAuthenticated,))\n', (19866, 19886), False, 'from rest_framework.decorators import permission_classes\n'), ((5090, 5108), 'apps.configuration.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (5106, 5108), False, 'from apps.configuration.models import Book\n'), ((6003, 6021), 'apps.configuration.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (6019, 6021), False, 'from apps.configuration.models import Book\n'), ((16443, 16461), 'apps.configuration.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (16459, 16461), False, 'from apps.configuration.models import Book\n'), ((18101, 18123), 'apps.hardspot.models.HardSpot.objects.all', 'HardSpot.objects.all', ([], {}), '()\n', (18121, 18123), False, 'from apps.hardspot.models import HardSpot\n'), ((25749, 25767), 'apps.configuration.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (25765, 25767), False, 'from apps.configuration.models import Book\n'), ((26743, 26790), 'pandas.DataFrame', 'pd.DataFrame', (['final_list'], {'columns': "['id', 'url']"}), "(final_list, columns=['id', 'url'])\n", (26755, 26790), True, 'import pandas as pd\n'), ((2315, 2359), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (2323, 2359), False, 'from rest_framework.response import Response\n'), ((3047, 3100), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(context, status=status.HTTP_400_BAD_REQUEST)\n', (3055, 3100), False, 'from rest_framework.response import Response\n'), ((3738, 3782), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (3746, 3782), False, 'from rest_framework.response import Response\n'), ((4772, 4825), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(context, status=status.HTTP_400_BAD_REQUEST)\n', (4780, 4825), False, 'from rest_framework.response import Response\n'), ((5692, 5736), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (5700, 5736), False, 'from rest_framework.response import Response\n'), ((6555, 6599), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (6563, 6599), False, 'from rest_framework.response import Response\n'), ((8112, 8156), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, 
status=status.HTTP_200_OK)\n', (8120, 8156), False, 'from rest_framework.response import Response\n'), ((9759, 9803), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (9767, 9803), False, 'from rest_framework.response import Response\n'), ((10985, 11029), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (10993, 11029), False, 'from rest_framework.response import Response\n'), ((12676, 12720), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (12684, 12720), False, 'from rest_framework.response import Response\n'), ((14433, 14477), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (14441, 14477), False, 'from rest_framework.response import Response\n'), ((17386, 17423), 'os.path.isfile', 'os.path.isfile', (['"""ApprovedContent.csv"""'], {}), "('ApprovedContent.csv')\n", (17400, 17423), False, 'import os\n'), ((17779, 17823), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (17787, 17823), False, 'from rest_framework.response import Response\n'), ((18782, 19074), 'pandas.DataFrame', 'pd.DataFrame', (['final_list'], {'columns': "['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name',\n 'Level 1 Textbook Unit', 'Level 2 Textbook Unit',\n 'Level 3 Textbook Unit', 'Level 4 Textbook Unit', 'total',\n 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots']"}), "(final_list, columns=['Board', 'Medium', 'Grade', 'Subject',\n 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit',\n 'Level 3 Textbook Unit', 'Level 4 Textbook Unit', 'total',\n 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots']\n )\n", (18794, 19074), True, 'import pandas as pd\n'), ((19597, 19641), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (19605, 19641), False, 'from rest_framework.response import Response\n'), ((21297, 21339), 'os.path.isfile', 'os.path.isfile', (['"""content_contributers.csv"""'], {}), "('content_contributers.csv')\n", (21311, 21339), False, 'import os\n'), ((21772, 21816), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (21780, 21816), False, 'from rest_framework.response import Response\n'), ((22522, 22566), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (22530, 22566), False, 'from rest_framework.response import Response\n'), ((23066, 23132), 'azure.storage.blob.BlockBlobService', 'BlockBlobService', ([], {'account_name': 'accountName', 'account_key': 'accountKey'}), '(account_name=accountName, account_key=accountKey)\n', (23082, 23132), False, 'from azure.storage.blob import BlockBlobService, ContainerPermissions\n'), ((23372, 23416), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (23380, 23416), False, 'from rest_framework.response import Response\n'), ((23918, 23962), 'rest_framework.response.Response', 'Response', (['context'], {'status': 
'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (23926, 23962), False, 'from rest_framework.response import Response\n'), ((24671, 24715), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (24679, 24715), False, 'from rest_framework.response import Response\n'), ((25434, 25478), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (25442, 25478), False, 'from rest_framework.response import Response\n'), ((25825, 25880), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.index', 'args': '()', 'kwargs': '{}'}), '(target=self.index, args=(), kwargs={})\n', (25841, 25880), False, 'import threading\n'), ((26066, 26110), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (26074, 26110), False, 'from rest_framework.response import Response\n'), ((2499, 2562), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (2507, 2562), False, 'from rest_framework.response import Response\n'), ((2891, 2935), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (2899, 2935), False, 'from rest_framework.response import Response\n'), ((3238, 3301), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (3246, 3301), False, 'from rest_framework.response import Response\n'), ((3922, 3985), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (3930, 3985), False, 'from rest_framework.response import Response\n'), ((4637, 4681), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (4645, 4681), False, 'from rest_framework.response import Response\n'), ((4971, 5034), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (4979, 5034), False, 'from rest_framework.response import Response\n'), ((5888, 5951), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (5896, 5951), False, 'from rest_framework.response import Response\n'), ((6752, 6815), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (6760, 6815), False, 'from rest_framework.response import Response\n'), ((8305, 8368), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (8313, 8368), False, 'from rest_framework.response import Response\n'), ((9951, 10014), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (9959, 10014), False, 
'from rest_framework.response import Response\n'), ((11176, 11239), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (11184, 11239), False, 'from rest_framework.response import Response\n'), ((12869, 12932), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (12877, 12932), False, 'from rest_framework.response import Response\n'), ((13400, 13453), 'apps.dataupload.models.ChapterKeyword.objects.filter', 'ChapterKeyword.objects.filter', ([], {'chapter__id': 'chapter_id'}), '(chapter__id=chapter_id)\n', (13429, 13453), False, 'from apps.dataupload.models import Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword\n'), ((14617, 14680), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (14625, 14680), False, 'from rest_framework.response import Response\n'), ((15561, 15605), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (15569, 15605), False, 'from rest_framework.response import Response\n'), ((16079, 16132), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(context, status=status.HTTP_400_BAD_REQUEST)\n', (16087, 16132), False, 'from rest_framework.response import Response\n'), ((16272, 16335), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (16280, 16335), False, 'from rest_framework.response import Response\n'), ((17514, 17546), 'os.remove', 'os.remove', (['"""ApprovedContent.csv"""'], {}), "('ApprovedContent.csv')\n", (17523, 17546), False, 'import os\n'), ((17964, 18027), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (17972, 18027), False, 'from rest_framework.response import Response\n'), ((18452, 18480), 'apps.configuration.models.Book.objects.get', 'Book.objects.get', ([], {'id': 'book_id'}), '(id=book_id)\n', (18468, 18480), False, 'from apps.configuration.models import Book\n'), ((19782, 19845), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (19790, 19845), False, 'from rest_framework.response import Response\n'), ((21430, 21467), 'os.remove', 'os.remove', (['"""content_contributers.csv"""'], {}), "('content_contributers.csv')\n", (21439, 21467), False, 'import os\n'), ((21958, 22021), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (21966, 22021), False, 'from rest_framework.response import Response\n'), ((22708, 22771), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (22716, 22771), False, 'from rest_framework.response import Response\n'), ((24116, 24179), 
'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (24124, 24179), False, 'from rest_framework.response import Response\n'), ((24869, 24932), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (24877, 24932), False, 'from rest_framework.response import Response\n'), ((25632, 25695), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (25640, 25695), False, 'from rest_framework.response import Response\n'), ((26272, 26335), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (26280, 26335), False, 'from rest_framework.response import Response\n'), ((4262, 4313), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_404_NOT_FOUND'}), '(context, status=status.HTTP_404_NOT_FOUND)\n', (4270, 4313), False, 'from rest_framework.response import Response\n'), ((13600, 13653), 'apps.dataupload.models.SectionKeyword.objects.filter', 'SectionKeyword.objects.filter', ([], {'section__id': 'section_id'}), '(section__id=section_id)\n', (13629, 13653), False, 'from apps.dataupload.models import Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword\n'), ((15907, 15951), 'rest_framework.response.Response', 'Response', (['context'], {'status': 'status.HTTP_200_OK'}), '(context, status=status.HTTP_200_OK)\n', (15915, 15951), False, 'from rest_framework.response import Response\n'), ((16673, 16709), 'apps.dataupload.models.Chapter.objects.filter', 'Chapter.objects.filter', ([], {'book_id': 'book'}), '(book_id=book)\n', (16695, 16709), False, 'from apps.dataupload.models import Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword\n'), ((21135, 21264), 'pandas.DataFrame', 'pd.DataFrame', (['final_list'], {'columns': "['first_name', 'last_name', 'mobile', 'email', 'city_name', 'school_name',\n 'textbook_name']"}), "(final_list, columns=['first_name', 'last_name', 'mobile',\n 'email', 'city_name', 'school_name', 'textbook_name'])\n", (21147, 21264), True, 'import pandas as pd\n'), ((22274, 22291), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (22289, 22291), False, 'from datetime import datetime, timedelta\n'), ((22294, 22312), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (22303, 22312), False, 'from datetime import datetime, timedelta\n'), ((23253, 23270), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (23268, 23270), False, 'from datetime import datetime, timedelta\n'), ((23273, 23292), 'datetime.timedelta', 'timedelta', ([], {'hours': '(10)'}), '(hours=10)\n', (23282, 23292), False, 'from datetime import datetime, timedelta\n'), ((13804, 13868), 'apps.dataupload.models.SubSectionKeyword.objects.filter', 'SubSectionKeyword.objects.filter', ([], {'sub_section__id': 'sub_section_id'}), '(sub_section__id=sub_section_id)\n', (13836, 13868), False, 'from apps.dataupload.models import Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword\n'), ((18506, 18546), 
'apps.dataupload.models.Chapter.objects.filter', 'Chapter.objects.filter', ([], {'book__id': 'book_id'}), '(book__id=book_id)\n', (18528, 18546), False, 'from apps.dataupload.models import Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword\n'), ((14026, 14101), 'apps.dataupload.models.SubSubSectionKeyword.objects.filter', 'SubSubSectionKeyword.objects.filter', ([], {'sub_sub_section__id': 'sub_sub_section_id'}), '(sub_sub_section__id=sub_sub_section_id)\n', (14061, 14101), False, 'from apps.dataupload.models import Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword\n'), ((17328, 17360), 'itertools.repeat', 'itertools.repeat', (['repeat_list', '(5)'], {}), '(repeat_list, 5)\n', (17344, 17360), False, 'import itertools\n'), ((20577, 20637), 'django.db.models.Q', 'Q', ([], {'chapter__book__subject__grade__medium__state__id': 'state_id'}), '(chapter__book__subject__grade__medium__state__id=state_id)\n', (20578, 20637), False, 'from django.db.models import Q\n'), ((20504, 20573), 'django.db.models.Q', 'Q', ([], {'section__chapter__book__subject__grade__medium__state__id': 'state_id'}), '(section__chapter__book__subject__grade__medium__state__id=state_id)\n', (20505, 20573), False, 'from django.db.models import Q\n'), ((20316, 20419), 'django.db.models.Q', 'Q', ([], {'sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id': 'state_id'}), '(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id\n =state_id)\n', (20317, 20419), False, 'from django.db.models import Q\n'), ((20417, 20504), 'django.db.models.Q', 'Q', ([], {'sub_section__section__chapter__book__subject__grade__medium__state__id': 'state_id'}), '(sub_section__section__chapter__book__subject__grade__medium__state__id=\n state_id)\n', (20418, 20504), False, 'from django.db.models import Q\n')] |
GuoJingyao/cornac | examples/given_data.py | e7529990ec1dfa586c4af3de98e4b3e00a786578 | # -*- coding: utf-8 -*-
"""
Example to train and evaluate a model with given data
@author: Quoc-Tuan Truong <[email protected]>
"""
from cornac.data import Reader
from cornac.eval_methods import BaseMethod
from cornac.models import MF
from cornac.metrics import MAE, RMSE
from cornac.utils import cache
# Download MovieLens 100K provided training and test splits
reader = Reader()
train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base'))
test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test'))
eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data,
exclude_unknowns=False, verbose=True)
mf = MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02,
use_bias=True, early_stop=True, verbose=True)
# Evaluation
result = eval_method.evaluate(model=mf, metrics=[MAE(), RMSE()], user_based=True)
print(result)
| [((379, 387), 'cornac.data.Reader', 'Reader', ([], {}), '()\n', (385, 387), False, 'from cornac.data import Reader\n'), ((604, 712), 'cornac.eval_methods.BaseMethod.from_splits', 'BaseMethod.from_splits', ([], {'train_data': 'train_data', 'test_data': 'test_data', 'exclude_unknowns': '(False)', 'verbose': '(True)'}), '(train_data=train_data, test_data=test_data,\n exclude_unknowns=False, verbose=True)\n', (626, 712), False, 'from cornac.eval_methods import BaseMethod\n'), ((752, 860), 'cornac.models.MF', 'MF', ([], {'k': '(10)', 'max_iter': '(25)', 'learning_rate': '(0.01)', 'lambda_reg': '(0.02)', 'use_bias': '(True)', 'early_stop': '(True)', 'verbose': '(True)'}), '(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True,\n early_stop=True, verbose=True)\n', (754, 860), False, 'from cornac.models import MF\n'), ((413, 487), 'cornac.utils.cache', 'cache', ([], {'url': '"""http://files.grouplens.org/datasets/movielens/ml-100k/u1.base"""'}), "(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')\n", (418, 487), False, 'from cornac.utils import cache\n'), ((513, 587), 'cornac.utils.cache', 'cache', ([], {'url': '"""http://files.grouplens.org/datasets/movielens/ml-100k/u1.test"""'}), "(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')\n", (518, 587), False, 'from cornac.utils import cache\n'), ((928, 933), 'cornac.metrics.MAE', 'MAE', ([], {}), '()\n', (931, 933), False, 'from cornac.metrics import MAE, RMSE\n'), ((935, 941), 'cornac.metrics.RMSE', 'RMSE', ([], {}), '()\n', (939, 941), False, 'from cornac.metrics import MAE, RMSE\n')] |
ZlodeiBaal/taming | taming/data/ade20k.py | b6c0f896992881f154bdfd910a8163ee754df83a | import os
import numpy as np
import cv2
import albumentations
from PIL import Image
from torch.utils.data import Dataset
from taming.data.sflckr import SegmentationBase # for examples included in repo
class Examples(SegmentationBase):
def __init__(self, size=256, random_crop=False, interpolation="bicubic"):
super().__init__(data_csv="data/ade20k_examples.txt",
data_root="data/ade20k_images",
segmentation_root="data/ade20k_segmentations",
size=size, random_crop=random_crop,
interpolation=interpolation,
n_labels=151, shift_segmentation=False)
# With semantic map and scene label
class ADE20kBase(Dataset):
def __init__(self, config=None, size=None, random_crop=False, interpolation="bicubic", crop_size=None):
self.split = self.get_split()
self.n_labels = 151 # unknown + 150
self.data_csv = {"train": "data/ade20k_train.txt",
"validation": "data/ade20k_test.txt"}[self.split]
self.data_root = "./data/ade20k_root"
with open(os.path.join(self.data_root, "sceneCategories.txt"), "r") as f:
self.scene_categories = f.read().splitlines()
self.scene_categories = dict(line.split() for line in self.scene_categories)
with open(self.data_csv, "r") as f:
self.image_paths = f.read().splitlines()
self._length = len(self.image_paths)
ss = self.split
if ss=='train':
ss='training'
self.labels = {
"relative_file_path_": [l for l in self.image_paths],
"file_path_": [os.path.join(self.data_root, "images",ss, l)
for l in self.image_paths],
"relative_segmentation_path_": [l.replace(".jpg", ".png")
for l in self.image_paths],
"segmentation_path_": [os.path.join(self.data_root, "annotations",ss,
l.replace(".jpg", ".png"))
for l in self.image_paths],
"scene_category": [self.scene_categories[l.replace(".jpg", "")]
for l in self.image_paths],
}
size = None if size is not None and size<=0 else size
self.size = size
if crop_size is None:
self.crop_size = size if size is not None else None
else:
self.crop_size = crop_size
if self.size is not None:
self.interpolation = interpolation
self.interpolation = {
"nearest": cv2.INTER_NEAREST,
"bilinear": cv2.INTER_LINEAR,
"bicubic": cv2.INTER_CUBIC,
"area": cv2.INTER_AREA,
"lanczos": cv2.INTER_LANCZOS4}[self.interpolation]
self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
interpolation=self.interpolation)
self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
interpolation=cv2.INTER_NEAREST)
if crop_size is not None:
self.center_crop = not random_crop
if self.center_crop:
self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size)
else:
self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size)
self.preprocessor = self.cropper
def __len__(self):
return self._length
def __getitem__(self, i):
example = dict((k, self.labels[k][i]) for k in self.labels)
image = Image.open(example["file_path_"])
if not image.mode == "RGB":
image = image.convert("RGB")
image = np.array(image).astype(np.uint8)
if self.size is not None:
image = self.image_rescaler(image=image)["image"]
segmentation = Image.open(example["segmentation_path_"])
segmentation = np.array(segmentation).astype(np.uint8)
if self.size is not None:
segmentation = self.segmentation_rescaler(image=segmentation)["image"]
if self.size is not None:
processed = self.preprocessor(image=image, mask=segmentation)
else:
processed = {"image": image, "mask": segmentation}
example["image"] = (processed["image"]/127.5 - 1.0).astype(np.float32)
segmentation = processed["mask"]
onehot = np.eye(self.n_labels)[segmentation]
example["segmentation"] = onehot
return example
class ADE20kTrain(ADE20kBase):
# default to random_crop=True
def __init__(self, config=None, size=None, random_crop=True, interpolation="bicubic", crop_size=None):
super().__init__(config=config, size=size, random_crop=random_crop,
interpolation=interpolation, crop_size=crop_size)
def get_split(self):
return "train"
class ADE20kValidation(ADE20kBase):
def get_split(self):
return "validation"
if __name__ == "__main__":
dset = ADE20kValidation()
ex = dset[0]
for k in ["image", "scene_category", "segmentation"]:
print(type(ex[k]))
try:
print(ex[k].shape)
except:
print(ex[k])
| [((3811, 3844), 'PIL.Image.open', 'Image.open', (["example['file_path_']"], {}), "(example['file_path_'])\n", (3821, 3844), False, 'from PIL import Image\n'), ((4090, 4131), 'PIL.Image.open', 'Image.open', (["example['segmentation_path_']"], {}), "(example['segmentation_path_'])\n", (4100, 4131), False, 'from PIL import Image\n'), ((2915, 3004), 'albumentations.SmallestMaxSize', 'albumentations.SmallestMaxSize', ([], {'max_size': 'self.size', 'interpolation': 'self.interpolation'}), '(max_size=self.size, interpolation=self.\n interpolation)\n', (2945, 3004), False, 'import albumentations\n'), ((3106, 3194), 'albumentations.SmallestMaxSize', 'albumentations.SmallestMaxSize', ([], {'max_size': 'self.size', 'interpolation': 'cv2.INTER_NEAREST'}), '(max_size=self.size, interpolation=cv2.\n INTER_NEAREST)\n', (3136, 3194), False, 'import albumentations\n'), ((4634, 4655), 'numpy.eye', 'np.eye', (['self.n_labels'], {}), '(self.n_labels)\n', (4640, 4655), True, 'import numpy as np\n'), ((1140, 1191), 'os.path.join', 'os.path.join', (['self.data_root', '"""sceneCategories.txt"""'], {}), "(self.data_root, 'sceneCategories.txt')\n", (1152, 1191), False, 'import os\n'), ((1680, 1725), 'os.path.join', 'os.path.join', (['self.data_root', '"""images"""', 'ss', 'l'], {}), "(self.data_root, 'images', ss, l)\n", (1692, 1725), False, 'import os\n'), ((3408, 3478), 'albumentations.CenterCrop', 'albumentations.CenterCrop', ([], {'height': 'self.crop_size', 'width': 'self.crop_size'}), '(height=self.crop_size, width=self.crop_size)\n', (3433, 3478), False, 'import albumentations\n'), ((3528, 3598), 'albumentations.RandomCrop', 'albumentations.RandomCrop', ([], {'height': 'self.crop_size', 'width': 'self.crop_size'}), '(height=self.crop_size, width=self.crop_size)\n', (3553, 3598), False, 'import albumentations\n'), ((3938, 3953), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3946, 3953), True, 'import numpy as np\n'), ((4155, 4177), 'numpy.array', 'np.array', (['segmentation'], {}), '(segmentation)\n', (4163, 4177), True, 'import numpy as np\n')] |
olehermanse/masterfiles | templates/federated_reporting/distributed_cleanup.py | bcee0a8c0a925e885ba47ba3300b96c722b91f02 | #!/usr/bin/env python3
"""
fr_distributed_cleanup.py - a script to remove hosts which have migrated to
other feeder hubs. To be run on Federated Reporting superhub
after each import of feeder data.
First, enable fr_distributed_cleanup by setting a class in augments (def.json).
This enables policy in cfe_internal/enterprise/federation/federation.cf
```json
{
"classes": {
"cfengine_mp_enable_fr_distributed_cleanup": [ "any::" ]
}
}
```
After the policy has run on superhub and feeders, run this script
to set up the fr_distributed_cleanup role and account on the superhub and all
feeders with proper RBAC settings for normal operation.
You will be prompted for superhub admin credentials and then
admin credentials on each feeder.
"""
import argparse
import logging
import os
import platform
import string
import random
import subprocess
import sys
from getpass import getpass
from nova_api import NovaApi
from cfsecret import read_secret, write_secret
WORKDIR = None
CFE_FR_TABLES = None
# get WORKDIR and CFE_FR_TABLES from config.sh
config_sh_path = os.path.join(os.path.dirname(__file__), "config.sh")
cmd = "source {}; echo $WORKDIR; echo $CFE_FR_TABLES".format(config_sh_path)
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, shell=True, executable="/bin/bash"
) as proc:
lines = proc.stdout.readlines()
WORKDIR = lines[0].decode().strip()
CFE_FR_TABLES = [table.strip() for table in lines[1].decode().split()]
if not WORKDIR or not CFE_FR_TABLES:
print("Unable to get WORKDIR and CFE_FR_TABLES values from config.sh")
sys.exit(1)
# Primary dir in which to place various needed files
DISTRIBUTED_CLEANUP_DIR = "/opt/cfengine/federation/cftransport/distributed_cleanup"
# collect cert files from /var/cfengine/httpd/ssl/certs on
# superhub and feeders and cat all together into hubs.cert
CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, "hubs.cert")
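# Illustrative only: one way to assemble hubs.cert on the superhub (actual
# certificate file names under /var/cfengine/httpd/ssl/certs vary by
# installation, and the feeder certs must first be copied over from each feeder):
#   cat superhub.cert feeder1.cert feeder2.cert > \
#       /opt/cfengine/federation/cftransport/distributed_cleanup/hubs.cert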
# Note: remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything.
# api calls will overwrite fr_distributed_cleanup user and role on superhub and all feeders.
DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, "state/fr_distributed_cleanup.cfsecret")
def interactive_setup():
fr_distributed_cleanup_password = "".join(random.choices(string.printable, k=20))
admin_pass = getpass(
prompt="Enter admin password for superhub {}: ".format(platform.node())
)
api = NovaApi(api_user="admin", api_password=admin_pass)
# first confirm that this host is a superhub
status = api.fr_hub_status()
if (
status["status"] == 200
and status["role"] == "superhub"
and status["configured"]
):
logger.debug("This host is a superhub configured for Federated Reporting.")
else:
if status["status"] == 401:
print("admin credentials are incorrect, try again")
sys.exit(1)
else:
print(
"Check the status to ensure role is superhub and configured is True. {}".format(
status
)
)
sys.exit(1)
feederResponse = api.fr_remote_hubs()
if not feederResponse["hubs"]:
print(
"No attached feeders. Please attach at least one feeder hub before running this script."
)
sys.exit(1)
email = input("Enter email for fr_distributed_cleanup accounts: ")
logger.info("Creating fr_distributed_cleanup role on superhub...")
response = api.put(
"role",
"fr_distributed_cleanup",
{
"description": "fr_distributed_cleanup Federated Host Cleanup role",
"includeContext": "cfengine",
},
)
if response["status"] != 201:
print(
"Problem creating fr_distributed_cleanup role on superhub. {}".format(
response
)
)
sys.exit(1)
response = api.put_role_permissions(
"fr_distributed_cleanup", ["query.post", "remoteHub.list", "hubStatus.get"]
)
if response["status"] != 201:
print("Unable to set RBAC permissions on role fr_distributed_cleanup")
sys.exit(1)
logger.info("Creating fr_distributed_cleanup user on superhub")
response = api.put(
"user",
"fr_distributed_cleanup",
{
"description": "fr_distributed_cleanup Federated Host Cleanup user",
"email": "{}".format(email),
"password": "{}".format(fr_distributed_cleanup_password),
"roles": ["fr_distributed_cleanup"],
},
)
if response["status"] != 201:
print(
"Problem creating fr_distributed_cleanup user on superhub. {}".format(
response
)
)
sys.exit(1)
for hub in feederResponse["hubs"]:
feeder_credentials = getpass(
prompt="Enter admin credentials for {} at {}: ".format(
hub["ui_name"], hub["api_url"]
)
)
feeder_hostname = hub["ui_name"]
feeder_api = NovaApi(
api_user="admin",
api_password=feeder_credentials,
cert_path=CERT_PATH,
hostname=feeder_hostname,
)
logger.info("Creating fr_distributed_cleanup role on %s", feeder_hostname)
response = feeder_api.put(
"role",
"fr_distributed_cleanup",
{
"description": "fr_distributed_cleanup Federated Host Cleanup role",
"includeContext": "cfengine",
},
)
if response["status"] != 201:
print(
"Problem creating fr_distributed_cleanup role on superhub. {}".format(
response
)
)
sys.exit(1)
response = feeder_api.put_role_permissions(
"fr_distributed_cleanup", ["host.delete"]
)
if response["status"] != 201:
print("Unable to set RBAC permissions on role fr_distributed_cleanup")
sys.exit(1)
logger.info("Creating fr_distributed_cleanup user on %s", feeder_hostname)
response = feeder_api.put(
"user",
"fr_distributed_cleanup",
{
"description": "fr_distributed_cleanup Federated Host Cleanup user",
"email": "{}".format(email),
"password": "{}".format(fr_distributed_cleanup_password),
"roles": ["fr_distributed_cleanup"],
},
)
if response["status"] != 201:
print(
"Problem creating fr_distributed_cleanup user on {}. {}".format(
feeder_hostname, response
)
)
sys.exit(1)
write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password)
def main():
if not os.geteuid() == 0:
sys.exit("\n{} must be run as root".format(os.path.basename(__file__)))
parser = argparse.ArgumentParser(
description="Clean up migrating clients in Federated Reporting setup"
)
group = parser.add_mutually_exclusive_group()
group.add_argument("--debug", action="store_true")
group.add_argument("--inform", action="store_true")
args = parser.parse_args()
global logger
logger = logging.getLogger("fr_distributed_cleanup")
ch = logging.StreamHandler()
if args.debug:
logger.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
if args.inform:
logger.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
logger.addHandler(ch)
if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH):
if sys.stdout.isatty():
interactive_setup()
else:
print(
"{} requires manual setup, please run as root interactively.".format(
os.path.basename(__file__)
)
)
sys.exit(1)
fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH)
api = NovaApi(
api_user="fr_distributed_cleanup", api_password=fr_distributed_cleanup_password
) # defaults to localhost
response = api.fr_hub_status()
if not (
response["status"] == 200
and response["role"] == "superhub"
and response["configured"]
):
print(
"{} can only be run on a Federated Reporting hub configured to be superhub".format(
os.path.basename(__file__)
)
)
sys.exit(1)
response = api.fr_remote_hubs()
if not response["hubs"]:
print(
"No attached feeders. Please attach at least one feeder hub before running this script."
)
for hub in response["hubs"]:
if hub["role"] != "feeder" or hub["target_state"] != "on":
continue
feeder_hostkey = hub["hostkey"]
feeder_hostname = hub["ui_name"]
feeder_api = NovaApi(
api_user="fr_distributed_cleanup",
api_password=fr_distributed_cleanup_password,
cert_path=CERT_PATH,
hostname=feeder_hostname,
)
response = feeder_api.status()
if response["status"] != 200:
print(
"Unable to get status for feeder {}. Skipping".format(feeder_hostname)
)
continue
sql = "SELECT hub_id FROM __hubs WHERE hostkey = '{}'".format(feeder_hostkey)
response = api.query(sql)
if response["status"] != 200:
print("Unable to query for feeder hub_id. Response was {}".format(response))
continue
# query API should return one row, [0], and one column, [0], in rows value
feeder_hubid = response["rows"][0][0]
sql = """
SELECT DISTINCT hosts.hostkey
FROM hosts
WHERE hub_id = '{0}'
AND EXISTS(
SELECT 1 FROM lastseenhosts ls
JOIN (
SELECT hostkey, max(lastseentimestamp) as newesttimestamp
FROM lastseenhosts
WHERE lastseendirection = 'INCOMING'
GROUP BY hostkey
) as newest
ON ls.hostkey = newest.hostkey
AND ls.lastseentimestamp = newest.newesttimestamp
AND ls.hostkey = hosts.hostkey
AND ls.hub_id != '{0}'
)""".format(
feeder_hubid
)
response = api.query(sql)
if response["status"] != 200:
print(
"Unable to query for deletion candidates. Response was {}".format(
response
)
)
sys.exit(1)
logger.debug("Hosts to delete on %s are %s", hub["ui_name"], response["rows"])
hosts_to_delete = response["rows"]
if len(hosts_to_delete) == 0:
logger.info("%s: No hosts to delete. No actions taken.", feeder_hostname)
continue
logger.debug(
"%s host(s) to delete on feeder %s", len(hosts_to_delete), hub["ui_name"]
)
# build up a post-loop SQL statement to delete hosts locally from feeder schemas
# change to feeder schema to make deletions easier/more direct without having to
# specify hub_id in queries
post_sql = "set schema 'hub_{}';\n".format(feeder_hubid)
post_sql += "\\set ON_ERROR STOP on\n"
delete_sql = ""
post_hostkeys = []
for row in hosts_to_delete:
# The query API returns rows which are lists of column values.
# We only selected hostkey so will take the first value.
host_to_delete = row[0]
response = feeder_api.delete("host", host_to_delete)
# both 202 Accepted and 404 Not Found are acceptable responses
if response["status"] not in [202, 404]:
logger.warning(
"Delete %s on feeder %s got %s status code",
host_to_delete,
feeder_hostname,
response["status"],
)
continue
# only add the host_to_delete if it was successfully deleted on the feeder
post_hostkeys.append(host_to_delete)
if len(post_hostkeys) == 0:
logger.info(
"No hosts on feeder %s need processing on superhub so skipping post processing",
feeder_hostname,
)
continue
# simulate the host api delete process by setting current_timestamp in deleted column
# and delete from all federated tables similar to the clear_hosts_references() pgplsql function.
post_sql += "INSERT INTO __hosts (hostkey,deleted) VALUES"
for hostkey in post_hostkeys:
delete_sql += "('{}', CURRENT_TIMESTAMP) ".format(hostkey)
delete_sql += (
"ON CONFLICT (hostkey,hub_id) DO UPDATE SET deleted = excluded.deleted;\n"
)
clear_sql = "set schema 'public';\n"
for table in CFE_FR_TABLES:
# special case of partitioning, operating on parent table will work
if "__promiselog_*" in table:
table = "__promiselog"
clear_sql += (
"DELETE FROM {} WHERE hub_id = {} AND hostkey IN ({});\n".format(
table,
feeder_hubid,
",".join(["'{}'".format(hk) for hk in post_hostkeys]),
)
)
post_sql += delete_sql + clear_sql
logger.debug("Running SQL:\n%s", post_sql)
with subprocess.Popen(
["/var/cfengine/bin/psql", "cfdb"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as proc:
logger.debug("got a proc, sending sql...")
outs, errs = proc.communicate(input=post_sql.encode())
if "ERROR" in errs.decode("utf-8"):
print(
"Problem running post processing SQL. returncode was {}, stderr:\n{}\nstdout:\n{}".format(
proc.returncode, errs.decode("utf-8"), outs.decode("utf-8")
)
)
sys.exit(1)
logger.debug(
"Ran post processing SQL. returncode was %s, stderr:\n%s\nstdout:\n%s",
proc.returncode,
errs.decode("utf-8"),
outs.decode("utf-8"),
)
if len(hosts_to_delete) != 0:
logger.info(
"%s: %s host deletions processed",
hub["ui_name"],
len(hosts_to_delete),
)
if __name__ == "__main__":
main()
else:
raise ImportError("fr_distributed_cleanup.py must only be used as a script!")
| [((1852, 1902), 'os.path.join', 'os.path.join', (['DISTRIBUTED_CLEANUP_DIR', '"""hubs.cert"""'], {}), "(DISTRIBUTED_CLEANUP_DIR, 'hubs.cert')\n", (1864, 1902), False, 'import os\n'), ((2111, 2173), 'os.path.join', 'os.path.join', (['WORKDIR', '"""state/fr_distributed_cleanup.cfsecret"""'], {}), "(WORKDIR, 'state/fr_distributed_cleanup.cfsecret')\n", (2123, 2173), False, 'import os\n'), ((1083, 1108), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1098, 1108), False, 'import os\n'), ((1205, 1291), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'shell': '(True)', 'executable': '"""/bin/bash"""'}), "(cmd, stdout=subprocess.PIPE, shell=True, executable=\n '/bin/bash')\n", (1221, 1291), False, 'import subprocess\n'), ((1570, 1581), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1578, 1581), False, 'import sys\n'), ((2409, 2459), 'nova_api.NovaApi', 'NovaApi', ([], {'api_user': '"""admin"""', 'api_password': 'admin_pass'}), "(api_user='admin', api_password=admin_pass)\n", (2416, 2459), False, 'from nova_api import NovaApi\n'), ((6982, 7081), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Clean up migrating clients in Federated Reporting setup"""'}), "(description=\n 'Clean up migrating clients in Federated Reporting setup')\n", (7005, 7081), False, 'import argparse\n'), ((7315, 7358), 'logging.getLogger', 'logging.getLogger', (['"""fr_distributed_cleanup"""'], {}), "('fr_distributed_cleanup')\n", (7332, 7358), False, 'import logging\n'), ((7368, 7391), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (7389, 7391), False, 'import logging\n'), ((7989, 8033), 'cfsecret.read_secret', 'read_secret', (['DISTRIBUTED_CLEANUP_SECRET_PATH'], {}), '(DISTRIBUTED_CLEANUP_SECRET_PATH)\n', (8000, 8033), False, 'from cfsecret import read_secret, write_secret\n'), ((8044, 8137), 'nova_api.NovaApi', 'NovaApi', ([], {'api_user': '"""fr_distributed_cleanup"""', 'api_password': 'fr_distributed_cleanup_password'}), "(api_user='fr_distributed_cleanup', api_password=\n fr_distributed_cleanup_password)\n", (8051, 8137), False, 'from nova_api import NovaApi\n'), ((2247, 2285), 'random.choices', 'random.choices', (['string.printable'], {'k': '(20)'}), '(string.printable, k=20)\n', (2261, 2285), False, 'import random\n'), ((3308, 3319), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3316, 3319), False, 'import sys\n'), ((3877, 3888), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3885, 3888), False, 'import sys\n'), ((4141, 4152), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4149, 4152), False, 'import sys\n'), ((4752, 4763), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4760, 4763), False, 'import sys\n'), ((5043, 5153), 'nova_api.NovaApi', 'NovaApi', ([], {'api_user': '"""admin"""', 'api_password': 'feeder_credentials', 'cert_path': 'CERT_PATH', 'hostname': 'feeder_hostname'}), "(api_user='admin', api_password=feeder_credentials, cert_path=\n CERT_PATH, hostname=feeder_hostname)\n", (5050, 5153), False, 'from nova_api import NovaApi\n'), ((6765, 6843), 'cfsecret.write_secret', 'write_secret', (['DISTRIBUTED_CLEANUP_SECRET_PATH', 'fr_distributed_cleanup_password'], {}), '(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password)\n', (6777, 6843), False, 'from cfsecret import read_secret, write_secret\n'), ((7615, 7662), 'os.path.exists', 'os.path.exists', (['DISTRIBUTED_CLEANUP_SECRET_PATH'], {}), '(DISTRIBUTED_CLEANUP_SECRET_PATH)\n', (7629, 7662), False, 'import os\n'), ((7675, 7694), 
'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (7692, 7694), False, 'import sys\n'), ((8525, 8536), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8533, 8536), False, 'import sys\n'), ((8954, 9099), 'nova_api.NovaApi', 'NovaApi', ([], {'api_user': '"""fr_distributed_cleanup"""', 'api_password': 'fr_distributed_cleanup_password', 'cert_path': 'CERT_PATH', 'hostname': 'feeder_hostname'}), "(api_user='fr_distributed_cleanup', api_password=\n fr_distributed_cleanup_password, cert_path=CERT_PATH, hostname=\n feeder_hostname)\n", (8961, 9099), False, 'from nova_api import NovaApi\n'), ((2871, 2882), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2879, 2882), False, 'import sys\n'), ((3084, 3095), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3092, 3095), False, 'import sys\n'), ((5772, 5783), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5780, 5783), False, 'import sys\n'), ((6033, 6044), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6041, 6044), False, 'import sys\n'), ((6745, 6756), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6753, 6756), False, 'import sys\n'), ((6869, 6881), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (6879, 6881), False, 'import os\n'), ((7938, 7949), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7946, 7949), False, 'import sys\n'), ((10501, 10512), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10509, 10512), False, 'import sys\n'), ((13437, 13564), 'subprocess.Popen', 'subprocess.Popen', (["['/var/cfengine/bin/psql', 'cfdb']"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['/var/cfengine/bin/psql', 'cfdb'], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n", (13453, 13564), False, 'import subprocess\n'), ((2376, 2391), 'platform.node', 'platform.node', ([], {}), '()\n', (2389, 2391), False, 'import platform\n'), ((6939, 6965), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (6955, 6965), False, 'import os\n'), ((8466, 8492), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (8482, 8492), False, 'import os\n'), ((14073, 14084), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14081, 14084), False, 'import sys\n'), ((7867, 7893), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (7883, 7893), False, 'import os\n')] |
kennethsequeira/Hello-world | Python/Fibonacci.py | 464227bc7d9778a4a2a4044fe415a629003ea77f | # Prints the first n Fibonacci numbers, space-separated.
fibonacci = [1, 1]
n = int(input())
while len(fibonacci) < n:
fibonacci.append(fibonacci[-1] + fibonacci[-2])
for i in range(n):
print(fibonacci[i], end=' ')
| [] |
kreyoo/csgo-inv-shuffle | setup.py | 6392dd1eef1ca87ec25c9cf4845af3f8df3594a5 | from setuptools import setup
setup(name="csgoinvshuffle")
| [((30, 58), 'setuptools.setup', 'setup', ([], {'name': '"""csgoinvshuffle"""'}), "(name='csgoinvshuffle')\n", (35, 58), False, 'from setuptools import setup\n')] |
EnjoyLifeFund/py36pkgs | py/_log/log.py | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | """
basic logging functionality based on a producer/consumer scheme.
XXX implement this API: (maybe put it into slogger.py?)
log = Logger(
info=py.log.STDOUT,
debug=py.log.STDOUT,
command=None)
log.info("hello", "world")
log.command("hello", "world")
log = Logger(info=Logger(something=...),
debug=py.log.STDOUT,
command=None)
"""
import py, sys
class Message(object):
def __init__(self, keywords, args):
self.keywords = keywords
self.args = args
def content(self):
return " ".join(map(str, self.args))
def prefix(self):
return "[%s] " % (":".join(self.keywords))
def __str__(self):
return self.prefix() + self.content()
class Producer(object):
""" (deprecated) Log producer API which sends messages to be logged
to a 'consumer' object, which then prints them to stdout,
stderr, files, etc. Used extensively by PyPy-1.1.
"""
Message = Message # to allow later customization
keywords2consumer = {}
def __init__(self, keywords, keywordmapper=None, **kw):
if hasattr(keywords, 'split'):
keywords = tuple(keywords.split())
self._keywords = keywords
if keywordmapper is None:
keywordmapper = default_keywordmapper
self._keywordmapper = keywordmapper
def __repr__(self):
return "<py.log.Producer %s>" % ":".join(self._keywords)
def __getattr__(self, name):
if '_' in name:
raise AttributeError(name)
producer = self.__class__(self._keywords + (name,))
setattr(self, name, producer)
return producer
def __call__(self, *args):
""" write a message to the appropriate consumer(s) """
func = self._keywordmapper.getconsumer(self._keywords)
if func is not None:
func(self.Message(self._keywords, args))
class KeywordMapper:
def __init__(self):
self.keywords2consumer = {}
def getstate(self):
return self.keywords2consumer.copy()
def setstate(self, state):
self.keywords2consumer.clear()
self.keywords2consumer.update(state)
def getconsumer(self, keywords):
""" return a consumer matching the given keywords.
tries to find the most suitable consumer by walking, starting from
the back, the list of keywords, the first consumer matching a
keyword is returned (falling back to py.log.default)
"""
for i in range(len(keywords), 0, -1):
try:
return self.keywords2consumer[keywords[:i]]
except KeyError:
continue
return self.keywords2consumer.get('default', default_consumer)
def setconsumer(self, keywords, consumer):
""" set a consumer for a set of keywords. """
# normalize to tuples
if isinstance(keywords, str):
keywords = tuple(filter(None, keywords.split()))
elif hasattr(keywords, '_keywords'):
keywords = keywords._keywords
elif not isinstance(keywords, tuple):
raise TypeError("key %r is not a string or tuple" % (keywords,))
if consumer is not None and not py.builtin.callable(consumer):
if not hasattr(consumer, 'write'):
raise TypeError(
"%r should be None, callable or file-like" % (consumer,))
consumer = File(consumer)
self.keywords2consumer[keywords] = consumer
def default_consumer(msg):
""" the default consumer, prints the message to stdout (using 'print') """
sys.stderr.write(str(msg)+"\n")
default_keywordmapper = KeywordMapper()
def setconsumer(keywords, consumer):
default_keywordmapper.setconsumer(keywords, consumer)
def setstate(state):
default_keywordmapper.setstate(state)
def getstate():
return default_keywordmapper.getstate()
#
# Consumers
#
class File(object):
""" log consumer wrapping a file(-like) object """
def __init__(self, f):
assert hasattr(f, 'write')
#assert isinstance(f, file) or not hasattr(f, 'open')
self._file = f
def __call__(self, msg):
""" write a message to the log """
self._file.write(str(msg) + "\n")
if hasattr(self._file, 'flush'):
self._file.flush()
class Path(object):
""" log consumer that opens and writes to a Path """
def __init__(self, filename, append=False,
delayed_create=False, buffering=False):
self._append = append
self._filename = str(filename)
self._buffering = buffering
if not delayed_create:
self._openfile()
def _openfile(self):
mode = self._append and 'a' or 'w'
f = open(self._filename, mode)
self._file = f
def __call__(self, msg):
""" write a message to the log """
if not hasattr(self, "_file"):
self._openfile()
self._file.write(str(msg) + "\n")
if not self._buffering:
self._file.flush()
def STDOUT(msg):
""" consumer that writes to sys.stdout """
sys.stdout.write(str(msg)+"\n")
def STDERR(msg):
""" consumer that writes to sys.stderr """
sys.stderr.write(str(msg)+"\n")
class Syslog:
""" consumer that writes to the syslog daemon """
def __init__(self, priority = None):
if priority is None:
priority = self.LOG_INFO
self.priority = priority
def __call__(self, msg):
""" write a message to the log """
py.std.syslog.syslog(self.priority, str(msg))
for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
_prio = "LOG_" + _prio
try:
setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
except AttributeError:
pass
| [((3411, 3440), 'py.builtin.callable', 'py.builtin.callable', (['consumer'], {}), '(consumer)\n', (3430, 3440), False, 'import py, sys\n')] |
Sergggio/python_training | test/test_all_contacts.py | 6dfdbed9a503cf9a6810b31c57bdde76b15e4ec4 | import re
from model.contact import Contact
def test_all_contacts(app, db):
contacts_from_db = db.get_contact_list()
phone_list_from_db = db.phones_from_db()
#email_liset_from_db = db.emails_from_db()
phone_list = []
for phone in phone_list_from_db:
phone_list.append(merge_phones_like_on_home_page(phone))
email_list = []
#for email in email_liset_from_db:
# email_list.append(merge_mail_like_on_home_page(email))
contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
phones_from_home_page = [con.all_phones_from_home_page for con in contacts_from_home_page]
#emails_from_home_page = [con.all_mail_from_home_page for con in contacts_from_home_page]
assert phone_list == phones_from_home_page
#assert email_list == emails_from_home_page
assert contacts_from_db == contacts_from_home_page
def clear(s):
return re.sub("[() -]", "", s)
def remove_spaces(s):
return re.sub(' +', ' ', s).rstrip()
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home_phone, contact.mobile_phone,
contact.work_phone, contact.secondary_phone]))))
def merge_email_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: remove_spaces(x),
filter(lambda x: x is not None,
[contact.email, contact.email2, contact.email3]))))
| [((918, 941), 're.sub', 're.sub', (['"""[() -]"""', '""""""', 's'], {}), "('[() -]', '', s)\n", (924, 941), False, 'import re\n'), ((977, 997), 're.sub', 're.sub', (['""" +"""', '""" """', 's'], {}), "(' +', ' ', s)\n", (983, 997), False, 'import re\n')] |
jproudlo/PyModel | samples/abp/test_graphics.py | 2ab0e2cf821807206725adaa425409b0c28929b7 | """
ABP analyzer and graphics tests
"""
cases = [
('Run Pymodel Graphics to generate dot file from FSM model, no need use pma',
'pmg ABP'),
('Generate SVG file from dot',
'dotsvg ABP'),
# Now display ABP.dot in browser
('Run PyModel Analyzer to generate FSM from original FSM, should be the same',
'pma ABP'),
('Run PyModel Graphics to generate a file of graphics commands from new FSM',
'pmg ABPFSM'),
('Generate an svg file from the graphics commands',
'dotsvg ABPFSM'),
# Now display ABPFSM.svg in browser, should look the same as ABP.svg
]
| [] |
IceArrow256/game-list | games/migrations/0002_auto_20201026_1221.py | 5f06e0ff80023acdc0290a9a8f814f7c93b45e0e | # Generated by Django 3.1.2 on 2020-10-26 12:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='game',
name='score',
field=models.FloatField(null=True, verbose_name='Score'),
),
migrations.AlterField(
model_name='game',
name='series',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='games.series'),
),
]
| [((353, 403), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'verbose_name': '"""Score"""'}), "(null=True, verbose_name='Score')\n", (370, 403), False, 'from django.db import migrations, models\n'), ((523, 619), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""games.series"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='games.series')\n", (540, 619), False, 'from django.db import migrations, models\n')] |
mkubux/egenix-mx-base | build/lib.linux-x86_64-2.7_ucs4/mx/Misc/PackageTools.py | 3e6f9186334d9d73743b0219ae857564c7208247 | """ PackageTools - A set of tools to aid working with packages.
Copyright (c) 1998-2000, Marc-Andre Lemburg; mailto:[email protected]
Copyright (c) 2000-2015, eGenix.com Software GmbH; mailto:[email protected]
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
"""
__version__ = '0.4.0'
import os,types,sys,re,imp,__builtin__
import mx.Tools.NewBuiltins
# RE to identify Python modules
suffixes = projection(imp.get_suffixes(),0)
module_name = re.compile('(.*)(' + '|'.join(suffixes) + ')$')
initmodule_name = re.compile('__init__(' + '|'.join(suffixes) + ')$')
initmodule_names = []
for suffix in suffixes:
initmodule_names.append('__init__' + suffix)
def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0,
pkgbasename='', pkgdict=None,
isdir=os.path.isdir,exists=os.path.exists,
isfile=os.path.isfile,join=os.path.join,listdir=os.listdir,
module_name=module_name,initmodule_name=initmodule_name):
""" Return a list of package names found in dir.
Packages are Python modules and subdirectories that provide an
__init__ module. The .py extension is removed from the
    files. The __init__ modules are not considered separate
packages.
If files_only is true, only Python files are included in the
search (subdirectories are *not* taken into account). If
ignore_modules is true (default is false), modules are
ignored. If recursive is true the search recurses into package
directories.
pkgbasename and pkgdict are only used during recursion.
"""
l = listdir(dir)
if pkgdict is None:
pkgdict = {}
if files_only:
for filename in l:
m = module_name.match(filename)
if m is not None and \
m.group(1) != '__init__':
pkgdict[pkgbasename + m.group(1)] = 1
else:
for filename in l:
path = join(dir, filename)
if isdir(path):
# Check for __init__ module(s)
for name in initmodule_names:
if isfile(join(path, name)):
pkgname = pkgbasename + filename
pkgdict[pkgname] = 1
if recursive:
find_packages(path,
recursive=1,
pkgbasename=pkgname + '.',
pkgdict=pkgdict)
break
elif not ignore_modules:
m = module_name.match(filename)
if m is not None and \
m.group(1) != '__init__':
pkgdict[pkgbasename + m.group(1)] = 1
return pkgdict.keys()
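# Minimal usage sketch (illustrative only; assumes the current directory
# contains importable packages, i.e. subdirectories with an __init__ module):
#
#   pkgnames = find_packages(os.curdir, recursive=1, ignore_modules=1)
#   # e.g. ['mypkg', 'mypkg.subpkg', ...]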
def find_subpackages(package, recursive=0,
splitpath=os.path.split):
""" Assuming that package points to a loaded package module, this
function tries to identify all subpackages of that package.
Subpackages are all Python files included in the same
directory as the module plus all subdirectories having an
    __init__.py file. The module's name is prepended to all
subpackage names.
The module location is found by looking at the __file__
attribute that non-builtin modules define. The function uses
the __all__ attribute from the package __init__ module if
available.
If recursive is true (default is false), then subpackages of
subpackages are recursively also included in the search.
"""
if not recursive:
# Try the __all__ attribute...
try:
subpackages = list(package.__all__)
except (ImportError, AttributeError):
# Did not work, then let's try to find the subpackages by looking
# at the directory where package lives...
subpackages = find_packages(package.__path__[0], recursive=recursive)
else:
# XXX Recursive search does not support the __all__ attribute
subpackages = find_packages(package.__path__[0], recursive=recursive)
basename = package.__name__ + '.'
for i,name in irange(subpackages):
subpackages[i] = basename + name
return subpackages
def _thismodule(upcount=1,
exc_info=sys.exc_info,trange=trange):
""" Returns the module object that the callee is calling from.
upcount can be given to indicate how far up the execution
stack the function is supposed to look (1 == direct callee, 2
== callee of callee, etc.).
"""
try:
1/0
except:
frame = exc_info()[2].tb_frame
for i in trange(upcount):
frame = frame.f_back
name = frame.f_globals['__name__']
del frame
return sys.modules[name]
def _module_loader(name, locals, globals, sysmods, errors='strict', reload=0,
                   importer=__import__, reloader=__builtin__.reload,
                   from_list=['*']):
""" Internal API for loading a module
"""
if not sysmods.has_key(name):
is_new = 1
else:
is_new = 0
try:
mod = importer(name, locals, globals, from_list)
if reload and not is_new:
mod = reloader(mod)
except KeyboardInterrupt:
# Pass through; SystemExit will be handled by the error handler
raise
except Exception, why:
if errors == 'ignore':
pass
elif errors == 'strict':
raise
elif callable(errors):
errors(name, sys.exc_info()[0], sys.exc_info()[1])
else:
raise ValueError,'unknown errors value'
else:
return mod
return None
def import_modules(modnames,module=None,errors='strict',reload=0,
thismodule=_thismodule):
""" Import all modules given in modnames into module.
module defaults to the caller's module. modnames may contain
dotted package names.
If errors is 'strict' (default), then ImportErrors and
SyntaxErrors are raised. If set to 'ignore', they are silently
ignored. If errors is a callable object, then it is called
with arguments (modname, errorclass, errorvalue). If the
handler returns, processing continues.
    If reload is true (default is false), all modules in the list
    that are already loaded will be forced to reload.
"""
if module is None:
module = _thismodule(2)
locals = module.__dict__
sysmods = sys.modules
for name in modnames:
        mod = _module_loader(name, locals, locals, sysmods,
                             errors=errors, reload=reload)
if mod is not None:
locals[name] = mod
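# Sketch (illustrative only): pull a couple of standard modules into the
# calling module's namespace, reloading any that were already imported:
#
#   import_modules(['os', 'string'], reload=1)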
def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0):
""" Imports all modules in modnames using the given namespaces and returns
list of corresponding module objects.
If errors is 'strict' (default), then ImportErrors and
SyntaxErrors are raised. If set to 'ignore', they are silently
ignored. If errors is a callable object, then it is called
with arguments (modname, errorclass, errorvalue). If the
handler returns, processing continues.
    If reload is true (default is false), all modules in the list
    that are already loaded will be forced to reload.
"""
modules = []
append = modules.append
sysmods = sys.modules
for name in modnames:
        mod = _module_loader(name, locals, globals, sysmods,
                             errors=errors, reload=reload)
if mod is not None:
append(mod)
return modules
def import_subpackages(module, reload=0, recursive=0,
import_modules=import_modules,
find_subpackages=find_subpackages):
""" Does a subpackages scan using find_subpackages(module) and then
imports all submodules found into module.
The module location is found by looking at the __file__
attribute that non-builtin modules define. The function uses
the __all__ attribute from the package __init__ module if
available.
    If reload is true (default is false), all modules in the list
    that are already loaded will be forced to reload.
"""
import_modules(find_subpackages(module, recursive=recursive),
module, reload=reload)
def load_subpackages(module, locals=None, globals=None, errors='strict', reload=0,
recursive=0,
load_modules=load_modules,
find_subpackages=find_subpackages):
""" Same as import_subpackages but with load_modules
functionality, i.e. imports the modules and also returns a list of
module objects.
If errors is 'strict' (default), then ImportErrors are
raised. If set to 'ignore', they are silently ignored.
    If reload is true (default is false), all modules in the list
    that are already loaded will be forced to reload.
"""
return load_modules(find_subpackages(module, recursive=recursive),
locals, globals,
errors=errors, reload=reload)
def modules(names,
extract=extract):
""" Converts a list of module names into a list of module objects.
The modules must already be loaded.
"""
return extract(sys.modules, names)
def package_modules(pkgname):
""" Returns a list of all modules belonging to the package with the
given name.
The package must already be loaded. Only the currently
registered modules are included in the list.
"""
match = pkgname + '.'
match_len = len(match)
mods = [sys.modules[pkgname]]
for k,v in sys.modules.items():
if k[:match_len] == match and v is not None:
mods.append(v)
return mods
def find_classes(mods,baseclass=None,annotated=0,
ClassType=types.ClassType,issubclass=issubclass):
""" Find all subclasses of baseclass or simply all classes (if baseclass
is None) defined by the module objects in list mods.
If annotated is true the returned list will contain tuples
(module_object,name,class_object) for each class found where
module_object is the module where the class is defined.
"""
classes = []
for mod in mods:
for name,obj in mod.__dict__.items():
if type(obj) is ClassType:
if baseclass and not issubclass(obj,baseclass):
continue
if annotated:
classes.append((mod, name, obj))
else:
classes.append(obj)
return classes
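# Illustrative sketch ('mypkg' is a placeholder for an already loaded package):
#
#   import unittest
#   cases = find_classes(package_modules('mypkg'), unittest.TestCase)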
def find_instances(mods,baseclass,annotated=0,
InstanceType=types.InstanceType,issubclass=issubclass):
""" Find all instances of baseclass defined by the module objects
in list mods.
If annotated is true the returned list will contain tuples
    (module_object,name,instance_object) for each instance found where
    module_object is the module where the instance is defined.
"""
instances = []
for mod in mods:
for name,obj in mod.__dict__.items():
if isinstance(obj,baseclass):
if annotated:
instances.append((mod,name,obj))
else:
instances.append(obj)
return instances
| [] |
Kshitijkrishnadas/haribol | Lib/test/test_urllib.py | ca45e633baaabaad3bb923f5633340ccf88d996c | """Regression tests for what was in Python 2's "urllib" module"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
try:
import ssl
except ImportError:
ssl = None
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
def FancyURLopener():
with support.check_warnings(
('FancyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
return urllib.request.FancyURLopener()
def fakehttp(fakedata, mock_close=False):
class FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
if self.io_refs == 0:
io.BytesIO.close(self)
class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = None
def connect(self):
self.sock = FakeSocket(self.fakedata)
type(self).fakesock = self.sock
if mock_close:
# bpo-36918: HTTPConnection destructor calls close() which calls
# flush(). Problem: flush() calls self.fp.flush() which raises
# "ValueError: I/O operation on closed file" which is logged as an
# "Exception ignored in". Override close() to silence this error.
def close(self):
pass
FakeHTTPConnection.fakedata = fakedata
return FakeHTTPConnection
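# Sketch of the intended use (the FakeHTTPMixin below does exactly this):
#
#   http.client.HTTPConnection = fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")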
class FakeHTTPMixin(object):
def fakehttp(self, fakedata, mock_close=False):
fake_http_class = fakehttp(fakedata, mock_close=mock_close)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = fake_http_class
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
class FakeFTPMixin(object):
def fakeftp(self):
class FakeFtpWrapper(object):
def __init__(self, user, passwd, host, port, dirs, timeout=None,
persistent=True):
pass
def retrfile(self, file, type):
return io.BytesIO(), 0
def close(self):
pass
self._ftpwrapper_class = urllib.request.ftpwrapper
urllib.request.ftpwrapper = FakeFtpWrapper
def unfakeftp(self):
urllib.request.ftpwrapper = self._ftpwrapper_class
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
# getproxies_environment use lowered case truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888'))
self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234'))
def test_proxy_cgi_ignore(self):
try:
self.env.set('HTTP_PROXY', 'http://somewhere:3128')
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
self.env.set('REQUEST_METHOD', 'GET')
proxies = urllib.request.getproxies_environment()
self.assertNotIn('http', proxies)
finally:
self.env.unset('REQUEST_METHOD')
self.env.unset('HTTP_PROXY')
def test_proxy_bypass_environment_host_match(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t')
self.assertTrue(bypass('localhost'))
self.assertTrue(bypass('LocalHost')) # MixedCase
self.assertTrue(bypass('LOCALHOST')) # UPPERCASE
self.assertTrue(bypass('.localhost'))
self.assertTrue(bypass('newdomain.com:1234'))
self.assertTrue(bypass('.newdomain.com:1234'))
self.assertTrue(bypass('foo.d.o.t')) # issue 29142
self.assertTrue(bypass('d.o.t'))
self.assertTrue(bypass('anotherdomain.com:8888'))
self.assertTrue(bypass('.anotherdomain.com:8888'))
self.assertTrue(bypass('www.newdomain.com:1234'))
self.assertFalse(bypass('prelocalhost'))
self.assertFalse(bypass('newdomain.com')) # no port
self.assertFalse(bypass('newdomain.com:1235')) # wrong port
def test_proxy_bypass_environment_always_match(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY', '*')
self.assertTrue(bypass('newdomain.com'))
self.assertTrue(bypass('newdomain.com:1234'))
self.env.set('NO_PROXY', '*, anotherdomain.com')
self.assertTrue(bypass('anotherdomain.com'))
self.assertFalse(bypass('newdomain.com'))
self.assertFalse(bypass('newdomain.com:1234'))
def test_proxy_bypass_environment_newline(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234')
self.assertFalse(bypass('localhost\n'))
self.assertFalse(bypass('anotherdomain.com:8888\n'))
self.assertFalse(bypass('newdomain.com:1234\n'))
class ProxyTests_withOrderedEnv(unittest.TestCase):
def setUp(self):
# We need to test conditions, where variable order _is_ significant
self._saved_env = os.environ
# Monkey patch os.environ, start with empty fake environment
os.environ = collections.OrderedDict()
def tearDown(self):
os.environ = self._saved_env
def test_getproxies_environment_prefer_lowercase(self):
# Test lowercase preference with removal
os.environ['no_proxy'] = ''
os.environ['No_Proxy'] = 'localhost'
self.assertFalse(urllib.request.proxy_bypass_environment('localhost'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
os.environ['http_proxy'] = ''
os.environ['HTTP_PROXY'] = 'http://somewhere:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual({}, proxies)
# Test lowercase preference of proxy bypass and correct matching including ports
os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234'
os.environ['No_Proxy'] = 'xyz.com'
self.assertTrue(urllib.request.proxy_bypass_environment('localhost'))
self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678'))
self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234'))
self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
# Test lowercase preference with replacement
os.environ['http_proxy'] = 'http://somewhere:3128'
os.environ['Http_Proxy'] = 'http://somewhereelse:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
resp = urlopen("http://www.python.org")
self.assertTrue(resp.fp.will_close)
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_path_with_control_char_rejected(self):
for char_no in list(range(0, 0x21)) + [0x7f]:
char = chr(char_no)
schemeless_url = f"//localhost:7777/test{char}/"
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
try:
# We explicitly test urllib.request.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
# calls urllib.parse.quote() on the URL which makes all of the
# above attempts at injection within the url _path_ safe.
escaped_char_repr = repr(char).replace('\\', r'\\')
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, f"contain control.*{escaped_char_repr}"):
urllib.request.urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(
InvalidURL, f"contain control.*{escaped_char_repr}"):
urllib.request.urlopen(f"https:{schemeless_url}")
# This code path quotes the URL so there is no injection.
resp = urlopen(f"http:{schemeless_url}")
self.assertNotIn(char, resp.geturl())
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_path_with_newline_header_injection_rejected(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
schemeless_url = "//" + host + ":8080/test/?test=a"
try:
# We explicitly test urllib.request.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
# calls urllib.parse.quote() on the URL which makes all of the
# above attempts at injection within the url _path_ safe.
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, r"contain control.*\\r.*(found at least . .)"):
urllib.request.urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"):
urllib.request.urlopen(f"https:{schemeless_url}")
# This code path quotes the URL so there is no injection.
resp = urlopen(f"http:{schemeless_url}")
self.assertNotIn(' ', resp.geturl())
self.assertNotIn('\r', resp.geturl())
self.assertNotIn('\n', resp.geturl())
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_host_with_control_char_rejected(self):
for char_no in list(range(0, 0x21)) + [0x7f]:
char = chr(char_no)
schemeless_url = f"//localhost{char}/test/"
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
try:
escaped_char_repr = repr(char).replace('\\', r'\\')
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, f"contain control.*{escaped_char_repr}"):
urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(InvalidURL, f"contain control.*{escaped_char_repr}"):
urlopen(f"https:{schemeless_url}")
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_host_with_newline_header_injection_rejected(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
host = "localhost\r\nX-injected: header\r\n"
schemeless_url = "//" + host + ":8080/test/?test=a"
try:
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, r"contain control.*\\r"):
urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"):
urlopen(f"https:{schemeless_url}")
finally:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but not "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''', mock_close=True)
try:
self.assertRaises(OSError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''', mock_close=True)
try:
msg = "Redirection to url 'file:"
with self.assertRaisesRegex(urllib.error.HTTPError, msg):
urlopen("http://python.org/")
finally:
self.unfakehttp()
def test_redirect_limit_independent(self):
# Ticket #12923: make sure independent requests each use their
# own retry limit.
for i in range(FancyURLopener().maxtries):
self.fakehttp(b'''HTTP/1.1 302 Found
Location: file://guidocomputer.athome.com:/python/license
Connection: close
''', mock_close=True)
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://something")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises OSError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(OSError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
with self.assertRaises(urllib.error.URLError) as e:
urlopen('file://localhost/a/file/which/doesnot/exists.py')
self.assertTrue(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
with urlopen(tmp_fileurl) as fobj:
self.assertTrue(fobj)
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
with self.assertRaises(urllib.error.URLError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
with self.assertRaises(urllib.error.URLError) as e:
urlopen(test_ftp_url)
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_ftp_nonexisting(self):
with self.assertRaises(urllib.error.URLError) as e:
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
@patch.object(urllib.request, 'MAXFTPCACHE', 0)
def test_ftp_cache_pruning(self):
self.fakeftp()
try:
urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
urlopen('ftp://localhost')
finally:
self.unfakeftp()
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://user:[email protected]/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:[email protected]/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_URLopener_deprecation(self):
with support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
@unittest.skipUnless(ssl, "ssl module required")
def test_cafile_and_context(self):
context = ssl.create_default_context()
with support.check_warnings(('', DeprecationWarning)):
with self.assertRaises(ValueError):
urllib.request.urlopen(
"https://localhost", cafile="/nonexistent/path", context=context
)
class urlopen_DataTests(unittest.TestCase):
"""Test urlopen() opening a data URL."""
def setUp(self):
# text containing URL special- and unicode-characters
self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
# 2x1 pixel RGB PNG image with one black and one white pixel
self.image = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
self.text_url = (
"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
"D%26%20%C3%B6%20%C3%84%20")
self.text_url_base64 = (
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
"sJT0mIPYgxCA%3D")
# base64 encoded data URL that contains ignorable spaces,
# such as "\n", " ", "%0A", and "%20".
self.image_url = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
"vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
self.text_url_resp = urllib.request.urlopen(self.text_url)
self.text_url_base64_resp = urllib.request.urlopen(
self.text_url_base64)
self.image_url_resp = urllib.request.urlopen(self.image_url)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.text_url_resp, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_info(self):
self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
self.assertEqual(self.text_url_base64_resp.info().get_params(),
[('text/plain', ''), ('charset', 'ISO-8859-1')])
self.assertEqual(self.image_url_resp.info()['content-length'],
str(len(self.image)))
self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
[('text/plain', ''), ('charset', 'US-ASCII')])
def test_geturl(self):
self.assertEqual(self.text_url_resp.geturl(), self.text_url)
self.assertEqual(self.text_url_base64_resp.geturl(),
self.text_url_base64)
self.assertEqual(self.image_url_resp.geturl(), self.image_url)
def test_read_text(self):
self.assertEqual(self.text_url_resp.read().decode(
dict(self.text_url_resp.info().get_params())['charset']), self.text)
def test_read_text_base64(self):
self.assertEqual(self.text_url_base64_resp.read().decode(
dict(self.text_url_base64_resp.info().get_params())['charset']),
self.text)
def test_read_image(self):
self.assertEqual(self.image_url_resp.read(), self.image)
def test_missing_comma(self):
self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
def test_invalid_base64_data(self):
# missing padding character
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to makes sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
try:
filePath.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filePath is not encodable to utf8")
return "file://%s" % urllib.request.pathname2url(filePath)
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did not get an email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve(support.TEST_HTTP_URL,
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve(support.TEST_HTTP_URL)
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
r"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 3986 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
        # Make sure quote() does not quote letters, digits, and "_.-~"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-~"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append(r'<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
        # Characters in Latin-1 range, encoded with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for quoting_Tests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
def test_unquoting_with_bytes_input(self):
# Bytes not supported yet
with self.assertRaisesRegex(TypeError, 'Expected str, got bytes'):
given = b'bl\xc3\xa5b\xc3\xa6rsyltet\xc3\xb8y'
urllib.parse.unquote(given)
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
        The test cannot assume anything about ordering, since the docs make
        no guarantee and the input may be a dictionary.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
        # ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
        # ASCII Encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test that automatic quoting and unquoting work for pathname2url() and
        # url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.url2path function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
                             'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(FakeHTTPMixin, unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
with support.check_warnings(
('DummyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
@support.ignore_warnings(category=DeprecationWarning)
def test_urlopener_retrieve_file(self):
with support.temp_dir() as tmpdir:
fd, tmpfile = tempfile.mkstemp(dir=tmpdir)
os.close(fd)
fileurl = "file:" + urllib.request.pathname2url(tmpfile)
filename, _ = urllib.request.URLopener().retrieve(fileurl)
# Some buildbots have TEMP folder that uses a lowercase drive letter.
self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile))
@support.ignore_warnings(category=DeprecationWarning)
def test_urlopener_retrieve_remote(self):
url = "http://www.python.org/file.txt"
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
self.addCleanup(self.unfakehttp)
filename, _ = urllib.request.URLopener().retrieve(url)
self.assertEqual(os.path.splitext(filename)[1], ".txt")
@support.ignore_warnings(category=DeprecationWarning)
def test_local_file_open(self):
# bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme
class DummyURLopener(urllib.request.URLopener):
def open_local_file(self, url):
return url
for url in ('local_file://example', 'local-file://example'):
self.assertRaises(OSError, urllib.request.urlopen, url)
self.assertRaises(OSError, urllib.request.URLopener().open, url)
self.assertRaises(OSError, urllib.request.URLopener().retrieve, url)
self.assertRaises(OSError, DummyURLopener().open, url)
self.assertRaises(OSError, DummyURLopener().retrieve, url)
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work ok, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen()
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
"""Unit tests for urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
class URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
for path in list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
for path in list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
if __name__ == '__main__':
unittest.main()
| [((13399, 13446), 'unittest.skipUnless', 'unittest.skipUnless', (['ssl', '"""ssl module required"""'], {}), "(ssl, 'ssl module required')\n", (13418, 13446), False, 'import unittest\n'), ((14942, 14989), 'unittest.skipUnless', 'unittest.skipUnless', (['ssl', '"""ssl module required"""'], {}), "(ssl, 'ssl module required')\n", (14961, 14989), False, 'import unittest\n'), ((16414, 16461), 'unittest.skipUnless', 'unittest.skipUnless', (['ssl', '"""ssl module required"""'], {}), "(ssl, 'ssl module required')\n", (16433, 16461), False, 'import unittest\n'), ((17244, 17291), 'unittest.skipUnless', 'unittest.skipUnless', (['ssl', '"""ssl module required"""'], {}), "(ssl, 'ssl module required')\n", (17263, 17291), False, 'import unittest\n'), ((21431, 21477), 'unittest.mock.patch.object', 'patch.object', (['urllib.request', '"""MAXFTPCACHE"""', '(0)'], {}), "(urllib.request, 'MAXFTPCACHE', 0)\n", (21443, 21477), False, 'from unittest.mock import patch\n'), ((23207, 23254), 'unittest.skipUnless', 'unittest.skipUnless', (['ssl', '"""ssl module required"""'], {}), "(ssl, 'ssl module required')\n", (23226, 23254), False, 'import unittest\n'), ((62957, 63055), 'unittest.skipUnless', 'unittest.skipUnless', (["(sys.platform == 'win32')", '"""test specific to the urllib.url2path function."""'], {}), "(sys.platform == 'win32',\n 'test specific to the urllib.url2path function.')\n", (62976, 63055), False, 'import unittest\n'), ((64764, 64816), 'test.support.ignore_warnings', 'support.ignore_warnings', ([], {'category': 'DeprecationWarning'}), '(category=DeprecationWarning)\n', (64787, 64816), False, 'from test import support\n'), ((65296, 65348), 'test.support.ignore_warnings', 'support.ignore_warnings', ([], {'category': 'DeprecationWarning'}), '(category=DeprecationWarning)\n', (65319, 65348), False, 'from test import support\n'), ((65672, 65724), 'test.support.ignore_warnings', 'support.ignore_warnings', ([], {'category': 'DeprecationWarning'}), '(category=DeprecationWarning)\n', (65695, 65724), False, 'from test import support\n'), ((72166, 72181), 'unittest.main', 'unittest.main', ([], {}), '()\n', (72179, 72181), False, 'import unittest\n'), ((1173, 1286), 'test.support.check_warnings', 'support.check_warnings', (["('FancyURLopener style of invoking requests is deprecated.', DeprecationWarning\n )"], {}), "((\n 'FancyURLopener style of invoking requests is deprecated.',\n DeprecationWarning))\n", (1195, 1286), False, 'from test import support\n'), ((4408, 4433), 'os.remove', 'os.remove', (['support.TESTFN'], {}), '(support.TESTFN)\n', (4417, 4433), False, 'import os\n'), ((6996, 7025), 'test.support.EnvironmentVarGuard', 'support.EnvironmentVarGuard', ([], {}), '()\n', (7023, 7025), False, 'from test import support\n'), ((10683, 10708), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (10706, 10708), False, 'import collections\n'), ((20450, 20468), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (20466, 20468), False, 'import tempfile\n'), ((23312, 23340), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (23338, 23340), False, 'import ssl\n'), ((28196, 28221), 'os.path.abspath', 'os.path.abspath', (['filePath'], {}), '(filePath)\n', (28211, 28221), False, 'import os\n'), ((28721, 28739), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (28737, 28739), False, 'import tempfile\n'), ((56684, 56729), 'collections.OrderedDict', 'collections.OrderedDict', (["[('a', 1), ('b', 1)]"], {}), "([('a', 1), ('b', 1)])\n", (56707, 56729), 
False, 'import collections\n'), ((61130, 61170), 'os.path.join', 'os.path.join', (['"""parts"""', '"""of"""', '"""a"""', '"""path"""'], {}), "('parts', 'of', 'a', 'path')\n", (61142, 61170), False, 'import os\n'), ((61804, 61845), 'os.path.join', 'os.path.join', (['"""needs"""', '"""quot=ing"""', '"""here"""'], {}), "('needs', 'quot=ing', 'here')\n", (61816, 61845), False, 'import os\n'), ((62346, 62386), 'os.path.join', 'os.path.join', (['"""make sure"""', '"""using_quote"""'], {}), "('make sure', 'using_quote')\n", (62358, 62386), False, 'import os\n'), ((62711, 62753), 'os.path.join', 'os.path.join', (['"""make+sure"""', '"""using_unquote"""'], {}), "('make+sure', 'using_unquote')\n", (62723, 62753), False, 'import os\n'), ((1732, 1758), 'io.BytesIO.read', 'io.BytesIO.read', (['self', 'amt'], {}), '(self, amt)\n', (1747, 1758), False, 'import io\n'), ((1875, 1908), 'io.BytesIO.readline', 'io.BytesIO.readline', (['self', 'length'], {}), '(self, length)\n', (1894, 1908), False, 'import io\n'), ((20729, 20741), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (20737, 20741), False, 'import os\n'), ((20754, 20773), 'os.unlink', 'os.unlink', (['tmp_file'], {}), '(tmp_file)\n', (20763, 20773), False, 'import os\n'), ((20799, 20823), 'os.path.exists', 'os.path.exists', (['tmp_file'], {}), '(tmp_file)\n', (20813, 20823), False, 'import os\n'), ((23113, 23161), 'test.support.check_warnings', 'support.check_warnings', (["('', DeprecationWarning)"], {}), "(('', DeprecationWarning))\n", (23135, 23161), False, 'from test import support\n'), ((23354, 23402), 'test.support.check_warnings', 'support.check_warnings', (["('', DeprecationWarning)"], {}), "(('', DeprecationWarning))\n", (23376, 23402), False, 'from test import support\n'), ((28828, 28850), 'os.fdopen', 'os.fdopen', (['newFd', '"""wb"""'], {}), "(newFd, 'wb')\n", (28837, 28850), False, 'import os\n'), ((29936, 29963), 'os.path.exists', 'os.path.exists', (['second_temp'], {}), '(second_temp)\n', (29950, 29963), False, 'import os\n'), ((45408, 45462), 'test.support.check_warnings', 'support.check_warnings', (["('', BytesWarning)"], {'quiet': '(True)'}), "(('', BytesWarning), quiet=True)\n", (45430, 45462), False, 'from test import support\n'), ((64270, 64383), 'test.support.check_warnings', 'support.check_warnings', (["('DummyURLopener style of invoking requests is deprecated.', DeprecationWarning\n )"], {}), "((\n 'DummyURLopener style of invoking requests is deprecated.',\n DeprecationWarning))\n", (64292, 64383), False, 'from test import support\n'), ((64874, 64892), 'test.support.temp_dir', 'support.temp_dir', ([], {}), '()\n', (64890, 64892), False, 'from test import support\n'), ((64930, 64958), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'dir': 'tmpdir'}), '(dir=tmpdir)\n', (64946, 64958), False, 'import tempfile\n'), ((64971, 64983), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (64979, 64983), False, 'import os\n'), ((70106, 70127), 'nturl2path.url2pathname', 'url2pathname', (['"""///C|"""'], {}), "('///C|')\n", (70118, 70127), False, 'from nturl2path import url2pathname, pathname2url\n'), ((70160, 70181), 'nturl2path.url2pathname', 'url2pathname', (['"""///C:"""'], {}), "('///C:')\n", (70172, 70181), False, 'from nturl2path import url2pathname, pathname2url\n'), ((70214, 70236), 'nturl2path.url2pathname', 'url2pathname', (['"""///C|/"""'], {}), "('///C|/')\n", (70226, 70236), False, 'from nturl2path import url2pathname, pathname2url\n'), ((70363, 70389), 'nturl2path.url2pathname', 'url2pathname', (['"""///C/test/"""'], {}), 
"('///C/test/')\n", (70375, 70389), False, 'from nturl2path import url2pathname, pathname2url\n'), ((70435, 70462), 'nturl2path.url2pathname', 'url2pathname', (['"""////C/test/"""'], {}), "('////C/test/')\n", (70447, 70462), False, 'from nturl2path import url2pathname, pathname2url\n'), ((70543, 70581), 'nturl2path.url2pathname', 'url2pathname', (['"""///C|/foo/bar/spam.foo"""'], {}), "('///C|/foo/bar/spam.foo')\n", (70555, 70581), False, 'from nturl2path import url2pathname, pathname2url\n'), ((71150, 71168), 'nturl2path.pathname2url', 'pathname2url', (['"""C:"""'], {}), "('C:')\n", (71162, 71168), False, 'from nturl2path import url2pathname, pathname2url\n'), ((71204, 71224), 'nturl2path.pathname2url', 'pathname2url', (['"""C:\\\\"""'], {}), "('C:\\\\')\n", (71216, 71224), False, 'from nturl2path import url2pathname, pathname2url\n'), ((71313, 71349), 'nturl2path.pathname2url', 'pathname2url', (['"""\\\\\\\\\\\\folder\\\\test\\\\"""'], {}), "('\\\\\\\\\\\\folder\\\\test\\\\')\n", (71325, 71349), False, 'from nturl2path import url2pathname, pathname2url\n'), ((71422, 71456), 'nturl2path.pathname2url', 'pathname2url', (['"""\\\\\\\\folder\\\\test\\\\"""'], {}), "('\\\\\\\\folder\\\\test\\\\')\n", (71434, 71456), False, 'from nturl2path import url2pathname, pathname2url\n'), ((71529, 71561), 'nturl2path.pathname2url', 'pathname2url', (['"""\\\\folder\\\\test\\\\"""'], {}), "('\\\\folder\\\\test\\\\')\n", (71541, 71561), False, 'from nturl2path import url2pathname, pathname2url\n'), ((71668, 71706), 'nturl2path.pathname2url', 'pathname2url', (['"""C:\\\\foo\\\\bar\\\\spam.foo"""'], {}), "('C:\\\\foo\\\\bar\\\\spam.foo')\n", (71680, 71706), False, 'from nturl2path import url2pathname, pathname2url\n'), ((2015, 2037), 'io.BytesIO.close', 'io.BytesIO.close', (['self'], {}), '(self)\n', (2031, 2037), False, 'import io\n'), ((20589, 20613), 'os.path.exists', 'os.path.exists', (['tmp_file'], {}), '(tmp_file)\n', (20603, 20613), False, 'import os\n'), ((28088, 28103), 'os.remove', 'os.remove', (['each'], {}), '(each)\n', (28097, 28103), False, 'import os\n'), ((28325, 28379), 'unittest.SkipTest', 'unittest.SkipTest', (['"""filePath is not encodable to utf8"""'], {}), "('filePath is not encodable to utf8')\n", (28342, 28379), False, 'import unittest\n'), ((65235, 65261), 'os.path.normcase', 'os.path.normcase', (['filename'], {}), '(filename)\n', (65251, 65261), False, 'import os\n'), ((65263, 65288), 'os.path.normcase', 'os.path.normcase', (['tmpfile'], {}), '(tmpfile)\n', (65279, 65288), False, 'import os\n'), ((65627, 65653), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (65643, 65653), False, 'import os\n'), ((3410, 3422), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3420, 3422), False, 'import io\n'), ((71008, 71026), 'nturl2path.pathname2url', 'pathname2url', (['path'], {}), '(path)\n', (71020, 71026), False, 'from nturl2path import url2pathname, pathname2url\n'), ((72107, 72125), 'nturl2path.url2pathname', 'url2pathname', (['path'], {}), '(path)\n', (72119, 72125), False, 'from nturl2path import url2pathname, pathname2url\n')] |
wmak/gapipy | gapipy/resources/tour/transport.py | b6849606d4f6af24b9f871f65e87aaf0d0c013cc | # Python 2 and 3
from __future__ import unicode_literals
from ...models import Address, SeasonalPriceBand
from ..base import Product
class Transport(Product):
_resource_name = 'transports'
_is_listable = False
_as_is_fields = [
'id', 'href', 'availability', 'name', 'product_line', 'sku', 'type', 'sub_type'
]
_date_time_fields_utc = ['date_created', 'date_last_modified']
_model_fields = [('start_address', Address), ('finish_address', Address)]
_model_collection_fields = [('price_bands', SeasonalPriceBand)]
| [] |
VeNoM-hubs/nyx | modules/dare.py | 1d76b3ad50add2e71e70fac40699e0cb513b084e | from discord.ext import commands
import json
import random
with open("assets/json/questions.json") as data:
data = json.load(data)
dares = data["dares"]
class Dare(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases=["d"])
async def dare(self, ctx):
dare = random.choice(dares)
await ctx.send(dare)
def setup(client):
client.add_cog(Dare(client))
| [((121, 136), 'json.load', 'json.load', (['data'], {}), '(data)\n', (130, 136), False, 'import json\n'), ((258, 289), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['d']"}), "(aliases=['d'])\n", (274, 289), False, 'from discord.ext import commands\n'), ((336, 356), 'random.choice', 'random.choice', (['dares'], {}), '(dares)\n', (349, 356), False, 'import random\n')] |
nicmatth/APIC-EM-HelloWorldv3 | scripts/apic.py | c0645e6decf57dbd87c5a239b6fce36f3dcbef41 | APIC_IP="sandboxapic.cisco.com"
APIC_PORT="443"
GROUP='group-xx'
| [] |
squisher/stella | stella/test/external_func.py | d9f0b2ebbd853b31c6f75cd0f0286037da4bcaf9 | # Copyright 2013-2015 David Mohr
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import randint
import mtpy
from . import * # noqa
def seed_const():
mtpy.mt_seed32new(42)
def seed(s):
mtpy.mt_seed32new(s)
def drand_const():
mtpy.mt_seed32new(42)
return mtpy.mt_drand()
def drand(s):
mtpy.mt_seed32new(s)
return mtpy.mt_drand() + mtpy.mt_drand()
@mark.parametrize('f', [seed_const, drand_const])
def test1(f):
make_eq_test(f, ())
@mark.parametrize('arg', single_args([1, 2, 42, 1823828, randint(1, 10000000),
randint(1, 10000000)]))
@mark.parametrize('f', [seed, drand])
def test2(f, arg):
make_eq_test(f, arg)
| [((668, 689), 'mtpy.mt_seed32new', 'mtpy.mt_seed32new', (['(42)'], {}), '(42)\n', (685, 689), False, 'import mtpy\n'), ((709, 729), 'mtpy.mt_seed32new', 'mtpy.mt_seed32new', (['s'], {}), '(s)\n', (726, 729), False, 'import mtpy\n'), ((755, 776), 'mtpy.mt_seed32new', 'mtpy.mt_seed32new', (['(42)'], {}), '(42)\n', (772, 776), False, 'import mtpy\n'), ((788, 803), 'mtpy.mt_drand', 'mtpy.mt_drand', ([], {}), '()\n', (801, 803), False, 'import mtpy\n'), ((824, 844), 'mtpy.mt_seed32new', 'mtpy.mt_seed32new', (['s'], {}), '(s)\n', (841, 844), False, 'import mtpy\n'), ((856, 871), 'mtpy.mt_drand', 'mtpy.mt_drand', ([], {}), '()\n', (869, 871), False, 'import mtpy\n'), ((874, 889), 'mtpy.mt_drand', 'mtpy.mt_drand', ([], {}), '()\n', (887, 889), False, 'import mtpy\n'), ((1039, 1059), 'random.randint', 'randint', (['(1)', '(10000000)'], {}), '(1, 10000000)\n', (1046, 1059), False, 'from random import randint\n'), ((1099, 1119), 'random.randint', 'randint', (['(1)', '(10000000)'], {}), '(1, 10000000)\n', (1106, 1119), False, 'from random import randint\n')] |
ipqhjjybj/bitcoin_trend_strategy | szh_objects.py | 0c85055558591574a4171abd68142ebbeb502958 | # encoding: utf-8
import sys
from market_maker import OrderManager
from settings import *
import os
from pymongo import MongoClient, ASCENDING
from pymongo.errors import ConnectionFailure
from datetime import datetime , timedelta
import numpy as np
########################################################################################################################
# constants
EXCHANGE_BITMEX = "BITMEX"
EMPTY_STRING = ""
EMPTY_FLOAT = 0.0
EMPTY_INT = 0
#----------------------------------------------------------------------
class LoggerEngine(object):
LogDir = "LogDir"
#----------------------------------------------------------------------
def __init__(self, logName , in_debug = True , open_md = "w"):
if os.path.exists(self.LogDir) == False:
os.mkdir( self.LogDir )
self.logPath = os.path.join(self.LogDir , logName)
self.now_debug = in_debug
if self.now_debug:
self.f = open( self.logPath , open_md)
#----------------------------------------------------------------------
def error(self, msg , error_id):
if self.now_debug:
self.f.write(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " : " + "Error msg %s: %s " % (str(error_id) , msg) + "\n")
self.f.flush()
#----------------------------------------------------------------------
def info(self, msg):
if self.now_debug:
self.f.write(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " : " + msg + "\n")
self.f.flush()
#----------------------------------------------------------------------
def close(self):
self.f.close()
'''
tick 数据的格式
'''
class TickData(object):
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(TickData, self).__init__()
        # symbol related
        self.symbol = EMPTY_STRING              # contract/symbol code
        self.exchange = EMPTY_STRING            # exchange code
        self.vtSymbol = EMPTY_STRING            # unique symbol in the vt system, usually symbol.exchange
        # trade data
        self.lastPrice = EMPTY_FLOAT            # latest trade price
        self.lastVolume = EMPTY_INT             # latest trade volume
        self.volume = EMPTY_INT                 # total volume traded today
        self.openInterest = EMPTY_INT           # open interest
        self.time = EMPTY_STRING                # time, e.g. 11:20:56.5
        self.date = EMPTY_STRING                # date, e.g. 20151009
        self.datetime = None                    # Python datetime object
        # regular market data
        self.openPrice = EMPTY_FLOAT            # today's open price
        self.highPrice = EMPTY_FLOAT            # today's high
        self.lowPrice = EMPTY_FLOAT             # today's low
        self.preClosePrice = EMPTY_FLOAT
        self.upperLimit = EMPTY_FLOAT           # upper price limit (limit up)
        self.lowerLimit = EMPTY_FLOAT           # lower price limit (limit down)
        # five levels of order book depth
self.bidPrice1 = EMPTY_FLOAT
self.bidPrice2 = EMPTY_FLOAT
self.bidPrice3 = EMPTY_FLOAT
self.bidPrice4 = EMPTY_FLOAT
self.bidPrice5 = EMPTY_FLOAT
self.askPrice1 = EMPTY_FLOAT
self.askPrice2 = EMPTY_FLOAT
self.askPrice3 = EMPTY_FLOAT
self.askPrice4 = EMPTY_FLOAT
self.askPrice5 = EMPTY_FLOAT
self.bidVolume1 = EMPTY_INT
self.bidVolume2 = EMPTY_INT
self.bidVolume3 = EMPTY_INT
self.bidVolume4 = EMPTY_INT
self.bidVolume5 = EMPTY_INT
self.askVolume1 = EMPTY_INT
self.askVolume2 = EMPTY_INT
self.askVolume3 = EMPTY_INT
self.askVolume4 = EMPTY_INT
self.askVolume5 = EMPTY_INT
########################################################################
class BarData(object):
"""K线数据"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(BarData, self).__init__()
        self.vtSymbol = EMPTY_STRING        # vt system symbol
        self.symbol = EMPTY_STRING          # symbol code
        self.exchange = EMPTY_STRING        # exchange
self.open = EMPTY_FLOAT # OHLC
self.high = EMPTY_FLOAT
self.low = EMPTY_FLOAT
self.close = EMPTY_FLOAT
        self.date = EMPTY_STRING            # date the bar starts on
        self.time = EMPTY_STRING            # time the bar starts at
        self.datetime = None                # Python datetime object
        self.volume = EMPTY_INT             # volume
        self.openInterest = EMPTY_INT       # open interest
'''
Base class for engines
'''
class EngineBase(object):
#----------------------------------------------------------------------
def writeLog(self, content):
if self.logger:
self.logger.info(content)
#----------------------------------------------------------------------
def writeError(self, content , error_id = 0):
"""
        Send an error notification / write it to the log file
:param content:
:return:
"""
if self.logger:
self.logger.error(content , error_id)
'''
Main data engine
'''
class DataEngine(EngineBase):
#----------------------------------------------------------------------
def __init__(self , _host = GLOBAL_MONGO_HOST , _port = GLOBAL_MONGO_PORT):
super(DataEngine, self).__init__()
self.host = _host
self.port = _port
        # MongoDB related
        self.dbClient = None    # MongoDB client object
self.logger = LoggerEngine("dataEngine.log")
## init the db
self.dbConnect()
#----------------------------------------------------------------------
def dbConnect(self):
"""连接MongoDB数据库"""
if not self.dbClient:
            # Read the MongoDB settings
            try:
                # Set the MongoDB operation timeout to 0.5 seconds
                self.dbClient = MongoClient(self.host , self.port , connectTimeoutMS=500)
                # Call server_info to check the server status, so a dead server
                # does not pass for a successful connection
                self.dbClient.server_info()
                self.writeLog(u'database connection established')
            except ConnectionFailure:
                self.writeLog(u'failed to connect to the database')
#----------------------------------------------------------------------
def dbQuery(self, dbName, collectionName, d, sortKey='', sortDirection=ASCENDING):
"""从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针"""
if self.dbClient:
db = self.dbClient[dbName]
collection = db[collectionName]
if sortKey:
                cursor = collection.find(d).sort(sortKey, sortDirection)    # sort the query results
else:
cursor = collection.find(d)
if cursor:
return list(cursor)
else:
return []
else:
self.writeLog(u'db query failed')
return []
#-----------------------------------------------------------------------
def loadBars( self, dbName = GLOBAL_USE_DBNAME , collectionName = GLOBAL_USE_SYMBOL, days = 2):
today_datetime = datetime.now()
start_datetime = today_datetime - timedelta( days = days)
d = {'datetime':{'$gte':start_datetime , '$lte':today_datetime}}
barData = self.dbQuery(dbName, collectionName, d, 'datetime')
l = []
for d in barData:
bar = BarData()
bar.__dict__ = d
l.append(bar)
        return l
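    # Note (illustrative, added for clarity; not part of the original module):
    # loadBars copies each MongoDB document straight into bar.__dict__, so the
    # documents are assumed to carry the same keys as BarData, e.g. (made-up values):
    #   {'symbol': 'XBTUSD', 'datetime': datetime(2019, 1, 1, 0, 1), 'open': 3693.5,
    #    'high': 3694.0, 'low': 3690.0, 'close': 3692.5, 'volume': 120, 'openInterest': 0}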
########################################################################
class BarManager(object):
"""
K线合成器,支持:
1. 基于Tick合成1分钟K线
2. 基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60)
"""
#----------------------------------------------------------------------
def __init__(self, onBar, xsec=0, onXsecBar=None , xmin=0 , xhour=0, onXminBar=None , onXhourBar = None, onDayBar=None):
"""Constructor"""
        self.bar = None             # the 1-minute bar being built
        self.onBar = onBar          # callback for finished 1-minute bars
        self.xsecBar = None         # the X-second bar being built
        self.xsec = xsec            # value of X in seconds
        self.onXsecBar = onXsecBar  # callback for finished X-second bars
        self.xminBar = None         # the X-minute bar being built
        self.xmin = xmin            # value of X in minutes
        self.onXminBar = onXminBar  # callback for finished X-minute bars
        self.xhourBar = None        # the X-hour bar being built
        self.xhour = xhour          # value of X in hours
        self.onXhourBar = onXhourBar   # callback for finished X-hour bars
        self.lastTick = None        # cache of the previous tick
        self.lastSecondTick = None  # cache of the previous tick used for second-level bars
        self.dayBar = None          # the bar for the current trading day
        self.onDayBar = onDayBar    # callback for finished daily bars
self.lastDayBar = None
#----------------------------------------------------------------------
def updateTick(self, tick):
"""TICK更新"""
newMinute = False # 默认不是新的一分钟
# 尚未创建对象
if not self.bar:
self.bar = BarData()
newMinute = True
        # a new minute has started
        elif self.bar.datetime.minute != tick.datetime.minute:
            # generate the timestamp of the finished 1-minute bar
            self.bar.datetime = self.bar.datetime.replace(second=0, microsecond=0)  # zero out seconds and microseconds
self.bar.date = self.bar.datetime.strftime('%Y%m%d')
self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f')
            # push out the finished 1-minute bar
            self.onBar(self.bar)
            # create a new bar object
            self.bar = BarData()
            newMinute = True
        # initialize the bar data for the new minute
if newMinute:
self.bar.vtSymbol = tick.vtSymbol
self.bar.symbol = tick.symbol
self.bar.exchange = tick.exchange
self.bar.open = tick.lastPrice
self.bar.high = tick.lastPrice
self.bar.low = tick.lastPrice
        # otherwise keep accumulating into the current minute's bar
else:
self.bar.high = max(self.bar.high, tick.lastPrice)
self.bar.low = min(self.bar.low, tick.lastPrice)
        # fields updated on every tick
self.bar.close = tick.lastPrice
self.bar.datetime = tick.datetime
self.bar.openInterest = tick.openInterest
if self.lastTick:
            self.bar.volume += (tick.volume - self.lastTick.volume) # volume traded within the current bar
        # cache the tick
self.lastTick = tick
#----------------------------------------------------------------------
def updateSecond(self, tick ):
"""通过TICK数据更新到秒数据"""
newSecond = False
if not self.xsecBar:
self.xsecBar = BarData()
newSecond = True
elif self.xsecBar.datetime.second != tick.datetime.second and ( (tick.datetime.second) % self.xsec == 0 ):
            self.xsecBar.datetime = self.xsecBar.datetime.replace( microsecond=0)  # zero out the microseconds
self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d')
self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f')
            # push out the finished X-second bar
            self.onXsecBar(self.xsecBar)
            # reset the cached bar object
self.xsecBar = BarData()
newSecond = True
        # initialize the bar data for the new X-second window
if newSecond :
self.xsecBar.datetime = tick.datetime
self.xsecBar.vtSymbol = tick.vtSymbol
self.xsecBar.symbol = tick.symbol
self.xsecBar.exchange = tick.exchange
self.xsecBar.open = tick.lastPrice
self.xsecBar.high = tick.lastPrice
self.xsecBar.low = tick.lastPrice
        # otherwise keep accumulating into the current X-second bar
else:
self.xsecBar.high = max(self.xsecBar.high, tick.lastPrice)
self.xsecBar.low = min(self.xsecBar.low, tick.lastPrice)
        # fields updated on every tick
self.xsecBar.close = tick.lastPrice
self.xsecBar.openInterest = tick.openInterest
if self.lastSecondTick:
            self.xsecBar.volume += (tick.volume - self.lastSecondTick.volume) # volume traded since the previous tick
        # cache the tick for the second-level bars
self.lastSecondTick = tick
#----------------------------------------------------------------------
def updateBar(self, bar):
"""1分钟K线更新"""
# 尚未创建对象
if not self.xminBar:
self.xminBar = BarData()
self.xminBar.vtSymbol = bar.vtSymbol
self.xminBar.symbol = bar.symbol
self.xminBar.exchange = bar.exchange
self.xminBar.open = bar.open
self.xminBar.high = bar.high
self.xminBar.low = bar.low
self.xminBar.datetime = bar.datetime
        # otherwise keep accumulating into the current X-minute bar
else:
self.xminBar.high = max(self.xminBar.high, bar.high)
self.xminBar.low = min(self.xminBar.low, bar.low)
        # fields updated on every 1-minute bar
self.xminBar.close = bar.close
self.xminBar.openInterest = bar.openInterest
self.xminBar.volume += float(bar.volume)
        # the X-minute window has finished
        if ( (bar.datetime.minute + 1) % self.xmin ) == 0:   # divisible by X
            # generate the timestamp of the finished X-minute bar
            self.xminBar.datetime = self.xminBar.datetime.replace(second=0, microsecond=0)  # zero out seconds and microseconds
self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d')
self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S')
            # push out the finished X-minute bar
            self.onXminBar(self.xminBar)
            # reset the cached bar object
self.xminBar = None
#----------------------------------------------------------------------
def updateHourBar(self , bar):
"""1小时K线更新"""
# 尚未创建对象
if not self.xhourBar:
self.xhourBar = BarData()
self.xhourBar.vtSymbol = bar.vtSymbol
self.xhourBar.symbol = bar.symbol
self.xhourBar.exchange = bar.exchange
self.xhourBar.open = bar.open
self.xhourBar.high = bar.high
self.xhourBar.low = bar.low
self.xhourBar.datetime = bar.datetime
else:
self.xhourBar.high = max(self.xhourBar.high, bar.high)
self.xhourBar.low = min(self.xhourBar.low, bar.low)
        # fields updated on every bar
self.xhourBar.close = bar.close
self.xhourBar.openInterest = bar.openInterest
self.xhourBar.volume += float(bar.volume)
        # the X-hour window has finished
        if ( (bar.datetime.hour + 1) % self.xhour ) == 0:   # divisible by X
            # generate the timestamp of the finished X-hour bar
            self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0, microsecond=0)  # zero out seconds and microseconds
self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d')
self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S')
            # push out the finished X-hour bar
            self.onXhourBar(self.xhourBar)
            # reset the cached bar object
self.xhourBar = None
#----------------------------------------------------------------------------
def updateDayBar(self, bar):
        # the trading day has finished, detected either by
        # 1. the night session, or 2. 9:00 on the next day
if self.lastDayBar != None \
and ( (self.lastDayBar.time <= "15:30:00" and bar.time >= "15:30:00") \
or (self.lastDayBar.time <= "15:30:00" and bar.time <= self.lastDayBar.time )):
            self.dayBar.datetime = self.dayBar.datetime.replace(second=0, microsecond=0)  # zero out seconds and microseconds
self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d')
self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S')
            # a new trading day has started,
            # so push out the previous day's bar first
self.onDayBar( self.dayBar)
self.dayBar = BarData()
self.dayBar.vtSymbol = bar.vtSymbol
self.dayBar.symbol = bar.symbol
self.dayBar.exchange = bar.exchange
self.dayBar.open = bar.open
self.dayBar.high = bar.high
self.dayBar.low = bar.low
self.dayBar.datetime = bar.datetime
elif not self.dayBar:
self.dayBar = BarData()
self.dayBar.vtSymbol = bar.vtSymbol
self.dayBar.symbol = bar.symbol
self.dayBar.exchange = bar.exchange
self.dayBar.open = bar.open
self.dayBar.high = bar.high
self.dayBar.low = bar.low
self.dayBar.datetime = bar.datetime
else:
self.dayBar.high = max(self.dayBar.high , bar.high)
self.dayBar.low = min(self.dayBar.low , bar.low)
        # fields updated on every bar
self.dayBar.close = bar.close
self.dayBar.openInterest = bar.openInterest
self.dayBar.volume += float(bar.volume)
self.lastDayBar = bar
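# Illustrative sketch (added for clarity; not part of the original module).
# BarManager is callback driven: ticks go into updateTick(), each finished
# 1-minute bar is delivered to onBar, and feeding those bars back through
# updateBar() produces the X-minute bars. The handler names below are
# placeholder examples, not part of the original code.
def _example_bar_manager_wiring(ticks):
    def on_xmin_bar(bar):
        print('5-minute bar closed:', bar.datetime, bar.close)
    def on_bar(bar):
        print('1-minute bar closed:', bar.datetime, bar.close)
        manager.updateBar(bar)   # feed 1-minute bars back in to build 5-minute bars
    manager = BarManager(onBar=on_bar, xmin=5, onXminBar=on_xmin_bar)
    for tick in ticks:
        manager.updateTick(tick)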
########################################################################
class ArrayManager(object):
"""
K线序列管理工具,负责:
1. K线时间序列的维护
2. 常用技术指标的计算
"""
#----------------------------------------------------------------------
def __init__(self, size=100):
"""Constructor"""
        self.count = 0                      # number of bars received so far
        self.size = size    # size of the cache
self.inited = False # True if count>=size
self.openArray = np.zeros(size) # OHLC
self.highArray = np.zeros(size)
self.lowArray = np.zeros(size)
self.closeArray = np.zeros(size)
self.volumeArray = np.zeros(size)
#----------------------------------------------------------------------
def updateBar(self, bar):
"""更新K线"""
self.count += 1
if not self.inited and self.count >= self.size:
self.inited = True
self.openArray[0:self.size-1] = self.openArray[1:self.size]
self.highArray[0:self.size-1] = self.highArray[1:self.size]
self.lowArray[0:self.size-1] = self.lowArray[1:self.size]
self.closeArray[0:self.size-1] = self.closeArray[1:self.size]
self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size]
self.openArray[-1] = bar.open
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.closeArray[-1] = bar.close
self.volumeArray[-1] = bar.volume
#----------------------------------------------------------------------
@property
def open(self):
"""获取开盘价序列"""
return self.openArray
#----------------------------------------------------------------------
@property
def high(self):
"""获取最高价序列"""
return self.highArray
#----------------------------------------------------------------------
@property
def low(self):
"""获取最低价序列"""
return self.lowArray
#----------------------------------------------------------------------
@property
def close(self):
"""获取收盘价序列"""
return self.closeArray
#----------------------------------------------------------------------
@property
def volume(self):
"""获取成交量序列"""
return self.volumeArray
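# Illustrative sketch (added for clarity; not part of the original module).
# ArrayManager keeps fixed-size rolling numpy arrays; `inited` only becomes True
# once `size` bars have been pushed, after which indicator maths is safe.
def _example_array_manager_usage(bars):
    am = ArrayManager(size=100)
    for bar in bars:
        am.updateBar(bar)
    if not am.inited:
        return None
    # latest close and a simple 10-bar average (placeholder indicator)
    return am.close[-1], am.close[-10:].mean()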
| [((844, 878), 'os.path.join', 'os.path.join', (['self.LogDir', 'logName'], {}), '(self.LogDir, logName)\n', (856, 878), False, 'import os\n'), ((7067, 7081), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7079, 7081), False, 'from datetime import datetime, timedelta\n'), ((17027, 17041), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (17035, 17041), True, 'import numpy as np\n'), ((17078, 17092), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (17086, 17092), True, 'import numpy as np\n'), ((17117, 17131), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (17125, 17131), True, 'import numpy as np\n'), ((17158, 17172), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (17166, 17172), True, 'import numpy as np\n'), ((17200, 17214), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (17208, 17214), True, 'import numpy as np\n'), ((746, 773), 'os.path.exists', 'os.path.exists', (['self.LogDir'], {}), '(self.LogDir)\n', (760, 773), False, 'import os\n'), ((796, 817), 'os.mkdir', 'os.mkdir', (['self.LogDir'], {}), '(self.LogDir)\n', (804, 817), False, 'import os\n'), ((7124, 7144), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (7133, 7144), False, 'from datetime import datetime, timedelta\n'), ((5814, 5869), 'pymongo.MongoClient', 'MongoClient', (['self.host', 'self.port'], {'connectTimeoutMS': '(500)'}), '(self.host, self.port, connectTimeoutMS=500)\n', (5825, 5869), False, 'from pymongo import MongoClient, ASCENDING\n'), ((1159, 1173), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1171, 1173), False, 'from datetime import datetime, timedelta\n'), ((1447, 1461), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1459, 1461), False, 'from datetime import datetime, timedelta\n')] |
ishtjot/susereumutep | CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/pylint/pyreverse/writer.py | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | # -*- coding: utf-8 -*-
# Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Utilities for creating VCG and Dot diagrams"""
from logilab.common.vcgutils import VCGPrinter
from logilab.common.graph import DotBackend
from pylint.pyreverse.utils import is_exception
class DiagramWriter(object):
"""base class for writing project diagrams
"""
def __init__(self, config, styles):
self.config = config
self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges = styles
self.printer = None # defined in set_printer
def write(self, diadefs):
"""write files for <project> according to <diadefs>
"""
for diagram in diadefs:
basename = diagram.title.strip().replace(' ', '_')
file_name = '%s.%s' % (basename, self.config.output_format)
self.set_printer(file_name, basename)
if diagram.TYPE == 'class':
self.write_classes(diagram)
else:
self.write_packages(diagram)
self.close_graph()
def write_packages(self, diagram):
"""write a package diagram"""
# sorted to get predictable (hence testable) results
for i, obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)):
self.printer.emit_node(i, label=self.get_title(obj), shape='box')
obj.fig_id = i
# package dependencies
for rel in diagram.get_relationships('depends'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
**self.pkg_edges)
def write_classes(self, diagram):
"""write a class diagram"""
# sorted to get predictable (hence testable) results
for i, obj in enumerate(sorted(diagram.objects, key=lambda x: x.title)):
self.printer.emit_node(i, **self.get_values(obj))
obj.fig_id = i
# inheritance links
for rel in diagram.get_relationships('specialization'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
**self.inh_edges)
# implementation links
for rel in diagram.get_relationships('implements'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
**self.imp_edges)
# generate associations
for rel in diagram.get_relationships('association'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
label=rel.name, **self.ass_edges)
def set_printer(self, file_name, basename):
"""set printer"""
raise NotImplementedError
def get_title(self, obj):
"""get project title"""
raise NotImplementedError
def get_values(self, obj):
"""get label and shape for classes."""
raise NotImplementedError
def close_graph(self):
"""finalize the graph"""
raise NotImplementedError
class DotWriter(DiagramWriter):
"""write dot graphs from a diagram definition and a project
"""
def __init__(self, config):
styles = [dict(arrowtail='none', arrowhead="open"),
dict(arrowtail='none', arrowhead='empty'),
dict(arrowtail='node', arrowhead='empty', style='dashed'),
dict(fontcolor='green', arrowtail='none',
arrowhead='diamond', style='solid'),
]
DiagramWriter.__init__(self, config, styles)
def set_printer(self, file_name, basename):
"""initialize DotWriter and add options for layout.
"""
layout = dict(rankdir="BT")
self.printer = DotBackend(basename, additionnal_param=layout)
self.file_name = file_name
def get_title(self, obj):
"""get project title"""
return obj.title
def get_values(self, obj):
"""get label and shape for classes.
The label contains all attributes and methods
"""
label = obj.title
if obj.shape == 'interface':
label = u'«interface»\\n%s' % label
if not self.config.only_classnames:
label = r'%s|%s\l|' % (label, r'\l'.join(obj.attrs))
for func in obj.methods:
label = r'%s%s()\l' % (label, func.name)
label = '{%s}' % label
if is_exception(obj.node):
return dict(fontcolor='red', label=label, shape='record')
return dict(label=label, shape='record')
def close_graph(self):
"""print the dot graph into <file_name>"""
self.printer.generate(self.file_name)
class VCGWriter(DiagramWriter):
"""write vcg graphs from a diagram definition and a project
"""
def __init__(self, config):
styles = [dict(arrowstyle='solid', backarrowstyle='none',
backarrowsize=0),
dict(arrowstyle='solid', backarrowstyle='none',
backarrowsize=10),
dict(arrowstyle='solid', backarrowstyle='none',
linestyle='dotted', backarrowsize=10),
dict(arrowstyle='solid', backarrowstyle='none',
textcolor='green'),
]
DiagramWriter.__init__(self, config, styles)
def set_printer(self, file_name, basename):
"""initialize VCGWriter for a UML graph"""
self.graph_file = open(file_name, 'w+')
self.printer = VCGPrinter(self.graph_file)
self.printer.open_graph(title=basename, layoutalgorithm='dfs',
late_edge_labels='yes', port_sharing='no',
manhattan_edges='yes')
self.printer.emit_node = self.printer.node
self.printer.emit_edge = self.printer.edge
def get_title(self, obj):
"""get project title in vcg format"""
return r'\fb%s\fn' % obj.title
def get_values(self, obj):
"""get label and shape for classes.
The label contains all attributes and methods
"""
if is_exception(obj.node):
label = r'\fb\f09%s\fn' % obj.title
else:
label = r'\fb%s\fn' % obj.title
if obj.shape == 'interface':
shape = 'ellipse'
else:
shape = 'box'
if not self.config.only_classnames:
attrs = obj.attrs
methods = [func.name for func in obj.methods]
# box width for UML like diagram
maxlen = max(len(name) for name in [obj.title] + methods + attrs)
line = '_' * (maxlen + 2)
label = r'%s\n\f%s' % (label, line)
for attr in attrs:
label = r'%s\n\f08%s' % (label, attr)
if attrs:
label = r'%s\n\f%s' % (label, line)
for func in methods:
label = r'%s\n\f10%s()' % (label, func)
return dict(label=label, shape=shape)
def close_graph(self):
"""close graph and file"""
self.printer.close_graph()
self.graph_file.close()
| [((4486, 4532), 'logilab.common.graph.DotBackend', 'DotBackend', (['basename'], {'additionnal_param': 'layout'}), '(basename, additionnal_param=layout)\n', (4496, 4532), False, 'from logilab.common.graph import DotBackend\n'), ((5159, 5181), 'pylint.pyreverse.utils.is_exception', 'is_exception', (['obj.node'], {}), '(obj.node)\n', (5171, 5181), False, 'from pylint.pyreverse.utils import is_exception\n'), ((6260, 6287), 'logilab.common.vcgutils.VCGPrinter', 'VCGPrinter', (['self.graph_file'], {}), '(self.graph_file)\n', (6270, 6287), False, 'from logilab.common.vcgutils import VCGPrinter\n'), ((6861, 6883), 'pylint.pyreverse.utils.is_exception', 'is_exception', (['obj.node'], {}), '(obj.node)\n', (6873, 6883), False, 'from pylint.pyreverse.utils import is_exception\n')] |
philippe-heitzmann/python-apps | graphql-ml-serving/backend/mutations.py | 1cc6e5e9b9ac81c81a3d4f0e420ff488fe6b2f0a | import logging
from ariadne import MutationType, convert_kwargs_to_snake_case
from config import clients, messages, queue
mutation = MutationType()
@mutation.field("createMessage")
@convert_kwargs_to_snake_case
async def resolve_create_message(obj, info, content, client_id):
try:
message = {"content": content, "client_id": client_id}
messages.append(message)
await queue.put(message)
return {"success": True, "message": message}
except Exception as error:
return {"success": False, "errors": [str(error)]}
@mutation.field("createClient")
@convert_kwargs_to_snake_case
async def resolve_create_client(obj, info, client_id):
try:
logging.info(f"Client id: {client_id}")
if not clients.get(client_id):
client = {"client_id": client_id}
clients[client_id] = client
return {"success": True, "client": client}
return {"success": False, "errors": ["Client is taken"]}
except Exception as error:
return {"success": False, "errors": [str(error)]}
| [((134, 148), 'ariadne.MutationType', 'MutationType', ([], {}), '()\n', (146, 148), False, 'from ariadne import MutationType, convert_kwargs_to_snake_case\n'), ((359, 383), 'config.messages.append', 'messages.append', (['message'], {}), '(message)\n', (374, 383), False, 'from config import clients, messages, queue\n'), ((695, 734), 'logging.info', 'logging.info', (['f"""Client id: {client_id}"""'], {}), "(f'Client id: {client_id}')\n", (707, 734), False, 'import logging\n'), ((398, 416), 'config.queue.put', 'queue.put', (['message'], {}), '(message)\n', (407, 416), False, 'from config import clients, messages, queue\n'), ((750, 772), 'config.clients.get', 'clients.get', (['client_id'], {}), '(client_id)\n', (761, 772), False, 'from config import clients, messages, queue\n')] |
MaxwellDPS/healthchecks | hc/api/transports.py | 3730c67c803e707ae51b01bacf2929bd053ee22f | import os
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import timezone
import json
import requests
from urllib.parse import quote, urlencode
from hc.accounts.models import Profile
from hc.lib import emails
from hc.lib.string import replace
try:
import apprise
except ImportError:
    # Apprise is not installed, so force the integration off
settings.APPRISE_ENABLED = False
def tmpl(template_name, **ctx):
template_path = "integrations/%s" % template_name
# \xa0 is non-breaking space. It causes SMS messages to use UCS2 encoding
# and cost twice the money.
return render_to_string(template_path, ctx).strip().replace("\xa0", " ")
class Transport(object):
def __init__(self, channel):
self.channel = channel
def notify(self, check):
""" Send notification about current status of the check.
This method returns None on success, and error message
on error.
"""
raise NotImplementedError()
def is_noop(self, check):
""" Return True if transport will ignore check's current status.
This method is overridden in Webhook subclass where the user can
configure webhook urls for "up" and "down" events, and both are
optional.
"""
return False
def checks(self):
return self.channel.project.check_set.order_by("created")
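# Illustrative sketch (added for clarity; not part of the original project).
# A concrete transport only needs to implement notify(): return None on
# success, or an error string on failure. The class below is a hypothetical
# example, not one of the real integrations.
class _ExampleLogTransport(Transport):
    def notify(self, check):
        try:
            print("check %s is %s" % (check.code, check.status))
        except Exception as e:
            return str(e)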
class Email(Transport):
def notify(self, check, bounce_url):
if not self.channel.email_verified:
return "Email not verified"
unsub_link = self.channel.get_unsub_link()
headers = {
"X-Bounce-Url": bounce_url,
"List-Unsubscribe": "<%s>" % unsub_link,
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
}
try:
# Look up the sorting preference for this email address
p = Profile.objects.get(user__email=self.channel.email_value)
sort = p.sort
except Profile.DoesNotExist:
# Default sort order is by check's creation time
sort = "created"
# list() executes the query, to avoid DB access while
# rendering a template
ctx = {
"check": check,
"checks": list(self.checks()),
"sort": sort,
"now": timezone.now(),
"unsub_link": unsub_link,
}
emails.alert(self.channel.email_value, ctx, headers)
def is_noop(self, check):
if not self.channel.email_verified:
return True
if check.status == "down":
return not self.channel.email_notify_down
else:
return not self.channel.email_notify_up
class Shell(Transport):
def prepare(self, template, check):
""" Replace placeholders with actual values. """
ctx = {
"$CODE": str(check.code),
"$STATUS": check.status,
"$NOW": timezone.now().replace(microsecond=0).isoformat(),
"$NAME": check.name,
"$TAGS": check.tags,
}
for i, tag in enumerate(check.tags_list()):
ctx["$TAG%d" % (i + 1)] = tag
return replace(template, ctx)
def is_noop(self, check):
if check.status == "down" and not self.channel.cmd_down:
return True
if check.status == "up" and not self.channel.cmd_up:
return True
return False
def notify(self, check):
if not settings.SHELL_ENABLED:
return "Shell commands are not enabled"
if check.status == "up":
cmd = self.channel.cmd_up
elif check.status == "down":
cmd = self.channel.cmd_down
cmd = self.prepare(cmd, check)
code = os.system(cmd)
if code != 0:
return "Command returned exit code %d" % code
class HttpTransport(Transport):
@classmethod
def get_error(cls, response):
# Override in subclasses: look for a specific error message in the
# response and return it.
return None
@classmethod
def _request(cls, method, url, **kwargs):
try:
options = dict(kwargs)
options["timeout"] = 5
if "headers" not in options:
options["headers"] = {}
if "User-Agent" not in options["headers"]:
options["headers"]["User-Agent"] = "healthchecks.io"
r = requests.request(method, url, **options)
if r.status_code not in (200, 201, 202, 204):
m = cls.get_error(r)
if m:
return f'Received status code {r.status_code} with a message: "{m}"'
return f"Received status code {r.status_code}"
except requests.exceptions.Timeout:
# Well, we tried
return "Connection timed out"
except requests.exceptions.ConnectionError:
return "Connection failed"
@classmethod
def get(cls, url, **kwargs):
# Make 3 attempts--
for x in range(0, 3):
error = cls._request("get", url, **kwargs)
if error is None:
break
return error
@classmethod
def post(cls, url, **kwargs):
# Make 3 attempts--
for x in range(0, 3):
error = cls._request("post", url, **kwargs)
if error is None:
break
return error
@classmethod
def put(cls, url, **kwargs):
# Make 3 attempts--
for x in range(0, 3):
error = cls._request("put", url, **kwargs)
if error is None:
break
return error
class Webhook(HttpTransport):
def prepare(self, template, check, urlencode=False):
""" Replace variables with actual values. """
def safe(s):
return quote(s) if urlencode else s
ctx = {
"$CODE": str(check.code),
"$STATUS": check.status,
"$NOW": safe(timezone.now().replace(microsecond=0).isoformat()),
"$NAME": safe(check.name),
"$TAGS": safe(check.tags),
}
for i, tag in enumerate(check.tags_list()):
ctx["$TAG%d" % (i + 1)] = safe(tag)
return replace(template, ctx)
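    # Illustrative example (added for clarity; the URL and check name are made up):
    # with url_down = "https://example.org/report?name=$NAME&status=$STATUS" and a
    # check named "db backup" that is down, prepare(..., urlencode=True) yields
    # "https://example.org/report?name=db%20backup&status=down".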
def is_noop(self, check):
if check.status == "down" and not self.channel.url_down:
return True
if check.status == "up" and not self.channel.url_up:
return True
return False
def notify(self, check):
spec = self.channel.webhook_spec(check.status)
if not spec["url"]:
return "Empty webhook URL"
url = self.prepare(spec["url"], check, urlencode=True)
headers = {}
for key, value in spec["headers"].items():
headers[key] = self.prepare(value, check)
body = spec["body"]
if body:
body = self.prepare(body, check)
if spec["method"] == "GET":
return self.get(url, headers=headers)
elif spec["method"] == "POST":
return self.post(url, data=body.encode(), headers=headers)
elif spec["method"] == "PUT":
return self.put(url, data=body.encode(), headers=headers)
class Slack(HttpTransport):
def notify(self, check):
text = tmpl("slack_message.json", check=check)
payload = json.loads(text)
return self.post(self.channel.slack_webhook_url, json=payload)
class HipChat(HttpTransport):
def is_noop(self, check):
return True
class OpsGenie(HttpTransport):
@classmethod
def get_error(cls, response):
try:
return response.json().get("message")
except ValueError:
pass
def notify(self, check):
headers = {
"Conent-Type": "application/json",
"Authorization": "GenieKey %s" % self.channel.opsgenie_key,
}
payload = {"alias": str(check.code), "source": settings.SITE_NAME}
if check.status == "down":
payload["tags"] = check.tags_list()
payload["message"] = tmpl("opsgenie_message.html", check=check)
payload["note"] = tmpl("opsgenie_note.html", check=check)
payload["description"] = tmpl("opsgenie_description.html", check=check)
url = "https://api.opsgenie.com/v2/alerts"
if self.channel.opsgenie_region == "eu":
url = "https://api.eu.opsgenie.com/v2/alerts"
if check.status == "up":
url += "/%s/close?identifierType=alias" % check.code
return self.post(url, json=payload, headers=headers)
class PagerDuty(HttpTransport):
URL = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
def notify(self, check):
description = tmpl("pd_description.html", check=check)
payload = {
"service_key": self.channel.pd_service_key,
"incident_key": str(check.code),
"event_type": "trigger" if check.status == "down" else "resolve",
"description": description,
"client": settings.SITE_NAME,
"client_url": check.details_url(),
}
return self.post(self.URL, json=payload)
class PagerTree(HttpTransport):
def notify(self, check):
url = self.channel.value
headers = {"Conent-Type": "application/json"}
payload = {
"incident_key": str(check.code),
"event_type": "trigger" if check.status == "down" else "resolve",
"title": tmpl("pagertree_title.html", check=check),
"description": tmpl("pagertree_description.html", check=check),
"client": settings.SITE_NAME,
"client_url": settings.SITE_ROOT,
"tags": ",".join(check.tags_list()),
}
return self.post(url, json=payload, headers=headers)
class PagerTeam(HttpTransport):
def notify(self, check):
url = self.channel.value
headers = {"Content-Type": "application/json"}
payload = {
"incident_key": str(check.code),
"event_type": "trigger" if check.status == "down" else "resolve",
"title": tmpl("pagerteam_title.html", check=check),
"description": tmpl("pagerteam_description.html", check=check),
"client": settings.SITE_NAME,
"client_url": settings.SITE_ROOT,
"tags": ",".join(check.tags_list()),
}
return self.post(url, json=payload, headers=headers)
class Pushbullet(HttpTransport):
def notify(self, check):
text = tmpl("pushbullet_message.html", check=check)
url = "https://api.pushbullet.com/v2/pushes"
headers = {
"Access-Token": self.channel.value,
"Conent-Type": "application/json",
}
payload = {"type": "note", "title": settings.SITE_NAME, "body": text}
return self.post(url, json=payload, headers=headers)
class Pushover(HttpTransport):
URL = "https://api.pushover.net/1/messages.json"
def notify(self, check):
others = self.checks().filter(status="down").exclude(code=check.code)
# list() executes the query, to avoid DB access while
# rendering a template
ctx = {"check": check, "down_checks": list(others)}
text = tmpl("pushover_message.html", **ctx)
title = tmpl("pushover_title.html", **ctx)
pieces = self.channel.value.split("|")
user_key, prio = pieces[0], pieces[1]
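        # e.g. a channel value of "abc123|0|1" (hypothetical) means user key
        # "abc123", priority 0 for "down" events and priority 1 for "up" events.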
# The third element, if present, is the priority for "up" events
if len(pieces) == 3 and check.status == "up":
prio = pieces[2]
payload = {
"token": settings.PUSHOVER_API_TOKEN,
"user": user_key,
"message": text,
"title": title,
"html": 1,
"priority": int(prio),
}
# Emergency notification
if prio == "2":
payload["retry"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY
payload["expire"] = settings.PUSHOVER_EMERGENCY_EXPIRATION
return self.post(self.URL, data=payload)
class VictorOps(HttpTransport):
def notify(self, check):
description = tmpl("victorops_description.html", check=check)
mtype = "CRITICAL" if check.status == "down" else "RECOVERY"
payload = {
"entity_id": str(check.code),
"message_type": mtype,
"entity_display_name": check.name_then_code(),
"state_message": description,
"monitoring_tool": settings.SITE_NAME,
}
return self.post(self.channel.value, json=payload)
class Matrix(HttpTransport):
def get_url(self):
s = quote(self.channel.value)
url = settings.MATRIX_HOMESERVER
url += "/_matrix/client/r0/rooms/%s/send/m.room.message?" % s
url += urlencode({"access_token": settings.MATRIX_ACCESS_TOKEN})
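        # The resulting URL (illustrative) has the form:
        #   <MATRIX_HOMESERVER>/_matrix/client/r0/rooms/<room id>/send/m.room.message?access_token=<token>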
return url
def notify(self, check):
plain = tmpl("matrix_description.html", check=check)
formatted = tmpl("matrix_description_formatted.html", check=check)
payload = {
"msgtype": "m.text",
"body": plain,
"format": "org.matrix.custom.html",
"formatted_body": formatted,
}
return self.post(self.get_url(), json=payload)
class Discord(HttpTransport):
def notify(self, check):
text = tmpl("slack_message.json", check=check)
payload = json.loads(text)
url = self.channel.discord_webhook_url + "/slack"
return self.post(url, json=payload)
class Telegram(HttpTransport):
SM = "https://api.telegram.org/bot%s/sendMessage" % settings.TELEGRAM_TOKEN
@classmethod
def get_error(cls, response):
try:
return response.json().get("description")
except ValueError:
pass
@classmethod
def send(cls, chat_id, text):
# Telegram.send is a separate method because it is also used in
# hc.front.views.telegram_bot to send invite links.
return cls.post(
cls.SM, json={"chat_id": chat_id, "text": text, "parse_mode": "html"}
)
def notify(self, check):
from hc.api.models import TokenBucket
if not TokenBucket.authorize_telegram(self.channel.telegram_id):
return "Rate limit exceeded"
text = tmpl("telegram_message.html", check=check)
return self.send(self.channel.telegram_id, text)
class Sms(HttpTransport):
URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json"
def is_noop(self, check):
return check.status != "down"
def notify(self, check):
profile = Profile.objects.for_user(self.channel.project.owner)
if not profile.authorize_sms():
profile.send_sms_limit_notice("SMS")
return "Monthly SMS limit exceeded"
url = self.URL % settings.TWILIO_ACCOUNT
auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH)
text = tmpl("sms_message.html", check=check, site_name=settings.SITE_NAME)
data = {
"From": settings.TWILIO_FROM,
"To": self.channel.sms_number,
"Body": text,
}
return self.post(url, data=data, auth=auth)
class WhatsApp(HttpTransport):
URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json"
def is_noop(self, check):
if check.status == "down":
return not self.channel.whatsapp_notify_down
else:
return not self.channel.whatsapp_notify_up
def notify(self, check):
profile = Profile.objects.for_user(self.channel.project.owner)
if not profile.authorize_sms():
profile.send_sms_limit_notice("WhatsApp")
return "Monthly message limit exceeded"
url = self.URL % settings.TWILIO_ACCOUNT
auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH)
text = tmpl("whatsapp_message.html", check=check, site_name=settings.SITE_NAME)
data = {
"From": "whatsapp:%s" % settings.TWILIO_FROM,
"To": "whatsapp:%s" % self.channel.sms_number,
"Body": text,
}
return self.post(url, data=data, auth=auth)
class Trello(HttpTransport):
URL = "https://api.trello.com/1/cards"
def is_noop(self, check):
return check.status != "down"
def notify(self, check):
params = {
"idList": self.channel.trello_list_id,
"name": tmpl("trello_name.html", check=check),
"desc": tmpl("trello_desc.html", check=check),
"key": settings.TRELLO_APP_KEY,
"token": self.channel.trello_token,
}
return self.post(self.URL, params=params)
class Apprise(HttpTransport):
def notify(self, check):
if not settings.APPRISE_ENABLED:
# Not supported and/or enabled
return "Apprise is disabled and/or not installed"
a = apprise.Apprise()
title = tmpl("apprise_title.html", check=check)
body = tmpl("apprise_description.html", check=check)
a.add(self.channel.value)
notify_type = (
apprise.NotifyType.SUCCESS
if check.status == "up"
else apprise.NotifyType.FAILURE
)
return (
"Failed"
if not a.notify(body=body, title=title, notify_type=notify_type)
else None
)
class MsTeams(HttpTransport):
def notify(self, check):
text = tmpl("msteams_message.json", check=check)
payload = json.loads(text)
return self.post(self.channel.value, json=payload)
class Zulip(HttpTransport):
@classmethod
def get_error(cls, response):
try:
return response.json().get("msg")
except ValueError:
pass
def notify(self, check):
_, domain = self.channel.zulip_bot_email.split("@")
url = "https://%s/api/v1/messages" % domain
auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key)
data = {
"type": self.channel.zulip_type,
"to": self.channel.zulip_to,
"topic": tmpl("zulip_topic.html", check=check),
"content": tmpl("zulip_content.html", check=check),
}
return self.post(url, data=data, auth=auth)
| [((2383, 2435), 'hc.lib.emails.alert', 'emails.alert', (['self.channel.email_value', 'ctx', 'headers'], {}), '(self.channel.email_value, ctx, headers)\n', (2395, 2435), False, 'from hc.lib import emails\n'), ((3164, 3186), 'hc.lib.string.replace', 'replace', (['template', 'ctx'], {}), '(template, ctx)\n', (3171, 3186), False, 'from hc.lib.string import replace\n'), ((3740, 3754), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (3749, 3754), False, 'import os\n'), ((6240, 6262), 'hc.lib.string.replace', 'replace', (['template', 'ctx'], {}), '(template, ctx)\n', (6247, 6262), False, 'from hc.lib.string import replace\n'), ((7361, 7377), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (7371, 7377), False, 'import json\n'), ((12695, 12720), 'urllib.parse.quote', 'quote', (['self.channel.value'], {}), '(self.channel.value)\n', (12700, 12720), False, 'from urllib.parse import quote, urlencode\n'), ((12848, 12905), 'urllib.parse.urlencode', 'urlencode', (["{'access_token': settings.MATRIX_ACCESS_TOKEN}"], {}), "({'access_token': settings.MATRIX_ACCESS_TOKEN})\n", (12857, 12905), False, 'from urllib.parse import quote, urlencode\n'), ((13460, 13476), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (13470, 13476), False, 'import json\n'), ((14680, 14732), 'hc.accounts.models.Profile.objects.for_user', 'Profile.objects.for_user', (['self.channel.project.owner'], {}), '(self.channel.project.owner)\n', (14704, 14732), False, 'from hc.accounts.models import Profile\n'), ((15603, 15655), 'hc.accounts.models.Profile.objects.for_user', 'Profile.objects.for_user', (['self.channel.project.owner'], {}), '(self.channel.project.owner)\n', (15627, 15655), False, 'from hc.accounts.models import Profile\n'), ((16962, 16979), 'apprise.Apprise', 'apprise.Apprise', ([], {}), '()\n', (16977, 16979), False, 'import apprise\n'), ((17570, 17586), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (17580, 17586), False, 'import json\n'), ((1873, 1930), 'hc.accounts.models.Profile.objects.get', 'Profile.objects.get', ([], {'user__email': 'self.channel.email_value'}), '(user__email=self.channel.email_value)\n', (1892, 1930), False, 'from hc.accounts.models import Profile\n'), ((2310, 2324), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2322, 2324), False, 'from django.utils import timezone\n'), ((4419, 4459), 'requests.request', 'requests.request', (['method', 'url'], {}), '(method, url, **options)\n', (4435, 4459), False, 'import requests\n'), ((14248, 14304), 'hc.api.models.TokenBucket.authorize_telegram', 'TokenBucket.authorize_telegram', (['self.channel.telegram_id'], {}), '(self.channel.telegram_id)\n', (14278, 14304), False, 'from hc.api.models import TokenBucket\n'), ((5837, 5845), 'urllib.parse.quote', 'quote', (['s'], {}), '(s)\n', (5842, 5845), False, 'from urllib.parse import quote, urlencode\n'), ((605, 641), 'django.template.loader.render_to_string', 'render_to_string', (['template_path', 'ctx'], {}), '(template_path, ctx)\n', (621, 641), False, 'from django.template.loader import render_to_string\n'), ((2926, 2940), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2938, 2940), False, 'from django.utils import timezone\n'), ((5983, 5997), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (5995, 5997), False, 'from django.utils import timezone\n')] |
Graviti-AI/graviti-python-sdk | graviti/portex/builder.py | d2faf86b4718416503b965f6057b31015417446f | #!/usr/bin/env python3
#
# Copyright 2022 Graviti. Licensed under MIT License.
#
"""Portex type builder related classes."""
from hashlib import md5
from pathlib import Path
from shutil import rmtree
from subprocess import PIPE, CalledProcessError, run
from tempfile import gettempdir
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar
import yaml
import graviti.portex.ptype as PTYPE
from graviti.exception import GitCommandError, GitNotFoundError
from graviti.portex.base import PortexRecordBase
from graviti.portex.external import PortexExternalType
from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory
from graviti.portex.package import ExternalPackage, Imports, packages
from graviti.portex.param import Param, Params
from graviti.portex.register import ExternalContainerRegister
if TYPE_CHECKING:
from subprocess import CompletedProcess
from graviti.portex.base import PortexType
EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER
_I = TypeVar("_I", bound="BuilderImports")
class PackageRepo:
"""The local git repo of the external Portex package.
Arguments:
url: The git repo url of the external package.
revision: The git repo revision (tag/commit) of the external package.
"""
_env: Dict[str, Any] = {}
def __init__(self, url: str, revision: str) -> None:
tempdir = Path(gettempdir()) / "portex"
tempdir.mkdir(exist_ok=True)
md5_instance = md5()
md5_instance.update(url.encode("utf-8"))
md5_instance.update(revision.encode("utf-8"))
self._path = tempdir / md5_instance.hexdigest()
self._url = url
self._revision = revision
try:
self._prepare_repo()
except FileNotFoundError:
raise GitNotFoundError() from None
def _prepare_repo(self) -> None:
if not self._path.exists():
self._clone_repo()
elif not self._check_repo_integrity():
rmtree(self._path)
self._clone_repo()
def _run(self, args: List[str]) -> "CompletedProcess[bytes]":
return run(args, cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE, check=True)
def _init_repo(self) -> None:
self._run(["git", "init"])
self._run(["git", "remote", "add", "origin", self._url])
def _shallow_fetch(self) -> None:
self._run(["git", "fetch", "origin", self._revision, "--depth=1"])
self._run(["git", "checkout", "FETCH_HEAD"])
def _deep_fetch(self) -> None:
try:
self._run(["git", "fetch", "origin"])
except CalledProcessError as error:
raise GitCommandError(
"'git fetch' failed, most likely due to the repo url is invalid.",
error,
) from None
try:
self._run(["git", "checkout", self._revision])
except CalledProcessError as error:
raise GitCommandError(
"'git checkout' failed, most likely due to the repo revision is invalid.",
error,
) from None
def _check_repo_integrity(self) -> bool:
try:
result = self._run(["git", "status", "--porcelain"])
except CalledProcessError:
# The git command failed means the git repo has been cleaned or broken
return False
return not bool(result.stdout)
def _clone_repo(self) -> None:
print(f"Cloning repo '{self._url}@{self._revision}'")
path = self._path
path.mkdir()
try:
self._init_repo()
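            # Prefer a shallow fetch of only the pinned revision; if the server
            # rejects it (e.g. the revision is a commit sha it will not serve
            # directly), fall back to a full fetch followed by a checkout.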
try:
self._shallow_fetch()
except CalledProcessError:
self._deep_fetch()
except (CalledProcessError, GitCommandError, FileNotFoundError):
rmtree(path)
raise
print(f"Cloned to '{path}'")
def get_root(self) -> Path:
"""Get the root directory path of the package repo.
Returns:
The root directory path of the package repo.
Raises:
TypeError: when the "ROOT.yaml" not found or more than one "ROOT.yaml" found.
"""
roots = list(self._path.glob("**/ROOT.yaml"))
if len(roots) == 0:
raise TypeError("No 'ROOT.yaml' file found")
if len(roots) >= 2:
raise TypeError("More than one 'ROOT.yaml' file found")
return roots[0].parent
class PackageBuilder:
"""The builder of the external Portex package.
Arguments:
url: The git repo url of the external package.
revision: The git repo revision (tag/commit) of the external package.
"""
def __init__(self, url: str, revision: str) -> None:
self.package = ExternalPackage(url, revision)
self._builders = self._create_type_builders()
def __getitem__(self, key: str) -> Type["PortexExternalType"]:
try:
return self.package[key]
except KeyError:
return self._builders.__getitem__(key).build()
def _create_type_builders(self) -> Dict[str, "TypeBuilder"]:
repo = PackageRepo(self.package.url, self.package.revision)
root = repo.get_root()
builders = {}
for yaml_file in root.glob("**/*.yaml"):
if yaml_file.name == "ROOT.yaml":
continue
parts = (*yaml_file.relative_to(root).parent.parts, yaml_file.stem)
name = ".".join(parts)
builders[name] = TypeBuilder(name, yaml_file, self)
return builders
def build(self) -> ExternalPackage:
"""Build the Portex external package.
Returns:
            The built Portex external package.
"""
for builder in self._builders.values():
if builder.is_building:
continue
builder.build()
return self.package
class TypeBuilder:
"""The builder of the external Portex template type.
Arguments:
name: The name of the Portex template type.
path: The source file path of the Portex template type.
package: The package the Portex template type belongs to.
"""
def __init__(self, name: str, path: Path, builder: PackageBuilder) -> None:
self._name = name
self._path = path
self._builder = builder
self.is_building = False
def build(self) -> Type["PortexExternalType"]:
"""Build the Portex external type.
Returns:
            The built Portex external type.
Raises:
TypeError: Raise when circular reference detected.
"""
if self.is_building:
raise TypeError("Circular reference")
self.is_building = True
with self._path.open() as fp:
content = yaml.load(fp, yaml.Loader)
params_pyobj = content.get("parameters", [])
decl = content["declaration"]
imports = BuilderImports.from_pyobj(content.get("imports", []), self._builder)
factory = TypeFactory(decl, imports)
keys = factory.keys
params = Params.from_pyobj(params_pyobj)
for key, value in params.items():
value.ptype = keys.get(key, PTYPE.Any)
params.add(Param("nullable", False, ptype=PTYPE.Boolean))
class_attrs: Dict[str, Any] = {
"params": params,
"factory": factory,
"package": self._builder.package,
}
if issubclass(factory.class_, PortexRecordBase):
bases: Tuple[Type["PortexType"], ...] = (PortexRecordBase, PortexExternalType)
class_attrs["_fields_factory"] = ConnectedFieldsFactory(
decl, factory.class_, imports, factory.transform_kwargs
)
else:
bases = (PortexExternalType,)
type_ = type(self._name, bases, class_attrs)
self._builder.package[self._name] = type_
return type_
class BuilderImports(Imports):
"""The imports of the Portex template type.
Arguments:
package: The package the portex belongs to.
"""
_builder: PackageBuilder
def __getitem__(self, key: str) -> Type["PortexType"]:
try:
return super().__getitem__(key)
except KeyError:
return self._builder.__getitem__(key)
@classmethod
def from_pyobj( # type: ignore[override] # pylint: disable=arguments-differ
cls: Type[_I], content: List[Dict[str, Any]], builder: PackageBuilder
) -> _I:
"""Create :class:`Imports` instance from python list.
Arguments:
content: A python list representing imported types.
builder: The package builder.
Returns:
            An :class:`Imports` instance created from the input python list.
"""
imports = super().from_pyobj(content)
imports._builder = builder # pylint: disable=protected-access
return imports
def build_package(url: str, revision: str) -> ExternalPackage:
"""Build an external package.
Arguments:
url: The git repo url of the external package.
revision: The git repo revision (tag/commit) of the external package.
Returns:
The :class:`ExternalPackage` instance.
"""
builder = PackageBuilder(url, revision)
package = builder.build()
packages.externals[url, revision] = package
return package
| [((1028, 1065), 'typing.TypeVar', 'TypeVar', (['"""_I"""'], {'bound': '"""BuilderImports"""'}), "('_I', bound='BuilderImports')\n", (1035, 1065), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar\n'), ((1501, 1506), 'hashlib.md5', 'md5', ([], {}), '()\n', (1504, 1506), False, 'from hashlib import md5\n'), ((2149, 2227), 'subprocess.run', 'run', (['args'], {'cwd': 'self._path', 'env': 'self._env', 'stdout': 'PIPE', 'stderr': 'PIPE', 'check': '(True)'}), '(args, cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE, check=True)\n', (2152, 2227), False, 'from subprocess import PIPE, CalledProcessError, run\n'), ((4777, 4807), 'graviti.portex.package.ExternalPackage', 'ExternalPackage', (['url', 'revision'], {}), '(url, revision)\n', (4792, 4807), False, 'from graviti.portex.package import ExternalPackage, Imports, packages\n'), ((7045, 7071), 'graviti.portex.factory.TypeFactory', 'TypeFactory', (['decl', 'imports'], {}), '(decl, imports)\n', (7056, 7071), False, 'from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory\n'), ((7118, 7149), 'graviti.portex.param.Params.from_pyobj', 'Params.from_pyobj', (['params_pyobj'], {}), '(params_pyobj)\n', (7135, 7149), False, 'from graviti.portex.param import Param, Params\n'), ((6819, 6845), 'yaml.load', 'yaml.load', (['fp', 'yaml.Loader'], {}), '(fp, yaml.Loader)\n', (6828, 6845), False, 'import yaml\n'), ((7264, 7309), 'graviti.portex.param.Param', 'Param', (['"""nullable"""', '(False)'], {'ptype': 'PTYPE.Boolean'}), "('nullable', False, ptype=PTYPE.Boolean)\n", (7269, 7309), False, 'from graviti.portex.param import Param, Params\n'), ((7663, 7742), 'graviti.portex.factory.ConnectedFieldsFactory', 'ConnectedFieldsFactory', (['decl', 'factory.class_', 'imports', 'factory.transform_kwargs'], {}), '(decl, factory.class_, imports, factory.transform_kwargs)\n', (7685, 7742), False, 'from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory\n'), ((1415, 1427), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (1425, 1427), False, 'from tempfile import gettempdir\n'), ((1824, 1842), 'graviti.exception.GitNotFoundError', 'GitNotFoundError', ([], {}), '()\n', (1840, 1842), False, 'from graviti.exception import GitCommandError, GitNotFoundError\n'), ((2017, 2035), 'shutil.rmtree', 'rmtree', (['self._path'], {}), '(self._path)\n', (2023, 2035), False, 'from shutil import rmtree\n'), ((2691, 2785), 'graviti.exception.GitCommandError', 'GitCommandError', (['"""\'git fetch\' failed, most likely due to the repo url is invalid."""', 'error'], {}), '(\n "\'git fetch\' failed, most likely due to the repo url is invalid.", error)\n', (2706, 2785), False, 'from graviti.exception import GitCommandError, GitNotFoundError\n'), ((2973, 3079), 'graviti.exception.GitCommandError', 'GitCommandError', (['"""\'git checkout\' failed, most likely due to the repo revision is invalid."""', 'error'], {}), '(\n "\'git checkout\' failed, most likely due to the repo revision is invalid.",\n error)\n', (2988, 3079), False, 'from graviti.exception import GitCommandError, GitNotFoundError\n'), ((3838, 3850), 'shutil.rmtree', 'rmtree', (['path'], {}), '(path)\n', (3844, 3850), False, 'from shutil import rmtree\n')] |
SGeetansh/dffml | dffml/operation/mapping.py | 04647bdcadef2f7e7b59cdd8ac1e89f17ef1095b | from typing import Dict, List, Any
from ..df.types import Definition
from ..df.base import op
from ..util.data import traverse_get
MAPPING = Definition(name="mapping", primitive="map")
MAPPING_TRAVERSE = Definition(name="mapping_traverse", primitive="List[str]")
MAPPING_KEY = Definition(name="key", primitive="str")
MAPPING_VALUE = Definition(name="value", primitive="generic")
@op(
name="dffml.mapping.extract",
inputs={"mapping": MAPPING, "traverse": MAPPING_TRAVERSE},
outputs={"value": MAPPING_VALUE},
)
def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]):
"""
Extracts value from a given mapping.
Parameters
----------
mapping : dict
The mapping to extract the value from.
traverse : list[str]
A list of keys to traverse through the mapping dictionary and extract the values.
Returns
-------
dict
A dictionary containing the value of the keys.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle)
>>>
>>> dataflow.seed.append(
... Input(
... value=[mapping_extract_value.op.outputs["value"].name],
... definition=GetSingle.op.inputs["spec"],
... )
... )
>>> inputs = [
... Input(
... value={"key1": {"key2": 42}},
... definition=mapping_extract_value.op.inputs["mapping"],
... ),
... Input(
... value=["key1", "key2"],
... definition=mapping_extract_value.op.inputs["traverse"],
... ),
... ]
>>>
>>> async def main():
... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs):
... print(result)
>>>
>>> asyncio.run(main())
{'value': 42}
"""
return {"value": traverse_get(mapping, *traverse)}
@op(
name="dffml.mapping.create",
inputs={"key": MAPPING_KEY, "value": MAPPING_VALUE},
outputs={"mapping": MAPPING},
)
def create_mapping(key: str, value: Any):
"""
Creates a mapping of a given key and value.
Parameters
----------
key : str
The key for the mapping.
value : Any
The value for the mapping.
Returns
-------
dict
A dictionary containing the mapping created.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> dataflow = DataFlow.auto(create_mapping, GetSingle)
>>> dataflow.seed.append(
... Input(
... value=[create_mapping.op.outputs["mapping"].name],
... definition=GetSingle.op.inputs["spec"],
... )
... )
>>> inputs = [
... Input(
... value="key1", definition=create_mapping.op.inputs["key"],
... ),
... Input(
... value=42, definition=create_mapping.op.inputs["value"],
... ),
... ]
>>>
>>> async def main():
... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs):
... print(result)
>>>
>>> asyncio.run(main())
{'mapping': {'key1': 42}}
"""
return {"mapping": {key: value}}
| [] |
Vijay-P/anchore-engine | anchore_engine/services/policy_engine/__init__.py | 660a0bf10c56d16f894919209c51ec7a12081e9b | import time
import sys
import pkg_resources
import os
import retrying
from sqlalchemy.exc import IntegrityError
# anchore modules
import anchore_engine.clients.services.common
import anchore_engine.subsys.servicestatus
import anchore_engine.subsys.metrics
from anchore_engine.subsys import logger
from anchore_engine.configuration import localconfig
from anchore_engine.clients.services import simplequeue, internal_client_for
from anchore_engine.clients.services.simplequeue import SimpleQueueClient
from anchore_engine.service import ApiService, LifeCycleStages
from anchore_engine.services.policy_engine.engine.feeds.feeds import (
VulnerabilityFeed,
NvdV2Feed,
PackagesFeed,
VulnDBFeed,
GithubFeed,
feed_registry,
NvdFeed,
)
# from anchore_engine.subsys.logger import enable_bootstrap_logging
# enable_bootstrap_logging()
from anchore_engine.utils import timer
feed_sync_queuename = "feed_sync_tasks"
system_user_auth = None
feed_sync_msg = {"task_type": "feed_sync", "enabled": True}
# These are user-configurable but mostly for debugging and testing purposes
try:
FEED_SYNC_RETRIES = int(os.getenv("ANCHORE_FEED_SYNC_CHECK_RETRIES", 5))
except ValueError:
logger.exception(
"Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5"
)
FEED_SYNC_RETRIES = 5
try:
FEED_SYNC_RETRY_BACKOFF = int(
os.getenv("ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF", 5)
)
except ValueError:
logger.exception(
"Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5"
)
FEED_SYNC_RETRY_BACKOFF = 5
try:
feed_config_check_retries = int(os.getenv("FEED_CLIENT_CHECK_RETRIES", 3))
except ValueError:
logger.exception(
"Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default value of 3"
)
feed_config_check_retries = 3
try:
feed_config_check_backoff = int(os.getenv("FEED_CLIENT_CHECK_BACKOFF", 5))
except ValueError:
logger.exception(
"Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default value of 5"
)
feed_config_check_backoff = 5
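# The values above can be overridden via the environment, e.g. (illustrative):
#   export ANCHORE_FEED_SYNC_CHECK_RETRIES=10
#   export ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF=30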
# service funcs (must be here)
def _check_feed_client_credentials():
from anchore_engine.services.policy_engine.engine.feeds.client import get_client
sleep_time = feed_config_check_backoff
last_ex = None
for i in range(feed_config_check_retries):
if i > 0:
logger.info(
"Waiting for {} seconds to try feeds client config check again".format(
sleep_time
)
)
time.sleep(sleep_time)
sleep_time += feed_config_check_backoff
try:
logger.info(
"Checking feeds client credentials. Attempt {} of {}".format(
i + 1, feed_config_check_retries
)
)
client = get_client()
client = None
logger.info("Feeds client credentials ok")
return True
except Exception as e:
logger.warn(
"Could not verify feeds endpoint and/or config. Got exception: {}".format(
e
)
)
last_ex = e
else:
if last_ex:
raise last_ex
else:
raise Exception(
"Exceeded retries for feeds client config check. Failing check"
)
def _system_creds():
global system_user_auth
if not system_user_auth:
config = localconfig.get_config()
system_user_auth = config["system_user_auth"]
return system_user_auth
def process_preflight():
"""
    Execute the preflight functions, aborting service startup if any of them raises an uncaught exception or returns a False value
:return:
"""
preflight_check_functions = [init_db_content, init_feed_registry]
for fn in preflight_check_functions:
try:
fn()
except Exception as e:
logger.exception(
"Preflight checks failed with error: {}. Aborting service startup".format(
e
)
)
sys.exit(1)
def _init_distro_mappings():
from anchore_engine.db import session_scope, DistroMapping
initial_mappings = [
DistroMapping(from_distro="alpine", to_distro="alpine", flavor="ALPINE"),
DistroMapping(from_distro="busybox", to_distro="busybox", flavor="BUSYB"),
DistroMapping(from_distro="centos", to_distro="rhel", flavor="RHEL"),
DistroMapping(from_distro="debian", to_distro="debian", flavor="DEB"),
DistroMapping(from_distro="fedora", to_distro="rhel", flavor="RHEL"),
DistroMapping(from_distro="ol", to_distro="ol", flavor="RHEL"),
DistroMapping(from_distro="rhel", to_distro="rhel", flavor="RHEL"),
DistroMapping(from_distro="ubuntu", to_distro="ubuntu", flavor="DEB"),
DistroMapping(from_distro="amzn", to_distro="amzn", flavor="RHEL"),
DistroMapping(from_distro="redhat", to_distro="rhel", flavor="RHEL"),
]
# set up any data necessary at system init
try:
logger.info(
"Checking policy engine db initialization. Checking initial set of distro mappings"
)
with session_scope() as dbsession:
distro_mappings = dbsession.query(DistroMapping).all()
for i in initial_mappings:
if not [x for x in distro_mappings if x.from_distro == i.from_distro]:
logger.info("Adding missing mapping: {}".format(i))
dbsession.add(i)
logger.info("Distro mapping initialization complete")
except Exception as err:
if isinstance(err, IntegrityError):
logger.warn("another process has already initialized, continuing")
else:
raise Exception(
"unable to initialize default distro mappings - exception: " + str(err)
)
return True
def init_db_content():
"""
Initialize the policy engine db with any data necessary at startup.
:return:
"""
return _init_distro_mappings()
def init_feed_registry():
# Register feeds, the tuple is the class and bool if feed is a distro vulnerability feed or not
for cls_tuple in [
(NvdV2Feed, False),
(VulnDBFeed, False),
(VulnerabilityFeed, True),
(PackagesFeed, False),
(GithubFeed, False),
(NvdFeed, False),
]:
logger.info("Registering feed handler {}".format(cls_tuple[0].__feed_name__))
feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1])
def do_feed_sync(msg):
if "FeedsUpdateTask" not in locals():
from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask
if "get_selected_feeds_to_sync" not in locals():
from anchore_engine.services.policy_engine.engine.feeds.sync import (
get_selected_feeds_to_sync,
)
handler_success = False
timer = time.time()
logger.info("FIRING: feed syncer")
try:
feeds = get_selected_feeds_to_sync(localconfig.get_config())
logger.info("Syncing configured feeds: {}".format(feeds))
result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get("data"))
if result is not None:
handler_success = True
else:
logger.warn("Feed sync task marked as disabled, so skipping")
except ValueError as e:
logger.warn("Received msg of wrong type")
except Exception as err:
logger.warn("failure in feed sync handler - exception: " + str(err))
if handler_success:
anchore_engine.subsys.metrics.summary_observe(
"anchore_monitor_runtime_seconds",
time.time() - timer,
function="do_feed_sync",
status="success",
)
else:
anchore_engine.subsys.metrics.summary_observe(
"anchore_monitor_runtime_seconds",
time.time() - timer,
function="do_feed_sync",
status="fail",
)
def handle_feed_sync(*args, **kwargs):
"""
Initiates a feed sync in the system in response to a message from the queue
:param args:
:param kwargs:
:return:
"""
system_user = _system_creds()
logger.info("init args: {}".format(kwargs))
cycle_time = kwargs["mythread"]["cycle_timer"]
while True:
config = localconfig.get_config()
feed_sync_enabled = config.get("feeds", {}).get("sync_enabled", True)
if feed_sync_enabled:
logger.info("Feed sync task executor activated")
try:
run_feed_sync(system_user)
except Exception as e:
logger.error("Caught escaped error in feed sync handler: {}".format(e))
finally:
logger.info("Feed sync task executor complete")
else:
logger.info("sync_enabled is set to false in config - skipping feed sync")
time.sleep(cycle_time)
return True
@retrying.retry(
stop_max_attempt_number=FEED_SYNC_RETRIES,
wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000,
wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000,
)
def run_feed_sync(system_user):
all_ready = anchore_engine.clients.services.common.check_services_ready(
["simplequeue"]
)
if not all_ready:
logger.info("simplequeue service not yet ready, will retry")
raise Exception("Simplequeue service not yet ready")
else:
try:
# This has its own retry on the queue fetch, so wrap with catch block to ensure we don't double-retry on task exec
simplequeue.run_target_with_queue_ttl(
None,
queue=feed_sync_queuename,
target=do_feed_sync,
max_wait_seconds=30,
visibility_timeout=180,
retries=FEED_SYNC_RETRIES,
backoff_time=FEED_SYNC_RETRY_BACKOFF,
)
except Exception as err:
logger.warn("failed to process task this cycle: " + str(err))
def handle_feed_sync_trigger(*args, **kwargs):
"""
Checks to see if there is a task for a feed sync in the queue and if not, adds one.
Interval for firing this should be longer than the expected feed sync duration.
:param args:
:param kwargs:
:return:
"""
system_user = _system_creds()
logger.info("init args: {}".format(kwargs))
cycle_time = kwargs["mythread"]["cycle_timer"]
while True:
config = localconfig.get_config()
feed_sync_enabled = config.get("feeds", {}).get("sync_enabled", True)
if feed_sync_enabled:
logger.info("Feed Sync task creator activated")
try:
push_sync_task(system_user)
logger.info("Feed Sync Trigger done, waiting for next cycle.")
except Exception as e:
logger.error(
"Error caught in feed sync trigger handler after all retries. Will wait for next cycle"
)
finally:
logger.info("Feed Sync task creator complete")
else:
logger.info(
"sync_enabled is set to false in config - skipping feed sync trigger"
)
time.sleep(cycle_time)
return True
@retrying.retry(
stop_max_attempt_number=FEED_SYNC_RETRIES,
wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000,
wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000,
)
def push_sync_task(system_user):
all_ready = anchore_engine.clients.services.common.check_services_ready(
["simplequeue"]
)
if not all_ready:
logger.info("simplequeue service not yet ready, will retry")
raise Exception("Simplequeue service not yet ready")
else:
# q_client = SimpleQueueClient(user=system_user[0], password=system_user[1])
q_client = internal_client_for(SimpleQueueClient, userId=None)
if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg):
try:
q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg)
except:
logger.error("Could not enqueue message for a feed sync")
raise
class PolicyEngineService(ApiService):
__service_name__ = "policy_engine"
__spec_dir__ = pkg_resources.resource_filename(__name__, "swagger")
__monitors__ = {
"service_heartbeat": {
"handler": anchore_engine.subsys.servicestatus.handle_service_heartbeat,
"taskType": "handle_service_heartbeat",
"args": [__service_name__],
"cycle_timer": 60,
"min_cycle_timer": 60,
"max_cycle_timer": 60,
"last_queued": 0,
"last_return": False,
"initialized": False,
},
"feed_sync_checker": {
"handler": handle_feed_sync_trigger,
"taskType": "handle_feed_sync_trigger",
"args": [],
"cycle_timer": 600,
"min_cycle_timer": 300,
"max_cycle_timer": 100000,
"last_queued": 0,
"last_return": False,
"initialized": False,
},
"feed_sync": {
"handler": handle_feed_sync,
"taskType": "handle_feed_sync",
"args": [],
"cycle_timer": 3600,
"min_cycle_timer": 1800,
"max_cycle_timer": 100000,
"last_queued": 0,
"last_return": False,
"initialized": False,
},
}
__lifecycle_handlers__ = {
LifeCycleStages.pre_register: [
(process_preflight, None),
]
}
| [((9137, 9318), 'retrying.retry', 'retrying.retry', ([], {'stop_max_attempt_number': 'FEED_SYNC_RETRIES', 'wait_incrementing_start': '(FEED_SYNC_RETRY_BACKOFF * 1000)', 'wait_incrementing_increment': '(FEED_SYNC_RETRY_BACKOFF * 1000)'}), '(stop_max_attempt_number=FEED_SYNC_RETRIES,\n wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000,\n wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000)\n', (9151, 9318), False, 'import retrying\n'), ((11469, 11650), 'retrying.retry', 'retrying.retry', ([], {'stop_max_attempt_number': 'FEED_SYNC_RETRIES', 'wait_incrementing_start': '(FEED_SYNC_RETRY_BACKOFF * 1000)', 'wait_incrementing_increment': '(FEED_SYNC_RETRY_BACKOFF * 1000)'}), '(stop_max_attempt_number=FEED_SYNC_RETRIES,\n wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000,\n wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000)\n', (11483, 11650), False, 'import retrying\n'), ((7101, 7112), 'time.time', 'time.time', ([], {}), '()\n', (7110, 7112), False, 'import time\n'), ((7117, 7151), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""FIRING: feed syncer"""'], {}), "('FIRING: feed syncer')\n", (7128, 7151), False, 'from anchore_engine.subsys import logger\n'), ((12512, 12564), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""swagger"""'], {}), "(__name__, 'swagger')\n", (12543, 12564), False, 'import pkg_resources\n'), ((1131, 1178), 'os.getenv', 'os.getenv', (['"""ANCHORE_FEED_SYNC_CHECK_RETRIES"""', '(5)'], {}), "('ANCHORE_FEED_SYNC_CHECK_RETRIES', 5)\n", (1140, 1178), False, 'import os\n'), ((1203, 1323), 'anchore_engine.subsys.logger.exception', 'logger.exception', (['"""Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5"""'], {}), "(\n 'Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5'\n )\n", (1219, 1323), False, 'from anchore_engine.subsys import logger\n'), ((1403, 1458), 'os.getenv', 'os.getenv', (['"""ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF"""', '(5)'], {}), "('ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF', 5)\n", (1412, 1458), False, 'import os\n'), ((1488, 1616), 'anchore_engine.subsys.logger.exception', 'logger.exception', (['"""Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5"""'], {}), "(\n 'Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5'\n )\n", (1504, 1616), False, 'from anchore_engine.subsys import logger\n'), ((1695, 1736), 'os.getenv', 'os.getenv', (['"""FEED_CLIENT_CHECK_RETRIES"""', '(3)'], {}), "('FEED_CLIENT_CHECK_RETRIES', 3)\n", (1704, 1736), False, 'import os\n'), ((1761, 1875), 'anchore_engine.subsys.logger.exception', 'logger.exception', (['"""Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default value of 3"""'], {}), "(\n 'Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default value of 3'\n )\n", (1777, 1875), False, 'from anchore_engine.subsys import logger\n'), ((1956, 1997), 'os.getenv', 'os.getenv', (['"""FEED_CLIENT_CHECK_BACKOFF"""', '(5)'], {}), "('FEED_CLIENT_CHECK_BACKOFF', 5)\n", (1965, 1997), False, 'import os\n'), ((2022, 2136), 'anchore_engine.subsys.logger.exception', 'logger.exception', (['"""Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default value of 5"""'], {}), "(\n 'Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default value of 5'\n )\n", (2038, 2136), False, 'from anchore_engine.subsys import logger\n'), ((3581, 3605), 
'anchore_engine.configuration.localconfig.get_config', 'localconfig.get_config', ([], {}), '()\n', (3603, 3605), False, 'from anchore_engine.configuration import localconfig\n'), ((4371, 4443), 'anchore_engine.db.DistroMapping', 'DistroMapping', ([], {'from_distro': '"""alpine"""', 'to_distro': '"""alpine"""', 'flavor': '"""ALPINE"""'}), "(from_distro='alpine', to_distro='alpine', flavor='ALPINE')\n", (4384, 4443), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((4453, 4526), 'anchore_engine.db.DistroMapping', 'DistroMapping', ([], {'from_distro': '"""busybox"""', 'to_distro': '"""busybox"""', 'flavor': '"""BUSYB"""'}), "(from_distro='busybox', to_distro='busybox', flavor='BUSYB')\n", (4466, 4526), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((4536, 4604), 'anchore_engine.db.DistroMapping', 'DistroMapping', ([], {'from_distro': '"""centos"""', 'to_distro': '"""rhel"""', 'flavor': '"""RHEL"""'}), "(from_distro='centos', to_distro='rhel', flavor='RHEL')\n", (4549, 4604), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((4614, 4683), 'anchore_engine.db.DistroMapping', 'DistroMapping', ([], {'from_distro': '"""debian"""', 'to_distro': '"""debian"""', 'flavor': '"""DEB"""'}), "(from_distro='debian', to_distro='debian', flavor='DEB')\n", (4627, 4683), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((4693, 4761), 'anchore_engine.db.DistroMapping', 'DistroMapping', ([], {'from_distro': '"""fedora"""', 'to_distro': '"""rhel"""', 'flavor': '"""RHEL"""'}), "(from_distro='fedora', to_distro='rhel', flavor='RHEL')\n", (4706, 4761), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((4771, 4833), 'anchore_engine.db.DistroMapping', 'DistroMapping', ([], {'from_distro': '"""ol"""', 'to_distro': '"""ol"""', 'flavor': '"""RHEL"""'}), "(from_distro='ol', to_distro='ol', flavor='RHEL')\n", (4784, 4833), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((4843, 4909), 'anchore_engine.db.DistroMapping', 'DistroMapping', ([], {'from_distro': '"""rhel"""', 'to_distro': '"""rhel"""', 'flavor': '"""RHEL"""'}), "(from_distro='rhel', to_distro='rhel', flavor='RHEL')\n", (4856, 4909), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((4919, 4988), 'anchore_engine.db.DistroMapping', 'DistroMapping', ([], {'from_distro': '"""ubuntu"""', 'to_distro': '"""ubuntu"""', 'flavor': '"""DEB"""'}), "(from_distro='ubuntu', to_distro='ubuntu', flavor='DEB')\n", (4932, 4988), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((4998, 5064), 'anchore_engine.db.DistroMapping', 'DistroMapping', ([], {'from_distro': '"""amzn"""', 'to_distro': '"""amzn"""', 'flavor': '"""RHEL"""'}), "(from_distro='amzn', to_distro='amzn', flavor='RHEL')\n", (5011, 5064), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((5074, 5142), 'anchore_engine.db.DistroMapping', 'DistroMapping', ([], {'from_distro': '"""redhat"""', 'to_distro': '"""rhel"""', 'flavor': '"""RHEL"""'}), "(from_distro='redhat', to_distro='rhel', flavor='RHEL')\n", (5087, 5142), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((5215, 5321), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""Checking policy engine db initialization. Checking initial set of distro mappings"""'], {}), "(\n 'Checking policy engine db initialization. 
Checking initial set of distro mappings'\n )\n", (5226, 5321), False, 'from anchore_engine.subsys import logger\n'), ((5690, 5743), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""Distro mapping initialization complete"""'], {}), "('Distro mapping initialization complete')\n", (5701, 5743), False, 'from anchore_engine.subsys import logger\n'), ((6651, 6723), 'anchore_engine.services.policy_engine.engine.feeds.feeds.feed_registry.register', 'feed_registry.register', (['cls_tuple[0]'], {'is_vulnerability_feed': 'cls_tuple[1]'}), '(cls_tuple[0], is_vulnerability_feed=cls_tuple[1])\n', (6673, 6723), False, 'from anchore_engine.services.policy_engine.engine.feeds.feeds import VulnerabilityFeed, NvdV2Feed, PackagesFeed, VulnDBFeed, GithubFeed, feed_registry, NvdFeed\n'), ((8522, 8546), 'anchore_engine.configuration.localconfig.get_config', 'localconfig.get_config', ([], {}), '()\n', (8544, 8546), False, 'from anchore_engine.configuration import localconfig\n'), ((9094, 9116), 'time.sleep', 'time.sleep', (['cycle_time'], {}), '(cycle_time)\n', (9104, 9116), False, 'import time\n'), ((9495, 9555), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""simplequeue service not yet ready, will retry"""'], {}), "('simplequeue service not yet ready, will retry')\n", (9506, 9555), False, 'from anchore_engine.subsys import logger\n'), ((10670, 10694), 'anchore_engine.configuration.localconfig.get_config', 'localconfig.get_config', ([], {}), '()\n', (10692, 10694), False, 'from anchore_engine.configuration import localconfig\n'), ((11426, 11448), 'time.sleep', 'time.sleep', (['cycle_time'], {}), '(cycle_time)\n', (11436, 11448), False, 'import time\n'), ((11829, 11889), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""simplequeue service not yet ready, will retry"""'], {}), "('simplequeue service not yet ready, will retry')\n", (11840, 11889), False, 'from anchore_engine.subsys import logger\n'), ((12065, 12116), 'anchore_engine.clients.services.internal_client_for', 'internal_client_for', (['SimpleQueueClient'], {'userId': 'None'}), '(SimpleQueueClient, userId=None)\n', (12084, 12116), False, 'from anchore_engine.clients.services import simplequeue, internal_client_for\n'), ((2649, 2671), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (2659, 2671), False, 'import time\n'), ((2947, 2959), 'anchore_engine.services.policy_engine.engine.feeds.client.get_client', 'get_client', ([], {}), '()\n', (2957, 2959), False, 'from anchore_engine.services.policy_engine.engine.feeds.client import get_client\n'), ((2998, 3040), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""Feeds client credentials ok"""'], {}), "('Feeds client credentials ok')\n", (3009, 3040), False, 'from anchore_engine.subsys import logger\n'), ((5348, 5363), 'anchore_engine.db.session_scope', 'session_scope', ([], {}), '()\n', (5361, 5363), False, 'from anchore_engine.db import session_scope, DistroMapping\n'), ((7204, 7228), 'anchore_engine.configuration.localconfig.get_config', 'localconfig.get_config', ([], {}), '()\n', (7226, 7228), False, 'from anchore_engine.configuration import localconfig\n'), ((7465, 7526), 'anchore_engine.subsys.logger.warn', 'logger.warn', (['"""Feed sync task marked as disabled, so skipping"""'], {}), "('Feed sync task marked as disabled, so skipping')\n", (7476, 7526), False, 'from anchore_engine.subsys import logger\n'), ((7563, 7604), 'anchore_engine.subsys.logger.warn', 'logger.warn', (['"""Received msg of wrong type"""'], {}), "('Received msg of wrong type')\n", 
(7574, 7604), False, 'from anchore_engine.subsys import logger\n'), ((8667, 8715), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""Feed sync task executor activated"""'], {}), "('Feed sync task executor activated')\n", (8678, 8715), False, 'from anchore_engine.subsys import logger\n'), ((9010, 9084), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""sync_enabled is set to false in config - skipping feed sync"""'], {}), "('sync_enabled is set to false in config - skipping feed sync')\n", (9021, 9084), False, 'from anchore_engine.subsys import logger\n'), ((9779, 9988), 'anchore_engine.clients.services.simplequeue.run_target_with_queue_ttl', 'simplequeue.run_target_with_queue_ttl', (['None'], {'queue': 'feed_sync_queuename', 'target': 'do_feed_sync', 'max_wait_seconds': '(30)', 'visibility_timeout': '(180)', 'retries': 'FEED_SYNC_RETRIES', 'backoff_time': 'FEED_SYNC_RETRY_BACKOFF'}), '(None, queue=feed_sync_queuename,\n target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180,\n retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF)\n', (9816, 9988), False, 'from anchore_engine.clients.services import simplequeue, internal_client_for\n'), ((10815, 10862), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""Feed Sync task creator activated"""'], {}), "('Feed Sync task creator activated')\n", (10826, 10862), False, 'from anchore_engine.subsys import logger\n'), ((11304, 11391), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""sync_enabled is set to false in config - skipping feed sync trigger"""'], {}), "(\n 'sync_enabled is set to false in config - skipping feed sync trigger')\n", (11315, 11391), False, 'from anchore_engine.subsys import logger\n'), ((4231, 4242), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4239, 4242), False, 'import sys\n'), ((5830, 5896), 'anchore_engine.subsys.logger.warn', 'logger.warn', (['"""another process has already initialized, continuing"""'], {}), "('another process has already initialized, continuing')\n", (5841, 5896), False, 'from anchore_engine.subsys import logger\n'), ((7850, 7861), 'time.time', 'time.time', ([], {}), '()\n', (7859, 7861), False, 'import time\n'), ((8072, 8083), 'time.time', 'time.time', ([], {}), '()\n', (8081, 8083), False, 'import time\n'), ((8936, 8983), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""Feed sync task executor complete"""'], {}), "('Feed sync task executor complete')\n", (8947, 8983), False, 'from anchore_engine.subsys import logger\n'), ((10940, 11002), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""Feed Sync Trigger done, waiting for next cycle."""'], {}), "('Feed Sync Trigger done, waiting for next cycle.')\n", (10951, 11002), False, 'from anchore_engine.subsys import logger\n'), ((11231, 11277), 'anchore_engine.subsys.logger.info', 'logger.info', (['"""Feed Sync task creator complete"""'], {}), "('Feed Sync task creator complete')\n", (11242, 11277), False, 'from anchore_engine.subsys import logger\n'), ((11054, 11165), 'anchore_engine.subsys.logger.error', 'logger.error', (['"""Error caught in feed sync trigger handler after all retries. Will wait for next cycle"""'], {}), "(\n 'Error caught in feed sync trigger handler after all retries. 
Will wait for next cycle'\n )\n", (11066, 11165), False, 'from anchore_engine.subsys import logger\n'), ((12333, 12390), 'anchore_engine.subsys.logger.error', 'logger.error', (['"""Could not enqueue message for a feed sync"""'], {}), "('Could not enqueue message for a feed sync')\n", (12345, 12390), False, 'from anchore_engine.subsys import logger\n')] |
EvandoBlanco/juriscraper | juriscraper/oral_args/united_states/federal_appellate/scotus.py | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | """Scraper for Supreme Court of U.S.
CourtID: scotus
Court Short Name: scotus
History:
- 2014-07-20 - Created by Andrei Chelaru, reviewed by MLR
- 2017-10-09 - Updated by MLR.
"""
from datetime import datetime
from juriscraper.OralArgumentSite import OralArgumentSite
class Site(OralArgumentSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = (
"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx"
)
self.back_scrape_iterable = list(range(2010, 2015))
def _get_download_urls(self):
path = "id('list')//tr//a/text()"
return list(map(self._return_download_url, self.html.xpath(path)))
@staticmethod
def _return_download_url(d):
file_type = "mp3" # or 'wma' is also available for any case.
download_url = "http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}".format(
type=file_type, docket_number=d
)
return download_url
def _get_case_names(self):
path = "id('list')//tr/td/span/text()"
return [s.lstrip(". ") for s in self.html.xpath(path)]
def _get_case_dates(self):
path = "id('list')//tr/td[2]//text()"
return [
datetime.strptime(s, "%m/%d/%y").date()
for s in self.html.xpath(path)
if not "Date" in s
]
def _get_docket_numbers(self):
path = "id('list')//tr//a/text()"
return list(self.html.xpath(path))
def _download_backwards(self, year):
self.url = (
"http://www.supremecourt.gov/oral_arguments/argument_audio/%s"
% year
)
self.html = self._download()
| [((1318, 1350), 'datetime.datetime.strptime', 'datetime.strptime', (['s', '"""%m/%d/%y"""'], {}), "(s, '%m/%d/%y')\n", (1335, 1350), False, 'from datetime import datetime\n')] |
pengzhansun/CF-CAR | code/main.py | 2e497a4da0bcc80bb327ee041f1aa0107f53bc3f | # -*- coding: utf-8 -*-
import argparse
import os
import shutil
import time
import numpy as np
import random
from collections import OrderedDict
import torch
import torch.backends.cudnn as cudnn
from callbacks import AverageMeter
from data_utils.causal_data_loader_frames import VideoFolder
from utils import save_results
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Counterfactual CAR')
# Path, dataset and log related arguments
parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/',
help='path to the folder with frames')
parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json',
help='path to the json file with train video meta data')
parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json',
help='path to the json file with validation video meta data')
parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json',
help='path to the json file with ground truth labels')
parser.add_argument('--dataset', default='smth_smth',
help='which dataset to train')
parser.add_argument('--logname', default='my_method',
help='name of the experiment for checkpoints and logs')
parser.add_argument('--print_freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 20)')
parser.add_argument('--ckpt', default='./ckpt',
help='folder to output checkpoints')
parser.add_argument('--resume_vision', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_coord', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# model, image&feature dim and training related arguments
parser.add_argument('--model_vision', default='rgb_roi')
parser.add_argument('--model_coord', default='interaction')
parser.add_argument('--model_fusion', default='concat_fusion')
parser.add_argument('--fusion_function', default='fused_sum', type=str,
help='function for fusing activations from each branch')
parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N',
help='intermediate feature dimension for image-based features')
parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N',
help='intermediate feature dimension for coord-based features')
parser.add_argument('--size', default=224, type=int, metavar='N',
help='primary image input size')
parser.add_argument('--num_boxes', default=4, type=int,
help='num of boxes for each image')
parser.add_argument('--num_frames', default=16, type=int,
help='num of frames for the model')
parser.add_argument('--num_classes', default=174, type=int,
                    help='num of classes in the model')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start_epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch_size', '-b', default=16, type=int,
metavar='N', help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs="+",
metavar='LRSteps', help='epochs to decay learning rate by 10')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--clip_gradient', '-cg', default=5, type=float,
metavar='W', help='gradient norm clipping (default: 5)')
parser.add_argument('--search_stride', type=int, default=5, help='test performance every n strides')
# train mode, hardware setting and others related arguments
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--cf_inference_group', action='store_true', help='run counterfactual inference on the validation set')
parser.add_argument('--parallel', default=True, type=bool,
help='whether or not train with multi GPUs')
parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3', help='the index of gpu you want to use')
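# Example invocation (illustrative; adjust paths and flags to your setup):
#   python main.py --root_frames /path/to/frames --batch_size 16 --gpu_index 0,1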
best_loss = 1000000
def main():
global args, best_loss
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
print(args)
# create vision model
if args.model_vision == 'global_i3d':
from model.model_lib import VideoGlobalModel as RGBModel
print('global_i3d loaded!!')
elif args.model_vision == 'rgb_roi':
from model.model_lib import BboxVisualModel as RGBModel
print('rgb_roi loaded!!')
else:
print("no such a vision model!")
# create coord model
if args.model_coord == 'interaction':
from model.model_lib import BboxInteractionLatentModel as BboxModel
print('interaction loaded!!')
else:
print("no such a coordinate model!")
# create fusion model
if args.model_fusion == 'concat_fusion':
from model.model_lib import ConcatFusionModel as FusionModel
print('concat_fusion loaded!!')
else:
print('no such a fusion model!')
# load model branch
vision_model = RGBModel(args)
coord_model = BboxModel(args)
fusion_model = FusionModel(args)
# create the fusion function for the activation of three branches
if args.fusion_function == 'fused_sum':
from fusion_function import logsigsum as fusion_func
print('fused_sum loaded!!')
elif args.fusion_function == 'naive_sum':
from fusion_function import naivesum as fusion_func
print('naive_sum loaded!!')
else:
print('no such a fusion function!')
fusion_function = fusion_func()
if args.parallel:
vision_model = torch.nn.DataParallel(vision_model).cuda()
coord_model = torch.nn.DataParallel(coord_model).cuda()
fusion_model = torch.nn.DataParallel(fusion_model).cuda()
else:
vision_model = vision_model.cuda()
coord_model = coord_model.cuda()
fusion_model = fusion_model.cuda()
# optionally resume vision model from a checkpoint
if args.resume_vision:
assert os.path.isfile(args.resume_vision), "No checkpoint found at '{}'".format(args.resume_vision)
print("=> loading checkpoint '{}'".format(args.resume_vision))
checkpoint = torch.load(args.resume_vision)
if args.start_epoch is None:
args.start_epoch = checkpoint['epoch']
best_loss = checkpoint['best_loss']
vision_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume_vision, checkpoint['epoch']))
# optionally resume coord model from a checkpoint
if args.resume_coord:
assert os.path.isfile(args.resume_coord), "No checkpoint found at '{}'".format(args.resume_coord)
print("=> loading checkpoint '{}'".format(args.resume_coord))
checkpoint = torch.load(args.resume_coord)
if args.start_epoch is None:
args.start_epoch = checkpoint['epoch']
best_loss = checkpoint['best_loss']
coord_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume_coord, checkpoint['epoch']))
if args.resume_fusion:
assert os.path.isfile(args.resume_fusion), "No checkpoint found at '{}'".format(args.resume_fusion)
print("=> loading checkpoint '{}'".format(args.resume_fusion))
checkpoint = torch.load(args.resume_fusion)
if args.start_epoch is None:
args.start_epoch = checkpoint['epoch']
best_loss = checkpoint['best_loss']
fusion_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume_fusion, checkpoint['epoch']))
if args.start_epoch is None:
args.start_epoch = 0
cudnn.benchmark = True
# create training and validation dataset
dataset_train = VideoFolder(root=args.root_frames,
num_boxes=args.num_boxes,
file_input=args.json_data_train,
file_labels=args.json_file_labels,
frames_duration=args.num_frames,
args=args,
is_val=False,
if_augment=True,
)
dataset_val = VideoFolder(root=args.root_frames,
num_boxes=args.num_boxes,
file_input=args.json_data_val,
file_labels=args.json_file_labels,
frames_duration=args.num_frames,
args=args,
is_val=True,
if_augment=True,
)
# create training and validation loader
train_loader = torch.utils.data.DataLoader(
dataset_train,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, drop_last=True,
pin_memory=True
)
val_loader = torch.utils.data.DataLoader(
dataset_val, drop_last=True,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False
)
model_list = [vision_model, coord_model, fusion_model]
optimizer_vision = torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()),
momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
optimizer_coord = torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()),
momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
optimizer_fusion = torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()),
momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
optimizer_list = [optimizer_vision, optimizer_coord, optimizer_fusion]
criterion = torch.nn.CrossEntropyLoss()
search_list = np.linspace(0.0, 1.0, 11)
# factual inference (vanilla test stage)
if args.evaluate:
validate(val_loader, model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict)
return
# Counterfactual inference, sweeping a list of fusion weights (hyperparameters)
if args.cf_inference_group:
cf_inference_group(val_loader, model_list, fusion_function, search_list,
class_to_idx=dataset_val.classes_dict)
return
print('training begin...')
for epoch in tqdm(range(args.start_epoch, args.epochs)):
adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision')
adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord')
adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion')
# train for one epoch
train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion)
if (epoch + 1) >= 30 and (epoch + 1) % args.search_stride == 0:
loss = validate(val_loader, model_list, fusion_function, criterion,
epoch=epoch, class_to_idx=dataset_val.classes_dict)
else:
loss = 100
# remember best loss and save checkpoint
is_best = loss < best_loss
best_loss = min(loss, best_loss)
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': vision_model.state_dict(),
'best_loss': best_loss,
},
is_best,
os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname)))
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': coord_model.state_dict(),
'best_loss': best_loss,
},
is_best,
os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname)))
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': fusion_model.state_dict(),
'best_loss': best_loss,
},
is_best,
os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname)))
def train(train_loader, model_list, fusion_function,
optimizer_list, epoch, criterion):
global args
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc_top1 = AverageMeter()
acc_top5 = AverageMeter()
# load three model branches
[vision_model, coord_model, fusion_model] = model_list
# unpack the three optimizers (vision, coord, fusion)
[optimizer_vision, optimizer_coord, optimizer_fusion] = optimizer_list
# switch to train mode
vision_model.train()
coord_model.train()
fusion_model.train()
end = time.time()
for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(train_loader):
data_time.update(time.time() - end)
# obtain the activation and vision features from vision branch
output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label)
output_vision = output_vision.view((-1, len(train_loader.dataset.classes)))
# obtain the activation and coordinate features from coordinate branch
output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label)
output_coord = output_coord.view((-1, len(train_loader.dataset.classes)))
# detach the features so fusion-branch gradients do not flow back into the vision/coord branches
feature_vision_detached = feature_vision.detach()
feature_coord_detached = feature_coord.detach()
# obtain the activation of fusion branch
output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda())
output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes)))
output_factual = fusion_function(output_vision, output_coord, output_fusion)
# loss_fusion supervises the fusion branch's own logits; loss_factual supervises the fused output from fusion_function
loss_vision = criterion(output_vision, video_label.long().cuda())
loss_coord = criterion(output_coord, video_label.long().cuda())
loss_fusion = criterion(output_fusion, video_label.long().cuda())
loss_factual = criterion(output_factual, video_label.long().cuda())
# measure the accuracy of the fused (factual) output of the three branches
acc1, acc5 = accuracy(output_factual.cpu(), video_label, topk=(1, 5))
# record the accuracy and loss
losses.update(loss_factual.item(), global_img_tensors.size(0))
acc_top1.update(acc1.item(), global_img_tensors.size(0))
acc_top5.update(acc5.item(), global_img_tensors.size(0))
# zero the gradients of the three optimizers
optimizer_vision.zero_grad()
optimizer_coord.zero_grad()
optimizer_fusion.zero_grad()
loss = loss_vision + loss_coord + loss_factual
loss.backward()
if args.clip_gradient is not None:
torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient)
# update the parameter
optimizer_vision.step()
optimizer_coord.step()
optimizer_fusion.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\t'
'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses,
acc_top1=acc_top1, acc_top5=acc_top5))
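# Editorial note on the objective above (not part of the original source): the fusion branch only
# sees detached vision/coord features, and the total loss that is backpropagated is
#   loss = loss_vision + loss_coord + loss_factual
# i.e. each branch is supervised by its own logits plus the fused (factual) prediction, while
# loss_fusion is computed but not used in the backward pass.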
def validate(val_loader, model_list, fusion_function, criterion,
epoch=None, class_to_idx=None):
batch_time = AverageMeter()
losses = AverageMeter()
acc_top1 = AverageMeter()
acc_top5 = AverageMeter()
logits_matrix = []
targets_list = []
# unpack three models
[vision_model, coord_model, fusion_model] = model_list
# switch to evaluate mode
vision_model.eval()
coord_model.eval()
fusion_model.eval()
end = time.time()
for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader):
# compute output
with torch.no_grad():
output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label)
output_vision = output_vision.view((-1, len(val_loader.dataset.classes)))
output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label)
output_coord = output_coord.view((-1, len(val_loader.dataset.classes)))
# detach the features before passing them to the fusion branch
feature_vision_detached = feature_vision.detach()
feature_coord_detached = feature_coord.detach()
# obtain the activation of fusion branch
output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda())
output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes)))
# fuse three outputs
output_factual = fusion_function(output_vision, output_coord, output_fusion)
# note: in validation, loss_fusion is computed on output_factual (the fused result from fusion_function)
loss_vision = criterion(output_vision, video_label.long().cuda())
loss_coord = criterion(output_coord, video_label.long().cuda())
loss_fusion = criterion(output_factual, video_label.long().cuda())
# report statistics on the fused (factual) output; the recorded loss is the vision-branch loss
output = output_factual
loss = loss_vision
acc1, acc5 = accuracy(output.cpu(), video_label, topk=(1, 5))
if args.evaluate:
logits_matrix.append(output.cpu().data.numpy())
targets_list.append(video_label.cpu().numpy())
# measure accuracy and record loss
losses.update(loss.item(), global_img_tensors.size(0))
acc_top1.update(acc1.item(), global_img_tensors.size(0))
acc_top5.update(acc5.item(), global_img_tensors.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i + 1 == len(val_loader):
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\t'
'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\t'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
acc_top1=acc_top1, acc_top5=acc_top5,
))
if args.evaluate:
logits_matrix = np.concatenate(logits_matrix)
targets_list = np.concatenate(targets_list)
save_results(logits_matrix, targets_list, class_to_idx, args)
return losses.avg
def cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=None):
batch_time = AverageMeter()
search_length = len(search_list)
search_dict = {}
for i in range(search_length):
search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter()
search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter()
[vision_model, coord_model, fusion_model] = model_list
# switch to evaluate mode
vision_model.eval()
coord_model.eval()
fusion_model.eval()
end = time.time()
for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader):
# compute output
with torch.no_grad():
# factual inference
output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(),
video_label)
output_vision = output_vision.view((-1, len(val_loader.dataset.classes)))
output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(),
video_label)
output_coord = output_coord.view((-1, len(val_loader.dataset.classes)))
# obtain the activation of fusion branch
output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda())
output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes)))
# fuse three outputs
output_factual = fusion_function(output_vision, output_coord, output_fusion)
# counterfactual inference
output_vision_subtrahend = output_vision
output_counterfactual = fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0))
for j in range(search_length):
weight = search_list[j]
output_debiased = output_factual - output_counterfactual * weight
acc1, acc5 = accuracy(output_debiased.cpu(), video_label, topk=(1, 5))
search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0))
search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i + 1 == len(val_loader):
print('Cf-Inference: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\t'
'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\t'
'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\t'
'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\t'
'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format(
i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'],
acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'],
acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0']))
for k in range(search_length):
print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg,
search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg)
return
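# Editorial summary of the counterfactual sweep above (comment only, not original source):
#   factual        = fusion_function(vision, coord, fusion)
#   counterfactual = fusion_function(vision, 0, 0)        # coord/fusion activations zeroed out
#   debiased(a)    = factual - a * counterfactual,  for a in np.linspace(0.0, 1.0, 11)
# top-1 / top-5 accuracy is then reported for every value of a in the sweep.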
def save_checkpoint(state, is_best, filename):
torch.save(state, filename + '_latest.pth.tar')
if is_best:
shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar')
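# Example (hypothetical args.ckpt / args.logname values; the real defaults are defined earlier in
# this file): save_checkpoint(state, True, os.path.join('ckpt', 'global_i3d_run1')) writes
# ckpt/global_i3d_run1_latest.pth.tar and copies it to ckpt/global_i3d_run1_best.pth.tar.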
def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None):
"""Sets the learning rate to the initial LR decayed by 10"""
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
lr = args.lr * decay
if branch_name == 'vision':
for param_group in optimizer.param_groups:
param_group['lr'] = lr * 0.8
elif branch_name == 'coord':
for param_group in optimizer.param_groups:
param_group['lr'] = lr
elif branch_name == 'fusion':
for param_group in optimizer.param_groups:
param_group['lr'] = lr
else:
for param_group in optimizer.param_groups:
param_group['lr'] = lr
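# Worked example, assuming the defaults above (lr=0.01, lr_steps=[24, 35, 45]):
#   epoch 10 -> decay 1.0   -> coord/fusion lr 0.01,  vision lr 0.008
#   epoch 30 -> decay 0.1   -> coord/fusion lr 0.001, vision lr 0.0008
#   epoch 46 -> decay 0.001 -> coord/fusion lr 1e-05, vision lr 8e-06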
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
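# Minimal usage sketch (shapes are assumptions): output is [batch, num_classes] logits and target
# is [batch] class indices, so
#   top1, top5 = accuracy(output, target, topk=(1, 5))
# returns two 1-element tensors holding percentages, as consumed by train()/validate() above.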
if __name__ == '__main__':
main()
| [((371, 428), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Counterfactual CAR"""'}), "(description='Counterfactual CAR')\n", (394, 428), False, 'import argparse\n'), ((6328, 6342), 'model.model_lib.BboxVisualModel', 'RGBModel', (['args'], {}), '(args)\n', (6336, 6342), True, 'from model.model_lib import BboxVisualModel as RGBModel\n'), ((6362, 6377), 'model.model_lib.BboxInteractionLatentModel', 'BboxModel', (['args'], {}), '(args)\n', (6371, 6377), True, 'from model.model_lib import BboxInteractionLatentModel as BboxModel\n'), ((6398, 6415), 'model.model_lib.ConcatFusionModel', 'FusionModel', (['args'], {}), '(args)\n', (6409, 6415), True, 'from model.model_lib import ConcatFusionModel as FusionModel\n'), ((6859, 6872), 'fusion_function.naivesum', 'fusion_func', ([], {}), '()\n', (6870, 6872), True, 'from fusion_function import naivesum as fusion_func\n'), ((9249, 9461), 'data_utils.causal_data_loader_frames.VideoFolder', 'VideoFolder', ([], {'root': 'args.root_frames', 'num_boxes': 'args.num_boxes', 'file_input': 'args.json_data_train', 'file_labels': 'args.json_file_labels', 'frames_duration': 'args.num_frames', 'args': 'args', 'is_val': '(False)', 'if_augment': '(True)'}), '(root=args.root_frames, num_boxes=args.num_boxes, file_input=\n args.json_data_train, file_labels=args.json_file_labels,\n frames_duration=args.num_frames, args=args, is_val=False, if_augment=True)\n', (9260, 9461), False, 'from data_utils.causal_data_loader_frames import VideoFolder\n'), ((9738, 9948), 'data_utils.causal_data_loader_frames.VideoFolder', 'VideoFolder', ([], {'root': 'args.root_frames', 'num_boxes': 'args.num_boxes', 'file_input': 'args.json_data_val', 'file_labels': 'args.json_file_labels', 'frames_duration': 'args.num_frames', 'args': 'args', 'is_val': '(True)', 'if_augment': '(True)'}), '(root=args.root_frames, num_boxes=args.num_boxes, file_input=\n args.json_data_val, file_labels=args.json_file_labels, frames_duration=\n args.num_frames, args=args, is_val=True, if_augment=True)\n', (9749, 9948), False, 'from data_utils.causal_data_loader_frames import VideoFolder\n'), ((10256, 10403), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_train'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.workers', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(dataset_train, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True)\n', (10283, 10403), False, 'import torch\n'), ((10461, 10609), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_val'], {'drop_last': '(True)', 'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.workers', 'pin_memory': '(False)'}), '(dataset_val, drop_last=True, batch_size=args.\n batch_size, shuffle=False, num_workers=args.workers, pin_memory=False)\n', (10488, 10609), False, 'import torch\n'), ((11422, 11449), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (11447, 11449), False, 'import torch\n'), ((11469, 11494), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(11)'], {}), '(0.0, 1.0, 11)\n', (11480, 11494), True, 'import numpy as np\n'), ((13865, 13879), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (13877, 13879), False, 'from callbacks import AverageMeter\n'), ((13897, 13911), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (13909, 13911), False, 'from callbacks import AverageMeter\n'), ((13928, 13942), 'callbacks.AverageMeter', 
'AverageMeter', ([], {}), '()\n', (13940, 13942), False, 'from callbacks import AverageMeter\n'), ((13961, 13975), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (13973, 13975), False, 'from callbacks import AverageMeter\n'), ((13992, 14006), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14004, 14006), False, 'from callbacks import AverageMeter\n'), ((14379, 14390), 'time.time', 'time.time', ([], {}), '()\n', (14388, 14390), False, 'import time\n'), ((17766, 17780), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (17778, 17780), False, 'from callbacks import AverageMeter\n'), ((17795, 17809), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (17807, 17809), False, 'from callbacks import AverageMeter\n'), ((17826, 17840), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (17838, 17840), False, 'from callbacks import AverageMeter\n'), ((17857, 17871), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (17869, 17871), False, 'from callbacks import AverageMeter\n'), ((18128, 18139), 'time.time', 'time.time', ([], {}), '()\n', (18137, 18139), False, 'import time\n'), ((21275, 21289), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (21287, 21289), False, 'from callbacks import AverageMeter\n'), ((21746, 21757), 'time.time', 'time.time', ([], {}), '()\n', (21755, 21757), False, 'import time\n'), ((24887, 24934), 'torch.save', 'torch.save', (['state', "(filename + '_latest.pth.tar')"], {}), "(state, filename + '_latest.pth.tar')\n", (24897, 24934), False, 'import torch\n'), ((7340, 7374), 'os.path.isfile', 'os.path.isfile', (['args.resume_vision'], {}), '(args.resume_vision)\n', (7354, 7374), False, 'import os\n'), ((7527, 7557), 'torch.load', 'torch.load', (['args.resume_vision'], {}), '(args.resume_vision)\n', (7537, 7557), False, 'import torch\n'), ((7974, 8007), 'os.path.isfile', 'os.path.isfile', (['args.resume_coord'], {}), '(args.resume_coord)\n', (7988, 8007), False, 'import os\n'), ((8158, 8187), 'torch.load', 'torch.load', (['args.resume_coord'], {}), '(args.resume_coord)\n', (8168, 8187), False, 'import torch\n'), ((8548, 8582), 'os.path.isfile', 'os.path.isfile', (['args.resume_fusion'], {}), '(args.resume_fusion)\n', (8562, 8582), False, 'import os\n'), ((8735, 8765), 'torch.load', 'torch.load', (['args.resume_fusion'], {}), '(args.resume_fusion)\n', (8745, 8765), False, 'import torch\n'), ((17006, 17017), 'time.time', 'time.time', ([], {}), '()\n', (17015, 17017), False, 'import time\n'), ((20387, 20398), 'time.time', 'time.time', ([], {}), '()\n', (20396, 20398), False, 'import time\n'), ((20976, 21005), 'numpy.concatenate', 'np.concatenate', (['logits_matrix'], {}), '(logits_matrix)\n', (20990, 21005), True, 'import numpy as np\n'), ((21030, 21058), 'numpy.concatenate', 'np.concatenate', (['targets_list'], {}), '(targets_list)\n', (21044, 21058), True, 'import numpy as np\n'), ((21068, 21129), 'utils.save_results', 'save_results', (['logits_matrix', 'targets_list', 'class_to_idx', 'args'], {}), '(logits_matrix, targets_list, class_to_idx, args)\n', (21080, 21129), False, 'from utils import save_results\n'), ((21460, 21474), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (21472, 21474), False, 'from callbacks import AverageMeter\n'), ((21549, 21563), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (21561, 21563), False, 'from callbacks import AverageMeter\n'), ((23686, 23697), 'time.time', 'time.time', ([], {}), '()\n', (23695, 23697), False, 'import time\n'), ((24961, 25034), 
'shutil.copyfile', 'shutil.copyfile', (["(filename + '_latest.pth.tar')", "(filename + '_best.pth.tar')"], {}), "(filename + '_latest.pth.tar', filename + '_best.pth.tar')\n", (24976, 25034), False, 'import shutil\n'), ((25878, 25893), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25891, 25893), False, 'import torch\n'), ((18281, 18296), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18294, 18296), False, 'import torch\n'), ((21899, 21914), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21912, 21914), False, 'import torch\n'), ((6922, 6957), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['vision_model'], {}), '(vision_model)\n', (6943, 6957), False, 'import torch\n'), ((6988, 7022), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['coord_model'], {}), '(coord_model)\n', (7009, 7022), False, 'import torch\n'), ((7054, 7089), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['fusion_model'], {}), '(fusion_model)\n', (7075, 7089), False, 'import torch\n'), ((14522, 14533), 'time.time', 'time.time', ([], {}), '()\n', (14531, 14533), False, 'import time\n'), ((16972, 16983), 'time.time', 'time.time', ([], {}), '()\n', (16981, 16983), False, 'import time\n'), ((20353, 20364), 'time.time', 'time.time', ([], {}), '()\n', (20362, 20364), False, 'import time\n'), ((23037, 23054), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (23049, 23054), False, 'import torch\n'), ((23056, 23073), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (23068, 23073), False, 'import torch\n'), ((23652, 23663), 'time.time', 'time.time', ([], {}), '()\n', (23661, 23663), False, 'import time\n'), ((25214, 25232), 'numpy.array', 'np.array', (['lr_steps'], {}), '(lr_steps)\n', (25222, 25232), True, 'import numpy as np\n')] |
114000/webapp-boilerplate | api/application/__init__.py | 0550396694b4f009e5d862b0098bf7d1f61a4a40 | # encoding: utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import logging
app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}})
app.config.from_object('config.current')
db = SQLAlchemy(app)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
'''
'''
import application.jwt
import application.routes.config
import application.routes.user
import application.routes.permission
import application.routes.role
import application.routes.access
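# Editorial note (not part of the original file): these modules are imported for their side
# effects; each is assumed to register handlers on the shared `app`, roughly like:
#   from application import app, db
#   @app.route('/users')
#   def list_users(): ...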
# create all tables only after the models have been imported/defined
db.create_all() | [((144, 159), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (149, 159), False, 'from flask import Flask\n'), ((161, 206), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/*': {'origins': '*'}}"}), "(app, resources={'/*': {'origins': '*'}})\n", (165, 206), False, 'from flask_cors import CORS\n'), ((260, 275), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (270, 275), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((288, 315), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (305, 315), False, 'import logging\n')] |
jefftc/changlab | Betsy/Betsy/modules/get_illumina_control.py | 11da8c415afefcba0b0216238387c75aeb3a56ac | from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
import os
import shutil
from genomicode import filelib
in_data = antecedents
result_files = os.listdir(in_data.identifier)
for result_file in result_files:
if '-controls' in result_file:
goal_file = os.path.join(in_data.identifier, result_file)
shutil.copyfile(goal_file, outfile)
assert filelib.exists_nz(outfile), (
'the output file %s for illu_control fails' % outfile
)
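# Illustrative behavior (hypothetical file names): if the antecedent directory contains
# ['sample-qc.txt', 'experiment-controls.gct'], only the '-controls' file is copied to outfile.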
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(antecedents.identifier)
filename = 'control_illumina_' + original_file + '.gct'
return filename
| [((97, 126), 'Module.AbstractModule.__init__', 'AbstractModule.__init__', (['self'], {}), '(self)\n', (120, 126), False, 'from Module import AbstractModule\n'), ((368, 398), 'os.listdir', 'os.listdir', (['in_data.identifier'], {}), '(in_data.identifier)\n', (378, 398), False, 'import os\n'), ((638, 664), 'genomicode.filelib.exists_nz', 'filelib.exists_nz', (['outfile'], {}), '(outfile)\n', (655, 664), False, 'from genomicode import filelib\n'), ((864, 912), 'Betsy.module_utils.get_inputid', 'module_utils.get_inputid', (['antecedents.identifier'], {}), '(antecedents.identifier)\n', (888, 912), False, 'from Betsy import module_utils\n'), ((511, 556), 'os.path.join', 'os.path.join', (['in_data.identifier', 'result_file'], {}), '(in_data.identifier, result_file)\n', (523, 556), False, 'import os\n'), ((573, 608), 'shutil.copyfile', 'shutil.copyfile', (['goal_file', 'outfile'], {}), '(goal_file, outfile)\n', (588, 608), False, 'import shutil\n')] |