repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-840k) |
---|---|---|---|---|
JoOkuma/dexp | dexp/cli/dexp_commands/crop.py | 6d9003384605b72f387d38b5befa29e4e2246af8 | import click
from arbol.arbol import aprint, asection
from dexp.cli.defaults import DEFAULT_CLEVEL, DEFAULT_CODEC, DEFAULT_STORE
from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks
from dexp.datasets.open_dataset import glob_datasets
from dexp.datasets.operations.crop import dataset_crop
@click.command()
@click.argument("input_paths", nargs=-1) # , help='input path'
@click.option("--output_path", "-o") # , help='output path'
@click.option("--channels", "-c", default=None, help="List of channels, all channels when ommited.")
@click.option(
"--quantile",
"-q",
default=0.99,
type=float,
help="Quantile parameter for lower bound of brightness for thresholding.",
show_default=True,
)
@click.option(
"--reference-channel",
"-rc",
default=None,
help="Reference channel to estimate cropping. If no provided it picks the first one.",
)
@click.option("--store", "-st", default=DEFAULT_STORE, help="Zarr store: ‘dir’, ‘ndir’, or ‘zip’", show_default=True)
@click.option("--chunks", "-chk", default=None, help="Dataset chunks dimensions, e.g. (1, 126, 512, 512).")
@click.option(
"--codec",
"-z",
default=DEFAULT_CODEC,
help="Compression codec: zstd for ’, ‘blosclz’, ‘lz4’, ‘lz4hc’, ‘zlib’ or ‘snappy’ ",
show_default=True,
)
@click.option("--clevel", "-l", type=int, default=DEFAULT_CLEVEL, help="Compression level", show_default=True)
@click.option("--overwrite", "-w", is_flag=True, help="Forces overwrite of target", show_default=True)
@click.option(
"--workers",
"-wk",
default=-4,
help="Number of worker threads to spawn. Negative numbers n correspond to: number_of _cores / |n| ",
show_default=True,
) #
@click.option("--check", "-ck", default=True, help="Checking integrity of written file.", show_default=True) #
def crop(
input_paths,
output_path,
channels,
quantile,
reference_channel,
store,
chunks,
codec,
clevel,
overwrite,
workers,
check,
):
input_dataset, input_paths = glob_datasets(input_paths)
output_path = _get_output_path(input_paths[0], output_path, "_crop")
channels = _parse_channels(input_dataset, channels)
if reference_channel is None:
reference_channel = input_dataset.channels()[0]
chunks = _parse_chunks(chunks)
with asection(
f"Cropping from: {input_paths} to {output_path} for channels: {channels}, "
f"using channel {reference_channel} as a reference."
):
dataset_crop(
input_dataset,
output_path,
channels=channels,
reference_channel=reference_channel,
quantile=quantile,
store=store,
chunks=chunks,
compression=codec,
compression_level=clevel,
overwrite=overwrite,
workers=workers,
check=check,
)
input_dataset.close()
aprint("Done!")
| [((10, 1, 10, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((11, 1, 11, 40), 'click.argument', 'click.argument', (), '', False, 'import click\n'), ((12, 1, 12, 36), 'click.option', 'click.option', ({(12, 14, 12, 29): '"""--output_path"""', (12, 31, 12, 35): '"""-o"""'}, {}), "('--output_path', '-o')", False, 'import click\n'), ((13, 1, 13, 100), 'click.option', 'click.option', (), '', False, 'import click\n'), ((14, 1, 21, 1), 'click.option', 'click.option', (), '', False, 'import click\n'), ((22, 1, 27, 1), 'click.option', 'click.option', (), '', False, 'import click\n'), ((28, 1, 28, 129), 'click.option', 'click.option', (), '', False, 'import click\n'), ((29, 1, 29, 107), 'click.option', 'click.option', (), '', False, 'import click\n'), ((30, 1, 36, 1), 'click.option', 'click.option', (), '', False, 'import click\n'), ((37, 1, 37, 110), 'click.option', 'click.option', (), '', False, 'import click\n'), ((38, 1, 38, 102), 'click.option', 'click.option', (), '', False, 'import click\n'), ((39, 1, 45, 1), 'click.option', 'click.option', (), '', False, 'import click\n'), ((46, 1, 46, 108), 'click.option', 'click.option', (), '', False, 'import click\n'), ((62, 33, 62, 59), 'dexp.datasets.open_dataset.glob_datasets', 'glob_datasets', ({(62, 47, 62, 58): 'input_paths'}, {}), '(input_paths)', False, 'from dexp.datasets.open_dataset import glob_datasets\n'), ((63, 18, 63, 72), 'dexp.cli.parsing._get_output_path', '_get_output_path', ({(63, 35, 63, 49): 'input_paths[0]', (63, 51, 63, 62): 'output_path', (63, 64, 63, 71): '"""_crop"""'}, {}), "(input_paths[0], output_path, '_crop')", False, 'from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks\n'), ((64, 15, 64, 55), 'dexp.cli.parsing._parse_channels', '_parse_channels', ({(64, 31, 64, 44): 'input_dataset', (64, 46, 64, 54): 'channels'}, {}), '(input_dataset, channels)', False, 'from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks\n'), ((67, 13, 67, 34), 'dexp.cli.parsing._parse_chunks', '_parse_chunks', ({(67, 27, 67, 33): 'chunks'}, {}), '(chunks)', False, 'from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks\n'), ((69, 9, 72, 5), 'arbol.arbol.asection', 'asection', ({(70, 8, 71, 60): 'f"""Cropping from: {input_paths} to {output_path} for channels: {channels}, using channel {reference_channel} as a reference."""'}, {}), "(\n f'Cropping from: {input_paths} to {output_path} for channels: {channels}, using channel {reference_channel} as a reference.'\n )", False, 'from arbol.arbol import aprint, asection\n'), ((74, 8, 87, 9), 'dexp.datasets.operations.crop.dataset_crop', 'dataset_crop', (), '', False, 'from dexp.datasets.operations.crop import dataset_crop\n'), ((90, 8, 90, 23), 'arbol.arbol.aprint', 'aprint', ({(90, 15, 90, 22): '"""Done!"""'}, {}), "('Done!')", False, 'from arbol.arbol import aprint, asection\n')] |
YinuoJin/DMT_loss | morse_DMT/write_dipha_file_3d_revise.py | c6e66cb7997b7cd5616156faaf294e350e77c4c2 | import sys
from matplotlib import image as mpimg
import numpy as np
import os
DIPHA_CONST = 8067171840
DIPHA_IMAGE_TYPE_CONST = 1
DIM = 3
input_dir = os.path.join(os.getcwd(), sys.argv[1])
dipha_output_filename = sys.argv[2]
vert_filename = sys.argv[3]
input_filenames = [name
for name in os.listdir(input_dir)
if (os.path.isfile(input_dir + '/' + name)) and (name != ".DS_Store")]
input_filenames.sort()
image = mpimg.imread(os.path.join(input_dir, input_filenames[0]))
nx, ny = image.shape
del image
nz = len(input_filenames)
print(nx, ny, nz)
#sys.exit()
im_cube = np.zeros([nx, ny, nz])
i = 0
for name in input_filenames:
sys.stdout.flush()
print(i, name)
fileName = input_dir + "/" + name
im_cube[:, :, i] = mpimg.imread(fileName)
i = i + 1
print('writing dipha output...')
with open(dipha_output_filename, 'wb') as output_file:
# this is needed to verify you are giving dipha a dipha file
np.int64(DIPHA_CONST).tofile(output_file)
# this tells dipha that we are giving an image as input
np.int64(DIPHA_IMAGE_TYPE_CONST).tofile(output_file)
# number of points
np.int64(nx * ny * nz).tofile(output_file)
# dimension
np.int64(DIM).tofile(output_file)
# pixels in each dimension
np.int64(nx).tofile(output_file)
np.int64(ny).tofile(output_file)
np.int64(nz).tofile(output_file)
# pixel values
for k in range(nz):
sys.stdout.flush()
print('dipha - working on image', k)
for j in range(ny):
for i in range(nx):
val = int(-im_cube[i, j, k]*255)
'''
if val != 0 and val != -1:
print('val check:', val)
'''
np.float64(val).tofile(output_file)
output_file.close()
print('writing vert file')
with open(vert_filename, 'w') as vert_file:
for k in range(nz):
sys.stdout.flush()
print('verts - working on image', k)
for j in range(ny):
for i in range(nx):
vert_file.write(str(i) + ' ' + str(j) + ' ' + str(k) + ' ' + str(int(-im_cube[i, j, k] * 255)) + '\n')
vert_file.close()
print(nx, ny, nz)
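# Note on the binary layout written above (derived from the writes in this script, not from the DIPHA spec):
# int64 magic (8067171840), int64 file type (1), int64 point count (nx*ny*nz), int64 dimension (3),
# int64 nx, ny, nz, followed by nx*ny*nz float64 pixel values with i (x) varying fastest.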
| [((27, 10, 27, 32), 'numpy.zeros', 'np.zeros', ({(27, 19, 27, 31): '[nx, ny, nz]'}, {}), '([nx, ny, nz])', True, 'import numpy as np\n'), ((10, 25, 10, 36), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((20, 21, 20, 64), 'os.path.join', 'os.path.join', ({(20, 34, 20, 43): 'input_dir', (20, 45, 20, 63): 'input_filenames[0]'}, {}), '(input_dir, input_filenames[0])', False, 'import os\n'), ((31, 4, 31, 22), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((34, 23, 34, 45), 'matplotlib.image.imread', 'mpimg.imread', ({(34, 36, 34, 44): 'fileName'}, {}), '(fileName)', True, 'from matplotlib import image as mpimg\n'), ((15, 31, 15, 52), 'os.listdir', 'os.listdir', ({(15, 42, 15, 51): 'input_dir'}, {}), '(input_dir)', False, 'import os\n'), ((54, 8, 54, 26), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((69, 8, 69, 26), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((16, 23, 16, 61), 'os.path.isfile', 'os.path.isfile', ({(16, 38, 16, 60): "(input_dir + '/' + name)"}, {}), "(input_dir + '/' + name)", False, 'import os\n'), ((41, 4, 41, 25), 'numpy.int64', 'np.int64', ({(41, 13, 41, 24): 'DIPHA_CONST'}, {}), '(DIPHA_CONST)', True, 'import numpy as np\n'), ((43, 4, 43, 36), 'numpy.int64', 'np.int64', ({(43, 13, 43, 35): 'DIPHA_IMAGE_TYPE_CONST'}, {}), '(DIPHA_IMAGE_TYPE_CONST)', True, 'import numpy as np\n'), ((45, 4, 45, 26), 'numpy.int64', 'np.int64', ({(45, 13, 45, 25): '(nx * ny * nz)'}, {}), '(nx * ny * nz)', True, 'import numpy as np\n'), ((47, 4, 47, 17), 'numpy.int64', 'np.int64', ({(47, 13, 47, 16): 'DIM'}, {}), '(DIM)', True, 'import numpy as np\n'), ((49, 4, 49, 16), 'numpy.int64', 'np.int64', ({(49, 13, 49, 15): 'nx'}, {}), '(nx)', True, 'import numpy as np\n'), ((50, 4, 50, 16), 'numpy.int64', 'np.int64', ({(50, 13, 50, 15): 'ny'}, {}), '(ny)', True, 'import numpy as np\n'), ((51, 4, 51, 16), 'numpy.int64', 'np.int64', ({(51, 13, 51, 15): 'nz'}, {}), '(nz)', True, 'import numpy as np\n'), ((63, 16, 63, 31), 'numpy.float64', 'np.float64', ({(63, 27, 63, 30): 'val'}, {}), '(val)', True, 'import numpy as np\n')] |
clodonil/pipeline_aws_custom | microservices/validate/tools/validates.py | 8ca517d0bad48fe528461260093f0035f606f9be | """
Tools to validate the template file received from SQS.
"""
import yaml
class Validate:
def __init__(self):
pass
def check_validate_yml(self, template):
"""
        Validate whether the yml file is valid.
"""
if template:
return True
else:
return False
def check_yml_struct(self, template):
"""
        Validate whether the structure of the yml is valid.
"""
if template:
return True
else:
return False
def check_template_exist(self, template):
"""
        Validate whether the template specified in the yml file exists.
"""
if template:
return True
else:
return False
def check_callback_protocol_endpoint(self, template):
"""
        Validate whether the protocol and endpoint are valid.
"""
return True
def check_template(self, template):
if self.check_validate_yml(template) \
and self.check_yml_struct(template) \
and self.check_template_exist(template) \
and self.check_callback_protocol_endpoint(template):
msg = {"status": True}
return msg
else:
            msg = {'status': False, 'message': 'problem in the yml file'}
return msg
def change_yml_to_json(content):
try:
template_json = yaml.safe_load(content)
return template_json
except yaml.YAMLError as error:
return {"message": str(error)}
| [] |
unrealTOM/MC | MetropolisMCMC.py | 5a4cdf1ee11ef3d438f24dd38e894731103448ac | import numpy as np
import matplotlib.pyplot as plt
import math
def normal(mu,sigma,x): #normal distribution
return 1/(math.pi*2)**0.5/sigma*np.exp(-(x-mu)**2/2/sigma**2)
def eval(x):
return normal(-4,1,x) + normal(4,1,x)
#return 0.3*np.exp(-0.2*x**2)+0.7*np.exp(-0.2*(x-10)**2)
def ref(x_star,x): #normal distribution
return normal(x,10,x_star)
N = [100,500,1000,5000]
fig = plt.figure()
for i in range(4):
X = np.array([])
x = 0.1 #initialize x0 to be 0.1
for j in range(N[i]):
u = np.random.rand()
x_star = np.random.normal(x,10)
A = min(1,eval(x_star)/eval(x)) #*q(x,x_star)/p(x)/q(x_star,x))
if u < A:
x = x_star
X=np.hstack((X,x))
ax = fig.add_subplot(2,2,i+1)
ax.hist(X,bins=100,density=True)
x = np.linspace(-10,20,5000)
#ax.plot(x,eval(x)/2.7) #2.7 approximates the normalizing constant
ax.plot(x,eval(x)/2) #2 approximates the normalizing constant
ax.set_ylim(0,0.35)
ax.text(-9,0.25,'I=%d'%N[i])
fig.suptitle('Metropolis_Hastings for MCMC(Normal)')
#fig.suptitle('Metropolis_Hastings for MCMC(Exp.)')
plt.savefig('MetropolisNormal.png',dpi=100)
#plt.savefig('MetropolisExp.png',dpi=100)
plt.show()
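# Hedged note: the proposal N(x, 10) is symmetric, so the Hastings correction q(x|x*)/q(x*|x) in the
# commented-out ratio above cancels and A = min(1, p(x*)/p(x)). With an asymmetric proposal the
# acceptance would be, in terms of the helpers defined above:
#   A = min(1, eval(x_star) * ref(x, x_star) / (eval(x) * ref(x_star, x)))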
| [((16, 6, 16, 18), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((38, 0, 38, 43), 'matplotlib.pyplot.savefig', 'plt.savefig', (), '', True, 'import matplotlib.pyplot as plt\n'), ((40, 0, 40, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((18, 8, 18, 20), 'numpy.array', 'np.array', ({(18, 17, 18, 19): '[]'}, {}), '([])', True, 'import numpy as np\n'), ((30, 8, 30, 32), 'numpy.linspace', 'np.linspace', ({(30, 20, 30, 23): '-10', (30, 24, 30, 26): '20', (30, 27, 30, 31): '5000'}, {}), '(-10, 20, 5000)', True, 'import numpy as np\n'), ((6, 36, 6, 65), 'numpy.exp', 'np.exp', ({(6, 43, 6, 64): '(-(x - mu) ** 2 / 2 / sigma ** 2)'}, {}), '(-(x - mu) ** 2 / 2 / sigma ** 2)', True, 'import numpy as np\n'), ((21, 12, 21, 28), 'numpy.random.rand', 'np.random.rand', ({}, {}), '()', True, 'import numpy as np\n'), ((22, 17, 22, 39), 'numpy.random.normal', 'np.random.normal', ({(22, 34, 22, 35): 'x', (22, 36, 22, 38): '10'}, {}), '(x, 10)', True, 'import numpy as np\n'), ((26, 10, 26, 26), 'numpy.hstack', 'np.hstack', ({(26, 20, 26, 25): '(X, x)'}, {}), '((X, x))', True, 'import numpy as np\n')] |
lipeijian/shadowsocks-android | gfwlist/gen.py | ef707e4383a0d430775c8ac9b660c334e87e40ec | #!/usr/bin/python
# -*- encoding: utf8 -*-
import itertools
import math
import sys
import IPy
def main():
china_list_set = IPy.IPSet()
for line in sys.stdin:
china_list_set.add(IPy.IP(line))
    # Output the result
for ip in china_list_set:
print '<item>' + str(ip) + '</item>'
if __name__ == "__main__":
main()
| [] |
lastralab/Statistics | Specialization/Personal/SortHours.py | 358679f2e749db2e23c655795b34382c84270704 | name = "mail.txt"
counts = dict()
handle = open(name)
for line in handle:
line = line.rstrip()
if line == '':
continue
words = line.split()
if words[0] == 'From':
counts[words[5][:2]] = counts.get(words[5][:2], 0) + 1
tlist = list()
for key, value in counts.items():
newtup = (key, value)
tlist.append(newtup)
tlist.sort()
for key, value in tlist:
print key, value
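# Descriptive note (assuming the usual mbox 'From ' line format): the script tallies messages by
# hour of day, since words[5][:2] is the HH part of the timestamp, then prints hour/count pairs
# sorted by hour.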
| [] |
RangiLyu/DI-drive | core/simulators/carla_scenario_simulator.py | f7db2e7b19d70c05184d6d6edae6b7e035a324d7 | import os
from typing import Any, Dict, List, Optional
import carla
from core.simulators.carla_simulator import CarlaSimulator
from core.simulators.carla_data_provider import CarlaDataProvider
from .srunner.scenarios.route_scenario import RouteScenario, SCENARIO_CLASS_DICT
from .srunner.scenariomanager.scenario_manager import ScenarioManager
class CarlaScenarioSimulator(CarlaSimulator):
"""
    Carla simulator used to run scenarios.
    The simulator loads the config of the provided scenario, and creates the hero actor, NPC vehicles, walkers and world
    map according to it. The sensors and running status are set up as in the common Carla simulator.
    When created, it will set up the Carla client according to the arguments, set simulator basic configurations used all
    around its lifetime, and set some default running configurations.
    If no traffic manager port is provided, it will find a random free port in the system.
:Arguments:
- cfg (Dict): Config Dict.
- client (carla.Client, optional): Already established Carla client. Defaults to None.
- host (str, optional): TCP host Carla client link to. Defaults to 'localhost'.
- port (int, optional): TCP port Carla client link to. Defaults to 9000.
- tm_port (int, optional): Traffic manager port Carla client link to. Defaults to None.
- timeout (float, optional): Carla client link timeout. Defaults to 10.0.
:Interfaces:
init, get_state, get_sensor_data, get_navigation, get_information, apply_control, run_step, clean_up
:Properties:
- town_name (str): Current town name.
- hero_player (carla.Actor): hero actor in simulation.
- collided (bool): Whether collided in current episode.
- end_distance (float): Distance to target in current frame.
- end_timeout (float): Timeout for entire route provided by planner.
        - total_diatance (float): Distance for entire route provided by planner.
- scenario_manager (Any): Scenario Manager instance used to get running state.
"""
config = dict(
town='Town01',
weather='random',
sync_mode=True,
delta_seconds=0.1,
no_rendering=False,
auto_pilot=False,
n_vehicles=0,
n_pedestrians=0,
disable_two_wheels=False,
col_threshold=400,
resolution=1.0,
waypoint_num=20,
obs=list(),
planner=dict(),
aug=None,
verbose=True,
debug=False,
)
def __init__(
self,
cfg: Dict,
client: Optional[carla.Client] = None,
host: str = 'localhost',
port: int = 9000,
tm_port: int = 9050,
timeout: float = 10.0,
**kwargs
) -> None:
"""
Init Carla scenario simulator.
"""
super().__init__(cfg, client, host, port, tm_port, timeout)
self._resolution = self._cfg.resolution
self._scenario = None
self._start_scenario = False
self._manager = ScenarioManager(self._debug, self._sync_mode, self._client_timeout)
self._criteria_status = dict()
def init(self, config: Any) -> None:
"""
Init simulator episode with provided args.
        This method takes a scenario configuration instance to set up scenarios in the Carla server. The scenario could be
        a single scenario, or a route scenario together with several scenarios encountered while navigating the route. A
        scenario manager is used to manage and check the running status and tick scenarios. A local planner is set to trace
        the route and generate the target waypoint and road options in each tick. It will set world, map, vehicles and
        pedestrians according to the provided args and default configs, and reset the running status. If no collision
        happens when creating actors, the init will end and return.
:Arguments:
- config (Any): Scenario configuration instance, containing information about the scenarios.
"""
self._scenario_config = config
self.clean_up()
self._set_town(config.town)
self._set_weather(self._weather)
self._blueprints = self._world.get_blueprint_library()
while True:
self.clean_up()
CarlaDataProvider.set_client(self._client)
CarlaDataProvider.set_world(self._world)
CarlaDataProvider.set_traffic_manager_port(self._tm.get_port())
if CarlaDataProvider.get_map().name != config.town and CarlaDataProvider.get_map().name != "OpenDriveMap":
print("WARNING: The CARLA server uses the wrong map: {}".format(CarlaDataProvider.get_map().name))
print("WARNING: This scenario requires to use map: {}".format(config.town))
print("[SIMULATOR] Preparing scenario: " + config.name)
config.n_vehicles = self._n_vehicles
config.disable_two_wheels = self._disable_two_wheels
if "RouteScenario" in config.name:
self._scenario = RouteScenario(
world=self._world, config=config, debug_mode=self._debug, resolution=self._resolution
)
self._hero_actor = self._scenario.ego_vehicles[0]
self._prepare_observations()
self._manager.load_scenario(self._scenario)
self._planner.set_route(CarlaDataProvider.get_hero_vehicle_route(), clean=True)
self._total_distance = self._planner.distance_to_goal
self._end_timeout = self._scenario.route_timeout
else:
# select scenario
if config.type in SCENARIO_CLASS_DICT:
scenario_class = SCENARIO_CLASS_DICT[config.type]
ego_vehicles = []
for vehicle in config.ego_vehicles:
ego_vehicles.append(
CarlaDataProvider.request_new_actor(
vehicle.model,
vehicle.transform,
vehicle.rolename,
True,
color=vehicle.color,
actor_category=vehicle.category
)
)
self._scenario = scenario_class(
world=self._world, ego_vehicles=ego_vehicles, config=config, debug_mode=self._debug
)
else:
raise RuntimeError("Scenario '{}' not support!".format(config.type))
self._hero_actor = self._scenario.ego_vehicles[0]
self._prepare_observations()
self._manager.load_scenario(self._scenario)
self._planner.set_destination(config.route.data[0], config.route.data[1], clean=True)
self._total_distance = self._planner.distance_to_goal
self._spawn_pedestrians()
if self._ready():
if self._debug:
self._count_actors()
break
def run_step(self) -> None:
"""
Run one step simulation.
        This will tick the Carla world and scenarios, and update information for all sensors and measurements.
"""
if not self._start_scenario:
self._manager.start_scenario()
self._start_scenario = True
self._tick += 1
world_snapshot = self._world.get_snapshot()
timestamp = world_snapshot.timestamp
self._timestamp = timestamp.elapsed_seconds
self._manager.tick_scenario(timestamp)
if self._planner is not None:
self._planner.run_step()
self._collided = self._collision_sensor.collided
self._traffic_light_helper.tick()
if self._bev_wrapper is not None:
if CarlaDataProvider._hero_vehicle_route is not None:
self._bev_wrapper.tick()
def get_criteria(self) -> List:
"""
        Get the criteria status list of the scenario in the current frame. Only criteria related to the hero actor are counted.
:Returns:
List: Criteria list of scenario.
"""
criterion_list = self._manager.analyze_tick()
for name, actor_id, result, actual_value, expected_value in criterion_list:
if actor_id == self._hero_actor.id:
self._criteria_status.update({name: [result, actual_value, expected_value]})
return self._criteria_status
def end_scenario(self) -> None:
"""
End current scenario. Must be called before ending an episode.
"""
if self._start_scenario:
self._manager.end_scenario()
self._start_scenario = False
def clean_up(self) -> None:
"""
        Destroy all actors and sensors in the current world. Clear all messages saved in the simulator and data provider,
        and clean up running scenarios. This will NOT destroy the Carla client, so the simulator can use the same Carla
        client to start the next episode.
"""
if self._manager is not None:
self._manager.clean_up()
self._criteria_status.clear()
super().clean_up()
@property
def scenario_manager(self) -> Any:
return self._manager
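# Hedged usage sketch, based only on the interfaces documented in the class docstring above; the
# cfg and scenario_config objects and the loop condition are assumptions, not taken from this file:
#   simulator = CarlaScenarioSimulator(cfg, host='localhost', port=9000)
#   simulator.init(scenario_config)
#   while not done:                        # termination condition is application-specific
#       simulator.apply_control(control)   # control produced by the driving policy
#       simulator.run_step()
#       criteria = simulator.get_criteria()
#   simulator.end_scenario()
#   simulator.clean_up()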
| [((108, 12, 108, 54), 'core.simulators.carla_data_provider.CarlaDataProvider.set_client', 'CarlaDataProvider.set_client', ({(108, 41, 108, 53): 'self._client'}, {}), '(self._client)', False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((109, 12, 109, 52), 'core.simulators.carla_data_provider.CarlaDataProvider.set_world', 'CarlaDataProvider.set_world', ({(109, 40, 109, 51): 'self._world'}, {}), '(self._world)', False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((127, 40, 127, 82), 'core.simulators.carla_data_provider.CarlaDataProvider.get_hero_vehicle_route', 'CarlaDataProvider.get_hero_vehicle_route', ({}, {}), '()', False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((112, 15, 112, 42), 'core.simulators.carla_data_provider.CarlaDataProvider.get_map', 'CarlaDataProvider.get_map', ({}, {}), '()', False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((112, 67, 112, 94), 'core.simulators.carla_data_provider.CarlaDataProvider.get_map', 'CarlaDataProvider.get_map', ({}, {}), '()', False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((113, 80, 113, 107), 'core.simulators.carla_data_provider.CarlaDataProvider.get_map', 'CarlaDataProvider.get_map', ({}, {}), '()', False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((138, 28, 145, 29), 'core.simulators.carla_data_provider.CarlaDataProvider.request_new_actor', 'CarlaDataProvider.request_new_actor', (), '', False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n')] |
Conengmo/python-empty-project | bin/run.py | 18d275422116577d48ae4fdbe1c93501a5e6ef78 | import myproject
myproject.logs(show_level='debug')
myproject.mymod.do_something()
| [((3, 0, 3, 34), 'myproject.logs', 'myproject.logs', (), '', False, 'import myproject\n'), ((6, 0, 6, 30), 'myproject.mymod.do_something', 'myproject.mymod.do_something', ({}, {}), '()', False, 'import myproject\n')] |
gerold-penz/python-simplemail | development/simple_email.py | 9cfae298743af2b771d6d779717b602de559689b | #!/usr/bin/env python
# coding: utf-8
# BEGIN --- required only for testing, remove in real world code --- BEGIN
import os
import sys
THISDIR = os.path.dirname(os.path.abspath(__file__))
APPDIR = os.path.abspath(os.path.join(THISDIR, os.path.pardir, os.path.pardir))
sys.path.insert(0, APPDIR)
# END --- required only for testing, remove in real world code --- END
import simplemail
simplemail.Email(
smtp_server = "smtp.a1.net:25",
smtp_user = "xxx",
smtp_password = "xxx",
use_tls = False,
from_address = "xxx",
to_address = "xxx",
subject = u"Really simple test with umlauts (öäüß)",
message = u"This is the message with umlauts (öäüß)",
).send()
print "Sent"
print
| [] |
DerekYJC/bmi_python | features/hdf_features.py | 7b9cf3f294a33688db24b0863c1035e9cc6999ea | '''
HDF-saving features
'''
import time
import tempfile
import random
import traceback
import numpy as np
import fnmatch
import os, sys
import subprocess
from riglib import calibrations, bmi
from riglib.bmi import extractor
from riglib.experiment import traits
import hdfwriter
class SaveHDF(object):
'''
Saves data from registered sources into tables in an HDF file
'''
def init(self):
'''
Secondary init function. See riglib.experiment.Experiment.init()
Prior to starting the task, this 'init' starts an HDFWriter sink.
'''
from riglib import sink
self.sinks = sink.sinks
self.h5file = tempfile.NamedTemporaryFile(suffix=".h5", delete=False)
self.h5file.flush()
self.h5file.close()
self.hdf = sink.sinks.start(self.sink_class, filename=self.h5file.name)
super(SaveHDF, self).init()
@property
def sink_class(self):
'''
Specify the sink class as a function in case future descendant classes want to use a different type of sink
'''
return hdfwriter.HDFWriter
def run(self):
'''
Code to execute immediately prior to the beginning of the task FSM executing, or after the FSM has finished running.
See riglib.experiment.Experiment.run(). This 'run' method stops the HDF sink after the FSM has finished running
'''
try:
super(SaveHDF, self).run()
finally:
self.hdf.stop()
def join(self):
'''
Re-join any spawned process for cleanup
'''
self.hdf.join()
super(SaveHDF, self).join()
def set_state(self, condition, **kwargs):
'''
Save task state transitions to HDF
Parameters
----------
condition: string
Name of new state to transition into. The state name must be a key in the 'status' dictionary attribute of the task
Returns
-------
None
'''
self.hdf.sendMsg(condition)
super(SaveHDF, self).set_state(condition, **kwargs)
def record_annotation(self, msg):
""" Record a user-input annotation """
self.hdf.sendMsg("annotation: " + msg)
super(SaveHDF, self).record_annotation(msg)
print("Saved annotation to HDF: " + msg)
def get_h5_filename(self):
return self.h5file.name
def cleanup(self, database, saveid, **kwargs):
'''
See LogExperiment.cleanup for documentation
'''
super(SaveHDF, self).cleanup(database, saveid, **kwargs)
print("Beginning HDF file cleanup")
print("\tHDF data currently saved to temp file: %s" % self.h5file.name)
try:
print("\tRunning self.cleanup_hdf()")
self.cleanup_hdf()
except:
print("\n\n\n\n\nError cleaning up HDF file!")
import traceback
traceback.print_exc()
# this 'if' is needed because the remote procedure call to save_data doesn't like kwargs
dbname = kwargs['dbname'] if 'dbname' in kwargs else 'default'
if dbname == 'default':
database.save_data(self.h5file.name, "hdf", saveid)
else:
database.save_data(self.h5file.name, "hdf", saveid, dbname=dbname)
| [((29, 22, 29, 77), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (), '', False, 'import tempfile\n'), ((32, 19, 32, 79), 'riglib.sink.sinks.start', 'sink.sinks.start', (), '', False, 'from riglib import sink\n'), ((98, 12, 98, 33), 'traceback.print_exc', 'traceback.print_exc', ({}, {}), '()', False, 'import traceback\n')] |
vaginessa/irma | common/irma/common/exceptions.py | 02285080b67b25ef983a99a765044683bd43296c | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
class IrmaDependencyError(Exception):
"""Error caused by a missing dependency."""
pass
class IrmaMachineManagerError(Exception):
"""Error on a machine manager."""
pass
class IrmaMachineError(Exception):
"""Error on a machine."""
pass
class IrmaAdminError(Exception):
"""Error in admin part."""
pass
class IrmaDatabaseError(Exception):
"""Error on a database manager."""
pass
class IrmaCoreError(Exception):
"""Error in core parts (Db, Ftp, Celery..)"""
pass
class IrmaDatabaseResultNotFound(IrmaDatabaseError):
"""A database result was required but none was found."""
pass
class IrmaFileSystemError(IrmaDatabaseError):
"""Nothing corresponding to the request has been found in the database."""
pass
class IrmaConfigurationError(IrmaCoreError):
"""Error wrong configuration."""
pass
class IrmaFtpError(IrmaCoreError):
"""Error on ftp manager."""
pass
class IrmaFTPSError(IrmaFtpError):
"""Error on ftp/tls manager."""
pass
class IrmaSFTPError(IrmaFtpError):
"""Error on sftp manager."""
pass
class IrmaTaskError(IrmaCoreError):
"""Error while processing celery tasks."""
pass
class IrmaLockError(Exception):
"""Error for the locks on db content (already taken)"""
pass
class IrmaLockModeError(Exception):
"""Error for the mode of the locks (doesn't exist)"""
pass
class IrmaValueError(Exception):
"""Error for the parameters passed to the functions"""
pass
| [] |
sunmengnan/city_brain | tf_crnn/libs/infer.py | 478f0b974f4491b4201956f37b83ce6860712bc8 | import time
import os
import math
import numpy as np
from libs import utils
from libs.img_dataset import ImgDataset
from nets.crnn import CRNN
from nets.cnn.paper_cnn import PaperCNN
import shutil
def calculate_accuracy(predicts, labels):
"""
    :param predicts: decoded prediction results
    :param labels: ground-truth labels
:return: accuracy
"""
assert len(predicts) == len(labels)
correct_count = 0
for i, p_label in enumerate(predicts):
if p_label == labels[i]:
correct_count += 1
acc = correct_count / len(predicts)
return acc, correct_count
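# e.g. calculate_accuracy(['ab', 'cd'], ['ab', 'ce']) returns (0.5, 1)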
def calculate_edit_distance_mean(edit_distences):
"""
    Compute the mean edit distance, excluding entries where edit_distance == 0.
:param edit_distences:
:return:
"""
data = np.array(edit_distences)
data = data[data != 0]
if len(data) == 0:
return 0
return np.mean(data)
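# e.g. calculate_edit_distance_mean([0, 0, 2, 4]) returns 3.0 (zeros are excluded before averaging)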
def validation(sess, feeds, fetches, dataset, converter, result_dir, name,
step=None, print_batch_info=False, copy_failed=False):
"""
Save file name: {acc}_{step}.txt
:param sess: tensorflow session
:param model: crnn network
:param result_dir:
:param name: val, test, infer. used to create sub dir in result_dir
:return:
"""
sess.run(dataset.init_op)
img_paths = []
predicts = []
trimed_predicts = []
labels = []
trimed_labels = []
edit_distances = []
total_batch_time = 0
for batch in range(dataset.num_batches):
img_batch, widths, label_batch, batch_labels, batch_img_paths = dataset.get_next_batch(sess)
if len(batch_labels) == 0:
continue
batch_start_time = time.time()
feed = {feeds['inputs']: img_batch,
feeds['labels']: label_batch,
feeds['sequence_length']: PaperCNN.get_sequence_lengths(widths),
feeds['is_training']: False}
try:
batch_predicts, edit_distance, batch_edit_distances = sess.run(fetches, feed)
except Exception:
print(batch_labels)
continue
batch_predicts = [converter.decode(p, CRNN.CTC_INVALID_INDEX) for p in batch_predicts]
trimed_batch_predicts = [utils.remove_all_symbols(txt) for txt in batch_predicts]
trimed_batch_labels = [utils.remove_all_symbols(txt) for txt in batch_labels]
img_paths.extend(batch_img_paths)
predicts.extend(batch_predicts)
labels.extend(batch_labels)
trimed_predicts.extend(trimed_batch_predicts)
trimed_labels.extend(trimed_batch_labels)
edit_distances.extend(batch_edit_distances)
acc, correct_count = calculate_accuracy(batch_predicts, batch_labels)
trimed_acc, trimed_correct_count = calculate_accuracy(trimed_batch_predicts, trimed_batch_labels)
batch_time = time.time() - batch_start_time
total_batch_time += batch_time
if print_batch_info:
print("{:.03f}s [{}/{}] acc: {:.03f}({}/{}), edit_distance: {:.03f}, trim_acc {:.03f}({}/{})"
.format(batch_time, batch, dataset.num_batches,
acc, correct_count, dataset.batch_size,
edit_distance,
trimed_acc, trimed_correct_count, dataset.batch_size))
acc, correct_count = calculate_accuracy(predicts, labels)
trimed_acc, trimed_correct_count = calculate_accuracy(trimed_predicts, trimed_labels)
edit_distance_mean = calculate_edit_distance_mean(edit_distances)
total_edit_distance = sum(edit_distances)
acc_str = "Accuracy: {:.03f} ({}/{}), Trimed Accuracy: {:.03f} ({}/{})" \
"Total edit distance: {:.03f}, " \
"Average edit distance: {:.03f}, Average batch time: {:.03f}" \
.format(acc, correct_count, dataset.size,
trimed_acc, trimed_correct_count, dataset.size,
total_edit_distance, edit_distance_mean, total_batch_time / dataset.num_batches)
print(acc_str)
save_dir = os.path.join(result_dir, name)
utils.check_dir_exist(save_dir)
result_file_path = save_txt_result(save_dir, acc, step, labels, predicts, 'acc',
edit_distances, acc_str)
save_txt_result(save_dir, acc, step, labels, predicts, 'acc', edit_distances,
acc_str, only_failed=True)
save_txt_result(save_dir, trimed_acc, step, trimed_labels, trimed_predicts, 'tacc',
edit_distances)
save_txt_result(save_dir, trimed_acc, step, trimed_labels, trimed_predicts, 'tacc',
edit_distances, only_failed=True)
save_txt_4_analyze(save_dir, labels, predicts, 'acc', step)
save_txt_4_analyze(save_dir, trimed_labels, trimed_predicts, 'tacc', step)
# Copy image not all match to a dir
# TODO: we will only save failed imgs for acc
if copy_failed:
failed_infer_img_dir = result_file_path[:-4] + "_failed"
if os.path.exists(failed_infer_img_dir) and os.path.isdir(failed_infer_img_dir):
shutil.rmtree(failed_infer_img_dir)
utils.check_dir_exist(failed_infer_img_dir)
failed_image_indices = []
for i, val in enumerate(edit_distances):
if val != 0:
failed_image_indices.append(i)
for i in failed_image_indices:
img_path = img_paths[i]
img_name = img_path.split("/")[-1]
dst_path = os.path.join(failed_infer_img_dir, img_name)
shutil.copyfile(img_path, dst_path)
failed_infer_result_file_path = os.path.join(failed_infer_img_dir, "result.txt")
with open(failed_infer_result_file_path, 'w', encoding='utf-8') as f:
for i in failed_image_indices:
p_label = predicts[i]
t_label = labels[i]
f.write("{}\n".format(img_paths[i]))
f.write("input: {:17s} length: {}\n".format(t_label, len(t_label)))
f.write("predict: {:17s} length: {}\n".format(p_label, len(p_label)))
f.write("edit distance: {}\n".format(edit_distances[i]))
f.write('-' * 30 + '\n')
return acc, trimed_acc, edit_distance_mean, total_edit_distance, correct_count, trimed_correct_count
def save_txt_4_analyze(save_dir, labels, predicts, acc_type, step):
"""
    Save the ground-truth labels and predictions of the test set into the same txt file, to make statistics easier.
"""
txt_path = os.path.join(save_dir, '%d_%s_gt_and_pred.txt' % (step, acc_type))
with open(txt_path, 'w', encoding='utf-8') as f:
for i, p_label in enumerate(predicts):
t_label = labels[i]
f.write("{}__$__{}\n".format(t_label, p_label))
def save_txt_result(save_dir, acc, step, labels, predicts, acc_type,
edit_distances=None, acc_str=None, only_failed=False):
"""
:param acc_type: 'acc' or 'tacc'
:return:
"""
failed_suffix = ''
if only_failed:
failed_suffix = 'failed'
if step is not None:
txt_path = os.path.join(save_dir, '%d_%s_%.3f_%s.txt' % (step, acc_type, acc, failed_suffix))
else:
txt_path = os.path.join(save_dir, '%s_%.3f_%s.txt' % (acc_type, acc, failed_suffix))
print("Write result to %s" % txt_path)
with open(txt_path, 'w', encoding='utf-8') as f:
for i, p_label in enumerate(predicts):
t_label = labels[i]
all_match = (t_label == p_label)
if only_failed and all_match:
continue
# f.write("{}\n".format(img_paths[i]))
f.write("input: {:17s} length: {}\n".format(t_label, len(t_label)))
f.write("predict: {:17s} length: {}\n".format(p_label, len(p_label)))
f.write("all match: {}\n".format(1 if all_match else 0))
if edit_distances:
f.write("edit distance: {}\n".format(edit_distances[i]))
f.write('-' * 30 + '\n')
if acc_str:
f.write(acc_str + "\n")
return txt_path
| [((37, 11, 37, 35), 'numpy.array', 'np.array', ({(37, 20, 37, 34): 'edit_distences'}, {}), '(edit_distences)', True, 'import numpy as np\n'), ((41, 11, 41, 24), 'numpy.mean', 'np.mean', ({(41, 19, 41, 23): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((119, 15, 119, 45), 'os.path.join', 'os.path.join', ({(119, 28, 119, 38): 'result_dir', (119, 40, 119, 44): 'name'}, {}), '(result_dir, name)', False, 'import os\n'), ((120, 4, 120, 35), 'libs.utils.check_dir_exist', 'utils.check_dir_exist', ({(120, 26, 120, 34): 'save_dir'}, {}), '(save_dir)', False, 'from libs import utils\n'), ((175, 15, 175, 81), 'os.path.join', 'os.path.join', ({(175, 28, 175, 36): 'save_dir', (175, 38, 175, 80): "'%d_%s_gt_and_pred.txt' % (step, acc_type)"}, {}), "(save_dir, '%d_%s_gt_and_pred.txt' % (step, acc_type))", False, 'import os\n'), ((69, 27, 69, 38), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((144, 8, 144, 51), 'libs.utils.check_dir_exist', 'utils.check_dir_exist', ({(144, 30, 144, 50): 'failed_infer_img_dir'}, {}), '(failed_infer_img_dir)', False, 'from libs import utils\n'), ((157, 40, 157, 88), 'os.path.join', 'os.path.join', ({(157, 53, 157, 73): 'failed_infer_img_dir', (157, 75, 157, 87): '"""result.txt"""'}, {}), "(failed_infer_img_dir, 'result.txt')", False, 'import os\n'), ((194, 19, 194, 101), 'os.path.join', 'os.path.join', ({(194, 32, 194, 40): 'save_dir', (194, 42, 194, 100): "'%d_%s_%.3f_%s.txt' % (step, acc_type, acc, failed_suffix)"}, {}), "(save_dir, '%d_%s_%.3f_%s.txt' % (step, acc_type, acc,\n failed_suffix))", False, 'import os\n'), ((196, 19, 196, 92), 'os.path.join', 'os.path.join', ({(196, 32, 196, 40): 'save_dir', (196, 42, 196, 91): "'%s_%.3f_%s.txt' % (acc_type, acc, failed_suffix)"}, {}), "(save_dir, '%s_%.3f_%s.txt' % (acc_type, acc, failed_suffix))", False, 'import os\n'), ((73, 42, 73, 79), 'nets.cnn.paper_cnn.PaperCNN.get_sequence_lengths', 'PaperCNN.get_sequence_lengths', ({(73, 72, 73, 78): 'widths'}, {}), '(widths)', False, 'from nets.cnn.paper_cnn import PaperCNN\n'), ((83, 33, 83, 62), 'libs.utils.remove_all_symbols', 'utils.remove_all_symbols', ({(83, 58, 83, 61): 'txt'}, {}), '(txt)', False, 'from libs import utils\n'), ((84, 31, 84, 60), 'libs.utils.remove_all_symbols', 'utils.remove_all_symbols', ({(84, 56, 84, 59): 'txt'}, {}), '(txt)', False, 'from libs import utils\n'), ((96, 21, 96, 32), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((141, 11, 141, 47), 'os.path.exists', 'os.path.exists', ({(141, 26, 141, 46): 'failed_infer_img_dir'}, {}), '(failed_infer_img_dir)', False, 'import os\n'), ((141, 52, 141, 87), 'os.path.isdir', 'os.path.isdir', ({(141, 66, 141, 86): 'failed_infer_img_dir'}, {}), '(failed_infer_img_dir)', False, 'import os\n'), ((142, 12, 142, 47), 'shutil.rmtree', 'shutil.rmtree', ({(142, 26, 142, 46): 'failed_infer_img_dir'}, {}), '(failed_infer_img_dir)', False, 'import shutil\n'), ((154, 23, 154, 67), 'os.path.join', 'os.path.join', ({(154, 36, 154, 56): 'failed_infer_img_dir', (154, 58, 154, 66): 'img_name'}, {}), '(failed_infer_img_dir, img_name)', False, 'import os\n'), ((155, 12, 155, 47), 'shutil.copyfile', 'shutil.copyfile', ({(155, 28, 155, 36): 'img_path', (155, 38, 155, 46): 'dst_path'}, {}), '(img_path, dst_path)', False, 'import shutil\n')] |
giTan7/30-Days-Of-Code | Day 2/Day_2_Python.py | f023a2bf1b5e58e1eb5180162443b9cd4b6b2ff8 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the solve function below.
def solve(meal_cost, tip_percent, tax_percent):
tip = (meal_cost * tip_percent)/100
tax = (meal_cost * tax_percent)/100
print(int(meal_cost + tip + tax + 0.5))
# We add 0.5 because the float should be rounded to the nearest integer
if __name__ == '__main__':
meal_cost = float(input())
tip_percent = int(input())
tax_percent = int(input())
solve(meal_cost, tip_percent, tax_percent)
# Time complexity: O(1)
# Space complexity: O(1)
| [] |
nursix/rlpptm | modules/templates/RLPPTM/tools/mis.py | e7b50b2fdf6277aed5f198ca10ad773c5ca0b947 | # -*- coding: utf-8 -*-
#
# Helper Script for Mass-Invitation of Participant Organisations
#
# RLPPTM Template Version 1.0
#
# Execute in web2py folder after code upgrade like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/RLPPTM/tools/mis.py
#
import os
import sys
from core import s3_format_datetime
from templates.RLPPTM.config import SCHOOLS
from templates.RLPPTM.helpers import InviteUserOrg
# Batch limit (set to False to disable)
BATCH_LIMIT = 250
# Override auth (disables all permission checks)
auth.override = True
# Failed-flag
failed = False
# Info
log = None
def info(msg):
sys.stderr.write("%s" % msg)
if log:
log.write("%s" % msg)
def infoln(msg):
sys.stderr.write("%s\n" % msg)
if log:
log.write("%s\n" % msg)
# Load models for tables
otable = s3db.org_organisation
gtable = s3db.org_group
mtable = s3db.org_group_membership
utable = s3db.auth_user
oltable = s3db.org_organisation_user
pltable = s3db.pr_person_user
ctable = s3db.pr_contact
timestmp = s3_format_datetime(dtfmt="%Y%m%d%H%M%S")
LOGFILE = os.path.join(request.folder, "private", "mis_%s.log" % timestmp)
# -----------------------------------------------------------------------------
# Invite organisations
#
if not failed:
try:
with open(LOGFILE, "w", encoding="utf-8") as logfile:
log = logfile
join = [mtable.on((mtable.organisation_id == otable.id) & \
(mtable.deleted == False)),
gtable.on((gtable.id == mtable.group_id) & \
(gtable.name == SCHOOLS) & \
(gtable.deleted == False)),
]
query = (otable.deleted == False)
organisations = db(query).select(otable.id,
otable.pe_id,
otable.name,
join = join,
orderby = otable.id,
)
total = len(organisations)
infoln("Total: %s Organisations" % total)
infoln("")
skipped = sent = failures = 0
invite_org = InviteUserOrg.invite_account
for organisation in organisations:
info("%s..." % organisation.name)
# Get all accounts that are linked to this org
organisation_id = organisation.id
join = oltable.on((oltable.user_id == utable.id) & \
(oltable.deleted == False))
left = pltable.on((pltable.user_id == utable.id) & \
(pltable.deleted == False))
query = (oltable.organisation_id == organisation_id)
rows = db(query).select(utable.id,
utable.email,
utable.registration_key,
pltable.pe_id,
join = join,
left = left,
)
if rows:
# There are already accounts linked to this organisation
invited, registered = [], []
for row in rows:
username = row.auth_user.email
if row.pr_person_user.pe_id:
registered.append(username)
else:
invited.append(username)
if registered:
infoln("already registered (%s)." % ", ".join(registered))
else:
infoln("already invited (%s)." % ", ".join(invited))
skipped += 1
continue
# Find email address
query = (ctable.pe_id == organisation.pe_id) & \
(ctable.contact_method == "EMAIL") & \
(ctable.deleted == False)
contact = db(query).select(ctable.value,
orderby = ctable.priority,
limitby = (0, 1),
).first()
if contact:
email = contact.value
info("(%s)..." % email)
else:
infoln("no email address.")
skipped += 1
continue
error = invite_org(organisation, email, account=None)
if not error:
sent += 1
infoln("invited.")
db.commit()
else:
failures += 1
infoln("invitation failed (%s)." % error)
if BATCH_LIMIT and sent >= BATCH_LIMIT:
infoln("Batch limit (%s) reached" % BATCH_LIMIT)
skipped = total - (sent + failures)
break
infoln("")
infoln("%s invitations sent" % sent)
infoln("%s invitations failed" % failures)
infoln("%s organisations skipped" % skipped)
log = None
except IOError:
infoln("...failed (could not create logfile)")
failed = True
# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
db.rollback()
infoln("PROCESS FAILED - Action rolled back.")
else:
db.commit()
infoln("PROCESS SUCCESSFUL.")
| [((47, 11, 47, 51), 'core.s3_format_datetime', 's3_format_datetime', (), '', False, 'from core import s3_format_datetime\n'), ((48, 10, 48, 74), 'os.path.join', 'os.path.join', ({(48, 23, 48, 37): 'request.folder', (48, 39, 48, 48): '"""private"""', (48, 50, 48, 73): "'mis_%s.log' % timestmp"}, {}), "(request.folder, 'private', 'mis_%s.log' % timestmp)", False, 'import os\n'), ((30, 4, 30, 32), 'sys.stderr.write', 'sys.stderr.write', ({(30, 21, 30, 31): "('%s' % msg)"}, {}), "('%s' % msg)", False, 'import sys\n'), ((34, 4, 34, 34), 'sys.stderr.write', 'sys.stderr.write', ({(34, 21, 34, 33): "('%s\\n' % msg)"}, {}), "('%s\\n' % msg)", False, 'import sys\n')] |
harshp8l/deep-learning-lang-detection | data/train/python/22aec8fbe47f7975a1e7f4a0caa5c88c56e4a03e__init__.py | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | def save_form(form, actor=None):
"""Allows storing a form with a passed actor. Normally, Form.save() does not accept an actor, but if you require
    this to be passed (i.e. it is not handled by middleware), you can use this function instead of form.save().
Requires you to use the audit.Model model as the actor is passed to the object's save method.
"""
obj = form.save(commit=False)
obj.save(actor=actor)
form.save_m2m()
return obj
#def intermediate_save(instance, actor=None):
# """Allows saving of an instance, without storing the changes, but keeping the history. This allows you to perform
# intermediate saves:
#
# obj.value1 = 1
# intermediate_save(obj)
# obj.value2 = 2
# obj.save()
# <value 1 and value 2 are both stored in the database>
# """
# if hasattr(instance, '_audit_changes'):
# tmp = instance._audit_changes
# if actor:
# instance.save(actor=actor)
# else:
# instance.save()
# instance._audit_changes = tmp
# else:
# if actor:
# instance.save(actor=actor)
# else:
# instance.save()
| [] |
kingsd041/os-tests | engine/test_sysctl.py | 2ea57cb6f1da534633a4670ccb83d40300989886 | # coding = utf-8
# Create date: 2018-11-05
# Author :Hailong
def test_sysctl(ros_kvm_with_paramiko, cloud_config_url):
command = 'sudo cat /proc/sys/kernel/domainname'
feed_back = 'test'
client = ros_kvm_with_paramiko(cloud_config='{url}/test_sysctl.yml'.format(url=cloud_config_url))
stdin, stdout, stderr = client.exec_command(command, timeout=10)
output = stdout.read().decode('utf-8').replace('\n', '')
assert (feed_back == output)
command_b = 'sudo cat /proc/sys/dev/cdrom/debug'
feed_back_b = '1'
stdin, stdout, stderr = client.exec_command(command_b, timeout=10)
output_b = stdout.read().decode('utf-8').replace('\n', '')
client.close()
assert (feed_back_b == output_b)
| [] |
grschafer/paprika-sync | paprika_sync/core/management/commands/import_recipes_from_file.py | 8b6fcd6246557bb79009fa9355fd4d588fb8ed90 | import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from paprika_sync.core.models import PaprikaAccount
from paprika_sync.core.serializers import RecipeSerializer, CategorySerializer
from paprika_sync.core.utils import log_start_end
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Import all recipes from file to specified PaprikaAccount'
def add_arguments(self, parser):
parser.add_argument(
'file',
help='Path to json file containing list of all recipes',
)
parser.add_argument(
'--categories-file',
help='Path to json file containing list of all categories',
)
parser.add_argument(
'paprika_account_id',
type=int,
help='ID of PaprikaAccount to import recipes to',
)
parser.add_argument(
'-r', '--remove',
action='store_true',
help="Removes all of account's existing recipes before importing",
)
@log_start_end(logger)
def handle(self, *args, **options):
recipes_file = options['file']
categories_file = options['categories_file']
pa_id = options['paprika_account_id']
wipe_account = options['remove']
logger.info('Starting import for PaprikaAccount id %s from %s, wipe_account=%s', pa_id, recipes_file, wipe_account)
pa = PaprikaAccount.objects.get(id=pa_id)
with open(recipes_file, 'rt') as fin:
recipes = json.load(fin)
logger.info('Found %s recipes to import to %s', len(recipes), pa)
categories = []
if categories_file:
with open(categories_file, 'rt') as fin:
categories = json.load(fin)
logger.info('Found %s categories to import to %s', len(categories), pa)
with transaction.atomic():
if wipe_account:
pa.recipes.all().delete()
pa.categories.all().delete()
for category in categories:
category['paprika_account'] = pa.id
cs = CategorySerializer(data=category)
if cs.is_valid():
cs.save()
else:
logger.warning('Failed to import category %s (%s) due to errors: %s', category['uid'], category['name'], cs.errors)
for recipe in recipes:
# Remove categories if we're not bothering to import them
if not categories:
recipe['categories'] = []
recipe['paprika_account'] = pa.id
rs = RecipeSerializer(data=recipe)
if rs.is_valid():
rs.save()
else:
logger.warning('Failed to import recipe %s (%s) due to errors: %s', recipe['uid'], recipe['name'], rs.errors)
# recipe_field_names = set([f.name for f in Recipe._meta.fields])
# Recipe.objects.create(
# paprika_account=pa,
# **{k: v for k, v in recipe.items() if k in recipe_field_names},
# )
logger.info('Finished recipe import successfully')
# transaction.set_rollback(True)
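        # Hypothetical invocation (file paths and account id are placeholders; Django names the
        # command after this module file):
        #   python manage.py import_recipes_from_file recipes.json 42 --categories-file categories.json --remove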
| [((12, 9, 12, 36), 'logging.getLogger', 'logging.getLogger', ({(12, 27, 12, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((38, 5, 38, 26), 'paprika_sync.core.utils.log_start_end', 'log_start_end', ({(38, 19, 38, 25): 'logger'}, {}), '(logger)', False, 'from paprika_sync.core.utils import log_start_end\n'), ((47, 13, 47, 49), 'paprika_sync.core.models.PaprikaAccount.objects.get', 'PaprikaAccount.objects.get', (), '', False, 'from paprika_sync.core.models import PaprikaAccount\n'), ((49, 22, 49, 36), 'json.load', 'json.load', ({(49, 32, 49, 35): 'fin'}, {}), '(fin)', False, 'import json\n'), ((59, 13, 59, 33), 'django.db.transaction.atomic', 'transaction.atomic', ({}, {}), '()', False, 'from django.db import transaction\n'), ((56, 29, 56, 43), 'json.load', 'json.load', ({(56, 39, 56, 42): 'fin'}, {}), '(fin)', False, 'import json\n'), ((66, 21, 66, 54), 'paprika_sync.core.serializers.CategorySerializer', 'CategorySerializer', (), '', False, 'from paprika_sync.core.serializers import RecipeSerializer, CategorySerializer\n'), ((78, 21, 78, 50), 'paprika_sync.core.serializers.RecipeSerializer', 'RecipeSerializer', (), '', False, 'from paprika_sync.core.serializers import RecipeSerializer, CategorySerializer\n')] |
sot/mica | scripts/update_asp_l1.py | 136a9b0d9521efda5208067b51cf0c8700b4def3 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import mica.archive.asp_l1
mica.archive.asp_l1.main()
| [] |
hhgarnes/python-validity | pair.py | 82b42e4fd152f10f75584de56502fd9ada299bb5 |
from time import sleep
from proto9x.usb import usb
from proto9x.tls import tls
from proto9x.flash import read_flash
from proto9x.init_flash import init_flash
from proto9x.upload_fwext import upload_fwext
from proto9x.calibrate import calibrate
from proto9x.init_db import init_db
#usb.trace_enabled=True
#tls.trace_enabled=True
def restart():
print('Sleeping...')
sleep(3)
tls.reset()
usb.open()
usb.send_init()
tls.parseTlsFlash(read_flash(1, 0, 0x1000))
tls.open()
usb.open()
print('Initializing flash...')
init_flash()
restart()
print('Uploading firmware...')
upload_fwext()
restart()
print('Calibrating...')
calibrate()
print('Init database...')
init_db()
print('That\'s it, pairing\'s finished')
| [((24, 0, 24, 10), 'proto9x.usb.usb.open', 'usb.open', ({}, {}), '()', False, 'from proto9x.usb import usb\n'), ((26, 0, 26, 12), 'proto9x.init_flash.init_flash', 'init_flash', ({}, {}), '()', False, 'from proto9x.init_flash import init_flash\n'), ((30, 0, 30, 14), 'proto9x.upload_fwext.upload_fwext', 'upload_fwext', ({}, {}), '()', False, 'from proto9x.upload_fwext import upload_fwext\n'), ((34, 0, 34, 11), 'proto9x.calibrate.calibrate', 'calibrate', ({}, {}), '()', False, 'from proto9x.calibrate import calibrate\n'), ((37, 0, 37, 9), 'proto9x.init_db.init_db', 'init_db', ({}, {}), '()', False, 'from proto9x.init_db import init_db\n'), ((17, 4, 17, 12), 'time.sleep', 'sleep', ({(17, 10, 17, 11): '(3)'}, {}), '(3)', False, 'from time import sleep\n'), ((18, 4, 18, 15), 'proto9x.tls.tls.reset', 'tls.reset', ({}, {}), '()', False, 'from proto9x.tls import tls\n'), ((19, 4, 19, 14), 'proto9x.usb.usb.open', 'usb.open', ({}, {}), '()', False, 'from proto9x.usb import usb\n'), ((20, 4, 20, 19), 'proto9x.usb.usb.send_init', 'usb.send_init', ({}, {}), '()', False, 'from proto9x.usb import usb\n'), ((22, 4, 22, 14), 'proto9x.tls.tls.open', 'tls.open', ({}, {}), '()', False, 'from proto9x.tls import tls\n'), ((21, 22, 21, 46), 'proto9x.flash.read_flash', 'read_flash', ({(21, 33, 21, 34): '(1)', (21, 36, 21, 37): '(0)', (21, 39, 21, 45): '(4096)'}, {}), '(1, 0, 4096)', False, 'from proto9x.flash import read_flash\n')] |
tefra/xsdata-w3c-tests | output/models/ms_data/element/elem_q017_xsd/elem_q017.py | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | from dataclasses import dataclass, field
@dataclass
class FooTest:
class Meta:
name = "fooTest"
value: str = field(
init=False,
default="Hello"
)
@dataclass
class Root:
class Meta:
name = "root"
foo_test: str = field(
init=False,
default="Hello",
metadata={
"name": "fooTest",
"type": "Element",
"required": True,
}
)
| [((9, 17, 12, 5), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((20, 20, 28, 5), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n')] |
modelhub-ai/mic-dkfz-brats | contrib_src/predict.py | 4522a26442f1e323f97aa45fbd5047bfe9029b2b | import json
import os
from collections import OrderedDict
from copy import deepcopy
import SimpleITK as sitk
from batchgenerators.augmentations.utils import resize_segmentation # resize_softmax_output
from skimage.transform import resize
from torch.optim import lr_scheduler
from torch import nn
import numpy as np
import torch
from scipy.ndimage import binary_fill_holes
'''
This code is not intended to be looked at by anyone. It is messy. It is undocumented.
And the entire training pipeline is missing.
'''
max_num_filters_3d = 320
max_num_filters_2d = 480
join = os.path.join
def load_json(file):
with open(file, 'r') as f:
a = json.load(f)
return a
def resize_image(image, old_spacing, new_spacing, order=3, cval=0):
new_shape = (int(np.round(old_spacing[0]/new_spacing[0]*float(image.shape[0]))),
int(np.round(old_spacing[1]/new_spacing[1]*float(image.shape[1]))),
int(np.round(old_spacing[2]/new_spacing[2]*float(image.shape[2]))))
if any([i != j for i, j in zip(image.shape, new_shape)]):
res = resize(image, new_shape, order=order, mode='edge', cval=cval)
else:
res = image
return res
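# e.g. for an image of shape (10, 12, 14), old_spacing=(2, 2, 2) and new_spacing=(1, 1, 1)
# give new_shape=(20, 24, 28), i.e. the volume is upsampled by a factor of 2 per axis.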
class ConvDropoutNormNonlin(nn.Module):
def __init__(self, input_channels, output_channels,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None):
super(ConvDropoutNormNonlin, self).__init__()
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[
'p'] > 0:
self.dropout = self.dropout_op(**self.dropout_op_kwargs)
else:
self.dropout = None
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
self.lrelu = nn.LeakyReLU(**self.nonlin_kwargs)
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.lrelu(self.instnorm(x))
def pad_nd_image(image, new_shape=None, mode="edge", kwargs=None, return_slicer=False, shape_must_be_divisible_by=None):
if kwargs is None:
kwargs = {}
if new_shape is not None:
old_shape = np.array(image.shape[-len(new_shape):])
else:
assert shape_must_be_divisible_by is not None
assert isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray))
new_shape = image.shape[-len(shape_must_be_divisible_by):]
old_shape = new_shape
num_axes_nopad = len(image.shape) - len(new_shape)
new_shape = [max(new_shape[i], old_shape[i]) for i in range(len(new_shape))]
if not isinstance(new_shape, np.ndarray):
new_shape = np.array(new_shape)
if shape_must_be_divisible_by is not None:
if not isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray)):
shape_must_be_divisible_by = [shape_must_be_divisible_by] * len(new_shape)
else:
assert len(shape_must_be_divisible_by) == len(new_shape)
for i in range(len(new_shape)):
if new_shape[i] % shape_must_be_divisible_by[i] == 0:
new_shape[i] -= shape_must_be_divisible_by[i]
new_shape = np.array([new_shape[i] + shape_must_be_divisible_by[i] - new_shape[i] % shape_must_be_divisible_by[i] for i in range(len(new_shape))])
difference = new_shape - old_shape
pad_below = difference // 2
pad_above = difference // 2 + difference % 2
pad_list = [[0, 0]]*num_axes_nopad + list([list(i) for i in zip(pad_below, pad_above)])
res = np.pad(image, pad_list, mode, **kwargs)
if not return_slicer:
return res
else:
pad_list = np.array(pad_list)
pad_list[:, 1] = np.array(res.shape) - pad_list[:, 1]
slicer = list(slice(*i) for i in pad_list)
return res, slicer
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
def get_device(self):
if next(self.parameters()).device == "cpu":
return "cpu"
else:
return next(self.parameters()).device.index
def set_device(self, device):
if device == "cpu":
self.cpu()
else:
self.cuda(device)
def forward(self, x):
raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
def __init__(self):
self.input_shape_must_be_divisible_by = None
self.conv_op = None
super(NeuralNetwork, self).__init__()
self.inference_apply_nonlin = lambda x:x
def predict_3D(self, x, do_mirroring, num_repeats=1, use_train_mode=False, batch_size=1, mirror_axes=(2, 3, 4),
tiled=False, tile_in_z=True, step=2, patch_size=None, regions_class_order=None, use_gaussian=False,
pad_border_mode="edge", pad_kwargs=None):
"""
:param x: (c, x, y , z)
:param do_mirroring:
:param num_repeats:
:param use_train_mode:
:param batch_size:
:param mirror_axes:
:param tiled:
:param tile_in_z:
:param step:
:param patch_size:
:param regions_class_order:
:param use_gaussian:
:return:
"""
current_mode = self.training
if use_train_mode is not None and use_train_mode:
self.train()
elif use_train_mode is not None and not use_train_mode:
self.eval()
else:
pass
assert len(x.shape) == 4, "data must have shape (c,x,y,z)"
if self.conv_op == nn.Conv3d:
if tiled:
res = self._internal_predict_3D_3Dconv_tiled(x, num_repeats, batch_size, tile_in_z, step, do_mirroring,
mirror_axes, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_3Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
elif self.conv_op == nn.Conv2d:
if tiled:
res = self._internal_predict_3D_2Dconv_tiled(x, do_mirroring, num_repeats, batch_size, mirror_axes,
step, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_2Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
else:
raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
if use_train_mode is not None:
self.train(current_mode)
return res
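    # Test-time augmentation: the 8 mirror configurations below flip the volume along every
    # combination of the spatial axes (2, 3, 4); each prediction is flipped back before being
    # stacked so the caller can average them.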
def _internal_maybe_mirror_and_pred_3D(self, x, num_repeats, mirror_axes, do_mirroring=True):
with torch.no_grad():
a = torch.zeros(x.shape).float()
if self.get_device() == "cpu":
a = a.cpu()
else:
a = a.cuda(self.get_device())
if do_mirroring:
mirror_idx = 8
else:
mirror_idx = 1
all_preds = []
for i in range(num_repeats):
for m in range(mirror_idx):
data_for_net = np.array(x)
do_stuff = False
if m == 0:
do_stuff = True
pass
if m == 1 and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, ::-1]
if do_stuff:
_ = a.data.copy_(torch.from_numpy(np.copy(data_for_net)))
p = self.inference_apply_nonlin(self(a))
p = p.data.cpu().numpy()
if m == 0:
pass
if m == 1 and (4 in mirror_axes):
p = p[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
p = p[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
p = p[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, ::-1, ::-1]
all_preds.append(p)
return np.vstack(all_preds)
def _internal_predict_3D_3Dconv(self, x, do_mirroring, num_repeats, min_size=None, BATCH_SIZE=None,
mirror_axes=(2, 3, 4), regions_class_order=None, pad_border_mode="edge",
pad_kwargs=None):
with torch.no_grad():
x, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True, self.input_shape_must_be_divisible_by)
#x, old_shape = pad_patient_3D_incl_c(x, self.input_shape_must_be_divisible_by, min_size)
new_shp = x.shape
data = np.zeros(tuple([1] + list(new_shp)), dtype=np.float32)
data[0] = x
if BATCH_SIZE is not None:
data = np.vstack([data] * BATCH_SIZE)
stacked = self._internal_maybe_mirror_and_pred_3D(data, num_repeats, mirror_axes, do_mirroring)
slicer = [slice(0, stacked.shape[i]) for i in range(len(stacked.shape) - (len(slicer) - 1))] + slicer[1:]
stacked = stacked[slicer]
uncertainty = stacked.var(0)
bayesian_predictions = stacked
softmax_pred = stacked.mean(0)
if regions_class_order is None:
predicted_segmentation = softmax_pred.argmax(0)
else:
predicted_segmentation_shp = softmax_pred[0].shape
predicted_segmentation = np.zeros(predicted_segmentation_shp)
for i, c in enumerate(regions_class_order):
predicted_segmentation[softmax_pred[i] > 0.5] = c
return predicted_segmentation, bayesian_predictions, softmax_pred, uncertainty
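# softmax_helper: numerically stable softmax over the channel dimension (dim 1); the per-sample
# maximum is subtracted before exponentiation to avoid overflow.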
def softmax_helper(x):
rpt = [1 for _ in range(len(x.size()))]
rpt[1] = x.size(1)
x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
e_x = torch.exp(x - x_max)
return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
class InitWeights_He(object):
def __init__(self, neg_slope=1e-2):
self.neg_slope = neg_slope
def __call__(self, module):
if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
            module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
if module.bias is not None:
module.bias = nn.init.constant_(module.bias, 0)
class StackedConvLayers(nn.Module):
def __init__(self, input_feature_channels, output_feature_channels, num_convs,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None):
self.input_channels = input_feature_channels
self.output_channels = output_feature_channels
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
if first_stride is not None:
self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
self.conv_kwargs_first_conv['stride'] = first_stride
else:
self.conv_kwargs_first_conv = conv_kwargs
super(StackedConvLayers, self).__init__()
self.blocks = nn.Sequential(
*([ConvDropoutNormNonlin(input_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs_first_conv,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs)] +
[ConvDropoutNormNonlin(output_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))
def forward(self, x):
return self.blocks(x)
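# soft_dice: negative soft Dice loss, computed over the spatial axes and averaged over batch and
# channels; the smooth terms keep the ratio well defined for empty masks.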
def soft_dice(net_output, gt, smooth=1., smooth_in_nom=1.):
axes = tuple(range(2, len(net_output.size())))
intersect = sum_tensor(net_output * gt, axes, keepdim=False)
denom = sum_tensor(net_output + gt, axes, keepdim=False)
result = (- ((2 * intersect + smooth_in_nom) / (denom + smooth))).mean()
return result
def sum_tensor(input, axes, keepdim=False):
axes = np.unique(axes)
if keepdim:
for ax in axes:
input = input.sum(ax, keepdim=True)
else:
for ax in sorted(axes, reverse=True):
input = input.sum(ax)
return input
class Generic_UNet_Cotraining(SegmentationNetwork):
def __init__(self, input_channels, base_num_features, num_classes, num_conv_per_stage=2, num_downscale=4,
feat_map_mul_on_downscale=2, conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False):
"""
        Have fun looking at that one. This is my go-to model. I crammed the cotraining code in there somehow, so yeah.
What a mess.
You know what's the best part? No documentation. What a great piece of code.
:param input_channels:
:param base_num_features:
:param num_classes:
:param num_conv_per_stage:
:param num_downscale:
:param feat_map_mul_on_downscale:
:param conv_op:
:param conv_kwargs:
:param norm_op:
:param norm_op_kwargs:
:param dropout_op:
:param dropout_op_kwargs:
:param nonlin:
:param nonlin_kwargs:
:param deep_supervision:
:param dropout_in_localization:
:param final_nonlin:
:param weightInitializer:
:param pool_op_kernel_sizes:
:param upscale_logits:
:param convolutional_pooling:
:param convolutional_upsampling:
"""
super(Generic_UNet_Cotraining, self).__init__()
assert isinstance(num_classes, (list, tuple)), "for cotraining, num_classes must be list or tuple of int"
self.num_classes = num_classes
self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0)
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.convolutional_upsampling = convolutional_upsampling
self.convolutional_pooling = convolutional_pooling
self.upscale_logits = upscale_logits
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p':0.5, 'inplace':True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True}
self.nonlin = nonlin
self.nonlin_kwargs = nonlin_kwargs
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.weightInitializer = weightInitializer
self.conv_op = conv_op
self.norm_op = norm_op
self.dropout_op = dropout_op
if pool_op_kernel_sizes is None:
if conv_op == nn.Conv2d:
pool_op_kernel_sizes = [(2, 2)] * num_downscale
elif conv_op == nn.Conv3d:
pool_op_kernel_sizes = [(2, 2, 2)] * num_downscale
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.final_nonlin = final_nonlin
assert num_conv_per_stage > 1, "this implementation does not support only one conv per stage"
if conv_op == nn.Conv2d:
upsample_mode = 'bilinear'
pool_op = nn.MaxPool2d
transpconv = nn.ConvTranspose2d
elif conv_op == nn.Conv3d:
upsample_mode = 'trilinear'
pool_op = nn.MaxPool3d
transpconv = nn.ConvTranspose3d
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.do_ds = deep_supervision
self.conv_blocks_context = []
self.conv_blocks_localization = []
self.td = []
self.tu = []
self.seg_outputs = []
output_features = base_num_features
input_features = input_channels
for d in range(num_downscale):
if d != 0 and self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[d-1]
else:
first_stride = None
self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
self.conv_op, self.conv_kwargs, self.norm_op,
self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
first_stride))
if not self.convolutional_pooling:
self.td.append(pool_op(pool_op_kernel_sizes[d]))
input_features = output_features
output_features = int(np.round(output_features * feat_map_mul_on_downscale))
if self.conv_op == nn.Conv3d:
output_features = min(output_features, max_num_filters_3d)
else:
output_features = min(output_features, max_num_filters_2d)
if self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[-1]
else:
first_stride = None
if self.convolutional_upsampling:
final_num_features = output_features
else:
final_num_features = self.conv_blocks_context[-1].output_channels
self.conv_blocks_context.append(nn.Sequential(
StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, first_stride),
StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs)))
if not dropout_in_localization:
old_dropout_p = self.dropout_op_kwargs['p']
self.dropout_op_kwargs['p'] = 0.0
for u in range(num_downscale):
nfeatures_from_down = final_num_features
nfeatures_from_skip = self.conv_blocks_context[-(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2
n_features_after_tu_and_concat = nfeatures_from_skip * 2
# the first conv reduces the number of features to match those of skip
# the following convs work on that number of features
# if not convolutional upsampling then the final conv reduces the num of features again
if u != num_downscale-1 and not self.convolutional_upsampling:
final_num_features = self.conv_blocks_context[-(3 + u)].output_channels
else:
final_num_features = nfeatures_from_skip
if not self.convolutional_upsampling:
self.tu.append(nn.Upsample(scale_factor=pool_op_kernel_sizes[-(u+1)], mode=upsample_mode))
else:
self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u+1)], pool_op_kernel_sizes[-(u+1)], bias=False))
self.conv_blocks_localization.append(nn.Sequential(
StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs),
StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs)
))
for ds in range(len(self.conv_blocks_localization)):
self.seg_outputs.append(nn.ModuleList([conv_op(self.conv_blocks_localization[ds][-1].output_channels, i, 1, 1, 0, 1, 1, False) for i in num_classes]))
self.upscale_logits_ops = []
cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
for usl in range(num_downscale - 1):
if self.upscale_logits:
self.upscale_logits_ops.append(nn.Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl+1]]), mode=upsample_mode))
else:
self.upscale_logits_ops.append(lambda x: x)
if not dropout_in_localization:
self.dropout_op_kwargs['p'] = old_dropout_p
# register all modules properly
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
self.td = nn.ModuleList(self.td)
self.tu = nn.ModuleList(self.tu)
self.seg_outputs = nn.ModuleList(self.seg_outputs)
if self.upscale_logits:
self.upscale_logits_ops = nn.ModuleList(self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here
self.apply(self.weightInitializer)
self.test_return_output = 0
self.inference = False
def train(self, mode=True):
super(Generic_UNet_Cotraining, self).train(mode)
def eval(self):
super(Generic_UNet_Cotraining, self).eval()
def infer(self, infer):
self.train(False)
self.inference = infer
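    # Encoder-decoder forward pass with skip connections. During co-training each segmentation
    # head only sees its share of the batch; at inference time only the head selected by
    # test_return_output is evaluated.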
def forward(self, x):
#input_var = x
skips = []
seg_outputs = []
for d in range(len(self.conv_blocks_context) - 1):
x = self.conv_blocks_context[d](x)
skips.append(x)
if not self.convolutional_pooling:
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
for u in range(len(self.tu)):
x = self.tu[u](x)
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](x)
if not self.inference:
seg_outputs.append([self.final_nonlin(self.seg_outputs[u][i](x[(x.shape[0]//len(self.num_classes) * i): (x.shape[0]//len(self.num_classes) * (i+1))])) for i in range(len(self.num_classes))])
else:
seg_outputs.append(self.final_nonlin(self.seg_outputs[u][self.test_return_output](x)))
if self.do_ds:
return tuple([seg_outputs[-1]] + [i(j) for i, j in zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
else:
return seg_outputs[-1]
class NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE(object):
def __init__(self):
self.preprocessed_data_directory = None
# set through arguments from init
self.experiment_name = "baseline_inspired_by_decathlon 2_regions_cotraining brats dec sd ce"
self.experiment_description = "NetworkTrainerBraTS2018Baseline 2_regions_cotraining brats dec sd ce"
self.output_folder = 'model/params'
self.dataset_directory = None
self.device = 0
self.fold = 0
self.preprocessed_data_directory = None
self.gt_niftis_folder = None
# set in self.initialize()
self.network = None
self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \
self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = None # loaded automatically from plans_file
self.basic_generator_patch_size = self.data_aug_params = self.plans = None
self.was_initialized = False
self.also_val_in_tr_mode = False
self.dataset = None
self.inference_apply_nonlin = nn.Sigmoid()
def initialize(self, training=True):
if not os.path.isdir(self.output_folder):
os.mkdir(self.output_folder)
self.output_folder = os.path.join(self.output_folder, "fold%d" % self.fold)
if not os.path.isdir(self.output_folder):
os.mkdir(self.output_folder)
self.process_plans_file()
if training:
raise NotImplementedError
self.initialize_network_optimizer_and_scheduler()
self.network.inference_apply_nonlin = self.inference_apply_nonlin
self.was_initialized = True
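    # Builds the 3D U-Net used for inference; the pooling kernel sizes are derived from
    # net_pool_per_axis so that each axis is downsampled at most that many times.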
def initialize_network_optimizer_and_scheduler(self):
net_numpool = max(self.net_pool_per_axis)
net_pool_kernel_sizes = []
for s in range(1, net_numpool+1):
this_pool_kernel_sizes = [1, 1, 1]
if self.net_pool_per_axis[0] >= s:
this_pool_kernel_sizes[0] = 2
if self.net_pool_per_axis[1] >= s:
this_pool_kernel_sizes[1] = 2
if len(self.patch_size)>2:
if self.net_pool_per_axis[2] >= s:
this_pool_kernel_sizes[2] = 2
else:
this_pool_kernel_sizes = this_pool_kernel_sizes[:-1]
net_pool_kernel_sizes.append(tuple(this_pool_kernel_sizes))
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True}
norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.02, 'track_running_stats':False}
dropout_op_kwargs = {'p':0, 'inplace':True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True}
self.network = Generic_UNet_Cotraining(self.num_input_channels, self.base_num_features, self.num_classes, 2, net_numpool, 2,
conv_op, conv_kwargs, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, False, False, lambda x:x, InitWeights_He(1e-2),
net_pool_kernel_sizes, True, False, False)
self.optimizer = None
self.lr_scheduler = None
self.network.set_device(self.device)
def process_plans_file(self):
self.batch_size = 2
self.net_pool_per_axis = [4, 4, 4]
self.patch_size = (128, 128, 128)
self.intensity_properties = None
self.normalization_schemes = ["nonCT"] * 4
self.base_num_features = 30
self.num_input_channels = 4
self.do_dummy_2D_aug = False
self.use_mask_for_norm = True
self.only_keep_largest_connected_component = {(0, ): False}
if len(self.patch_size) == 2:
self.threeD = False
elif len(self.patch_size) == 3:
self.threeD = True
else:
raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size))
self.regions = ((1, 2, 3, 4), (2, 3, 4), (2,))
self.regions_class_order = (1, 3, 2)
self.batch_size = 2
self.base_num_features = 30
self.num_classes = (3, 3)
def predict_preprocessed_data_return_softmax(self, data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian):
return self.network.predict_3D(data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian=use_gaussian)[2]
def load_best_checkpoint(self, train=True):
self.load_checkpoint(os.path.join(self.output_folder, "model_best.model"), train=train)
def load_checkpoint(self, fname, train=True):
print("loading checkpoint", fname, "train=", train)
if not self.was_initialized:
self.initialize()
saved_model = torch.load(fname)
new_state_dict = OrderedDict()
for k, value in saved_model['state_dict'].items():
key = k
new_state_dict[key] = value
self.network.load_state_dict(new_state_dict)
self.epoch = saved_model['epoch']
if train:
optimizer_state_dict = saved_model['optimizer_state_dict']
if optimizer_state_dict is not None:
self.optimizer.load_state_dict(optimizer_state_dict)
if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
self.lr_scheduler.load_state_dict(saved_model['lr_scheduler_state_dict'])
if len(saved_model['plot_stuff']) < 9:
self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \
self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \
self.all_val_eval_metrics_dc_glob = saved_model['plot_stuff']
self.all_val_eval_metrics_dc_per_sample_std = []
else:
self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \
self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \
self.all_val_eval_metrics_dc_glob, self.all_val_eval_metrics_dc_per_sample_std = saved_model['plot_stuff']
self.network.set_device(self.device)
def resize_softmax_output(softmax_output, new_shape, order=3):
'''
Resizes softmax output. Resizes each channel in c separately and fuses results back together
:param softmax_output: c x x x y x z
:param new_shape: x x y x z
:param order:
:return:
'''
tpe = softmax_output.dtype
new_shp = [softmax_output.shape[0]] + list(new_shape)
result = np.zeros(new_shp, dtype=softmax_output.dtype)
for i in range(softmax_output.shape[0]):
result[i] = resize(softmax_output[i].astype(float), new_shape, order, "constant", 0, True)
return result.astype(tpe)
def save_segmentation_nifti_softmax(softmax_output, dct, out_fname, order=3, region_class_order=None):
'''
segmentation must have the same spacing as the original nifti (for now). segmentation may have been cropped out
of the original image
:param segmentation:
:param dct:
:param out_fname:
:return:
'''
old_size = dct.get('size_before_cropping')
bbox = dct.get('brain_bbox')
if bbox is not None:
seg_old_size = np.zeros([softmax_output.shape[0]] + list(old_size))
for c in range(3):
bbox[c][1] = np.min((bbox[c][0] + softmax_output.shape[c+1], old_size[c]))
seg_old_size[:, bbox[0][0]:bbox[0][1],
bbox[1][0]:bbox[1][1],
bbox[2][0]:bbox[2][1]] = softmax_output
else:
seg_old_size = softmax_output
segmentation = resize_softmax_output(seg_old_size, np.array(dct['size'])[[2, 1, 0]], order=order)
if region_class_order is None:
segmentation = segmentation.argmax(0)
else:
seg_old_spacing_final = np.zeros(segmentation.shape[1:])
for i, c in enumerate(region_class_order):
seg_old_spacing_final[segmentation[i] > 0.5] = c
segmentation = seg_old_spacing_final
return segmentation.astype(np.uint8)
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
if join:
l = os.path.join
else:
l = lambda x, y: y
res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))
and (prefix is None or i.startswith(prefix))
and (suffix is None or i.endswith(suffix))]
if sort:
res.sort()
return res
def maybe_mkdir_p(directory):
splits = directory.split("/")[1:]
for i in range(0, len(splits)):
if not os.path.isdir(os.path.join("/", *splits[:i+1])):
os.mkdir(os.path.join("/", *splits[:i+1]))
def convert_labels_back(seg):
new_seg = np.zeros(seg.shape, dtype=seg.dtype)
new_seg[seg == 1] = 2
new_seg[seg == 2] = 4
new_seg[seg == 3] = 1
return new_seg
def preprocess_image(itk_image, is_seg=False, spacing_target=(1, 0.5, 0.5), brain_mask=None, cval=0):
"""
brain mask must be a numpy array that has the same shape as itk_image's pixel array. This function is not ideal but
gets the job done
:param itk_image:
:param is_seg:
:param spacing_target:
:param brain_mask:
:return:
"""
spacing = np.array(itk_image.GetSpacing())[[2, 1, 0]]
image = sitk.GetArrayFromImage(itk_image).astype(float)
if not is_seg:
if brain_mask is None:
brain_mask = (image!=image[0,0,0]).astype(float)
if np.any([[i!=j] for i, j in zip(spacing, spacing_target)]):
image = resize_image(image, spacing, spacing_target, 3, cval).astype(np.float32)
brain_mask = resize_image(brain_mask.astype(float), spacing, spacing_target, order=0).astype(int)
image[brain_mask==0] = 0
#subtract mean, divide by std. use heuristic masking
image[brain_mask!=0] -= image[brain_mask!=0].mean()
image[brain_mask!=0] /= image[brain_mask!=0].std()
else:
new_shape = (int(np.round(spacing[0] / spacing_target[0] * float(image.shape[0]))),
int(np.round(spacing[1] / spacing_target[1] * float(image.shape[1]))),
int(np.round(spacing[2] / spacing_target[2] * float(image.shape[2]))))
image = resize_segmentation(image, new_shape, 1, cval)
return image
def create_brain_masks(data):
"""
    data must be (b, c, x, y, z); the brain mask is a hole-filled binary mask where all sequences are 0 (this is a heuristic
    to recover a brain mask from brain-extracted MRI sequences, not an actual brain extraction)
:param data:
:return:
"""
shp = list(data.shape)
brain_mask = np.zeros(shp, dtype=np.float32)
for b in range(data.shape[0]):
for c in range(data.shape[1]):
this_mask = data[b, c] != 0
this_mask = binary_fill_holes(this_mask)
brain_mask[b, c] = this_mask
return brain_mask
def extract_brain_region(image, segmentation, outside_value=0):
brain_voxels = np.where(segmentation != outside_value)
minZidx = int(np.min(brain_voxels[0]))
maxZidx = int(np.max(brain_voxels[0]))
minXidx = int(np.min(brain_voxels[1]))
maxXidx = int(np.max(brain_voxels[1]))
minYidx = int(np.min(brain_voxels[2]))
maxYidx = int(np.max(brain_voxels[2]))
# resize images
resizer = (slice(minZidx, maxZidx), slice(minXidx, maxXidx), slice(minYidx, maxYidx))
return image[resizer], [[minZidx, maxZidx], [minXidx, maxXidx], [minYidx, maxYidx]]
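# load_and_preprocess: reads the four MRI sequences (plus optional segmentation / brain mask),
# derives a brain mask heuristically if none is given, adds a T1KM - T1 subtraction channel,
# resamples everything to 1 mm isotropic spacing, normalizes intensities inside the brain and
# crops all channels to the brain bounding box.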
def load_and_preprocess(t1_file, t1km_file, t2_file, flair_file, seg_file=None, bet_file=None, encode_bet_mask_in_seg=False, label_conversion_fn=None):
images = {}
# t1
images["T1"] = sitk.ReadImage(t1_file)
# t1km
images["T1KM"] = sitk.ReadImage(t1km_file)
properties_dict = {
"spacing": images["T1"].GetSpacing(),
"direction": images["T1"].GetDirection(),
"size": images["T1"].GetSize(),
"origin": images["T1"].GetOrigin()
}
# t2
images["T2"] = sitk.ReadImage(t2_file)
# flair
images["FLAIR"] = sitk.ReadImage(flair_file)
if seg_file is not None:
images['seg'] = sitk.ReadImage(seg_file)
if bet_file is not None:
images['bet_mask'] = sitk.ReadImage(bet_file)
else:
t1_npy = sitk.GetArrayFromImage(images["T1"])
mask = create_brain_masks(t1_npy[None])[0].astype(int)
mask = sitk.GetImageFromArray(mask)
mask.CopyInformation(images["T1"])
images['bet_mask'] = mask
try:
images["t1km_sub"] = images["T1KM"] - images["T1"]
except RuntimeError:
tmp1 = sitk.GetArrayFromImage(images["T1KM"])
tmp2 = sitk.GetArrayFromImage(images["T1"])
res = tmp1 - tmp2
res_itk = sitk.GetImageFromArray(res)
res_itk.CopyInformation(images["T1"])
images["t1km_sub"] = res_itk
for k in ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub"]:
images[k] = sitk.Mask(images[k], images['bet_mask'], 0)
bet_numpy = sitk.GetArrayFromImage(images['bet_mask'])
for k in images.keys():
is_seg = (k == "seg") | (k == "bet_mask")
if is_seg:
cval = -1
else:
cval = 0
images[k] = preprocess_image(images[k], is_seg=is_seg,
spacing_target=(1., 1., 1.), brain_mask=np.copy(bet_numpy), cval=cval)
properties_dict['size_before_cropping'] = images["T1"].shape
mask = np.copy(images['bet_mask'])
for k in images.keys():
images[k], bbox = extract_brain_region(images[k], mask, False)
properties_dict['brain_bbox'] = bbox
if (label_conversion_fn is not None) and ("seg" in images.keys()):
images["seg"] = label_conversion_fn(images["seg"])
use_these = ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub", 'seg']
if (not encode_bet_mask_in_seg) or ("seg" not in images.keys()):
use_these.append("bet_mask")
else:
images["seg"][images["bet_mask"] <= 0] = -1
imgs = []
for seq in use_these:
if seq not in images.keys():
imgs.append(np.zeros(images["T1"].shape)[None])
else:
imgs.append(images[seq][None])
all_data = np.vstack(imgs)
return all_data, properties_dict
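# segment: runs all five cross-validation folds of the trained network on the preprocessed case,
# averages the per-fold softmax predictions and maps the result back to the original geometry.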
def segment(t1_file, t1ce_file, t2_file, flair_file, netLoc):
"""
Segments the passed files
"""
trainer = NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE()
trainer.initialize(False)
all_data, dct = load_and_preprocess(t1_file, t1ce_file, t2_file, flair_file, None, None,
True, None)
all_softmax = []
for fold in range(5):
trainer.output_folder = join(netLoc, "%d" % fold)
trainer.load_best_checkpoint(False)
trainer.network.infer(True)
trainer.network.test_return_output = 0
softmax = trainer.predict_preprocessed_data_return_softmax(all_data[:4], True, 1, False, 1, (2, 3, 4), False,
None, None, trainer.patch_size, True)
all_softmax.append(softmax[None])
softmax_consolidated = np.vstack(all_softmax).mean(0)
output = save_segmentation_nifti_softmax(softmax_consolidated, dct,
"tumor_isen2018_class.nii.gz", 1,
trainer.regions_class_order)
return output
| [((116, 10, 116, 49), 'numpy.pad', 'np.pad', ({(116, 17, 116, 22): 'image', (116, 24, 116, 32): 'pad_list', (116, 34, 116, 38): 'mode'}, {}), '(image, pad_list, mode, **kwargs)', True, 'import numpy as np\n'), ((304, 10, 304, 30), 'torch.exp', 'torch.exp', ({(304, 20, 304, 29): 'x - x_max'}, {}), '(x - x_max)', False, 'import torch\n'), ((375, 11, 375, 26), 'numpy.unique', 'np.unique', ({(375, 21, 375, 25): 'axes'}, {}), '(axes)', True, 'import numpy as np\n'), ((758, 13, 758, 58), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((820, 14, 820, 50), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((865, 17, 865, 48), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((875, 19, 875, 58), 'numpy.where', 'np.where', ({(875, 28, 875, 57): 'segmentation != outside_value'}, {}), '(segmentation != outside_value)', True, 'import numpy as np\n'), ((891, 19, 891, 42), 'SimpleITK.ReadImage', 'sitk.ReadImage', ({(891, 34, 891, 41): 't1_file'}, {}), '(t1_file)', True, 'import SimpleITK as sitk\n'), ((893, 21, 893, 46), 'SimpleITK.ReadImage', 'sitk.ReadImage', ({(893, 36, 893, 45): 't1km_file'}, {}), '(t1km_file)', True, 'import SimpleITK as sitk\n'), ((903, 19, 903, 42), 'SimpleITK.ReadImage', 'sitk.ReadImage', ({(903, 34, 903, 41): 't2_file'}, {}), '(t2_file)', True, 'import SimpleITK as sitk\n'), ((906, 22, 906, 48), 'SimpleITK.ReadImage', 'sitk.ReadImage', ({(906, 37, 906, 47): 'flair_file'}, {}), '(flair_file)', True, 'import SimpleITK as sitk\n'), ((933, 16, 933, 58), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', ({(933, 39, 933, 57): "images['bet_mask']"}, {}), "(images['bet_mask'])", True, 'import SimpleITK as sitk\n'), ((945, 11, 945, 38), 'numpy.copy', 'np.copy', ({(945, 19, 945, 37): "images['bet_mask']"}, {}), "(images['bet_mask'])", True, 'import numpy as np\n'), ((965, 15, 965, 30), 'numpy.vstack', 'np.vstack', ({(965, 25, 965, 29): 'imgs'}, {}), '(imgs)', True, 'import numpy as np\n'), ((26, 12, 26, 24), 'json.load', 'json.load', ({(26, 22, 26, 23): 'f'}, {}), '(f)', False, 'import json\n'), ((35, 14, 35, 75), 'skimage.transform.resize', 'resize', (), '', False, 'from skimage.transform import resize\n'), ((73, 21, 73, 55), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ({}, {}), '(**self.nonlin_kwargs)', False, 'from torch import nn\n'), ((98, 20, 98, 39), 'numpy.array', 'np.array', ({(98, 29, 98, 38): 'new_shape'}, {}), '(new_shape)', True, 'import numpy as np\n'), ((120, 19, 120, 37), 'numpy.array', 'np.array', ({(120, 28, 120, 36): 'pad_list'}, {}), '(pad_list)', True, 'import numpy as np\n'), ((264, 15, 264, 35), 'numpy.vstack', 'np.vstack', ({(264, 25, 264, 34): 'all_preds'}, {}), '(all_preds)', True, 'import numpy as np\n'), ((424, 48, 424, 80), 'numpy.prod', 'np.prod', ({(424, 56, 424, 76): 'pool_op_kernel_sizes', (424, 78, 424, 79): '0'}, {}), '(pool_op_kernel_sizes, 0)', True, 'import numpy as np\n'), ((561, 40, 561, 84), 'torch.nn.ModuleList', 'nn.ModuleList', ({(561, 54, 561, 83): 'self.conv_blocks_localization'}, {}), '(self.conv_blocks_localization)', False, 'from torch import nn\n'), ((562, 35, 562, 74), 'torch.nn.ModuleList', 'nn.ModuleList', ({(562, 49, 562, 73): 'self.conv_blocks_context'}, {}), '(self.conv_blocks_context)', False, 'from torch import nn\n'), ((563, 18, 563, 40), 'torch.nn.ModuleList', 'nn.ModuleList', ({(563, 32, 563, 39): 'self.td'}, {}), '(self.td)', False, 'from torch import nn\n'), ((564, 18, 564, 40), 'torch.nn.ModuleList', 'nn.ModuleList', ({(564, 32, 564, 39): 'self.tu'}, {}), '(self.tu)', 
False, 'from torch import nn\n'), ((565, 27, 565, 58), 'torch.nn.ModuleList', 'nn.ModuleList', ({(565, 41, 565, 57): 'self.seg_outputs'}, {}), '(self.seg_outputs)', False, 'from torch import nn\n'), ((636, 38, 636, 50), 'torch.nn.Sigmoid', 'nn.Sigmoid', ({}, {}), '()', False, 'from torch import nn\n'), ((641, 29, 641, 83), 'os.path.join', 'os.path.join', ({(641, 42, 641, 60): 'self.output_folder', (641, 62, 641, 82): "'fold%d' % self.fold"}, {}), "(self.output_folder, 'fold%d' % self.fold)", False, 'import os\n'), ((723, 22, 723, 39), 'torch.load', 'torch.load', ({(723, 33, 723, 38): 'fname'}, {}), '(fname)', False, 'import torch\n'), ((724, 25, 724, 38), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((791, 32, 791, 64), 'numpy.zeros', 'np.zeros', ({(791, 41, 791, 63): 'segmentation.shape[1:]'}, {}), '(segmentation.shape[1:])', True, 'import numpy as np\n'), ((853, 16, 853, 62), 'batchgenerators.augmentations.utils.resize_segmentation', 'resize_segmentation', ({(853, 36, 853, 41): 'image', (853, 43, 853, 52): 'new_shape', (853, 54, 853, 55): '1', (853, 57, 853, 61): 'cval'}, {}), '(image, new_shape, 1, cval)', False, 'from batchgenerators.augmentations.utils import resize_segmentation\n'), ((876, 18, 876, 41), 'numpy.min', 'np.min', ({(876, 25, 876, 40): 'brain_voxels[0]'}, {}), '(brain_voxels[0])', True, 'import numpy as np\n'), ((877, 18, 877, 41), 'numpy.max', 'np.max', ({(877, 25, 877, 40): 'brain_voxels[0]'}, {}), '(brain_voxels[0])', True, 'import numpy as np\n'), ((878, 18, 878, 41), 'numpy.min', 'np.min', ({(878, 25, 878, 40): 'brain_voxels[1]'}, {}), '(brain_voxels[1])', True, 'import numpy as np\n'), ((879, 18, 879, 41), 'numpy.max', 'np.max', ({(879, 25, 879, 40): 'brain_voxels[1]'}, {}), '(brain_voxels[1])', True, 'import numpy as np\n'), ((880, 18, 880, 41), 'numpy.min', 'np.min', ({(880, 25, 880, 40): 'brain_voxels[2]'}, {}), '(brain_voxels[2])', True, 'import numpy as np\n'), ((881, 18, 881, 41), 'numpy.max', 'np.max', ({(881, 25, 881, 40): 'brain_voxels[2]'}, {}), '(brain_voxels[2])', True, 'import numpy as np\n'), ((909, 24, 909, 48), 'SimpleITK.ReadImage', 'sitk.ReadImage', ({(909, 39, 909, 47): 'seg_file'}, {}), '(seg_file)', True, 'import SimpleITK as sitk\n'), ((912, 29, 912, 53), 'SimpleITK.ReadImage', 'sitk.ReadImage', ({(912, 44, 912, 52): 'bet_file'}, {}), '(bet_file)', True, 'import SimpleITK as sitk\n'), ((914, 17, 914, 53), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', ({(914, 40, 914, 52): "images['T1']"}, {}), "(images['T1'])", True, 'import SimpleITK as sitk\n'), ((916, 15, 916, 43), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', ({(916, 38, 916, 42): 'mask'}, {}), '(mask)', True, 'import SimpleITK as sitk\n'), ((931, 20, 931, 63), 'SimpleITK.Mask', 'sitk.Mask', ({(931, 30, 931, 39): 'images[k]', (931, 41, 931, 59): "images['bet_mask']", (931, 61, 931, 62): '0'}, {}), "(images[k], images['bet_mask'], 0)", True, 'import SimpleITK as sitk\n'), ((121, 25, 121, 44), 'numpy.array', 'np.array', ({(121, 34, 121, 43): 'res.shape'}, {}), '(res.shape)', True, 'import numpy as np\n'), ((201, 13, 201, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((270, 13, 270, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((314, 28, 314, 74), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (), '', False, 'from torch import nn\n'), ((347, 42, 347, 63), 'copy.deepcopy', 'deepcopy', ({(347, 51, 347, 62): 'conv_kwargs'}, {}), 
'(conv_kwargs)', False, 'from copy import deepcopy\n'), ((567, 38, 567, 76), 'torch.nn.ModuleList', 'nn.ModuleList', ({(567, 52, 567, 75): 'self.upscale_logits_ops'}, {}), '(self.upscale_logits_ops)', False, 'from torch import nn\n'), ((598, 16, 598, 54), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((639, 15, 639, 48), 'os.path.isdir', 'os.path.isdir', ({(639, 29, 639, 47): 'self.output_folder'}, {}), '(self.output_folder)', False, 'import os\n'), ((640, 12, 640, 40), 'os.mkdir', 'os.mkdir', ({(640, 21, 640, 39): 'self.output_folder'}, {}), '(self.output_folder)', False, 'import os\n'), ((642, 15, 642, 48), 'os.path.isdir', 'os.path.isdir', ({(642, 29, 642, 47): 'self.output_folder'}, {}), '(self.output_folder)', False, 'import os\n'), ((643, 12, 643, 40), 'os.mkdir', 'os.mkdir', ({(643, 21, 643, 39): 'self.output_folder'}, {}), '(self.output_folder)', False, 'import os\n'), ((717, 29, 717, 81), 'os.path.join', 'os.path.join', ({(717, 42, 717, 60): 'self.output_folder', (717, 62, 717, 80): '"""model_best.model"""'}, {}), "(self.output_folder, 'model_best.model')", False, 'import os\n'), ((779, 25, 779, 86), 'numpy.min', 'np.min', ({(779, 32, 779, 85): '(bbox[c][0] + softmax_output.shape[c + 1], old_size[c])'}, {}), '((bbox[c][0] + softmax_output.shape[c + 1], old_size[c]))', True, 'import numpy as np\n'), ((786, 55, 786, 76), 'numpy.array', 'np.array', ({(786, 64, 786, 75): "dct['size']"}, {}), "(dct['size'])", True, 'import numpy as np\n'), ((804, 33, 804, 51), 'os.listdir', 'os.listdir', ({(804, 44, 804, 50): 'folder'}, {}), '(folder)', False, 'import os\n'), ((838, 12, 838, 45), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', ({(838, 35, 838, 44): 'itk_image'}, {}), '(itk_image)', True, 'import SimpleITK as sitk\n'), ((869, 24, 869, 52), 'scipy.ndimage.binary_fill_holes', 'binary_fill_holes', ({(869, 42, 869, 51): 'this_mask'}, {}), '(this_mask)', False, 'from scipy.ndimage import binary_fill_holes\n'), ((923, 15, 923, 53), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', ({(923, 38, 923, 52): "images['T1KM']"}, {}), "(images['T1KM'])", True, 'import SimpleITK as sitk\n'), ((924, 15, 924, 51), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', ({(924, 38, 924, 50): "images['T1']"}, {}), "(images['T1'])", True, 'import SimpleITK as sitk\n'), ((926, 18, 926, 45), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', ({(926, 41, 926, 44): 'res'}, {}), '(res)', True, 'import SimpleITK as sitk\n'), ((988, 27, 988, 49), 'numpy.vstack', 'np.vstack', ({(988, 37, 988, 48): 'all_softmax'}, {}), '(all_softmax)', True, 'import numpy as np\n'), ((281, 23, 281, 53), 'numpy.vstack', 'np.vstack', ({(281, 33, 281, 52): '[data] * BATCH_SIZE'}, {}), '([data] * BATCH_SIZE)', True, 'import numpy as np\n'), ((295, 41, 295, 77), 'numpy.zeros', 'np.zeros', ({(295, 50, 295, 76): 'predicted_segmentation_shp'}, {}), '(predicted_segmentation_shp)', True, 'import numpy as np\n'), ((316, 30, 316, 63), 'torch.nn.init.constant_', 'nn.init.constant_', ({(316, 48, 316, 59): 'module.bias', (316, 61, 316, 62): '0'}, {}), '(module.bias, 0)', False, 'from torch import nn\n'), ((494, 34, 494, 87), 'numpy.round', 'np.round', ({(494, 43, 494, 86): 'output_features * feat_map_mul_on_downscale'}, {}), '(output_features * feat_map_mul_on_downscale)', True, 'import numpy as np\n'), ((550, 34, 550, 65), 'numpy.vstack', 'np.vstack', ({(550, 44, 550, 64): 'pool_op_kernel_sizes'}, {}), '(pool_op_kernel_sizes)', True, 'import numpy as np\n'), ((815, 29, 815, 61), 'os.path.join', 'os.path.join', 
({(815, 42, 815, 45): '"""/"""', (815, 47, 815, 60): '*splits[:i + 1]'}, {}), "('/', *splits[:i + 1])", False, 'import os\n'), ((816, 21, 816, 53), 'os.path.join', 'os.path.join', ({(816, 34, 816, 37): '"""/"""', (816, 39, 816, 52): '*splits[:i + 1]'}, {}), "('/', *splits[:i + 1])", False, 'import os\n'), ((941, 77, 941, 95), 'numpy.copy', 'np.copy', ({(941, 85, 941, 94): 'bet_numpy'}, {}), '(bet_numpy)', True, 'import numpy as np\n'), ((202, 16, 202, 36), 'torch.zeros', 'torch.zeros', ({(202, 28, 202, 35): 'x.shape'}, {}), '(x.shape)', False, 'import torch\n'), ((215, 35, 215, 46), 'numpy.array', 'np.array', ({(215, 44, 215, 45): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((534, 31, 534, 105), 'torch.nn.Upsample', 'nn.Upsample', (), '', False, 'from torch import nn\n'), ((804, 70, 804, 93), 'os.path.join', 'os.path.join', ({(804, 83, 804, 89): 'folder', (804, 91, 804, 92): 'i'}, {}), '(folder, i)', False, 'import os\n'), ((962, 24, 962, 52), 'numpy.zeros', 'np.zeros', ({(962, 33, 962, 51): "images['T1'].shape"}, {}), "(images['T1'].shape)", True, 'import numpy as np\n'), ((243, 58, 243, 79), 'numpy.copy', 'np.copy', ({(243, 66, 243, 78): 'data_for_net'}, {}), '(data_for_net)', True, 'import numpy as np\n')] |
architsakhadeo/Offline-Hyperparameter-Tuning-for-RL | plot/finderror.py | 94b8f205b12f0cc59ae8e19b2e6099f34be929d6 | import os
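# Walks the offline-learning hyperparameter sweep directories, diffs the log_json.txt of every
# pair of runs within each setting and records the pairs whose logs are identical in
# offlinelearningerrors.txt.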
basepath = '/home/archit/scratch/cartpoles/data/hyperparam/cartpole/offline_learning/esarsa-adam/'
dirs = os.listdir(basepath)
string = ''''''
for dir in dirs:
print(dir)
subbasepath = basepath + dir + '/'
subdirs = os.listdir(subbasepath)
for subdir in subdirs:
print(subdir)
subsubbasepath = subbasepath + subdir + '/'
subsubdirs = os.listdir(subsubbasepath)
string += subsubbasepath + '\n'
content = []
for i in range(0,len(subsubdirs)-1):
for j in range(i+1, len(subsubdirs)):
a = os.system('diff ' + subsubbasepath + subsubdirs[i] + '/log_json.txt ' + subsubbasepath + subsubdirs[j] + '/log_json.txt')
content.append([a, subsubdirs[i], subsubdirs[j]])
filteredcontent = [i for i in content if i[0] == 0]
for i in range(len(filteredcontent)):
string += ' and '.join(filteredcontent[i][1:])
if i != len(filteredcontent) - 1:
string += ', '
string += '\n\n'
f = open('offlinelearningerrors.txt','w')
f.write(string)
f.close()
| [((3, 7, 3, 27), 'os.listdir', 'os.listdir', ({(3, 18, 3, 26): 'basepath'}, {}), '(basepath)', False, 'import os\n'), ((8, 11, 8, 34), 'os.listdir', 'os.listdir', ({(8, 22, 8, 33): 'subbasepath'}, {}), '(subbasepath)', False, 'import os\n'), ((12, 15, 12, 41), 'os.listdir', 'os.listdir', ({(12, 26, 12, 40): 'subsubbasepath'}, {}), '(subsubbasepath)', False, 'import os\n'), ((17, 8, 17, 129), 'os.system', 'os.system', ({(17, 18, 17, 128): "'diff ' + subsubbasepath + subsubdirs[i\n ] + '/log_json.txt ' + subsubbasepath + subsubdirs[j] + '/log_json.txt'"}, {}), "('diff ' + subsubbasepath + subsubdirs[i] + '/log_json.txt ' +\n subsubbasepath + subsubdirs[j] + '/log_json.txt')", False, 'import os\n')] |
bluePlatinum/pyback | src/pybacked/zip_handler.py | 1c12a52974232b0482981c12a9af27e52dd2190e | import os
import shutil
import tempfile
import zipfile
def archive_write(archivepath, data, filename, compression, compressionlevel):
"""
Create a file named filename in the archive and write data to it
:param archivepath: The path to the zip-archive
:type archivepath: str
:param data: The data to be written to the file
:type data: str
:param filename: The filename for the newly created file
:type filename: str
:param compression: The desired compression for the zip-archive
:type compression: int
:param compressionlevel: The desired compression level for the zip-archive
:type compressionlevel: int
:return: void
"""
archive = zipfile.ZipFile(archivepath, mode='a',
compression=compression,
compresslevel=compressionlevel)
archive.writestr(filename, data)
archive.close()
def create_archive(archivepath, filedict, compression, compressionlevel):
"""
    Write filedict to the zip-archive data subdirectory. Will check whether the archive
    at archivepath exists before writing. If the file already exists, a
    FileExistsError is raised.
:param archivepath: the path to the file
:param filedict: dictionary containing the filepath, filename key-value
pairs
    :param compression: desired compression method (see zipfile documentation)
:param compressionlevel: compression level (see zipfile documentation)
:return: void
"""
if os.path.isfile(archivepath):
raise FileExistsError("Specified file already exists")
else:
archive = zipfile.ZipFile(archivepath, mode='x',
compression=compression,
compresslevel=compressionlevel)
for filepath, filename in filedict.items():
archive.write(filepath, arcname="data/" + filename)
archive.close()
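# Example usage (sketch only, hypothetical paths and files):
#   create_archive("backup.zip", {"/data/report.txt": "report.txt"},
#                  zipfile.ZIP_DEFLATED, 9)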
def extract_archdata(archivepath, filename, destination):
"""
    Extract a file from an archive and write it to the destination. If the
    destination path already exists, extract_archdata will not overwrite but
    will raise a FileExistsError.
:param archivepath: The path to the archive containing the file
:type archivepath: str
:param filename: The archive name of the desired file.
:type filename: str
:param destination: The path at which the extracted file is to be placed.
:type destination: str
:return: void
:rtype: None
"""
# check if destination path already exists
if os.path.exists(destination):
raise FileExistsError("The specified destination is already in use")
archive = zipfile.ZipFile(archivepath, mode='r')
with tempfile.TemporaryDirectory() as tmpdir:
archive.extract(filename, path=tmpdir)
# create directories for the destination
os.makedirs(os.path.dirname(destination), exist_ok=True)
shutil.copy(os.path.abspath(tmpdir + "/" + filename), destination)
def read_bin(archivepath, filelist):
"""
Read a list of files from an archive and return the file data as a
dictionary of filename, data key-value pairs.
:param archivepath: the path to the archive
:param filelist: list of filenames to read
:return: dictionary with filename, data key-value pairs
:rtype: dict
"""
datadict = dict()
if os.path.isfile(archivepath):
archive = zipfile.ZipFile(archivepath, mode='r')
else:
raise FileNotFoundError("Specified file does not exist")
for filename in filelist:
try:
file = archive.open(filename)
datadict[filename] = file.read().decode()
file.close()
except KeyError:
datadict[filename] = None
archive.close()
return datadict
def read_diff_log(archivepath):
"""
Read the diff-log.csv from a given archive file.
:param archivepath: The path to the zip-archive
:type archivepath: str
:return: The diff-log.csv contents in ascii string form.
:rtype: str
"""
arch = zipfile.ZipFile(archivepath, mode='r')
diff_log_file = arch.open("diff-log.csv")
diff_log_bin = diff_log_file.read()
diff_log = diff_log_bin.decode()
diff_log_file.close()
arch.close()
return diff_log
def zip_extract(archivepath, filelist, extractpath):
"""
Extract a list of files to a specific location
:param archivepath: the path to the zip-archive
:param filelist: list of member filenames to extract
:param extractpath: path for the extracted files
:return: void
"""
if os.path.isfile(archivepath):
archive = zipfile.ZipFile(archivepath, mode='r')
else:
raise FileNotFoundError("Specified file does not exist")
archive.extractall(path=extractpath, members=filelist)
archive.close()
| [((23, 14, 25, 61), 'zipfile.ZipFile', 'zipfile.ZipFile', (), '', False, 'import zipfile\n'), ((43, 7, 43, 34), 'os.path.isfile', 'os.path.isfile', ({(43, 22, 43, 33): 'archivepath'}, {}), '(archivepath)', False, 'import os\n'), ((70, 7, 70, 34), 'os.path.exists', 'os.path.exists', ({(70, 22, 70, 33): 'destination'}, {}), '(destination)', False, 'import os\n'), ((72, 14, 72, 52), 'zipfile.ZipFile', 'zipfile.ZipFile', (), '', False, 'import zipfile\n'), ((93, 7, 93, 34), 'os.path.isfile', 'os.path.isfile', ({(93, 22, 93, 33): 'archivepath'}, {}), '(archivepath)', False, 'import os\n'), ((119, 11, 119, 49), 'zipfile.ZipFile', 'zipfile.ZipFile', (), '', False, 'import zipfile\n'), ((137, 7, 137, 34), 'os.path.isfile', 'os.path.isfile', ({(137, 22, 137, 33): 'archivepath'}, {}), '(archivepath)', False, 'import os\n'), ((46, 18, 48, 65), 'zipfile.ZipFile', 'zipfile.ZipFile', (), '', False, 'import zipfile\n'), ((73, 9, 73, 38), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ({}, {}), '()', False, 'import tempfile\n'), ((94, 18, 94, 56), 'zipfile.ZipFile', 'zipfile.ZipFile', (), '', False, 'import zipfile\n'), ((138, 18, 138, 56), 'zipfile.ZipFile', 'zipfile.ZipFile', (), '', False, 'import zipfile\n'), ((77, 20, 77, 48), 'os.path.dirname', 'os.path.dirname', ({(77, 36, 77, 47): 'destination'}, {}), '(destination)', False, 'import os\n'), ((79, 20, 79, 60), 'os.path.abspath', 'os.path.abspath', ({(79, 36, 79, 59): "(tmpdir + '/' + filename)"}, {}), "(tmpdir + '/' + filename)", False, 'import os\n')] |
imvinod/Eva | src/query_planner/abstract_scan_plan.py | 0ed9814ae89db7dce1fb734dc99d5dac69cb3c82 | """Abstract class for all the scan planners
https://www.postgresql.org/docs/9.1/using-explain.html
https://www.postgresql.org/docs/9.5/runtime-config-query.html
"""
from src.query_planner.abstract_plan import AbstractPlan
from typing import List
class AbstractScan(AbstractPlan):
"""Abstract class for all the scan based planners
Arguments:
predicate : Expression
video : video on which the scan will be executed
columns_id :
"""
def __init__(self, predicate: Expression, video: Storage,
column_ids: List[int]):
super(AbstractScan, self).__init__()
self._predicate = predicate
self._column_ids = column_ids
self._video = video
@property
def video(self) -> Storage:
return self._video
@property
def predicate(self) -> Expression:
return self._predicate
@property
def column_ids(self) -> List:
return self._column_ids
| [] |
jimmy-huang/zephyr.js | tests/tools/test-tcp4-client.py | cef5c0dffaacf7d5aa3f8265626f68a1e2b32eb5 | #!/usr/bin/python
# coding:utf-8
import time
import socket
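# Minimal TCP echo client used as a test tool: it connects to 192.0.2.1:9876, prints and echoes
# back every message it receives, and closes the socket when the server sends "close".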
def main():
print "Socket client creat successful"
host = "192.0.2.1"
port = 9876
bufSize = 1024
addr = (host, port)
Timeout = 300
mySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mySocket.settimeout(Timeout)
mySocket.connect(addr)
while 1:
try :
Data = mySocket.recv(bufSize)
Data = Data.strip()
print "Got data: ", Data
time.sleep(2)
if Data == "close":
mySocket.close()
print "close socket"
break
else:
mySocket.sendall(Data)
print "Send data: ", Data
except KeyboardInterrupt :
print "exit client"
break
except :
print "time out"
continue
if __name__ == "__main__" :
main()
| [] |
s-utsch/kinto | kinto/__main__.py | 5e368849a8ab652a6e1923f44febcf89afd2c78b | import argparse
import sys
from cliquet.scripts import cliquet
from pyramid.scripts import pserve
from pyramid.paster import bootstrap
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description="Kinto commands")
subparsers = parser.add_subparsers(title='subcommands',
description='valid subcommands',
help='init/start/migrate')
parser_init = subparsers.add_parser('init')
parser_init.set_defaults(which='init')
parser_init.add_argument('--config_file', required=False,
help='Config file may be passed as argument')
parser_migrate = subparsers.add_parser('migrate')
parser_migrate.set_defaults(which='migrate')
parser_start = subparsers.add_parser('start')
parser_start.set_defaults(which='start')
args = vars(parser.parse_args())
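    # Dispatch on the chosen subcommand: 'init' bootstraps the app from an ini file, 'migrate'
    # initializes the database schema and 'start' serves the app with pserve.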
if args['which'] == 'init':
        if args['config_file'] is None:
env = bootstrap('config/kinto.ini')
else:
            config_file = args['config_file']
env = bootstrap(config_file)
elif args['which'] == 'migrate':
env = bootstrap('config/kinto.ini')
cliquet.init_schema(env)
elif args['which'] == 'start':
pserve_argv = ['pserve', 'config/kinto.ini', '--reload']
pserve.main(pserve_argv)
if __name__ == "__main__":
main()
| [((13, 17, 13, 70), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((33, 26, 33, 55), 'pyramid.paster.bootstrap', 'bootstrap', ({(33, 36, 33, 54): '"""config/kinto.ini"""'}, {}), "('config/kinto.ini')", False, 'from pyramid.paster import bootstrap\n'), ((36, 26, 36, 48), 'pyramid.paster.bootstrap', 'bootstrap', ({(36, 36, 36, 47): 'config_file'}, {}), '(config_file)', False, 'from pyramid.paster import bootstrap\n'), ((38, 22, 38, 51), 'pyramid.paster.bootstrap', 'bootstrap', ({(38, 32, 38, 50): '"""config/kinto.ini"""'}, {}), "('config/kinto.ini')", False, 'from pyramid.paster import bootstrap\n'), ((39, 16, 39, 40), 'cliquet.scripts.cliquet.init_schema', 'cliquet.init_schema', ({(39, 36, 39, 39): 'env'}, {}), '(env)', False, 'from cliquet.scripts import cliquet\n'), ((42, 16, 42, 40), 'pyramid.scripts.pserve.main', 'pserve.main', ({(42, 28, 42, 39): 'pserve_argv'}, {}), '(pserve_argv)', False, 'from pyramid.scripts import pserve\n')] |
JumboCode/GroundWorkSomerville | apis/admin.py | 280f9cd8ea38f065c9fb113e563a4be362a7e265 | from django.contrib import admin
from django.contrib.auth.models import User
from .models import Vegetable, Harvest, Transaction, Merchandise, MerchandisePrice
from .models import PurchasedItem, UserProfile, VegetablePrice, StockedVegetable
from .models import MerchandisePhotos
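# Register all models with the default Django admin site so they can be inspected and edited
# through the admin interface.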
admin.site.register(Vegetable)
admin.site.register(StockedVegetable)
admin.site.register(Harvest)
admin.site.register(VegetablePrice)
admin.site.register(PurchasedItem)
admin.site.register(Transaction)
admin.site.register(UserProfile)
admin.site.register(Merchandise)
admin.site.register(MerchandisePrice)
admin.site.register(MerchandisePhotos)
| [((7, 0, 7, 30), 'django.contrib.admin.site.register', 'admin.site.register', ({(7, 20, 7, 29): 'Vegetable'}, {}), '(Vegetable)', False, 'from django.contrib import admin\n'), ((8, 0, 8, 37), 'django.contrib.admin.site.register', 'admin.site.register', ({(8, 20, 8, 36): 'StockedVegetable'}, {}), '(StockedVegetable)', False, 'from django.contrib import admin\n'), ((9, 0, 9, 28), 'django.contrib.admin.site.register', 'admin.site.register', ({(9, 20, 9, 27): 'Harvest'}, {}), '(Harvest)', False, 'from django.contrib import admin\n'), ((10, 0, 10, 35), 'django.contrib.admin.site.register', 'admin.site.register', ({(10, 20, 10, 34): 'VegetablePrice'}, {}), '(VegetablePrice)', False, 'from django.contrib import admin\n'), ((11, 0, 11, 34), 'django.contrib.admin.site.register', 'admin.site.register', ({(11, 20, 11, 33): 'PurchasedItem'}, {}), '(PurchasedItem)', False, 'from django.contrib import admin\n'), ((12, 0, 12, 32), 'django.contrib.admin.site.register', 'admin.site.register', ({(12, 20, 12, 31): 'Transaction'}, {}), '(Transaction)', False, 'from django.contrib import admin\n'), ((13, 0, 13, 32), 'django.contrib.admin.site.register', 'admin.site.register', ({(13, 20, 13, 31): 'UserProfile'}, {}), '(UserProfile)', False, 'from django.contrib import admin\n'), ((14, 0, 14, 32), 'django.contrib.admin.site.register', 'admin.site.register', ({(14, 20, 14, 31): 'Merchandise'}, {}), '(Merchandise)', False, 'from django.contrib import admin\n'), ((15, 0, 15, 37), 'django.contrib.admin.site.register', 'admin.site.register', ({(15, 20, 15, 36): 'MerchandisePrice'}, {}), '(MerchandisePrice)', False, 'from django.contrib import admin\n'), ((16, 0, 16, 38), 'django.contrib.admin.site.register', 'admin.site.register', ({(16, 20, 16, 37): 'MerchandisePhotos'}, {}), '(MerchandisePhotos)', False, 'from django.contrib import admin\n')] |
AnantTiwari-Naman/pyglet | tests/unit/media/test_synthesis.py | 4774f2889057da95a78785a69372112931e6a620 | from ctypes import sizeof
from io import BytesIO
import unittest
from pyglet.media.synthesis import *
local_dir = os.path.dirname(__file__)
test_data_path = os.path.abspath(os.path.join(local_dir, '..', '..', 'data'))
del local_dir
def get_test_data_file(*file_parts):
"""Get a file from the test data directory in an OS independent way.
Supply relative file name as you would in os.path.join().
"""
return os.path.join(test_data_path, *file_parts)
class SynthesisSourceTest:
"""Simple test to check if synthesized sources provide data."""
source_class = None
def test_default(self):
source = self.source_class(1.)
self._test_total_duration(source)
if self.source_class is not WhiteNoise:
self._test_generated_bytes(source)
def test_sample_rate_11025(self):
source = self.source_class(1., sample_rate=11025)
self._test_total_duration(source)
if self.source_class is not WhiteNoise:
self._test_generated_bytes(source, sample_rate=11025)
def _test_total_duration(self, source):
total_bytes = source.audio_format.bytes_per_second
self._check_audio_data(source, total_bytes, 1.)
def _check_audio_data(self, source, expected_bytes, expected_duration):
data = source.get_audio_data(expected_bytes + 100)
self.assertIsNotNone(data)
self.assertAlmostEqual(expected_bytes, data.length, delta=20)
self.assertAlmostEqual(expected_duration, data.duration)
self.assertIsNotNone(data.data)
self.assertAlmostEqual(expected_bytes, len(data.data), delta=20)
# Should now be out of data
last_data = source.get_audio_data(100)
self.assertIsNone(last_data)
def test_seek_default(self):
source = self.source_class(1.)
self._test_seek(source)
def _test_seek(self, source):
seek_time = .5
bytes_left = source.audio_format.bytes_per_second * .5
source.seek(seek_time)
self._check_audio_data(source, bytes_left, .5)
def _test_generated_bytes(self, source, sample_rate=44800, sample_size=16):
source_name = self.source_class.__name__.lower()
filename = "synthesis_{0}_{1}_{2}_1ch.wav".format(source_name, sample_size, sample_rate)
with open(get_test_data_file('media', filename), 'rb') as f:
# discard the wave header:
loaded_bytes = f.read()[44:]
source.seek(0)
generated_data = source.get_audio_data(source._max_offset)
bytes_buffer = BytesIO(generated_data.data).getvalue()
# Compare a small chunk, to avoid hanging on mismatch:
assert bytes_buffer[:1000] == loaded_bytes[:1000],\
"Generated bytes do not match sample wave file."
class SilenceTest(SynthesisSourceTest, unittest.TestCase):
source_class = Silence
class WhiteNoiseTest(SynthesisSourceTest, unittest.TestCase):
source_class = WhiteNoise
class SineTest(SynthesisSourceTest, unittest.TestCase):
source_class = Sine
class TriangleTest(SynthesisSourceTest, unittest.TestCase):
source_class = Triangle
class SawtoothTest(SynthesisSourceTest, unittest.TestCase):
source_class = Sawtooth
class SquareTest(SynthesisSourceTest, unittest.TestCase):
source_class = Square
class FMTest(SynthesisSourceTest, unittest.TestCase):
source_class = SimpleFM
| [((73, 27, 73, 55), 'io.BytesIO', 'BytesIO', ({(73, 35, 73, 54): 'generated_data.data'}, {}), '(generated_data.data)', False, 'from io import BytesIO\n')] |
tavo1599/F.P2021 | Ejercicio/Ejercicio7.py | a592804fb5ae30da55551d9e29819887919db041 | # Input data
num=int(input("Ingrese un numero: "))
# Process
if num==10:
print("Calificacion: A")
elif num==9:
print("Calificacion: B")
elif num==8:
print("Calificacion: C")
elif num==7 or num==6:
print("Calificacion: D")
elif num<=5 and num>=0:
print("Calificacion: F")
| [] |
nairraghav/advent-of-code-2019 | 2015/day-2/part2.py | 274a2a4a59a8be39afb323356c592af5e1921e54 | ribbon_needed = 0
with open("input.txt", "r") as puzzle_input:
for line in puzzle_input:
length, width, height = [int(item) for item in line.split("x")]
dimensions = [length, width, height]
smallest_side = min(dimensions)
dimensions.remove(smallest_side)
second_smallest_side = min(dimensions)
ribbon_needed += 2*smallest_side + 2*second_smallest_side + length*width*height
print(ribbon_needed)
| [] |
aaronwJordan/Lean | Algorithm.Python/Alphas/GreenblattMagicFormulaAlgorithm.py | 3486a6de56a739e44af274f421ac302cbbc98f8d | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Algorithm.Framework")
from System import *
from QuantConnect import *
from QuantConnect.Orders.Fees import ConstantFeeModel
from QuantConnect.Data.UniverseSelection import *
from QuantConnect.Indicators import *
from Selection.FundamentalUniverseSelectionModel import FundamentalUniverseSelectionModel
from datetime import timedelta, datetime
from math import ceil
from itertools import chain
#
# This alpha picks stocks according to Joel Greenblatt's Magic Formula.
# First, each stock is ranked depending on the relative value of the ratio EV/EBITDA. For example, a stock
# that has the lowest EV/EBITDA ratio in the security universe receives a score of one while a stock that has
# the tenth lowest EV/EBITDA score would be assigned 10 points.
#
# Then, each stock is ranked and given a score for the second valuation ratio, Return on Capital (ROC).
# Similarly, a stock that has the highest ROC value in the universe gets one score point.
# The stocks that receive the lowest combined score are chosen for insights.
#
# Source: Greenblatt, J. (2010) The Little Book That Beats the Market
#
# This alpha is part of the Benchmark Alpha Series created by QuantConnect which are open
# sourced so the community and client funds can see an example of an alpha.
#
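
# Minimal standalone sketch of the combined ranking described above (added for
# illustration only; the tickers and numbers are made up and this helper is not
# used by the algorithm below): rank by EV/EBITDA ascending, rank by return on
# capital descending, then order stocks by the sum of the two ranks.
def _magic_formula_rank_demo():
    ev_to_ebitda = {"AAA": 5.0, "BBB": 7.5, "CCC": 12.0}
    return_on_capital = {"AAA": 0.08, "BBB": 0.22, "CCC": 0.15}
    ev_rank = {s: r for r, s in enumerate(sorted(ev_to_ebitda, key=ev_to_ebitda.get), 1)}
    roc_rank = {s: r for r, s in enumerate(sorted(return_on_capital, key=return_on_capital.get, reverse=True), 1)}
    combined = {s: ev_rank[s] + roc_rank[s] for s in ev_to_ebitda}
    return sorted(combined, key=combined.get)  # lowest combined score = best pick
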
class GreenblattMagicFormulaAlgorithm(QCAlgorithmFramework):
''' Alpha Streams: Benchmark Alpha: Pick stocks according to Joel Greenblatt's Magic Formula'''
def Initialize(self):
self.SetStartDate(2018, 1, 1)
self.SetCash(100000)
#Set zero transaction fees
self.SetSecurityInitializer(lambda security: security.SetFeeModel(ConstantFeeModel(0)))
# select stocks using MagicFormulaUniverseSelectionModel
self.SetUniverseSelection(GreenBlattMagicFormulaUniverseSelectionModel())
# Use MagicFormulaAlphaModel to establish insights
self.SetAlpha(RateOfChangeAlphaModel())
# Equally weigh securities in portfolio, based on insights
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
## Set Immediate Execution Model
self.SetExecution(ImmediateExecutionModel())
## Set Null Risk Management Model
self.SetRiskManagement(NullRiskManagementModel())
class RateOfChangeAlphaModel(AlphaModel):
'''Uses Rate of Change (ROC) to create magnitude prediction for insights.'''
def __init__(self, *args, **kwargs):
self.lookback = kwargs['lookback'] if 'lookback' in kwargs else 1
self.resolution = kwargs['resolution'] if 'resolution' in kwargs else Resolution.Daily
self.predictionInterval = Time.Multiply(Extensions.ToTimeSpan(self.resolution), self.lookback)
self.symbolDataBySymbol = {}
def Update(self, algorithm, data):
insights = []
for symbol, symbolData in self.symbolDataBySymbol.items():
if symbolData.CanEmit:
insights.append(Insight.Price(symbol, self.predictionInterval, InsightDirection.Up, symbolData.Return, None))
return insights
def OnSecuritiesChanged(self, algorithm, changes):
# clean up data for removed securities
for removed in changes.RemovedSecurities:
symbolData = self.symbolDataBySymbol.pop(removed.Symbol, None)
if symbolData is not None:
symbolData.RemoveConsolidators(algorithm)
# initialize data for added securities
symbols = [ x.Symbol for x in changes.AddedSecurities ]
history = algorithm.History(symbols, self.lookback, self.resolution)
if history.empty: return
tickers = history.index.levels[0]
for ticker in tickers:
symbol = SymbolCache.GetSymbol(ticker)
if symbol not in self.symbolDataBySymbol:
symbolData = SymbolData(symbol, self.lookback)
self.symbolDataBySymbol[symbol] = symbolData
symbolData.RegisterIndicators(algorithm, self.resolution)
symbolData.WarmUpIndicators(history.loc[ticker])
class SymbolData:
'''Contains data specific to a symbol required by this model'''
def __init__(self, symbol, lookback):
self.Symbol = symbol
self.ROC = RateOfChange('{}.ROC({})'.format(symbol, lookback), lookback)
self.Consolidator = None
self.previous = 0
def RegisterIndicators(self, algorithm, resolution):
self.Consolidator = algorithm.ResolveConsolidator(self.Symbol, resolution)
algorithm.RegisterIndicator(self.Symbol, self.ROC, self.Consolidator)
def RemoveConsolidators(self, algorithm):
if self.Consolidator is not None:
algorithm.SubscriptionManager.RemoveConsolidator(self.Symbol, self.Consolidator)
def WarmUpIndicators(self, history):
for tuple in history.itertuples():
self.ROC.Update(tuple.Index, tuple.close)
@property
def Return(self):
return float(self.ROC.Current.Value)
@property
def CanEmit(self):
if self.previous == self.ROC.Samples:
return False
self.previous = self.ROC.Samples
return self.ROC.IsReady
def __str__(self, **kwargs):
return '{}: {:.2%}'.format(self.ROC.Name, (1 + self.Return)**252 - 1)
class GreenBlattMagicFormulaUniverseSelectionModel(FundamentalUniverseSelectionModel):
'''Defines a universe according to Joel Greenblatt's Magic Formula, as a universe selection model for the framework algorithm.
From the universe QC500, stocks are ranked using the valuation ratios, Enterprise Value to EBITDA (EV/EBITDA) and Return on Assets (ROA).
'''
def __init__(self,
filterFineData = True,
universeSettings = None,
securityInitializer = None):
'''Initializes a new default instance of the MagicFormulaUniverseSelectionModel'''
super().__init__(filterFineData, universeSettings, securityInitializer)
# Number of stocks in Coarse Universe
self.NumberOfSymbolsCoarse = 500
# Number of sorted stocks in the fine selection subset using the valuation ratio, EV to EBITDA (EV/EBITDA)
self.NumberOfSymbolsFine = 20
# Final number of stocks in security list, after sorted by the valuation ratio, Return on Assets (ROA)
self.NumberOfSymbolsInPortfolio = 10
self.lastMonth = -1
self.dollarVolumeBySymbol = {}
self.symbols = []
def SelectCoarse(self, algorithm, coarse):
'''Performs coarse selection for constituents.
The stocks must have fundamental data
The stock must have positive previous-day close price
The stock must have positive volume on the previous trading day'''
month = algorithm.Time.month
if month == self.lastMonth:
return self.symbols
self.lastMonth = month
# The stocks must have fundamental data
# The stock must have positive previous-day close price
# The stock must have positive volume on the previous trading day
filtered = [x for x in coarse if x.HasFundamentalData
and x.Volume > 0
and x.Price > 0]
        # sort the stocks by dollar volume and take the top NumberOfSymbolsCoarse (500)
top = sorted(filtered, key=lambda x: x.DollarVolume, reverse=True)[:self.NumberOfSymbolsCoarse]
self.dollarVolumeBySymbol = { i.Symbol: i.DollarVolume for i in top }
self.symbols = list(self.dollarVolumeBySymbol.keys())
return self.symbols
def SelectFine(self, algorithm, fine):
'''QC500: Performs fine selection for the coarse selection constituents
        The company's headquarters must be in the U.S.
The stock must be traded on either the NYSE or NASDAQ
At least half a year since its initial public offering
The stock's market cap must be greater than 500 million
Magic Formula: Rank stocks by Enterprise Value to EBITDA (EV/EBITDA)
Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA)'''
# QC500:
        ## The company's headquarters must be in the U.S.
## The stock must be traded on either the NYSE or NASDAQ
## At least half a year since its initial public offering
## The stock's market cap must be greater than 500 million
filteredFine = [x for x in fine if x.CompanyReference.CountryId == "USA"
and (x.CompanyReference.PrimaryExchangeID == "NYS" or x.CompanyReference.PrimaryExchangeID == "NAS")
and (algorithm.Time - x.SecurityReference.IPODate).days > 180
and x.EarningReports.BasicAverageShares.ThreeMonths * x.EarningReports.BasicEPS.TwelveMonths * x.ValuationRatios.PERatio > 5e8]
count = len(filteredFine)
if count == 0: return []
myDict = dict()
percent = float(self.NumberOfSymbolsFine / count)
# select stocks with top dollar volume in every single sector
for key in ["N", "M", "U", "T", "B", "I"]:
value = [x for x in filteredFine if x.CompanyReference.IndustryTemplateCode == key]
value = sorted(value, key=lambda x: self.dollarVolumeBySymbol[x.Symbol], reverse = True)
myDict[key] = value[:ceil(len(value) * percent)]
# stocks in QC500 universe
topFine = list(chain.from_iterable(myDict.values()))[:self.NumberOfSymbolsCoarse]
# Magic Formula:
## Rank stocks by Enterprise Value to EBITDA (EV/EBITDA)
## Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA)
# sort stocks in the security universe of QC500 based on Enterprise Value to EBITDA valuation ratio
sortedByEVToEBITDA = sorted(topFine, key=lambda x: x.ValuationRatios.EVToEBITDA , reverse=True)
# sort subset of stocks that have been sorted by Enterprise Value to EBITDA, based on the valuation ratio Return on Assets (ROA)
sortedByROA = sorted(sortedByEVToEBITDA[:self.NumberOfSymbolsFine], key=lambda x: x.ValuationRatios.ForwardROA, reverse=False)
# retrieve list of securites in portfolio
top = sortedByROA[:self.NumberOfSymbolsInPortfolio]
self.symbols = [f.Symbol for f in top]
return self.symbols
| [((15, 0, 15, 22), 'clr.AddReference', 'AddReference', ({(15, 13, 15, 21): '"""System"""'}, {}), "('System')", False, 'from clr import AddReference\n'), ((16, 0, 16, 35), 'clr.AddReference', 'AddReference', ({(16, 13, 16, 34): '"""QuantConnect.Common"""'}, {}), "('QuantConnect.Common')", False, 'from clr import AddReference\n'), ((17, 0, 17, 38), 'clr.AddReference', 'AddReference', ({(17, 13, 17, 37): '"""QuantConnect.Algorithm"""'}, {}), "('QuantConnect.Algorithm')", False, 'from clr import AddReference\n'), ((18, 0, 18, 39), 'clr.AddReference', 'AddReference', ({(18, 13, 18, 38): '"""QuantConnect.Indicators"""'}, {}), "('QuantConnect.Indicators')", False, 'from clr import AddReference\n'), ((19, 0, 19, 48), 'clr.AddReference', 'AddReference', ({(19, 13, 19, 47): '"""QuantConnect.Algorithm.Framework"""'}, {}), "('QuantConnect.Algorithm.Framework')", False, 'from clr import AddReference\n'), ((57, 74, 57, 93), 'QuantConnect.Orders.Fees.ConstantFeeModel', 'ConstantFeeModel', ({(57, 91, 57, 92): '(0)'}, {}), '(0)', False, 'from QuantConnect.Orders.Fees import ConstantFeeModel\n')] |
axelbr/berkeley-cs182-deep-neural-networks | hw2/deeplearning/style_transfer.py | 2bde27d9d5361d48dce7539d00b136209c1cfaa1 | import numpy as np
import torch
import torch.nn.functional as F
def content_loss(content_weight, content_current, content_target):
"""
Compute the content loss for style transfer.
Inputs:
- content_weight: Scalar giving the weighting for the content loss.
- content_current: features of the current image; this is a PyTorch Tensor of shape
(1, C_l, H_l, W_l).
- content_target: features of the content image, Tensor with shape (1, C_l, H_l, W_l).
Returns:
- scalar content loss
"""
##############################################################################
# YOUR CODE HERE #
##############################################################################
_, C, H, W = content_current.shape
current_features = content_current.view(C, H*W)
target_features = content_target.view(C, H*W)
loss = content_weight * torch.sum(torch.square(current_features - target_features))
return loss
##############################################################################
# END OF YOUR CODE #
##############################################################################
def gram_matrix(features, normalize=True):
"""
Compute the Gram matrix from features.
Inputs:
- features: PyTorch Variable of shape (N, C, H, W) giving features for
a batch of N images.
- normalize: optional, whether to normalize the Gram matrix
If True, divide the Gram matrix by the number of neurons (H * W * C)
Returns:
- gram: PyTorch Variable of shape (N, C, C) giving the
(optionally normalized) Gram matrices for the N input images.
"""
##############################################################################
# YOUR CODE HERE #
##############################################################################
C, H, W = features.shape[-3], features.shape[-2], features.shape[-1]
reshaped = features.view(-1, C, H*W)
G = reshaped @ reshaped.transpose(dim0=1, dim1=2)
if normalize:
G = G / (H*W*C)
return G
##############################################################################
# END OF YOUR CODE #
##############################################################################
def style_loss(feats, style_layers, style_targets, style_weights):
"""
Computes the style loss at a set of layers.
Inputs:
- feats: list of the features at every layer of the current image, as produced by
the extract_features function.
- style_layers: List of layer indices into feats giving the layers to include in the
style loss.
- style_targets: List of the same length as style_layers, where style_targets[i] is
a PyTorch Variable giving the Gram matrix the source style image computed at
layer style_layers[i].
- style_weights: List of the same length as style_layers, where style_weights[i]
is a scalar giving the weight for the style loss at layer style_layers[i].
Returns:
- style_loss: A PyTorch Variable holding a scalar giving the style loss.
"""
# Hint: you can do this with one for loop over the style layers, and should
# not be very much code (~5 lines). You will need to use your gram_matrix function.
##############################################################################
# YOUR CODE HERE #
##############################################################################
loss = 0
for i, l in enumerate(style_layers):
A, G = style_targets[i], gram_matrix(feats[l])
loss += style_weights[i] * torch.sum(torch.square(G - A))
return loss
##############################################################################
# END OF YOUR CODE #
##############################################################################
def tv_loss(img, tv_weight):
"""
Compute total variation loss.
Inputs:
- img: PyTorch Variable of shape (1, 3, H, W) holding an input image.
- tv_weight: Scalar giving the weight w_t to use for the TV loss.
Returns:
- loss: PyTorch Variable holding a scalar giving the total variation loss
for img weighted by tv_weight.
"""
# Your implementation should be vectorized and not require any loops!
##############################################################################
# YOUR CODE HERE #
##############################################################################
tv = torch.square(img[..., 1:, :-1] - img[..., :-1, :-1]) + torch.square(img[..., :-1, 1:] - img[..., :-1, :-1])
return tv_weight * torch.sum(tv)
##############################################################################
# END OF YOUR CODE #
##############################################################################
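

# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original assignment): exercises the
# losses above on random tensors to check that each call returns a scalar.
# All tensor sizes, layer indices and weights below are arbitrary assumptions.
if __name__ == "__main__":
    feats = [torch.randn(1, 8, 16, 16) for _ in range(4)]
    style_targets = [gram_matrix(torch.randn(1, 8, 16, 16)) for _ in range(2)]
    print(content_loss(1e-2, feats[0], torch.randn(1, 8, 16, 16)))
    print(style_loss(feats, [1, 3], style_targets, [1.0, 0.5]))
    print(tv_loss(torch.randn(1, 3, 32, 32), 1e-3))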
| [((110, 9, 110, 61), 'torch.square', 'torch.square', ({(110, 22, 110, 60): '(img[(...), 1:, :-1] - img[(...), :-1, :-1])'}, {}), '(img[(...), 1:, :-1] - img[(...), :-1, :-1])', False, 'import torch\n'), ((110, 64, 110, 116), 'torch.square', 'torch.square', ({(110, 77, 110, 115): '(img[(...), :-1, 1:] - img[(...), :-1, :-1])'}, {}), '(img[(...), :-1, 1:] - img[(...), :-1, :-1])', False, 'import torch\n'), ((111, 23, 111, 36), 'torch.sum', 'torch.sum', ({(111, 33, 111, 35): 'tv'}, {}), '(tv)', False, 'import torch\n'), ((26, 38, 26, 86), 'torch.square', 'torch.square', ({(26, 51, 26, 85): '(current_features - target_features)'}, {}), '(current_features - target_features)', False, 'import torch\n'), ((87, 45, 87, 64), 'torch.square', 'torch.square', ({(87, 58, 87, 63): '(G - A)'}, {}), '(G - A)', False, 'import torch\n')] |
brittjay0104/rose6icse | submissions/available/Johnson-CausalTesting/Holmes/fuzzers/Peach/Transformers/Encode/HTMLDecode.py | 7b24743b7a805b9ed094b67e4a08bad7894f0e84 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import xml.sax.saxutils
from Peach.transformer import Transformer
class HtmlDecode(Transformer):
"""Decode HTML encoded string."""
def realEncode(self, data):
return xml.sax.saxutils.unescape(data)
    def realDecode(self, data):
return xml.sax.saxutils.escape(data)
| [] |
AuraUAS/aura-core | src/archive/greatcircle.py | 4711521074db72ba9089213e14455d89dc5306c0 | # From: http://williams.best.vwh.net/avform.htm#GCF
import math
EPS = 0.0001
d2r = math.pi / 180.0
r2d = 180.0 / math.pi
rad2nm = (180.0 * 60.0) / math.pi
nm2rad = 1.0 / rad2nm
nm2meter = 1852
meter2nm = 1.0 / nm2meter
# p1 = (lat1(deg), lon1(deg))
# p2 = (lat2(deg), lon2(deg))
def course_and_dist(p1, p2):
    # this formulation uses positive lon = W (opposite of usual, so we
# invert the longitude.)
lat1 = p1[0] * d2r
lon1 = -p1[1] * d2r
lat2 = p2[0] * d2r
lon2 = -p2[1] * d2r
dist_rad = 2.0 * math.asin(math.sqrt((math.sin((lat1-lat2)/2.0))**2.0 + math.cos(lat1)*math.cos(lat2)*(math.sin((lon1-lon2)/2.0))**2))
# if starting point is on a pole
if math.cos(lat1) < EPS:
# EPS a small number ~ machine precision
if (lat1 > 0.0):
# starting from N pole
tc1_rad = math.pi
else:
# starting from S pole
tc1_rad = 2.0 * math.pi
    else:
        # For starting points other than the poles:
        if dist_rad < 0.000000001:
            # about a cm
            tc1_rad = 0.0
        else:
            num1 = math.sin(lat2) - math.sin(lat1)*math.cos(dist_rad)
            den1 = math.sin(dist_rad) * math.cos(lat1)
            tmp1 = num1 / den1
            if tmp1 < -1.0:
                #print "CLIPPING TMP1 to -1.0!"
                tmp1 = -1.0
            if tmp1 > 1.0:
                #print "CLIPPING TMP1 to 1.0!"
                tmp1 = 1.0
            if math.sin(lon2-lon1) < 0.0:
                tc1_rad = math.acos(tmp1)
            else:
                tc1_rad = 2.0 * math.pi - math.acos(tmp1)
dist_nm = dist_rad * rad2nm
dist_m = dist_nm * nm2meter
tc1_deg = tc1_rad * r2d
return (tc1_deg, dist_m)
def project_course_distance(p1, course_deg, dist_m):
lat1 = p1[0] * d2r
lon1 = -p1[1] * d2r
tc = course_deg * d2r
d = dist_m * meter2nm * nm2rad
lat = math.asin(math.sin(lat1)*math.cos(d)+math.cos(lat1)*math.sin(d)*math.cos(tc))
if math.cos(lat) < EPS:
lon = lon1 # endpoint a pole
else:
lon = math.fmod(lon1-math.asin(math.sin(tc)*math.sin(d)/math.cos(lat))+math.pi, 2*math.pi) - math.pi
return (lat*r2d, -lon*r2d)
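

if __name__ == "__main__":
    # Hedged self-check added for illustration (the coordinates are arbitrary
    # assumptions): compute course/distance between two points, then project
    # the first point along that course and distance to land near the second.
    p1 = (45.0, -122.0)
    p2 = (47.0, -120.0)
    crs_deg, dist_m = course_and_dist(p1, p2)
    print("course = %.2f deg, distance = %.1f m" % (crs_deg, dist_m))
    print("projected end point =", project_course_distance(p1, crs_deg, dist_m))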
| [((26, 7, 26, 21), 'math.cos', 'math.cos', ({(26, 16, 26, 20): 'lat1'}, {}), '(lat1)', False, 'import math\n'), ((67, 7, 67, 20), 'math.cos', 'math.cos', ({(67, 16, 67, 19): 'lat'}, {}), '(lat)', False, 'import math\n'), ((40, 15, 40, 29), 'math.sin', 'math.sin', ({(40, 24, 40, 28): 'lat2'}, {}), '(lat2)', False, 'import math\n'), ((41, 15, 41, 33), 'math.sin', 'math.sin', ({(41, 24, 41, 32): 'dist_rad'}, {}), '(dist_rad)', False, 'import math\n'), ((41, 36, 41, 50), 'math.cos', 'math.cos', ({(41, 45, 41, 49): 'lat1'}, {}), '(lat1)', False, 'import math\n'), ((49, 11, 49, 30), 'math.sin', 'math.sin', ({(49, 20, 49, 29): '(lon2 - lon1)'}, {}), '(lon2 - lon1)', False, 'import math\n'), ((50, 22, 50, 37), 'math.acos', 'math.acos', ({(50, 32, 50, 36): 'tmp1'}, {}), '(tmp1)', False, 'import math\n'), ((40, 32, 40, 46), 'math.sin', 'math.sin', ({(40, 41, 40, 45): 'lat1'}, {}), '(lat1)', False, 'import math\n'), ((40, 47, 40, 65), 'math.cos', 'math.cos', ({(40, 56, 40, 64): 'dist_rad'}, {}), '(dist_rad)', False, 'import math\n'), ((52, 38, 52, 53), 'math.acos', 'math.acos', ({(52, 48, 52, 52): 'tmp1'}, {}), '(tmp1)', False, 'import math\n'), ((66, 20, 66, 34), 'math.sin', 'math.sin', ({(66, 29, 66, 33): 'lat1'}, {}), '(lat1)', False, 'import math\n'), ((66, 35, 66, 46), 'math.cos', 'math.cos', ({(66, 44, 66, 45): 'd'}, {}), '(d)', False, 'import math\n'), ((66, 74, 66, 86), 'math.cos', 'math.cos', ({(66, 83, 66, 85): 'tc'}, {}), '(tc)', False, 'import math\n'), ((66, 47, 66, 61), 'math.cos', 'math.cos', ({(66, 56, 66, 60): 'lat1'}, {}), '(lat1)', False, 'import math\n'), ((66, 62, 66, 73), 'math.sin', 'math.sin', ({(66, 71, 66, 72): 'd'}, {}), '(d)', False, 'import math\n'), ((23, 42, 23, 67), 'math.sin', 'math.sin', ({(23, 51, 23, 66): '((lat1 - lat2) / 2.0)'}, {}), '((lat1 - lat2) / 2.0)', False, 'import math\n'), ((23, 76, 23, 90), 'math.cos', 'math.cos', ({(23, 85, 23, 89): 'lat1'}, {}), '(lat1)', False, 'import math\n'), ((23, 91, 23, 105), 'math.cos', 'math.cos', ({(23, 100, 23, 104): 'lat2'}, {}), '(lat2)', False, 'import math\n'), ((23, 107, 23, 132), 'math.sin', 'math.sin', ({(23, 116, 23, 131): '((lon1 - lon2) / 2.0)'}, {}), '((lon1 - lon2) / 2.0)', False, 'import math\n'), ((70, 64, 70, 77), 'math.cos', 'math.cos', ({(70, 73, 70, 76): 'lat'}, {}), '(lat)', False, 'import math\n'), ((70, 39, 70, 51), 'math.sin', 'math.sin', ({(70, 48, 70, 50): 'tc'}, {}), '(tc)', False, 'import math\n'), ((70, 52, 70, 63), 'math.sin', 'math.sin', ({(70, 61, 70, 62): 'd'}, {}), '(d)', False, 'import math\n')] |
JoeCare/flask_geolocation_api | app/__init__.py | ad9ea0d22b738a7af8421cc57c972bd0e0fa80da | import connexion, os
from connexion.resolver import RestyResolver
from flask import json
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
# Globally accessible libraries
db = SQLAlchemy()
mm = Marshmallow()
def init_app():
"""Initialize the Connexion application."""
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
openapi_path = os.path.join(BASE_DIR, "../")
conn_app = connexion.FlaskApp(
__name__, specification_dir=openapi_path, options={
"swagger_ui": True,
"serve_spec": True
}
)
conn_app.add_api("openapi.yaml", resolver=RestyResolver('run'),
strict_validation=True)
# Flask app and getting into app_context
app = conn_app.app
# Load application config
app.config.from_object('config.ProdConfig')
app.json_encoder = json.JSONEncoder
# Initialize Plugins
db.init_app(app)
mm.init_app(app)
with app.app_context():
# Include our Routes/views
import run
# Register Blueprints
# app.register_blueprint(auth.auth_bp)
# app.register_blueprint(admin.admin_bp)
return app
| [((9, 5, 9, 17), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ({}, {}), '()', False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((10, 5, 10, 18), 'flask_marshmallow.Marshmallow', 'Marshmallow', ({}, {}), '()', False, 'from flask_marshmallow import Marshmallow\n'), ((16, 19, 16, 48), 'os.path.join', 'os.path.join', ({(16, 32, 16, 40): 'BASE_DIR', (16, 42, 16, 47): '"""../"""'}, {}), "(BASE_DIR, '../')", False, 'import connexion, os\n'), ((17, 15, 22, 9), 'connexion.FlaskApp', 'connexion.FlaskApp', (), '', False, 'import connexion, os\n'), ((15, 31, 15, 56), 'os.path.dirname', 'os.path.dirname', ({(15, 47, 15, 55): '__file__'}, {}), '(__file__)', False, 'import connexion, os\n'), ((23, 46, 23, 66), 'connexion.resolver.RestyResolver', 'RestyResolver', ({(23, 60, 23, 65): '"""run"""'}, {}), "('run')", False, 'from connexion.resolver import RestyResolver\n')] |
vkmanojk/Networks-VirtualLAN | RIPv2-Simulation/Router.py | 52c6546da611a7a7b9fdea65c567b284664a99b4 | '''
Summary: Program that implements a routing daemon based on the
RIP version 2 protocol from RFC2453.
Usage: python3 Router.py <router_config_file>
Configuration File:
The user supplies a router configuration file of the format:
[Settings]
router-id = <router_number>
input-ports = <input> [, <input>, ...]
outputs = <output>-<metric>-<destination_router>
[, <output>-<metric>-<destination_router>, ...]
where,
router_number: ID of router between 1 - 64000.
input: port number between 1024 - 64000.
            output: port number between 1024 - 64000,
not equal to any inputs.
metric: metric of output between 1 - 16.
destination_router: ID of destination router.
Description:
This program implements a basic RIPv2 routing protocol from RFC2453
for routing computations in computer networks. It takes a configuration
file as shown above and sets up a router with a new socket for each
input-port.
The RIPv2 protocol uses a routing table to keep track of all reachable
routers on the network along with their metric/cost and the direct
next hop router ID along the route to that destination router. However,
it can only send messages to the direct neighbours specified in outputs.
The protocol uses the Bellman-Ford distance vector algorithm to compute
the lowest cost route to each router in the network. If the metric is
16 or greater, the router is considered unreachable.
The routing table initially starts with a single route entry (RTE) for
itself with a metric of zero. The routing table is periodically
    transmitted to each of its direct output ports via an unsolicited
response message as defined in RFC2453 section 3.9.2 and 4. This is
performed on a separate thread so it does not interfere with other
operations
    The router receives messages from other routers by using the python select()
function which blocks until a message is ready to be read. Once a
message is received the header and contents are validated.
If the message is valid each RTE is processed according to RFC2453
section 3.9.2.
If a new router is found the RTE is added
to the routing table, adding the cost to the metric for the output
the message was received on.
If the RTE already exists, but the metric is smaller, the metric
is updated to the lower metric.
If the lower metric is from a different next hop router, change the
next hop.
If nothing has changed, restart the timeout timer.
If RTE metric >= max metric of 16, mark the entry for
garbage collection and update the metric in the table.
If any change has occurred in the routing table as a result of a
received message, a triggered update (RFC2453 section 3.10.1) is sent
to all outputs with the updated entries. Triggered updates are sent with
a random delay between 1 - 5 seconds to prevent synchronized updates.
Request messages are not implemented in this program.
Timers (all timers are on separate threads) (RFC2453 section 3.8):
Update timer - Periodic unsolicited response message sent to all
outputs. The period is adjusted each time to a random value
between 0.8 * BASE_TIMER and 1.2 * BASE_TIMER to prevent
synchronized updates.
    Timeout - used to check the routing table for RTEs which have
        not been updated within the ROUTE_TIMEOUT interval. If
a router has not been heard from within this time, then set the
metric to the max metric of 16 and start the garbage collection
timer.
Garbage timer - used to check the routing table for RTEs set
for garbage collection. If the timeout >= DELETE_TIMEOUT,
mark the RTE for deletion.
Garbage Collection - used to check the routing table for RTEs
marked for deletion, and removes those entries from the table.
'''
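
# Example configuration file (hypothetical values, added purely to illustrate
# the format described in the docstring above):
#
#   [Settings]
#   router-id = 1
#   input-ports = 6110, 6201, 7345
#   outputs = 5000-1-2, 5002-5-6
#
# i.e. router 1 listens on UDP ports 6110, 6201 and 7345, reaches router 2
# through port 5000 at metric 1, and reaches router 6 through port 5002 at
# metric 5.
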
import configparser
import select
import socket
import sys
import time
import threading
import struct
import datetime
from random import randint, randrange
DEBUG = False
HOST = '127.0.0.1' # localhost
BASE_TIMER = 5
MAX_METRIC = 16
ROUTE_TIMEOUT = BASE_TIMER * 6
DELETE_TIMEOUT = BASE_TIMER * 4
AF_INET = 2
# ===========================================================================
# TRANSITIONS
class Transistion():
'''Class Representing a transition between states.'''
def __init__(self, to_state):
self.to_state = to_state
def execute(self):
'''Run the transition functions'''
pass
# ===========================================================================
# STATES
class State():
'''Class Representing a generic state'''
def __init__(self, fsm):
self.fsm = fsm
def enter(self):
'''Execute functions for entering a state'''
pass
def execute(self):
'''Execute functions while in state'''
pass
def exit(self):
'''Execute functions for leaving a state'''
pass
class StartUp(State):
'''Class Representing the Start up state which reads the configuration file
'''
def __init__(self, fsm):
super(StartUp, self).__init__(fsm)
def execute(self):
'''Execute the configuration functions'''
print_message("Loading Configuration File: '"
+ self.fsm.router.config_file + "'")
config = configparser.ConfigParser()
config.read(self.fsm.router.config_file)
self.get_router_id(config)
self.setup_inputs(config)
self.get_outputs(config)
self.setup_routing_table()
self.fsm.router.print_routing_table()
self.fsm.to_transition("toWaiting")
def exit(self):
'''Print complete message'''
print_message("Router Setup Complete.")
def get_router_id(self, config):
'''Read the router id number from the configuration file'''
if 1 <= int(config['Settings']['router-id']) <= 64000:
self.fsm.router.router_settings['id'] = \
int(config['Settings']['router-id'])
else:
raise Exception('Invalid Router ID Number')
def get_outputs(self, config):
'''Return a dictionary of outputs containing port, cost and destination
router id from the Configuration file'''
outputs = config['Settings']['outputs'].split(', ')
outputs = [i.split('-') for i in outputs]
self.fsm.router.router_settings['outputs'] = {}
existing_ports = []
for output in outputs:
is_valid_port = 1024 <= int(output[0]) <= 64000 and not \
int(output[0]) in existing_ports
is_valid_cost = 1 <= int(output[1]) < 16
is_valid_id = 1 <= int(output[2]) <= 64000
if is_valid_port and is_valid_cost and is_valid_id:
existing_ports.append(int(output[0]))
self.fsm.router.router_settings['outputs'][int(output[2])] = \
{'metric': int(output[1]),
'port': int(output[0])}
else:
raise Exception('Invalid Outputs')
def setup_inputs(self, config):
'''Create input sockets from the inputs specified in the config file'''
# get inputs from configuration file
ports = config['Settings']['input-ports'].split(', ')
inputs = []
for port in ports:
if 1024 <= int(port) <= 64000 and not int(port) in inputs:
inputs.append(int(port))
else:
raise Exception('Invalid Port Number')
self.fsm.router.router_settings['inputs'] = {}
# create socket for each input port
for port in inputs:
try:
self.fsm.router.router_settings['inputs'][port] = \
socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print_message('Socket ' + str(port) + ' Created.')
except socket.error as msg:
print('Failed to create socket. Message: ' + str(msg))
sys.exit()
# bind port to socket
try:
self.fsm.router.router_settings['inputs'][port].bind(
(HOST, port))
print_message('Socket ' + str(port) + ' Bind Complete.')
except socket.error as msg:
print('Failed to create socket. Message ' + str(msg))
sys.exit()
def setup_routing_table(self):
'''Setup routing table with the outputs specified in the config file'''
self.fsm.router.routing_table[self.fsm.router.router_settings['id']] = \
RIPRouteEntry(address=self.fsm.router.router_settings['id'],
nexthop=0,
metric=0,
imported=True)
class Waiting(State):
'''
Class representing the waiting state of the FSM where the router waits
for messages to be received on its input sockets. When a message is
    received, the state changes to the ReadMessage state.
'''
def __init__(self, fsm):
super(Waiting, self).__init__(fsm)
def enter(self):
'''Display State entry message'''
print_message("Entering idle state...")
def execute(self):
'''Waits for input sockets to be readable and then changes the state
to process the received message.'''
readable = select.select(
self.fsm.router.router_settings['inputs'].values(), [], [])
if readable[0]:
self.fsm.router.readable_ports = readable[0]
self.fsm.to_transition("toReadMessage")
def exit(self):
'''Display State exit message'''
print_message("Message Received")
class ReadMessage(State):
'''Class representing the state for reading messages received on the input
sockets'''
def __init__(self, fsm):
super(ReadMessage, self).__init__(fsm)
def enter(self):
print_message("Reading Messages...")
def execute(self):
for port in self.fsm.router.readable_ports:
packet = RIPPacket(port.recvfrom(1024)[0])
self.fsm.router.update_routing_table(packet)
if self.fsm.router.route_change:
self.fsm.router.trigger_update()
self.fsm.router.print_routing_table()
self.fsm.to_transition("toWaiting")
def exit(self):
print_message("Messages Read.")
# ===========================================================================
# FINITE STATE MACHINE
class RouterFSM():
'''Class representing the Router finite state machine'''
def __init__(self, rip_router):
self.router = rip_router
self.states = {}
self.transitions = {}
self.cur_state = None
self.trans = None
def add_transistion(self, trans_name, transition):
'''Add a new transition to the FSM'''
self.transitions[trans_name] = transition
def add_state(self, state_name, state):
'''Add a new state to the FSM'''
self.states[state_name] = state
def set_state(self, state_name):
'''Set the current state of the FSM'''
self.cur_state = self.states[state_name]
def to_transition(self, to_trans):
'''Set the current transition of the FSM'''
self.trans = self.transitions[to_trans]
def execute(self):
'''Run the FSM'''
if self.trans:
self.cur_state.exit()
self.trans.execute()
self.set_state(self.trans.to_state)
self.cur_state.enter()
self.trans = None
self.cur_state.execute()
# ===========================================================================
# IMPLEMENTATION
class RIPPacket:
'''Class representing a RIP packet containing a header and body as defined
in RFC2453 RIPv2 section 4.'''
def __init__(self, data=None, header=None, rtes=None):
if data:
self._init_from_network(data)
elif header and rtes:
self._init_from_host(header, rtes)
else:
raise ValueError
def __repr__(self):
return "RIPPacket: Command {}, Ver. {}, number of RTEs {}.". \
format(self.header.cmd, self.header.ver, len(self.rtes))
def _init_from_network(self, data):
'''Init for RIPPacket if data is from the network'''
# Packet Validation
datalen = len(data)
if datalen < RIPHeader.SIZE:
raise FormatException
malformed_rtes = (datalen - RIPHeader.SIZE) % RIPRouteEntry.SIZE
if malformed_rtes:
raise FormatException
# Convert bytes in packet to header and RTE data
num_rtes = int((datalen - RIPHeader.SIZE) / RIPRouteEntry.SIZE)
self.header = RIPHeader(data[0:RIPHeader.SIZE])
self.rtes = []
rte_start = RIPHeader.SIZE
rte_end = RIPHeader.SIZE + RIPRouteEntry.SIZE
# Loop over data packet to obtain each RTE
for i in range(num_rtes):
self.rtes.append(RIPRouteEntry(rawdata=data[rte_start:rte_end],
src_id=self.header.src))
rte_start += RIPRouteEntry.SIZE
rte_end += RIPRouteEntry.SIZE
def _init_from_host(self, header, rtes):
'''Init for imported data'''
if header.ver != 2:
raise ValueError("Only Version 2 is supported.")
self.header = header
self.rtes = rtes
def serialize(self):
        '''Return the byte string representing this packet for network
transmission'''
packed = self.header.serialize()
for rte in self.rtes:
packed += rte.serialize()
return packed
class RIPHeader:
'''Class representing the header of a RIP packet'''
FORMAT = "!BBH"
SIZE = struct.calcsize(FORMAT)
TYPE_RESPONSE = 2
VERSION = 2
def __init__(self, rawdata=None, router_id=None):
self.packed = None
if rawdata:
self._init_from_network(rawdata)
elif router_id:
self._init_from_host(router_id)
else:
raise ValueError
def __repr__(self):
return "RIP Header (cmd = {}, ver = {}, src = {})".format(self.cmd,
self.ver,
self.src)
def _init_from_network(self, rawdata):
'''init for data from network'''
header = struct.unpack(self.FORMAT, rawdata)
self.cmd = header[0]
self.ver = header[1]
self.src = header[2]
def _init_from_host(self, router_id):
'''Init for data from host'''
self.cmd = self.TYPE_RESPONSE
self.ver = self.VERSION
self.src = router_id
def serialize(self):
        '''Return the byte string representing this header for network
transmission'''
return struct.pack(self.FORMAT, self.cmd, self.ver, self.src)
class RIPRouteEntry:
'''Class representing a single RIP route entry (RTE)'''
FORMAT = "!HHIII"
SIZE = struct.calcsize(FORMAT)
MIN_METRIC = 0
MAX_METRIC = 16
def __init__(self, rawdata=None, src_id=None, address=None,
nexthop=None, metric=None, imported=False):
self.changed = False
self.imported = imported
self.init_timeout()
if rawdata and src_id != None:
self._init_from_network(rawdata, src_id)
elif address and nexthop != None and metric != None:
self._init_from_host(address, nexthop, metric)
else:
raise ValueError
def __repr__(self):
template = "|{:^11}|{:^10}|{:^11}|{:^15}|{:^10}|{:^13}|"
# Check that timeout is set
if self.timeout == None:
return template.format(self.addr, self.metric, self.nexthop,
self.changed, self.garbage,
str(self.timeout))
else:
timeout = (datetime.datetime.now() - self.timeout).total_seconds()
return template.format(self.addr, self.metric, self.nexthop,
self.changed, self.garbage,
round(timeout, 1))
def _init_from_host(self, address, nexthop, metric):
'''Init for data from host'''
self.afi = AF_INET
self.tag = 0 # not used
self.addr = address
self.nexthop = nexthop
self.metric = metric
def _init_from_network(self, rawdata, src_id):
'''Init for data received from network'''
rte = struct.unpack(self.FORMAT, rawdata)
self.afi = rte[0]
self.tag = rte[1]
self.addr = rte[2]
self.set_nexthop(rte[3])
self.metric = rte[4]
if self.nexthop == 0:
self.nexthop = src_id
# Validation
if not self.MIN_METRIC <= self.metric <= self.MAX_METRIC:
raise FormatException
def init_timeout(self):
'''Initialize the timeout property'''
if self.imported:
self.timeout = None
else:
self.timeout = datetime.datetime.now()
self.garbage = False
self.marked_for_delection = False
def __eq__(self, other):
if self.afi == other.afi and \
self.addr == other.addr and \
self.tag == other.tag and \
self.nexthop == other.nexthop and \
self.metric == other.metric:
return True
else:
return False
def set_nexthop(self, nexthop):
'''Set the nexthop property'''
self.nexthop = nexthop
def serialize(self):
'''Pack entries into typical RIPv2 packet format for sending over the
network. '''
return struct.pack(self.FORMAT, self.afi, self.tag, self.addr,
self.nexthop, self.metric)
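
# Hedged illustration (hypothetical values, added for clarity): how the three
# classes above fit together when Router.update() builds a response further
# below.
#
#   hdr = RIPHeader(router_id=1)
#   rte = RIPRouteEntry(address=2, nexthop=0, metric=1, imported=True)
#   raw = RIPPacket(header=hdr, rtes=[rte]).serialize()
#   # len(raw) == RIPHeader.SIZE + RIPRouteEntry.SIZE  (4 + 16 bytes here)
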
class FormatException(Exception):
'''Class representing the Format Exception'''
def __init__(self, message=""):
self.message = message
class Router:
'''Class representing a single router'''
def __init__(self, config_file):
self.fsm = RouterFSM(self)
self.config_file = config_file
# Dictionary of router settings, including router-id, inputs and
# outputs
self.router_settings = {}
self.readable_ports = []
# Dictionary of routing table
self.routing_table = {}
self.route_change = False
# STATES
self.fsm.add_state("StartUp", StartUp(self.fsm))
self.fsm.add_state("Waiting", Waiting(self.fsm))
self.fsm.add_state("ReadMessage", ReadMessage(self.fsm))
# TRANSITIONS
self.fsm.add_transistion("toWaiting", Transistion("Waiting"))
self.fsm.add_transistion("toReadMessage", Transistion("ReadMessage"))
self.fsm.set_state("StartUp")
def execute(self):
'''Run the router's finite state machine'''
self.fsm.execute()
def update_routing_table(self, packet):
'''Update Routing table if new route info exist'''
for rte in packet.rtes:
# ignore RTEs of self
if rte.addr != self.fsm.router.router_settings['id']:
bestroute = self.routing_table.get(rte.addr)
# set nexthop to source router and calculate metric
rte.set_nexthop(packet.header.src)
rte.metric = min(rte.metric +
self.router_settings['outputs'][
packet.header.src]['metric'],
RIPRouteEntry.MAX_METRIC)
                # Route doesn't yet exist
if not bestroute:
# ignore RTEs with a metric of MAX_METRIC
if rte.metric == RIPRouteEntry.MAX_METRIC:
return
# Add new RTE to routing table
rte.changed = True
self.route_change = True
self.routing_table[rte.addr] = rte
print_message("RTE added for Router: " + str(rte.addr))
return
else:
# Route already exists
if rte.nexthop == bestroute.nexthop:
if bestroute.metric != rte.metric:
if bestroute.metric != RIPRouteEntry.MAX_METRIC \
and rte.metric >= RIPRouteEntry.MAX_METRIC:
# mark for garbage collection
bestroute.metric = RIPRouteEntry.MAX_METRIC
bestroute.garbage = True
bestroute.changed = True
self.route_change = True
else:
self.update_route(bestroute, rte)
# Route still exists with same values
elif not bestroute.garbage:
bestroute.init_timeout()
# Lower metric on existing route
elif rte.metric < bestroute.metric:
self.update_route(bestroute, rte)
def update_route(self, bestroute, rte):
'''Update an existing route entry with new route info'''
bestroute.init_timeout()
bestroute.garbage = False
bestroute.changed = True
bestroute.metric = rte.metric
bestroute.nexthop = rte.nexthop
self.route_change = True
print_message("RTE for Router: " + str(rte.addr) +
" updated with metric=" + str(rte.metric) +
", nexthop=" + str(rte.nexthop) + ".")
def print_routing_table(self):
'''Print the routing table to the terminal'''
line = "+-----------+----------+-----------+---------------+----------+-------------+"
print(line)
print(
"| Routing Table (Router "
+ str(self.router_settings['id']) + ") |")
print(line)
print(
"|Router ID | Metric | NextHop | ChangedFlag | Garbage | Timeout(s) |")
print(line)
print(self.routing_table[self.router_settings['id']])
print(
"+===========+==========+===========+===============+==========+=============+")
for entry in self.routing_table:
if entry != self.router_settings['id']:
print(self.routing_table[entry])
print(line)
print('\n')
def trigger_update(self):
'''Send Routing update for only the routes which have changed'''
changed_rtes = []
print_message("Sending Trigger update.")
for rte in self.routing_table.values():
if rte.changed:
changed_rtes.append(rte)
rte.changed = False
self.route_change = False
# send update with random delay between 1 and 5 seconds
delay = randint(1, 5)
        threading.Timer(delay, self.update, [changed_rtes]).start()
def update(self, entries):
'''Send a message to all output ports'''
if self.router_settings != {}:
sock = list(self.router_settings['inputs'].values())[1]
local_header = RIPHeader(router_id=self.router_settings['id'])
for output in self.router_settings['outputs']:
# Split horizon
# Remove RTES for which nexthop == output
split_horizon_entries = []
for entry in entries:
if entry.nexthop != output:
split_horizon_entries.append(entry)
else:
# Poison reverse
# Create new entry to get around some funky referencing
# When doing poisoned_entry = entry
poisoned_entry = RIPRouteEntry(rawdata=None,
src_id=None, address=entry.addr,
nexthop=entry.nexthop, metric= RIPRouteEntry.MAX_METRIC,
imported=entry.imported)
split_horizon_entries.append(poisoned_entry)
# comment out to disable split horizon
packet = RIPPacket(
header=local_header, rtes=split_horizon_entries)
# Uncomment to disable split horizon
# packet = RIPPacket(header=local_header, rtes=entries)
sock.sendto(packet.serialize(),
(HOST,
self.router_settings['outputs'][output]["port"]))
print_message("Message Sent To Router: " + str(output))
def check_timeout(self):
'''Check the current timeout value for each RTE in the routing table.
If the time difference with now is greater than ROUTE_TIMEOUT, then
set the metric to 16 and start the garbage collection timer.'''
print_message("Checking timeout...")
if self.routing_table != {}:
for rte in self.routing_table.values():
if rte.timeout != None and \
(datetime.datetime.now() - rte.timeout).total_seconds() \
>= ROUTE_TIMEOUT:
rte.garbage = True
rte.changed = True
self.route_change = True
rte.metric = RIPRouteEntry.MAX_METRIC
rte.timeout = datetime.datetime.now()
self.print_routing_table()
print_message("Router: " + str(rte.addr) + " timed out.")
def garbage_timer(self):
'''Check the status of the garbage property of each RTE. If true, and
the timeout value difference with now is greater than DELETE_TIMEOUT,
mark it for deletion'''
print_message("Checking garbage timeout...")
if self.routing_table != {}:
for rte in self.routing_table.values():
if rte.garbage:
if (datetime.datetime.now() - rte.timeout).total_seconds() \
>= DELETE_TIMEOUT:
rte.marked_for_delection = True
def garbage_collection(self):
        '''Check the routing table for RTEs that are marked for deletion and
remove them.'''
print_message("Collecting Garbage...")
if self.routing_table != {}:
delete_routes = []
for rte in self.routing_table.values():
if rte.marked_for_delection:
delete_routes.append(rte.addr)
print_message("Router: " + str(rte.addr) + " has been " +
"removed from the routing table.")
for entry in delete_routes:
del self.routing_table[entry]
self.print_routing_table()
def timer(self, function, param=None):
'''Start a periodic timer which calls a specified function'''
if param != None:
function(list(param.values()))
period = BASE_TIMER * randrange(8, 12, 1) / 10
else:
period = BASE_TIMER
function()
threading.Timer(period, self.timer, [function, param]).start()
def start_timers(self):
'''Start the timers on separate threads'''
self.timer(self.update, param=self.routing_table)
self.timer(self.check_timeout)
self.timer(self.garbage_timer)
self.timer(self.garbage_collection)
def main_loop(self):
'''Start the main loop for the program.'''
while True:
self.execute()
# RUN THE PROGRAM
def print_message(message):
'''Print the given message with the current time before it'''
if DEBUG:
print("[" + time.strftime("%H:%M:%S") + "]: " + message)
def main():
    '''Main function to run the program.'''
    router = Router(str(sys.argv[-1]))
    router.start_timers()
    router.main_loop()


if __name__ == "__main__":
    main()
| [((445, 11, 445, 34), 'struct.calcsize', 'struct.calcsize', ({(445, 27, 445, 33): 'FORMAT'}, {}), '(FORMAT)', False, 'import struct\n'), ((490, 11, 490, 34), 'struct.calcsize', 'struct.calcsize', ({(490, 27, 490, 33): 'FORMAT'}, {}), '(FORMAT)', False, 'import struct\n'), ((170, 17, 170, 44), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((467, 17, 467, 52), 'struct.unpack', 'struct.unpack', ({(467, 31, 467, 42): 'self.FORMAT', (467, 44, 467, 51): 'rawdata'}, {}), '(self.FORMAT, rawdata)', False, 'import struct\n'), ((482, 15, 482, 69), 'struct.pack', 'struct.pack', ({(482, 27, 482, 38): 'self.FORMAT', (482, 40, 482, 48): 'self.cmd', (482, 50, 482, 58): 'self.ver', (482, 60, 482, 68): 'self.src'}, {}), '(self.FORMAT, self.cmd, self.ver, self.src)', False, 'import struct\n'), ((533, 14, 533, 49), 'struct.unpack', 'struct.unpack', ({(533, 28, 533, 39): 'self.FORMAT', (533, 41, 533, 48): 'rawdata'}, {}), '(self.FORMAT, rawdata)', False, 'import struct\n'), ((577, 15, 578, 53), 'struct.pack', 'struct.pack', ({(577, 27, 577, 38): 'self.FORMAT', (577, 40, 577, 48): 'self.afi', (577, 50, 577, 58): 'self.tag', (577, 60, 577, 69): 'self.addr', (578, 27, 578, 39): 'self.nexthop', (578, 41, 578, 52): 'self.metric'}, {}), '(self.FORMAT, self.afi, self.tag, self.addr, self.nexthop, self.\n metric)', False, 'import struct\n'), ((718, 16, 718, 29), 'random.randint', 'randint', ({(718, 24, 718, 25): '1', (718, 27, 718, 28): '5'}, {}), '(1, 5)', False, 'from random import randint, randrange\n'), ((719, 8, 719, 59), 'threading.Timer', 'threading.Timer', ({(719, 24, 719, 29): 'delay', (719, 31, 719, 42): 'self.update', (719, 44, 719, 58): '[changed_rtes]'}, {}), '(delay, self.update, [changed_rtes])', False, 'import threading\n'), ((554, 27, 554, 50), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((238, 20, 238, 68), 'socket.socket', 'socket.socket', ({(238, 34, 238, 48): 'socket.AF_INET', (238, 50, 238, 67): 'socket.SOCK_DGRAM'}, {}), '(socket.AF_INET, socket.SOCK_DGRAM)', False, 'import socket\n'), ((818, 8, 818, 62), 'threading.Timer', 'threading.Timer', ({(818, 24, 818, 30): 'period', (818, 32, 818, 42): 'self.timer', (818, 44, 818, 61): '[function, param]'}, {}), '(period, self.timer, [function, param])', False, 'import threading\n'), ((242, 16, 242, 26), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((252, 16, 252, 26), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((774, 34, 774, 57), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((813, 34, 813, 53), 'random.randrange', 'randrange', ({(813, 44, 813, 45): '(8)', (813, 47, 813, 49): '(12)', (813, 51, 813, 52): '(1)'}, {}), '(8, 12, 1)', False, 'from random import randint, randrange\n'), ((518, 23, 518, 46), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((839, 20, 839, 45), 'time.strftime', 'time.strftime', ({(839, 34, 839, 44): '"""%H:%M:%S"""'}, {}), "('%H:%M:%S')", False, 'import time\n'), ((768, 20, 768, 43), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((787, 24, 787, 47), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n')] |
alowet/iblapps | atlaselectrophysiology/extract_files.py | 9be936cd6806153dde0cbff1b6f2180191de3aeb | from ibllib.io import spikeglx
import numpy as np
import ibllib.dsp as dsp
from scipy import signal
from ibllib.misc import print_progress
from pathlib import Path
import alf.io as aio
import logging
import ibllib.ephys.ephysqc as ephysqc
from phylib.io import alf
_logger = logging.getLogger('ibllib')
RMS_WIN_LENGTH_SECS = 3
WELCH_WIN_LENGTH_SAMPLES = 1024
def rmsmap(fbin, spectra=True):
"""
Computes RMS map in time domain and spectra for each channel of Neuropixel probe
    :param fbin: binary file in spike glx format (will look for attached metadata)
:type fbin: str or pathlib.Path
:param spectra: whether to compute the power spectrum (only need for lfp data)
:type: bool
    :return: a dictionary with amplitudes in channel/time space, channel/frequency space, time
     and frequency scales
"""
if not isinstance(fbin, spikeglx.Reader):
        sglx = spikeglx.Reader(fbin)
    else:
        sglx = fbin
rms_win_length_samples = 2 ** np.ceil(np.log2(sglx.fs * RMS_WIN_LENGTH_SECS))
# the window generator will generates window indices
wingen = dsp.WindowGenerator(ns=sglx.ns, nswin=rms_win_length_samples, overlap=0)
# pre-allocate output dictionary of numpy arrays
win = {'TRMS': np.zeros((wingen.nwin, sglx.nc)),
'nsamples': np.zeros((wingen.nwin,)),
'fscale': dsp.fscale(WELCH_WIN_LENGTH_SAMPLES, 1 / sglx.fs, one_sided=True),
'tscale': wingen.tscale(fs=sglx.fs)}
win['spectral_density'] = np.zeros((len(win['fscale']), sglx.nc))
# loop through the whole session
for first, last in wingen.firstlast:
D = sglx.read_samples(first_sample=first, last_sample=last)[0].transpose()
# remove low frequency noise below 1 Hz
D = dsp.hp(D, 1 / sglx.fs, [0, 1])
iw = wingen.iw
win['TRMS'][iw, :] = dsp.rms(D)
win['nsamples'][iw] = D.shape[1]
if spectra:
# the last window may be smaller than what is needed for welch
if last - first < WELCH_WIN_LENGTH_SAMPLES:
continue
# compute a smoothed spectrum using welch method
_, w = signal.welch(D, fs=sglx.fs, window='hanning', nperseg=WELCH_WIN_LENGTH_SAMPLES,
detrend='constant', return_onesided=True, scaling='density',
axis=-1)
win['spectral_density'] += w.T
# print at least every 20 windows
if (iw % min(20, max(int(np.floor(wingen.nwin / 75)), 1))) == 0:
print_progress(iw, wingen.nwin)
return win
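
# Hedged usage sketch for rmsmap() (the file path below is a made-up example):
#
#   win = rmsmap("/path/to/_spikeglx_ephysData_g0_t0.imec0.lf.bin")
#   print(win['TRMS'].shape, win['tscale'].shape)              # RMS per window/channel
#   print(win['spectral_density'].shape, win['fscale'].shape)  # PSD per channel
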
def extract_rmsmap(fbin, out_folder=None, spectra=True):
"""
Wrapper for rmsmap that outputs _ibl_ephysRmsMap and _ibl_ephysSpectra ALF files
    :param fbin: binary file in spike glx format (will look for attached metadata)
:param out_folder: folder in which to store output ALF files. Default uses the folder in which
the `fbin` file lives.
:param spectra: whether to compute the power spectrum (only need for lfp data)
:type: bool
:return: None
"""
_logger.info(f"Computing QC for {fbin}")
sglx = spikeglx.Reader(fbin)
# check if output ALF files exist already:
if out_folder is None:
out_folder = Path(fbin).parent
else:
out_folder = Path(out_folder)
alf_object_time = f'_iblqc_ephysTimeRms{sglx.type.upper()}'
alf_object_freq = f'_iblqc_ephysSpectralDensity{sglx.type.upper()}'
# crunch numbers
rms = rmsmap(fbin, spectra=spectra)
# output ALF files, single precision with the optional label as suffix before extension
if not out_folder.exists():
out_folder.mkdir()
tdict = {'rms': rms['TRMS'].astype(np.single), 'timestamps': rms['tscale'].astype(np.single)}
aio.save_object_npy(out_folder, object=alf_object_time, dico=tdict)
if spectra:
fdict = {'power': rms['spectral_density'].astype(np.single),
'freqs': rms['fscale'].astype(np.single)}
aio.save_object_npy(out_folder, object=alf_object_freq, dico=fdict)
def _sample2v(ap_file):
"""
Convert raw ephys data to Volts
"""
md = spikeglx.read_meta_data(ap_file.with_suffix('.meta'))
s2v = spikeglx._conversion_sample2v_from_meta(md)
return s2v['ap'][0]
def ks2_to_alf(ks_path, bin_path, out_path, bin_file=None, ampfactor=1, label=None, force=True):
"""
Convert Kilosort 2 output to ALF dataset for single probe data
:param ks_path:
:param bin_path: path of raw data
:param out_path:
:return:
"""
m = ephysqc.phy_model_from_ks2_path(ks2_path=ks_path, bin_path=bin_path, bin_file=bin_file)
ephysqc.spike_sorting_metrics_ks2(ks_path, m, save=True)
ac = alf.EphysAlfCreator(m)
ac.convert(out_path, label=label, force=force, ampfactor=ampfactor)
def extract_data(ks_path, ephys_path, out_path):
efiles = spikeglx.glob_ephys_files(ephys_path)
for efile in efiles:
if efile.get('ap') and efile.ap.exists():
ks2_to_alf(ks_path, ephys_path, out_path, bin_file=efile.ap,
ampfactor=_sample2v(efile.ap), label=None, force=True)
extract_rmsmap(efile.ap, out_folder=out_path, spectra=False)
if efile.get('lf') and efile.lf.exists():
extract_rmsmap(efile.lf, out_folder=out_path)
# if __name__ == '__main__':
#
# ephys_path = Path('C:/Users/Mayo/Downloads/raw_ephys_data')
# ks_path = Path('C:/Users/Mayo/Downloads/KS2')
# out_path = Path('C:/Users/Mayo/Downloads/alf')
# extract_data(ks_path, ephys_path, out_path)
| [((12, 10, 12, 37), 'logging.getLogger', 'logging.getLogger', ({(12, 28, 12, 36): '"""ibllib"""'}, {}), "('ibllib')", False, 'import logging\n'), ((34, 13, 34, 85), 'ibllib.dsp.WindowGenerator', 'dsp.WindowGenerator', (), '', True, 'import ibllib.dsp as dsp\n'), ((76, 11, 76, 32), 'ibllib.io.spikeglx.Reader', 'spikeglx.Reader', ({(76, 27, 76, 31): 'fbin'}, {}), '(fbin)', False, 'from ibllib.io import spikeglx\n'), ((91, 4, 91, 71), 'alf.io.save_object_npy', 'aio.save_object_npy', (), '', True, 'import alf.io as aio\n'), ((103, 10, 103, 53), 'ibllib.io.spikeglx._conversion_sample2v_from_meta', 'spikeglx._conversion_sample2v_from_meta', ({(103, 50, 103, 52): 'md'}, {}), '(md)', False, 'from ibllib.io import spikeglx\n'), ((115, 8, 115, 95), 'ibllib.ephys.ephysqc.phy_model_from_ks2_path', 'ephysqc.phy_model_from_ks2_path', (), '', True, 'import ibllib.ephys.ephysqc as ephysqc\n'), ((116, 4, 116, 60), 'ibllib.ephys.ephysqc.spike_sorting_metrics_ks2', 'ephysqc.spike_sorting_metrics_ks2', (), '', True, 'import ibllib.ephys.ephysqc as ephysqc\n'), ((117, 9, 117, 31), 'phylib.io.alf.EphysAlfCreator', 'alf.EphysAlfCreator', ({(117, 29, 117, 30): 'm'}, {}), '(m)', False, 'from phylib.io import alf\n'), ((122, 13, 122, 50), 'ibllib.io.spikeglx.glob_ephys_files', 'spikeglx.glob_ephys_files', ({(122, 39, 122, 49): 'ephys_path'}, {}), '(ephys_path)', False, 'from ibllib.io import spikeglx\n'), ((31, 15, 31, 36), 'ibllib.io.spikeglx.Reader', 'spikeglx.Reader', ({(31, 31, 31, 35): 'fbin'}, {}), '(fbin)', False, 'from ibllib.io import spikeglx\n'), ((36, 19, 36, 51), 'numpy.zeros', 'np.zeros', ({(36, 28, 36, 50): '(wingen.nwin, sglx.nc)'}, {}), '((wingen.nwin, sglx.nc))', True, 'import numpy as np\n'), ((37, 23, 37, 47), 'numpy.zeros', 'np.zeros', ({(37, 32, 37, 46): '(wingen.nwin,)'}, {}), '((wingen.nwin,))', True, 'import numpy as np\n'), ((38, 21, 38, 86), 'ibllib.dsp.fscale', 'dsp.fscale', (), '', True, 'import ibllib.dsp as dsp\n'), ((45, 12, 45, 42), 'ibllib.dsp.hp', 'dsp.hp', ({(45, 19, 45, 20): 'D', (45, 22, 45, 33): '1 / sglx.fs', (45, 35, 45, 41): '[0, 1]'}, {}), '(D, 1 / sglx.fs, [0, 1])', True, 'import ibllib.dsp as dsp\n'), ((47, 29, 47, 39), 'ibllib.dsp.rms', 'dsp.rms', ({(47, 37, 47, 38): 'D'}, {}), '(D)', True, 'import ibllib.dsp as dsp\n'), ((81, 21, 81, 37), 'pathlib.Path', 'Path', ({(81, 26, 81, 36): 'out_folder'}, {}), '(out_folder)', False, 'from pathlib import Path\n'), ((95, 8, 95, 75), 'alf.io.save_object_npy', 'aio.save_object_npy', (), '', True, 'import alf.io as aio\n'), ((32, 42, 32, 80), 'numpy.log2', 'np.log2', ({(32, 50, 32, 79): '(sglx.fs * RMS_WIN_LENGTH_SECS)'}, {}), '(sglx.fs * RMS_WIN_LENGTH_SECS)', True, 'import numpy as np\n'), ((54, 19, 56, 40), 'scipy.signal.welch', 'signal.welch', (), '', False, 'from scipy import signal\n'), ((60, 12, 60, 43), 'ibllib.misc.print_progress', 'print_progress', ({(60, 27, 60, 29): 'iw', (60, 31, 60, 42): 'wingen.nwin'}, {}), '(iw, wingen.nwin)', False, 'from ibllib.misc import print_progress\n'), ((79, 21, 79, 31), 'pathlib.Path', 'Path', ({(79, 26, 79, 30): 'fbin'}, {}), '(fbin)', False, 'from pathlib import Path\n'), ((59, 33, 59, 59), 'numpy.floor', 'np.floor', ({(59, 42, 59, 58): '(wingen.nwin / 75)'}, {}), '(wingen.nwin / 75)', True, 'import numpy as np\n')] |
shervinbdndev/Django-Shop | site_settings/models.py | baa4e7b91fbdd01ee591049c12cd9fbfaa434379 | from django.db import models
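# Site-wide configuration models: general settings, footer link groups and links, and homepage sliders.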
class SiteSettings(models.Model):
site_name = models.CharField(max_length=200 , verbose_name='Site Name')
site_url = models.CharField(max_length=200 , verbose_name='Site URL')
site_address = models.CharField(max_length=300 , verbose_name='Site Address')
site_phone = models.CharField(max_length=100 , null=True , blank=True , verbose_name='Site Phone')
site_fax = models.CharField(max_length=200 , null=True , blank=True , verbose_name='Site Fax')
site_email = models.EmailField(max_length=200 , null=True , blank=True , verbose_name='Site Email')
about_us_text = models.TextField(verbose_name='About Us Text')
site_copy_right = models.TextField(verbose_name='Copyright Text')
site_logo = models.ImageField(upload_to='images/site-setting/' , verbose_name='Site Logo')
is_main_setting = models.BooleanField(verbose_name='Site Main Settings')
def __str__(self) -> str:
super(SiteSettings , self).__str__()
return self.site_name
class Meta:
verbose_name = 'Site Setting'
verbose_name_plural = 'Site Settings'
class FooterLinkBox(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
def __str__(self) -> str:
super(FooterLinkBox , self).__str__()
return self.title
class Meta:
verbose_name = 'Footer Link Setting'
verbose_name_plural = 'Footer Link Settings'
class FooterLink(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
url = models.URLField(max_length=500 , verbose_name='Links')
footer_link_box = models.ForeignKey(to=FooterLinkBox , verbose_name='Category' , on_delete=models.CASCADE)
def __str__(self) -> str:
super(FooterLink , self).__str__()
return self.title
class Meta:
verbose_name = 'Footer Link'
verbose_name_plural = 'Footer Links'
class Slider(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
description = models.TextField(verbose_name='Slider Description')
url_title = models.CharField(max_length=200 , verbose_name='URL Title')
url = models.URLField(max_length=200 , verbose_name='URL Address')
image = models.ImageField(upload_to='images/sliders' , verbose_name='Slider Image')
is_active = models.BooleanField(default=False , verbose_name='Active / Inactive')
def __str__(self) -> str:
super(Slider , self).__str__()
return self.title
class Meta:
verbose_name = 'Slider'
verbose_name_plural = 'Sliders' | [((7, 16, 7, 75), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((8, 15, 8, 73), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((9, 19, 9, 81), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((10, 17, 10, 102), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((11, 15, 11, 98), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((12, 17, 12, 103), 'django.db.models.EmailField', 'models.EmailField', (), '', False, 'from django.db import models\n'), ((13, 20, 13, 66), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((14, 22, 14, 69), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((15, 16, 15, 94), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import models\n'), ((16, 22, 16, 76), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((31, 12, 31, 67), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((46, 12, 46, 67), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((47, 10, 47, 64), 'django.db.models.URLField', 'models.URLField', (), '', False, 'from django.db import models\n'), ((48, 22, 48, 110), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((63, 12, 63, 67), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((64, 18, 64, 69), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((65, 16, 65, 75), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((66, 10, 66, 70), 'django.db.models.URLField', 'models.URLField', (), '', False, 'from django.db import models\n'), ((67, 12, 67, 87), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import models\n'), ((68, 16, 68, 85), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n')] |
3DAlgoLab/vispy | examples/basics/visuals/line_prototype.py | 91972307cf336674aad58198fb26b9e46f8f9ca1 | # -*- coding: utf-8 -*-
# vispy: gallery 10
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import sys
import numpy as np
from vispy import app, gloo, visuals
from vispy.visuals.filters import Clipper, ColorFilter
from vispy.visuals.shaders import MultiProgram
from vispy.visuals.collections import PointCollection
from vispy.visuals.transforms import STTransform
from vispy.scene import SceneCanvas
from vispy.scene.visuals import create_visual_node
class LineVisual(visuals.Visual):
"""Example of a very simple GL-line visual.
This shows the minimal set of methods that need to be reimplemented to
make a new visual class.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
vcode = """
attribute vec2 a_pos;
void main() {
gl_Position = $transform(vec4(a_pos, 0., 1.));
gl_PointSize = 10.;
}
"""
fcode = """
void main() {
gl_FragColor = $color;
}
"""
visuals.Visual.__init__(self, vcode=vcode, fcode=fcode)
self.pos_buf = gloo.VertexBuffer()
# The Visual superclass contains a MultiProgram, which is an object
# that behaves like a normal shader program (you can assign shader
# code, upload values, set template variables, etc.) but internally
# manages multiple ModularProgram instances, one per view.
# The MultiProgram is accessed via the `shared_program` property, so
# the following modifications to the program will be applied to all
# views:
self.shared_program['a_pos'] = self.pos_buf
self.shared_program.frag['color'] = color
self._need_upload = False
# Visual keeps track of draw mode, index buffer, and GL state. These
# are shared between all views.
self._draw_mode = 'line_strip'
self.set_gl_state('translucent', depth_test=False)
if pos is not None:
self.set_data(pos)
def set_data(self, pos):
self._pos = pos
self._need_upload = True
def _prepare_transforms(self, view=None):
view.view_program.vert['transform'] = view.transforms.get_transform()
def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
if self._need_upload:
# Note that pos_buf is shared between all views, so we have no need
# to use the *view* argument in this example. This will be true
# for most visuals.
self.pos_buf.set_data(self._pos)
self._need_upload = False
class PointVisual(LineVisual):
"""Another simple visual class.
Due to the simplicity of these example classes, it was only necessary to
subclass from LineVisual and set the draw mode to 'points'. A more
fully-featured PointVisual class might not follow this approach.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
LineVisual.__init__(self, pos, color)
self._draw_mode = 'points'
class PlotLineVisual(visuals.CompoundVisual):
"""An example compound visual that draws lines and points.
To the user, the compound visual behaves exactly like a normal visual--it
has a transform system, draw() and bounds() methods, etc. Internally, the
compound visual automatically manages proxying these transforms and methods
to its sub-visuals.
"""
def __init__(self, pos=None, line_color=(1, 1, 1, 1),
point_color=(1, 1, 1, 1)):
self._line = LineVisual(pos, color=line_color)
self._point = PointVisual(pos, color=point_color)
visuals.CompoundVisual.__init__(self, [self._line, self._point])
class PointCollectionVisual(visuals.Visual):
"""Thin wrapper around a point collection.
Note: This is currently broken!
"""
def __init__(self):
prog = MultiProgram(vcode='', fcode='')
self.points = PointCollection("agg", color="shared", program=prog)
visuals.Visual.__init__(self, program=prog)
def _prepare_draw(self, view):
if self.points._need_update:
self.points._update()
self._draw_mode = self.points._mode
self._index_buffer = self.points._indices_buffer
def append(self, *args, **kwargs):
self.points.append(*args, **kwargs)
def _prepare_transforms(self, view=None):
pass
@property
def color(self):
return self.points['color']
@color.setter
def color(self, c):
self.points['color'] = c
class PanZoomTransform(STTransform):
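    """STTransform that pans on left-button drag and zooms on right-button drag or mouse wheel."""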
def __init__(self, canvas=None, aspect=None, **kwargs):
self._aspect = aspect
self.attach(canvas)
STTransform.__init__(self, **kwargs)
def attach(self, canvas):
""" Attach this tranform to a canvas """
self._canvas = canvas
canvas.events.mouse_wheel.connect(self.on_mouse_wheel)
canvas.events.mouse_move.connect(self.on_mouse_move)
def on_mouse_move(self, event):
if event.is_dragging:
dxy = event.pos - event.last_event.pos
button = event.press_event.button
if button == 1:
self.move(dxy)
elif button == 2:
center = event.press_event.pos
if self._aspect is None:
self.zoom(np.exp(dxy * (0.01, -0.01)), center)
else:
s = dxy[1] * -0.01
self.zoom(np.exp(np.array([s, s])), center)
def on_mouse_wheel(self, event):
self.zoom(np.exp(event.delta * (0.01, -0.01)), event.pos)
canvas = app.Canvas(keys='interactive', size=(900, 600), show=True,
title="Visual Canvas")
pos = np.random.normal(size=(1000, 2), loc=0, scale=50).astype('float32')
pos[0] = [0, 0]
# Make a line visual
line = LineVisual(pos=pos)
line.transforms.canvas = canvas
line.transform = STTransform(scale=(2, 1), translate=(20, 20))
panzoom = PanZoomTransform(canvas)
line.transforms.scene_transform = panzoom
panzoom.changed.connect(lambda ev: canvas.update())
# Attach color filter to all views (current and future) of the visual
line.attach(ColorFilter((1, 1, 0.5, 0.7)))
# Attach a clipper just to this view. The Clipper filter requires a
# transform that maps from the framebuffer coordinate system to the
# clipping coordinates.
tr = line.transforms.get_transform('framebuffer', 'canvas')
line.attach(Clipper((20, 20, 260, 260), transform=tr), view=line)
# Make a view of the line that will draw its shadow
shadow = line.view()
shadow.transforms.canvas = canvas
shadow.transform = STTransform(scale=(2, 1), translate=(25, 25))
shadow.transforms.scene_transform = panzoom
shadow.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow)
tr = shadow.transforms.get_transform('framebuffer', 'canvas')
shadow.attach(Clipper((20, 20, 260, 260), transform=tr), view=shadow)
# And make a second view of the line with different clipping bounds
view = line.view()
view.transforms.canvas = canvas
view.transform = STTransform(scale=(2, 0.5), translate=(450, 150))
tr = view.transforms.get_transform('framebuffer', 'canvas')
view.attach(Clipper((320, 20, 260, 260), transform=tr), view=view)
# Make a compound visual
plot = PlotLineVisual(pos, (0.5, 1, 0.5, 0.2), (0.5, 1, 1, 0.3))
plot.transforms.canvas = canvas
plot.transform = STTransform(translate=(80, 450), scale=(1.5, 1))
tr = plot.transforms.get_transform('framebuffer', 'canvas')
plot.attach(Clipper((20, 320, 260, 260), transform=tr), view=plot)
# And make a view on the compound
view2 = plot.view()
view2.transforms.canvas = canvas
view2.transform = STTransform(scale=(1.5, 1), translate=(450, 400))
tr = view2.transforms.get_transform('framebuffer', 'canvas')
view2.attach(Clipper((320, 320, 260, 260), transform=tr), view=view2)
# And a shadow for the view
shadow2 = plot.view()
shadow2.transforms.canvas = canvas
shadow2.transform = STTransform(scale=(1.5, 1), translate=(455, 405))
shadow2.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow2)
tr = shadow2.transforms.get_transform('framebuffer', 'canvas')
shadow2.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
# Example of a collection visual
collection = PointCollectionVisual()
collection.transforms.canvas = canvas
collection.transform = STTransform(translate=(750, 150))
collection.append(np.random.normal(loc=0, scale=20, size=(10000, 3)),
itemsize=5000)
collection.color = (1, 0.5, 0.5, 1), (0.5, 0.5, 1, 1)
shadow3 = collection.view()
shadow3.transforms.canvas = canvas
shadow3.transform = STTransform(scale=(1, 1), translate=(752, 152))
shadow3.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow3)
# tr = shadow3.transforms.get_transform('framebuffer', 'canvas')
# shadow3.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
order = [shadow, line, view, plot, shadow2, view2, shadow3, collection]
@canvas.connect
def on_draw(event):
canvas.context.clear((0.3, 0.3, 0.3, 1.0))
for v in order:
v.draw()
def on_resize(event):
# Set canvas viewport and reconfigure visual transforms to match.
vp = (0, 0, canvas.physical_size[0], canvas.physical_size[1])
canvas.context.set_viewport(*vp)
for v in order:
v.transforms.configure(canvas=canvas, viewport=vp)
canvas.events.resize.connect(on_resize)
on_resize(None)
Line = create_visual_node(LineVisual)
canvas2 = SceneCanvas(keys='interactive', title='Scene Canvas', show=True)
v = canvas2.central_widget.add_view(margin=10)
v.border_color = (1, 1, 1, 1)
v.bgcolor = (0.3, 0.3, 0.3, 1)
v.camera = 'panzoom'
line2 = Line(pos, parent=v.scene)
def mouse(ev):
print(ev)
v.events.mouse_press.connect(mouse)
if __name__ == '__main__':
if sys.flags.interactive != 1:
app.run()
| [((180, 9, 181, 42), 'vispy.app.Canvas', 'app.Canvas', (), '', False, 'from vispy import app, gloo, visuals\n'), ((188, 17, 188, 62), 'vispy.visuals.transforms.STTransform', 'STTransform', (), '', False, 'from vispy.visuals.transforms import STTransform\n'), ((205, 19, 205, 64), 'vispy.visuals.transforms.STTransform', 'STTransform', (), '', False, 'from vispy.visuals.transforms import STTransform\n'), ((214, 17, 214, 66), 'vispy.visuals.transforms.STTransform', 'STTransform', (), '', False, 'from vispy.visuals.transforms import STTransform\n'), ((221, 17, 221, 65), 'vispy.visuals.transforms.STTransform', 'STTransform', (), '', False, 'from vispy.visuals.transforms import STTransform\n'), ((228, 18, 228, 67), 'vispy.visuals.transforms.STTransform', 'STTransform', (), '', False, 'from vispy.visuals.transforms import STTransform\n'), ((235, 20, 235, 69), 'vispy.visuals.transforms.STTransform', 'STTransform', (), '', False, 'from vispy.visuals.transforms import STTransform\n'), ((243, 23, 243, 56), 'vispy.visuals.transforms.STTransform', 'STTransform', (), '', False, 'from vispy.visuals.transforms import STTransform\n'), ((250, 20, 250, 67), 'vispy.visuals.transforms.STTransform', 'STTransform', (), '', False, 'from vispy.visuals.transforms import STTransform\n'), ((274, 7, 274, 37), 'vispy.scene.visuals.create_visual_node', 'create_visual_node', ({(274, 26, 274, 36): 'LineVisual'}, {}), '(LineVisual)', False, 'from vispy.scene.visuals import create_visual_node\n'), ((275, 10, 275, 74), 'vispy.scene.SceneCanvas', 'SceneCanvas', (), '', False, 'from vispy.scene import SceneCanvas\n'), ((194, 12, 194, 41), 'vispy.visuals.filters.ColorFilter', 'ColorFilter', ({(194, 24, 194, 40): '(1, 1, 0.5, 0.7)'}, {}), '((1, 1, 0.5, 0.7))', False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((200, 12, 200, 53), 'vispy.visuals.filters.Clipper', 'Clipper', (), '', False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((207, 14, 207, 41), 'vispy.visuals.filters.ColorFilter', 'ColorFilter', ({(207, 26, 207, 40): '(0, 0, 0, 0.6)'}, {}), '((0, 0, 0, 0.6))', False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((209, 14, 209, 55), 'vispy.visuals.filters.Clipper', 'Clipper', (), '', False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((216, 12, 216, 54), 'vispy.visuals.filters.Clipper', 'Clipper', (), '', False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((223, 12, 223, 54), 'vispy.visuals.filters.Clipper', 'Clipper', (), '', False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((230, 13, 230, 56), 'vispy.visuals.filters.Clipper', 'Clipper', (), '', False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((236, 15, 236, 42), 'vispy.visuals.filters.ColorFilter', 'ColorFilter', ({(236, 27, 236, 41): '(0, 0, 0, 0.6)'}, {}), '((0, 0, 0, 0.6))', False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((238, 15, 238, 58), 'vispy.visuals.filters.Clipper', 'Clipper', (), '', False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((244, 18, 244, 68), 'numpy.random.normal', 'np.random.normal', (), '', True, 'import numpy as np\n'), ((251, 15, 251, 42), 'vispy.visuals.filters.ColorFilter', 'ColorFilter', ({(251, 27, 251, 41): '(0, 0, 0, 0.6)'}, {}), '((0, 0, 0, 0.6))', False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((43, 8, 43, 63), 'vispy.visuals.Visual.__init__', 'visuals.Visual.__init__', (), '', False, 'from vispy import app, gloo, visuals\n'), ((45, 23, 45, 42), 
'vispy.gloo.VertexBuffer', 'gloo.VertexBuffer', ({}, {}), '()', False, 'from vispy import app, gloo, visuals\n'), ((114, 8, 114, 72), 'vispy.visuals.CompoundVisual.__init__', 'visuals.CompoundVisual.__init__', ({(114, 40, 114, 44): 'self', (114, 46, 114, 71): '[self._line, self._point]'}, {}), '(self, [self._line, self._point])', False, 'from vispy import app, gloo, visuals\n'), ((124, 15, 124, 47), 'vispy.visuals.shaders.MultiProgram', 'MultiProgram', (), '', False, 'from vispy.visuals.shaders import MultiProgram\n'), ((125, 22, 125, 74), 'vispy.visuals.collections.PointCollection', 'PointCollection', (), '', False, 'from vispy.visuals.collections import PointCollection\n'), ((126, 8, 126, 51), 'vispy.visuals.Visual.__init__', 'visuals.Visual.__init__', (), '', False, 'from vispy import app, gloo, visuals\n'), ((153, 8, 153, 44), 'vispy.visuals.transforms.STTransform.__init__', 'STTransform.__init__', ({(153, 29, 153, 33): 'self'}, {}), '(self, **kwargs)', False, 'from vispy.visuals.transforms import STTransform\n'), ((182, 6, 182, 55), 'numpy.random.normal', 'np.random.normal', (), '', True, 'import numpy as np\n'), ((290, 8, 290, 17), 'vispy.app.run', 'app.run', ({}, {}), '()', False, 'from vispy import app, gloo, visuals\n'), ((177, 18, 177, 53), 'numpy.exp', 'np.exp', ({(177, 25, 177, 52): '(event.delta * (0.01, -0.01))'}, {}), '(event.delta * (0.01, -0.01))', True, 'import numpy as np\n'), ((171, 30, 171, 57), 'numpy.exp', 'np.exp', ({(171, 37, 171, 56): '(dxy * (0.01, -0.01))'}, {}), '(dxy * (0.01, -0.01))', True, 'import numpy as np\n'), ((174, 37, 174, 53), 'numpy.array', 'np.array', ({(174, 46, 174, 52): '[s, s]'}, {}), '([s, s])', True, 'import numpy as np\n')] |
Mou-Ikkai/h1st | h1st/tests/core/test_schemas_inferrer.py | da47a8f1ad6af532c549e075fba19e3b3692de89 | from unittest import TestCase
from datetime import datetime
import pyarrow as pa
import numpy as np
import pandas as pd
from h1st.schema import SchemaInferrer
class SchemaInferrerTestCase(TestCase):
def test_infer_python(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema(1), pa.int64())
self.assertEqual(inferrer.infer_schema(1.1), pa.float64())
self.assertEqual(inferrer.infer_schema({
'test1': 1,
'test2': "hello",
'test3': b"hello",
'today': datetime.now(),
}), {
'type': dict,
'fields': {
'test1': pa.int64(),
'test2': pa.string(),
'test3': pa.binary(),
'today': pa.date64(),
}
})
self.assertEqual(inferrer.infer_schema((
1, 2, 3
)), pa.list_(pa.int64()))
self.assertEqual(inferrer.infer_schema((
1.2, 1.3, 1.4
)), pa.list_(pa.float64()))
table = pa.Table.from_arrays(
[pa.array([1, 2, 3]), pa.array(["a", "b", "c"])],
['c1', 'c2']
)
self.assertEqual(inferrer.infer_schema(table), table.schema)
def test_infer_numpy(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema(np.random.random((100, 28, 28))), {
'type': np.ndarray,
'item': pa.float64(),
'shape': (None, 28, 28)
})
self.assertEqual(inferrer.infer_schema(np.array(["1", "2", "3"])), {
'type': np.ndarray,
'item': pa.string()
})
def test_infer_dataframe(self):
inferrer = SchemaInferrer()
df = pd.DataFrame({
'f1': [1, 2, 3],
'f2': ['a', 'b', 'c'],
'f3': [0.1, 0.2, 0.9]
})
self.assertEqual(inferrer.infer_schema(df), {
'type': pd.DataFrame,
'fields': {
'f1': pa.int64(),
'f2': pa.string(),
'f3': pa.float64()
}
})
df = pd.DataFrame({
'Timestamp': [1.1, 2.2, 3.1],
'CarSpeed': [0.1, 0.2, 0.9],
'Gx': [0.1, 0.2, 0.9],
'Gy': [0.1, 0.2, 0.9],
'Label': ['1', '0', '1']
})
self.assertEqual(inferrer.infer_schema(df), {
'type': pd.DataFrame,
'fields': {
'Timestamp': pa.float64(),
'CarSpeed': pa.float64(),
'Gx': pa.float64(),
'Gy': pa.float64(),
'Label': pa.string(),
}
})
self.assertEqual(inferrer.infer_schema(pd.Series([1, 2, 3])), {
'type': pd.Series,
'item': pa.int64()
})
def test_infer_dict(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema({
'test': 123,
}), {
'type': dict,
'fields': {
'test': pa.int64(),
}
})
self.assertEqual(inferrer.infer_schema({
'test': 123,
'indices': [1, 2, 3]
}), {
'type': dict,
'fields': {
'test': pa.int64(),
'indices': pa.list_(pa.int64())
}
})
self.assertEqual(inferrer.infer_schema({
'results': pd.DataFrame({
'CarSpeed': [0, 1, 2],
'Label': ['a', 'b', 'c']
})
}), {
'type': dict,
'fields': {
'results': {
'type': pd.DataFrame,
'fields': {
'CarSpeed': pa.int64(),
'Label': pa.string(),
}
}
}
})
def test_infer_list(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema([
{'test': 123},
{'test': 345},
]), {
'type': list,
'item': {
'type': dict,
'fields': {
'test': pa.int64()
}
}
})
| [((11, 19, 11, 35), 'h1st.schema.SchemaInferrer', 'SchemaInferrer', ({}, {}), '()', False, 'from h1st.schema import SchemaInferrer\n'), ((46, 19, 46, 35), 'h1st.schema.SchemaInferrer', 'SchemaInferrer', ({}, {}), '()', False, 'from h1st.schema import SchemaInferrer\n'), ((59, 19, 59, 35), 'h1st.schema.SchemaInferrer', 'SchemaInferrer', ({}, {}), '()', False, 'from h1st.schema import SchemaInferrer\n'), ((60, 13, 64, 10), 'pandas.DataFrame', 'pd.DataFrame', ({(60, 26, 64, 9): "{'f1': [1, 2, 3], 'f2': ['a', 'b', 'c'], 'f3': [0.1, 0.2, 0.9]}"}, {}), "({'f1': [1, 2, 3], 'f2': ['a', 'b', 'c'], 'f3': [0.1, 0.2, 0.9]})", True, 'import pandas as pd\n'), ((75, 13, 81, 10), 'pandas.DataFrame', 'pd.DataFrame', ({(75, 26, 81, 9): "{'Timestamp': [1.1, 2.2, 3.1], 'CarSpeed': [0.1, 0.2, 0.9], 'Gx': [0.1, 0.2,\n 0.9], 'Gy': [0.1, 0.2, 0.9], 'Label': ['1', '0', '1']}"}, {}), "({'Timestamp': [1.1, 2.2, 3.1], 'CarSpeed': [0.1, 0.2, 0.9],\n 'Gx': [0.1, 0.2, 0.9], 'Gy': [0.1, 0.2, 0.9], 'Label': ['1', '0', '1']})", True, 'import pandas as pd\n'), ((100, 19, 100, 35), 'h1st.schema.SchemaInferrer', 'SchemaInferrer', ({}, {}), '()', False, 'from h1st.schema import SchemaInferrer\n'), ((140, 19, 140, 35), 'h1st.schema.SchemaInferrer', 'SchemaInferrer', ({}, {}), '()', False, 'from h1st.schema import SchemaInferrer\n'), ((13, 51, 13, 61), 'pyarrow.int64', 'pa.int64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((14, 53, 14, 65), 'pyarrow.float64', 'pa.float64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((33, 21, 33, 31), 'pyarrow.int64', 'pa.int64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((37, 21, 37, 33), 'pyarrow.float64', 'pa.float64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((40, 13, 40, 32), 'pyarrow.array', 'pa.array', ({(40, 22, 40, 31): '[1, 2, 3]'}, {}), '([1, 2, 3])', True, 'import pyarrow as pa\n'), ((40, 34, 40, 59), 'pyarrow.array', 'pa.array', ({(40, 43, 40, 58): "['a', 'b', 'c']"}, {}), "(['a', 'b', 'c'])", True, 'import pyarrow as pa\n'), ((47, 47, 47, 78), 'numpy.random.random', 'np.random.random', ({(47, 64, 47, 77): '(100, 28, 28)'}, {}), '((100, 28, 28))', True, 'import numpy as np\n'), ((49, 20, 49, 32), 'pyarrow.float64', 'pa.float64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((53, 47, 53, 72), 'numpy.array', 'np.array', ({(53, 56, 53, 71): "['1', '2', '3']"}, {}), "(['1', '2', '3'])", True, 'import numpy as np\n'), ((55, 20, 55, 31), 'pyarrow.string', 'pa.string', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((94, 47, 94, 67), 'pandas.Series', 'pd.Series', ({(94, 57, 94, 66): '[1, 2, 3]'}, {}), '([1, 2, 3])', True, 'import pandas as pd\n'), ((96, 20, 96, 30), 'pyarrow.int64', 'pa.int64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((20, 21, 20, 35), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((24, 25, 24, 35), 'pyarrow.int64', 'pa.int64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((25, 25, 25, 36), 'pyarrow.string', 'pa.string', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((26, 25, 26, 36), 'pyarrow.binary', 'pa.binary', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((27, 25, 27, 36), 'pyarrow.date64', 'pa.date64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((69, 22, 69, 32), 'pyarrow.int64', 'pa.int64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((70, 22, 70, 33), 'pyarrow.string', 'pa.string', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((71, 22, 71, 34), 'pyarrow.float64', 'pa.float64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((86, 29, 86, 41), 
'pyarrow.float64', 'pa.float64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((87, 28, 87, 40), 'pyarrow.float64', 'pa.float64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((88, 22, 88, 34), 'pyarrow.float64', 'pa.float64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((89, 22, 89, 34), 'pyarrow.float64', 'pa.float64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((90, 25, 90, 36), 'pyarrow.string', 'pa.string', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((106, 24, 106, 34), 'pyarrow.int64', 'pa.int64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((116, 24, 116, 34), 'pyarrow.int64', 'pa.int64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((122, 23, 125, 14), 'pandas.DataFrame', 'pd.DataFrame', ({(122, 36, 125, 13): "{'CarSpeed': [0, 1, 2], 'Label': ['a', 'b', 'c']}"}, {}), "({'CarSpeed': [0, 1, 2], 'Label': ['a', 'b', 'c']})", True, 'import pandas as pd\n'), ((117, 36, 117, 46), 'pyarrow.int64', 'pa.int64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((149, 28, 149, 38), 'pyarrow.int64', 'pa.int64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((132, 36, 132, 46), 'pyarrow.int64', 'pa.int64', ({}, {}), '()', True, 'import pyarrow as pa\n'), ((133, 33, 133, 44), 'pyarrow.string', 'pa.string', ({}, {}), '()', True, 'import pyarrow as pa\n')] |
nicolasessisbreton/pyzehe | c_core_librairies/exercise_a.py | 7497a0095d974ac912ce9826a27e21fd9d513942 | """
# refactoring
Refactoring is the key to successful projects.
Refactor:
1) annuity_factor such that:
conversion to integer is handled,
no extra printing
2) policy_book into a class such that:
a function generates the book and the premium
stats and visualization functions are available
3) book_report such that:
it uses all the previous improvements
""" | [] |
harshitAgr/vess2ret | util/util.py | 5702175bcd9ecde34d4fedab45a7cd2878a0184c | """Auxiliary methods."""
import os
import json
from errno import EEXIST
import numpy as np
import seaborn as sns
import cPickle as pickle
import matplotlib.pyplot as plt
sns.set()
DEFAULT_LOG_DIR = 'log'
ATOB_WEIGHTS_FILE = 'atob_weights.h5'
D_WEIGHTS_FILE = 'd_weights.h5'
class MyDict(dict):
"""
Dictionary that allows to access elements with dot notation.
ex:
>> d = MyDict({'key': 'val'})
>> d.key
'val'
>> d.key2 = 'val2'
>> d
{'key2': 'val2', 'key': 'val'}
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
def convert_to_rgb(img, is_binary=False):
"""Given an image, make sure it has 3 channels and that it is between 0 and 1."""
if len(img.shape) != 3:
raise Exception("""Image must have 3 dimensions (channels x height x width). """
"""Given {0}""".format(len(img.shape)))
img_ch, _, _ = img.shape
if img_ch != 3 and img_ch != 1:
raise Exception("""Unsupported number of channels. """
"""Must be 1 or 3, given {0}.""".format(img_ch))
imgp = img
if img_ch == 1:
imgp = np.repeat(img, 3, axis=0)
if not is_binary:
imgp = imgp * 127.5 + 127.5
imgp /= 255.
return np.clip(imgp.transpose((1, 2, 0)), 0, 1)
def compose_imgs(a, b, is_a_binary=True, is_b_binary=False):
"""Place a and b side by side to be plotted."""
ap = convert_to_rgb(a, is_binary=is_a_binary)
bp = convert_to_rgb(b, is_binary=is_b_binary)
if ap.shape != bp.shape:
raise Exception("""A and B must have the same size. """
"""{0} != {1}""".format(ap.shape, bp.shape))
# ap.shape and bp.shape must have the same size here
h, w, ch = ap.shape
composed = np.zeros((h, 2*w, ch))
composed[:, :w, :] = ap
composed[:, w:, :] = bp
return composed
def get_log_dir(log_dir, expt_name):
"""Compose the log_dir with the experiment name."""
if log_dir is None:
raise Exception('log_dir can not be None.')
if expt_name is not None:
return os.path.join(log_dir, expt_name)
return log_dir
def mkdir(mypath):
"""Create a directory if it does not exist."""
try:
os.makedirs(mypath)
except OSError as exc:
if exc.errno == EEXIST and os.path.isdir(mypath):
pass
else:
raise
def create_expt_dir(params):
"""Create the experiment directory and return it."""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
# Create directories if they do not exist
mkdir(params.log_dir)
mkdir(expt_dir)
# Save the parameters
json.dump(params, open(os.path.join(expt_dir, 'params.json'), 'wb'),
indent=4, sort_keys=True)
return expt_dir
def plot_loss(loss, label, filename, log_dir):
"""Plot a loss function and save it in a file."""
plt.figure(figsize=(5, 4))
plt.plot(loss, label=label)
plt.legend()
plt.savefig(os.path.join(log_dir, filename))
plt.clf()
def log(losses, atob, it_val, N=4, log_dir=DEFAULT_LOG_DIR, expt_name=None,
is_a_binary=True, is_b_binary=False):
"""Log losses and atob results."""
log_dir = get_log_dir(log_dir, expt_name)
# Save the losses for further inspection
pickle.dump(losses, open(os.path.join(log_dir, 'losses.pkl'), 'wb'))
###########################################################################
# PLOT THE LOSSES #
###########################################################################
plot_loss(losses['d'], 'discriminator', 'd_loss.png', log_dir)
plot_loss(losses['d_val'], 'discriminator validation', 'd_val_loss.png', log_dir)
plot_loss(losses['p2p'], 'Pix2Pix', 'p2p_loss.png', log_dir)
plot_loss(losses['p2p_val'], 'Pix2Pix validation', 'p2p_val_loss.png', log_dir)
###########################################################################
# PLOT THE A->B RESULTS #
###########################################################################
plt.figure(figsize=(10, 6))
for i in range(N*N):
a, _ = next(it_val)
bp = atob.predict(a)
img = compose_imgs(a[0], bp[0], is_a_binary=is_a_binary, is_b_binary=is_b_binary)
plt.subplot(N, N, i+1)
plt.imshow(img)
plt.axis('off')
plt.savefig(os.path.join(log_dir, 'atob.png'))
plt.clf()
# Make sure all the figures are closed.
plt.close('all')
def save_weights(models, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Save the weights of the models into a file."""
log_dir = get_log_dir(log_dir, expt_name)
models.atob.save_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE), overwrite=True)
models.d.save_weights(os.path.join(log_dir, D_WEIGHTS_FILE), overwrite=True)
def load_weights(atob, d, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights into the corresponding models."""
log_dir = get_log_dir(log_dir, expt_name)
atob.load_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE))
d.load_weights(os.path.join(log_dir, D_WEIGHTS_FILE))
def load_weights_of(m, weights_file, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights of the model m."""
log_dir = get_log_dir(log_dir, expt_name)
m.load_weights(os.path.join(log_dir, weights_file))
def load_losses(log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the losses of the given experiment."""
log_dir = get_log_dir(log_dir, expt_name)
losses = pickle.load(open(os.path.join(log_dir, 'losses.pkl'), 'rb'))
return losses
def load_params(params):
"""
Load the parameters of an experiment and return them.
The params passed as argument will be merged with the new params dict.
If there is a conflict with a key, the params passed as argument prevails.
"""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
expt_params = json.load(open(os.path.join(expt_dir, 'params.json'), 'rb'))
# Update the loaded parameters with the current parameters. This will
# override conflicting keys as expected.
expt_params.update(params)
return expt_params
| [((11, 0, 11, 9), 'seaborn.set', 'sns.set', ({}, {}), '()', True, 'import seaborn as sns\n'), ((68, 15, 68, 37), 'numpy.zeros', 'np.zeros', ({(68, 24, 68, 36): '(h, 2 * w, ch)'}, {}), '((h, 2 * w, ch))', True, 'import numpy as np\n'), ((113, 4, 113, 30), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((114, 4, 114, 31), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((115, 4, 115, 16), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((117, 4, 117, 13), 'matplotlib.pyplot.clf', 'plt.clf', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((140, 4, 140, 31), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((152, 4, 152, 13), 'matplotlib.pyplot.clf', 'plt.clf', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((155, 4, 155, 20), 'matplotlib.pyplot.close', 'plt.close', ({(155, 14, 155, 19): '"""all"""'}, {}), "('all')", True, 'import matplotlib.pyplot as plt\n'), ((48, 15, 48, 40), 'numpy.repeat', 'np.repeat', (), '', True, 'import numpy as np\n'), ((81, 15, 81, 47), 'os.path.join', 'os.path.join', ({(81, 28, 81, 35): 'log_dir', (81, 37, 81, 46): 'expt_name'}, {}), '(log_dir, expt_name)', False, 'import os\n'), ((88, 8, 88, 27), 'os.makedirs', 'os.makedirs', ({(88, 20, 88, 26): 'mypath'}, {}), '(mypath)', False, 'import os\n'), ((116, 16, 116, 47), 'os.path.join', 'os.path.join', ({(116, 29, 116, 36): 'log_dir', (116, 38, 116, 46): 'filename'}, {}), '(log_dir, filename)', False, 'import os\n'), ((147, 8, 147, 30), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(147, 20, 147, 21): 'N', (147, 23, 147, 24): 'N', (147, 26, 147, 29): '(i + 1)'}, {}), '(N, N, i + 1)', True, 'import matplotlib.pyplot as plt\n'), ((148, 8, 148, 23), 'matplotlib.pyplot.imshow', 'plt.imshow', ({(148, 19, 148, 22): 'img'}, {}), '(img)', True, 'import matplotlib.pyplot as plt\n'), ((149, 8, 149, 23), 'matplotlib.pyplot.axis', 'plt.axis', ({(149, 17, 149, 22): '"""off"""'}, {}), "('off')", True, 'import matplotlib.pyplot as plt\n'), ((151, 16, 151, 49), 'os.path.join', 'os.path.join', ({(151, 29, 151, 36): 'log_dir', (151, 38, 151, 48): '"""atob.png"""'}, {}), "(log_dir, 'atob.png')", False, 'import os\n'), ((162, 29, 162, 69), 'os.path.join', 'os.path.join', ({(162, 42, 162, 49): 'log_dir', (162, 51, 162, 68): 'ATOB_WEIGHTS_FILE'}, {}), '(log_dir, ATOB_WEIGHTS_FILE)', False, 'import os\n'), ((163, 26, 163, 63), 'os.path.join', 'os.path.join', ({(163, 39, 163, 46): 'log_dir', (163, 48, 163, 62): 'D_WEIGHTS_FILE'}, {}), '(log_dir, D_WEIGHTS_FILE)', False, 'import os\n'), ((170, 22, 170, 62), 'os.path.join', 'os.path.join', ({(170, 35, 170, 42): 'log_dir', (170, 44, 170, 61): 'ATOB_WEIGHTS_FILE'}, {}), '(log_dir, ATOB_WEIGHTS_FILE)', False, 'import os\n'), ((171, 19, 171, 56), 'os.path.join', 'os.path.join', ({(171, 32, 171, 39): 'log_dir', (171, 41, 171, 55): 'D_WEIGHTS_FILE'}, {}), '(log_dir, D_WEIGHTS_FILE)', False, 'import os\n'), ((178, 19, 178, 54), 'os.path.join', 'os.path.join', ({(178, 32, 178, 39): 'log_dir', (178, 41, 178, 53): 'weights_file'}, {}), '(log_dir, weights_file)', False, 'import os\n'), ((105, 27, 105, 64), 'os.path.join', 'os.path.join', ({(105, 40, 105, 48): 'expt_dir', (105, 50, 105, 63): '"""params.json"""'}, {}), "(expt_dir, 'params.json')", False, 'import os\n'), ((126, 29, 126, 64), 'os.path.join', 'os.path.join', ({(126, 42, 126, 49): 'log_dir', (126, 51, 126, 63): '"""losses.pkl"""'}, 
{}), "(log_dir, 'losses.pkl')", False, 'import os\n'), ((184, 30, 184, 65), 'os.path.join', 'os.path.join', ({(184, 43, 184, 50): 'log_dir', (184, 52, 184, 64): '"""losses.pkl"""'}, {}), "(log_dir, 'losses.pkl')", False, 'import os\n'), ((197, 33, 197, 70), 'os.path.join', 'os.path.join', ({(197, 46, 197, 54): 'expt_dir', (197, 56, 197, 69): '"""params.json"""'}, {}), "(expt_dir, 'params.json')", False, 'import os\n'), ((90, 35, 90, 56), 'os.path.isdir', 'os.path.isdir', ({(90, 49, 90, 55): 'mypath'}, {}), '(mypath)', False, 'import os\n')] |
CakeCrusher/voon-video_processing | services/apiRequests.py | 6ecaacf4e36baa72d713a92101b445885b3d95ef | from github import Github
def parseGithubURL(url):
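    """Split a GitHub repository URL into its owner and repository name."""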
splitURL = url.split('/')
owner = splitURL[3]
repo = splitURL[4]
return {
"owner": owner,
"repo": repo
}
def fetchRepoFiles(owner, repo):
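    """Recursively collect the paths of all files in the given GitHub repository."""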
files = []
g = Github('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')
repo = g.get_repo(f'{owner}/{repo}')
contents = repo.get_contents('')
while contents:
file_content = contents.pop(0)
if file_content.type == 'dir':
contents.extend(repo.get_contents(file_content.path))
else:
files.append(file_content.path)
return files
# parsedUrl = parseGithubURL('https://github.com/CakeCrusher/restock_emailer')
# filePaths = fetchRepoFiles(parsedUrl['owner'], parsedUrl['repo'])
# files = [path.split('/')[-1] for path in filePaths]
# print(files)
| [((13, 8, 13, 58), 'github.Github', 'Github', ({(13, 15, 13, 57): '"""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"""'}, {}), "('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')", False, 'from github import Github\n')] |
HouchangX-AI/Dialog-Solution | utils/tricks.py | 1f68f847d9c9c4a46ef0b5fc6a78014402a4dd7a | #-*- coding: utf-8 -*-
import codecs
import random
from utils.global_names import GlobalNames, get_file_path
def modify_tokens(tokens):
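    # merge bracketed placeholder tokens (e.g. "[", "x", "]") into single tokens, masking digits as "_digit_"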
new_tokens = []
pos = 0
len_ = len(tokens)
while pos < len_:
if tokens[pos] == "[":
if pos+2 < len_ and tokens[pos+2] == "]":
token = "".join(tokens[pos:pos+3])
new_tokens.append(token)
pos += 3
elif pos+3 < len_ and tokens[pos+3] == "]":
if tokens[pos+2].isdigit():
tokens[pos+2] = "_digit_"
token = "".join(tokens[pos:pos+4])
new_tokens.append(token)
pos += 4
else:
pos += 1
else:
new_tokens.append(tokens[pos])
pos += 1
return new_tokens
def length_weight(corpus, orders, length_limit=6):
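    # return the position of the first ordered candidate whose text exceeds length_limit (0 if none does)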
for idx, _ in enumerate(orders):
if len(corpus[idx]) > length_limit:
return idx
return 0
| [] |
Jagadambass/Graph-Neural-Networks | test/functional/test_device.py | c8f1d87f8cd67d645c2f05f370be039acf05ca52 | from graphgallery.functional import device
import tensorflow as tf
import torch
def test_device():
# how about other backend?
# tf
assert isinstance(device("cpu", "tf"), str)
assert device() == 'cpu'
assert device("cpu", "tf") == 'CPU'
assert device("cpu", "tf") == 'cpu'
assert device("device/cpu", "tf") == 'cpu'
try:
assert device("gpu", "tf") == 'GPU'
assert device("cuda", "tf") == 'GPU'
except RuntimeError:
pass
    tf_device = tf.device("cpu")
    assert device(tf_device, "tf") == tf_device._device_name
    # ?? torch
    torch_device = device("cpu", "torch")
    assert isinstance(torch_device, torch.device) and 'cpu' in str(torch_device)
    torch_device = device(backend="torch")
    assert isinstance(torch_device, torch.device) and 'cpu' in str(torch_device)
try:
assert 'cuda' in str(device("gpu", "torch"))
assert 'cuda' in str(device("cuda", "torch"))
except RuntimeError:
pass
    torch_device = torch.device("cpu")
    assert device(torch_device, "torch") == torch_device
if __name__ == "__main__":
test_device()
| [((21, 13, 21, 29), 'tensorflow.device', 'tf.device', ({(21, 23, 21, 28): '"""cpu"""'}, {}), "('cpu')", True, 'import tensorflow as tf\n'), ((35, 13, 35, 32), 'torch.device', 'torch.device', ({(35, 26, 35, 31): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n')] |
krinj/hanabi-simulator | py_hanabi/card.py | b77b04aa09bab8bd8d7b784e04bf8b9d5d76d1a6 | # -*- coding: utf-8 -*-
"""
A card (duh).
"""
import random
import uuid
from enum import Enum
from typing import List
from py_hanabi.settings import CARD_DECK_DISTRIBUTION
__author__ = "Jakrin Juangbhanich"
__email__ = "[email protected]"
class Color(Enum):
RED = 1
BLUE = 2
GREEN = 3
YELLOW = 4
WHITE = 5
class Card:
def __init__(self, number: int, color: Color):
self._number: int = number
self._color: Color = color
self._id: str = uuid.uuid4().hex
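        # counts of positive hints received that match this card's number / color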
self._hint_number_counter: int = 0
self._hint_color_counter: int = 0
# self._index_hinted: List[int] = []
# self._lone_hinted: List[bool] = []
# According to hints, these are the ones we know it is NOT.
self.not_color: List[Color] = []
self.not_number: List[int] = []
def __repr__(self):
hint_str = ""
if self.hint_received_color:
hint_str += "C"
if self.hint_received_number:
hint_str += "N"
return f"[{self.color} {self.number} {hint_str}]"
def __eq__(self, other: 'Card'):
return self.color == other.color and self.number == other.number
def receive_hint_number(self, number: int):
if number == self.number:
self._hint_number_counter += 1
else:
self.not_number.append(number)
def receive_hint_color(self, color: Color):
if color == self.color:
self._hint_color_counter += 1
else:
self.not_color.append(color)
def remove_hint_number(self, number: int):
if number == self.number:
self._hint_number_counter -= 1
else:
self.not_number.pop()
def remove_hint_color(self, color: Color):
if color == self.color:
self._hint_color_counter -= 1
else:
self.not_color.pop()
@property
def label(self):
return f"{self.number} of {self.get_color_label(self.color)}"
@property
def id(self) -> str:
return self._id
@property
def key(self) -> tuple:
return self.get_key(self.color, self.number)
@staticmethod
def get_key(c: Color, n: int) -> tuple:
return c, n
@property
def number(self) -> int:
return self._number
@property
def color(self) -> Color:
return self._color
@property
def observed_color(self) -> Color:
return None if not self.hint_received_color else self._color
@property
def observed_number(self) -> int:
return None if not self.hint_received_number else self._number
@property
def hint_received_number(self) -> bool:
return self._hint_number_counter > 0
@property
def hint_received_color(self) -> bool:
return self._hint_color_counter > 0
@staticmethod
def generate_deck() -> List['Card']:
""" Generate the starting deck for the game. """
deck: List[Card] = []
for color in Color:
for i in CARD_DECK_DISTRIBUTION:
card = Card(i, color)
deck.append(card)
random.shuffle(deck)
return deck
@staticmethod
def get_color_label(color: Color) -> str:
color_labels = {
Color.BLUE: "Blue",
Color.RED: "Red",
Color.YELLOW: "Yellow",
Color.GREEN: "Green",
Color.WHITE: "White",
}
return color_labels[color]
| [((126, 8, 126, 28), 'random.shuffle', 'random.shuffle', ({(126, 23, 126, 27): 'deck'}, {}), '(deck)', False, 'import random\n'), ((29, 24, 29, 36), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n')] |
bigsassy/django-facetools | facetools/test/testcases.py | aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c | import types
import django.test.testcases
from django.conf import settings
from facetools.models import TestUser
from facetools.common import _create_signed_request
from facetools.test import TestUserNotLoaded
from facetools.signals import sync_facebook_test_user, setup_facebook_test_client
from facetools.common import _get_facetools_test_fixture_name
class FacebookTestCaseMixin(object):
"""
TestCase which makes it possible to test views when the FacebookMiddleware
and SyncFacebookUser middlewares are activated. Must use the Client
attached to this object (i.e. self.client).
"""
facebook_test_user = None
def set_client_signed_request(self, facebook_id, access_token):
"""
Allow code to configure the test client so it has a signed request
of the specified test user for each request
"""
setup_facebook_test_client.send(sender=None, client=self.client, signed_request=_create_signed_request(
settings.FACEBOOK_APPLICATION_SECRET_KEY, facebook_id, oauth_token=access_token))
def _pre_setup(self):
if self.facebook_test_user:
if type(self.facebook_test_user) not in [str, unicode]:
raise Exception("facebook_test_user variable must be a string (found a %s)" % type(self.facebook_test_user))
app_name = get_app_name_from_test_case(type(self).__module__)
facetools_fixture_name = _get_facetools_test_fixture_name(app_name)
if not hasattr(self, 'fixtures'):
self.fixtures = []
if facetools_fixture_name not in self.fixtures:
self.fixtures.append(facetools_fixture_name)
super(FacebookTestCaseMixin, self)._pre_setup()
# Make sure anybody that needs to sync their models loaded from fixtures
# has a chance to do so now that the refreshed user test data is available.
try:
for test_user in TestUser.objects.all():
sync_facebook_test_user.send(sender=None, test_user=test_user)
self.test_user = TestUser.objects.get(name=self.facebook_test_user)
self.set_client_signed_request(self.test_user.facebook_id, self.test_user.access_token)
except TestUser.DoesNotExist:
raise TestUserNotLoaded("Test user %s hasn't been loaded via the %s fixture (did you run sync_facebook_test_users?)" %
(self.facebook_test_user, facetools_fixture_name))
else:
super(FacebookTestCaseMixin, self)._pre_setup()
def get_app_name_from_test_case(module_path_string):
"""
    Gets the Django app name from the module path of a TestCase in a Django app.
    module_path_string should look something like this: 'facetools_tests.tests.test_test_module'
"""
packages = module_path_string.split(".")
try:
tests_location = packages.index("tests")
except ValueError:
raise ValueError("Couldn't find tests module in %s (are you running this test from tests.py or a tests package in your Django app?)" % module_path_string)
if tests_location == 0:
raise ValueError("Facetools doesn't support Django app's with a name of 'tests', or it failed to find the Django app name out of %s" % module_path_string)
app_name = packages[tests_location - 1]
if app_name not in settings.INSTALLED_APPS:
raise ValueError("Facetools didn't find %s among INSTALLED_APPS. (app name pulled from %s)" % (app_name, module_path_string))
return app_name
# -----------------------------------------------------------------------------
# Test Cases
# -----------------------------------------------------------------------------
class FacebookTransactionTestCase(FacebookTestCaseMixin, django.test.testcases.TransactionTestCase):
def _pre_setup(self):
super(FacebookTransactionTestCase, self)._pre_setup()
class FacebookTestCase(FacebookTestCaseMixin, django.test.testcases.TestCase):
def _pre_setup(self):
super(FacebookTestCase, self)._pre_setup()
if 'LiveServerTestCase' in dir(django.test.testcases):
class FacebookLiveServerTestCase(FacebookTestCaseMixin, django.test.testcases.LiveServerTestCase):
def _pre_setup(self):
super(FacebookLiveServerTestCase, self)._pre_setup()
| [((33, 37, 33, 79), 'facetools.common._get_facetools_test_fixture_name', '_get_facetools_test_fixture_name', ({(33, 70, 33, 78): 'app_name'}, {}), '(app_name)', False, 'from facetools.common import _get_facetools_test_fixture_name\n'), ((24, 88, 25, 92), 'facetools.common._create_signed_request', '_create_signed_request', (), '', False, 'from facetools.common import _create_signed_request\n'), ((44, 33, 44, 55), 'facetools.models.TestUser.objects.all', 'TestUser.objects.all', ({}, {}), '()', False, 'from facetools.models import TestUser\n'), ((46, 33, 46, 83), 'facetools.models.TestUser.objects.get', 'TestUser.objects.get', (), '', False, 'from facetools.models import TestUser\n'), ((45, 20, 45, 82), 'facetools.signals.sync_facebook_test_user.send', 'sync_facebook_test_user.send', (), '', False, 'from facetools.signals import sync_facebook_test_user, setup_facebook_test_client\n'), ((49, 22, 50, 90), 'facetools.test.TestUserNotLoaded', 'TestUserNotLoaded', ({(49, 40, 50, 89): '("Test user %s hasn\'t been loaded via the %s fixture (did you run sync_facebook_test_users?)"\n % (self.facebook_test_user, facetools_fixture_name))'}, {}), '(\n "Test user %s hasn\'t been loaded via the %s fixture (did you run sync_facebook_test_users?)"\n % (self.facebook_test_user, facetools_fixture_name))', False, 'from facetools.test import TestUserNotLoaded\n')] |
d2gex/distpickymodel | setup.py | 7acd4ffafbe592d6336d91d6e7411cd45357e41c | import setuptools
import distpickymodel
def get_long_desc():
with open("README.rst", "r") as fh:
return fh.read()
setuptools.setup(
name="distpickymodel",
version=distpickymodel.__version__,
author="Dan G",
author_email="[email protected]",
description="A shared Mongoengine-based model library",
long_description=get_long_desc(),
url="https://github.com/d2gex/distpickymodel",
# Exclude 'tests' and 'docs'
packages=['distpickymodel'],
python_requires='>=3.6',
install_requires=['pymongo>=3.7.2', 'mongoengine>=0.17.0', 'six'],
tests_require=['pytest>=4.4.0', 'PyYAML>=5.1'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| [] |
tinatasha/passwordgenerator | credentials_test.py | ad161e14779e975e98ad989c5df976ac3662f8d8 | import unittest
from password import Credentials
class TestCredentials(unittest.TestCase):
"""
Class to test behaviour of the credentials class
"""
def setUp(self):
"""
Setup method that defines instructions
"""
self.new_credentials = Credentials("Github","Tina","blackfaffp1")
def tearDown(self):
"""
Method that cleans up after each test
"""
Credentials.credentials_list = []
def test_init(self):
"""
Test for correct initialization
"""
self.assertEqual(self.new_credentials.account_name,"Github")
        self.assertEqual(self.new_credentials.username,"Tina")
        self.assertEqual(self.new_credentials.password,"blackfaffp1")
def test_save_credentials(self):
"""
Test to check whether app saves account credentials
"""
self.new_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_list),1)
def test_save_multiple_credentials(self):
"""
Test for saving multiple credentials
"""
self.new_credentials.save_credentials()
test_credentials = Credentials("AllFootball","Kibet","messithegoat")
test_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_list),2)
def test_view_credentials(self):
"""
Test to view an account credential
"""
self.assertEqual(Credentials.display_credentials(),Credentials.credentials_list)
def test_delete_credentials(self):
"""
Test to delete account credentials
"""
self.new_credentials.save_credentials()
test_credentials = Credentials("i","love","cats")
test_credentials.save_credentials()
self.new_credentials.delete_credentials()
self.assertEqual(len(Credentials.credentials_list),1)
if __name__ == '__main__':
unittest.main() | [] |
ufpa-organization-repositories/evolutionary-computing | homework_08/calc_fitness.py | e16786f9619e2b357b94ab91ff3a7b352e6a0d92 | def calc_fitness(pop):
from to_decimal import to_decimal
from math import sin, sqrt
for index, elem in enumerate(pop):
        # only assign a fitness to chromosomes that do not yet have one
# print(elem[0], elem[1])
x = to_decimal(elem[0])
y = to_decimal(elem[1])
# x = elem[0]
# y = elem[1]
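        # Schaffer F6 function: its maximum value of 1.0 is at the origin (0, 0)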
f6 = 0.5 - ((sin(sqrt(x**2 + y**2)))**2 - 0.5) / (1 + 0.001 * (x**2 + y**2))**2
pop[index] = [f6, elem]
return 0
# populacao = [[0,0],[-3,1]]
# calc_fitness(pop=populacao)
# print(populacao)
| [((9, 12, 9, 31), 'to_decimal.to_decimal', 'to_decimal', ({(9, 23, 9, 30): 'elem[0]'}, {}), '(elem[0])', False, 'from to_decimal import to_decimal\n'), ((10, 12, 10, 31), 'to_decimal.to_decimal', 'to_decimal', ({(10, 23, 10, 30): 'elem[1]'}, {}), '(elem[1])', False, 'from to_decimal import to_decimal\n'), ((13, 25, 13, 42), 'math.sqrt', 'sqrt', ({(13, 30, 13, 41): '(x ** 2 + y ** 2)'}, {}), '(x ** 2 + y ** 2)', False, 'from math import sin, sqrt\n')] |
jamenor/pichetprofile | pichetprofile/__init__.py | 6633ea6eaa7473af9e10f34f6a19428c2db92465 | # -*- coding: utf-8 -*-
from oopschool.school import Student,Tesla,SpecialStudent,Teacher
from oopschool.newschool import Test | [] |
HPluseven/playground | leetcode/group2/461.py | 78e363b5b376af3945bcb55a13d6a96b7c151a1b | class Solution:
def hammingDistance(self, x: int, y: int) -> int:
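        # XOR the inputs, then count set bits by testing the lowest bit and shifting right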
xor = x ^ y
distance = 0
while xor:
if xor & 1:
distance += 1
xor = xor >> 1
return distance
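# Alternative solution: Brian Kernighan's trick -- xor & (xor - 1) clears the lowest set bit,
# so the loop runs once per differing bit instead of once per bit position.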
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
xor = x ^ y
distance = 0
while xor:
distance += 1
xor = xor & (xor-1)
return distance
| [] |
pageuppeople-opensource/relational-data-loader | rdl/data_sources/DataSourceFactory.py | 0bac7036d65636d06eacca4e68e09d6e1c506ea4 | import logging
from rdl.data_sources.MsSqlDataSource import MsSqlDataSource
from rdl.data_sources.AWSLambdaDataSource import AWSLambdaDataSource
class DataSourceFactory(object):
def __init__(self, logger=None):
self.logger = logger or logging.getLogger(__name__)
self.sources = [MsSqlDataSource, AWSLambdaDataSource]
def create_source(self, connection_string):
for source in self.sources:
if source.can_handle_connection_string(connection_string):
self.logger.info(
f"Found handler '{source}' for given connection string."
)
return source(connection_string)
raise RuntimeError(
"There are no data sources that can handle this connection string"
)
def is_prefix_supported(self, connection_string):
for source in self.sources:
if source.can_handle_connection_string(connection_string):
return True
return False
def get_supported_source_prefixes(self):
return list(
map(lambda source: source.get_connection_string_prefix(), self.sources)
)
| [((8, 32, 8, 59), 'logging.getLogger', 'logging.getLogger', ({(8, 50, 8, 58): '__name__'}, {}), '(__name__)', False, 'import logging\n')] |
alexmalins/kagglebook | ch05/ch05-02-timeseries.py | 260f6634b6bbaa94c2e989770e75dc7101f5c614 | # ---------------------------------
# Prepare the data etc.
# ----------------------------------
import numpy as np
import pandas as pd
# train_x is the training data, train_y is the target values, and test_x is the test data
# stored in pandas DataFrames and Series (numpy arrays also used)
train = pd.read_csv('../input/sample-data/train_preprocessed.csv')
train_x = train.drop(['target'], axis=1)
train_y = train['target']
test_x = pd.read_csv('../input/sample-data/test_preprocessed.csv')
# As time-series data assume a period variable is set that changes with time
train_x['period'] = np.arange(0, len(train_x)) // (len(train_x) // 4)
train_x['period'] = np.clip(train_x['period'], 0, 3)
test_x['period'] = 4
# -----------------------------------
# Hold-out method for time-series data
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Here for within the training data period 3 is used for validation and periods 0 to 2 are used for training
is_tr = train_x['period'] < 3
is_va = train_x['period'] == 3
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
# -----------------------------------
# Cross validation for time-series data (use method that follows time)
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training
va_period_list = [1, 2, 3]
for va_period in va_period_list:
is_tr = train_x['period'] < va_period
is_va = train_x['period'] == va_period
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
# (For reference) TimeSeriesSplit() is of limited use here because it can only split on row order, not on the period variable
from sklearn.model_selection import TimeSeriesSplit
tss = TimeSeriesSplit(n_splits=4)
for tr_idx, va_idx in tss.split(train_x):
tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]
tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]
# -----------------------------------
# Cross validation for time-series data (method to simply partition by time)
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training
va_period_list = [0, 1, 2, 3]
for va_period in va_period_list:
is_tr = train_x['period'] != va_period
is_va = train_x['period'] == va_period
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
| [((10, 8, 10, 66), 'pandas.read_csv', 'pd.read_csv', ({(10, 20, 10, 65): '"""../input/sample-data/train_preprocessed.csv"""'}, {}), "('../input/sample-data/train_preprocessed.csv')", True, 'import pandas as pd\n'), ((13, 9, 13, 66), 'pandas.read_csv', 'pd.read_csv', ({(13, 21, 13, 65): '"""../input/sample-data/test_preprocessed.csv"""'}, {}), "('../input/sample-data/test_preprocessed.csv')", True, 'import pandas as pd\n'), ((17, 20, 17, 52), 'numpy.clip', 'np.clip', ({(17, 28, 17, 45): "train_x['period']", (17, 47, 17, 48): '0', (17, 50, 17, 51): '3'}, {}), "(train_x['period'], 0, 3)", True, 'import numpy as np\n'), ((46, 6, 46, 33), 'sklearn.model_selection.TimeSeriesSplit', 'TimeSeriesSplit', (), '', False, 'from sklearn.model_selection import TimeSeriesSplit\n')] |
owo/jitalk | server/WitClient.py | 2db2782282a2302b4cf6049030822734a6856982 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import wit
import json
class WitClient(object):
    """Thin wrapper around the legacy wit SDK for sending text queries to Wit.ai."""
_access_token = 'NBPOVLY7T6W3KOUEML2GXOWODH3LPWPD'
def __init__(self):
wit.init()
def text_query(self, text):
res = json.loads(wit.text_query(text, WitClient._access_token))
return res["outcomes"]
def close_connection(self):
wit.close()
if __name__ == "__main__":
print "You ran the Wit client, nothing will happen. Exiting..." | [] |
Mohammed-Shoaib/HackerRank-Problems | HackerRank/Python/Easy/E0036.py | ccfb9fc2f0d8dff454439d75ce519cf83bad7c3b | # Problem Statement: https://www.hackerrank.com/challenges/itertools-combinations-with-replacement/problem
from itertools import combinations_with_replacement
S, k = input().split()
for comb in combinations_with_replacement(sorted(S), int(k)):
print(''.join(comb)) | [] |
hayyubi/visual-genome-driver | visual_genome/models.py | 412223bf1552b1927fb1219cfcf90dcd2599bf34 | """
Visual Genome Python API wrapper, models
"""
class Image:
"""
Image.
ID int
url hyperlink string
width int
height int
"""
def __init__(self, id, url, width, height, coco_id, flickr_id):
self.id = id
self.url = url
self.width = width
self.height = height
self.coco_id = coco_id
self.flickr_id = flickr_id
def __str__(self):
        coco_id = -1 if self.coco_id is None else self.coco_id
        flickr_id = -1 if self.flickr_id is None else self.flickr_id
        return 'id: %d, coco_id: %d, flickr_id: %d, width: %d, url: %s' \
            % (self.id, coco_id, flickr_id, self.width, self.url)
def __repr__(self):
return str(self)
class Region:
"""
Region.
image int
phrase string
x int
y int
width int
height int
"""
def __init__(self, id, image, phrase, x, y, width, height):
self.id = id
self.image = image
self.phrase = phrase
self.x = x
self.y = y
self.width = width
self.height = height
def __str__(self):
stat_str = 'id: {0}, x: {1}, y: {2}, width: {3},' \
'height: {4}, phrase: {5}, image: {6}'
return stat_str.format(self.id, self.x, self.y,
self.width, self.height, self.phrase,
self.image.id)
def __repr__(self):
return str(self)
class Graph:
"""
Graphs contain objects, relationships and attributes
image Image
bboxes Object array
relationships Relationship array
attributes Attribute array
"""
def __init__(self, image, objects, relationships, attributes):
self.image = image
self.objects = objects
self.relationships = relationships
self.attributes = attributes
class Object:
"""
Objects.
id int
x int
y int
width int
height int
names string array
synsets Synset array
"""
def __init__(self, id, x, y, width, height, names, synsets):
self.id = id
self.x = x
self.y = y
self.width = width
self.height = height
self.names = names[0]
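        # note: only the first entry of the names list is stored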
self.synsets = synsets
self.bbox = [x, y, width, height]
def __str__(self):
        name = self.names if len(self.names) != 0 else 'None'  # self.names holds a single name string
return '%s' % (name)
def __repr__(self):
return str(self)
class Relationship:
"""
Relationships. Ex, 'man - jumping over - fire hydrant'.
subject int
predicate string
object int
rel_canon Synset
"""
def __init__(self, id, subject, predicate, object, synset):
self.id = id
self.subject = subject
self.predicate = predicate
self.object = object
self.synset = synset
def __str__(self):
return "{0}: {1} {2} {3}".format(self.id, self.subject,
self.predicate, self.object)
def __repr__(self):
return str(self)
class Attribute:
"""
Attributes. Ex, 'man - old'.
subject Object
attribute string
synset Synset
"""
def __init__(self, id, subject, attribute, synset):
self.id = id
self.subject = subject
self.attribute = attribute
self.synset = synset
def __str__(self):
return "%d: %s is %s" % (self.id, self.subject, self.attribute)
def __repr__(self):
return str(self)
class QA:
"""
Question Answer Pairs.
ID int
image int
question string
answer string
q_objects QAObject array
a_objects QAObject array
"""
def __init__(self, id, image, question, answer,
question_objects, answer_objects):
self.id = id
self.image = image
self.question = question
self.answer = answer
self.q_objects = question_objects
self.a_objects = answer_objects
def __str__(self):
return 'id: %d, image: %d, question: %s, answer: %s' \
% (self.id, self.image.id, self.question, self.answer)
def __repr__(self):
return str(self)
class QAObject:
"""
Question Answer Objects are localized in the image and refer to a part
of the question text or the answer text.
start_idx int
end_idx int
name string
synset_name string
synset_definition string
"""
def __init__(self, start_idx, end_idx, name, synset):
self.start_idx = start_idx
self.end_idx = end_idx
self.name = name
self.synset = synset
def __repr__(self):
return str(self)
class Synset:
"""
Wordnet Synsets.
name string
definition string
"""
def __init__(self, name, definition):
self.name = name
self.definition = definition
def __str__(self):
return '{} - {}'.format(self.name, self.definition)
def __repr__(self):
return str(self)
| [] |
GayashanNA/my-scripts | python-scripts/plot_delay.py | d865e828c833d6b54c787ce9475da512f8488278 | import csv
import matplotlib.pyplot as plt
import time
PLOT_PER_WINDOW = False
WINDOW_LENGTH = 60000
BINS = 1000
delay_store = {}
perwindow_delay_store = {}
plotting_delay_store = {}
filename = "output-large.csv"
# filename = "output.csv"
# filename = "output-medium.csv"
# filename = "output-small.csv"
# filename = "output-tiny.csv"
with open(filename, "rU") as dataFile:
csvreader = csv.reader(dataFile)
for row in csvreader:
if len(row) > 2 and str(row[0]).isdigit():
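            # row[1] is used as the event timestamp (epoch milliseconds), row[2] as the delay in ms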
delay_store[long(row[1])] = long(row[2])
window_begin = min(delay_store.keys())
window_end = max(delay_store.keys())
if PLOT_PER_WINDOW:
window_end = window_begin + WINDOW_LENGTH
# find the time delays that are within the window of choice
for (tapp, delay) in delay_store.iteritems():
if window_begin <= tapp <= window_end:
perwindow_delay_store[tapp] = delay
plotting_delay_store = perwindow_delay_store
else:
plotting_delay_store = delay_store
# the histogram of the data
n, bins, patches = plt.hist(plotting_delay_store.values(), BINS, histtype='stepfilled',
normed=True, cumulative=False, facecolor='blue', alpha=0.9)
# plt.axhline(y=0.95, color='red', label='0.95')
max_delay = max(plotting_delay_store.values())
min_delay = min(plotting_delay_store.values())
count = len(plotting_delay_store.values())
# format epoch time to date time to be shown in the plot figure
window_begin_in_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(window_begin / 1000))
window_end_in_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(window_end / 1000))
title = "Window begin: %s\n" % window_begin_in_datetime
title += "Window end: %s\n" % window_end_in_datetime
# title += "Window length: %dms\n" % WINDOW_LENGTH
title += "Window length: ~%dmins\n" % ((window_end - window_begin)/60000)
title += "Maximum delay: %dms\n" % max_delay
title += "Minimum delay: %dms\n" % min_delay
title += "Count: %d" % count
# start plotting
plt.xlabel('Delay (ms)')
plt.ylabel('Probability')
plt.grid(True)
plt.legend()
plt.suptitle(title)
plt.subplots_adjust(top=0.8)
plt.show()
| [((58, 0, 58, 24), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(58, 11, 58, 23): '"""Delay (ms)"""'}, {}), "('Delay (ms)')", True, 'import matplotlib.pyplot as plt\n'), ((59, 0, 59, 25), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(59, 11, 59, 24): '"""Probability"""'}, {}), "('Probability')", True, 'import matplotlib.pyplot as plt\n'), ((60, 0, 60, 14), 'matplotlib.pyplot.grid', 'plt.grid', ({(60, 9, 60, 13): '(True)'}, {}), '(True)', True, 'import matplotlib.pyplot as plt\n'), ((61, 0, 61, 12), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((62, 0, 62, 19), 'matplotlib.pyplot.suptitle', 'plt.suptitle', ({(62, 13, 62, 18): 'title'}, {}), '(title)', True, 'import matplotlib.pyplot as plt\n'), ((63, 0, 63, 28), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', (), '', True, 'import matplotlib.pyplot as plt\n'), ((64, 0, 64, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((19, 16, 19, 36), 'csv.reader', 'csv.reader', ({(19, 27, 19, 35): 'dataFile'}, {}), '(dataFile)', False, 'import csv\n'), ((46, 62, 46, 97), 'time.localtime', 'time.localtime', ({(46, 77, 46, 96): 'window_begin / 1000'}, {}), '(window_begin / 1000)', False, 'import time\n'), ((47, 60, 47, 93), 'time.localtime', 'time.localtime', ({(47, 75, 47, 92): 'window_end / 1000'}, {}), '(window_end / 1000)', False, 'import time\n')] |
peterthorpe5/Methods_M.cerasi_R.padi_genome_assembly | python_scripts/BUSCO_phylogenetics/rename_all_fa_seq.py | c6cb771afaf40f5def47e33ff11cd8867ec528e0 | #!/usr/bin/env python
# author: Peter Thorpe September 2015. The James Hutton Insitute, Dundee, UK.
# title rename single copy busco genes
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
import os
from sys import stdin,argv
import sys
from optparse import OptionParser
########################################################################
# functions
def parse_busco_file(busco):
    """Open the BUSCO full output table and collect the names of genes
    flagged as Duplicated or Fragmented. The list is needed so those
    genes can be ignored later. Takes a filename, returns a list."""
duplicated_list = []
with open(busco) as handle:
for line in handle:
if not line.strip():
continue # if the last line is blank
if line.startswith("#"):
continue
if not line:
print ("your file is empty")
return False
line_info = line.rstrip().split("\t")
# first element
Busco_name = line_info[0]
# second element
status = line_info[1]
if status == "Duplicated" or status == "Fragmented":
duplicated_list.append(Busco_name)
return duplicated_list
def reformat_as_fasta(filename,prefix,outfile):
    "this function re-writes a file as a fasta file"
f= open(outfile, 'w')
fas = open(filename, "r")
for line in fas:
if not line.strip():
continue # if the last line is blank
if line.startswith("#"):
continue
if not line:
return False
if not line.startswith(">"):
seq = line
title = ">" + prefix + "_" + filename.replace("BUSCOa", "").split(".fas")[0]
data = "%s\n%s\n" %(title, seq)
f.write(data)
f.close()
if "-v" in sys.argv or "--version" in sys.argv:
print "v0.0.1"
sys.exit(0)
usage = """Use as follows:
$ python rename_all_fa_seq.py -p Mce -b full_table_BUSCO_output
script to walk through all files in a folder and rename the seq id
to start with Prefix.
Used for Busco output.
give it the busco full output table. The script will only return
complete single copy genes. Duplicated genes will be ignored.
"""
parser = OptionParser(usage=usage)
parser.add_option("-p", "--prefix", dest="prefix",
default=None,
help="Output filename",
metavar="FILE")
parser.add_option("-b", "--busco", dest="busco",
default=None,
help="full_table_*_BUSCO output from BUSCO",
metavar="FILE")
(options, args) = parser.parse_args()
prefix = options.prefix
busco = options.busco
# Run as script
if __name__ == '__main__':
#call function to get a list of dupicated gene.
#these genes will be ignored
duplicated_list = parse_busco_file(busco)
#iterate through the dir
for filename in os.listdir("."):
count = 1
if not filename.endswith(".fas"):
continue
#filter out the ones we dont want
if filename.split(".fa")[0] in duplicated_list:
continue
out_file = "../"+prefix+filename
out_file = out_file.replace("BUSCOa", "")
#out_file = "../"+filename
try:
#print filename
reformat_as_fasta(filename, prefix, out_file)
except:
ValueError
continue
| [] |
afeld/api-snippets | video/rest/compositionhooks/delete-hook/delete-hook.6.x.py | d77456c387c9471d36aa949e2cf785d8a534a370 | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
api_key_sid = 'SKXXXX'
api_key_secret = 'your_api_key_secret'
client = Client(api_key_sid, api_key_secret)
did_delete = client.video\
.compositionHooks('HKXXXX')\
.delete()
if(did_delete):
print('Composition removed')
| [((7, 9, 7, 44), 'twilio.rest.Client', 'Client', ({(7, 16, 7, 27): 'api_key_sid', (7, 29, 7, 43): 'api_key_secret'}, {}), '(api_key_sid, api_key_secret)', False, 'from twilio.rest import Client\n')] |
AkagiYui/AzurLaneTool | global_info.py | f00fa6e5c6371db72ee399d7bd178a81f39afd8b | from time import sleep
debug_mode = False
time_to_exit = False
exiting = False
exit_code = 0
def get_debug_mode():
return debug_mode
def trigger_exit(_exit_code):
global time_to_exit, exit_code
exit_code = _exit_code
time_to_exit = True
sleep(0.1)
| [((18, 4, 18, 14), 'time.sleep', 'sleep', ({(18, 10, 18, 13): '(0.1)'}, {}), '(0.1)', False, 'from time import sleep\n')] |
slowy07/tensorflow-model-research | advesarial_text/data/data_utils_test.py | 48ba4ba6240452eb3e3350fe7099f2b045acc530 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from data import data_utils
data = data_utils
class SequenceWrapperTest(tf.test.TestCase):
def testDefaultTimesteps(self):
seq = data.SequenceWrapper()
t1 = seq.add_timestep()
_ = seq.add_timestep()
self.assertEqual(len(seq), 2)
self.assertEqual(t1.weight, 0.0)
self.assertEqual(t1.label, 0)
self.assertEqual(t1.token, 0)
def testSettersAndGetters(self):
ts = data.SequenceWrapper().add_timestep()
ts.set_token(3)
ts.set_label(4)
ts.set_weight(2.0)
self.assertEqual(ts.token, 3)
self.assertEqual(ts.label, 4)
self.assertEqual(ts.weight, 2.0)
def testTimestepIteration(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(0)
seq.add_timestep().set_token(1)
seq.add_timestep().set_token(2)
for i, ts in enumerate(seq):
self.assertEqual(ts.token, i)
def testFillsSequenceExampleCorrectly(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(1).set_label(2).set_weight(3.0)
seq.add_timestep().set_token(10).set_label(20).set_weight(30.0)
seq_ex = seq.seq
fl = seq_ex.feature_lists.feature_list
fl_token = fl[data.SequenceWrapper.F_TOKEN_ID].feature
fl_label = fl[data.SequenceWrapper.F_LABEL].feature
fl_weight = fl[data.SequenceWrapper.F_WEIGHT].feature
_ = [self.assertEqual(len(f), 2) for f in [fl_token, fl_label, fl_weight]]
self.assertAllEqual([f.int64_list.value[0] for f in fl_token], [1, 10])
self.assertAllEqual([f.int64_list.value[0] for f in fl_label], [2, 20])
self.assertAllEqual([f.float_list.value[0] for f in fl_weight], [3.0, 30.0])
class DataUtilsTest(tf.test.TestCase):
def testSplitByPunct(self):
output = data.split_by_punct(
"hello! world, i've been\nwaiting\tfor\ryou for.a long time"
)
expected = [
"hello",
"world",
"i",
"ve",
"been",
"waiting",
"for",
"you",
"for",
"a",
"long",
"time",
]
self.assertListEqual(output, expected)
def _buildDummySequence(self):
seq = data.SequenceWrapper()
for i in range(10):
seq.add_timestep().set_token(i)
return seq
def testBuildLMSeq(self):
seq = self._buildDummySequence()
lm_seq = data.build_lm_sequence(seq)
for i, ts in enumerate(lm_seq):
# For end of sequence, the token and label should be same, and weight
# should be 0.0.
if i == len(lm_seq) - 1:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i)
self.assertEqual(ts.weight, 0.0)
else:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i + 1)
self.assertEqual(ts.weight, 1.0)
def testBuildSAESeq(self):
seq = self._buildDummySequence()
sa_seq = data.build_seq_ae_sequence(seq)
self.assertEqual(len(sa_seq), len(seq) * 2 - 1)
# Tokens should be sequence twice, minus the EOS token at the end
for i, ts in enumerate(sa_seq):
self.assertEqual(ts.token, seq[i % 10].token)
# Weights should be len-1 0.0's and len 1.0's.
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].weight, 0.0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].weight, 1.0)
# Labels should be len-1 0's, and then the sequence
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].label, 0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].label, seq[i - (len(seq) - 1)].token)
def testBuildLabelSeq(self):
seq = self._buildDummySequence()
eos_id = len(seq) - 1
label_seq = data.build_labeled_sequence(seq, True)
for i, ts in enumerate(label_seq[:-1]):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testBuildBidirLabelSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
label_seq = data.build_labeled_sequence(bidir_seq, True)
for (i, ts), j in zip(enumerate(label_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testReverseSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
for i, ts in enumerate(reversed(reverse_seq[:-1])):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = reverse_seq[-1]
eos_id = len(seq) - 1
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testBidirSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
for (i, ts), j in zip(enumerate(bidir_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = bidir_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testLabelGain(self):
seq = self._buildDummySequence()
label_seq = data.build_labeled_sequence(seq, True, label_gain=True)
for i, ts in enumerate(label_seq):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 1)
self.assertNear(ts.weight, float(i) / (len(seq) - 1), 1e-3)
if __name__ == "__main__":
tf.test.main()
| [((191, 4, 191, 18), 'tensorflow.test.main', 'tf.test.main', ({}, {}), '()', True, 'import tensorflow as tf\n')] |
arush15june/wagtail-torchbox | headlesspreview/apps.py | c4d06e096c72bd8007975dc016133024f9d27fab | from django.apps import AppConfig
class HeadlesspreviewConfig(AppConfig):
name = 'headlesspreview'
| [] |
revbucket/LipSDP | LipSDP/solve_sdp.py | 39f2ffe65cb656440e055e4e86a750bc7e77e357 | import argparse
import numpy as np
import matlab.engine
from scipy.io import savemat
import os
from time import time
def main(args):
start_time = time()
    # file_dir is used below to build the MATLAB paths but was never defined in this
    # script; resolve it relative to this file so the addpath calls can work
    file_dir = os.path.dirname(os.path.abspath(__file__))
    eng = matlab.engine.start_matlab()
eng.addpath(os.path.join(file_dir, 'matlab_engine'))
eng.addpath(os.path.join(file_dir, r'matlab_engine/weight_utils'))
eng.addpath(os.path.join(file_dir, r'matlab_engine/error_messages'))
eng.addpath(os.path.join(file_dir, r'examples/saved_weights'))
network = {
'alpha': matlab.double([args.alpha]),
'beta': matlab.double([args.beta]),
'weight_path': args.weight_path,
}
lip_params = {
'formulation': args.form,
'split': matlab.logical([args.split]),
'parallel': matlab.logical([args.parallel]),
'verbose': matlab.logical([args.verbose]),
'split_size': matlab.double([args.split_size]),
'num_neurons': matlab.double([args.num_neurons]),
'num_workers': matlab.double([args.num_workers]),
'num_dec_vars': matlab.double([args.num_decision_vars])
}
L = eng.solve_LipSDP(network, lip_params, nargout=1)
if lip_params['verbose']:
print(f'LipSDP-{args.form.capitalize()} gives a Lipschitz constant of %.03f' % L)
print('Total time %.03f' % (time() - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--form',
default='neuron',
const='neuron',
nargs='?',
choices=('neuron', 'network', 'layer', 'network-rand', 'network-dec-vars'),
help='LipSDP formulation to use')
parser.add_argument('-v', '--verbose',
action='store_true',
help='prints CVX output from solve if supplied')
parser.add_argument('--alpha',
type=float,
default=0,
nargs=1,
help='lower bound for slope restriction bound')
parser.add_argument('--beta',
type=float,
default=1,
nargs=1,
                    help='upper bound for slope restriction bound')
parser.add_argument('--num-neurons',
type=int,
default=100,
nargs=1,
help='number of neurons to couple for LipSDP-Network-rand formulation')
parser.add_argument('--split',
action='store_true',
help='splits network into subnetworks for more efficient solving if supplied')
parser.add_argument('--parallel',
action='store_true',
help='parallelizes solving for split formulations if supplied')
parser.add_argument('--split-size',
type=int,
default=2,
nargs=1,
help='number of layers in each subnetwork for splitting formulations')
parser.add_argument('--num-workers',
type=int,
default=0,
nargs=1,
help='number of workers for parallelization of splitting formulations')
parser.add_argument('--num-decision-vars',
type=int,
default=10,
nargs=1,
help='specify number of decision variables to be used for LipSDP')
parser.add_argument('--weight-path',
type=str,
required=True,
nargs=1,
help='path of weights corresponding to trained neural network model')
args = parser.parse_args()
if args.parallel is True and args.num_workers[0] < 1:
raise ValueError('When you use --parallel, --num-workers must be an integer >= 1.')
if args.split is True and args.split_size[0] < 1:
raise ValueError('When you use --split, --split-size must be an integer >= 1.')
main(args)
| [((10, 17, 10, 23), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((43, 13, 43, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((12, 16, 12, 55), 'os.path.join', 'os.path.join', ({(12, 29, 12, 37): 'file_dir', (12, 39, 12, 54): '"""matlab_engine"""'}, {}), "(file_dir, 'matlab_engine')", False, 'import os\n'), ((13, 16, 13, 69), 'os.path.join', 'os.path.join', ({(13, 29, 13, 37): 'file_dir', (13, 39, 13, 68): '"""matlab_engine/weight_utils"""'}, {}), "(file_dir, 'matlab_engine/weight_utils')", False, 'import os\n'), ((14, 16, 14, 71), 'os.path.join', 'os.path.join', ({(14, 29, 14, 37): 'file_dir', (14, 39, 14, 70): '"""matlab_engine/error_messages"""'}, {}), "(file_dir, 'matlab_engine/error_messages')", False, 'import os\n'), ((15, 16, 15, 65), 'os.path.join', 'os.path.join', ({(15, 29, 15, 37): 'file_dir', (15, 39, 15, 64): '"""examples/saved_weights"""'}, {}), "(file_dir, 'examples/saved_weights')", False, 'import os\n'), ((39, 36, 39, 42), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n')] |
chihyi-liao/stockprophet | stockprophet/__init__.py | 891c91b2a446e3bd30bb56b88be3874d7dda1b8d | from stockprophet.cli import entry_point
from stockprophet.crawler import (
init_stock_type, init_stock_category
)
from stockprophet.db import init_db
from .utils import read_db_settings
def preprocessing() -> bool:
result = False
# noinspection PyBroadException
try:
db_config = read_db_settings()
if not db_config:
            print("config.ini is missing the 'database' section")
return result
except Exception:
        print("Unable to read or parse config.ini")
return result
# noinspection PyBroadException
try:
init_db(db_config)
init_stock_category()
init_stock_type()
result = True
except Exception as e:
        print("Unable to connect to the database: %s" % (str(e), ))
return result
def main():
if preprocessing():
entry_point()
| [((24, 8, 24, 26), 'stockprophet.db.init_db', 'init_db', ({(24, 16, 24, 25): 'db_config'}, {}), '(db_config)', False, 'from stockprophet.db import init_db\n'), ((25, 8, 25, 29), 'stockprophet.crawler.init_stock_category', 'init_stock_category', ({}, {}), '()', False, 'from stockprophet.crawler import init_stock_type, init_stock_category\n'), ((26, 8, 26, 25), 'stockprophet.crawler.init_stock_type', 'init_stock_type', ({}, {}), '()', False, 'from stockprophet.crawler import init_stock_type, init_stock_category\n'), ((35, 8, 35, 21), 'stockprophet.cli.entry_point', 'entry_point', ({}, {}), '()', False, 'from stockprophet.cli import entry_point\n')] |
mpcjanssen/Advent-of-Code | 2021/day_25.py | 06c5257d038bfcd3d4790f3213afecb5c36d5c61 | import aoc_helper
RAW = aoc_helper.day(25)
print(RAW)
def parse_raw():
...
DATA = parse_raw()
def part_one():
...
def part_two():
...
aoc_helper.submit(25, part_one)
aoc_helper.submit(25, part_two)
| [((3, 6, 3, 24), 'aoc_helper.day', 'aoc_helper.day', ({(3, 21, 3, 23): '25'}, {}), '(25)', False, 'import aoc_helper\n'), ((17, 0, 17, 31), 'aoc_helper.submit', 'aoc_helper.submit', ({(17, 18, 17, 20): '(25)', (17, 22, 17, 30): 'part_one'}, {}), '(25, part_one)', False, 'import aoc_helper\n'), ((18, 0, 18, 31), 'aoc_helper.submit', 'aoc_helper.submit', ({(18, 18, 18, 20): '(25)', (18, 22, 18, 30): 'part_two'}, {}), '(25, part_two)', False, 'import aoc_helper\n')] |
Hunter1753/adventofcode | 6/6.2.py | 962df52af01f6ab575e8f00eb2d1c1335dba5430 | def setIntersectionCount(group):
return len(set.intersection(*group))
groupList = []
tempGroup = []
with open("./6/input.txt") as inputFile:
for line in inputFile:
line = line.replace("\n","")
if len(line) > 0:
tempGroup.append(set(line))
else:
groupList.append(tempGroup)
tempGroup = []
if len(tempGroup) > 0:
groupList.append(tempGroup)
groupList = list(map(setIntersectionCount,groupList))
print("{} common options in groups".format(sum(groupList))) | [] |
ZichaoGuo/PaddleSlim | demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py | 2550fb4ec86aee6155c1c8a2c9ab174e239918a3 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import copy
import numpy as np
from paddleslim.nas import GPNAS
# Demo of using GP-NAS in Track 2 of the [CVPR 2021 NAS competition](https://www.cvpr21-nas.com/competition)
# [CVPR 2021 NAS competition Track 2 AI Studio page](https://aistudio.baidu.com/aistudio/competition/detail/71?lang=en)
# [AI Studio GP-NAS demo](https://aistudio.baidu.com/aistudio/projectdetail/1824958)
# The demo is based on GP-NAS (Gaussian Process based Neural Architecture Search), PaddleSlim's in-house NAS algorithm
# An improved version built on this demo is eligible for double prize money
def preprare_trainning_data(file_name, t_flag):
    ## t_flag == 1: use all training data
    ## t_flag == 2: use half of the training data
with open(file_name, 'r') as f:
arch_dict = json.load(f)
Y_all = []
X_all = []
for sub_dict in arch_dict.items():
Y_all.append(sub_dict[1]['acc'] * 100)
X_all.append(np.array(sub_dict[1]['arch']).T.reshape(4, 16)[2])
X_all, Y_all = np.array(X_all), np.array(Y_all)
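    # interleaved split: samples 0, t_flag, 2*t_flag, ... form the train set and the
    # stride offset by one forms the test set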
X_train, Y_train, X_test, Y_test = X_all[0::t_flag], Y_all[
0::t_flag], X_all[1::t_flag], Y_all[1::t_flag]
return X_train, Y_train, X_test, Y_test
if __name__ == '__main__':
stage1_file = './datasets/Track2_stage1_trainning.json'
stage2_file = './datasets/Track2_stage2_few_show_trainning.json'
X_train_stage1, Y_train_stage1, X_test_stage1, Y_test_stage1 = preprare_trainning_data(
stage1_file, 1)
X_train_stage2, Y_train_stage2, X_test_stage2, Y_test_stage2 = preprare_trainning_data(
stage2_file, 2)
gpnas = GPNAS()
w = gpnas.get_initial_mean(X_test_stage1, Y_test_stage1)
init_cov = gpnas.get_initial_cov(X_train_stage1)
error_list = np.array(
Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict(
X_test_stage2))
print('RMSE trainning on stage1 testing on stage2:',
np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
gpnas.get_posterior_mean(X_train_stage2[0::3], Y_train_stage2[0::3])
gpnas.get_posterior_mean(X_train_stage2[1::3], Y_train_stage2[1::3])
gpnas.get_posterior_cov(X_train_stage2[1::3], Y_train_stage2[1::3])
error_list = np.array(
Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict_jiont(
X_test_stage2, X_train_stage2[::1], Y_train_stage2[::1]))
print('RMSE using stage1 as prior:',
np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
| [((50, 12, 50, 19), 'paddleslim.nas.GPNAS', 'GPNAS', ({}, {}), '()', False, 'from paddleslim.nas import GPNAS\n'), ((31, 20, 31, 32), 'json.load', 'json.load', ({(31, 30, 31, 31): 'f'}, {}), '(f)', False, 'import json\n'), ((37, 19, 37, 34), 'numpy.array', 'np.array', ({(37, 28, 37, 33): 'X_all'}, {}), '(X_all)', True, 'import numpy as np\n'), ((37, 36, 37, 51), 'numpy.array', 'np.array', ({(37, 45, 37, 50): 'Y_all'}, {}), '(Y_all)', True, 'import numpy as np\n'), ((57, 18, 57, 50), 'numpy.dot', 'np.dot', ({(57, 25, 57, 37): 'error_list.T', (57, 39, 57, 49): 'error_list'}, {}), '(error_list.T, error_list)', True, 'import numpy as np\n'), ((65, 18, 65, 50), 'numpy.dot', 'np.dot', ({(65, 25, 65, 37): 'error_list.T', (65, 39, 65, 49): 'error_list'}, {}), '(error_list.T, error_list)', True, 'import numpy as np\n'), ((36, 21, 36, 50), 'numpy.array', 'np.array', ({(36, 30, 36, 49): "sub_dict[1]['arch']"}, {}), "(sub_dict[1]['arch'])", True, 'import numpy as np\n')] |
yogeshprasad/spa-development | pages/migrations/0004_auto_20181102_0944.py | 1bee9ca64da5815e1c9a2f7af43b44b59ee2ca7b | # Generated by Django 2.0.6 on 2018-11-02 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0003_coachingcourse'),
]
operations = [
migrations.AlterField(
model_name='coachingcourse',
name='username',
field=models.CharField(default='', max_length=100),
),
]
| [((16, 18, 16, 62), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')] |
waikato-datamining/keras-imaging | imageclassification/src/sample/splitters/_StratifiedSplitter.py | f044f883242895c18cfdb31a827bc32bdb0405ed | from collections import OrderedDict
from random import Random
from typing import Set
from .._types import Dataset, Split, LabelIndices
from .._util import per_label
from ._RandomSplitter import RandomSplitter
from ._Splitter import Splitter
class StratifiedSplitter(Splitter):
"""
    Splits a dataset by randomly sampling the requested percentage of each
    label's examples separately, so both resulting subsets keep roughly the
    original label proportions.
"""
def __init__(self, percentage: float, labels: LabelIndices, random: Random = Random()):
self._percentage = percentage
self._labels = labels
self._random = random
def __str__(self) -> str:
return f"strat-{self._percentage}"
def __call__(self, dataset: Dataset) -> Split:
subsets_per_label = per_label(dataset)
sub_splits = {
label: RandomSplitter(int(len(subsets_per_label[label]) * self._percentage), self._random)(subsets_per_label[label])
for label in self._labels.keys()
}
result = OrderedDict(), OrderedDict()
for filename, label in dataset.items():
result_index = 0 if filename in sub_splits[label][0] else 1
result[result_index][filename] = label
return result
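# Minimal usage sketch (not part of the original module; assumes a Dataset mapping
# file names to labels and a LabelIndices mapping label -> index, as used above):
#
#   splitter = StratifiedSplitter(0.75, labels={"cat": 0, "dog": 1})
#   train, holdout = splitter(dataset)  # ~75% of each label ends up in `train`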
| [((15, 81, 15, 89), 'random.Random', 'Random', ({}, {}), '()', False, 'from random import Random\n'), ((31, 17, 31, 30), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((31, 32, 31, 45), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n')] |
YuraHavrylko/revenuecat_python | revenuecat_python/enums.py | a25b234933b6e80e1ff09b6a82d73a0e3df91caa | from enum import Enum
class SubscriptionPlatform(Enum):
ios = 'ios'
android = 'android'
macos = 'macos'
uikitformac = 'uikitformac'
stripe = 'stripe'
class AttributionNetworkCode(Enum):
apple_search_ads = 0
adjust = 1
apps_flyer = 2
branch = 3
tenjin = 4
facebook = 5 | [] |
codeproject/DeepStack | windows_packages_gpu/torch/nn/intrinsic/qat/modules/linear_relu.py | d96368a3db1bc0266cb500ba3701d130834da0e6 | from __future__ import absolute_import, division, print_function, unicode_literals
import torch.nn.qat as nnqat
import torch.nn.intrinsic
import torch.nn.functional as F
class LinearReLU(nnqat.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for output activation and weight, used in
quantization aware training.
We adopt the same interface as :class:`torch.nn.Linear`.
Similar to `torch.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
default.
Attributes:
activation_post_process: fake quant module for output activation
weight: fake quant module for weight
Examples::
>>> m = nn.qat.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = torch.nn.intrinsic.LinearReLU
def __init__(self, in_features, out_features, bias=True,
qconfig=None):
super(LinearReLU, self).__init__(in_features, out_features, bias, qconfig)
def forward(self, input):
return self.activation_post_process(F.relu(
F.linear(input, self.weight_fake_quant(self.weight), self.bias)))
@classmethod
def from_float(cls, mod, qconfig=None):
return super(LinearReLU, cls).from_float(mod, qconfig)
| [] |
temelkirci/Motion_Editor | venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/EXT/draw_buffers2.py | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | '''OpenGL extension EXT.draw_buffers2
This module customises the behaviour of the
OpenGL.raw.GL.EXT.draw_buffers2 to provide a more
Python-friendly API
Overview (from the spec)
This extension builds upon the ARB_draw_buffers extension and provides
separate blend enables and color write masks for each color output. In
ARB_draw_buffers (part of OpenGL 2.0), separate values can be written to
each color buffer, but the blend enable and color write mask are global
and apply to all color outputs.
While this extension does provide separate blend enables, it does not
provide separate blend functions or blend equations per color output.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/draw_buffers2.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.draw_buffers2 import *
### END AUTOGENERATED SECTION | [] |
liquidpele/pymemcache | pymemcache/client/retrying.py | 0001f94a06b91078ed7b7708729ef0d1aaa73a68 | """ Module containing the RetryingClient wrapper class. """
from time import sleep
def _ensure_tuple_argument(argument_name, argument_value):
"""
Helper function to ensure the given arguments are tuples of Exceptions (or
subclasses), or can at least be converted to such.
Args:
argument_name: str, name of the argument we're checking, only used for
raising meaningful exceptions.
        argument_value: any, the argument itself.
Returns:
tuple[Exception]: A tuple with the elements from the argument if they are
valid.
Exceptions:
ValueError: If the argument was not None, tuple or Iterable.
ValueError: If any of the elements of the argument is not a subclass of
Exception.
"""
# Ensure the argument is a tuple, set or list.
if argument_value is None:
return tuple()
elif not isinstance(argument_value, (tuple, set, list)):
raise ValueError("%s must be either a tuple, a set or a list." % argument_name)
# Convert the argument before checking contents.
argument_tuple = tuple(argument_value)
# Check that all the elements are actually inherited from Exception.
# (Catchable)
if not all([issubclass(arg, Exception) for arg in argument_tuple]):
raise ValueError(
"%s is only allowed to contain elements that are subclasses of "
"Exception." % argument_name
)
return argument_tuple
class RetryingClient(object):
"""
Client that allows retrying calls for the other clients.
"""
def __init__(
self, client, attempts=2, retry_delay=0, retry_for=None, do_not_retry_for=None
):
"""
Constructor for RetryingClient.
Args:
client: Client|PooledClient|HashClient, inner client to use for
performing actual work.
attempts: optional int, how many times to attempt an action before
failing. Must be 1 or above. Defaults to 2.
retry_delay: optional int|float, how many seconds to sleep between
each attempt.
Defaults to 0.
retry_for: optional None|tuple|set|list, what exceptions to
allow retries for. Will allow retries for all exceptions if None.
Example:
`(MemcacheClientError, MemcacheUnexpectedCloseError)`
Accepts any class that is a subclass of Exception.
Defaults to None.
do_not_retry_for: optional None|tuple|set|list, what
                exceptions should not be retried. Will not block retries for any
Exception if None.
Example:
`(IOError, MemcacheIllegalInputError)`
Accepts any class that is a subclass of Exception.
Defaults to None.
Exceptions:
ValueError: If `attempts` is not 1 or above.
ValueError: If `retry_for` or `do_not_retry_for` is not None, tuple or
Iterable.
ValueError: If any of the elements of `retry_for` or
`do_not_retry_for` is not a subclass of Exception.
ValueError: If there is any overlap between `retry_for` and
`do_not_retry_for`.
"""
if attempts < 1:
raise ValueError(
"`attempts` argument must be at least 1. "
"Otherwise no attempts are made."
)
self._client = client
self._attempts = attempts
self._retry_delay = retry_delay
self._retry_for = _ensure_tuple_argument("retry_for", retry_for)
self._do_not_retry_for = _ensure_tuple_argument(
"do_not_retry_for", do_not_retry_for
)
# Verify no overlap in the go/no-go exception collections.
for exc_class in self._retry_for:
if exc_class in self._do_not_retry_for:
raise ValueError(
'Exception class "%s" was present in both `retry_for` '
"and `do_not_retry_for`. Any exception class is only "
"allowed in a single argument." % repr(exc_class)
)
# Take dir from the client to speed up future checks.
self._client_dir = dir(self._client)
def _retry(self, name, func, *args, **kwargs):
"""
Workhorse function, handles retry logic.
Args:
name: str, Name of the function called.
func: callable, the function to retry.
*args: args, array arguments to pass to the function.
**kwargs: kwargs, keyword arguments to pass to the function.
"""
for attempt in range(self._attempts):
try:
result = func(*args, **kwargs)
return result
except Exception as exc:
# Raise the exception to caller if either is met:
# - We've used the last attempt.
# - self._retry_for is set, and we do not match.
# - self._do_not_retry_for is set, and we do match.
# - name is not actually a member of the client class.
if (
attempt >= self._attempts - 1
or (self._retry_for and not isinstance(exc, self._retry_for))
or (
self._do_not_retry_for
and isinstance(exc, self._do_not_retry_for)
)
or name not in self._client_dir
):
raise exc
# Sleep and try again.
sleep(self._retry_delay)
# This is the real magic soup of the class, we catch anything that isn't
# strictly defined for ourselves and pass it on to whatever client we've
# been given.
def __getattr__(self, name):
return lambda *args, **kwargs: self._retry(
name, self._client.__getattribute__(name), *args, **kwargs
)
# We implement these explicitly because they're "magic" functions and won't
# get passed on by __getattr__.
def __dir__(self):
return self._client_dir
# These magics are copied from the base client.
def __setitem__(self, key, value):
self.set(key, value, noreply=True)
def __getitem__(self, key):
value = self.get(key)
if value is None:
raise KeyError
return value
def __delitem__(self, key):
self.delete(key, noreply=True)
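# Minimal usage sketch (not part of the original module). It assumes a reachable
# memcached server; the address and the exception tuple are illustrative only:
#
#   from pymemcache.client.base import Client
#   from pymemcache.exceptions import MemcacheUnexpectedCloseError
#
#   base_client = Client(("localhost", 11211))
#   client = RetryingClient(
#       base_client,
#       attempts=3,
#       retry_delay=0.01,
#       retry_for=(MemcacheUnexpectedCloseError,),
#   )
#   client.set("key", "value")
#   client.get("key")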
| [((150, 16, 150, 40), 'time.sleep', 'sleep', ({(150, 22, 150, 39): 'self._retry_delay'}, {}), '(self._retry_delay)', False, 'from time import sleep\n')] |
HuaichenOvO/EIE3280HW | 8.1.py | e1424abb8baf715a4e9372e2ca6b0bed1e62f3d6 | import numpy as np
import numpy.linalg as lg
A_mat = np.matrix([
[0, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[1, 0, 0, 1, 1],
[1, 0, 1, 0, 1],
[0, 1, 1, 1, 0]
])
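# A_mat is the adjacency matrix of a 5-node undirected graph (symmetric, zero diagonal)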
eigen = lg.eig(A_mat)  # returns a pair: array of 5 eigenvalues and the matrix of eigenvectors
vec = eigen[1][:, 0]  # first eigenvector column (note: lg.eig does not sort by eigenvalue)
value = eigen[0][0]  # the eigenvalue corresponding to that eigenvector
print(vec)
print(A_mat * vec)
print(value * vec)
| [((4, 8, 10, 2), 'numpy.matrix', 'np.matrix', ({(4, 18, 10, 1): '[[0, 1, 1, 1, 0], [1, 0, 0, 0, 1], [1, 0, 0, 1, 1], [1, 0, 1, 0, 1], [0, 1,\n 1, 1, 0]]'}, {}), '([[0, 1, 1, 1, 0], [1, 0, 0, 0, 1], [1, 0, 0, 1, 1], [1, 0, 1, 0, \n 1], [0, 1, 1, 1, 0]])', True, 'import numpy as np\n'), ((12, 8, 12, 21), 'numpy.linalg.eig', 'lg.eig', ({(12, 15, 12, 20): 'A_mat'}, {}), '(A_mat)', True, 'import numpy.linalg as lg\n')] |
Abulhusain/E-learing | classroom/migrations/0025_myfile_file.py | 65cfe3125f1b6794572ef2daf89917976f0eac09 | # Generated by Django 2.2.2 on 2019-08-25 09:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classroom', '0024_auto_20190825_1723'),
]
operations = [
migrations.AddField(
model_name='myfile',
name='file',
field=models.CharField(blank=True, max_length=100),
),
]
| [((16, 18, 16, 62), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')] |
rolfberkenbosch/python-jumbo-api | jumbo_api/objects/profile.py | 9ca35cbea6225dcc6108093539e76f110b1840b0 | from jumbo_api.objects.store import Store
class Profile(object):
def __init__(self, data):
self.id = data.get("identifier")
self.store = Store(data.get("store"))
def __str__(self):
return f"{self.id} {self.store}"
| [] |
hankai17/test | tmp/real_time_log_analy/logWatcher.py | 8f38d999a7c6a92eac94b4d9dc8e444619d2144f | #!/usr/bin/env python
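# Watch Apache Traffic Server squid access logs, count HTTP status codes per
# (domain, URL suffix) pair, and report pairs whose error rate exceeds the
# configured threshold to the PPS servers over UDP.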
import os
import sys
import time
import errno
import stat
import datetime
import socket
import struct
import atexit
import logging
#from lru import LRUCacheDict
from logging import handlers
from task_manager import Job, taskManage
from ctypes import *
from urlparse import *
from multiprocessing import Process,Lock
from log_obj import CLog
from parse_conf import cConfParser
log_file = "timelog.log"
log_fmt = '%(asctime)s: %(message)s'
config_file = 'test.config'
domain_white_dict = {}
pps_ip_list = []
pps_port = 0
domain_sfx_err_count = 0
domain_sfx_err_rate = 0
ats_ip = ''
def daemonize(pid_file=None):
pid = os.fork()
if pid:
sys.exit(0)
os.chdir('/')
os.umask(0)
os.setsid()
_pid = os.fork()
if _pid:
sys.exit(0)
sys.stdout.flush()
sys.stderr.flush()
with open('/dev/null') as read_null, open('/dev/null', 'w') as write_null:
os.dup2(read_null.fileno(), sys.stdin.fileno())
os.dup2(write_null.fileno(), sys.stdout.fileno())
os.dup2(write_null.fileno(), sys.stderr.fileno())
if pid_file:
with open(pid_file, 'w+') as f:
f.write(str(os.getpid()))
atexit.register(os.remove, pid_file)
def get_suffix(p):
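    # Return the file-name extension of a URL path: "nil" for a bare "/",
    # "null" when no extension can be determined.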
if len(p) == 1:
#return "pure domain"
return "nil"
fields = p.split("/")
if len(fields) == 0 or len(fields) == 1:
return "null"
fields1 = fields[len(fields) - 1].split(".")
if len(fields1) == 0 or len(fields1) == 1:
return "null"
else:
return fields1[len(fields1) - 1]
class LogWatcher(object):
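    """Tail-follows every matching log file in a folder and passes newly written
    lines to `callback`; rotated or replaced files are re-watched automatically."""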
def __init__(self, folder, callback, extensions=["log"], logfile_keyword="squid", tail_lines=0):
self.files_map = {}
self.callback = callback
self.folder = os.path.realpath(folder)
self.extensions = extensions
self.logfile_kw = logfile_keyword
assert os.path.exists(self.folder), "%s does not exists" % self.folder
assert callable(callback)
self.update_files()
for id, file in self.files_map.iteritems():
file.seek(os.path.getsize(file.name)) # EOF
if tail_lines:
lines = self.tail(file.name, tail_lines)
if lines:
self.callback(file.name, lines)
def __del__(self):
self.close()
def loop(self, interval=0.1, async=False):
while 1:
try:
self.update_files()
for fid, file in list(self.files_map.iteritems()):
self.readfile(file)
if async:
return
time.sleep(interval)
except KeyboardInterrupt:
break
def log(self, line):
print line
def listdir(self):
ls = os.listdir(self.folder)
if self.extensions:
return [x for x in ls if os.path.splitext(x)[1][1:] in self.extensions and self.logfile_kw in os.path.split(x)[1] ]
else:
return ls
@staticmethod
def tail(fname, window):
try:
f = open(fname, 'r')
except IOError, err:
if err.errno == errno.ENOENT:
return []
else:
raise
else:
BUFSIZ = 1024
f.seek(0, os.SEEK_END)
fsize = f.tell()
block = -1
data = ""
exit = False
while not exit:
step = (block * BUFSIZ)
if abs(step) >= fsize:
f.seek(0)
exit = True
else:
f.seek(step, os.SEEK_END)
data = f.read().strip()
if data.count('\n') >= window:
break
else:
block -= 1
return data.splitlines()[-window:]
def update_files(self):
ls = []
if os.path.isdir(self.folder):
for name in self.listdir():
absname = os.path.realpath(os.path.join(self.folder, name))
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
if not stat.S_ISREG(st.st_mode):
continue
fid = self.get_file_id(st)
ls.append((fid, absname))
elif os.path.isfile(self.folder):
absname = os.path.realpath(self.folder)
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
fid = self.get_file_id(st)
ls.append((fid, absname))
else:
print 'You submitted an object that was neither a file or folder...exiting now.'
sys.exit()
for fid, file in list(self.files_map.iteritems()):
try:
st = os.stat(file.name)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
self.unwatch(file, fid)
else:
raise
else:
if fid != self.get_file_id(st):
self.unwatch(file, fid)
self.watch(file.name)
for fid, fname in ls:
if fid not in self.files_map:
self.watch(fname)
def readfile(self, file):
lines = file.readlines()
if lines:
self.callback(file.name, lines)
def watch(self, fname):
try:
file = open(fname, "r")
fid = self.get_file_id(os.stat(fname))
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
self.log("watching logfile %s" % fname)
self.files_map[fid] = file
def unwatch(self, file, fid):
lines = self.readfile(file)
self.log("un-watching logfile %s" % file.name)
del self.files_map[fid]
if lines:
self.callback(file.name, lines)
@staticmethod
def get_file_id(st):
return "%xg%x" % (st.st_dev, st.st_ino)
def close(self):
for id, file in self.files_map.iteritems():
file.close()
self.files_map.clear()
def udp_send_message(ip_list, port, arr):
for ip in ip_list:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(arr, (ip, port))
s.close()
def pull_data(job):
if not (job.sfx == "nil" or job.sfx == "null"):
fmt = "=HHHH%dsH%dsH" %(len(job.url),len(job.sfx))
data = struct.pack(
fmt,
80, #id
1, #type
8 + len(job.url) + 2 + len(job.sfx) + 1, #length
len(job.url), #domain_len
job.url, #domain
len(job.sfx), #sfx_len
job.sfx, #sfx
0
)
else:
fmt = "=HHHH%dsH" %(len(job.url))
data = struct.pack(
fmt,
80, #id
1, #type
8 + len(job.url) + 1, #length
len(job.url), #domain_len
job.url,
0
)
global pps_ip_list
global pps_port
udp_send_message(pps_ip_list, pps_port, data)
tmg.done_task_add(job)
log_message = job.url + ' ' + job.sfx
loger.write(20, log_message)
def callback_routine(idx):
print 'callback_routinue'
def get_domain_white(f):
if len(f) == 0:
print 'No domain_white_list'
return
filename = f
fd = open(filename, 'r')
for line in fd.readlines():
line = line.strip()
if not domain_white_dict.has_key(line):
domain_white_dict[line] = 1
print 'parse domain_white_list done'
def period_check_task(job):
global txn_idx
global once_flag
if txn_idx == 0 and once_flag == 0:
once_flag = 1
tmg.done_task_add(job)
job.addtime = time.time()
tmg.task_add(job)
return
loger.write(10, '------>')
mutex.acquire()
for k in d1.keys():
if domain_white_dict.has_key(k):
continue
for k1 in d1[k].keys():
err_rate = d1[k][k1]['not_ok'] * 100 / (d1[k][k1]['not_ok'] + d1[k][k1]['20x'])
log_message = k + ' ' + str(err_rate)
loger.write(10, log_message)
global domain_sfx_err_count
global domain_sfx_err_rate
if err_rate >= domain_sfx_err_rate and (d1[k][k1]['not_ok'] + d1[k][k1]['20x']) >= domain_sfx_err_count :
#print "will add to task", k, k1, "ok:", d1[k][k1]['20x'], "not_ok:", d1[k][k1]['not_ok'], "err rate:", err_rate
txn_idx += 1
job = Job(txn_idx, pull_data, time.time(), 0, k, '', callback_routine, k1, '')
tmg.task_add(job)
loger.write(10, '<------')
d1.clear()
mutex.release()
tmg.done_task_add(job)
if job.period > 0:
job.addtime = time.time()
tmg.task_add(job)
def config_parse():
global domain_sfx_err_count
global domain_sfx_err_rate
global pps_ip_list
global pps_port
global ats_ip
cp = cConfParser(config_file)
pps_ip = cp.get('common', 'pps_ip')
fields = pps_ip.strip().split('|')
if len(fields) > 0:
for i in fields:
pps_ip_list.append(i)
else:
pps_ip_list.append(pps_ip)
pps_port = int(cp.get('common', 'pps_port'))
domain_sfx_err_count = int(cp.get('common', 'domain_sfx_err_count' ))
domain_sfx_err_rate = int(cp.get('common', 'domain_sfx_err_rate' ))
ats_ip = cp.get('common', 'ats_ip')
print 'ats_ip: ', ats_ip
print 'pps_ip: ', pps_ip
print 'pps_port: ', pps_port
print 'domain_sfx_err_count: ', domain_sfx_err_count
print 'domain_sfx_err_rate: ', domain_sfx_err_rate
return cp
once_flag = 0
txn_idx = 0
d1 = {}
mutex = Lock()
version_message = '1.0.1'
#1.0.1: Add conf obj; Add log obj
#1.0.2: More pps. add tool config
if __name__ == '__main__':
help_message = 'Usage: python %s' % sys.argv[0]
if len(sys.argv) == 2 and (sys.argv[1] in '--version'):
print version_message
exit(1)
if len(sys.argv) == 2 and (sys.argv[1] in '--help'):
print help_message
exit(1)
if len(sys.argv) != 1:
print help_message
exit(1)
cp = config_parse()
get_domain_white(cp.get('common', 'domain_white_list'))
loger = CLog(log_file, log_fmt, 12, 5, cp.get('common', 'debug'))
print 'Start ok'
daemonize()
tmg = taskManage()
tmg.run()
pull_pps_job = Job(txn_idx, period_check_task, time.time(), int(cp.get('common', 'interval')), '', '', callback_routine, '', '')
tmg.task_add(pull_pps_job)
def callback(filename, lines):
for line in lines:
fields = line.strip().split("'")
http_code = fields[23]
domain = fields[13]
log_message = 'new line ' + domain
#loger.write(10, log_message)
if len(domain.split(":")) > 0:
domain = domain.split(":")[0]
user_ip = fields[5]
result = urlparse(fields[15])
sfx = get_suffix(result.path)
if sfx == 'nil' or sfx == 'null':
continue
if len(domain) <= 3:
continue
#is watch req
global ats_ip
if user_ip == ats_ip:
continue
mutex.acquire()
sfx_dict = None
if not d1.has_key(domain):
d1[domain] = {}
sfx_dict = d1[domain]
else:
sfx_dict = d1[domain]
if not sfx_dict.has_key(sfx):
sfx_dict[sfx] = {'20x':0, 'not_ok':0}
if not(http_code in "200" or http_code in "206" or http_code in "304" or http_code in "204"):
sfx_dict[sfx]['not_ok'] += 1
else:
sfx_dict[sfx]['20x'] += 1
mutex.release()
l = LogWatcher("/opt/ats/var/log/trafficserver", callback)
l.loop()
#https://docs.python.org/2/library/ctypes.html
#https://blog.csdn.net/u012611644/article/details/80529746
| [] |
jonathonfletcher/LazyBlacksmith | lazyblacksmith/views/ajax/__init__.py | f244f0a15c795707b64e7cc53f82c6d6270691b5 | # -*- encoding: utf-8 -*-
from flask import request
from lazyblacksmith.utils.request import is_xhr
import logging
logger = logging.getLogger('lb.ajax')
def is_not_ajax():
"""
    Return True if the request is not an AJAX request.
    Used by the @cache annotation so that direct (non-AJAX) calls,
    which receive HTTP 403, are not cached.
"""
return not is_xhr(request)
| [((7, 9, 7, 37), 'logging.getLogger', 'logging.getLogger', ({(7, 27, 7, 36): '"""lb.ajax"""'}, {}), "('lb.ajax')", False, 'import logging\n'), ((16, 15, 16, 30), 'lazyblacksmith.utils.request.is_xhr', 'is_xhr', ({(16, 22, 16, 29): 'request'}, {}), '(request)', False, 'from lazyblacksmith.utils.request import is_xhr\n')] |
logic-and-learning/AdvisoRL | src/automata_learning_with_policybank/Traces.py | 3bbd741e681e6ea72562fec142d54e9d781d097d | import os
class Traces:
    def __init__(self, positive=None, negative=None):
        # avoid shared mutable default arguments: give each instance its own sets
        self.positive = positive if positive is not None else set()
        self.negative = negative if negative is not None else set()
"""
IG: at the moment we are adding a trace only if it ends up in an event.
should we be more restrictive, e.g. consider xxx, the same as xxxxxxxxxx (where x is an empty event '')
recent suggestion (from the meeting): ignore empty events altogether and don't consider them as events at all (neither for
execution, nor for learning)
"""
def _should_add(self, trace, i):
prefixTrace = trace[:i]
if not prefixTrace[-1] == '':
return True
else:
return False
def _get_prefixes(self, trace, up_to_limit = None):
if up_to_limit is None:
up_to_limit = len(trace)
all_prefixes = set()
for i in range(1, up_to_limit+1):
if self._should_add(trace, i):
all_prefixes.add(trace[:i])
return all_prefixes
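    # e.g. _get_prefixes(('a', '', 'b')) returns only ('a',) and ('a', '', 'b'),
    # because prefixes ending in an empty event are skipped by _should_add().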
def symbol_to_trace(self,symbols):
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(letters, numbers))
traces = list()
for symbol in symbols:
traces.append(dictionary.get(symbol))
return tuple(traces)
def trace_to_symbol(self,traces):
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(numbers, letters))
symbols = list()
for trace in traces:
symbols.append(dictionary.get(trace))
        return tuple(symbols)
def rm_trace_to_symbol(self,rm_file):
file = rm_file
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(numbers, letters))
with open(file) as f:
content = f.readlines()
lines = []
for line in content:
end = 0
begin = 1 #initialize values based on what won't enter the loops; initial values irrelevant
number = 0 #random, had to initialize
if line != content[0]:
number = str()
check = 0
count=0
for character in line:
if ((check==1) & (character=="'")): #looks for second quotation
check = 10 #end search
end = count-1
elif (character == "'"): #looks for first quotation
check = 1
begin = count+1
elif (check==1):
number += character
count = count+1
symbol = dictionary.get(int(number))
#symbol = symbol + '&!n'
line = list(line) #necessary for use of pop,insert
if end==begin+1:
line.pop(end)
line.pop(begin)
line.insert(begin,symbol)
elif end==begin:
line.pop(begin)
line.insert(begin,symbol)
lines.append(line)
with open(rm_file, 'w') as f:
for line in lines:
for item in line:
f.write(str(item))
def fix_rmfiles(self,rmfile):
file = rmfile
with open(file) as f:
content = f.readlines()
final_state = str()
for line in content:
if line != content[0]:
brackets = 0
commas = 0
state = str()
next_state = str()
for character in line:
if (character == "(") & (brackets == 0):
brackets = 1
elif brackets == 1:
if character == "(":
brackets = 2
elif brackets == 2:
if character == "1":
final_state = next_state
print(final_state)
if ((commas == 0) & (brackets == 1)):
if character == ",":
commas = 1
else:
state += character
elif ((commas == 1) & (brackets == 1)):
if character == ",":
commas = 2
else:
next_state += character
# with open(rmfile, 'w') as f:
# for line in content:
# for item in line:
# f.write(str(item))
# f.write("\n")
# writethis = "(" + str(final_state) + "," + str(final_state) + ",'True',ConstantRewardFunction(0))"
# f.write(writethis)
"""
when adding a trace, it additionally adds all prefixes as negative traces
"""
def add_trace(self, trace, reward, learned):
trace = tuple(trace)
if reward > 0:
self.positive.add(trace)
# | is a set union operator
#if learned==0:
self.negative |= self._get_prefixes(trace, len(trace)-1)
else:
#if learned == 0:
self.negative |= self._get_prefixes(trace)
# else:
# self.negative.add(trace)
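    # Example for add_trace above: add_trace(['a', 'b'], reward=1, learned=0) stores
    # ('a', 'b') as a positive trace and its proper prefix ('a',) as a negative one.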
def export_traces(self, filename):
parent_path = os.path.dirname(filename)
os.makedirs(parent_path,exist_ok=True)
with open(filename, "w") as output_file:
output_file.write("POSITIVE:")
for trace in self.positive:
output_file.write("\n")
string_repr = [str(el) for el in trace]
output_file.write(','.join(string_repr))
output_file.write("\nNEGATIVE:")
for trace in self.negative:
output_file.write("\n")
string_repr = [str(el) for el in trace]
output_file.write(','.join(string_repr))
def __repr__(self):
return repr(self.positive) + "\n\n" + repr(self.negative)
| [((151, 22, 151, 47), 'os.path.dirname', 'os.path.dirname', ({(151, 38, 151, 46): 'filename'}, {}), '(filename)', False, 'import os\n'), ((152, 8, 152, 46), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n')] |
edwilding/django-comments-xtd | example/comp/urls.py | c3a335b6345b52c75cce69c66b7cf0ef72439d35 | import django
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
if django.VERSION[:2] > (1, 9):
from django.views.i18n import JavaScriptCatalog
else:
from django.views.i18n import javascript_catalog
from django_comments_xtd import LatestCommentFeed
from django_comments_xtd.views import XtdCommentListView
from comp import views
admin.autodiscover()
urlpatterns = [
url(r'^$', views.HomepageView.as_view(), name='homepage'),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
url(r'^articles/', include('comp.articles.urls')),
url(r'^quotes/', include('comp.quotes.urls')),
url(r'^comments/', include('django_comments_xtd.urls')),
url(r'^comments/$', XtdCommentListView.as_view(
content_types=["articles.article", "quotes.quote"],
paginate_by=10, page_range=5),
name='comments-xtd-list'),
url(r'^feeds/comments/$', LatestCommentFeed(), name='comments-feed'),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
if django.VERSION[:2] > (1, 9):
urlpatterns.append(
url(r'^jsi18n/$', JavaScriptCatalog.as_view(),
name='javascript-catalog')
)
else:
js_info_dict = {
'packages': ('django_comments_xtd',)
}
urlpatterns.append(
url(r'^jsi18n/$', javascript_catalog, js_info_dict,
name='javascript-catalog')
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
if 'rosetta' in settings.INSTALLED_APPS:
urlpatterns += [url(r'^rosetta/', include('rosetta.urls'))]
| [((18, 0, 18, 20), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ({}, {}), '()', False, 'from django.contrib import admin\n'), ((52, 19, 52, 44), 'django.contrib.staticfiles.urls.staticfiles_urlpatterns', 'staticfiles_urlpatterns', ({}, {}), '()', False, 'from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n'), ((22, 15, 22, 43), 'comp.views.HomepageView.as_view', 'views.HomepageView.as_view', ({}, {}), '()', False, 'from comp import views\n'), ((23, 19, 23, 51), 'django.conf.urls.include', 'include', ({(23, 27, 23, 50): '"""django.conf.urls.i18n"""'}, {}), "('django.conf.urls.i18n')", False, 'from django.conf.urls import include, url\n'), ((24, 20, 24, 44), 'django.conf.urls.include', 'include', ({(24, 28, 24, 43): 'admin.site.urls'}, {}), '(admin.site.urls)', False, 'from django.conf.urls import include, url\n'), ((25, 23, 25, 52), 'django.conf.urls.include', 'include', ({(25, 31, 25, 51): '"""comp.articles.urls"""'}, {}), "('comp.articles.urls')", False, 'from django.conf.urls import include, url\n'), ((26, 21, 26, 48), 'django.conf.urls.include', 'include', ({(26, 29, 26, 47): '"""comp.quotes.urls"""'}, {}), "('comp.quotes.urls')", False, 'from django.conf.urls import include, url\n'), ((27, 23, 27, 58), 'django.conf.urls.include', 'include', ({(27, 31, 27, 57): '"""django_comments_xtd.urls"""'}, {}), "('django_comments_xtd.urls')", False, 'from django.conf.urls import include, url\n'), ((28, 24, 30, 37), 'django_comments_xtd.views.XtdCommentListView.as_view', 'XtdCommentListView.as_view', (), '', False, 'from django_comments_xtd.views import XtdCommentListView\n'), ((32, 30, 32, 49), 'django_comments_xtd.LatestCommentFeed', 'LatestCommentFeed', ({}, {}), '()', False, 'from django_comments_xtd import LatestCommentFeed\n'), ((33, 23, 34, 58), 'django.conf.urls.include', 'include', (), '', False, 'from django.conf.urls import include, url\n'), ((47, 8, 48, 38), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import include, url\n'), ((39, 26, 39, 53), 'django.views.i18n.JavaScriptCatalog.as_view', 'JavaScriptCatalog.as_view', ({}, {}), '()', False, 'from django.views.i18n import JavaScriptCatalog\n'), ((55, 38, 55, 61), 'django.conf.urls.include', 'include', ({(55, 46, 55, 60): '"""rosetta.urls"""'}, {}), "('rosetta.urls')", False, 'from django.conf.urls import include, url\n')] |
jumploop/high_performance_python | 09_multiprocessing/prime_validation/primes_factor_test.py | da5b11735601b51f141975f9d59f14293cab16bb | import math
import time
def check_prime(n):
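    # trial division up to sqrt(n): returns (is_prime, factor), where factor is the
    # divisor found (even n is rejected immediately) or None for a prime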
if n % 2 == 0:
return False, 2
for i in range(3, int(math.sqrt(n)) + 1):
if n % i == 0:
return False, i
return True, None
if __name__ == "__main__":
primes = []
t1 = time.time()
# 100109100129100151 big prime
# http://primes.utm.edu/curios/page.php/100109100129100151.html
# number_range = xrange(100109100129100153, 100109100129101238, 2)
number_range = range(100109100129101237, 100109100129201238, 2)
# new expensive near-primes
# [(95362951, (100109100129100369, 7.254560947418213))
# (171656941, (100109100129101027, 13.052711009979248))
# (121344023, (100109100129101291, 8.994053840637207)
# note these two lines of timings look really wrong, they're about 4sec
# each really
# [(265687139, (100109100129102047, 19.642582178115845)), (219609683, (100109100129102277, 16.178056001663208)), (121344023, (100109100129101291, 8.994053840637207))]
# [(316096873, (100109100129126653, 23.480671882629395)), (313994287, (100109100129111617, 23.262380123138428)), (307151363, (100109100129140177, 22.80288815498352))]
# primes
# 100109100129162907
# 100109100129162947
highest_factors = {}
for possible_prime in number_range:
t2 = time.time()
is_prime, factor = check_prime(possible_prime)
if is_prime:
primes.append(possible_prime)
print("GOT NEW PRIME", possible_prime)
else:
highest_factors[factor] = (possible_prime, time.time() - t2)
hf = highest_factors.items()
hf = sorted(hf, reverse=True)
print(hf[:3])
print("Took:", time.time() - t1)
print(len(primes), primes[:10], primes[-10:])
| [((16, 9, 16, 20), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((37, 13, 37, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((47, 19, 47, 30), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((8, 26, 8, 38), 'math.sqrt', 'math.sqrt', ({(8, 36, 8, 37): 'n'}, {}), '(n)', False, 'import math\n'), ((43, 55, 43, 66), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
hagabb/katana | python/test/test_dynamic_bitset.py | a52a688b90315a79aa95cf8d279fd7f949a3b94b | import pytest
from katana.dynamic_bitset import DynamicBitset
__all__ = []
SIZE = 50
@pytest.fixture
def dbs():
return DynamicBitset(SIZE)
def test_set(dbs):
dbs[10] = 1
assert dbs[10]
def test_set_invalid_type(dbs):
try:
dbs[2.3] = 0
assert False
except TypeError:
pass
def test_set_invalid_index_low(dbs):
try:
dbs[-1] = 1
assert False
except IndexError:
pass
def test_set_invalid_index_high(dbs):
try:
dbs[SIZE] = 1
assert False
except IndexError:
pass
def test_reset(dbs):
dbs[10] = 1
dbs.reset()
assert not dbs[10]
assert len(dbs) == SIZE
def test_reset_index(dbs):
dbs[10] = 1
dbs[10] = 0
assert not dbs[10]
def test_reset_begin_end(dbs):
dbs[10] = 1
dbs[15] = 1
dbs[12:17] = 0
assert dbs[10]
assert not dbs[15]
def test_reset_begin_end_invalid_step(dbs):
try:
dbs[12:17:22] = 0
assert False
except ValueError:
pass
def test_reset_none_end(dbs):
dbs[10] = 1
dbs[15] = 1
dbs[:12] = 0
assert not dbs[10]
assert dbs[15]
def test_resize(dbs):
dbs.resize(20)
assert len(dbs) == 20
dbs[8] = 1
dbs.resize(20)
assert len(dbs) == 20
assert dbs[8]
dbs.resize(70)
assert len(dbs) == 70
assert dbs[8]
assert dbs.count() == 1
def test_clear(dbs):
dbs[10] = 1
dbs.clear()
assert len(dbs) == 0
dbs.resize(20)
assert len(dbs) == 20
assert not dbs[10]
def test_count(dbs):
dbs[10] = 1
assert dbs.count() == 1
| [((12, 11, 12, 30), 'katana.dynamic_bitset.DynamicBitset', 'DynamicBitset', ({(12, 25, 12, 29): 'SIZE'}, {}), '(SIZE)', False, 'from katana.dynamic_bitset import DynamicBitset\n')] |
kopp/python-astar | tests/basic/test_basic.py | 642dd4bcef9829776614dc0f12681ac94634a3bc | import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A': [('B', 100), ('C', 20)],
'C': [('D', 20)], 'D': [('B', 20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n == n2:
return d
def cost(n, goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors,
heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
| [((33, 4, 33, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((26, 20, 27, 86), 'astar.find_path', 'astar.find_path', (), '', False, 'import astar\n')] |
abdza/skyrim_formulas | potions.py | bf6be3c82715cfde89810d6e6183c95a55a4414c | #!/bin/env python3
import csv
def intersect(list1,list2):
list3 = [ value for value in list1 if value in list2]
return list3
def category(list1,effects):
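    # list1 holds effect names (despite the loop variable being called 'ing');
    # effects maps each effect name to 'Good' or 'Bad' as loaded from ingredients.csv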
cat = 'Good'
good = 0
bad = 0
for ing in list1:
if effects[ing]=='Good':
good += 1
else:
bad += 1
if bad==0:
return 'Potion'
elif good==0:
return 'Poison'
else:
return 'Downside'
effects = {}
ingredients = {}
print("Formulating formulas")
with open('ingredients.csv') as csvfile:
aff = csv.reader(csvfile, delimiter=',')
for row in aff:
if row[0] not in effects.keys():
effects[row[0]] = row[1]
with open('skyrim-ingredients.csv', newline='') as csvfile:
ingre = csv.reader(csvfile, delimiter=',')
for row in ingre:
if row[0] not in ingredients.keys():
ingredients[row[0]] = [row[1],row[2],row[3],row[4]]
multieffects = {}
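# pass 1: pair up ingredients that share at least two effects; keys are 'ing1:ing2'
# (names sorted), values are the lists of shared effects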
for ce in effects:
curing = []
for ing in ingredients:
if ce in ingredients[ing]:
curing.append(ing)
for k,curi in enumerate(curing):
for i in range(k+1,len(curing)):
cureff = intersect(ingredients[curi],ingredients[curing[i]])
cureff.sort()
if len(cureff)>1:
if curi>curing[i]:
curname = curing[i] + ':' + curi
else:
curname = curi + ':' + curing[i]
multieffects[curname] = cureff
finallist = {}
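# pass 2: extend each pair with a third ingredient that shares an effect with either
# member; the value is the combined, de-duplicated effect list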
for me in multieffects:
curing = me.split(":")
for ing in ingredients:
if ing!=curing[0] and ing!=curing[1]:
eff1 = intersect(ingredients[curing[0]],ingredients[ing])
eff2 = intersect(ingredients[curing[1]],ingredients[ing])
if len(eff1)>0 or len(eff2)>0:
tmpname = [ val for val in curing ]
tmpname.append(ing)
tmpname.sort()
finalname = ":".join(tmpname)
finallist[finalname] = list(set(multieffects[me] + eff1 + eff2))
finallist[finalname].sort()
with open('formulas.csv',mode='w') as formula_file:
formula_writer = csv.writer(formula_file, delimiter=',')
formula_writer.writerow(['Category','Ingredient 1','Ingredient 2','Ingredient 3','Effect 1','Effect 2','Effect 3','Effect 4','Effect 5'])
for fl in finallist:
formula_writer.writerow([category(finallist[fl],effects)] + fl.split(":") + finallist[fl])
for fl in multieffects:
formula_writer.writerow([category(multieffects[fl],effects)] + fl.split(":") + [''] + multieffects[fl])
| [((31, 10, 31, 44), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((36, 12, 36, 46), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((76, 21, 76, 60), 'csv.writer', 'csv.writer', (), '', False, 'import csv\n')] |
IceT-M/ctm-python-client | src/clients/ctm_api_client/models/user_additional_properties.py | 0ef1d8a3c9a27a01c088be1cdf5d177d25912bac | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from clients.ctm_api_client.configuration import Configuration
class UserAdditionalProperties(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"member_of_groups": "list[str]",
"authentication": "AuthenticationData",
"is_external_user": "bool",
}
attribute_map = {
"member_of_groups": "memberOfGroups",
"authentication": "authentication",
"is_external_user": "isExternalUser",
}
def __init__(
self,
member_of_groups=None,
authentication=None,
is_external_user=None,
_configuration=None,
): # noqa: E501
"""UserAdditionalProperties - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._member_of_groups = None
self._authentication = None
self._is_external_user = None
self.discriminator = None
if member_of_groups is not None:
self.member_of_groups = member_of_groups
if authentication is not None:
self.authentication = authentication
if is_external_user is not None:
self.is_external_user = is_external_user
@property
def member_of_groups(self):
"""Gets the member_of_groups of this UserAdditionalProperties. # noqa: E501
List of role names # noqa: E501
:return: The member_of_groups of this UserAdditionalProperties. # noqa: E501
:rtype: list[str]
"""
return self._member_of_groups
@member_of_groups.setter
def member_of_groups(self, member_of_groups):
"""Sets the member_of_groups of this UserAdditionalProperties.
List of role names # noqa: E501
:param member_of_groups: The member_of_groups of this UserAdditionalProperties. # noqa: E501
:type: list[str]
"""
self._member_of_groups = member_of_groups
@property
def authentication(self):
"""Gets the authentication of this UserAdditionalProperties. # noqa: E501
user authentication # noqa: E501
:return: The authentication of this UserAdditionalProperties. # noqa: E501
:rtype: AuthenticationData
"""
return self._authentication
@authentication.setter
def authentication(self, authentication):
"""Sets the authentication of this UserAdditionalProperties.
user authentication # noqa: E501
:param authentication: The authentication of this UserAdditionalProperties. # noqa: E501
:type: AuthenticationData
"""
self._authentication = authentication
@property
def is_external_user(self):
"""Gets the is_external_user of this UserAdditionalProperties. # noqa: E501
:return: The is_external_user of this UserAdditionalProperties. # noqa: E501
:rtype: bool
"""
return self._is_external_user
@is_external_user.setter
def is_external_user(self, is_external_user):
"""Sets the is_external_user of this UserAdditionalProperties.
:param is_external_user: The is_external_user of this UserAdditionalProperties. # noqa: E501
:type: bool
"""
self._is_external_user = is_external_user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(UserAdditionalProperties, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserAdditionalProperties):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UserAdditionalProperties):
return True
return self.to_dict() != other.to_dict()
| [((142, 23, 142, 56), 'six.iteritems', 'six.iteritems', ({(142, 37, 142, 55): 'self.swagger_types'}, {}), '(self.swagger_types)', False, 'import six\n'), ((56, 29, 56, 44), 'clients.ctm_api_client.configuration.Configuration', 'Configuration', ({}, {}), '()', False, 'from clients.ctm_api_client.configuration import Configuration\n')] |
harshasunder-1/pyleecan | Tests/Methods/Mesh/Interpolation/test_interpolation.py | 32ae60f98b314848eb9b385e3652d7fc50a77420 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from unittest import TestCase
from pyleecan.Classes.CellMat import CellMat
from pyleecan.Classes.MeshSolution import MeshSolution
from pyleecan.Classes.PointMat import PointMat
from pyleecan.Classes.MeshMat import MeshMat
from pyleecan.Classes.ScalarProductL2 import ScalarProductL2
from pyleecan.Classes.Interpolation import Interpolation
from pyleecan.Classes.RefSegmentP1 import RefSegmentP1
from pyleecan.Classes.FPGNSeg import FPGNSeg
@pytest.mark.MeshSol
class unittest_real_points(TestCase):
""" Tests for interpolation method"""
def test_line(self):
DELTA = 1e-10
mesh = MeshMat()
mesh.cell["line"] = CellMat(nb_pt_per_cell=2)
mesh.point = PointMat()
mesh.point.add_point(np.array([0, 0]))
mesh.point.add_point(np.array([1, 0]))
mesh.point.add_point(np.array([0, 1]))
mesh.point.add_point(np.array([2, 3]))
mesh.point.add_point(np.array([3, 3]))
mesh.add_cell(np.array([0, 1]), "line")
mesh.add_cell(np.array([0, 2]), "line")
mesh.add_cell(np.array([1, 2]), "line")
c_line = mesh.cell["line"]
c_line.interpolation = Interpolation()
c_line.interpolation.ref_cell = RefSegmentP1()
c_line.interpolation.scalar_product = ScalarProductL2()
c_line.interpolation.gauss_point = FPGNSeg()
meshsol = MeshSolution()
meshsol.mesh = [mesh]
vert = mesh.get_vertice(0)["line"]
test_pt = np.array([0.7, 0])
test_field = np.array([1, 1])
sol = [1]
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
testA = np.sum(abs(func - sol))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(test_field)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(0)["line"]
test_pt = np.array([0.7, 0])
test_field = np.ones(
(2, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = np.ones((120, 3))
testA = np.sum(abs(func - sol))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(2)["line"]
test_pt = np.array([0.6, 0.4])
test_field = np.zeros((2, 120, 3))
test_field[0, :] = np.ones(
(1, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
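        # test_pt sits at parameter t = 0.4 along the segment from point 1 to point 2,
        # so the unit field on the first node is weighted by 1 - t = 0.6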
sol = 0.6 * np.ones((120, 3))
testA = np.sum(abs(sol - func))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(1)["line"]
test_pt = np.array([0, 0.4])
test_field = np.zeros((2, 120, 3))
test_field[1, :] = np.ones(
(1, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = 0.4 * np.ones((120, 3))
testA = np.sum(abs(sol - func))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
| [((24, 15, 24, 24), 'pyleecan.Classes.MeshMat.MeshMat', 'MeshMat', ({}, {}), '()', False, 'from pyleecan.Classes.MeshMat import MeshMat\n'), ((25, 28, 25, 53), 'pyleecan.Classes.CellMat.CellMat', 'CellMat', (), '', False, 'from pyleecan.Classes.CellMat import CellMat\n'), ((26, 21, 26, 31), 'pyleecan.Classes.PointMat.PointMat', 'PointMat', ({}, {}), '()', False, 'from pyleecan.Classes.PointMat import PointMat\n'), ((39, 31, 39, 46), 'pyleecan.Classes.Interpolation.Interpolation', 'Interpolation', ({}, {}), '()', False, 'from pyleecan.Classes.Interpolation import Interpolation\n'), ((40, 40, 40, 54), 'pyleecan.Classes.RefSegmentP1.RefSegmentP1', 'RefSegmentP1', ({}, {}), '()', False, 'from pyleecan.Classes.RefSegmentP1 import RefSegmentP1\n'), ((41, 46, 41, 63), 'pyleecan.Classes.ScalarProductL2.ScalarProductL2', 'ScalarProductL2', ({}, {}), '()', False, 'from pyleecan.Classes.ScalarProductL2 import ScalarProductL2\n'), ((42, 43, 42, 52), 'pyleecan.Classes.FPGNSeg.FPGNSeg', 'FPGNSeg', ({}, {}), '()', False, 'from pyleecan.Classes.FPGNSeg import FPGNSeg\n'), ((44, 18, 44, 32), 'pyleecan.Classes.MeshSolution.MeshSolution', 'MeshSolution', ({}, {}), '()', False, 'from pyleecan.Classes.MeshSolution import MeshSolution\n'), ((48, 18, 48, 36), 'numpy.array', 'np.array', ({(48, 27, 48, 35): '[0.7, 0]'}, {}), '([0.7, 0])', True, 'import numpy as np\n'), ((49, 21, 49, 37), 'numpy.array', 'np.array', ({(49, 30, 49, 36): '[1, 1]'}, {}), '([1, 1])', True, 'import numpy as np\n'), ((57, 18, 57, 36), 'numpy.array', 'np.array', ({(57, 27, 57, 35): '[0.7, 0]'}, {}), '([0.7, 0])', True, 'import numpy as np\n'), ((58, 21, 60, 9), 'numpy.ones', 'np.ones', ({(59, 12, 59, 23): '(2, 120, 3)'}, {}), '((2, 120, 3))', True, 'import numpy as np\n'), ((62, 14, 62, 31), 'numpy.ones', 'np.ones', ({(62, 22, 62, 30): '(120, 3)'}, {}), '((120, 3))', True, 'import numpy as np\n'), ((68, 18, 68, 38), 'numpy.array', 'np.array', ({(68, 27, 68, 37): '[0.6, 0.4]'}, {}), '([0.6, 0.4])', True, 'import numpy as np\n'), ((69, 21, 69, 42), 'numpy.zeros', 'np.zeros', ({(69, 30, 69, 41): '(2, 120, 3)'}, {}), '((2, 120, 3))', True, 'import numpy as np\n'), ((70, 27, 72, 9), 'numpy.ones', 'np.ones', ({(71, 12, 71, 23): '(1, 120, 3)'}, {}), '((1, 120, 3))', True, 'import numpy as np\n'), ((80, 18, 80, 36), 'numpy.array', 'np.array', ({(80, 27, 80, 35): '[0, 0.4]'}, {}), '([0, 0.4])', True, 'import numpy as np\n'), ((81, 21, 81, 42), 'numpy.zeros', 'np.zeros', ({(81, 30, 81, 41): '(2, 120, 3)'}, {}), '((2, 120, 3))', True, 'import numpy as np\n'), ((82, 27, 84, 9), 'numpy.ones', 'np.ones', ({(83, 12, 83, 23): '(1, 120, 3)'}, {}), '((1, 120, 3))', True, 'import numpy as np\n'), ((27, 29, 27, 45), 'numpy.array', 'np.array', ({(27, 38, 27, 44): '[0, 0]'}, {}), '([0, 0])', True, 'import numpy as np\n'), ((28, 29, 28, 45), 'numpy.array', 'np.array', ({(28, 38, 28, 44): '[1, 0]'}, {}), '([1, 0])', True, 'import numpy as np\n'), ((29, 29, 29, 45), 'numpy.array', 'np.array', ({(29, 38, 29, 44): '[0, 1]'}, {}), '([0, 1])', True, 'import numpy as np\n'), ((30, 29, 30, 45), 'numpy.array', 'np.array', ({(30, 38, 30, 44): '[2, 3]'}, {}), '([2, 3])', True, 'import numpy as np\n'), ((31, 29, 31, 45), 'numpy.array', 'np.array', ({(31, 38, 31, 44): '[3, 3]'}, {}), '([3, 3])', True, 'import numpy as np\n'), ((33, 22, 33, 38), 'numpy.array', 'np.array', ({(33, 31, 33, 37): '[0, 1]'}, {}), '([0, 1])', True, 'import numpy as np\n'), ((34, 22, 34, 38), 'numpy.array', 'np.array', ({(34, 31, 34, 37): '[0, 2]'}, {}), '([0, 2])', True, 'import numpy as np\n'), 
((35, 22, 35, 38), 'numpy.array', 'np.array', ({(35, 31, 35, 37): '[1, 2]'}, {}), '([1, 2])', True, 'import numpy as np\n'), ((74, 20, 74, 37), 'numpy.ones', 'np.ones', ({(74, 28, 74, 36): '(120, 3)'}, {}), '((120, 3))', True, 'import numpy as np\n'), ((86, 20, 86, 37), 'numpy.ones', 'np.ones', ({(86, 28, 86, 36): '(120, 3)'}, {}), '((120, 3))', True, 'import numpy as np\n')] |
ecarg/grace | lib/models.py | 8c1540116c07648f7d8852ee5e9edff33b6ae2f6 | # -*- coding: utf-8 -*-
"""
Pytorch models
__author__ = 'Jamie ([email protected])'
__copyright__ = 'No copyright. Just copyleft!'
"""
# pylint: disable=no-member
# pylint: disable=invalid-name
###########
# imports #
###########
import torch
import torch.nn as nn
from embedder import Embedder
from pos_models import PosTagger, FnnTagger, CnnTagger # pylint: disable=unused-import
#############
# Ner Class #
#############
class Ner(nn.Module):
"""
named entity recognizer pytorch model
"""
def __init__(self, embedder, encoder, decoder):
"""
* embedder (Embedder)
[sentence_len, context_len] => [sentence_len, context_len, embed_dim]
* encoder (nn.Module)
[sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim]
* decoder (nn.Module)
[sentence_len, hidden_dim] => [sentence_len, n_tags],
"""
super().__init__()
self.embedder = embedder
self.encoder = encoder
self.decoder = decoder
assert isinstance(embedder, Embedder)
assert isinstance(encoder, nn.Module)
assert isinstance(decoder, nn.Module)
def forward(self, sentence, gazet, pos, words): #pylint: disable=arguments-differ
# [sentence_len, context_len] => [sentence_len, context_len, embed_dim]
sentence_embed = self.embedder(sentence, gazet, pos, words)
# [sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim]
hidden = self.encoder(sentence_embed)
# [sentence_len, hidden_dim] => [sentence_len, n_tags]
predicted_tags = self.decoder(hidden)
return predicted_tags
def save(self, path):
"""
        Save the model to the given path.
        :param path: destination path
"""
if torch.cuda.is_available():
self.cpu()
torch.save(self, str(path))
if torch.cuda.is_available():
self.cuda()
@classmethod
def load(cls, path):
"""
        Load a previously saved model.
        :param path: path to the saved model
        :return: model class instance
"""
model = torch.load(str(path))
if torch.cuda.is_available():
model.cuda()
return model
#################
# Encoder Class #
#################
class Fnn5(nn.Module):
"""
    Fully-connected encoder: a single linear layer over the flattened context window
"""
def __init__(self, context_len=21, in_dim=50, hidden_dim=500):
super(Fnn5, self).__init__()
self.context_len = context_len
self.hidden_dim = hidden_dim
self.out_dim = hidden_dim
self.net = nn.Sequential(
nn.Linear(context_len*in_dim, hidden_dim),
)
def forward(self, x):#pylint: disable=arguments-differ
"""
Args:
x: [sentence_len, context_len, in_dim]
Return:
x: [sentence_len, out_dim]
"""
sentence_len = x.size(0)
x = x.view(sentence_len, -1) # [sentence_len, context_len x in_dim]
        x = self.net(x) # [sentence_len, out_dim]
return x
class Cnn7(nn.Module):
"""
ConvNet kernels=[2,3,4,5] + Fully-Connected
"""
    def __init__(self, in_dim=50, hidden_dim=500):
        """
        Four parallel convolution towers (kernel sizes 2, 3, 4 and 5) over the
        context window; their outputs are concatenated in forward().
        """
super(Cnn7, self).__init__()
self.in_dim = in_dim
self.hidden_dim = hidden_dim
self.out_dim = in_dim * 4
self.conv2 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 20
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 9
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 4
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 2
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 1
)
self.conv3 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 21
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 11
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 6
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3
nn.Conv1d(in_dim, in_dim, kernel_size=3), # 1
)
self.conv4 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 20
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 9
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 4
nn.ReLU(),
nn.Conv1d(in_dim, in_dim, kernel_size=4), # 1
)
self.conv5 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 21
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 11
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 6
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=1), # 1
)
def forward(self, x): #pylint: disable=arguments-differ
"""
Args:
x: [sentence_length, context_len, in_dim]
Return:
x: [sentence_length, in_dim * 4]
"""
# [sentence_length, in_dim, context_len]
x = x.transpose(1, 2)
conv2 = self.conv2(x).squeeze(-1) # [sentence_len, in_dim]
conv3 = self.conv3(x).squeeze(-1) # [sentence_len, in_dim]
conv4 = self.conv4(x).squeeze(-1) # [sentence_len, in_dim]
conv5 = self.conv5(x).squeeze(-1) # [sentence_len, in_dim]
# [sentence_len, in_dim * 4]
out = torch.cat([conv2, conv3, conv4, conv5], dim=1)
return out
class Cnn8(nn.Module):
"""
9-layer Conv NN + Batch Norm + Residual
"""
def __init__(self, context_len=21, in_dim=64, hidden_dim=None):
super(Cnn8, self).__init__()
self.context_len = context_len
# conv block 64
self.conv_block1_1 = self.conv_block(in_dim, 2, False)
self.conv_block1_2_1 = self.conv_block(in_dim, 1, False)
self.conv_block1_2_2 = self.conv_block(in_dim, 1, True)
self.pool1 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True)
# conv block 128
self.conv_block2_1 = self.conv_block(in_dim*2, 2, False)
self.conv_block2_2_1 = self.conv_block(in_dim*2, 1, False)
self.conv_block2_2_2 = self.conv_block(in_dim*2, 1, True)
self.pool2 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True)
# conv block 256
self.conv_block3_1 = self.conv_block(in_dim*4, 2, False)
self.conv_block3_2_1 = self.conv_block(in_dim*4, 1, False)
self.conv_block3_2_2 = self.conv_block(in_dim*4, 1, True)
self.pool3 = nn.MaxPool1d(kernel_size=2)
# conv block 512
self.conv_block4_1 = self.conv_block(in_dim*8, 2, False)
self.conv_block4_2_1 = self.conv_block(in_dim*8, 1, False)
self.conv_block4_2_2 = self.conv_block(in_dim*8, 1, True)
self.pool4 = nn.MaxPool1d(kernel_size=3)
self.out_dim = in_dim*16
@classmethod
def conv_block(cls, in_dim=64, depth=2, double=True):
"""
Args:
[batch_size, dim, length]
Return:
[batch_size, dim*2, length] if double=True
[batch_size, dim, length] if double=False
"""
out_dim = in_dim
layers = []
for i in range(depth):
if double:
if i == depth - 1:
out_dim = in_dim * 2
layers.append(nn.Conv1d(in_dim, out_dim, kernel_size=3, padding=1))
layers.append(nn.BatchNorm1d(out_dim))
layers.append(nn.ReLU())
return nn.Sequential(*layers)
def forward(self, sentence):#pylint: disable=arguments-differ
"""
Args:
sentence: [sentence_len, context_len, embed_dim]
Return:
logit: [batch_size, out_dim]
"""
# [sentence_len, embed_dim, context_len]
x = sentence.transpose(1, 2)
# conv block 64
x = self.conv_block1_1(x) + x # [batch, in_dim, 21]
x = self.conv_block1_2_1(x) + x # [batch, in_dim, 21]
x = self.conv_block1_2_2(x) # [batch, in_dim*2, 21]
x = self.pool1(x) # [batch, in_dim*2, 11]
# conv block 128
x = self.conv_block2_1(x) + x # [batch, in_dim*2, 11]
x = self.conv_block2_2_1(x) + x # [batch, in_dim*2, 11]
x = self.conv_block2_2_2(x) # [batch, in_dim*4, 11]
x = self.pool2(x) # [batch, in_dim*4, 6]
# conv block 256
x = self.conv_block3_1(x) + x # [batch, in_dim*4, 6]
x = self.conv_block3_2_1(x) + x # [batch, in_dim*4, 6]
x = self.conv_block3_2_2(x) # [batch, in_dim*8, 6]
x = self.pool3(x) # [batch, in_dim*8, 3]
# conv block 512
x = self.conv_block4_1(x) + x # [batch, in_dim*8, 3]
x = self.conv_block4_2_1(x) + x # [batch, in_dim*8, 3]
x = self.conv_block4_2_2(x) # [batch, in_dim*16, 3]
x = self.pool4(x) # [batch_size, in_dim*16, 1]
x = x.squeeze(-1) # [batch, in_dim*16]
return x
class RnnEncoder(nn.Module):
"""
RNN Encoder Module
"""
def __init__(self, context_len=21, in_dim=1024, out_dim=1024,
num_layers=2, cell='gru'):
super(RnnEncoder, self).__init__()
self.hidden_dim = out_dim // 2
if cell == 'gru':
self.rnn = nn.GRU(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
        elif cell == 'lstm':
self.rnn = nn.LSTM(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
elif cell == 'sru':
from sru import SRU
self.rnn = SRU(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
def forward(self, x):#pylint: disable=arguments-differ
"""
Args:
x: [sentence_len, context_len, input_size]
Return:
x: [sentence_len, hidden_size]
"""
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
# [sequence_len, context_len, input_size]
# =>[sentence_len, context_len, hidden_size x 2]
x, _ = self.rnn(x)
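        # keep only the centre position of the context window (index 10 assumes the
        # default context_len of 21)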
# [sequence_len, hidden_size x 2]
x = x[:, 10, :]
return x
#################
# Decoder Class #
#################
class FCDecoder(nn.Module):
"""
Fully-Connected Decoder
"""
def __init__(self, in_dim, hidden_dim, n_tags):
super(FCDecoder, self).__init__()
self.net = nn.Sequential(
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_dim, n_tags)
)
def forward(self, x):#pylint: disable=arguments-differ
"""
[sentence_len, in_dim] => [sentence_len, n_tags]
"""
return self.net(x)
class RnnDecoder(nn.Module):
"""
RNN-based Decoder
"""
def __init__(self, in_dim=1024, hidden_dim=512, n_tags=11,
num_layers=2, cell='gru'):
super(RnnDecoder, self).__init__()
if cell == 'gru':
self.rnn = nn.GRU(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
        elif cell == 'lstm':
self.rnn = nn.LSTM(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
elif cell == 'sru':
from sru import SRU
self.rnn = SRU(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
self.out = nn.Sequential(
nn.ReLU(),
nn.Dropout(),
nn.Linear(hidden_dim * 2, n_tags)
)
def forward(self, x):#pylint: disable=arguments-differ
"""
[sentence_len, in_dim] => [sentence_len, n_tags]
"""
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
# [sentence_len, batch=1, input_size]
x = x.unsqueeze(1)
# x: [sentence_len, batch=1, hidden_size x 2]
# h_n: [num_layers * 2, batch=1, hidden_size]
# c_n: [num_layers * 2, batch=1, hidden_size]
x, _ = self.rnn(x)
# [sequence_len, hidden_size x 2]
x = x.squeeze(1)
# [sequence_len, n_tags]
x = self.out(x)
return x
| [((66, 11, 66, 36), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((69, 11, 69, 36), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((80, 11, 80, 36), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((199, 14, 199, 60), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((217, 21, 217, 75), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((223, 21, 223, 75), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((229, 21, 229, 48), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((235, 21, 235, 48), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((257, 15, 257, 37), 'torch.nn.Sequential', 'nn.Sequential', ({(257, 29, 257, 36): '*layers'}, {}), '(*layers)', True, 'import torch.nn as nn\n'), ((100, 12, 100, 53), 'torch.nn.Linear', 'nn.Linear', ({(100, 22, 100, 40): 'context_len * in_dim', (100, 42, 100, 52): 'hidden_dim'}, {}), '(context_len * in_dim, hidden_dim)', True, 'import torch.nn as nn\n'), ((130, 12, 130, 52), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((131, 12, 131, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((132, 12, 132, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((133, 12, 133, 52), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((134, 12, 134, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((135, 12, 135, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((136, 12, 136, 52), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((137, 12, 137, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((138, 12, 138, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((139, 12, 139, 52), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((143, 12, 143, 63), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((144, 12, 144, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((145, 12, 145, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((146, 12, 146, 63), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((147, 12, 147, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((148, 12, 148, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((149, 12, 149, 63), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((150, 12, 150, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((151, 12, 151, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((152, 12, 152, 52), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((157, 12, 157, 63), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((158, 12, 158, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((159, 12, 159, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((160, 12, 160, 63), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((161, 12, 
161, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((162, 12, 162, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((163, 12, 163, 63), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((164, 12, 164, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((166, 12, 166, 52), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((170, 12, 170, 63), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((171, 12, 171, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((172, 12, 172, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((173, 12, 173, 63), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((174, 12, 174, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((175, 12, 175, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((176, 12, 176, 63), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((177, 12, 177, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((178, 12, 178, 55), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (), '', True, 'import torch.nn as nn\n'), ((179, 12, 179, 63), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((308, 23, 313, 35), 'torch.nn.GRU', 'nn.GRU', (), '', True, 'import torch.nn as nn\n'), ((316, 23, 321, 35), 'torch.nn.LSTM', 'nn.LSTM', (), '', True, 'import torch.nn as nn\n'), ((366, 12, 366, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((367, 12, 367, 24), 'torch.nn.Dropout', 'nn.Dropout', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((368, 12, 368, 37), 'torch.nn.Linear', 'nn.Linear', ({(368, 22, 368, 28): 'in_dim', (368, 30, 368, 36): 'n_tags'}, {}), '(in_dim, n_tags)', True, 'import torch.nn as nn\n'), ((385, 23, 390, 35), 'torch.nn.GRU', 'nn.GRU', (), '', True, 'import torch.nn as nn\n'), ((393, 23, 398, 35), 'torch.nn.LSTM', 'nn.LSTM', (), '', True, 'import torch.nn as nn\n'), ((410, 12, 410, 21), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((411, 12, 411, 24), 'torch.nn.Dropout', 'nn.Dropout', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((412, 12, 412, 45), 'torch.nn.Linear', 'nn.Linear', ({(412, 22, 412, 36): 'hidden_dim * 2', (412, 38, 412, 44): 'n_tags'}, {}), '(hidden_dim * 2, n_tags)', True, 'import torch.nn as nn\n'), ((254, 26, 254, 78), 'torch.nn.Conv1d', 'nn.Conv1d', (), '', True, 'import torch.nn as nn\n'), ((255, 26, 255, 49), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', ({(255, 41, 255, 48): 'out_dim'}, {}), '(out_dim)', True, 'import torch.nn as nn\n'), ((256, 26, 256, 35), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((325, 23, 330, 35), 'sru.SRU', 'SRU', (), '', False, 'from sru import SRU\n'), ((402, 23, 407, 35), 'sru.SRU', 'SRU', (), '', False, 'from sru import SRU\n')] |
BioGeek/pyseqlogo | pyseqlogo/__init__.py | e41d9645c7a9fa5baf3deab281acf40ea5357f64 | # -*- coding: utf-8 -*-
"""Top-level package for pyseqlogo."""
__author__ = """Saket Choudhary"""
__email__ = '[email protected]'
__version__ = '0.1.0'
from .pyseqlogo import draw_logo
from .pyseqlogo import setup_axis
| [] |
edulix/apscheduler | setup.py | 8030e0fc7e1845a15861e649988cc73a1aa624ec | # coding: utf-8
import os.path
try:
from setuptools import setup
extras = dict(zip_safe=False, test_suite='nose.collector', tests_require=['nose'])
except ImportError:
from distutils.core import setup
extras = {}
import apscheduler
here = os.path.dirname(__file__)
readme_path = os.path.join(here, 'README.rst')
readme = open(readme_path).read()
setup(
name='APScheduler',
version=apscheduler.release,
description='In-process task scheduler with Cron-like capabilities',
long_description=readme,
author='Alex Gronholm',
author_email='[email protected]',
url='http://pypi.python.org/pypi/APScheduler/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3'
],
keywords='scheduling cron',
license='MIT',
packages=('apscheduler', 'apscheduler.jobstores', 'apscheduler.triggers', 'apscheduler.triggers.cron'),
)
| [((18, 0, 40, 1), 'distutils.core.setup', 'setup', (), '', False, 'from distutils.core import setup\n')] |
travisyates81/object-detection | object_detection/exporter_test.py | 931bebfa54798c08d2c401e9c1bad39015d8c832 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Travis Yates
"""Tests for object_detection.export_inference_graph."""
import os
import mock
import numpy as np
import tensorflow as tf
from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
class FakeModel(model.DetectionModel):
def preprocess(self, inputs):
return (tf.identity(inputs) *
tf.get_variable('dummy', shape=(),
initializer=tf.constant_initializer(2),
dtype=tf.float32))
def predict(self, preprocessed_inputs):
return {'image': tf.identity(preprocessed_inputs)}
def postprocess(self, prediction_dict):
with tf.control_dependencies(prediction_dict.values()):
return {
'detection_boxes': tf.constant([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]], tf.float32),
'detection_scores': tf.constant([[0.7, 0.6]], tf.float32),
'detection_classes': tf.constant([[0, 1]], tf.float32),
'num_detections': tf.constant([2], tf.float32)
}
def restore_fn(self, checkpoint_path, from_detection_checkpoint):
pass
def loss(self, prediction_dict):
pass
class ExportInferenceGraphTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path,
use_moving_averages):
g = tf.Graph()
with g.as_default():
mock_model = FakeModel(num_classes=1)
mock_model.preprocess(tf.constant([1, 3, 4, 3], tf.float32))
if use_moving_averages:
tf.train.ExponentialMovingAverage(0.0).apply()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
saver.save(sess, checkpoint_path)
def _load_inference_graph(self, inference_graph_path):
od_graph = tf.Graph()
with od_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(inference_graph_path) as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return od_graph
def _create_tf_example(self, image_array):
with self.test_session():
encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval()
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': _bytes_feature(encoded_image),
'image/format': _bytes_feature('jpg'),
'image/source_id': _bytes_feature('image_id')
})).SerializeToString()
return example
def test_export_graph_with_image_tensor_input(self):
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pbtxt')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=None,
inference_graph_path=inference_graph_path)
def test_export_graph_with_tf_example_input(self):
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pbtxt')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
checkpoint_path=None,
inference_graph_path=inference_graph_path)
def test_export_frozen_graph(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=False)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
def test_export_frozen_graph_with_moving_averages(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=True)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = True
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
def test_export_and_run_inference_with_image_tensor(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=False)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
inference_graph = self._load_inference_graph(inference_graph_path)
with self.test_session(graph=inference_graph) as sess:
image_tensor = inference_graph.get_tensor_by_name('image_tensor:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: np.ones((1, 4, 4, 3)).astype(np.uint8)})
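      # the exported graph is expected to apply a label id offset of +1, so the raw
      # classes [[0, 1]] from FakeModel surface as [[1, 2]] here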
self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]])
self.assertAllClose(scores, [[0.7, 0.6]])
self.assertAllClose(classes, [[1, 2]])
self.assertAllClose(num_detections, [2])
def test_export_and_run_inference_with_tf_example(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=False)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
inference_graph = self._load_inference_graph(inference_graph_path)
with self.test_session(graph=inference_graph) as sess:
tf_example = inference_graph.get_tensor_by_name('tf_example:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={tf_example: self._create_tf_example(
np.ones((4, 4, 3)).astype(np.uint8))})
self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]])
self.assertAllClose(scores, [[0.7, 0.6]])
self.assertAllClose(classes, [[1, 2]])
self.assertAllClose(num_detections, [2])
if __name__ == '__main__':
tf.test.main()
| [((214, 2, 214, 16), 'tensorflow.test.main', 'tf.test.main', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((48, 8, 48, 18), 'tensorflow.Graph', 'tf.Graph', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((61, 15, 61, 25), 'tensorflow.Graph', 'tf.Graph', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((19, 12, 19, 31), 'tensorflow.identity', 'tf.identity', ({(19, 24, 19, 30): 'inputs'}, {}), '(inputs)', True, 'import tensorflow as tf\n'), ((25, 21, 25, 53), 'tensorflow.identity', 'tf.identity', ({(25, 33, 25, 52): 'preprocessed_inputs'}, {}), '(preprocessed_inputs)', True, 'import tensorflow as tf\n'), ((54, 14, 54, 30), 'tensorflow.train.Saver', 'tf.train.Saver', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((55, 13, 55, 46), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((63, 21, 63, 34), 'tensorflow.GraphDef', 'tf.GraphDef', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((83, 9, 84, 46), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((89, 24, 89, 62), 'object_detection.protos.pipeline_pb2.TrainEvalPipelineConfig', 'pipeline_pb2.TrainEvalPipelineConfig', ({}, {}), '()', False, 'from object_detection.protos import pipeline_pb2\n'), ((91, 6, 95, 52), 'object_detection.exporter.export_inference_graph', 'exporter.export_inference_graph', (), '', False, 'from object_detection import exporter\n'), ((98, 9, 99, 46), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((103, 24, 103, 62), 'object_detection.protos.pipeline_pb2.TrainEvalPipelineConfig', 'pipeline_pb2.TrainEvalPipelineConfig', ({}, {}), '()', False, 'from object_detection.protos import pipeline_pb2\n'), ((105, 6, 109, 52), 'object_detection.exporter.export_inference_graph', 'exporter.export_inference_graph', (), '', False, 'from object_detection import exporter\n'), ((117, 9, 118, 46), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((120, 24, 120, 62), 'object_detection.protos.pipeline_pb2.TrainEvalPipelineConfig', 'pipeline_pb2.TrainEvalPipelineConfig', ({}, {}), '()', False, 'from object_detection.protos import pipeline_pb2\n'), ((122, 6, 126, 52), 'object_detection.exporter.export_inference_graph', 'exporter.export_inference_graph', (), '', False, 'from object_detection import exporter\n'), ((134, 9, 135, 46), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((137, 24, 137, 62), 'object_detection.protos.pipeline_pb2.TrainEvalPipelineConfig', 'pipeline_pb2.TrainEvalPipelineConfig', ({}, {}), '()', False, 'from object_detection.protos import pipeline_pb2\n'), ((139, 6, 143, 52), 'object_detection.exporter.export_inference_graph', 'exporter.export_inference_graph', (), '', False, 'from object_detection import exporter\n'), ((151, 9, 152, 46), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((154, 24, 154, 62), 'object_detection.protos.pipeline_pb2.TrainEvalPipelineConfig', 'pipeline_pb2.TrainEvalPipelineConfig', ({}, {}), '()', False, 'from object_detection.protos import pipeline_pb2\n'), ((156, 6, 160, 52), 'object_detection.exporter.export_inference_graph', 'exporter.export_inference_graph', (), '', False, 'from object_detection import exporter\n'), ((184, 9, 185, 46), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((187, 24, 187, 62), 'object_detection.protos.pipeline_pb2.TrainEvalPipelineConfig', 'pipeline_pb2.TrainEvalPipelineConfig', ({}, {}), '()', 
False, 'from object_detection.protos import pipeline_pb2\n'), ((189, 6, 193, 52), 'object_detection.exporter.export_inference_graph', 'exporter.export_inference_graph', (), '', False, 'from object_detection import exporter\n'), ((30, 29, 31, 76), 'tensorflow.constant', 'tf.constant', ({(30, 41, 31, 63): '[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]', (31, 65, 31, 75): 'tf.float32'}, {}), '([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], tf.float32)', True, 'import tensorflow as tf\n'), ((32, 30, 32, 67), 'tensorflow.constant', 'tf.constant', ({(32, 42, 32, 54): '[[0.7, 0.6]]', (32, 56, 32, 66): 'tf.float32'}, {}), '([[0.7, 0.6]], tf.float32)', True, 'import tensorflow as tf\n'), ((33, 31, 33, 64), 'tensorflow.constant', 'tf.constant', ({(33, 43, 33, 51): '[[0, 1]]', (33, 53, 33, 63): 'tf.float32'}, {}), '([[0, 1]], tf.float32)', True, 'import tensorflow as tf\n'), ((34, 28, 34, 56), 'tensorflow.constant', 'tf.constant', ({(34, 40, 34, 43): '[2]', (34, 45, 34, 55): 'tf.float32'}, {}), '([2], tf.float32)', True, 'import tensorflow as tf\n'), ((51, 28, 51, 65), 'tensorflow.constant', 'tf.constant', ({(51, 40, 51, 52): '[1, 3, 4, 3]', (51, 54, 51, 64): 'tf.float32'}, {}), '([1, 3, 4, 3], tf.float32)', True, 'import tensorflow as tf\n'), ((64, 11, 64, 47), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', ({(64, 26, 64, 46): 'inference_graph_path'}, {}), '(inference_graph_path)', True, 'import tensorflow as tf\n'), ((67, 8, 67, 50), 'tensorflow.import_graph_def', 'tf.import_graph_def', (), '', True, 'import tensorflow as tf\n'), ((21, 40, 21, 66), 'tensorflow.constant_initializer', 'tf.constant_initializer', ({(21, 64, 21, 65): '(2)'}, {}), '(2)', True, 'import tensorflow as tf\n'), ((74, 41, 74, 74), 'tensorflow.train.BytesList', 'tf.train.BytesList', (), '', True, 'import tensorflow as tf\n'), ((53, 8, 53, 46), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ({(53, 42, 53, 45): '(0.0)'}, {}), '(0.0)', True, 'import tensorflow as tf\n'), ((72, 43, 72, 67), 'tensorflow.constant', 'tf.constant', ({(72, 55, 72, 66): 'image_array'}, {}), '(image_array)', True, 'import tensorflow as tf\n'), ((171, 35, 171, 56), 'numpy.ones', 'np.ones', ({(171, 43, 171, 55): '(1, 4, 4, 3)'}, {}), '((1, 4, 4, 3))', True, 'import numpy as np\n'), ((205, 14, 205, 32), 'numpy.ones', 'np.ones', ({(205, 22, 205, 31): '(4, 4, 3)'}, {}), '((4, 4, 3))', True, 'import numpy as np\n')] |
matthewyoung28/macmentum | run.py | af1a26903e25b4a4f278388d7be1e638e071c0a8 | import os
import sys
import random
def get_next_wallpaper(curr_path):
    lst_dir = os.listdir(curr_path)  # list the wallpaper directory that was passed in, not the cwd
rand_index = random.randint(0, len(lst_dir) - 1)
return lst_dir[rand_index]
def get_wall_dir():
return "/Users/MYOUNG/Pictures/mmt"
def main():
script = "osascript -e 'tell application \"Finder\" to set desktop picture to POSIX file '"
path = get_wall_dir()
file = get_next_wallpaper(path)
# print("FILE = ", file)
script = script + path + "/" + file
# print("SCRIPT = ", script)
os.system(script)
main()
| [((7, 14, 7, 26), 'os.listdir', 'os.listdir', ({}, {}), '()', False, 'import os\n'), ((23, 2, 23, 19), 'os.system', 'os.system', ({(23, 12, 23, 18): 'script'}, {}), '(script)', False, 'import os\n')] |
dolfno/mlops_demo | noxfile.py | 52a04525f1655a32d45002384a972a1920fd517a | """Automated CI tools to run with Nox"""
import nox
from nox import Session
locations = "src", "noxfile.py", "docs/conf.py"
nox.options.sessions = "lint", "tests"
@nox.session(python="3.9")
def tests(session: Session) -> None:
"""Run tests with nox"""
session.run("poetry", "install", external=True)
session.run("pytest", "--cov")
@nox.session(python="3.9")
def lint(session: Session) -> None:
"""Run linting with nox"""
session.install(
"flake8",
"flake8-annotations",
"flake8-bandit",
"flake8-black",
"flake8-bugbear",
"flake8-docstrings",
"flake8-import-order",
)
args = session.posargs or locations
session.run("flake8", *args)
@nox.session(python="3.9")
def black(session: Session) -> None:
"""Run black with nox"""
session.install("black")
args = session.posargs or locations
session.run("black", *args, "--line-length=120")
@nox.session(python="3.9")
def pytype(session: Session) -> None:
"""Run the static type checker."""
args = session.posargs or ["--disable=import-error", *locations]
session.install("pytype")
session.run("pytype", *args)
package = "hypermodern_python"
@nox.session(python=["3.9"])
def typeguard(session: Session) -> None:
"""Run typeguard for type checking with nox"""
args = session.posargs or ["-m", "not e2e"]
session.run("poetry", "install", "--no-dev", external=True)
session.install("pytest", "pytest-mock", "typeguard")
session.run("pytest", f"--typeguard-packages={package}", *args)
@nox.session(python="3.9")
def docs(session: Session) -> None:
"""Build the documentation."""
session.run("poetry", "install", "--no-dev", external=True)
session.install("sphinx", "sphinx-autodoc-typehints")
session.run("sphinx-build", "docs", "docs/_build")
@nox.session(python="3.9")
def coverage(session: Session) -> None:
"""Upload coverage data."""
session.install("coverage[toml]", "codecov")
session.run("coverage", "xml", "--fail-under=0")
session.run("codecov", *session.posargs)
| [((9, 1, 9, 26), 'nox.session', 'nox.session', (), '', False, 'import nox\n'), ((16, 1, 16, 26), 'nox.session', 'nox.session', (), '', False, 'import nox\n'), ((32, 1, 32, 26), 'nox.session', 'nox.session', (), '', False, 'import nox\n'), ((40, 1, 40, 26), 'nox.session', 'nox.session', (), '', False, 'import nox\n'), ((51, 1, 51, 28), 'nox.session', 'nox.session', (), '', False, 'import nox\n'), ((60, 1, 60, 26), 'nox.session', 'nox.session', (), '', False, 'import nox\n'), ((68, 1, 68, 26), 'nox.session', 'nox.session', (), '', False, 'import nox\n')] |
canerbulduk/cocotb-test | cocotb_test/run.py | ece092446a1e5de932db12dfb60441d6f322d5f1 |
import cocotb_test.simulator
# For partial back compatibility
def run(simulator=None, **kwargs):
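    # If a simulator class is supplied, instantiate it with the given kwargs and run it;
    # otherwise fall back to the module-level cocotb_test.simulator.run helper.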
if simulator:
sim = simulator(**kwargs)
sim.run()
else:
cocotb_test.simulator.run(**kwargs)
| [] |
hamzabouissi/kanban_backend | kanban_backend/project_management/apps.py | 549d8c2711313011f3186b5b3a3ac969481df3f7 | from django.apps import AppConfig
class ProjectManagementConfig(AppConfig):
name = 'kanban_backend.project_management'
def ready(self):
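        # Importing the signals module here registers its signal receivers once the app registry is ready.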
try:
import kanban_backend.users.signals # noqa F401
except ImportError:
pass
| [] |
davidhozic/Discord-Shiller | src/framework/tracing.py | ff22bb1ceb7b4128ee0d27f3c9c9dd0a5279feb9 | """
~ Tracing ~
    This module contains functions and classes
    related to the console debug log or trace.
"""
from enum import Enum, auto
import time
__all__ = (
"TraceLEVELS",
"trace"
)
m_use_debug = None
class TraceLEVELS(Enum):
"""
Info: Level of trace for debug
"""
NORMAL = 0
WARNING = auto()
ERROR = auto()
def trace(message: str,
level: TraceLEVELS = TraceLEVELS.NORMAL):
""""
Name : trace
Param:
- message : str = Trace message
- level : TraceLEVELS = Level of the trace
"""
if m_use_debug:
timestruct = time.localtime()
timestamp = "Date: {:02d}.{:02d}.{:04d} Time:{:02d}:{:02d}"
timestamp = timestamp.format(timestruct.tm_mday,
timestruct.tm_mon,
timestruct.tm_year,
timestruct.tm_hour,
timestruct.tm_min)
l_trace = f"{timestamp}\nTrace level: {level.name}\nMessage: {message}\n"
print(l_trace)
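# Example usage (messages are only printed once the framework enables m_use_debug):
#   trace("Unable to send message", TraceLEVELS.WARNING)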
| [((22, 14, 22, 20), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import Enum, auto\n'), ((23, 13, 23, 19), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import Enum, auto\n'), ((34, 21, 34, 37), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n')] |
jeffreypaul15/sunkit-image | sunkit_image/__init__.py | 0987db8fcd38c79a83d7d890e407204e63a05c4f | """
sunkit-image
============
An image processing toolbox for Solar Physics.
* Homepage: https://sunpy.org
* Documentation: https://sunkit-image.readthedocs.io/en/latest/
"""
import sys
from .version import version as __version__ # NOQA
# Enforce Python version check during package import.
__minimum_python_version__ = "3.7"
class UnsupportedPythonError(Exception):
"""
Running on an unsupported version of Python.
"""
if sys.version_info < tuple(int(val) for val in __minimum_python_version__.split(".")):
    # This has to be .format to keep backwards compatibility.
raise UnsupportedPythonError(
"sunkit_image does not support Python < {}".format(__minimum_python_version__)
)
__all__ = []
| [] |
lucasblazzi/stocker | app/view.py | 52cdec481ed84a09d97369ee4da229e169f99f51 | import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
class View:
def __init__(self, st):
self.st = st
self.st.set_page_config(layout='wide')
self.side_bar = st.sidebar
def show_message(self, location, _type, message):
if location == "sb":
component = self.side_bar
else:
component = self.st
if _type == "success":
component.success(message)
elif _type == "error":
component.error(message)
elif _type == "warning":
component.warning(message)
elif _type == "info":
component.info(message)
def login(self):
_user = self.side_bar.text_input("Username:")
_pass = self.side_bar.text_input("Password", type="password")
return _user, _pass
def advisor_setup(self):
option = self.side_bar.selectbox("Options:", ("Research", ))
if option == "Research":
self.st.header("Advisor Research Area")
self.st.markdown("___")
return option
def research_area(self):
execute = False
args = {"price": {"enabled": False}, "sector": {"enabled": False}, "news": {"enabled": False},
"company_info": {"enabled": False}, "volatility": {"enabled": False}, "return": {"enabled": False},
"raw_price": {"enabled": False}, "volume": {"enabled": False}}
self.st.markdown("___")
check_cols = self.st.beta_columns(4)
args["price"]["enabled"] = check_cols[0].checkbox("Price")
args["company_info"]["enabled"] = check_cols[1].checkbox("Company Information")
args["sector"]["enabled"] = check_cols[2].checkbox("Sector Distribution")
args["news"]["enabled"] = check_cols[3].checkbox("News")
if args["price"]["enabled"]:
self.st.markdown("___")
self.st.subheader("Price Insights")
price_cols = self.st.beta_columns(7)
args["price"]["_type"] = price_cols[0].selectbox("Price type:", ("close", "open", "high", "low"))
args["price"]["period"] = price_cols[1].selectbox("Period:", ("ytd", "1m", "6m", "1y", "2y", "5y", "max"))
args["raw_price"]["enabled"] = price_cols[3].checkbox("Raw Price")
args["volume"]["enabled"] = price_cols[4].checkbox("Volume")
args["return"]["enabled"] = price_cols[5].checkbox("Return")
args["volatility"]["enabled"] = price_cols[6].checkbox("Volatility")
return execute, args
def show_cryptos(self, cryptos):
for crypto in cryptos:
cols = self.st.beta_columns(3)
cols[0].markdown(f"**Symbol: ** {crypto.get('symbol', '-')}")
cols[1].markdown(f"**Name: ** {crypto.get('name', '-')}")
cols[2].markdown(f"**Price: ** {crypto.get('price', '-')}")
def crypto_form(self):
self.st.markdown("<br><br>", unsafe_allow_html=True)
self.st.markdown("___")
_input = self.st.text_input("Cryptocurrency")
return _input
def sector_distribution(self, sectors):
self.st.subheader("Sector Distribution")
r = sectors['sector'].value_counts()
fig = go.Figure(data=[go.Pie(labels=r.index, values=r)])
fig.update_layout(
width=400, height=400,
)
self.st.plotly_chart(fig)
def plot_price(self, prices, _type):
self.st.subheader(_type.capitalize())
fig = go.Figure()
for price in prices:
name = price["symbol"][0]
fig.add_trace(go.Scatter(x=price.index, y=price[_type],
mode='lines',
name=name))
fig.update_layout(
template="plotly_white",
width=1400, height=500,
hovermode="x unified",
plot_bgcolor='rgba(0,0,0,0)'
)
self.st.plotly_chart(fig)
def show_companies(self, companies):
self.st.markdown("___")
self.st.subheader("Company Information")
self.st.markdown("<br>", unsafe_allow_html=True)
for company in companies:
basic = self.st.beta_columns(4)
basic[0].markdown(f"## **{company.get('name', ' ')} ({company.get('symbol', ' ')})**")
if company.get("logo"):
basic[3].image(company.get("logo"), width=50)
basic[3].markdown("<br>", unsafe_allow_html=True)
desc = self.st.beta_columns(2)
if company.get('sector'):
desc[0].markdown(f"**Sector: ** {company.get('sector', '-')}")
if company.get('industry'):
desc[1].markdown(f"**Industry: ** {company.get('industry', '-')}")
if company.get('description'):
desc[0].markdown(f"**Description: ** {company.get('description', '-')}")
info = self.st.beta_columns(2)
if company.get('CEO'):
info[0].markdown(f"**CEO: ** {company.get('CEO', '-')}")
if company.get('employees'):
info[1].markdown(f"**Employees: ** {company.get('employees', '-')}")
if company.get('website'):
info[0].markdown(f"**Website: ** {company.get('website', '-')}")
if company.get('city') or company.get('state') or company.get('country'):
info[1].markdown(f"**Location: ** {company.get('city', ' ')} - {company.get('state', ' ')} - {company.get('country', ' ')}")
self.st.markdown("___")
def show_news(self, news, title="Company News"):
self.st.markdown("___")
self.st.subheader(title)
self.st.markdown("<br>", unsafe_allow_html=True)
for n in news:
if n.get('symbol') or n.get('title') or n.get('date'):
self.st.markdown(f"**{n.get('symbol', ' ')} - {n.get('title', ' ')} [{n.get('date', ' ')}]**")
if n.get('source'):
self.st.markdown(f"**Source: ** {n.get('source', '-')}")
if n.get("image"):
self.st.image(n.get("image"), width=300)
if n.get("description"):
self.st.markdown(f"**Description: ** {n.get('description', '-')}")
if n.get("url"):
self.st.markdown(f"**Access on: ** {n.get('url', '-')}")
self.st.markdown("<br>", unsafe_allow_html=True)
def list_advisors(self, advisors):
for advisor in advisors:
cols = self.st.beta_columns(3)
cols[0].markdown(f"**Name: ** {advisor[0]}")
cols[1].markdown(f"**CPF: ** {advisor[1]}")
cols[2].markdown(f"**CVM: ** {advisor[2]}")
def symbol_input(self, symbols):
selected_symbols = self.st.multiselect("Stocks list:", symbols)
return selected_symbols
def admin_setup(self):
option = self.side_bar.selectbox("Option:", ("Data Loader", "Advisors", "Ad-Hoc"))
execute = False
arg = None
self.st.title("Stocker Administration Area")
self.st.markdown("___")
if option == "Data Loader":
arg = dict()
self.st.header("Stocker Data Loader")
arg["symbols"] = self.st.selectbox("Stocks Option:", ("Sample", "S&P 100"))
self.st.markdown("<br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Company Loader")
self.show_message("st", "info", "Stock Loading: Load on our database information about the companies listed"
"on the Stocks Option selected")
if self.st.button("Load Stocks"):
execute = True
arg["loader"] = "company"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Price Loader")
self.show_message("st", "info", "Price Loading: Load on our database information about companies daily"
" prices, you can select a specific period")
arg["period"] = self.st.selectbox("Prices Period:", ("5y", "2y", "1y", "ytd", "6m", "3m", "1m", "5d"))
if self.st.button("Load Prices"):
execute = True
arg["loader"] = "price"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker News Loader")
self.show_message("st", "info", "News Loading: Load on our database information about the latest news of"
" companies which can impact the market")
if self.st.button("Load News"):
execute = True
arg["loader"] = "news"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Crypto Loader")
self.show_message("st", "info", "Crypto Loading: Load on our database information about all "
"cryptocurrencies available on the market")
if self.st.button("Load Crypto"):
execute = True
arg["loader"] = "crypto"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Full Loader")
self.show_message("st", "info", "Full Loading: Load on our database all information listed above: companies"
" prices, news and cryptocurrencies")
if self.st.button("Full Load"):
execute = True
arg["loader"] = "full"
elif option == "Ad-Hoc":
self.st.header("Ad-Hoc")
elif option == "Advisors":
sub_option = self.st.selectbox("Opções:", ("List Advisors", "Register Advisor", "Edit Advisor"))
self.st.markdown("___")
if sub_option == "List Advisors":
option = sub_option
execute = True
elif sub_option == "Register Advisor":
arg = self.advisor_form(None)
option = sub_option
if arg:
execute = True
elif sub_option == "Edit Advisor":
arg = self.st.text_input("CPF", max_chars=15, type='default', help="CPF: 123.123.123-12")
execute = True
option = sub_option
self.st.markdown("___")
return option, execute, arg
def advisor_form(self, advisor):
cols = self.st.beta_columns([0.5, 0.25, 0.25])
button = "Update Advisor" if advisor else "Register Advisor"
advisor = {
"name": cols[0].text_input("Nome", max_chars=30, type='default', help="Nome Completo",
value=advisor["name"]) if advisor
else cols[0].text_input("Nome", max_chars=30, type='default', help="Nome Completo"),
"username": cols[1].text_input("Usuário", max_chars=15, type='default', help="Usuário para login",
value=advisor["username"]) if advisor
else cols[1].text_input("Usuário", max_chars=15, type='default', help="Usuário para login"),
"password": cols[2].text_input("Senha", max_chars=15, type='password', help="Senha para login"),
"cpf": advisor["cpf"] if advisor
else cols[2].text_input("CPF", max_chars=15, type='default', help="CPF: 123.123.123-12"),
"cvm_license": cols[1].text_input("Lincença CVM", max_chars=10, type='default',
value=advisor["cvm_license"]) if advisor
else cols[1].text_input("Lincença CVM", max_chars=10, type='default'),
"email": cols[0].text_input("Email", max_chars=30, type='default', value=advisor["email"]) if advisor
else cols[0].text_input("Email", max_chars=30, type='default'),
"profile": "advisor"
}
register = self.st.button(button)
self.st.markdown("___")
filled = True
for b in advisor.values():
if not b:
filled = False
if register:
if not filled:
self.show_message("st", "warning", "Preencha todos os campos")
else:
return advisor
@staticmethod
def plot_bar(companies, x, y, title, color):
df = pd.DataFrame(companies)
fig = px.bar(df, x=x, y=y,
color=color, title=title,
color_discrete_sequence=px.colors.qualitative.Pastel,
height=400)
return fig
@staticmethod
def plot_bar2(companies, y, title):
df = pd.DataFrame(companies)[["symbol", y]]
r = df[y].value_counts()
fig = go.Figure(data=[go.Bar(x=df[y], y=r)])
fig.update_layout(
height=400,
title=title
)
return fig
@staticmethod
def plot_pie(companies, y, title):
df = pd.DataFrame(companies)[["symbol", y]]
r = df[y].value_counts()
fig = go.Figure(data=[go.Pie(labels=df[y], values=r)])
fig.update_layout(
height=400,
title=title
)
return fig
@staticmethod
def plot_highest_emp(highest_emp):
fig = go.Figure(data=[go.Indicator(
mode="number+delta",
value=highest_emp[0][1],
title={
"text": f"{highest_emp[0][0]}<br><span style='font-size:0.8em;color:gray'>Highest number</span><br>"
f"<span style='font-size:0.8em;color:gray'>of employees</span>"},
)])
return fig
@staticmethod
def plot_information_companies(cols, companies):
logos = [company[1] for company in companies]
names = [company[0] for company in companies]
for idx, logo in enumerate(logos):
col = 2 if idx % 2 == 0 else 3
cols[col].image(logo, width=50)
for idx, name in enumerate(names):
col = 0 if idx % 2 == 0 else 1
cols[col].markdown(f"**Name: ** {name}")
@staticmethod
def plot_notusa_companies(cols, companies):
for company in companies:
cols[0].markdown(f"**Name: ** {company[0]}")
cols[1].markdown(f"**Country: ** {company[2]}")
cols[2].image(company[1], width=50)
@staticmethod
def plot_insight_prices(k, v):
fig = go.Figure(data=[go.Indicator(
mode="number+delta",
value=v[0][1],
title={
"text": f"{v[0][0]}<br><span style='font-size:0.8em;color:gray'>{k.split('_')[0].capitalize()} {k.split('_')[1].capitalize()}</span><br>"
f"<span style='font-size:0.8em;color:gray'>{v[0][2]}</span>"},
)])
return fig
def plot_company_ad_hoc(self, results):
companies = results["company"]["specific"]
highest_emp = results["company"]["insights"]["highest_emp"]
information = results["company"]["insights"]["tech"]
not_usa = results["company"]["insights"]["not_us"]
fields = results["company"]["fields"]
if companies:
if not "symbol" in fields:
self.st.warning("Be sure to select the symbol option")
else:
self.show_companies(companies)
col = self.st.beta_columns(2)
if "employees" in fields:
fig1 = self.plot_bar(companies, "symbol", "employees", "Number of employees by company", "employees")
col[0].plotly_chart(fig1, use_container_width=True)
if "state" in fields:
fig2 = self.plot_bar2(companies, "state", "State distribution")
col[1].plotly_chart(fig2, use_container_width=True)
col2 = self.st.beta_columns(2)
if "sector" in fields:
fig3 = self.plot_pie(companies, "sector", "Companies by sector")
col2[0].plotly_chart(fig3, use_container_width=True)
if "industry" in fields:
fig4 = self.plot_pie(companies, "industry", "Companies by industry")
col2[1].plotly_chart(fig4, use_container_width=True)
if highest_emp:
fig5 = self.plot_highest_emp(highest_emp)
self.st.plotly_chart(fig5, use_container_width=True)
if information:
self.st.markdown("___")
title_col = self.st.beta_columns(1)
cols4 = self.st.beta_columns([1, 1, 0.2, 0.2])
title_col[0].subheader("Information sector companies")
self.plot_information_companies(cols4, information)
if not_usa:
self.st.markdown("___")
title_col2 = self.st.beta_columns(1)
title_col2[0].subheader("Nasdaq listed companies outside USA")
cols5 = self.st.beta_columns(4)
self.plot_notusa_companies(cols5, not_usa)
def plot_price_ad_hoc(self, results):
if not results["price"]["specific"].empty:
self.st.markdown("___")
dfs = list()
for company in results["price"]["company_list"]:
mask = (results["price"]["specific"]["symbol"] == company)
dfs.append(results["price"]["specific"][mask])
self.plot_price(dfs, results["price"]["type"][0])
self.st.markdown("___")
c = 0
cols = self.st.beta_columns(len(results["price"]["insights"].keys()))
for k, val in results["price"]["insights"].items():
if val:
cols[c].plotly_chart(self.plot_insight_prices(k, val), use_container_width=True)
c += 1
def plot_news_ad_hoc(self, results):
if results["news"]["filter"]:
self.show_news(results["news"]["filter"], "Filtered News")
if results["news"]["insights"]:
news_fields = ("id", "symbol", "date", "title", "source", "url", "description", "image")
latest = results["news"]["insights"][0]
latest_news = dict()
for idx, v in enumerate(latest):
latest_news[news_fields[idx]] = v
self.show_news([latest], f"Latest news - {latest['symbol']} - {latest['date']}")
def plot_crypto_ad_hoc(self, results):
if results["crypto"]:
self.st.markdown("___")
self.show_cryptos(results["crypto"])
def ad_hoc_plot(self, results):
self.plot_company_ad_hoc(results)
self.plot_price_ad_hoc(results)
self.plot_news_ad_hoc(results)
self.plot_crypto_ad_hoc(results)
def ad_hoc_form(self, symbols):
company_fields = ("symbol", "name", "exchange", "industry", "website", "description", "CEO", "sector",
"employees", "state", "city", "country", "logo")
news_fields = ("symbol", "date", "title", "source", "url", "description", "image")
ad_hoc = self.default_ad_hoc()
self.st.markdown("___")
self.st.markdown(f"**Company Options:**")
cols = self.st.beta_columns([2, 1, 1])
cols[0].markdown(f"**Specific company views:**")
ad_hoc["company"]["specific"]["company_list"] = cols[0].multiselect("Stocks list:", sum(symbols, []))
ad_hoc["company"]["specific"]["fields"] = cols[0].multiselect("Information:", company_fields)
filter_cols = self.st.beta_columns(6)
ad_hoc["company"]["specific"]["order_by"] = filter_cols[0].selectbox("Order By:", ad_hoc["company"]["specific"]["fields"]),
ad_hoc["company"]["specific"]["order_method"] = filter_cols[1].selectbox("Order Method:", ("Ascending", "Descending")),
ad_hoc["company"]["specific"]["limit"] = filter_cols[2].number_input("Number of results:", value=1, min_value=1, max_value=100),
ad_hoc["company"]["specific"]["rule_filter"] = {}
cols[1].markdown(f"**Insights views:**")
cols[2].markdown(f"**-**")
cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["highest_emp"] = cols[1].checkbox("Highest employees number")
cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["tech"] = cols[1].checkbox("Information Companies")
cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["not_us"] = cols[2].checkbox("Outside USA")
cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["specific"]["rule_filter"]["apply"] = cols[2].checkbox("Rule filter")
if ad_hoc["company"]["specific"]["rule_filter"]["apply"]:
ad_hoc["company"]["specific"]["rule_filter"]["field"] = filter_cols[0].selectbox(
"Filter Field:", ("symbol", "name", "employees"))
ad_hoc["company"]["specific"]["rule_filter"]["operation"] = filter_cols[1].selectbox(
"Operation", ("Greater than", "Less than", "Equals to") if
ad_hoc["company"]["specific"]["rule_filter"]["field"] == "employees" else ("Equals to", ))
ad_hoc["company"]["specific"]["rule_filter"]["value"] = filter_cols[2].number_input("Value: ") \
if ad_hoc["company"]["specific"]["rule_filter"]["field"] == "employees"\
else filter_cols[2].text_input("Value: ")
self.st.markdown("___")
self.st.markdown(f"**Prices Options:**")
price_cols = self.st.beta_columns([2, 1, 1])
price_cols[0].markdown(f"**Specific price views:**")
ad_hoc["price"]["specific"]["company_list"] = price_cols[0].multiselect("Price Stocks:", sum(symbols, []))
filter_price_cols = self.st.beta_columns(6)
ad_hoc["price"]["specific"]["start_date"] = filter_price_cols[0].date_input("Start Date:")
ad_hoc["price"]["specific"]["end_date"] = filter_price_cols[1].date_input("End Date:")
ad_hoc["price"]["specific"]["type"] = filter_price_cols[2].selectbox("Price Type:", ("close", "open", "high", "low")),
price_cols[1].markdown(f"**Insights views:**")
price_cols[2].markdown(f"**-**")
price_cols[1].markdown("<br>", unsafe_allow_html=True)
price_cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["highest_close"] = price_cols[1].checkbox("Highest close price")
price_cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["lowest_close"] = price_cols[2].checkbox("Lowest close price")
ad_hoc["price"]["insights"]["highest_volume"] = price_cols[1].checkbox("Highest volume")
price_cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["lowest_volume"] = price_cols[2].checkbox("Lowest volume")
self.st.markdown("___")
self.st.markdown(f"**News Options:**")
news_cols = self.st.beta_columns([2, 1, 1, 1])
news_cols[0].markdown(f"**Specific news views:**")
news_cols[1].markdown("-<br>", unsafe_allow_html=True)
news_cols[2].markdown("-<br>", unsafe_allow_html=True)
news_cols[3].markdown("-<br>", unsafe_allow_html=True)
ad_hoc["news"]["company_list"] = news_cols[0].multiselect("News Stocks:", sum(symbols, []))
ad_hoc["news"]["fields"] = news_cols[0].multiselect("News Info:", news_fields)
ad_hoc["news"]["date"] = news_cols[1].date_input("Date:")
ad_hoc["news"]["filter_date"] = news_cols[2].selectbox("Filter Date as:", ("On", "Starting from", "Until"))
ad_hoc["news"]["order_by"] = news_cols[1].selectbox("Order by field:", ad_hoc["news"]["fields"])
ad_hoc["news"]["order_method"] = news_cols[2].selectbox("Order results:", ("Ascending", "Descending"))
ad_hoc["news"]["limit"] = news_cols[3].number_input("Limit of results:", value=1, min_value=1, max_value=100)
ad_hoc["news"]["latest"] = news_cols[3].checkbox("Latest News")
self.st.markdown("___")
self.st.markdown(f"**Crypto Options:**")
crypto_col = self.st.beta_columns([2, 0.5, 1])
ad_hoc["crypto"]["name"] = crypto_col[0].text_input("Cryptocurrency")
ad_hoc["crypto"]["limit"] = crypto_col[1].number_input("Limit of crypto:", value=1, min_value=1, max_value=100)
generate = self.st.button("Generate Report")
if generate:
return ad_hoc
@staticmethod
def default_ad_hoc():
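        # Empty ad-hoc report template; ad_hoc_form() fills these selections in before the report is generated.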
return {
"company": {
"specific": {
"company_list": [],
"fields": [],
"order_by": None,
"order_method": None,
"limit": None,
"rule_filter": {
"apply": False,
"field": None,
"operation": None,
"value": None
}
},
"insights": {
"highest_emp": False,
"tech": False,
"not_us": False
}
},
"news": {
"company_list": [],
"date": None,
"filter_date": None,
},
"price": {
"specific": {
"company_list": [],
"type": None,
"start_date": None,
"end_date": None
},
"insights": {
"highest_close": False,
"lowest_close": False,
"highest_volume": False,
"lowest_volume": False,
}
},
"crypto": {
"name": None,
"limit": None
}
} | [((87, 14, 87, 25), 'plotly.graph_objects.Figure', 'go.Figure', ({}, {}), '()', True, 'import plotly.graph_objects as go\n'), ((282, 13, 282, 36), 'pandas.DataFrame', 'pd.DataFrame', ({(282, 26, 282, 35): 'companies'}, {}), '(companies)', True, 'import pandas as pd\n'), ((283, 14, 286, 32), 'plotly.express.bar', 'px.bar', (), '', True, 'import plotly.express as px\n'), ((291, 13, 291, 36), 'pandas.DataFrame', 'pd.DataFrame', ({(291, 26, 291, 35): 'companies'}, {}), '(companies)', True, 'import pandas as pd\n'), ((302, 13, 302, 36), 'pandas.DataFrame', 'pd.DataFrame', ({(302, 26, 302, 35): 'companies'}, {}), '(companies)', True, 'import pandas as pd\n'), ((90, 26, 92, 47), 'plotly.graph_objects.Scatter', 'go.Scatter', (), '', True, 'import plotly.graph_objects as go\n'), ((79, 30, 79, 62), 'plotly.graph_objects.Pie', 'go.Pie', (), '', True, 'import plotly.graph_objects as go\n'), ((293, 30, 293, 50), 'plotly.graph_objects.Bar', 'go.Bar', (), '', True, 'import plotly.graph_objects as go\n'), ((304, 30, 304, 60), 'plotly.graph_objects.Pie', 'go.Pie', (), '', True, 'import plotly.graph_objects as go\n'), ((313, 30, 319, 13), 'plotly.graph_objects.Indicator', 'go.Indicator', (), '', True, 'import plotly.graph_objects as go\n')] |
ProhardONE/python_primer | ch_4/stopping_length.py | 211e37c1f2fd169269fc4f3c08e8b7e5225f2ad0 | # Exercise 4.11
# Author: Noah Waterfield Price
import sys
g = 9.81 # acceleration due to gravity
try:
# initial velocity (convert to m/s)
v0 = (1000. / 3600) * float(sys.argv[1])
mu = float(sys.argv[2]) # coefficient of friction
except IndexError:
    print 'Both v0 (in km/h) and mu must be supplied on the command line'
v0 = (1000. / 3600) * float(raw_input('v0 = ?\n'))
mu = float(raw_input('mu = ?\n'))
except ValueError:
print 'v0 and mu must be pure numbers'
sys.exit(1)
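# stopping distance from the energy balance 0.5*m*v0**2 = mu*m*g*d, i.e. d = v0**2 / (2*mu*g)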
d = 0.5 * v0 ** 2 / mu / g
print d
"""
Sample run:
python stopping_length.py 120 0.3
188.771850342
python stopping_length.py 50 0.3
32.7728906843
"""
| [] |
GeorgeIoak/Oden | TestFiles/volumioTest.py | 9bb6a5811e2ea40ceef67e46bc56eab1be9ce06c | # Testing code to check update status on demand
from socketIO_client import SocketIO, LoggingNamespace
from threading import Thread
socketIO = SocketIO('localhost', 3000)
status = 'pause'
def on_push_state(*args):
print('state', args)
global status, position, duration, seek
status = args[0]['status'].encode('ascii', 'ignore')
seek = args[0]['seek']
duration = args[0]['duration']
if duration:
position = int(seek / 1000)
else:
position = 0
print("status", status, "position", position)
def _receive_thread():
socketIO.wait()
receive_thread = Thread(target=_receive_thread, daemon=True)
receive_thread.start()
socketIO.on('pushState', on_push_state)
# emit the state request; the socketIO.wait() running in the background thread delivers the pushed reply to on_push_state
socketIO.emit('getState', '', on_push_state) | [((5, 11, 5, 38), 'socketIO_client.SocketIO', 'SocketIO', ({(5, 20, 5, 31): '"""localhost"""', (5, 33, 5, 37): '3000'}, {}), "('localhost', 3000)", False, 'from socketIO_client import SocketIO, LoggingNamespace\n'), ((23, 17, 23, 60), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n')] |