repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-840k) |
---|---|---|---|---|
superhasduper/PythonGames | levels/sombie.py | 64995d3e0b619006a2cf80d0da3c0fdf97db6fd9 | import arcade
import os
SPRITE_SCALING = 0.5
SPRITE_NATIVE_SIZE = 128
SPRITE_SIZE = int(SPRITE_NATIVE_SIZE * SPRITE_SCALING)
SCREEN_WIDTH = SPRITE_SIZE * 14
SCREEN_HEIGHT = SPRITE_SIZE * 10
MOVEMENT_SPEED = 5
COIN_SCALE = 0.7
class Room:
"""
This class holds all the information about the
different rooms.
"""
def __init__(self):
# You may want many lists. Lists for coins, monsters, etc.
self.wall_list = None
self.coin_list = None
self.door_list = None
self.smallpotion_list = None
self.bigpotion_list = None
# This holds the background images. If you don't want changing
# background images, you can delete this part.
self.background = None
self.score = 0
def setup_room_1():
"""
Create and return room 1.
If your program gets large, you may want to separate this into different
files.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.wall_list = arcade.SpriteList()
room.door_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
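# -- Set up the walls
# Create bottom and top row of boxes
# This y loops a list of two: the coordinate 0, and just under the top of the window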
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up on the right side
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
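# Place fence "door" sprites: in the two-block gap on the right wall, and (via the x == 0 branch) along the left column as well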
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
if not (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
door = arcade.Sprite("fence.png", SPRITE_SCALING)
door.left = x
door.bottom = y
room.door_list.append(door)
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 5 * SPRITE_SIZE
room.wall_list.append(wall)
# If you want coins or monsters in a level, then add that code here.
# Load the background image for this level.
room.background = arcade.load_texture("g.png")
for i in range(300,600,75):
coin = arcade.Sprite("coin.png",COIN_SCALE)
coin.center_x = i
coin.center_y = 500
room.coin_list.append(coin)
smallpotion = arcade.Sprite("big.png",0.05)
smallpotion.center_x = 100
smallpotion.center_y = 900
room.smallpotion_list.append(smallpotion)
return room
def setup_room_2():
"""
Create and return room 2.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.door_list = arcade.SpriteList()
room.wall_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
# -- Set up the walls
# Create bottom and top row of boxes
# This y loops a list of two, the coordinate 0, and just under the top of window
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x != 0:
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 6 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 3 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 5 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom =3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 0.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 7.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 8 * SPRITE_SIZE
room.wall_list.append(wall)
room.background = arcade.load_texture("g.png")
bigpotion = arcade.Sprite("small.png",0.05)
bigpotion.center_x = 800
bigpotion.center_y = 100
room.bigpotion_list.append(bigpotion)
return room
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height):
"""
Initializer
"""
super().__init__(width, height,"Tocate el pnnywise")
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Sprite lists
self.current_room = 0
# Set up the player
self.game_over = False
self.door_list = None
self.rooms = None
self.score = 0
self.coin_list = None
self.player_sprite = None
self.physics_engine = None
self.smallpotion_list = None
self.bigpotion_list = None
def setup(self):
""" Set up the game and initialize the variables. """
# Set up the player
self.player_sprite = arcade.AnimatedWalkingSprite()
self.score = 0
self.coin_list = arcade.SpriteList()
self.smallpotion_list = arcade.SpriteList()
self.bigpotion_list = arcade.SpriteList()
self.player_sprite.center_x = 100
self.player_sprite.center_y = 150
character_scale = 0.75
self.player_sprite.stand_right_textures = []
self.player_sprite.stand_right_textures.append(arcade.load_texture("zombie_stand.png",
scale=character_scale))
self.player_sprite.stand_left_textures = []
self.player_sprite.stand_left_textures.append(arcade.load_texture("zombie_stand.png",
scale=character_scale, mirrored=True))
self.player_sprite.walk_right_textures = []
self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk1.png",
scale=character_scale))
self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk2.png",
scale=character_scale))
self.player_sprite.walk_left_textures = []
self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk1.png",
scale=character_scale, mirrored=True))
self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk2.png",
scale=character_scale, mirrored=True))
# Our list of rooms
self.rooms = []
# Create the rooms. Extend the pattern for each room.
room = setup_room_1()
self.rooms.append(room)
room = setup_room_2()
self.rooms.append(room)
# Our starting room number
self.current_room = 0
# Create a physics engine for this room
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)
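# Note: the next line immediately replaces this engine, so only the door list blocks the player until a room change rebuilds the engine with the wall list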
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].door_list)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw the background texture
arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,
SCREEN_WIDTH, SCREEN_HEIGHT, self.rooms[self.current_room].background)
# Draw all the walls in this room
self.rooms[self.current_room].door_list.draw()
self.rooms[self.current_room].wall_list.draw()
self.rooms[self.current_room].coin_list.draw()
self.rooms[self.current_room].bigpotion_list.draw()
self.rooms[self.current_room].smallpotion_list.draw()
# If you have coins or monsters, then copy and modify the line
# above for each list.
output = "Score: {}".format(self.score)
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
self.player_sprite.draw()
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.W:
self.player_sprite.change_y = MOVEMENT_SPEED
elif key == arcade.key.S:
self.player_sprite.change_y = -MOVEMENT_SPEED
elif key == arcade.key.A:
self.player_sprite.change_x = -MOVEMENT_SPEED
elif key == arcade.key.D:
self.player_sprite.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.W or key == arcade.key.S:
self.player_sprite.change_y = 0
elif key == arcade.key.A or key == arcade.key.D:
self.player_sprite.change_x = 0
def update(self, delta_time):
""" Movement and game logic """
self.player_sprite.update_animation()
# Call update on all sprites (The sprites don't do much in this
# example though.)
self.physics_engine.update()
# Do some logic here to figure out what room we are in, and if we need to go
# to a different room.
if self.player_sprite.center_x > SCREEN_WIDTH and self.current_room == 0:
self.current_room = 1
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = 0
elif self.player_sprite.center_x < 0 and self.current_room == 1:
self.current_room = 0
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = SCREEN_WIDTH
hit_list = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].coin_list)
hit_list2 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].bigpotion_list)
hit_list3 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].smallpotion_list)
for coin in hit_list:
coin.kill()
self.score += 1
my_sound = arcade.load_sound("coinsound.wav")
arcade.play_sound(my_sound)
if self.score == 4:
for i in self.rooms[self.current_room].door_list:
i.kill()
your_sound = arcade.load_sound("door.wav")
arcade.play_sound(your_sound)
for smallpotion in hit_list3:
smallpotion.kill()
self.player_sprite.scale=0.5
tu_sound = arcade.load_sound("shrink.wav")
arcade.play_sound(tu_sound)
for bigpotion in hit_list2:
bigpotion.kill()
self.player_sprite.scale=1
yo_sound = arcade.load_sound("grow.wav")
arcade.play_sound(yo_sound)
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT)
window.setup()
arcade.run()
if __name__ == "__main__":
main() | [((45, 21, 45, 40), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((46, 21, 46, 40), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((47, 21, 47, 40), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((49, 28, 49, 47), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((50, 26, 50, 45), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((79, 11, 79, 59), 'arcade.Sprite', 'arcade.Sprite', ({(79, 25, 79, 42): '"""gravel_dirt.png"""', (79, 44, 79, 58): 'SPRITE_SCALING'}, {}), "('gravel_dirt.png', SPRITE_SCALING)", False, 'import arcade\n'), ((87, 22, 87, 50), 'arcade.load_texture', 'arcade.load_texture', ({(87, 42, 87, 49): '"""g.png"""'}, {}), "('g.png')", False, 'import arcade\n'), ((94, 18, 94, 47), 'arcade.Sprite', 'arcade.Sprite', ({(94, 32, 94, 41): '"""big.png"""', (94, 42, 94, 46): '0.05'}, {}), "('big.png', 0.05)", False, 'import arcade\n'), ((109, 21, 109, 40), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((110, 21, 110, 40), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((111, 21, 111, 40), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((112, 28, 112, 47), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((113, 26, 113, 45), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((136, 11, 136, 58), 'arcade.Sprite', 'arcade.Sprite', ({(136, 25, 136, 41): '"""stone_snow.png"""', (136, 43, 136, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((141, 11, 141, 58), 'arcade.Sprite', 'arcade.Sprite', ({(141, 25, 141, 41): '"""stone_snow.png"""', (141, 43, 141, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((146, 11, 146, 58), 'arcade.Sprite', 'arcade.Sprite', ({(146, 25, 146, 41): '"""stone_snow.png"""', (146, 43, 146, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((151, 11, 151, 58), 'arcade.Sprite', 'arcade.Sprite', ({(151, 25, 151, 41): '"""stone_snow.png"""', (151, 43, 151, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((156, 11, 156, 58), 'arcade.Sprite', 'arcade.Sprite', ({(156, 25, 156, 41): '"""stone_snow.png"""', (156, 43, 156, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((161, 11, 161, 58), 'arcade.Sprite', 'arcade.Sprite', ({(161, 25, 161, 41): '"""stone_snow.png"""', (161, 43, 161, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((166, 11, 166, 58), 'arcade.Sprite', 'arcade.Sprite', ({(166, 25, 166, 41): '"""stone_snow.png"""', (166, 43, 166, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((171, 11, 171, 58), 'arcade.Sprite', 'arcade.Sprite', ({(171, 25, 171, 41): '"""stone_snow.png"""', (171, 43, 171, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((176, 11, 176, 58), 'arcade.Sprite', 'arcade.Sprite', ({(176, 25, 176, 41): '"""stone_snow.png"""', (176, 43, 176, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((181, 11, 181, 58), 'arcade.Sprite', 'arcade.Sprite', ({(181, 25, 181, 41): 
'"""stone_snow.png"""', (181, 43, 181, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((186, 11, 186, 58), 'arcade.Sprite', 'arcade.Sprite', ({(186, 25, 186, 41): '"""stone_snow.png"""', (186, 43, 186, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((191, 11, 191, 58), 'arcade.Sprite', 'arcade.Sprite', ({(191, 25, 191, 41): '"""stone_snow.png"""', (191, 43, 191, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((196, 11, 196, 58), 'arcade.Sprite', 'arcade.Sprite', ({(196, 25, 196, 41): '"""stone_snow.png"""', (196, 43, 196, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((201, 11, 201, 58), 'arcade.Sprite', 'arcade.Sprite', ({(201, 25, 201, 41): '"""stone_snow.png"""', (201, 43, 201, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((206, 11, 206, 58), 'arcade.Sprite', 'arcade.Sprite', ({(206, 25, 206, 41): '"""stone_snow.png"""', (206, 43, 206, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((211, 11, 211, 58), 'arcade.Sprite', 'arcade.Sprite', ({(211, 25, 211, 41): '"""stone_snow.png"""', (211, 43, 211, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((216, 11, 216, 58), 'arcade.Sprite', 'arcade.Sprite', ({(216, 25, 216, 41): '"""stone_snow.png"""', (216, 43, 216, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((221, 11, 221, 58), 'arcade.Sprite', 'arcade.Sprite', ({(221, 25, 221, 41): '"""stone_snow.png"""', (221, 43, 221, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((226, 11, 226, 58), 'arcade.Sprite', 'arcade.Sprite', ({(226, 25, 226, 41): '"""stone_snow.png"""', (226, 43, 226, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((231, 11, 231, 58), 'arcade.Sprite', 'arcade.Sprite', ({(231, 25, 231, 41): '"""stone_snow.png"""', (231, 43, 231, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((237, 11, 237, 58), 'arcade.Sprite', 'arcade.Sprite', ({(237, 25, 237, 41): '"""stone_snow.png"""', (237, 43, 237, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((242, 11, 242, 58), 'arcade.Sprite', 'arcade.Sprite', ({(242, 25, 242, 41): '"""stone_snow.png"""', (242, 43, 242, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((247, 11, 247, 58), 'arcade.Sprite', 'arcade.Sprite', ({(247, 25, 247, 41): '"""stone_snow.png"""', (247, 43, 247, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((252, 11, 252, 58), 'arcade.Sprite', 'arcade.Sprite', ({(252, 25, 252, 41): '"""stone_snow.png"""', (252, 43, 252, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((257, 11, 257, 58), 'arcade.Sprite', 'arcade.Sprite', ({(257, 25, 257, 41): '"""stone_snow.png"""', (257, 43, 257, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((262, 11, 262, 58), 'arcade.Sprite', 'arcade.Sprite', ({(262, 25, 262, 41): '"""stone_snow.png"""', (262, 43, 262, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((267, 11, 267, 58), 'arcade.Sprite', 'arcade.Sprite', 
({(267, 25, 267, 41): '"""stone_snow.png"""', (267, 43, 267, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((272, 11, 272, 58), 'arcade.Sprite', 'arcade.Sprite', ({(272, 25, 272, 41): '"""stone_snow.png"""', (272, 43, 272, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((277, 11, 277, 58), 'arcade.Sprite', 'arcade.Sprite', ({(277, 25, 277, 41): '"""stone_snow.png"""', (277, 43, 277, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((282, 11, 282, 58), 'arcade.Sprite', 'arcade.Sprite', ({(282, 25, 282, 41): '"""stone_snow.png"""', (282, 43, 282, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((287, 11, 287, 58), 'arcade.Sprite', 'arcade.Sprite', ({(287, 25, 287, 41): '"""stone_snow.png"""', (287, 43, 287, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((292, 11, 292, 58), 'arcade.Sprite', 'arcade.Sprite', ({(292, 25, 292, 41): '"""stone_snow.png"""', (292, 43, 292, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((297, 11, 297, 58), 'arcade.Sprite', 'arcade.Sprite', ({(297, 25, 297, 41): '"""stone_snow.png"""', (297, 43, 297, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((303, 11, 303, 58), 'arcade.Sprite', 'arcade.Sprite', ({(303, 25, 303, 41): '"""stone_snow.png"""', (303, 43, 303, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((308, 11, 308, 58), 'arcade.Sprite', 'arcade.Sprite', ({(308, 25, 308, 41): '"""stone_snow.png"""', (308, 43, 308, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((313, 11, 313, 58), 'arcade.Sprite', 'arcade.Sprite', ({(313, 25, 313, 41): '"""stone_snow.png"""', (313, 43, 313, 57): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((317, 22, 317, 50), 'arcade.load_texture', 'arcade.load_texture', ({(317, 42, 317, 49): '"""g.png"""'}, {}), "('g.png')", False, 'import arcade\n'), ((319, 16, 319, 47), 'arcade.Sprite', 'arcade.Sprite', ({(319, 30, 319, 41): '"""small.png"""', (319, 42, 319, 46): '0.05'}, {}), "('small.png', 0.05)", False, 'import arcade\n'), ((508, 4, 508, 16), 'arcade.run', 'arcade.run', ({}, {}), '()', False, 'import arcade\n'), ((89, 15, 89, 51), 'arcade.Sprite', 'arcade.Sprite', ({(89, 29, 89, 39): '"""coin.png"""', (89, 40, 89, 50): 'COIN_SCALE'}, {}), "('coin.png', COIN_SCALE)", False, 'import arcade\n'), ((340, 8, 340, 27), 'os.chdir', 'os.chdir', ({(340, 17, 340, 26): 'file_path'}, {}), '(file_path)', False, 'import os\n'), ((358, 30, 358, 60), 'arcade.AnimatedWalkingSprite', 'arcade.AnimatedWalkingSprite', ({}, {}), '()', False, 'import arcade\n'), ((360, 25, 360, 44), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((361, 32, 361, 51), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((362, 30, 362, 49), 'arcade.SpriteList', 'arcade.SpriteList', ({}, {}), '()', False, 'import arcade\n'), ((402, 30, 402, 117), 'arcade.PhysicsEngineSimple', 'arcade.PhysicsEngineSimple', ({(402, 57, 402, 75): 'self.player_sprite', (402, 77, 402, 116): 'self.rooms[self.current_room].wall_list'}, {}), '(self.player_sprite, self.rooms[self.current_room\n ].wall_list)', False, 'import arcade\n'), ((403, 30, 403, 117), 
'arcade.PhysicsEngineSimple', 'arcade.PhysicsEngineSimple', ({(403, 57, 403, 75): 'self.player_sprite', (403, 77, 403, 116): 'self.rooms[self.current_room].door_list'}, {}), '(self.player_sprite, self.rooms[self.current_room\n ].door_list)', False, 'import arcade\n'), ((414, 8, 414, 29), 'arcade.start_render', 'arcade.start_render', ({}, {}), '()', False, 'import arcade\n'), ((417, 8, 418, 108), 'arcade.draw_texture_rectangle', 'arcade.draw_texture_rectangle', ({(417, 38, 417, 55): '(SCREEN_WIDTH // 2)', (417, 57, 417, 75): '(SCREEN_HEIGHT // 2)', (418, 38, 418, 50): 'SCREEN_WIDTH', (418, 52, 418, 65): 'SCREEN_HEIGHT', (418, 67, 418, 107): 'self.rooms[self.current_room].background'}, {}), '(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,\n SCREEN_WIDTH, SCREEN_HEIGHT, self.rooms[self.current_room].background)', False, 'import arcade\n'), ((431, 8, 431, 64), 'arcade.draw_text', 'arcade.draw_text', ({(431, 25, 431, 31): 'output', (431, 33, 431, 35): '(10)', (431, 37, 431, 39): '(20)', (431, 41, 431, 59): 'arcade.color.WHITE', (431, 61, 431, 63): '(14)'}, {}), '(output, 10, 20, arcade.color.WHITE, 14)', False, 'import arcade\n'), ((475, 19, 475, 115), 'arcade.check_for_collision_with_list', 'arcade.check_for_collision_with_list', ({(475, 56, 475, 74): 'self.player_sprite', (475, 75, 475, 114): 'self.rooms[self.current_room].coin_list'}, {}), '(self.player_sprite, self.rooms[self.\n current_room].coin_list)', False, 'import arcade\n'), ((476, 20, 476, 121), 'arcade.check_for_collision_with_list', 'arcade.check_for_collision_with_list', ({(476, 57, 476, 75): 'self.player_sprite', (476, 76, 476, 120): 'self.rooms[self.current_room].bigpotion_list'}, {}), '(self.player_sprite, self.rooms[self.\n current_room].bigpotion_list)', False, 'import arcade\n'), ((477, 20, 477, 123), 'arcade.check_for_collision_with_list', 'arcade.check_for_collision_with_list', ({(477, 57, 477, 75): 'self.player_sprite', (477, 76, 477, 122): 'self.rooms[self.current_room].smallpotion_list'}, {}), '(self.player_sprite, self.rooms[self.\n current_room].smallpotion_list)', False, 'import arcade\n'), ((55, 19, 55, 67), 'arcade.Sprite', 'arcade.Sprite', ({(55, 33, 55, 50): '"""gravel_dirt.png"""', (55, 52, 55, 66): 'SPRITE_SCALING'}, {}), "('gravel_dirt.png', SPRITE_SCALING)", False, 'import arcade\n'), ((120, 19, 120, 66), 'arcade.Sprite', 'arcade.Sprite', ({(120, 33, 120, 49): '"""stone_snow.png"""', (120, 51, 120, 65): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((339, 36, 339, 61), 'os.path.abspath', 'os.path.abspath', ({(339, 52, 339, 60): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((367, 55, 368, 90), 'arcade.load_texture', 'arcade.load_texture', (), '', False, 'import arcade\n'), ((370, 54, 371, 104), 'arcade.load_texture', 'arcade.load_texture', (), '', False, 'import arcade\n'), ((375, 54, 376, 89), 'arcade.load_texture', 'arcade.load_texture', (), '', False, 'import arcade\n'), ((377, 54, 378, 89), 'arcade.load_texture', 'arcade.load_texture', (), '', False, 'import arcade\n'), ((382, 53, 383, 103), 'arcade.load_texture', 'arcade.load_texture', (), '', False, 'import arcade\n'), ((384, 53, 385, 103), 'arcade.load_texture', 'arcade.load_texture', (), '', False, 'import arcade\n'), ((467, 34, 468, 101), 'arcade.PhysicsEngineSimple', 'arcade.PhysicsEngineSimple', ({(467, 61, 467, 79): 'self.player_sprite', (468, 61, 468, 100): 'self.rooms[self.current_room].wall_list'}, {}), '(self.player_sprite, self.rooms[self.current_room\n ].wall_list)', False, 'import arcade\n'), 
((482, 23, 482, 57), 'arcade.load_sound', 'arcade.load_sound', ({(482, 41, 482, 56): '"""coinsound.wav"""'}, {}), "('coinsound.wav')", False, 'import arcade\n'), ((483, 12, 483, 39), 'arcade.play_sound', 'arcade.play_sound', ({(483, 30, 483, 38): 'my_sound'}, {}), '(my_sound)', False, 'import arcade\n'), ((493, 23, 493, 54), 'arcade.load_sound', 'arcade.load_sound', ({(493, 41, 493, 53): '"""shrink.wav"""'}, {}), "('shrink.wav')", False, 'import arcade\n'), ((494, 12, 494, 39), 'arcade.play_sound', 'arcade.play_sound', ({(494, 30, 494, 38): 'tu_sound'}, {}), '(tu_sound)', False, 'import arcade\n'), ((499, 23, 499, 52), 'arcade.load_sound', 'arcade.load_sound', ({(499, 41, 499, 51): '"""grow.wav"""'}, {}), "('grow.wav')", False, 'import arcade\n'), ((500, 12, 500, 39), 'arcade.play_sound', 'arcade.play_sound', ({(500, 30, 500, 38): 'yo_sound'}, {}), '(yo_sound)', False, 'import arcade\n'), ((66, 23, 66, 71), 'arcade.Sprite', 'arcade.Sprite', ({(66, 37, 66, 54): '"""gravel_dirt.png"""', (66, 56, 66, 70): 'SPRITE_SCALING'}, {}), "('gravel_dirt.png', SPRITE_SCALING)", False, 'import arcade\n'), ((74, 23, 74, 65), 'arcade.Sprite', 'arcade.Sprite', ({(74, 37, 74, 48): '"""fence.png"""', (74, 50, 74, 64): 'SPRITE_SCALING'}, {}), "('fence.png', SPRITE_SCALING)", False, 'import arcade\n'), ((131, 23, 131, 70), 'arcade.Sprite', 'arcade.Sprite', ({(131, 37, 131, 53): '"""stone_snow.png"""', (131, 55, 131, 69): 'SPRITE_SCALING'}, {}), "('stone_snow.png', SPRITE_SCALING)", False, 'import arcade\n'), ((472, 34, 473, 101), 'arcade.PhysicsEngineSimple', 'arcade.PhysicsEngineSimple', ({(472, 61, 472, 79): 'self.player_sprite', (473, 61, 473, 100): 'self.rooms[self.current_room].wall_list'}, {}), '(self.player_sprite, self.rooms[self.current_room\n ].wall_list)', False, 'import arcade\n'), ((487, 29, 487, 58), 'arcade.load_sound', 'arcade.load_sound', ({(487, 47, 487, 57): '"""door.wav"""'}, {}), "('door.wav')", False, 'import arcade\n'), ((488, 16, 488, 45), 'arcade.play_sound', 'arcade.play_sound', ({(488, 34, 488, 44): 'your_sound'}, {}), '(your_sound)', False, 'import arcade\n')] |
Guillaume-Fernandez/phishfinder | venv/lib/python3.6/site-packages/gevent/testing/openfiles.py | b459a30202fd5dfb1340b43c70363705de7cedd9 | # Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import os
import unittest
import re
from . import sysinfo
# Linux/OS X/BSD platforms can implement this by calling out to lsof
if sysinfo.WIN:
def _run_lsof():
raise unittest.SkipTest("lsof not expected on Windows")
else:
def _run_lsof():
import tempfile
pid = os.getpid()
fd, tmpname = tempfile.mkstemp('get_open_files')
os.close(fd)
lsof_command = 'lsof -p %s > %s' % (pid, tmpname)
if os.system(lsof_command):
# XXX: This prints to the console an annoying message: 'lsof is not recognized'
raise unittest.SkipTest("lsof failed")
with open(tmpname) as fobj:
data = fobj.read().strip()
os.remove(tmpname)
return data
def default_get_open_files(pipes=False):
data = _run_lsof()
results = {}
for line in data.split('\n'):
line = line.strip()
if not line or line.startswith("COMMAND"):
# Skip header and blank lines
continue
split = re.split(r'\s+', line)
_command, _pid, _user, fd = split[:4]
# Pipes (on OS X, at least) get an fd like "3" while normal files get an fd like "1u"
if fd[:-1].isdigit() or fd.isdigit():
if not pipes and fd[-1].isdigit():
continue
fd = int(fd[:-1]) if not fd[-1].isdigit() else int(fd)
if fd in results:
params = (fd, line, split, results.get(fd), data)
raise AssertionError('error when parsing lsof output: duplicate fd=%r\nline=%r\nsplit=%r\nprevious=%r\ndata:\n%s' % params)
results[fd] = line
if not results:
raise AssertionError('failed to parse lsof:\n%s' % (data, ))
results['data'] = data
return results
def default_get_number_open_files():
if os.path.exists('/proc/'):
# Linux only
fd_directory = '/proc/%d/fd' % os.getpid()
return len(os.listdir(fd_directory))
try:
return len(get_open_files(pipes=True)) - 1
except (OSError, AssertionError, unittest.SkipTest):
return 0
lsof_get_open_files = default_get_open_files
try:
# psutil import subprocess which on Python 3 imports selectors.
# This can expose issues with monkey-patching.
import psutil
except ImportError:
get_open_files = default_get_open_files
get_number_open_files = default_get_number_open_files
else:
# If psutil is available (it is cross-platform) use that.
# It is *much* faster than shelling out to lsof each time
# (Running 14 tests takes 3.964s with lsof and 0.046 with psutil)
# However, it still doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
def get_open_files():
"""
Return a list of popenfile and pconn objects.
Note that other than `fd`, they have different attributes.
.. important:: If you want to find open sockets, on Windows
and linux, it is important that the socket at least be listening
(socket.listen(1)). Unlike the lsof implementation, this will only
return sockets in a state like that.
"""
results = dict()
process = psutil.Process()
results['data'] = process.open_files() + process.connections('all')
for x in results['data']:
results[x.fd] = x
results['data'] += ['From psutil', process]
return results
def get_number_open_files():
process = psutil.Process()
try:
return process.num_fds()
except AttributeError:
# num_fds is unix only. Is num_handles close enough on Windows?
return 0
| [((74, 7, 74, 31), 'os.path.exists', 'os.path.exists', ({(74, 22, 74, 30): '"""/proc/"""'}, {}), "('/proc/')", False, 'import os\n'), ((33, 14, 33, 63), 'unittest.SkipTest', 'unittest.SkipTest', ({(33, 32, 33, 62): '"""lsof not expected on Windows"""'}, {}), "('lsof not expected on Windows')", False, 'import unittest\n'), ((37, 14, 37, 25), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n'), ((38, 22, 38, 56), 'tempfile.mkstemp', 'tempfile.mkstemp', ({(38, 39, 38, 55): '"""get_open_files"""'}, {}), "('get_open_files')", False, 'import tempfile\n'), ((39, 8, 39, 20), 'os.close', 'os.close', ({(39, 17, 39, 19): 'fd'}, {}), '(fd)', False, 'import os\n'), ((41, 11, 41, 34), 'os.system', 'os.system', ({(41, 21, 41, 33): 'lsof_command'}, {}), '(lsof_command)', False, 'import os\n'), ((46, 8, 46, 26), 'os.remove', 'os.remove', ({(46, 18, 46, 25): 'tmpname'}, {}), '(tmpname)', False, 'import os\n'), ((57, 16, 57, 38), 're.split', 're.split', ({(57, 25, 57, 31): '"""\\\\s+"""', (57, 33, 57, 37): 'line'}, {}), "('\\\\s+', line)", False, 'import re\n'), ((112, 18, 112, 34), 'psutil.Process', 'psutil.Process', ({}, {}), '()', False, 'import psutil\n'), ((120, 18, 120, 34), 'psutil.Process', 'psutil.Process', ({}, {}), '()', False, 'import psutil\n'), ((43, 18, 43, 50), 'unittest.SkipTest', 'unittest.SkipTest', ({(43, 36, 43, 49): '"""lsof failed"""'}, {}), "('lsof failed')", False, 'import unittest\n'), ((76, 39, 76, 50), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n'), ((77, 19, 77, 43), 'os.listdir', 'os.listdir', ({(77, 30, 77, 42): 'fd_directory'}, {}), '(fd_directory)', False, 'import os\n')] |
ct-clmsn/distributed-tensorflow-orchestration | examples/multiprocess_example.py | c841659881e98209149bd6e3e09774a50e3c748e | '''
marathon_example.py
performs a simple matrix multiply using 3 compute nodes
'''
import argparse
import uuid
def parseargs():
parser = argparse.ArgumentParser(description='Marathon for TensorFlow.')
parser.add_argument('--n_tasks', default=1, help='number of tasks to launch')
parser.add_argument('--cpu', default=100.0, help='CPU allocation for each task')
parser.add_argument('--mem', default=100.0, help='memory allocation for each task')
parser.add_argument('--taskname', default=uuid.uuid1(), help='name for the task')
parser.add_argument('--url', help='DNS addr to marathon')
parser.add_argument('--usr', help='marathon username')
parser.add_argument('--usrpwd', help='marathon password')
parser.add_argument('--uri', help='curl-friendly URI to the tensorflow client executable (url?, hdfs?, docker?)')
args = parser.parse_args()
return args
if __name__ == '__main__':
from sys import argv
import tensorflow as tf
from dtforchestrator import *
args = parseargs()
with MultiprocessTensorFlowSession(args.taskname, args.n_tasks) as tfdevices:
with tf.device(tfdevices.getDeviceSpec(1)):
matrix1 = tf.constant([[3.],[3.]])
with tf.device(tfdevices.getDeviceSpec(2)):
matrix2 = tf.constant([[3.,3.]])
with tf.device(tfdevices.getDeviceSpec(0)):
matrix0 = tf.constant([[3.,3.]])
product1 = tf.matmul(matrix0, matrix1)
product2 = tf.matmul(matrix2, matrix1)
with tf.Session(tfdevices.localGRPC()) as sess:
res = sess.run(product1)
print(res)
res = sess.run(product2)
print(res)
| [] |
Awannaphasch2016/CDKFAUCovid19Cralwer | FAUCovidCrawler/AWSLambda/lambda_function.py | a84d90612314cb4d4618da95238617a524b1b280 | '''
Original code contributor: mentzera
Article link: https://aws.amazon.com/blogs/big-data/building-a-near-real-time-discovery-platform-with-aws/
'''
import boto3
import json
import twitter_to_es
# from Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger import \
# twitter_to_es
from tweet_utils import \
get_tweet, id_field, get_tweet_mapping
headers = {"Content-Type": "application/json"}
s3 = boto3.client('s3')
kinesis_client = boto3.client('kinesis')
# dynamoDb_client = boto3.client('dynamodb')
# Lambda execution starts here
def handler(event, context):
for record in event['Records']:
# Get the bucket name and key for the new file
bucket = record['s3']['bucket']['name']
key = record['s3']['object']['key']
# Get s3 object, read, and split the file into lines
try:
obj = s3.get_object(Bucket=bucket, Key=key)
except Exception as e:
print(e)
print(
'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(
key, bucket))
raise e
# Parse s3 object content (JSON)
try:
# https://stackoverflow.com/questions/31976273/open-s3-object-as-a-string-with-boto3
s3_file_content = obj['Body'].read().decode('utf-8')
# clean trailing comma
if s3_file_content.endswith(',\n'):
s3_file_content = s3_file_content[:-2]
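# Wrap the comma/newline separated tweet records in brackets so they parse as one JSON array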
tweets_str = '[' + s3_file_content + ']'
# print(tweets_str)
tweets = json.loads(tweets_str)
except Exception as e:
print(e)
print('Error loading json from object {} in bucket {}'.format(key,
bucket))
raise e
for doc in tweets:
tweet = get_tweet(doc)
# print(tweet['sentiments'])
print(tweet)
print('===\n\n\n')
#=====================
#==send data to dynamoDB
#=====================
# Get the service resource.
dynamodb = boto3.resource('dynamodb')
# Instantiate a table resource object without actually
# creating a DynamoDB table. Note that the attributes of this table
# are lazy-loaded: a request is not made nor are the attribute
# values populated until the attributes
# on the table resource are accessed or its load() method is called.
table = dynamodb.Table('faucovidstream_twitter_with_sentiment')
# Print out some data about the table.
# This will cause a request to be made to DynamoDB and its attribute
# values will be set based on the response.
print(table.creation_date_time)
# put_item() lives on the Table resource, not the DynamoDB service resource
table.put_item(
Item=tweet
)
| [((17, 5, 17, 23), 'boto3.client', 'boto3.client', ({(17, 18, 17, 22): '"""s3"""'}, {}), "('s3')", False, 'import boto3\n'), ((18, 17, 18, 40), 'boto3.client', 'boto3.client', ({(18, 30, 18, 39): '"""kinesis"""'}, {}), "('kinesis')", False, 'import boto3\n'), ((71, 19, 71, 45), 'boto3.resource', 'boto3.resource', ({(71, 34, 71, 44): '"""dynamodb"""'}, {}), "('dynamodb')", False, 'import boto3\n'), ((51, 21, 51, 43), 'json.loads', 'json.loads', ({(51, 32, 51, 42): 'tweets_str'}, {}), '(tweets_str)', False, 'import json\n'), ((60, 20, 60, 34), 'tweet_utils.get_tweet', 'get_tweet', ({(60, 30, 60, 33): 'doc'}, {}), '(doc)', False, 'from tweet_utils import get_tweet, id_field, get_tweet_mapping\n')] |
everaccountable/django-user-messages | user_messages/context_processors.py | 101d539b785bdb440bf166fb16ad25eb66e4174a | from django.contrib.messages.constants import DEFAULT_LEVELS
from user_messages.api import get_messages
def messages(request):
"""
Return a lazy 'messages' context variable as well as
'DEFAULT_MESSAGE_LEVELS'.
"""
return {
"messages": get_messages(request=request),
"DEFAULT_MESSAGE_LEVELS": DEFAULT_LEVELS,
}
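# Typical usage (assumption, not shown in this file): add
# "user_messages.context_processors.messages" to the template engine's
# "context_processors" setting so templates can iterate over `messages`.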
| [((12, 20, 12, 49), 'user_messages.api.get_messages', 'get_messages', (), '', False, 'from user_messages.api import get_messages\n')] |
ecanro/100DaysOfCode_Python | Day_5/highest_score.py | a86ebe5a793fd4743e0de87454ba76925efdd23d | ## Highest Score
# 🚨 Don't change the code below 👇
student_scores = input("Input a list of student scores: ").split()
for n in range(0, len(student_scores)):
student_scores[n] = int(student_scores[n])
print(student_scores)
# 🚨 Don't change the code above 👆
# Write your code below this row 👇
highest_score = 0
for scores in student_scores:
if scores > highest_score:
highest_score = scores
print(f'The highest score is: {highest_score}')
# The same result using the built-in max() function
print(max(student_scores)) | [] |
zaixizhang/MGSSL | finetune/finetune.py | fdb7e78bb927d735ed64dc78fb792adb13352e1c | import argparse
from loader import MoleculeDataset
from torch_geometric.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import numpy as np
from model import GNN, GNN_graphpred
from sklearn.metrics import roc_auc_score
from splitters import scaffold_split, random_split, random_scaffold_split
import pandas as pd
import os
import shutil
from tensorboardX import SummaryWriter
criterion = nn.BCEWithLogitsLoss(reduction = "none")
def train(args, model, device, loader, optimizer):
model.train()
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
y = batch.y.view(pred.shape).to(torch.float64)
#Whether y is non-null or not.
is_valid = y**2 > 0
#Loss matrix
loss_mat = criterion(pred.double(), (y+1)/2)
#loss matrix after removing null target
loss_mat = torch.where(is_valid, loss_mat, torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))
optimizer.zero_grad()
loss = torch.sum(loss_mat)/torch.sum(is_valid)
loss.backward()
optimizer.step()
def eval(args, model, device, loader):
model.eval()
y_true = []
y_scores = []
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
with torch.no_grad():
pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
y_true.append(batch.y.view(pred.shape))
y_scores.append(pred)
y_true = torch.cat(y_true, dim = 0).cpu().numpy()
y_scores = torch.cat(y_scores, dim = 0).cpu().numpy()
roc_list = []
for i in range(y_true.shape[1]):
#AUC is only defined when there is at least one positive data.
if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == -1) > 0:
is_valid = y_true[:,i]**2 > 0
roc_list.append(roc_auc_score((y_true[is_valid,i] + 1)/2, y_scores[is_valid,i]))
if len(roc_list) < y_true.shape[1]:
print("Some target is missing!")
print("Missing ratio: %f" %(1 - float(len(roc_list))/y_true.shape[1]))
return sum(roc_list)/len(roc_list) #y_true.shape[1]
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch implementation of pre-training of graph neural networks')
parser.add_argument('--device', type=int, default=0,
help='which gpu to use if any (default: 0)')
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training (default: 32)')
parser.add_argument('--epochs', type=int, default=100,
help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate (default: 0.001)')
parser.add_argument('--lr_scale', type=float, default=1,
help='relative learning rate for the feature extraction layer (default: 1)')
parser.add_argument('--decay', type=float, default=0,
help='weight decay (default: 0)')
parser.add_argument('--num_layer', type=int, default=5,
help='number of GNN message passing layers (default: 5).')
parser.add_argument('--emb_dim', type=int, default=300,
help='embedding dimensions (default: 300)')
parser.add_argument('--dropout_ratio', type=float, default=0.5,
help='dropout ratio (default: 0.5)')
parser.add_argument('--graph_pooling', type=str, default="mean",
help='graph level pooling (sum, mean, max, set2set, attention)')
parser.add_argument('--JK', type=str, default="last",
help='how the node features across layers are combined. last, sum, max or concat')
parser.add_argument('--gnn_type', type=str, default="gin")
parser.add_argument('--dataset', type=str, default = 'sider', help='root directory of dataset. For now, only classification.')
parser.add_argument('--input_model_file', type=str, default = '../motif_based_pretrain/saved_model/motif_pretrain.pth', help='filename to read the model (if there is any)')
parser.add_argument('--filename', type=str, default = '', help='output filename')
parser.add_argument('--seed', type=int, default=42, help = "Seed for splitting the dataset.")
parser.add_argument('--runseed', type=int, default=0, help = "Seed for minibatch selection, random initialization.")
parser.add_argument('--split', type = str, default="scaffold", help = "random or scaffold or random_scaffold")
parser.add_argument('--eval_train', type=int, default = 1, help='evaluating training or not')
parser.add_argument('--num_workers', type=int, default = 4, help='number of workers for dataset loading')
args = parser.parse_args()
torch.manual_seed(args.runseed)
np.random.seed(args.runseed)
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.runseed)
#Bunch of classification tasks
if args.dataset == "tox21":
num_tasks = 12
elif args.dataset == "hiv":
num_tasks = 1
elif args.dataset == "pcba":
num_tasks = 128
elif args.dataset == "muv":
num_tasks = 17
elif args.dataset == "bace":
num_tasks = 1
elif args.dataset == "bbbp":
num_tasks = 1
elif args.dataset == "toxcast":
num_tasks = 617
elif args.dataset == "sider":
num_tasks = 27
elif args.dataset == "clintox":
num_tasks = 2
else:
raise ValueError("Invalid dataset name.")
#set up dataset
dataset = MoleculeDataset("dataset/" + args.dataset, dataset=args.dataset)
print(dataset)
if args.split == "scaffold":
smiles_list = pd.read_csv('dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist()
train_dataset, valid_dataset, test_dataset = scaffold_split(dataset, smiles_list, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1)
print("scaffold")
elif args.split == "random":
train_dataset, valid_dataset, test_dataset = random_split(dataset, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1, seed = args.seed)
print("random")
elif args.split == "random_scaffold":
smiles_list = pd.read_csv('dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist()
train_dataset, valid_dataset, test_dataset = random_scaffold_split(dataset, smiles_list, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1, seed = args.seed)
print("random scaffold")
else:
raise ValueError("Invalid split option.")
print(train_dataset[0])
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)
val_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers)
#set up model
model = GNN_graphpred(args.num_layer, args.emb_dim, num_tasks, JK = args.JK, drop_ratio = args.dropout_ratio, graph_pooling = args.graph_pooling, gnn_type = args.gnn_type)
if not args.input_model_file == "":
model.from_pretrained(args.input_model_file)
model.to(device)
#set up optimizer
#different learning rate for different part of GNN
model_param_group = []
model_param_group.append({"params": model.gnn.parameters()})
if args.graph_pooling == "attention":
model_param_group.append({"params": model.pool.parameters(), "lr":args.lr*args.lr_scale})
model_param_group.append({"params": model.graph_pred_linear.parameters(), "lr":args.lr*args.lr_scale})
optimizer = optim.Adam(model_param_group, lr=args.lr, weight_decay=args.decay)
print(optimizer)
for epoch in range(1, args.epochs+1):
print("====epoch " + str(epoch))
train(args, model, device, train_loader, optimizer)
print("====Evaluation")
if args.eval_train:
train_acc = eval(args, model, device, train_loader)
else:
print("omit the training accuracy computation")
train_acc = 0
val_acc = eval(args, model, device, val_loader)
test_acc = eval(args, model, device, test_loader)
print("train: %f val: %f test: %f" %(train_acc, val_acc, test_acc))
if __name__ == "__main__":
main()
| [((25, 12, 25, 52), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', (), '', True, 'import torch.nn as nn\n'), ((83, 13, 83, 115), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((118, 4, 118, 35), 'torch.manual_seed', 'torch.manual_seed', ({(118, 22, 118, 34): 'args.runseed'}, {}), '(args.runseed)', False, 'import torch\n'), ((119, 4, 119, 32), 'numpy.random.seed', 'np.random.seed', ({(119, 19, 119, 31): 'args.runseed'}, {}), '(args.runseed)', True, 'import numpy as np\n'), ((121, 7, 121, 32), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((147, 14, 147, 78), 'loader.MoleculeDataset', 'MoleculeDataset', (), '', False, 'from loader import MoleculeDataset\n'), ((167, 19, 167, 118), 'torch_geometric.data.DataLoader', 'DataLoader', (), '', False, 'from torch_geometric.data import DataLoader\n'), ((168, 17, 168, 117), 'torch_geometric.data.DataLoader', 'DataLoader', (), '', False, 'from torch_geometric.data import DataLoader\n'), ((169, 18, 169, 117), 'torch_geometric.data.DataLoader', 'DataLoader', (), '', False, 'from torch_geometric.data import DataLoader\n'), ((172, 12, 172, 175), 'model.GNN_graphpred', 'GNN_graphpred', (), '', False, 'from model import GNN, GNN_graphpred\n'), ((185, 16, 185, 82), 'torch.optim.Adam', 'optim.Adam', (), '', True, 'import torch.optim as optim\n'), ((30, 33, 30, 63), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((54, 33, 54, 63), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((120, 57, 120, 82), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((120, 88, 120, 107), 'torch.device', 'torch.device', ({(120, 101, 120, 106): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n'), ((122, 8, 122, 48), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', ({(122, 35, 122, 47): 'args.runseed'}, {}), '(args.runseed)', False, 'import torch\n'), ((153, 53, 153, 149), 'splitters.scaffold_split', 'scaffold_split', (), '', False, 'from splitters import scaffold_split, random_split\n'), ((43, 15, 43, 34), 'torch.sum', 'torch.sum', ({(43, 25, 43, 33): 'loss_mat'}, {}), '(loss_mat)', False, 'import torch\n'), ((43, 35, 43, 54), 'torch.sum', 'torch.sum', ({(43, 45, 43, 53): 'is_valid'}, {}), '(is_valid)', False, 'import torch\n'), ((57, 13, 57, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((156, 53, 156, 152), 'splitters.random_split', 'random_split', (), '', False, 'from splitters import scaffold_split, random_split\n'), ((69, 11, 69, 35), 'numpy.sum', 'np.sum', ({(69, 18, 69, 34): '(y_true[:, (i)] == 1)'}, {}), '(y_true[:, (i)] == 1)', True, 'import numpy as np\n'), ((69, 44, 69, 69), 'numpy.sum', 'np.sum', ({(69, 51, 69, 68): '(y_true[:, (i)] == -1)'}, {}), '(y_true[:, (i)] == -1)', True, 'import numpy as np\n'), ((71, 28, 71, 91), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', ({(71, 42, 71, 68): '((y_true[is_valid, i] + 1) / 2)', (71, 70, 71, 90): 'y_scores[is_valid, i]'}, {}), '((y_true[is_valid, i] + 1) / 2, y_scores[is_valid, i])', False, 'from sklearn.metrics import roc_auc_score\n'), ((63, 13, 63, 39), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((64, 15, 64, 43), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((152, 22, 152, 99), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((40, 51, 40, 78), 'torch.zeros', 'torch.zeros', ({(40, 63, 40, 77): 'loss_mat.shape'}, {}), '(loss_mat.shape)', 
False, 'import torch\n'), ((159, 22, 159, 99), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n')] |
threefoldtech/js-sdk | jumpscale/packages/vdc_dashboard/bottle/api/exceptions.py | 811f783ac34a60225175bab2d806802a87b9d5c7 | from jumpscale.core import exceptions
class BaseError(exceptions.Base):
"""a generic base error for bcdb rest, with status code"""
def __init__(self, status, *args, **kwargs):
super().__init__(*args, *kwargs)
self.status = status
class VDCNotFound(BaseError):
pass
class MissingAuthorizationHeader(BaseError):
pass
class InvalidCredentials(BaseError):
pass
class MissingArgument(BaseError):
pass
class StellarServiceDown(BaseError):
pass
class FlavorNotSupported(BaseError):
pass
class NoEnoughCapacity(BaseError):
pass
class AdddingNodeFailed(BaseError):
pass
class VirtualMachineDeploymentFailed(BaseError):
pass
class CannotDeleteMasterNode(BaseError):
pass
class ZDBDeploymentFailed(BaseError):
pass
class ZDBDeletionFailed(BaseError):
pass
class KubeConfigNotFound(BaseError):
pass
class InvalidKubeConfig(BaseError):
pass
class ZStorConfigNotFound(BaseError):
pass
class InvalidZStorConfig(BaseError):
pass
class NoEnoughFunds(BaseError):
pass
class BadRequestError(BaseError):
pass
class UnknownError(BaseError):
pass
| [] |
gutierrezps/NeuroKit | neurokit2/signal/signal_plot.py | a30f76e64b4108abdc652a20391dc0288c62501d | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..events import events_plot
from ..stats import standardize as nk_standardize
def signal_plot(
signal, sampling_rate=None, subplots=False, standardize=False, labels=None, **kwargs
):
"""Plot signal with events as vertical lines.
Parameters
----------
signal : array or DataFrame
Signal array (can be a dataframe with many signals).
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second). Needs to be supplied if
the data should be plotted over time in seconds. Otherwise the data is plotted over samples.
Defaults to None.
subplots : bool
If True, each signal is plotted in a subplot.
standardize : bool
If True, all signals will have the same scale (useful for visualisation).
labels : str or list
Defaults to None.
**kwargs : optional
Arguments passed to matplotlib plotting.
Examples
----------
>>> import numpy as np
>>> import pandas as pd
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=10, sampling_rate=1000)
>>> nk.signal_plot(signal, sampling_rate=1000, color="red")
>>>
>>> data = pd.DataFrame({"Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)),
... "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)),
... "Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000)))})
>>> nk.signal_plot(data, labels=['signal_1', 'signal_2', 'signal_3'], subplots=True)
>>> nk.signal_plot([signal, data], standardize=True)
"""
# Sanitize format
if isinstance(signal, list):
try:
for i in signal:
len(i)
except TypeError:
signal = np.array(signal)
if isinstance(signal, pd.DataFrame) is False:
# If list is passed
if isinstance(signal, list) or len(np.array(signal).shape) > 1:
out = pd.DataFrame()
for i, content in enumerate(signal):
if isinstance(content, (pd.DataFrame, pd.Series)):
out = pd.concat([out, content], axis=1, sort=True)
else:
out = pd.concat(
[out, pd.DataFrame({"Signal" + str(i + 1): content})],
axis=1,
sort=True,
)
signal = out
# If vector is passed
else:
signal = pd.DataFrame({"Signal": signal})
# Copy signal
signal = signal.copy()
# Guess continuous and events columns
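    # A column is treated as an "events" channel when it is binary (exactly two unique
    # values) and its high-value samples never fall on consecutive indices, i.e. it
    # looks like sparse event markers rather than a continuous signal.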
continuous_columns = list(signal.columns.values)
events_columns = []
for col in signal.columns:
vector = signal[col]
if vector.nunique() == 2:
indices = np.where(vector == np.max(vector.unique()))
if bool(np.any(np.diff(indices) == 1)) is False:
events_columns.append(col)
continuous_columns.remove(col)
# Adjust for sampling rate
if sampling_rate is not None:
signal.index = signal.index / sampling_rate
title_x = "Time (seconds)"
else:
title_x = "Time"
# x_axis = np.linspace(0, signal.shape[0] / sampling_rate, signal.shape[0])
# x_axis = pd.DataFrame(x_axis, columns=["Time (s)"])
# signal = pd.concat([signal, x_axis], axis=1)
# signal = signal.set_index("Time (s)")
# Plot accordingly
if len(events_columns) > 0:
events = []
for col in events_columns:
vector = signal[col]
events.append(np.where(vector == np.max(vector.unique()))[0])
plot = events_plot(events, signal=signal[continuous_columns])
if sampling_rate is None and signal.index.is_integer():
plot.gca().set_xlabel("Samples")
else:
plot.gca().set_xlabel(title_x)
else:
# Aesthetics
colors = [
"#1f77b4",
"#ff7f0e",
"#2ca02c",
"#d62728",
"#9467bd",
"#8c564b",
"#e377c2",
"#7f7f7f",
"#bcbd22",
"#17becf",
]
if len(continuous_columns) > len(colors):
colors = plt.cm.viridis(np.linspace(0, 1, len(continuous_columns)))
# Plot
if standardize is True:
signal[continuous_columns] = nk_standardize(signal[continuous_columns])
if subplots is True:
_, axes = plt.subplots(nrows=len(continuous_columns), ncols=1, sharex=True, **kwargs)
for ax, col, color in zip(axes, continuous_columns, colors):
ax.plot(signal[col], c=color, **kwargs)
else:
plot = signal[continuous_columns].plot(subplots=False, sharex=True, **kwargs)
if sampling_rate is None and signal.index.is_integer():
plt.xlabel("Samples")
else:
plt.xlabel(title_x)
# Tidy legend locations and add labels
if labels is None:
labels = continuous_columns.copy()
if isinstance(labels, str):
n_labels = len([labels])
labels = [labels]
elif isinstance(labels, list):
n_labels = len(labels)
if len(signal[continuous_columns].columns) != n_labels:
raise ValueError(
"NeuroKit error: signal_plot(): number of labels does not equal the number of plotted signals."
)
if subplots is False:
plt.legend(labels, loc=1)
else:
for i, label in enumerate(labels):
axes[i].legend([label], loc=1)
| [((164, 8, 164, 33), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((60, 18, 60, 32), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((74, 21, 74, 53), 'pandas.DataFrame', 'pd.DataFrame', ({(74, 34, 74, 52): "{'Signal': signal}"}, {}), "({'Signal': signal})", True, 'import pandas as pd\n'), ((144, 12, 144, 33), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(144, 23, 144, 32): '"""Samples"""'}, {}), "('Samples')", True, 'import matplotlib.pyplot as plt\n'), ((146, 12, 146, 31), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(146, 23, 146, 30): 'title_x'}, {}), '(title_x)', True, 'import matplotlib.pyplot as plt\n'), ((54, 21, 54, 37), 'numpy.array', 'np.array', ({(54, 30, 54, 36): 'signal'}, {}), '(signal)', True, 'import numpy as np\n'), ((63, 26, 63, 70), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((59, 43, 59, 59), 'numpy.array', 'np.array', ({(59, 52, 59, 58): 'signal'}, {}), '(signal)', True, 'import numpy as np\n'), ((86, 27, 86, 43), 'numpy.diff', 'np.diff', ({(86, 35, 86, 42): 'indices'}, {}), '(indices)', True, 'import numpy as np\n')] |
MTES-MCT/mobilic-api | migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py | b3754de2282262fd60a27dc90e40777df9c1e230 | """Only one validation per mission, user and actor
Revision ID: 1a89721126f7
Revises: fa96dfc8237d
Create Date: 2021-10-14 11:22:01.124488
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "1a89721126f7"
down_revision = "fa96dfc8237d"
branch_labels = None
depends_on = None
def upgrade():
op.execute(
"""
WITH validation_duplicates AS (
SELECT
id,
ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn
FROM mission_validation
)
DELETE FROM mission_validation mv
USING validation_duplicates vd
WHERE mv.id = vd.id AND vd.rn >= 2
"""
)
op.execute(
"""
ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user
EXCLUDE USING GIST (
mission_id WITH =,
submitter_id WITH =,
user_id WITH =
)
"""
)
def downgrade():
op.drop_constraint(
"only_one_validation_per_submitter_mission_and_user",
"mission_validation",
)
| [((20, 4, 32, 5), 'alembic.op.execute', 'op.execute', ({(21, 8, 31, 11): '"""\n WITH validation_duplicates AS (\n SELECT\n id,\n ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn\n FROM mission_validation\n )\n DELETE FROM mission_validation mv\n USING validation_duplicates vd\n WHERE mv.id = vd.id AND vd.rn >= 2\n """'}, {}), '(\n """\n WITH validation_duplicates AS (\n SELECT\n id,\n ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn\n FROM mission_validation\n )\n DELETE FROM mission_validation mv\n USING validation_duplicates vd\n WHERE mv.id = vd.id AND vd.rn >= 2\n """\n )', False, 'from alembic import op\n'), ((33, 4, 42, 5), 'alembic.op.execute', 'op.execute', ({(34, 8, 41, 11): '"""\n ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user\n EXCLUDE USING GIST (\n mission_id WITH =,\n submitter_id WITH =,\n user_id WITH =\n )\n """'}, {}), '(\n """\n ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user\n EXCLUDE USING GIST (\n mission_id WITH =,\n submitter_id WITH =,\n user_id WITH =\n )\n """\n )', False, 'from alembic import op\n'), ((46, 4, 49, 5), 'alembic.op.drop_constraint', 'op.drop_constraint', ({(47, 8, 47, 60): '"""only_one_validation_per_submitter_mission_and_user"""', (48, 8, 48, 28): '"""mission_validation"""'}, {}), "('only_one_validation_per_submitter_mission_and_user',\n 'mission_validation')", False, 'from alembic import op\n')] |
Falcons-Robocup/code | packages/facilities/rtdb/python/rtdb2_get.py | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | # Copyright 2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/python
import os
import sys
import argparse
from rtdb2 import RtDB2Store, RTDB2_DEFAULT_PATH
import rtdb2tools
from hexdump import hexdump
# Main structure of the program
if __name__ == "__main__":
# Argument parsing.
descriptionTxt = 'This tool reads a value from the database given an RtDB key.\n'
exampleTxt = """Example: rtdb2_get.py -a 6 ROBOT_STATE
age: 2h
shared: True
list: False
value: [2, [1581172987, 618438], [0.05368572473526001, -0.2938263416290283, 5.330356597900391], [0.1385340541601181, -0.8020891547203064, 0.7817431688308716], False, [0.0, 0.0], 6, 'A']
Example: rtdb2_get.py -a 2 DIAG_WORLDMODEL_LOCAL -x "['balls'][0]['result']"
[[5.3209381103515625, 0.5837346315383911, 0.15281200408935547], [-0.0029433025047183037, 0.01433953270316124, 1.2758345292240847e-05], 1.0, [22033, 1889585904]]
"""
parser = argparse.ArgumentParser(description=descriptionTxt, epilog=exampleTxt, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--agent', help='agent ID to use', type=int, default=rtdb2tools.guessAgentId())
parser.add_argument('-s', '--serialized', help='also show serialized string (as hexdump)', action='store_true')
parser.add_argument('-p', '--path', help='database path to use', type=str, default=RTDB2_DEFAULT_PATH)
parser.add_argument('-x', '--expression', help='evaluate expression, useful to fetch a specific element', type=str)
parser.add_argument('key', help='RtDB key to read')
args = parser.parse_args()
# Create instance of RtDB2Store and read databases from disk
rtdb2Store = RtDB2Store(args.path)
item = rtdb2Store.get(args.agent, args.key, timeout=None)
if args.expression:
print(eval("item.value" + args.expression))
else:
print(str(item))
if args.serialized:
hexdump(item.value_serialized)
rtdb2Store.closeAll()
| [((26, 17, 26, 142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((35, 17, 35, 38), 'rtdb2.RtDB2Store', 'RtDB2Store', ({(35, 28, 35, 37): 'args.path'}, {}), '(args.path)', False, 'from rtdb2 import RtDB2Store, RTDB2_DEFAULT_PATH\n'), ((43, 8, 43, 38), 'hexdump.hexdump', 'hexdump', ({(43, 16, 43, 37): 'item.value_serialized'}, {}), '(item.value_serialized)', False, 'from hexdump import hexdump\n'), ((27, 83, 27, 108), 'rtdb2tools.guessAgentId', 'rtdb2tools.guessAgentId', ({}, {}), '()', False, 'import rtdb2tools\n')] |
what3versin/reinforce_py | algorithms/A3C/atari/atari_env_deprecated.py | 46769da50aea65346cd3a300b55306d25f1f2683 | from __future__ import print_function
from __future__ import division
import os
import gym
import numpy as np
from skimage.transform import resize
from skimage.color import rgb2gray
class Atari(object):
s_dim = [84, 84, 1]
a_dim = 3
def __init__(self, args, record_video=False):
self.env = gym.make('BreakoutNoFrameskip-v4')
self.ale = self.env.env.ale # ale interface
if record_video:
video_dir = os.path.join(args.save_path, 'videos')
if not os.path.exists(video_dir):
os.makedirs(video_dir)
self.env = gym.wrappers.Monitor(
self.env, video_dir, video_callable=lambda x: True, resume=True)
self.ale = self.env.env.env.ale
self.screen_size = Atari.s_dim[:2] # 84x84
self.noop_max = 30
self.frame_skip = 4
self.frame_feq = 4
self.s_dim = Atari.s_dim
self.a_dim = Atari.a_dim
self.action_space = [1, 2, 3] # Breakout specify
self.done = True
def new_round(self):
if not self.done: # dead but not done
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
obs = self.preprocess(obs)
else: # terminal
self.env.reset()
# No-op
for _ in range(np.random.randint(1, self.noop_max + 1)):
obs, _, done, _ = self.env.step(0)
obs = self.preprocess(obs)
        # Initialize the stacked frame state here so that step() can update it
        # incrementally via self.state (it is otherwise never initialized).
        self.state = np.reshape(obs, newshape=self.screen_size + [1])
        return obs
def preprocess(self, observ):
return resize(rgb2gray(observ), self.screen_size)
def step(self, action):
observ, reward, dead = None, 0, False
for _ in range(self.frame_skip):
lives_before = self.ale.lives()
o, r, self.done, _ = self.env.step(self.action_space[action])
lives_after = self.ale.lives()
reward += r
if lives_before > lives_after:
dead = True
break
observ = self.preprocess(o)
observ = np.reshape(observ, newshape=self.screen_size + [1])
self.state = np.append(self.state[:, :, 1:], observ, axis=2)
return self.state, reward, dead, self.done
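# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical driver for this wrapper: `args` only needs a `save_path`
# attribute when record_video=True, actions are indices into self.action_space
# (0, 1, 2), and step() returns the preprocessed 84x84x1 state stack. Requires the
# Atari dependencies for gym's 'BreakoutNoFrameskip-v4'.
if __name__ == '__main__':
    import argparse
    args = argparse.Namespace(save_path='./outputs')  # placeholder arguments
    env = Atari(args, record_video=False)
    obs = env.new_round()
    for _ in range(10):
        action = np.random.randint(Atari.a_dim)  # random action in {0, 1, 2}
        state, reward, dead, done = env.step(action)
        if dead or done:
            obs = env.new_round()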
| [((17, 19, 17, 53), 'gym.make', 'gym.make', ({(17, 28, 17, 52): '"""BreakoutNoFrameskip-v4"""'}, {}), "('BreakoutNoFrameskip-v4')", False, 'import gym\n'), ((64, 17, 64, 68), 'numpy.reshape', 'np.reshape', (), '', True, 'import numpy as np\n'), ((65, 21, 65, 68), 'numpy.append', 'np.append', (), '', True, 'import numpy as np\n'), ((20, 24, 20, 62), 'os.path.join', 'os.path.join', ({(20, 37, 20, 51): 'args.save_path', (20, 53, 20, 61): '"""videos"""'}, {}), "(args.save_path, 'videos')", False, 'import os\n'), ((23, 23, 24, 80), 'gym.wrappers.Monitor', 'gym.wrappers.Monitor', (), '', False, 'import gym\n'), ((51, 22, 51, 38), 'skimage.color.rgb2gray', 'rgb2gray', ({(51, 31, 51, 37): 'observ'}, {}), '(observ)', False, 'from skimage.color import rgb2gray\n'), ((21, 19, 21, 44), 'os.path.exists', 'os.path.exists', ({(21, 34, 21, 43): 'video_dir'}, {}), '(video_dir)', False, 'import os\n'), ((22, 16, 22, 38), 'os.makedirs', 'os.makedirs', ({(22, 28, 22, 37): 'video_dir'}, {}), '(video_dir)', False, 'import os\n'), ((45, 27, 45, 66), 'numpy.random.randint', 'np.random.randint', ({(45, 45, 45, 46): '(1)', (45, 48, 45, 65): '(self.noop_max + 1)'}, {}), '(1, self.noop_max + 1)', True, 'import numpy as np\n')] |
NBCLab/nimare-paper | content/_build/jupyter_execute/macm.py | 2b9e70febcfde4ca12420adc3c2910ff622252f2 | #!/usr/bin/env python
# coding: utf-8
# # Meta-Analytic Coactivation Modeling
# In[1]:
# First, import the necessary modules and functions
import os
from datetime import datetime
import matplotlib.pyplot as plt
from myst_nb import glue
from repo2data.repo2data import Repo2Data
import nimare
start = datetime.now()
# Install the data if running locally, or points to cached data if running on neurolibre
DATA_REQ_FILE = os.path.join("../binder/data_requirement.json")
FIG_DIR = os.path.abspath("../images")
# Download data
repo2data = Repo2Data(DATA_REQ_FILE)
data_path = repo2data.install()
data_path = os.path.join(data_path[0], "data")
# Now, load the Datasets we will use in this chapter
neurosynth_dset = nimare.dataset.Dataset.load(os.path.join(data_path, "neurosynth_dataset.pkl.gz"))
# Meta-analytic coactivation modeling (MACM) {cite:p}`Laird2009-gc,Robinson2010-iv,Eickhoff2010-vx`, also known as meta-analytic connectivity modeling, uses meta-analytic data to measure co-occurrence of activations between brain regions providing evidence of functional connectivity of brain regions across tasks.
# In coordinate-based MACM, whole-brain studies within the database are selected based on whether or not they report at least one peak in a region of interest specified for the analysis.
# These studies are then subjected to a meta-analysis, often comparing the selected studies to those remaining in the database.
# In this way, the significance of each voxel in the analysis corresponds to whether there is greater convergence of foci at the voxel among studies which also report foci in the region of interest than those which do not.
#
# <!-- TODO: Determine appropriate citation style here. -->
#
# MACM results have historically been accorded a similar interpretation to task-related functional connectivity (e.g., {cite:p}`Hok2015-lt,Kellermann2013-en`), although this approach is quite removed from functional connectivity analyses of task fMRI data (e.g., beta-series correlations, psychophysiological interactions, or even seed-to-voxel functional connectivity analyses on task data).
# Nevertheless, MACM analyses do show high correspondence with resting-state functional connectivity {cite:p}`Reid2017-ez`.
# MACM has been used to characterize the task-based functional coactivation of the cerebellum {cite:p}`Riedel2015-tx`, lateral prefrontal cortex {cite:p}`Reid2016-ba`, fusiform gyrus {cite:p}`Caspers2014-ja`, and several other brain regions.
#
# Within NiMARE, MACMs can be performed by selecting studies in a Dataset based on the presence of activation within a target mask or coordinate-centered sphere.
#
# In this section, we will perform two MACMs- one with a target mask and one with a coordinate-centered sphere.
# For the former, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_mask`.
# For the latter, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_coordinate`.
# In[2]:
# Create Dataset only containing studies with peaks within the amygdala mask
amygdala_mask = os.path.join(data_path, "amygdala_roi.nii.gz")
amygdala_ids = neurosynth_dset.get_studies_by_mask(amygdala_mask)
dset_amygdala = neurosynth_dset.slice(amygdala_ids)
# Create Dataset only containing studies with peaks within the sphere ROI
sphere_ids = neurosynth_dset.get_studies_by_coordinate([[24, -2, -20]], r=6)
dset_sphere = neurosynth_dset.slice(sphere_ids)
# In[3]:
import numpy as np
from nilearn import input_data, plotting
# In order to plot a sphere with a precise radius around a coordinate with
# nilearn, we need to use a NiftiSpheresMasker
mask_img = neurosynth_dset.masker.mask_img
sphere_masker = input_data.NiftiSpheresMasker([[24, -2, -20]], radius=6, mask_img=mask_img)
sphere_masker.fit(mask_img)
sphere_img = sphere_masker.inverse_transform(np.array([[1]]))
fig, axes = plt.subplots(figsize=(6, 4), nrows=2)
display = plotting.plot_roi(
amygdala_mask,
annotate=False,
draw_cross=False,
axes=axes[0],
figure=fig,
)
axes[0].set_title("Amygdala ROI")
display = plotting.plot_roi(
sphere_img,
annotate=False,
draw_cross=False,
axes=axes[1],
figure=fig,
)
axes[1].set_title("Spherical ROI")
glue("figure_macm_rois", fig, display=False)
# ```{glue:figure} figure_macm_rois
# :name: figure_macm_rois
# :align: center
#
# Region of interest masks for (1) a target mask-based MACM and (2) a coordinate-based MACM.
# ```
# Once the `Dataset` has been reduced to studies with coordinates within the mask or sphere requested, any of the supported CBMA Estimators can be run.
# In[4]:
from nimare import meta
meta_amyg = meta.cbma.ale.ALE(kernel__sample_size=20)
results_amyg = meta_amyg.fit(dset_amygdala)
meta_sphere = meta.cbma.ale.ALE(kernel__sample_size=20)
results_sphere = meta_sphere.fit(dset_sphere)
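# Note (not in the original analysis): ALE is used here, but any supported CBMA
# estimator could be swapped in at this point, e.g. (hypothetically):
# meta_amyg = meta.cbma.mkda.MKDADensity()
# results_amyg = meta_amyg.fit(dset_amygdala)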
# In[5]:
meta_results = {
"Amygdala ALE MACM": results_amyg.get_map("z", return_type="image"),
"Sphere ALE MACM": results_sphere.get_map("z", return_type="image"),
}
fig, axes = plt.subplots(figsize=(6, 4), nrows=2)
for i_meta, (name, file_) in enumerate(meta_results.items()):
display = plotting.plot_stat_map(
file_,
annotate=False,
axes=axes[i_meta],
cmap="Reds",
cut_coords=[24, -2, -20],
draw_cross=False,
figure=fig,
)
axes[i_meta].set_title(name)
colorbar = display._cbar
colorbar_ticks = colorbar.get_ticks()
if colorbar_ticks[0] < 0:
new_ticks = [colorbar_ticks[0], 0, colorbar_ticks[-1]]
else:
new_ticks = [colorbar_ticks[0], colorbar_ticks[-1]]
colorbar.set_ticks(new_ticks, update_ticks=True)
glue("figure_macm", fig, display=False)
# ```{glue:figure} figure_macm
# :name: figure_macm
# :align: center
#
# Unthresholded z-statistic maps for (1) the target mask-based MACM and (2) the coordinate-based MACM.
# ```
# In[6]:
end = datetime.now()
print(f"macm.md took {end - start} to build.")
| [((19, 8, 19, 22), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((22, 16, 22, 63), 'os.path.join', 'os.path.join', ({(22, 29, 22, 62): '"""../binder/data_requirement.json"""'}, {}), "('../binder/data_requirement.json')", False, 'import os\n'), ((23, 10, 23, 38), 'os.path.abspath', 'os.path.abspath', ({(23, 26, 23, 37): '"""../images"""'}, {}), "('../images')", False, 'import os\n'), ((26, 12, 26, 36), 'repo2data.repo2data.Repo2Data', 'Repo2Data', ({(26, 22, 26, 35): 'DATA_REQ_FILE'}, {}), '(DATA_REQ_FILE)', False, 'from repo2data.repo2data import Repo2Data\n'), ((28, 12, 28, 46), 'os.path.join', 'os.path.join', ({(28, 25, 28, 37): 'data_path[0]', (28, 39, 28, 45): '"""data"""'}, {}), "(data_path[0], 'data')", False, 'import os\n'), ((55, 16, 55, 62), 'os.path.join', 'os.path.join', ({(55, 29, 55, 38): 'data_path', (55, 40, 55, 61): '"""amygdala_roi.nii.gz"""'}, {}), "(data_path, 'amygdala_roi.nii.gz')", False, 'import os\n'), ((73, 16, 73, 91), 'nilearn.input_data.NiftiSpheresMasker', 'input_data.NiftiSpheresMasker', (), '', False, 'from nilearn import input_data, plotting\n'), ((77, 12, 77, 49), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((78, 10, 84, 1), 'nilearn.plotting.plot_roi', 'plotting.plot_roi', (), '', False, 'from nilearn import input_data, plotting\n'), ((86, 10, 92, 1), 'nilearn.plotting.plot_roi', 'plotting.plot_roi', (), '', False, 'from nilearn import input_data, plotting\n'), ((94, 0, 94, 44), 'myst_nb.glue', 'glue', (), '', False, 'from myst_nb import glue\n'), ((111, 12, 111, 53), 'nimare.meta.cbma.ale.ALE', 'meta.cbma.ale.ALE', (), '', False, 'from nimare import meta\n'), ((114, 14, 114, 55), 'nimare.meta.cbma.ale.ALE', 'meta.cbma.ale.ALE', (), '', False, 'from nimare import meta\n'), ((126, 12, 126, 49), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((147, 0, 147, 39), 'myst_nb.glue', 'glue', (), '', False, 'from myst_nb import glue\n'), ((160, 6, 160, 20), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((31, 46, 31, 98), 'os.path.join', 'os.path.join', ({(31, 59, 31, 68): 'data_path', (31, 70, 31, 97): '"""neurosynth_dataset.pkl.gz"""'}, {}), "(data_path, 'neurosynth_dataset.pkl.gz')", False, 'import os\n'), ((75, 45, 75, 60), 'numpy.array', 'np.array', ({(75, 54, 75, 59): '[[1]]'}, {}), '([[1]])', True, 'import numpy as np\n'), ((128, 14, 136, 5), 'nilearn.plotting.plot_stat_map', 'plotting.plot_stat_map', (), '', False, 'from nilearn import input_data, plotting\n')] |
Maikor/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py | b86c4a7c570ae3b2c5557d098420446df5de4929 | """ CISCO_IPSLA_ECHO_MIB
This MIB module defines the templates for IP SLA operations of
ICMP echo, UDP echo and TCP connect.
The ICMP echo operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken between sending an ICMP echo request message to the
destination and receiving an ICMP echo reply.
The UDP echo operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken between sending an UDP echo request message to the
destination and receiving an UDP echo reply.
The TCP connect operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken to perform a TCP connect operation.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class CISCOIPSLAECHOMIB(Entity):
"""
.. attribute:: cipslaicmpechotmpltable
A table that contains ICMP echo template definitions
**type**\: :py:class:`CipslaIcmpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable>`
.. attribute:: cipslaudpechotmpltable
A table that contains UDP echo template specific definitions
**type**\: :py:class:`CipslaUdpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable>`
.. attribute:: cipslatcpconntmpltable
A table that contains TCP connect template specific definitions
**type**\: :py:class:`CipslaTcpConnTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB, self).__init__()
self._top_entity = None
self.yang_name = "CISCO-IPSLA-ECHO-MIB"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaIcmpEchoTmplTable", ("cipslaicmpechotmpltable", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable)), ("cipslaUdpEchoTmplTable", ("cipslaudpechotmpltable", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable)), ("cipslaTcpConnTmplTable", ("cipslatcpconntmpltable", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable))])
self._leafs = OrderedDict()
self.cipslaicmpechotmpltable = CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable()
self.cipslaicmpechotmpltable.parent = self
self._children_name_map["cipslaicmpechotmpltable"] = "cipslaIcmpEchoTmplTable"
self.cipslaudpechotmpltable = CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable()
self.cipslaudpechotmpltable.parent = self
self._children_name_map["cipslaudpechotmpltable"] = "cipslaUdpEchoTmplTable"
self.cipslatcpconntmpltable = CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable()
self.cipslatcpconntmpltable.parent = self
self._children_name_map["cipslatcpconntmpltable"] = "cipslaTcpConnTmplTable"
self._segment_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB, [], name, value)
class CipslaIcmpEchoTmplTable(Entity):
"""
A table that contains ICMP echo template definitions.
.. attribute:: cipslaicmpechotmplentry
A row entry representing an IPSLA ICMP echo template
**type**\: list of :py:class:`CipslaIcmpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, self).__init__()
self.yang_name = "cipslaIcmpEchoTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaIcmpEchoTmplEntry", ("cipslaicmpechotmplentry", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry))])
self._leafs = OrderedDict()
self.cipslaicmpechotmplentry = YList(self)
self._segment_path = lambda: "cipslaIcmpEchoTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, [], name, value)
class CipslaIcmpEchoTmplEntry(Entity):
"""
A row entry representing an IPSLA ICMP echo template.
.. attribute:: cipslaicmpechotmplname (key)
This field is used to specify the ICMP echo template name
**type**\: str
**length:** 1..64
.. attribute:: cipslaicmpechotmpldescription
This field is used to provide description for the ICMP echo template
**type**\: str
**length:** 0..128
.. attribute:: cipslaicmpechotmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaIcmpEchoTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslaicmpechotmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslaicmpechotmpltimeout
Specifies the duration to wait for a IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslaicmpechotmplreqdatasize
This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' IP SLA request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value
**type**\: int
**range:** 0..16384
**units**\: octets
.. attribute:: cipslaicmpechotmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslaicmpechotmplvrfname
This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing table for this operation
**type**\: str
**length:** 0..32
.. attribute:: cipslaicmpechotmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaIcmpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslaicmpechotmplhistbuckets
The maximum number of history buckets to record. This value is set to the number of operations to keep per lifetime. After cipslaIcmpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaIcmpEchoTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslaicmpechotmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaIcmpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaIcmpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter>`
.. attribute:: cipslaicmpechotmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslaicmpechotmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaIcmpEchoTmplStatsNumDistBuckets will be kept. The last cipslaIcmpEchoTmplStatsNumDistBucket will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslaicmpechotmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaIcmpEchoTmplDistBuckets = 5 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaIcmpEchoTmplDistBuckets = 1 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaIcmpEchoTmplDistInterval does not apply when cipslaIcmpEchoTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslaicmpechotmplrowstatus
The status of the conceptual ICMP echo template control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, self).__init__()
self.yang_name = "cipslaIcmpEchoTmplEntry"
self.yang_parent_name = "cipslaIcmpEchoTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslaicmpechotmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslaicmpechotmplname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplName'), ['str'])),
('cipslaicmpechotmpldescription', (YLeaf(YType.str, 'cipslaIcmpEchoTmplDescription'), ['str'])),
('cipslaicmpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslaicmpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaIcmpEchoTmplSrcAddr'), ['str'])),
('cipslaicmpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTimeOut'), ['int'])),
('cipslaicmpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaIcmpEchoTmplVerifyData'), ['bool'])),
('cipslaicmpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplReqDataSize'), ['int'])),
('cipslaicmpechotmpltos', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTOS'), ['int'])),
('cipslaicmpechotmplvrfname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplVrfName'), ['str'])),
('cipslaicmpechotmplthreshold', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplThreshold'), ['int'])),
('cipslaicmpechotmplhistlives', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplHistLives'), ['int'])),
('cipslaicmpechotmplhistbuckets', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplHistBuckets'), ['int'])),
('cipslaicmpechotmplhistfilter', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter')])),
('cipslaicmpechotmplstatshours', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplStatsHours'), ['int'])),
('cipslaicmpechotmpldistbuckets', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplDistBuckets'), ['int'])),
('cipslaicmpechotmpldistinterval', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplDistInterval'), ['int'])),
('cipslaicmpechotmplstoragetype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslaicmpechotmplrowstatus', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslaicmpechotmplname = None
self.cipslaicmpechotmpldescription = None
self.cipslaicmpechotmplsrcaddrtype = None
self.cipslaicmpechotmplsrcaddr = None
self.cipslaicmpechotmpltimeout = None
self.cipslaicmpechotmplverifydata = None
self.cipslaicmpechotmplreqdatasize = None
self.cipslaicmpechotmpltos = None
self.cipslaicmpechotmplvrfname = None
self.cipslaicmpechotmplthreshold = None
self.cipslaicmpechotmplhistlives = None
self.cipslaicmpechotmplhistbuckets = None
self.cipslaicmpechotmplhistfilter = None
self.cipslaicmpechotmplstatshours = None
self.cipslaicmpechotmpldistbuckets = None
self.cipslaicmpechotmpldistinterval = None
self.cipslaicmpechotmplstoragetype = None
self.cipslaicmpechotmplrowstatus = None
self._segment_path = lambda: "cipslaIcmpEchoTmplEntry" + "[cipslaIcmpEchoTmplName='" + str(self.cipslaicmpechotmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaIcmpEchoTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, ['cipslaicmpechotmplname', 'cipslaicmpechotmpldescription', 'cipslaicmpechotmplsrcaddrtype', 'cipslaicmpechotmplsrcaddr', 'cipslaicmpechotmpltimeout', 'cipslaicmpechotmplverifydata', 'cipslaicmpechotmplreqdatasize', 'cipslaicmpechotmpltos', 'cipslaicmpechotmplvrfname', 'cipslaicmpechotmplthreshold', 'cipslaicmpechotmplhistlives', 'cipslaicmpechotmplhistbuckets', 'cipslaicmpechotmplhistfilter', 'cipslaicmpechotmplstatshours', 'cipslaicmpechotmpldistbuckets', 'cipslaicmpechotmpldistinterval', 'cipslaicmpechotmplstoragetype', 'cipslaicmpechotmplrowstatus'], name, value)
class CipslaIcmpEchoTmplHistFilter(Enum):
"""
CipslaIcmpEchoTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaIcmpEchoTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
class CipslaUdpEchoTmplTable(Entity):
"""
A table that contains UDP echo template specific definitions.
.. attribute:: cipslaudpechotmplentry
A row entry representing an IPSLA UDP echo template
**type**\: list of :py:class:`CipslaUdpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable, self).__init__()
self.yang_name = "cipslaUdpEchoTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaUdpEchoTmplEntry", ("cipslaudpechotmplentry", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry))])
self._leafs = OrderedDict()
self.cipslaudpechotmplentry = YList(self)
self._segment_path = lambda: "cipslaUdpEchoTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable, [], name, value)
class CipslaUdpEchoTmplEntry(Entity):
"""
A row entry representing an IPSLA UDP echo template.
.. attribute:: cipslaudpechotmplname (key)
A string which specifies the UDP echo template name
**type**\: str
**length:** 1..64
.. attribute:: cipslaudpechotmpldescription
A string which provides description to the UDP echo template
**type**\: str
**length:** 0..128
.. attribute:: cipslaudpechotmplcontrolenable
If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router
**type**\: bool
.. attribute:: cipslaudpechotmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaUdpEchoTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslaudpechotmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslaudpechotmplsrcport
This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system
**type**\: int
**range:** 0..65535
.. attribute:: cipslaudpechotmpltimeout
Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslaudpechotmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslaudpechotmplreqdatasize
This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' RTT request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value
**type**\: int
**range:** 4..1500
**units**\: octets
.. attribute:: cipslaudpechotmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslaudpechotmplvrfname
This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing Table for this operation
**type**\: str
**length:** 0..32
.. attribute:: cipslaudpechotmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaUdpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslaudpechotmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslaudpechotmplhistbuckets
The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaUdpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaUdpEchoTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslaudpechotmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaUdpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaUdpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter>`
.. attribute:: cipslaudpechotmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslaudpechotmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaUdpEchoTmplStatsNumDistBuckets will be kept. The last cipslaUdpEchoTmplStatsNumDistBuckets will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslaudpechotmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaUdpEchoTmplDistBuckets = 5 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaUdpEchoTmplDistBuckets = 1 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaUdpEchoTmplDistInterval does not apply when cipslaUdpEchoTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslaudpechotmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslaudpechotmplrowstatus
The status of the conceptual UDP echo template control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry, self).__init__()
self.yang_name = "cipslaUdpEchoTmplEntry"
self.yang_parent_name = "cipslaUdpEchoTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslaudpechotmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslaudpechotmplname', (YLeaf(YType.str, 'cipslaUdpEchoTmplName'), ['str'])),
('cipslaudpechotmpldescription', (YLeaf(YType.str, 'cipslaUdpEchoTmplDescription'), ['str'])),
('cipslaudpechotmplcontrolenable', (YLeaf(YType.boolean, 'cipslaUdpEchoTmplControlEnable'), ['bool'])),
('cipslaudpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslaudpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaUdpEchoTmplSrcAddr'), ['str'])),
('cipslaudpechotmplsrcport', (YLeaf(YType.uint16, 'cipslaUdpEchoTmplSrcPort'), ['int'])),
('cipslaudpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplTimeOut'), ['int'])),
('cipslaudpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaUdpEchoTmplVerifyData'), ['bool'])),
('cipslaudpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplReqDataSize'), ['int'])),
('cipslaudpechotmpltos', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplTOS'), ['int'])),
('cipslaudpechotmplvrfname', (YLeaf(YType.str, 'cipslaUdpEchoTmplVrfName'), ['str'])),
('cipslaudpechotmplthreshold', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplThreshold'), ['int'])),
('cipslaudpechotmplhistlives', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplHistLives'), ['int'])),
('cipslaudpechotmplhistbuckets', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplHistBuckets'), ['int'])),
('cipslaudpechotmplhistfilter', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter')])),
('cipslaudpechotmplstatshours', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplStatsHours'), ['int'])),
('cipslaudpechotmpldistbuckets', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplDistBuckets'), ['int'])),
('cipslaudpechotmpldistinterval', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplDistInterval'), ['int'])),
('cipslaudpechotmplstoragetype', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslaudpechotmplrowstatus', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslaudpechotmplname = None
self.cipslaudpechotmpldescription = None
self.cipslaudpechotmplcontrolenable = None
self.cipslaudpechotmplsrcaddrtype = None
self.cipslaudpechotmplsrcaddr = None
self.cipslaudpechotmplsrcport = None
self.cipslaudpechotmpltimeout = None
self.cipslaudpechotmplverifydata = None
self.cipslaudpechotmplreqdatasize = None
self.cipslaudpechotmpltos = None
self.cipslaudpechotmplvrfname = None
self.cipslaudpechotmplthreshold = None
self.cipslaudpechotmplhistlives = None
self.cipslaudpechotmplhistbuckets = None
self.cipslaudpechotmplhistfilter = None
self.cipslaudpechotmplstatshours = None
self.cipslaudpechotmpldistbuckets = None
self.cipslaudpechotmpldistinterval = None
self.cipslaudpechotmplstoragetype = None
self.cipslaudpechotmplrowstatus = None
self._segment_path = lambda: "cipslaUdpEchoTmplEntry" + "[cipslaUdpEchoTmplName='" + str(self.cipslaudpechotmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaUdpEchoTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry, ['cipslaudpechotmplname', 'cipslaudpechotmpldescription', 'cipslaudpechotmplcontrolenable', 'cipslaudpechotmplsrcaddrtype', 'cipslaudpechotmplsrcaddr', 'cipslaudpechotmplsrcport', 'cipslaudpechotmpltimeout', 'cipslaudpechotmplverifydata', 'cipslaudpechotmplreqdatasize', 'cipslaudpechotmpltos', 'cipslaudpechotmplvrfname', 'cipslaudpechotmplthreshold', 'cipslaudpechotmplhistlives', 'cipslaudpechotmplhistbuckets', 'cipslaudpechotmplhistfilter', 'cipslaudpechotmplstatshours', 'cipslaudpechotmpldistbuckets', 'cipslaudpechotmpldistinterval', 'cipslaudpechotmplstoragetype', 'cipslaudpechotmplrowstatus'], name, value)
class CipslaUdpEchoTmplHistFilter(Enum):
"""
CipslaUdpEchoTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaUdpEchoTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
class CipslaTcpConnTmplTable(Entity):
"""
A table that contains TCP connect template specific definitions.
.. attribute:: cipslatcpconntmplentry
A row entry representing an IPSLA TCP connect template
**type**\: list of :py:class:`CipslaTcpConnTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable, self).__init__()
self.yang_name = "cipslaTcpConnTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaTcpConnTmplEntry", ("cipslatcpconntmplentry", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry))])
self._leafs = OrderedDict()
self.cipslatcpconntmplentry = YList(self)
self._segment_path = lambda: "cipslaTcpConnTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable, [], name, value)
class CipslaTcpConnTmplEntry(Entity):
"""
A row entry representing an IPSLA TCP connect template.
.. attribute:: cipslatcpconntmplname (key)
A string which specifies the TCP connect template name
**type**\: str
**length:** 1..64
.. attribute:: cipslatcpconntmpldescription
A string which provides description for the TCP connect template
**type**\: str
**length:** 0..128
.. attribute:: cipslatcpconntmplcontrolenable
If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router
**type**\: bool
.. attribute:: cipslatcpconntmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaTcpConnTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslatcpconntmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslatcpconntmplsrcport
This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system
**type**\: int
**range:** 0..65535
.. attribute:: cipslatcpconntmpltimeout
Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslatcpconntmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslatcpconntmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslatcpconntmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaTcpConnTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslatcpconntmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslatcpconntmplhistbuckets
The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaTcpConnTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaTcpConnTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslatcpconntmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaTcpConnTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaTcpConnTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter>`
.. attribute:: cipslatcpconntmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslatcpconntmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaTcpConnTmplDistBuckets will be kept. The last cipslaTcpConnTmplDistBuckets will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslatcpconntmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaTcpConnTmplDistBuckets = 5 buckets cipslaTcpConnTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaTcpConnTmplDistBuckets = 1 buckets cipslaTcpConnTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaTcpConnTmplDistInterval does not apply when cipslaTcpConnTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslatcpconntmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslatcpconntmplrowstatus
The status of the conceptual tcp connect control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry, self).__init__()
self.yang_name = "cipslaTcpConnTmplEntry"
self.yang_parent_name = "cipslaTcpConnTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslatcpconntmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslatcpconntmplname', (YLeaf(YType.str, 'cipslaTcpConnTmplName'), ['str'])),
('cipslatcpconntmpldescription', (YLeaf(YType.str, 'cipslaTcpConnTmplDescription'), ['str'])),
('cipslatcpconntmplcontrolenable', (YLeaf(YType.boolean, 'cipslaTcpConnTmplControlEnable'), ['bool'])),
('cipslatcpconntmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslatcpconntmplsrcaddr', (YLeaf(YType.str, 'cipslaTcpConnTmplSrcAddr'), ['str'])),
('cipslatcpconntmplsrcport', (YLeaf(YType.uint16, 'cipslaTcpConnTmplSrcPort'), ['int'])),
('cipslatcpconntmpltimeout', (YLeaf(YType.uint32, 'cipslaTcpConnTmplTimeOut'), ['int'])),
('cipslatcpconntmplverifydata', (YLeaf(YType.boolean, 'cipslaTcpConnTmplVerifyData'), ['bool'])),
('cipslatcpconntmpltos', (YLeaf(YType.uint32, 'cipslaTcpConnTmplTOS'), ['int'])),
('cipslatcpconntmplthreshold', (YLeaf(YType.uint32, 'cipslaTcpConnTmplThreshold'), ['int'])),
('cipslatcpconntmplhistlives', (YLeaf(YType.uint32, 'cipslaTcpConnTmplHistLives'), ['int'])),
('cipslatcpconntmplhistbuckets', (YLeaf(YType.uint32, 'cipslaTcpConnTmplHistBuckets'), ['int'])),
('cipslatcpconntmplhistfilter', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter')])),
('cipslatcpconntmplstatshours', (YLeaf(YType.uint32, 'cipslaTcpConnTmplStatsHours'), ['int'])),
('cipslatcpconntmpldistbuckets', (YLeaf(YType.uint32, 'cipslaTcpConnTmplDistBuckets'), ['int'])),
('cipslatcpconntmpldistinterval', (YLeaf(YType.uint32, 'cipslaTcpConnTmplDistInterval'), ['int'])),
('cipslatcpconntmplstoragetype', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslatcpconntmplrowstatus', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslatcpconntmplname = None
self.cipslatcpconntmpldescription = None
self.cipslatcpconntmplcontrolenable = None
self.cipslatcpconntmplsrcaddrtype = None
self.cipslatcpconntmplsrcaddr = None
self.cipslatcpconntmplsrcport = None
self.cipslatcpconntmpltimeout = None
self.cipslatcpconntmplverifydata = None
self.cipslatcpconntmpltos = None
self.cipslatcpconntmplthreshold = None
self.cipslatcpconntmplhistlives = None
self.cipslatcpconntmplhistbuckets = None
self.cipslatcpconntmplhistfilter = None
self.cipslatcpconntmplstatshours = None
self.cipslatcpconntmpldistbuckets = None
self.cipslatcpconntmpldistinterval = None
self.cipslatcpconntmplstoragetype = None
self.cipslatcpconntmplrowstatus = None
self._segment_path = lambda: "cipslaTcpConnTmplEntry" + "[cipslaTcpConnTmplName='" + str(self.cipslatcpconntmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaTcpConnTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry, ['cipslatcpconntmplname', 'cipslatcpconntmpldescription', 'cipslatcpconntmplcontrolenable', 'cipslatcpconntmplsrcaddrtype', 'cipslatcpconntmplsrcaddr', 'cipslatcpconntmplsrcport', 'cipslatcpconntmpltimeout', 'cipslatcpconntmplverifydata', 'cipslatcpconntmpltos', 'cipslatcpconntmplthreshold', 'cipslatcpconntmplhistlives', 'cipslatcpconntmplhistbuckets', 'cipslatcpconntmplhistfilter', 'cipslatcpconntmplstatshours', 'cipslatcpconntmpldistbuckets', 'cipslatcpconntmpldistinterval', 'cipslatcpconntmplstoragetype', 'cipslatcpconntmplrowstatus'], name, value)
class CipslaTcpConnTmplHistFilter(Enum):
"""
CipslaTcpConnTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaTcpConnTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
def clone_ptr(self):
self._top_entity = CISCOIPSLAECHOMIB()
return self._top_entity
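# --- Illustrative usage sketch (editor's addition, not part of the generated bindings) ---
# A minimal, hypothetical example of filling in one TCP connect template entry with these
# bindings. The template name and leaf values are assumptions chosen purely for illustration,
# and pushing the object to a device (e.g. via ydk's CRUD service) is omitted.
#
#   from ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB import CISCOIPSLAECHOMIB
#
#   mib = CISCOIPSLAECHOMIB()
#   entry = CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry()
#   entry.cipslatcpconntmplname = 'tcp-tmpl-1'            # key leaf
#   entry.cipslatcpconntmplhistbuckets = 15               # history buckets, range 1..60
#   entry.cipslatcpconntmplhistfilter = (CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable
#                                        .CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter.all)
#   mib.cipslatcpconntmpltable.cipslatcpconntmplentry.append(entry)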
| [((67, 30, 67, 340), 'collections.OrderedDict', 'OrderedDict', ({(67, 42, 67, 339): "[('cipslaIcmpEchoTmplTable', ('cipslaicmpechotmpltable', CISCOIPSLAECHOMIB.\n CipslaIcmpEchoTmplTable)), ('cipslaUdpEchoTmplTable', (\n 'cipslaudpechotmpltable', CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable)), (\n 'cipslaTcpConnTmplTable', ('cipslatcpconntmpltable', CISCOIPSLAECHOMIB.\n CipslaTcpConnTmplTable))]"}, {}), "([('cipslaIcmpEchoTmplTable', ('cipslaicmpechotmpltable',\n CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable)), ('cipslaUdpEchoTmplTable',\n ('cipslaudpechotmpltable', CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable)),\n ('cipslaTcpConnTmplTable', ('cipslatcpconntmpltable', CISCOIPSLAECHOMIB\n .CipslaTcpConnTmplTable))])", False, 'from collections import OrderedDict\n'), ((68, 22, 68, 35), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((112, 34, 112, 172), 'collections.OrderedDict', 'OrderedDict', ({(112, 46, 112, 171): "[('cipslaIcmpEchoTmplEntry', ('cipslaicmpechotmplentry', CISCOIPSLAECHOMIB.\n CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry))]"}, {}), "([('cipslaIcmpEchoTmplEntry', ('cipslaicmpechotmplentry',\n CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry))])", False, 'from collections import OrderedDict\n'), ((113, 26, 113, 39), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((115, 43, 115, 54), 'ydk.types.YList', 'YList', ({(115, 49, 115, 53): 'self'}, {}), '(self)', False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((383, 34, 383, 168), 'collections.OrderedDict', 'OrderedDict', ({(383, 46, 383, 167): "[('cipslaUdpEchoTmplEntry', ('cipslaudpechotmplentry', CISCOIPSLAECHOMIB.\n CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry))]"}, {}), "([('cipslaUdpEchoTmplEntry', ('cipslaudpechotmplentry',\n CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry))])", False, 'from collections import OrderedDict\n'), ((384, 26, 384, 39), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((386, 42, 386, 53), 'ydk.types.YList', 'YList', ({(386, 48, 386, 52): 'self'}, {}), '(self)', False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((670, 34, 670, 168), 'collections.OrderedDict', 'OrderedDict', ({(670, 46, 670, 167): "[('cipslaTcpConnTmplEntry', ('cipslatcpconntmplentry', CISCOIPSLAECHOMIB.\n CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry))]"}, {}), "([('cipslaTcpConnTmplEntry', ('cipslatcpconntmplentry',\n CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry))])", False, 'from collections import OrderedDict\n'), ((671, 26, 671, 39), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((673, 42, 673, 53), 'ydk.types.YList', 'YList', ({(673, 48, 673, 52): 'self'}, {}), '(self)', False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((269, 38, 269, 53), 'collections.OrderedDict', 'OrderedDict', ({(269, 50, 269, 52): '[]'}, {}), '([])', False, 'from collections import OrderedDict\n'), ((349, 23, 349, 44), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(349, 34, 349, 35): '1', (349, 37, 349, 43): '"""none"""'}, {}), "(1, 'none')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, 
YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((351, 22, 351, 42), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(351, 33, 351, 34): '2', (351, 36, 351, 41): '"""all"""'}, {}), "(2, 'all')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((353, 32, 353, 62), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(353, 43, 353, 44): '3', (353, 46, 353, 61): '"""overThreshold"""'}, {}), "(3, 'overThreshold')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((355, 27, 355, 52), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(355, 38, 355, 39): '4', (355, 41, 355, 51): '"""failures"""'}, {}), "(4, 'failures')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((552, 38, 552, 53), 'collections.OrderedDict', 'OrderedDict', ({(552, 50, 552, 52): '[]'}, {}), '([])', False, 'from collections import OrderedDict\n'), ((636, 23, 636, 44), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(636, 34, 636, 35): '1', (636, 37, 636, 43): '"""none"""'}, {}), "(1, 'none')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((638, 22, 638, 42), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(638, 33, 638, 34): '2', (638, 36, 638, 41): '"""all"""'}, {}), "(2, 'all')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((640, 32, 640, 62), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(640, 43, 640, 44): '3', (640, 46, 640, 61): '"""overThreshold"""'}, {}), "(3, 'overThreshold')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((642, 27, 642, 52), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(642, 38, 642, 39): '4', (642, 41, 642, 51): '"""failures"""'}, {}), "(4, 'failures')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((823, 38, 823, 53), 'collections.OrderedDict', 'OrderedDict', ({(823, 50, 823, 52): '[]'}, {}), '([])', False, 'from collections import OrderedDict\n'), ((903, 23, 903, 44), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(903, 34, 903, 35): '1', (903, 37, 903, 43): '"""none"""'}, {}), "(1, 'none')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((905, 22, 905, 42), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(905, 33, 905, 34): '2', (905, 36, 905, 41): '"""all"""'}, {}), "(2, 'all')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((907, 32, 907, 62), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(907, 43, 907, 44): '3', (907, 46, 907, 61): '"""overThreshold"""'}, {}), "(3, 'overThreshold')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((909, 27, 909, 52), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', ({(909, 38, 909, 39): '4', (909, 41, 909, 51): '"""failures"""'}, {}), "(4, 'failures')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((271, 48, 271, 
90), 'ydk.types.YLeaf', 'YLeaf', ({(271, 54, 271, 63): 'YType.str', (271, 65, 271, 89): '"""cipslaIcmpEchoTmplName"""'}, {}), "(YType.str, 'cipslaIcmpEchoTmplName')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((272, 55, 272, 104), 'ydk.types.YLeaf', 'YLeaf', ({(272, 61, 272, 70): 'YType.str', (272, 72, 272, 103): '"""cipslaIcmpEchoTmplDescription"""'}, {}), "(YType.str, 'cipslaIcmpEchoTmplDescription')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((273, 55, 273, 112), 'ydk.types.YLeaf', 'YLeaf', ({(273, 61, 273, 78): 'YType.enumeration', (273, 80, 273, 111): '"""cipslaIcmpEchoTmplSrcAddrType"""'}, {}), "(YType.enumeration, 'cipslaIcmpEchoTmplSrcAddrType')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((274, 51, 274, 96), 'ydk.types.YLeaf', 'YLeaf', ({(274, 57, 274, 66): 'YType.str', (274, 68, 274, 95): '"""cipslaIcmpEchoTmplSrcAddr"""'}, {}), "(YType.str, 'cipslaIcmpEchoTmplSrcAddr')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((275, 51, 275, 99), 'ydk.types.YLeaf', 'YLeaf', ({(275, 57, 275, 69): 'YType.uint32', (275, 71, 275, 98): '"""cipslaIcmpEchoTmplTimeOut"""'}, {}), "(YType.uint32, 'cipslaIcmpEchoTmplTimeOut')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((276, 54, 276, 106), 'ydk.types.YLeaf', 'YLeaf', ({(276, 60, 276, 73): 'YType.boolean', (276, 75, 276, 105): '"""cipslaIcmpEchoTmplVerifyData"""'}, {}), "(YType.boolean, 'cipslaIcmpEchoTmplVerifyData')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((277, 55, 277, 107), 'ydk.types.YLeaf', 'YLeaf', ({(277, 61, 277, 73): 'YType.uint32', (277, 75, 277, 106): '"""cipslaIcmpEchoTmplReqDataSize"""'}, {}), "(YType.uint32, 'cipslaIcmpEchoTmplReqDataSize')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((278, 47, 278, 91), 'ydk.types.YLeaf', 'YLeaf', ({(278, 53, 278, 65): 'YType.uint32', (278, 67, 278, 90): '"""cipslaIcmpEchoTmplTOS"""'}, {}), "(YType.uint32, 'cipslaIcmpEchoTmplTOS')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((279, 51, 279, 96), 'ydk.types.YLeaf', 'YLeaf', ({(279, 57, 279, 66): 'YType.str', (279, 68, 279, 95): '"""cipslaIcmpEchoTmplVrfName"""'}, {}), "(YType.str, 'cipslaIcmpEchoTmplVrfName')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((280, 53, 280, 103), 'ydk.types.YLeaf', 'YLeaf', ({(280, 59, 280, 71): 'YType.uint32', (280, 73, 280, 102): '"""cipslaIcmpEchoTmplThreshold"""'}, {}), "(YType.uint32, 'cipslaIcmpEchoTmplThreshold')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((281, 53, 281, 103), 'ydk.types.YLeaf', 'YLeaf', ({(281, 59, 281, 71): 'YType.uint32', (281, 73, 281, 102): '"""cipslaIcmpEchoTmplHistLives"""'}, {}), "(YType.uint32, 'cipslaIcmpEchoTmplHistLives')", 
False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((282, 55, 282, 107), 'ydk.types.YLeaf', 'YLeaf', ({(282, 61, 282, 73): 'YType.uint32', (282, 75, 282, 106): '"""cipslaIcmpEchoTmplHistBuckets"""'}, {}), "(YType.uint32, 'cipslaIcmpEchoTmplHistBuckets')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((283, 54, 283, 110), 'ydk.types.YLeaf', 'YLeaf', ({(283, 60, 283, 77): 'YType.enumeration', (283, 79, 283, 109): '"""cipslaIcmpEchoTmplHistFilter"""'}, {}), "(YType.enumeration, 'cipslaIcmpEchoTmplHistFilter')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((284, 54, 284, 105), 'ydk.types.YLeaf', 'YLeaf', ({(284, 60, 284, 72): 'YType.uint32', (284, 74, 284, 104): '"""cipslaIcmpEchoTmplStatsHours"""'}, {}), "(YType.uint32, 'cipslaIcmpEchoTmplStatsHours')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((285, 55, 285, 107), 'ydk.types.YLeaf', 'YLeaf', ({(285, 61, 285, 73): 'YType.uint32', (285, 75, 285, 106): '"""cipslaIcmpEchoTmplDistBuckets"""'}, {}), "(YType.uint32, 'cipslaIcmpEchoTmplDistBuckets')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((286, 56, 286, 109), 'ydk.types.YLeaf', 'YLeaf', ({(286, 62, 286, 74): 'YType.uint32', (286, 76, 286, 108): '"""cipslaIcmpEchoTmplDistInterval"""'}, {}), "(YType.uint32, 'cipslaIcmpEchoTmplDistInterval')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((287, 55, 287, 112), 'ydk.types.YLeaf', 'YLeaf', ({(287, 61, 287, 78): 'YType.enumeration', (287, 80, 287, 111): '"""cipslaIcmpEchoTmplStorageType"""'}, {}), "(YType.enumeration, 'cipslaIcmpEchoTmplStorageType')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((288, 53, 288, 108), 'ydk.types.YLeaf', 'YLeaf', ({(288, 59, 288, 76): 'YType.enumeration', (288, 78, 288, 107): '"""cipslaIcmpEchoTmplRowStatus"""'}, {}), "(YType.enumeration, 'cipslaIcmpEchoTmplRowStatus')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((554, 47, 554, 88), 'ydk.types.YLeaf', 'YLeaf', ({(554, 53, 554, 62): 'YType.str', (554, 64, 554, 87): '"""cipslaUdpEchoTmplName"""'}, {}), "(YType.str, 'cipslaUdpEchoTmplName')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((555, 54, 555, 102), 'ydk.types.YLeaf', 'YLeaf', ({(555, 60, 555, 69): 'YType.str', (555, 71, 555, 101): '"""cipslaUdpEchoTmplDescription"""'}, {}), "(YType.str, 'cipslaUdpEchoTmplDescription')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((556, 56, 556, 110), 'ydk.types.YLeaf', 'YLeaf', ({(556, 62, 556, 75): 'YType.boolean', (556, 77, 556, 109): '"""cipslaUdpEchoTmplControlEnable"""'}, {}), "(YType.boolean, 'cipslaUdpEchoTmplControlEnable')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, 
LeafDataList, Bits, Empty, Decimal64\n'), ((557, 54, 557, 110), 'ydk.types.YLeaf', 'YLeaf', ({(557, 60, 557, 77): 'YType.enumeration', (557, 79, 557, 109): '"""cipslaUdpEchoTmplSrcAddrType"""'}, {}), "(YType.enumeration, 'cipslaUdpEchoTmplSrcAddrType')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((558, 50, 558, 94), 'ydk.types.YLeaf', 'YLeaf', ({(558, 56, 558, 65): 'YType.str', (558, 67, 558, 93): '"""cipslaUdpEchoTmplSrcAddr"""'}, {}), "(YType.str, 'cipslaUdpEchoTmplSrcAddr')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((559, 50, 559, 97), 'ydk.types.YLeaf', 'YLeaf', ({(559, 56, 559, 68): 'YType.uint16', (559, 70, 559, 96): '"""cipslaUdpEchoTmplSrcPort"""'}, {}), "(YType.uint16, 'cipslaUdpEchoTmplSrcPort')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((560, 50, 560, 97), 'ydk.types.YLeaf', 'YLeaf', ({(560, 56, 560, 68): 'YType.uint32', (560, 70, 560, 96): '"""cipslaUdpEchoTmplTimeOut"""'}, {}), "(YType.uint32, 'cipslaUdpEchoTmplTimeOut')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((561, 53, 561, 104), 'ydk.types.YLeaf', 'YLeaf', ({(561, 59, 561, 72): 'YType.boolean', (561, 74, 561, 103): '"""cipslaUdpEchoTmplVerifyData"""'}, {}), "(YType.boolean, 'cipslaUdpEchoTmplVerifyData')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((562, 54, 562, 105), 'ydk.types.YLeaf', 'YLeaf', ({(562, 60, 562, 72): 'YType.uint32', (562, 74, 562, 104): '"""cipslaUdpEchoTmplReqDataSize"""'}, {}), "(YType.uint32, 'cipslaUdpEchoTmplReqDataSize')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((563, 46, 563, 89), 'ydk.types.YLeaf', 'YLeaf', ({(563, 52, 563, 64): 'YType.uint32', (563, 66, 563, 88): '"""cipslaUdpEchoTmplTOS"""'}, {}), "(YType.uint32, 'cipslaUdpEchoTmplTOS')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((564, 50, 564, 94), 'ydk.types.YLeaf', 'YLeaf', ({(564, 56, 564, 65): 'YType.str', (564, 67, 564, 93): '"""cipslaUdpEchoTmplVrfName"""'}, {}), "(YType.str, 'cipslaUdpEchoTmplVrfName')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((565, 52, 565, 101), 'ydk.types.YLeaf', 'YLeaf', ({(565, 58, 565, 70): 'YType.uint32', (565, 72, 565, 100): '"""cipslaUdpEchoTmplThreshold"""'}, {}), "(YType.uint32, 'cipslaUdpEchoTmplThreshold')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((566, 52, 566, 101), 'ydk.types.YLeaf', 'YLeaf', ({(566, 58, 566, 70): 'YType.uint32', (566, 72, 566, 100): '"""cipslaUdpEchoTmplHistLives"""'}, {}), "(YType.uint32, 'cipslaUdpEchoTmplHistLives')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((567, 54, 567, 105), 'ydk.types.YLeaf', 'YLeaf', ({(567, 60, 567, 72): 'YType.uint32', (567, 74, 567, 104): '"""cipslaUdpEchoTmplHistBuckets"""'}, 
{}), "(YType.uint32, 'cipslaUdpEchoTmplHistBuckets')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((568, 53, 568, 108), 'ydk.types.YLeaf', 'YLeaf', ({(568, 59, 568, 76): 'YType.enumeration', (568, 78, 568, 107): '"""cipslaUdpEchoTmplHistFilter"""'}, {}), "(YType.enumeration, 'cipslaUdpEchoTmplHistFilter')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((569, 53, 569, 103), 'ydk.types.YLeaf', 'YLeaf', ({(569, 59, 569, 71): 'YType.uint32', (569, 73, 569, 102): '"""cipslaUdpEchoTmplStatsHours"""'}, {}), "(YType.uint32, 'cipslaUdpEchoTmplStatsHours')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((570, 54, 570, 105), 'ydk.types.YLeaf', 'YLeaf', ({(570, 60, 570, 72): 'YType.uint32', (570, 74, 570, 104): '"""cipslaUdpEchoTmplDistBuckets"""'}, {}), "(YType.uint32, 'cipslaUdpEchoTmplDistBuckets')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((571, 55, 571, 107), 'ydk.types.YLeaf', 'YLeaf', ({(571, 61, 571, 73): 'YType.uint32', (571, 75, 571, 106): '"""cipslaUdpEchoTmplDistInterval"""'}, {}), "(YType.uint32, 'cipslaUdpEchoTmplDistInterval')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((572, 54, 572, 110), 'ydk.types.YLeaf', 'YLeaf', ({(572, 60, 572, 77): 'YType.enumeration', (572, 79, 572, 109): '"""cipslaUdpEchoTmplStorageType"""'}, {}), "(YType.enumeration, 'cipslaUdpEchoTmplStorageType')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((573, 52, 573, 106), 'ydk.types.YLeaf', 'YLeaf', ({(573, 58, 573, 75): 'YType.enumeration', (573, 77, 573, 105): '"""cipslaUdpEchoTmplRowStatus"""'}, {}), "(YType.enumeration, 'cipslaUdpEchoTmplRowStatus')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((825, 47, 825, 88), 'ydk.types.YLeaf', 'YLeaf', ({(825, 53, 825, 62): 'YType.str', (825, 64, 825, 87): '"""cipslaTcpConnTmplName"""'}, {}), "(YType.str, 'cipslaTcpConnTmplName')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((826, 54, 826, 102), 'ydk.types.YLeaf', 'YLeaf', ({(826, 60, 826, 69): 'YType.str', (826, 71, 826, 101): '"""cipslaTcpConnTmplDescription"""'}, {}), "(YType.str, 'cipslaTcpConnTmplDescription')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((827, 56, 827, 110), 'ydk.types.YLeaf', 'YLeaf', ({(827, 62, 827, 75): 'YType.boolean', (827, 77, 827, 109): '"""cipslaTcpConnTmplControlEnable"""'}, {}), "(YType.boolean, 'cipslaTcpConnTmplControlEnable')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((828, 54, 828, 110), 'ydk.types.YLeaf', 'YLeaf', ({(828, 60, 828, 77): 'YType.enumeration', (828, 79, 828, 109): '"""cipslaTcpConnTmplSrcAddrType"""'}, {}), "(YType.enumeration, 'cipslaTcpConnTmplSrcAddrType')", False, 'from ydk.types import Entity, EntityPath, 
Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((829, 50, 829, 94), 'ydk.types.YLeaf', 'YLeaf', ({(829, 56, 829, 65): 'YType.str', (829, 67, 829, 93): '"""cipslaTcpConnTmplSrcAddr"""'}, {}), "(YType.str, 'cipslaTcpConnTmplSrcAddr')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((830, 50, 830, 97), 'ydk.types.YLeaf', 'YLeaf', ({(830, 56, 830, 68): 'YType.uint16', (830, 70, 830, 96): '"""cipslaTcpConnTmplSrcPort"""'}, {}), "(YType.uint16, 'cipslaTcpConnTmplSrcPort')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((831, 50, 831, 97), 'ydk.types.YLeaf', 'YLeaf', ({(831, 56, 831, 68): 'YType.uint32', (831, 70, 831, 96): '"""cipslaTcpConnTmplTimeOut"""'}, {}), "(YType.uint32, 'cipslaTcpConnTmplTimeOut')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((832, 53, 832, 104), 'ydk.types.YLeaf', 'YLeaf', ({(832, 59, 832, 72): 'YType.boolean', (832, 74, 832, 103): '"""cipslaTcpConnTmplVerifyData"""'}, {}), "(YType.boolean, 'cipslaTcpConnTmplVerifyData')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((833, 46, 833, 89), 'ydk.types.YLeaf', 'YLeaf', ({(833, 52, 833, 64): 'YType.uint32', (833, 66, 833, 88): '"""cipslaTcpConnTmplTOS"""'}, {}), "(YType.uint32, 'cipslaTcpConnTmplTOS')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((834, 52, 834, 101), 'ydk.types.YLeaf', 'YLeaf', ({(834, 58, 834, 70): 'YType.uint32', (834, 72, 834, 100): '"""cipslaTcpConnTmplThreshold"""'}, {}), "(YType.uint32, 'cipslaTcpConnTmplThreshold')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((835, 52, 835, 101), 'ydk.types.YLeaf', 'YLeaf', ({(835, 58, 835, 70): 'YType.uint32', (835, 72, 835, 100): '"""cipslaTcpConnTmplHistLives"""'}, {}), "(YType.uint32, 'cipslaTcpConnTmplHistLives')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((836, 54, 836, 105), 'ydk.types.YLeaf', 'YLeaf', ({(836, 60, 836, 72): 'YType.uint32', (836, 74, 836, 104): '"""cipslaTcpConnTmplHistBuckets"""'}, {}), "(YType.uint32, 'cipslaTcpConnTmplHistBuckets')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((837, 53, 837, 108), 'ydk.types.YLeaf', 'YLeaf', ({(837, 59, 837, 76): 'YType.enumeration', (837, 78, 837, 107): '"""cipslaTcpConnTmplHistFilter"""'}, {}), "(YType.enumeration, 'cipslaTcpConnTmplHistFilter')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((838, 53, 838, 103), 'ydk.types.YLeaf', 'YLeaf', ({(838, 59, 838, 71): 'YType.uint32', (838, 73, 838, 102): '"""cipslaTcpConnTmplStatsHours"""'}, {}), "(YType.uint32, 'cipslaTcpConnTmplStatsHours')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((839, 54, 839, 105), 'ydk.types.YLeaf', 'YLeaf', ({(839, 60, 839, 72): 'YType.uint32', 
(839, 74, 839, 104): '"""cipslaTcpConnTmplDistBuckets"""'}, {}), "(YType.uint32, 'cipslaTcpConnTmplDistBuckets')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((840, 55, 840, 107), 'ydk.types.YLeaf', 'YLeaf', ({(840, 61, 840, 73): 'YType.uint32', (840, 75, 840, 106): '"""cipslaTcpConnTmplDistInterval"""'}, {}), "(YType.uint32, 'cipslaTcpConnTmplDistInterval')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((841, 54, 841, 110), 'ydk.types.YLeaf', 'YLeaf', ({(841, 60, 841, 77): 'YType.enumeration', (841, 79, 841, 109): '"""cipslaTcpConnTmplStorageType"""'}, {}), "(YType.enumeration, 'cipslaTcpConnTmplStorageType')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((842, 52, 842, 106), 'ydk.types.YLeaf', 'YLeaf', ({(842, 58, 842, 75): 'YType.enumeration', (842, 77, 842, 105): '"""cipslaTcpConnTmplRowStatus"""'}, {}), "(YType.enumeration, 'cipslaTcpConnTmplRowStatus')", False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n')] |
tkameyama/incubator-mxnet | example/model-parallel/matrix_factorization/train.py | 47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import time
import mxnet as mx
import numpy as np
from get_data import get_movielens_iter, get_movielens_data
from model import matrix_fact_model_parallel_net
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Run model parallel version of matrix factorization",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-epoch', type=int, default=3,
help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=256,
help='number of examples per batch')
parser.add_argument('--print-every', type=int, default=100,
help='logging interval')
parser.add_argument('--factor-size', type=int, default=128,
help="the factor size of the embedding operation")
parser.add_argument('--num-gpus', type=int, default=2,
help="number of gpus to use")
MOVIELENS = {
'dataset': 'ml-10m',
'train': './ml-10M100K/r1.train',
'val': './ml-10M100K/r1.test',
'max_user': 71569,
'max_movie': 65135,
}
if __name__ == '__main__':
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
# arg parser
args = parser.parse_args()
logging.info(args)
num_epoch = args.num_epoch
batch_size = args.batch_size
optimizer = 'sgd'
factor_size = args.factor_size
print_every = args.print_every
num_gpus = args.num_gpus
momentum = 0.9
learning_rate = 0.1
# prepare dataset and iterators
max_user = MOVIELENS['max_user']
max_movies = MOVIELENS['max_movie']
get_movielens_data(MOVIELENS['dataset'])
train_iter = get_movielens_iter(MOVIELENS['train'], batch_size)
val_iter = get_movielens_iter(MOVIELENS['val'], batch_size)
# construct the model
net = matrix_fact_model_parallel_net(factor_size, factor_size, max_user, max_movies)
# construct the module
# map the ctx_group attribute to the context assignment
group2ctxs={'dev1':[mx.cpu()]*num_gpus, 'dev2':[mx.gpu(i) for i in range(num_gpus)]}
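# Note (assumption based on this example's model definition): 'dev1' and 'dev2' are the
# ctx_group names tagged onto the symbols inside matrix_fact_model_parallel_net, so the
# large embedding lookups (group 'dev1') stay on CPU while the rest of the network
# (group 'dev2') is spread across the GPUs.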
# Creating a module by passing group2ctxs attribute which maps
# the ctx_group attribute to the context assignment
mod = mx.module.Module(symbol=net, context=[mx.cpu()]*num_gpus, data_names=['user', 'item'],
label_names=['score'], group2ctxs=group2ctxs)
# the initializer used to initialize the parameters
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
# the parameters for the optimizer constructor
optimizer_params = {
'learning_rate': learning_rate,
'wd': 1e-4,
'momentum': momentum,
'rescale_grad': 1.0/batch_size}
# use MSE as the metric
metric = mx.gluon.metric.create(['MSE'])
speedometer = mx.callback.Speedometer(batch_size, print_every)
# start training
mod.fit(train_iter,
val_iter,
eval_metric = metric,
num_epoch = num_epoch,
optimizer = optimizer,
optimizer_params = optimizer_params,
initializer = initializer,
batch_end_callback = speedometer)
| [((27, 0, 27, 40), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((29, 9, 30, 88), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((52, 4, 52, 56), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((56, 4, 56, 22), 'logging.info', 'logging.info', ({(56, 17, 56, 21): 'args'}, {}), '(args)', False, 'import logging\n'), ((70, 4, 70, 44), 'get_data.get_movielens_data', 'get_movielens_data', ({(70, 23, 70, 43): "MOVIELENS['dataset']"}, {}), "(MOVIELENS['dataset'])", False, 'from get_data import get_movielens_iter, get_movielens_data\n'), ((71, 17, 71, 67), 'get_data.get_movielens_iter', 'get_movielens_iter', ({(71, 36, 71, 54): "MOVIELENS['train']", (71, 56, 71, 66): 'batch_size'}, {}), "(MOVIELENS['train'], batch_size)", False, 'from get_data import get_movielens_iter, get_movielens_data\n'), ((72, 15, 72, 63), 'get_data.get_movielens_iter', 'get_movielens_iter', ({(72, 34, 72, 50): "MOVIELENS['val']", (72, 52, 72, 62): 'batch_size'}, {}), "(MOVIELENS['val'], batch_size)", False, 'from get_data import get_movielens_iter, get_movielens_data\n'), ((75, 10, 75, 88), 'model.matrix_fact_model_parallel_net', 'matrix_fact_model_parallel_net', ({(75, 41, 75, 52): 'factor_size', (75, 54, 75, 65): 'factor_size', (75, 67, 75, 75): 'max_user', (75, 77, 75, 87): 'max_movies'}, {}), '(factor_size, factor_size, max_user, max_movies)', False, 'from model import matrix_fact_model_parallel_net\n'), ((87, 18, 87, 66), 'mxnet.init.Xavier', 'mx.init.Xavier', (), '', True, 'import mxnet as mx\n'), ((97, 13, 97, 44), 'mxnet.gluon.metric.create', 'mx.gluon.metric.create', ({(97, 36, 97, 43): "['MSE']"}, {}), "(['MSE'])", True, 'import mxnet as mx\n'), ((99, 18, 99, 66), 'mxnet.callback.Speedometer', 'mx.callback.Speedometer', ({(99, 42, 99, 52): 'batch_size', (99, 54, 99, 65): 'print_every'}, {}), '(batch_size, print_every)', True, 'import mxnet as mx\n'), ((79, 52, 79, 61), 'mxnet.gpu', 'mx.gpu', ({(79, 59, 79, 60): 'i'}, {}), '(i)', True, 'import mxnet as mx\n'), ((79, 24, 79, 32), 'mxnet.cpu', 'mx.cpu', ({}, {}), '()', True, 'import mxnet as mx\n'), ((83, 48, 83, 56), 'mxnet.cpu', 'mx.cpu', ({}, {}), '()', True, 'import mxnet as mx\n')] |
nbfigueroa/franka_interactive_controllers | scripts/libfranka_gui_gripper_run.py | 7befdd5fbaa3c7a83b931292fab39ab98754a60c | #!/usr/bin/env python3
import shlex
from tkinter import *
from tkinter import messagebox
from psutil import Popen
top = Tk()
top.title("Franka Gripper Control")
top.geometry("300x75")
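# Helper callbacks: each one spawns the libfranka_gripper_run node with a single
# positional argument (1 = open, 0 = close, as used below) and terminates the process
# once the confirmation dialog is dismissed. Note that open() shadows the builtin open,
# which is harmless here because the builtin is never used in this script.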
def open():
node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 1'))
messagebox.showinfo("Open Gripper", "Gripper Opened")
node_process.terminate()
def close():
node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 0'))
messagebox.showinfo("Close Gripper", "Gripper Closed")
node_process.terminate()
B1 = Button(top, text = "Open Gripper", command = open)
B1.place(x = 30,y = 20)
B2 = Button(top, text = "Close Gripper", command = close)
B2.place(x = 160,y = 20)
top.mainloop()
| [((14, 1, 14, 54), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', ({(14, 21, 14, 35): '"""Open Gripper"""', (14, 37, 14, 53): '"""Gripper Opened"""'}, {}), "('Open Gripper', 'Gripper Opened')", False, 'from tkinter import messagebox\n'), ((19, 1, 19, 55), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', ({(19, 21, 19, 36): '"""Close Gripper"""', (19, 38, 19, 54): '"""Gripper Closed"""'}, {}), "('Close Gripper', 'Gripper Closed')", False, 'from tkinter import messagebox\n'), ((13, 22, 13, 98), 'shlex.split', 'shlex.split', ({(13, 34, 13, 97): '"""rosrun franka_interactive_controllers libfranka_gripper_run 1"""'}, {}), "('rosrun franka_interactive_controllers libfranka_gripper_run 1')", False, 'import shlex\n'), ((18, 22, 18, 98), 'shlex.split', 'shlex.split', ({(18, 34, 18, 97): '"""rosrun franka_interactive_controllers libfranka_gripper_run 0"""'}, {}), "('rosrun franka_interactive_controllers libfranka_gripper_run 0')", False, 'import shlex\n')] |
zubtsov/competitive-programming | codeforces.com/1669F/solution.py | 919d63130144347d7f6eddcf8f5bc2afb85fddf3 | for i in range(int(input())):
number_of_candies = int(input())
candies_weights = list(map(int, input().split()))
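    # Two-pointer greedy: Alice eats candies from the left, Bob from the right, and on each
    # step whoever has eaten less total weight eats next. Every time their totals tie,
    # record how many candies have been eaten so far; the answer is the largest such
    # count reached before the pointers cross.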
bob_pos = number_of_candies - 1
alice_pos = 0
bob_current_weight = 0
alice_current_weight = 0
last_equal_candies_total_number = 0
while alice_pos <= bob_pos:
if alice_current_weight <= bob_current_weight:
alice_current_weight += candies_weights[alice_pos]
alice_pos += 1
else:
bob_current_weight += candies_weights[bob_pos]
bob_pos -= 1
if alice_current_weight == bob_current_weight:
last_equal_candies_total_number = alice_pos + (number_of_candies - bob_pos - 1)
print(last_equal_candies_total_number)
| [] |
patriotemeritus/grr | client/client_build.py | bf2b9268c8b9033ab091e27584986690438bd7c3 | #!/usr/bin/env python
"""This tool builds or repacks the client binaries.
This handles invocations for the build across the supported platforms including
handling Visual Studio, pyinstaller and other packaging mechanisms.
"""
import logging
import os
import platform
import time
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.lib import build
from grr.lib import builders
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import startup
parser = flags.PARSER
# Guess which arch we should be building based on where we are running.
if platform.architecture()[0] == "32bit":
default_arch = "i386"
else:
default_arch = "amd64"
default_platform = platform.system().lower()
parser.add_argument(
"--platform", choices=["darwin", "linux", "windows"],
default=default_platform,
help="The platform to build or repack for. This will default to "
"the current platform: %s." % platform.system())
parser.add_argument(
"--arch", choices=["amd64", "i386"],
default=default_arch,
help="The architecture to build or repack for.")
# Guess which package format we should be building based on where we are
# running.
if default_platform == "linux":
distro = platform.linux_distribution()[0]
if distro in ["Ubuntu", "debian"]:
default_package = "deb"
elif distro in ["CentOS Linux", "CentOS", "centos", "redhat", "fedora"]:
default_package = "rpm"
else:
default_package = None
elif default_platform == "darwin":
default_package = "dmg"
elif default_platform == "windows":
default_package = "exe"
parser.add_argument(
"--package_format", choices=["deb", "rpm"],
default=default_package,
help="The packaging format to use when building a Linux client.")
# Initialize sub parsers and their arguments.
subparsers = parser.add_subparsers(
title="subcommands", dest="subparser_name", description="valid subcommands")
# Build arguments.
parser_build = subparsers.add_parser(
"build", help="Build a client from source.")
parser_repack = subparsers.add_parser(
"repack", help="Repack a zip file into an installer (Only useful when "
"signing).")
parser_repack.add_argument("--template", default=None,
help="The template zip file to repack.")
parser_repack.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_repack.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_repack.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_repack.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy = subparsers.add_parser(
"deploy", help="Build a deployable self installer from a package.")
parser_deploy.add_argument("--template", default=None,
help="The template zip file to deploy.")
parser_deploy.add_argument("--templatedir", default="",
help="Directory containing template zip files to "
"repack. Incompatible with --template")
parser_deploy.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_deploy.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_deploy.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_buildanddeploy = subparsers.add_parser(
"buildanddeploy",
help="Build and deploy clients for multiple labels and architectures.")
parser_buildanddeploy.add_argument("--template", default=None,
help="The template zip file to repack, if "
"none is specified we will build it.")
args = parser.parse_args()
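# Example invocations (illustrative only -- the file names and paths below are made up,
# but the subcommands and flags match the parsers defined above):
#   client_build.py build
#   client_build.py repack --template grr-client-template.zip --outputdir /tmp/installers
#   client_build.py deploy --templatedir /grr/executables --outputdir /tmp/installers
#   client_build.py buildanddeploy --template grr-client-template.zip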
def GetBuilder(context):
"""Get the appropriate builder based on the selected flags."""
try:
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
builder_obj = builders.DarwinClientBuilder
elif args.platform == "windows":
context = ["Platform:Windows"] + context
builder_obj = builders.WindowsClientBuilder
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
builder_obj = builders.LinuxClientBuilder
elif args.package_format == "rpm":
context = ["Platform:Linux", "Target:LinuxRpm"] + context
builder_obj = builders.CentosClientBuilder
else:
parser.error("Couldn't guess packaging format for: %s" %
platform.linux_distribution()[0])
else:
parser.error("Unsupported build platform: %s" % args.platform)
except AttributeError:
raise RuntimeError("Unable to build for platform %s when running "
"on current platform." % args.platform)
return builder_obj(context=context)
def GetDeployer(context):
"""Get the appropriate client deployer based on the selected flags."""
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
deployer_obj = build.DarwinClientDeployer
elif args.platform == "windows":
context = ["Platform:Windows"] + context
deployer_obj = build.WindowsClientDeployer
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
deployer_obj = build.LinuxClientDeployer
else:
context = ["Platform:Linux", "Target:LinuxRpm"] + context
deployer_obj = build.CentosClientDeployer
else:
parser.error("Unsupported build platform: %s" % args.platform)
return deployer_obj(context=context)
def TemplateInputFilename(context):
"""Build template file name from config."""
if args.templatedir:
filename = config_lib.CONFIG.Get("PyInstaller.template_filename",
context=context)
return os.path.join(args.templatedir, filename)
return None
def BuildAndDeploy(context):
"""Run build and deploy to create installers."""
# ISO 8601 date
timestamp = time.strftime("%Y-%m-%dT%H:%M:%S%z")
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
# Output directory like: 2015-02-13T21:48:47-0800/linux_amd64_deb/
spec = "_".join((args.platform, args.arch, args.package_format))
output_dir = os.path.join(config_lib.CONFIG.Get(
"ClientBuilder.executables_path", context=context), timestamp, spec)
# If we weren't passed a template, build one
if args.template:
template_path = args.template
else:
template_path = os.path.join(output_dir, config_lib.CONFIG.Get(
"PyInstaller.template_filename", context=context))
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate(output_file=template_path)
# Get the list of contexts which we should be building.
context_list = config_lib.CONFIG.Get("ClientBuilder.BuildTargets")
logging.info("Building installers for: %s", context_list)
config_orig = config_lib.CONFIG.ExportState()
deployed_list = []
for deploycontext in context_list:
# Add the settings for this context
for newcontext in deploycontext.split(","):
config_lib.CONFIG.AddContext(newcontext)
context.append(newcontext)
try:
# If the ClientBuilder.target_platforms doesn't match our environment,
# skip.
if not config_lib.CONFIG.MatchBuildContext(args.platform, args.arch,
args.package_format):
continue
deployer = GetDeployer(context)
# Make a nicer filename out of the context string.
context_filename = deploycontext.replace(
"AllPlatforms Context,", "").replace(",", "_").replace(" ", "_")
deployed_list.append(context_filename)
output_filename = os.path.join(
output_dir, context_filename,
config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context))
logging.info("Deploying %s as %s with labels: %s", deploycontext,
config_lib.CONFIG.Get(
"Client.name", context=deployer.context),
config_lib.CONFIG.Get(
"Client.labels", context=deployer.context))
deployer.MakeDeployableBinary(template_path, output_filename)
finally:
# Remove the custom settings for the next deploy
for newcontext in deploycontext.split(","):
context.remove(newcontext)
config_lib.ImportConfigManger(config_orig)
logging.info("Complete, installers for %s are in %s", deployed_list,
output_dir)
def main(_):
"""Launch the appropriate builder."""
config_lib.CONFIG.AddContext(
"ClientBuilder Context",
"Context applied when we run the client builder script.")
startup.ClientInit()
# Make sure we have all the secondary configs since they may be set under the
# ClientBuilder Context
for secondconfig in config_lib.CONFIG["ConfigIncludes"]:
config_lib.CONFIG.LoadSecondaryConfig(secondconfig)
# Use basic console output logging so we can see what is happening.
logger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
logger.handlers = [handler]
# The following is used to change the identity of the builder based on the
# target platform.
context = flags.FLAGS.context
if args.arch == "amd64":
context.append("Arch:amd64")
else:
context.append("Arch:i386")
if args.subparser_name == "build":
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate()
elif args.subparser_name == "repack":
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
if args.debug_build:
context += ["DebugClientBuild Context"]
deployer = GetDeployer(context)
output_filename = os.path.join(
args.outputdir, config_lib.CONFIG.Get(
"ClientBuilder.output_filename", context=deployer.context))
deployer.RepackInstaller(open(args.template, "rb").read(), args.output or
output_filename)
elif args.subparser_name == "deploy":
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
if args.debug_build:
context += ["DebugClientBuild Context"]
deployer = GetDeployer(context)
template_path = (args.template or TemplateInputFilename(deployer.context) or
config_lib.CONFIG.Get("ClientBuilder.template_path",
context=deployer.context))
# If neither output filename or output directory is specified,
# use the default location from the config file.
output = None
if args.output:
output = args.output
elif args.outputdir:
# If output filename isn't specified, write to args.outputdir with a
# .deployed extension so we can distinguish it from repacked binaries.
filename = ".".join(
(config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context), "deployed"))
output = os.path.join(args.outputdir, filename)
deployer.MakeDeployableBinary(template_path, output)
elif args.subparser_name == "buildanddeploy":
BuildAndDeploy(context)
if __name__ == "__main__":
flags.StartMain(main)
| [((199, 14, 199, 50), 'time.strftime', 'time.strftime', ({(199, 28, 199, 49): '"""%Y-%m-%dT%H:%M:%S%z"""'}, {}), "('%Y-%m-%dT%H:%M:%S%z')", False, 'import time\n'), ((219, 17, 219, 68), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', ({(219, 39, 219, 67): '"""ClientBuilder.BuildTargets"""'}, {}), "('ClientBuilder.BuildTargets')", False, 'from grr.lib import config_lib\n'), ((221, 2, 221, 59), 'logging.info', 'logging.info', ({(221, 15, 221, 44): '"""Building installers for: %s"""', (221, 46, 221, 58): 'context_list'}, {}), "('Building installers for: %s', context_list)", False, 'import logging\n'), ((222, 16, 222, 47), 'grr.lib.config_lib.CONFIG.ExportState', 'config_lib.CONFIG.ExportState', ({}, {}), '()', False, 'from grr.lib import config_lib\n'), ((262, 2, 263, 26), 'logging.info', 'logging.info', ({(262, 15, 262, 54): '"""Complete, installers for %s are in %s"""', (262, 56, 262, 69): 'deployed_list', (263, 15, 263, 25): 'output_dir'}, {}), "('Complete, installers for %s are in %s', deployed_list, output_dir\n )", False, 'import logging\n'), ((268, 2, 270, 63), 'grr.lib.config_lib.CONFIG.AddContext', 'config_lib.CONFIG.AddContext', ({(269, 6, 269, 29): '"""ClientBuilder Context"""', (270, 6, 270, 62): '"""Context applied when we run the client builder script."""'}, {}), "('ClientBuilder Context',\n 'Context applied when we run the client builder script.')", False, 'from grr.lib import config_lib\n'), ((272, 2, 272, 22), 'grr.lib.startup.ClientInit', 'startup.ClientInit', ({}, {}), '()', False, 'from grr.lib import startup\n'), ((280, 11, 280, 30), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((281, 12, 281, 35), 'logging.StreamHandler', 'logging.StreamHandler', ({}, {}), '()', False, 'import logging\n'), ((344, 2, 344, 23), 'grr.lib.flags.StartMain', 'flags.StartMain', ({(344, 18, 344, 22): 'main'}, {}), '(main)', False, 'from grr.lib import flags\n'), ((27, 3, 27, 26), 'platform.architecture', 'platform.architecture', ({}, {}), '()', False, 'import platform\n'), ((32, 19, 32, 36), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((47, 11, 47, 40), 'platform.linux_distribution', 'platform.linux_distribution', ({}, {}), '()', False, 'import platform\n'), ((190, 15, 191, 53), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (), '', False, 'from grr.lib import config_lib\n'), ((192, 11, 192, 51), 'os.path.join', 'os.path.join', ({(192, 24, 192, 40): 'args.templatedir', (192, 42, 192, 50): 'filename'}, {}), '(args.templatedir, filename)', False, 'import os\n'), ((202, 4, 202, 57), 'grr.lib.config_lib.CONFIG.Set', 'config_lib.CONFIG.Set', ({(202, 26, 202, 42): '"""Client.plugins"""', (202, 44, 202, 56): 'args.plugins'}, {}), "('Client.plugins', args.plugins)", False, 'from grr.lib import config_lib\n'), ((206, 28, 207, 56), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (), '', False, 'from grr.lib import config_lib\n'), ((277, 4, 277, 55), 'grr.lib.config_lib.CONFIG.LoadSecondaryConfig', 'config_lib.CONFIG.LoadSecondaryConfig', ({(277, 42, 277, 54): 'secondconfig'}, {}), '(secondconfig)', False, 'from grr.lib import config_lib\n'), ((37, 34, 37, 51), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((213, 45, 214, 57), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (), '', False, 'from grr.lib import config_lib\n'), ((228, 6, 228, 46), 'grr.lib.config_lib.CONFIG.AddContext', 'config_lib.CONFIG.AddContext', ({(228, 35, 228, 45): 
'newcontext'}, {}), '(newcontext)', False, 'from grr.lib import config_lib\n'), ((260, 6, 260, 48), 'grr.lib.config_lib.ImportConfigManger', 'config_lib.ImportConfigManger', ({(260, 36, 260, 47): 'config_orig'}, {}), '(config_orig)', False, 'from grr.lib import config_lib\n'), ((234, 13, 235, 69), 'grr.lib.config_lib.CONFIG.MatchBuildContext', 'config_lib.CONFIG.MatchBuildContext', ({(234, 49, 234, 62): 'args.platform', (234, 64, 234, 73): 'args.arch', (235, 49, 235, 68): 'args.package_format'}, {}), '(args.platform, args.arch, args.\n package_format)', False, 'from grr.lib import config_lib\n'), ((246, 10, 247, 57), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (), '', False, 'from grr.lib import config_lib\n'), ((250, 19, 251, 63), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (), '', False, 'from grr.lib import config_lib\n'), ((252, 19, 253, 65), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (), '', False, 'from grr.lib import config_lib\n'), ((299, 6, 299, 59), 'grr.lib.config_lib.CONFIG.Set', 'config_lib.CONFIG.Set', ({(299, 28, 299, 44): '"""Client.plugins"""', (299, 46, 299, 58): 'args.plugins'}, {}), "('Client.plugins', args.plugins)", False, 'from grr.lib import config_lib\n'), ((306, 24, 307, 70), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (), '', False, 'from grr.lib import config_lib\n'), ((314, 6, 314, 59), 'grr.lib.config_lib.CONFIG.Set', 'config_lib.CONFIG.Set', ({(314, 28, 314, 44): '"""Client.plugins"""', (314, 46, 314, 58): 'args.plugins'}, {}), "('Client.plugins', args.plugins)", False, 'from grr.lib import config_lib\n'), ((321, 21, 322, 68), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (), '', False, 'from grr.lib import config_lib\n'), ((335, 15, 335, 53), 'os.path.join', 'os.path.join', ({(335, 28, 335, 42): 'args.outputdir', (335, 44, 335, 52): 'filename'}, {}), '(args.outputdir, filename)', False, 'import os\n'), ((333, 11, 334, 58), 'grr.lib.config_lib.CONFIG.Get', 'config_lib.CONFIG.Get', (), '', False, 'from grr.lib import config_lib\n'), ((152, 21, 152, 50), 'platform.linux_distribution', 'platform.linux_distribution', ({}, {}), '()', False, 'import platform\n')] |
naveena41/greyatom-python-for-data-science | Greyatom-projects/code.py | 3aa63878ff12e0e8cdf0e63bafe9b4a2c082f7b1 | # --------------
# Code starts here
# Create the lists
class_1 = ['geoffrey hinton', 'andrew ng', 'sebastian raschka', 'yoshu bengio']
class_2 = ['hilary mason', 'carla gentry', 'corinna cortes']
# Concatenate both the strings
new_class = class_1+class_2
print(new_class)
# Append the list
new_class.append('peter warden')
# Print updated list
print(new_class)
# Remove the element from the list
new_class.remove('carla gentry')
# Print the list
print(new_class)
# Create the Dictionary
courses = {"math": 65, "english": 70, "history": 80, "french": 70, "science":60}
# Slice the dict and store all the subjects' marks in a variable
total = sum(courses.values())
print(total)
# Store the all the subject in one variable `Total`
# Print the total
# Insert percentage formula
percentage = float(total) * (100 / 500)
# Print the percentage
print(percentage)
# Create the Dictionary
mathematics = {"geoffery hinton" :78, "andrew ng" :95, "sebastian raschka" :65, "yoshua benjio" :50, "hilary mason" :70, "corinna cortes" :66, "peter warden" :75}
topper = max(mathematics,key = mathematics.get)
print(topper)
# Split the topper's full name into its parts
print(topper.split())
# Create variable first_name
first_name = 'andrew'
# Create variable Last_name and store the last element of the split name
Last_name = 'ng'
# Concatenate the first and last name with a space in between
full_name = first_name + ' ' + Last_name
# print the full_name
print(full_name)
# print the name in upper case
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
| [] |
jackblandin/ml-fairness-gym | environments/recommenders/recsim_wrapper_test.py | dce1feaacf2588e0a2d6187e896796241a25ed81 | # coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for recsim.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import test_util
from environments.recommenders import recsim_wrapper
from recsim.environments import interest_exploration
class RecommenderTest(absltest.TestCase):
def test_interest_exploration_can_run(self):
env_config = {
'num_candidates': 5,
'slate_size': 2,
'resample_documents': False,
'seed': 100,
}
params = recsim_wrapper.Params(
recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)
test_util.run_test_simulation(env=env, stackelberg=True)
def test_interest_exploration_can_run_with_resampling(self):
env_config = {
'num_candidates': 5,
'slate_size': 2,
'resample_documents': True,
'seed': 100,
}
params = recsim_wrapper.Params(
recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)
test_util.run_test_simulation(env=env, stackelberg=True)
if __name__ == '__main__':
absltest.main()
| [((57, 2, 57, 17), 'absl.testing.absltest.main', 'absltest.main', ({}, {}), '()', False, 'from absl.testing import absltest\n'), ((40, 10, 40, 46), 'environments.recommenders.recsim_wrapper.RecsimWrapper', 'recsim_wrapper.RecsimWrapper', ({(40, 39, 40, 45): 'params'}, {}), '(params)', False, 'from environments.recommenders import recsim_wrapper\n'), ((41, 4, 41, 60), 'test_util.run_test_simulation', 'test_util.run_test_simulation', (), '', False, 'import test_util\n'), ((52, 10, 52, 46), 'environments.recommenders.recsim_wrapper.RecsimWrapper', 'recsim_wrapper.RecsimWrapper', ({(52, 39, 52, 45): 'params'}, {}), '(params)', False, 'from environments.recommenders import recsim_wrapper\n'), ((53, 4, 53, 60), 'test_util.run_test_simulation', 'test_util.run_test_simulation', (), '', False, 'import test_util\n'), ((39, 19, 39, 70), 'recsim.environments.interest_exploration.create_environment', 'interest_exploration.create_environment', ({(39, 59, 39, 69): 'env_config'}, {}), '(env_config)', False, 'from recsim.environments import interest_exploration\n'), ((51, 19, 51, 70), 'recsim.environments.interest_exploration.create_environment', 'interest_exploration.create_environment', ({(51, 59, 51, 69): 'env_config'}, {}), '(env_config)', False, 'from recsim.environments import interest_exploration\n')] |
mernst32/dl-searchcode-code | moss_client_cli.py | 504fe59df245ba123ad8ad6e45f03b17de6ef236 | import argparse
import csv
import os
from moss_client.core import submit_and_dl, parse_moss_reports
data_folder = 'data'
def handle_input(user_id, base_folder, parse, only_parse, join_file, batch):
global data_folder
abs_path = os.path.abspath(os.path.dirname(__file__))
root_data_folder = os.path.join(abs_path, data_folder)
if not os.path.exists(root_data_folder):
os.makedirs(root_data_folder)
report_links_file = os.path.join(root_data_folder, 'links_to_moss_reports.html')
report_csv_file = os.path.join(root_data_folder, 'moss_report.csv')
if not os.path.isabs(base_folder):
base_folder = os.path.join(abs_path, base_folder)
if len(join_file) > 0:
expected_keys = ["SC_Filepath", "Stackoverflow_Links"]
with open(join_file, mode='r', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
actual_keys = csv_reader.fieldnames
if expected_keys[0] != actual_keys[0] or expected_keys[1] != actual_keys[1]:
print("Error: Unexpected Headers! SC_Filepath and Stackoverflow_Links are required!")
return -1
if not only_parse:
submit_and_dl(user_id, base_folder, report_links_file, batch)
if parse or only_parse:
print("Parsing the moss reports...")
parse_moss_reports(report_links_file, report_csv_file, join_file)
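# Illustrative example (not from the repository): the optional join file passed via
# --join-file is expected to be a CSV with exactly the two headers checked above, e.g.
#
#   SC_Filepath,Stackoverflow_Links
#   submissions/student1/Main.java,https://stackoverflow.com/q/12345678
#   submissions/student2/Main.java,https://stackoverflow.com/q/23456789
#
# The paths and links shown are made-up placeholder values.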
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="MOSS CLI client for submitting java files to the service and downloading the report from the "
"service locally. Will go through the sub folders of the given folder and submit the java files "
"for plagiarism checks and download the reports locally, creating a linking file in the process")
parser.add_argument('user_id', metavar='U', nargs=1, help="Your user-id for the MOSS service.")
parser.add_argument('folder', metavar='F', nargs=1, help="The folder whose contents you want to submit.")
parser.add_argument('-p', '--parse', action='store_true', help="Parses the moss reports into a csv file.")
parser.add_argument('-o', '--only-parse', action='store_true',
help="Only parses the local moss reports and does not submit files and download the reports. "
"Requires the reports and the links_to_reports html file created normally by this app.")
parser.add_argument('-j', '--join-file', nargs=1, default=[""],
help="When the parse or only-parse option is given, joins the parsed data with the parsed data.")
parser.add_argument('-b', '--batch-mode', action='store_true',
help="Only submits a 100 folders to the Moss Service, also looks for already processed folders so "
"that it does not submit those again.")
args = parser.parse_args()
handle_input(args.user_id[0], args.folder[0], args.parse, args.only_parse, args.join_file[0], args.batch_mode)
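    # Example invocation (placeholder values): python moss_client_cli.py 987654321 ./submissions -p -j data/join.csv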
| [((12, 23, 12, 58), 'os.path.join', 'os.path.join', ({(12, 36, 12, 44): 'abs_path', (12, 46, 12, 57): 'data_folder'}, {}), '(abs_path, data_folder)', False, 'import os\n'), ((15, 24, 15, 84), 'os.path.join', 'os.path.join', ({(15, 37, 15, 53): 'root_data_folder', (15, 55, 15, 83): '"""links_to_moss_reports.html"""'}, {}), "(root_data_folder, 'links_to_moss_reports.html')", False, 'import os\n'), ((16, 22, 16, 71), 'os.path.join', 'os.path.join', ({(16, 35, 16, 51): 'root_data_folder', (16, 53, 16, 70): '"""moss_report.csv"""'}, {}), "(root_data_folder, 'moss_report.csv')", False, 'import os\n'), ((36, 13, 39, 117), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((11, 31, 11, 56), 'os.path.dirname', 'os.path.dirname', ({(11, 47, 11, 55): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((13, 11, 13, 43), 'os.path.exists', 'os.path.exists', ({(13, 26, 13, 42): 'root_data_folder'}, {}), '(root_data_folder)', False, 'import os\n'), ((14, 8, 14, 37), 'os.makedirs', 'os.makedirs', ({(14, 20, 14, 36): 'root_data_folder'}, {}), '(root_data_folder)', False, 'import os\n'), ((17, 11, 17, 37), 'os.path.isabs', 'os.path.isabs', ({(17, 25, 17, 36): 'base_folder'}, {}), '(base_folder)', False, 'import os\n'), ((18, 22, 18, 57), 'os.path.join', 'os.path.join', ({(18, 35, 18, 43): 'abs_path', (18, 45, 18, 56): 'base_folder'}, {}), '(abs_path, base_folder)', False, 'import os\n'), ((29, 8, 29, 69), 'moss_client.core.submit_and_dl', 'submit_and_dl', ({(29, 22, 29, 29): 'user_id', (29, 31, 29, 42): 'base_folder', (29, 44, 29, 61): 'report_links_file', (29, 63, 29, 68): 'batch'}, {}), '(user_id, base_folder, report_links_file, batch)', False, 'from moss_client.core import submit_and_dl, parse_moss_reports\n'), ((32, 8, 32, 73), 'moss_client.core.parse_moss_reports', 'parse_moss_reports', ({(32, 27, 32, 44): 'report_links_file', (32, 46, 32, 61): 'report_csv_file', (32, 63, 32, 72): 'join_file'}, {}), '(report_links_file, report_csv_file, join_file)', False, 'from moss_client.core import submit_and_dl, parse_moss_reports\n'), ((23, 25, 23, 49), 'csv.DictReader', 'csv.DictReader', ({(23, 40, 23, 48): 'csv_file'}, {}), '(csv_file)', False, 'import csv\n')] |
DiegoOrtegoP/Software | catkin_ws/src/localization/src/localization_node.py | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd | #!/usr/bin/env python
import rospy
#from apriltags_ros.msg import AprilTagDetectionArray
from duckietown_msgs.msg import AprilTagsWithInfos
import tf2_ros
from tf2_msgs.msg import TFMessage
import tf.transformations as tr
from geometry_msgs.msg import Transform, TransformStamped
import numpy as np
from localization import PoseAverage
from visualization_msgs.msg import Marker
# Localization Node
# Author: Teddy Ort
# Inputs: apriltags/duckietown_msgs/AprilTags - A list of april tags in a camera frame
# Outputs: pose2d/duckietown_msgs/Pose2dStamped - The estimated pose of the robot in the world frame in 2D coordinates
# pose3d/geometry_msgs/PoseStamped - The estimated pose of the robot in the world frame in 3D coordinates
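# Estimation sketch (derived from tag_callback below): for each detected tag, the robot's
# world pose is recovered by composing the known tag-in-world transform with the inverse of
# the observed tag-in-robot transform, i.e. M_robot_world = M_tag_world * inv(M_tag_robot);
# the per-tag estimates are then averaged before being broadcast on /tf.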
class LocalizationNode(object):
def __init__(self):
self.node_name = 'localization_node'
# Constants
self.world_frame = "world"
self.duckiebot_frame = "duckiebot"
        self.duckiebot_lifetime = self.setupParam("~duckiebot_lifetime", 5) # The number of seconds to keep the duckiebot alive between detections
self.highlight_lifetime = self.setupParam("~highlight_lifetime", 3) # The number of seconds to keep a sign highlighted after a detection
# Setup the publishers and subscribers
self.sub_april = rospy.Subscriber("~apriltags", AprilTagsWithInfos, self.tag_callback)
self.pub_tf = rospy.Publisher("/tf", TFMessage, queue_size=1, latch=True)
self.pub_rviz = rospy.Publisher("/sign_highlights", Marker, queue_size=1, latch=True)
# Setup the transform listener
self.tfbuf = tf2_ros.Buffer()
self.tfl = tf2_ros.TransformListener(self.tfbuf)
# Use a timer to make the duckiebot disappear
self.lifetimer = rospy.Time.now()
self.publish_duckie_marker()
rospy.loginfo("[%s] has started", self.node_name)
def tag_callback(self, msg_tag):
# Listen for the transform of the tag in the world
avg = PoseAverage.PoseAverage()
for tag in msg_tag.detections:
try:
Tt_w = self.tfbuf.lookup_transform(self.world_frame, "tag_{id}".format(id=tag.id), rospy.Time(), rospy.Duration(1))
Mtbase_w=self.transform_to_matrix(Tt_w.transform)
Mt_tbase = tr.concatenate_matrices(tr.translation_matrix((0,0,0.17)), tr.euler_matrix(0,0,np.pi))
Mt_w = tr.concatenate_matrices(Mtbase_w,Mt_tbase)
Mt_r=self.pose_to_matrix(tag.pose)
Mr_t=np.linalg.inv(Mt_r)
Mr_w=np.dot(Mt_w,Mr_t)
Tr_w = self.matrix_to_transform(Mr_w)
avg.add_pose(Tr_w)
self.publish_sign_highlight(tag.id)
except(tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as ex:
rospy.logwarn("Error looking up transform for tag_%s", tag.id)
rospy.logwarn(ex.message)
Tr_w = avg.get_average() # Average of the opinions
# Broadcast the robot transform
if Tr_w is not None:
# Set the z translation, and x and y rotations to 0
Tr_w.translation.z = 0
rot = Tr_w.rotation
rotz=tr.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))[2]
(rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_euler(0, 0, rotz)
T = TransformStamped()
T.transform = Tr_w
T.header.frame_id = self.world_frame
T.header.stamp = rospy.Time.now()
T.child_frame_id = self.duckiebot_frame
self.pub_tf.publish(TFMessage([T]))
self.lifetimer = rospy.Time.now()
def publish_duckie_marker(self):
# Publish a duckiebot transform far away unless the timer was reset
rate = rospy.Rate(10)
while not rospy.is_shutdown():
rate.sleep()
if rospy.Time.now() - self.lifetimer > rospy.Duration(self.duckiebot_lifetime):
T = TransformStamped()
T.transform.translation.z = 1000 # Throw it 1km in the air
T.transform.rotation.w = 1
T.header.frame_id = self.world_frame
T.header.stamp = rospy.Time.now()
T.child_frame_id = self.duckiebot_frame
self.pub_tf.publish(TFMessage([T]))
def publish_sign_highlight(self, id):
# Publish a highlight marker on the sign that is seen by the robot
m = Marker()
m.header.frame_id="tag_{id}".format(id=id)
m.header.stamp = rospy.Time.now()
m.id=id
m.lifetime = rospy.Duration(self.highlight_lifetime)
m.type = Marker.CYLINDER
p = m.pose.position
o = m.pose.orientation
c = m.color
s = m.scale
s.x, s.y, s.z = (0.1, 0.1, 0.3)
p.z = 0.15
c.a, c.r, c.g, c.b = (0.2, 0.9, 0.9, 0.0)
o.w = 1
self.pub_rviz.publish(m)
def pose_to_matrix(self, p):
# Return the 4x4 homogeneous matrix for a PoseStamped.msg p from the geometry_msgs
trans = (p.pose.position.x, p.pose.position.y, p.pose.position.z)
rot = (p.pose.orientation.x, p.pose.orientation.y, p.pose.orientation.z, p.pose.orientation.w)
return np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot))
def transform_to_matrix(self, T):
# Return the 4x4 homogeneous matrix for a TransformStamped.msg T from the geometry_msgs
trans = (T.translation.x, T.translation.y, T.translation.z)
rot = (T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w)
return np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot))
def matrix_to_transform(self, M):
# Return a TransformStamped.msg T from the geometry_msgs from a 4x4 homogeneous matrix
T=Transform()
(T.translation.x, T.translation.y, T.translation.z) = tr.translation_from_matrix(M)
(T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w) = tr.quaternion_from_matrix(M)
return T
def setupParam(self, param_name, default_value):
value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name, value) # Write to parameter server for transparency
rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
return value
if __name__ == '__main__':
rospy.init_node('localization_node', anonymous=False)
localization_node = LocalizationNode()
rospy.spin()
| [((140, 4, 140, 57), 'rospy.init_node', 'rospy.init_node', (), '', False, 'import rospy\n'), ((142, 4, 142, 16), 'rospy.spin', 'rospy.spin', ({}, {}), '()', False, 'import rospy\n'), ((31, 25, 31, 94), 'rospy.Subscriber', 'rospy.Subscriber', ({(31, 42, 31, 54): '"""~apriltags"""', (31, 56, 31, 74): 'AprilTagsWithInfos', (31, 76, 31, 93): 'self.tag_callback'}, {}), "('~apriltags', AprilTagsWithInfos, self.tag_callback)", False, 'import rospy\n'), ((32, 22, 32, 81), 'rospy.Publisher', 'rospy.Publisher', (), '', False, 'import rospy\n'), ((33, 24, 33, 93), 'rospy.Publisher', 'rospy.Publisher', (), '', False, 'import rospy\n'), ((36, 21, 36, 37), 'tf2_ros.Buffer', 'tf2_ros.Buffer', ({}, {}), '()', False, 'import tf2_ros\n'), ((37, 19, 37, 56), 'tf2_ros.TransformListener', 'tf2_ros.TransformListener', ({(37, 45, 37, 55): 'self.tfbuf'}, {}), '(self.tfbuf)', False, 'import tf2_ros\n'), ((40, 25, 40, 41), 'rospy.Time.now', 'rospy.Time.now', ({}, {}), '()', False, 'import rospy\n'), ((43, 8, 43, 57), 'rospy.loginfo', 'rospy.loginfo', ({(43, 22, 43, 40): '"""[%s] has started"""', (43, 42, 43, 56): 'self.node_name'}, {}), "('[%s] has started', self.node_name)", False, 'import rospy\n'), ((47, 14, 47, 39), 'localization.PoseAverage.PoseAverage', 'PoseAverage.PoseAverage', ({}, {}), '()', False, 'from localization import PoseAverage\n'), ((83, 15, 83, 29), 'rospy.Rate', 'rospy.Rate', ({(83, 26, 83, 28): '10'}, {}), '(10)', False, 'import rospy\n'), ((97, 12, 97, 20), 'visualization_msgs.msg.Marker', 'Marker', ({}, {}), '()', False, 'from visualization_msgs.msg import Marker\n'), ((99, 25, 99, 41), 'rospy.Time.now', 'rospy.Time.now', ({}, {}), '()', False, 'import rospy\n'), ((101, 21, 101, 60), 'rospy.Duration', 'rospy.Duration', ({(101, 36, 101, 59): 'self.highlight_lifetime'}, {}), '(self.highlight_lifetime)', False, 'import rospy\n'), ((127, 10, 127, 21), 'geometry_msgs.msg.Transform', 'Transform', ({}, {}), '()', False, 'from geometry_msgs.msg import Transform, TransformStamped\n'), ((128, 62, 128, 91), 'tf.transformations.translation_from_matrix', 'tr.translation_from_matrix', ({(128, 89, 128, 90): 'M'}, {}), '(M)', True, 'import tf.transformations as tr\n'), ((129, 67, 129, 95), 'tf.transformations.quaternion_from_matrix', 'tr.quaternion_from_matrix', ({(129, 93, 129, 94): 'M'}, {}), '(M)', True, 'import tf.transformations as tr\n'), ((133, 16, 133, 58), 'rospy.get_param', 'rospy.get_param', ({(133, 32, 133, 42): 'param_name', (133, 44, 133, 57): 'default_value'}, {}), '(param_name, default_value)', False, 'import rospy\n'), ((134, 8, 134, 42), 'rospy.set_param', 'rospy.set_param', ({(134, 24, 134, 34): 'param_name', (134, 36, 134, 41): 'value'}, {}), '(param_name, value)', False, 'import rospy\n'), ((135, 8, 135, 76), 'rospy.loginfo', 'rospy.loginfo', ({(135, 22, 135, 75): "('[%s] %s = %s ' % (self.node_name, param_name, value))"}, {}), "('[%s] %s = %s ' % (self.node_name, param_name, value))", False, 'import rospy\n'), ((72, 43, 72, 79), 'tf.transformations.quaternion_from_euler', 'tr.quaternion_from_euler', ({(72, 68, 72, 69): '0', (72, 71, 72, 72): '0', (72, 74, 72, 78): 'rotz'}, {}), '(0, 0, rotz)', True, 'import tf.transformations as tr\n'), ((73, 16, 73, 34), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ({}, {}), '()', False, 'from geometry_msgs.msg import Transform, TransformStamped\n'), ((76, 29, 76, 45), 'rospy.Time.now', 'rospy.Time.now', ({}, {}), '()', False, 'import rospy\n'), ((79, 29, 79, 45), 'rospy.Time.now', 'rospy.Time.now', ({}, {}), '()', False, 'import 
rospy\n'), ((84, 18, 84, 37), 'rospy.is_shutdown', 'rospy.is_shutdown', ({}, {}), '()', False, 'import rospy\n'), ((117, 22, 117, 50), 'tf.transformations.translation_matrix', 'tr.translation_matrix', ({(117, 44, 117, 49): 'trans'}, {}), '(trans)', True, 'import tf.transformations as tr\n'), ((117, 52, 117, 77), 'tf.transformations.quaternion_matrix', 'tr.quaternion_matrix', ({(117, 73, 117, 76): 'rot'}, {}), '(rot)', True, 'import tf.transformations as tr\n'), ((123, 22, 123, 50), 'tf.transformations.translation_matrix', 'tr.translation_matrix', ({(123, 44, 123, 49): 'trans'}, {}), '(trans)', True, 'import tf.transformations as tr\n'), ((123, 52, 123, 77), 'tf.transformations.quaternion_matrix', 'tr.quaternion_matrix', ({(123, 73, 123, 76): 'rot'}, {}), '(rot)', True, 'import tf.transformations as tr\n'), ((53, 23, 53, 65), 'tf.transformations.concatenate_matrices', 'tr.concatenate_matrices', ({(53, 47, 53, 55): 'Mtbase_w', (53, 56, 53, 64): 'Mt_tbase'}, {}), '(Mtbase_w, Mt_tbase)', True, 'import tf.transformations as tr\n'), ((55, 21, 55, 40), 'numpy.linalg.inv', 'np.linalg.inv', ({(55, 35, 55, 39): 'Mt_r'}, {}), '(Mt_r)', True, 'import numpy as np\n'), ((56, 21, 56, 38), 'numpy.dot', 'np.dot', ({(56, 28, 56, 32): 'Mt_w', (56, 33, 56, 37): 'Mr_t'}, {}), '(Mt_w, Mr_t)', True, 'import numpy as np\n'), ((71, 17, 71, 71), 'tf.transformations.euler_from_quaternion', 'tr.euler_from_quaternion', ({(71, 42, 71, 70): '(rot.x, rot.y, rot.z, rot.w)'}, {}), '((rot.x, rot.y, rot.z, rot.w))', True, 'import tf.transformations as tr\n'), ((78, 32, 78, 46), 'tf2_msgs.msg.TFMessage', 'TFMessage', ({(78, 42, 78, 45): '[T]'}, {}), '([T])', False, 'from tf2_msgs.msg import TFMessage\n'), ((86, 51, 86, 90), 'rospy.Duration', 'rospy.Duration', ({(86, 66, 86, 89): 'self.duckiebot_lifetime'}, {}), '(self.duckiebot_lifetime)', False, 'import rospy\n'), ((87, 20, 87, 38), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ({}, {}), '()', False, 'from geometry_msgs.msg import Transform, TransformStamped\n'), ((91, 33, 91, 49), 'rospy.Time.now', 'rospy.Time.now', ({}, {}), '()', False, 'import rospy\n'), ((50, 99, 50, 111), 'rospy.Time', 'rospy.Time', ({}, {}), '()', False, 'import rospy\n'), ((50, 113, 50, 130), 'rospy.Duration', 'rospy.Duration', ({(50, 128, 50, 129): '1'}, {}), '(1)', False, 'import rospy\n'), ((52, 51, 52, 84), 'tf.transformations.translation_matrix', 'tr.translation_matrix', ({(52, 73, 52, 83): '(0, 0, 0.17)'}, {}), '((0, 0, 0.17))', True, 'import tf.transformations as tr\n'), ((52, 86, 52, 112), 'tf.transformations.euler_matrix', 'tr.euler_matrix', ({(52, 102, 52, 103): '0', (52, 104, 52, 105): '0', (52, 106, 52, 111): 'np.pi'}, {}), '(0, 0, np.pi)', True, 'import tf.transformations as tr\n'), ((61, 16, 61, 78), 'rospy.logwarn', 'rospy.logwarn', ({(61, 30, 61, 69): '"""Error looking up transform for tag_%s"""', (61, 71, 61, 77): 'tag.id'}, {}), "('Error looking up transform for tag_%s', tag.id)", False, 'import rospy\n'), ((62, 16, 62, 41), 'rospy.logwarn', 'rospy.logwarn', ({(62, 30, 62, 40): 'ex.message'}, {}), '(ex.message)', False, 'import rospy\n'), ((86, 15, 86, 31), 'rospy.Time.now', 'rospy.Time.now', ({}, {}), '()', False, 'import rospy\n'), ((93, 36, 93, 50), 'tf2_msgs.msg.TFMessage', 'TFMessage', ({(93, 46, 93, 49): '[T]'}, {}), '([T])', False, 'from tf2_msgs.msg import TFMessage\n')] |
wusui/NCAA2019 | gen_data/get_teams.py | d33a69926dc2d5355f33f9b69e39475c54d03c56 | #!/usr/bin/python
# pylint: disable=W0223
"""
Get a list of teams
"""
from html.parser import HTMLParser
import requests
class ChkTeams(HTMLParser):
"""
Extract team names from page
"""
def __init__(self):
HTMLParser.__init__(self)
self.retval = []
def handle_starttag(self, tag, attrs):
for apt in attrs:
if apt[0] == 'title':
if apt[1] != "ESPN Search":
self.retval.append(apt[1])
DATALOC = "http://www.espn.com/mens-college-basketball/tournament/bracket"
def check_teams():
"""
Extract a list of teams (schools)
"""
req = requests.get(DATALOC)
parser = ChkTeams()
parser.feed(req.text)
retv = parser.retval
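    # The first entries are page-level titles rather than school names; the offset of 8
    # reflects the ESPN bracket page layout this script targets (an assumption, not verified here).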
return retv[8:]
def make_team_list():
"""
Call check_teams and stick result in text file
"""
listv = check_teams()
with open('teams.txt', 'w') as ofile:
for team in listv:
ofile.write(team + '\n')
if __name__ == '__main__':
make_team_list()
| [((32, 10, 32, 31), 'requests.get', 'requests.get', ({(32, 23, 32, 30): 'DATALOC'}, {}), '(DATALOC)', False, 'import requests\n'), ((15, 8, 15, 33), 'html.parser.HTMLParser.__init__', 'HTMLParser.__init__', ({(15, 28, 15, 32): 'self'}, {}), '(self)', False, 'from html.parser import HTMLParser\n')] |
agustinhenze/mibs.snmplabs.com | pysnmp-with-texts/Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB.py | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | #
# PySNMP MIB module Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB
# Produced by pysmi-0.3.4 at Wed May 1 14:31:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
mscMod, mscModIndex = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscMod", "mscModIndex")
DisplayString, RowStatus, StorageType, Unsigned32, Integer32 = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-StandardTextualConventionsMIB", "DisplayString", "RowStatus", "StorageType", "Unsigned32", "Integer32")
DigitString, NonReplicated = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-TextualConventionsMIB", "DigitString", "NonReplicated")
mscPassportMIBs, = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-UsefulDefinitionsMIB", "mscPassportMIBs")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter32, Counter64, IpAddress, ObjectIdentity, Bits, iso, Unsigned32, Gauge32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Integer32, TimeTicks, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Counter64", "IpAddress", "ObjectIdentity", "Bits", "iso", "Unsigned32", "Gauge32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Integer32", "TimeTicks", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
subnetInterfaceMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45))
mscModVcs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2))
mscModVcsRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1), )
if mibBuilder.loadTexts: mscModVcsRowStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsRowStatusTable.setDescription('This entry controls the addition and deletion of mscModVcs components.')
mscModVcsRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"))
if mibBuilder.loadTexts: mscModVcsRowStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsRowStatusEntry.setDescription('A single entry in the table represents a single mscModVcs component.')
mscModVcsRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsRowStatus.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsRowStatus.setDescription('This variable is used as the basis for SNMP naming of mscModVcs components. These components can be added and deleted.')
mscModVcsComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscModVcsComponentName.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsComponentName.setDescription("This variable provides the component's string name for use with the ASCII Console Interface")
mscModVcsStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscModVcsStorageType.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsStorageType.setDescription('This variable represents the storage type value for the mscModVcs tables.')
mscModVcsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscModVcsIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsIndex.setDescription('This variable represents the index for the mscModVcs tables.')
mscModVcsAccOptTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10), )
if mibBuilder.loadTexts: mscModVcsAccOptTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsAccOptTable.setDescription("Accounting information is owned by the Vc System; it is stored in the Vc Accounting component, which itself is considered to be a component on the switch. The Accounting Component contains a bit map indicating which of the accounting facilities are to be spooled in the accounting record - for example, bit '0' if set indicates that the accounting facility with facility code H.00 should be spooled if present in the Vc for accounting purposes. The data contained in the Vc Accounting must be identical network wide even though the component can be changed and upgraded on a module by module basis.")
mscModVcsAccOptEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"))
if mibBuilder.loadTexts: mscModVcsAccOptEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsAccOptEntry.setDescription('An entry in the mscModVcsAccOptTable.')
mscModVcsSegmentSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("n1", 0), ("n2", 1), ("n4", 2), ("n8", 3), ("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12))).clone('n128')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsSegmentSize.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsSegmentSize.setDescription('This attribute specifies the segment size for accounting of national calls. Minimum allowed segment size is 1. If data segment is sent which is less than segmentSize it is still counted as one segment.')
mscModVcsUnitsCounted = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("segments", 0), ("frames", 1))).clone('segments')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsUnitsCounted.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsUnitsCounted.setDescription('This attribute specifies what is counted by frame services. If set to frames, frames are counted, else segments are counted.')
mscModVcsAccountingFax = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="20")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsAccountingFax.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsAccountingFax.setDescription('Each value corresponds to an accounting facility code, of which there are currently 10 facility codes defined with codes H.00 to H.09, and corresponding to the above 10 facilities. Each of the above facilities may or may not be present and stored in the Vc for accounting purposes, depending on the nature of the call. For example, only those Vcs where a NUI (Network User Identifier) is used for charging or identification purposes will have a NUI stored in the Vc. Description of bits: notused0(0) notused1(1) originalCalledAddressFax(2)')
mscModVcsGenerationMode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("bothEnds", 0), ("singleEnd", 1))).clone('singleEnd')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsGenerationMode.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsGenerationMode.setDescription('This attribute specifies part of the rules by which the network generates accounting records. If set to bothEnds, then both ends of the Vc generate accounting records. If set to singleEnd, then the charged end of the Vc generates accounting records. In single end generation mode, if the call does not clear gracefully, both ends of the Vc will generate accounting record.')
mscModVcsAddOptTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12), )
if mibBuilder.loadTexts: mscModVcsAddOptTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsAddOptTable.setDescription('The Vc AddressingOptions group describes the addressing parameters. It is currently owned by the Vc. Most of the data contained in the Vc AddressingOptions group is identical network wide even though the group can be changed and upgraded on a module by module basis.')
mscModVcsAddOptEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"))
if mibBuilder.loadTexts: mscModVcsAddOptEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsAddOptEntry.setDescription('An entry in the mscModVcsAddOptTable.')
mscModVcsDefaultNumberingPlan = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1))).clone('x121')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsDefaultNumberingPlan.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsDefaultNumberingPlan.setDescription('This attribute specifies the numbering plan used which determines the address format: X.121-- the international numbering plan for public packet switched data networks or E.164-- the international numbering plan for ISDN and PSTN. The default numbering plan does not need to be consistent across all of the nodes in the network.')
mscModVcsNetworkIdType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("dnic", 0), ("inic", 1))).clone('dnic')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscModVcsNetworkIdType.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsNetworkIdType.setDescription('This attribute specifies whether the network uses a DNIC or INIC. It is used by X.75 Gateways to indicate whether in network the DNIC or INIC is used in various utilities. If it is DNIC it can be DNIC or DCC type. If it is INIC it can be 4 digits only.')
mscModVcsX121Type = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("dnic", 0), ("dcc", 1))).clone('dnic')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121Type.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121Type.setDescription('This attribute specifies whether DNIC mode or DCC mode is used in X.121 address of international calls. If DCC is specified, then the first 3 digits of each DNA must be the Network ID Code. If this attribute is changed all Dnas in the network must start with this code. Numbering plan is affected by the change.')
mscModVcsNetworkIdCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 6), DigitString().subtype(subtypeSpec=ValueSizeConstraint(3, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsNetworkIdCode.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsNetworkIdCode.setDescription('This attribute specifies the DNIC (Data Network ID Code) of the network or DCC code.')
mscModVcsX121IntlAddresses = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('allowed')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121IntlAddresses.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121IntlAddresses.setDescription('This attribute indicates if any DTE is allowed to signal international addresses.')
mscModVcsX121IntllPrefixDigit = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 9)).clone(9)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121IntllPrefixDigit.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121IntllPrefixDigit.setDescription('This attribute indicates the prefix digit to be used for X.121 international calls. When this digit is provided the call will have full international address.')
mscModVcsX121MinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121MinAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121MinAddressLength.setDescription('This attribute indicates minimum length of x121 address.')
mscModVcsX121MaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121MaxAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121MaxAddressLength.setDescription('This attribute indicates maximum length of x121 address.')
mscModVcsX121ToE164EscapeSignificance = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('no')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsX121ToE164EscapeSignificance.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsX121ToE164EscapeSignificance.setDescription('This attribute specifies whether an X.121 to E.164 escape digit has significance in selecting an X.32 (analog) or an ISDN switched path. If two values are significant (the value 0 or the value 9) then yes is set to this attribute. If the value of the originally entered escape digit is not significant in routing the call then value of no is assigned to this attribute.')
mscModVcsE164IntlFormatAllowed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('allowed')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164IntlFormatAllowed.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164IntlFormatAllowed.setDescription("This attribute indicates whether or not to allow national format E.164 addresses. If this attribute is set to a value of Yes (=1) then national format E.164 addresses are not allowed and international format addresses only are allowed. If this attribute is set to a value of No (=0), then national format E.164 addresses are allowed. If only international format E.164 addresses are allowed, then the 'e164NatlPrefixDigit' attribute is not required, nor is the 'e164IntlPrefixDigits' required.")
mscModVcsE164IntlPrefixDigits = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 15), DigitString().subtype(subtypeSpec=ValueSizeConstraint(0, 3)).clone(hexValue="30")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164IntlPrefixDigits.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164IntlPrefixDigits.setDescription("This attribute specifies the E.164 international prefix digits. If applicable, it is specified as 1 to 3 BCD digits. The 3 BCD digits are stored with the length of the international prefix in the low order nibble, nibble [0] followed by the most significant digit of the international prefix in the next low order nibble, nibble [1], etc. This attribute is not required if the corresponding attribute, 'e164IntlFormatOnly' is set to a value of Yes (=1).")
mscModVcsE164NatlPrefixDigit = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 9)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164NatlPrefixDigit.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164NatlPrefixDigit.setDescription('This attribute contains the E.164 national prefix which may be added in front of E.164 local or national call. If e164IntlFormatOnly is set to 1, this attribute is not needed.')
mscModVcsE164LocalAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 17), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(4, 15)).clone(7)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164LocalAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164LocalAddressLength.setDescription('This attribute indicates the length of a local E.164 DNA on this module. This attribute is not required if the corresponding attribute, e164IntlFormatOnly is set to a value of yes. This attribute does not need to be consistent across all of the nodes in the network.')
mscModVcsE164TeleCountryCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 18), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 4)).clone(hexValue="31")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164TeleCountryCode.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164TeleCountryCode.setDescription('This attribute specifies the E.164 Telephone Country Code (TCC) for the country in which the network resides. If applicable, it is specified as 1 to 3 BCD digits. The 3 BCD digits are stored with the length of the TCC in the low order nibble, nibble [0] followed by the most significant digit of the TCC in the next low order nibble, nibble [1], etc.')
mscModVcsE164NatlMinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 20), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164NatlMinAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164NatlMinAddressLength.setDescription('This attribute indicates minimum length of e164 national address.')
mscModVcsE164NatlMaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 21), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164NatlMaxAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164NatlMaxAddressLength.setDescription('This attribute indicates maximum length of e164 national address.')
mscModVcsE164IntlMinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 22), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164IntlMinAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164IntlMinAddressLength.setDescription('This attribute indicates minimum length of e164 international address.')
mscModVcsE164IntlMaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 23), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164IntlMaxAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164IntlMaxAddressLength.setDescription('This attribute indicates maximum length of e164 international address.')
mscModVcsE164LocalMinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 24), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164LocalMinAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164LocalMinAddressLength.setDescription('This attribute indicates minimum length of e164 local address.')
mscModVcsE164LocalMaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 25), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsE164LocalMaxAddressLength.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsE164LocalMaxAddressLength.setDescription('This attribute indicates maximum length of e164 local address.')
mscModVcsIntOptTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13), )
if mibBuilder.loadTexts: mscModVcsIntOptTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsIntOptTable.setDescription('The Vc InterfaceOptions group defines Vc system parameters common in the network. It is owned by the Vc and is considered to be a module wide component on the switch. The data contained in the Vc InterfaceOptions group must be identical network wide even though this group can be changed and upgraded on a module by module basis.')
mscModVcsIntOptEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"))
if mibBuilder.loadTexts: mscModVcsIntOptEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsIntOptEntry.setDescription('An entry in the mscModVcsIntOptTable.')
mscModVcsHighPriorityPacketSizes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2).clone(hexValue="ff80")).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscModVcsHighPriorityPacketSizes.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsHighPriorityPacketSizes.setDescription('This attribute indicates which packet sizes are supported for high priority calls within the network. Description of bits: n16(0) n32(1) n64(2) n128(3) n256(4) n512(5) n1024(6) n2048(7) n4096(8)')
mscModVcsMaxSubnetPacketSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12))).clone('n512')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsMaxSubnetPacketSize.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsMaxSubnetPacketSize.setDescription('This attribute specifies the maximum subnet packet size used for the connections originating or terminating on this module. All modules in the same network should have the same maxSubnetPacketSize. If this value is not identical throughout the network, the following points need to be considered: a) When Passport and DPN switches are connected in the same network, the maxSubnetPacketSize on a DPN switch can be at most 2048 and the DPN part of the network must be configured with hardware which supports this size: - Dedicated PE386 Network link/Trunk - Minimum measured link speed of 256Kbits/sec This hardware has to be present on every potential data path between connecting end points! b) The calling end of the connection signals the maxSubnetPacketSize value to the called end. The called end then compares this value to its own provisioned value and selects the smaller value. Note that this smaller value is not signalled back to the calling end. The calling and called ends can therefore have different maxSubnetPacketSize values.')
mscModVcsCallSetupTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(5, 100)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsCallSetupTimer.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsCallSetupTimer.setDescription('This attribute specifies the Vc callSetupTimer in units of 1 second ticks. This timer specifies how long the Vc will wait, after sending a subnet Call Request packet into the network, for a response from the remote end of the Vc (in the form of a subnet Raccept packet). If, after sending a subnet Call packet into the network, a response is not received within this time period, the Vc will time out, clearing the call in the assumption that the remote end is unreachable. This timer must be long enough to take into account the time required for routing the subnet Call Request through the Source Call Routing and the Destination Call Routing systems in order to be delivered to the final destination.')
mscModVcsCallRetryTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(30, 300)).clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsCallRetryTimer.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsCallRetryTimer.setDescription('This attribute specifies, for Vc implementing Direct Calls with the auto-call retry feature (including PVCs), the Vc callRetryTimer in units of 1 second ticks. This timer specifies how long the Vc will wait between unsuccessful call attempts.')
mscModVcsDelaySubnetAcks = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('no')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsDelaySubnetAcks.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsDelaySubnetAcks.setDescription('This attribute specifies delay acknowledgment timer mechanism. If this attribute is set to no, then the Vc will automatically return acknowledgment packets without delay. If this attribute is set to yes, then the Vc will wait for one second in an attempt to piggyback the acknowledgment packet on another credit or data packet. If the Vc cannot piggyback the acknowledgment packet within this time, then the packet is returned without piggybacking.')
mscModVcsWinsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213), )
if mibBuilder.loadTexts: mscModVcsWinsTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsWinsTable.setDescription('This is the windowSize corresponding to the given packet size and throughput class. All Vcs using the windowSize matrix support large Vc windows on both ends of the Vc, and support the signalling of the chosen Vc window size from the destination (called) end to the source (calling) end. This is the only matrix supported. The windowSize should be configured in the same way network wide, though it can be upgraded on a module by module basis. Vcs using the windowSize matrix will run properly if the matrices on different nodes differ since the Vc window is selected by the destination (called) side of the Vc.')
mscModVcsWinsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsWinsPktIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsWinsTptIndex"))
if mibBuilder.loadTexts: mscModVcsWinsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsWinsEntry.setDescription('An entry in the mscModVcsWinsTable.')
mscModVcsWinsPktIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("n16", 0), ("n32", 1), ("n64", 2), ("n128", 3), ("n256", 4), ("n512", 5), ("n1024", 6), ("n2048", 7), ("n4096", 8), ("n8192", 9), ("n32768", 10), ("n65535", 11))))
if mibBuilder.loadTexts: mscModVcsWinsPktIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsWinsPktIndex.setDescription('This variable represents the next to last index for the mscModVcsWinsTable.')
mscModVcsWinsTptIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15)))
if mibBuilder.loadTexts: mscModVcsWinsTptIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsWinsTptIndex.setDescription('This variable represents the final index for the mscModVcsWinsTable.')
mscModVcsWinsValue = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscModVcsWinsValue.setStatus('mandatory')
if mibBuilder.loadTexts: mscModVcsWinsValue.setDescription('This variable represents an individual value for the mscModVcsWinsTable.')
subnetInterfaceGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1))
subnetInterfaceGroupCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1, 1))
subnetInterfaceGroupCA02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1, 1, 3))
subnetInterfaceGroupCA02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1, 1, 3, 2))
subnetInterfaceCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3))
subnetInterfaceCapabilitiesCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3, 1))
subnetInterfaceCapabilitiesCA02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3, 1, 3))
subnetInterfaceCapabilitiesCA02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3, 1, 3, 2))
mibBuilder.exportSymbols("Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", mscModVcsStorageType=mscModVcsStorageType, mscModVcs=mscModVcs, mscModVcsRowStatusEntry=mscModVcsRowStatusEntry, mscModVcsX121MinAddressLength=mscModVcsX121MinAddressLength, mscModVcsRowStatus=mscModVcsRowStatus, mscModVcsE164NatlMinAddressLength=mscModVcsE164NatlMinAddressLength, mscModVcsAccOptTable=mscModVcsAccOptTable, mscModVcsE164LocalAddressLength=mscModVcsE164LocalAddressLength, mscModVcsE164IntlMinAddressLength=mscModVcsE164IntlMinAddressLength, mscModVcsE164IntlMaxAddressLength=mscModVcsE164IntlMaxAddressLength, mscModVcsE164LocalMaxAddressLength=mscModVcsE164LocalMaxAddressLength, mscModVcsWinsTptIndex=mscModVcsWinsTptIndex, mscModVcsE164IntlPrefixDigits=mscModVcsE164IntlPrefixDigits, mscModVcsComponentName=mscModVcsComponentName, mscModVcsIndex=mscModVcsIndex, subnetInterfaceGroupCA=subnetInterfaceGroupCA, mscModVcsX121IntllPrefixDigit=mscModVcsX121IntllPrefixDigit, mscModVcsDelaySubnetAcks=mscModVcsDelaySubnetAcks, mscModVcsX121Type=mscModVcsX121Type, mscModVcsWinsTable=mscModVcsWinsTable, mscModVcsE164NatlPrefixDigit=mscModVcsE164NatlPrefixDigit, subnetInterfaceMIB=subnetInterfaceMIB, mscModVcsAccountingFax=mscModVcsAccountingFax, mscModVcsMaxSubnetPacketSize=mscModVcsMaxSubnetPacketSize, mscModVcsAddOptTable=mscModVcsAddOptTable, mscModVcsWinsValue=mscModVcsWinsValue, subnetInterfaceCapabilitiesCA02A=subnetInterfaceCapabilitiesCA02A, subnetInterfaceCapabilities=subnetInterfaceCapabilities, subnetInterfaceGroupCA02=subnetInterfaceGroupCA02, subnetInterfaceCapabilitiesCA=subnetInterfaceCapabilitiesCA, mscModVcsX121MaxAddressLength=mscModVcsX121MaxAddressLength, mscModVcsE164IntlFormatAllowed=mscModVcsE164IntlFormatAllowed, subnetInterfaceGroup=subnetInterfaceGroup, mscModVcsSegmentSize=mscModVcsSegmentSize, mscModVcsX121IntlAddresses=mscModVcsX121IntlAddresses, mscModVcsGenerationMode=mscModVcsGenerationMode, mscModVcsWinsEntry=mscModVcsWinsEntry, mscModVcsUnitsCounted=mscModVcsUnitsCounted, mscModVcsNetworkIdType=mscModVcsNetworkIdType, mscModVcsAccOptEntry=mscModVcsAccOptEntry, mscModVcsAddOptEntry=mscModVcsAddOptEntry, mscModVcsX121ToE164EscapeSignificance=mscModVcsX121ToE164EscapeSignificance, mscModVcsDefaultNumberingPlan=mscModVcsDefaultNumberingPlan, mscModVcsIntOptTable=mscModVcsIntOptTable, mscModVcsCallRetryTimer=mscModVcsCallRetryTimer, mscModVcsWinsPktIndex=mscModVcsWinsPktIndex, mscModVcsCallSetupTimer=mscModVcsCallSetupTimer, mscModVcsE164NatlMaxAddressLength=mscModVcsE164NatlMaxAddressLength, subnetInterfaceGroupCA02A=subnetInterfaceGroupCA02A, mscModVcsNetworkIdCode=mscModVcsNetworkIdCode, mscModVcsE164TeleCountryCode=mscModVcsE164TeleCountryCode, mscModVcsIntOptEntry=mscModVcsIntOptEntry, subnetInterfaceCapabilitiesCA02=subnetInterfaceCapabilitiesCA02, mscModVcsE164LocalMinAddressLength=mscModVcsE164LocalMinAddressLength, mscModVcsRowStatusTable=mscModVcsRowStatusTable, mscModVcsHighPriorityPacketSizes=mscModVcsHighPriorityPacketSizes)
| [] |
omniscale/svgserver | svgserver/app.py | a98f75ec9547fda25941129e854af046ba8f5dfe | import codecs
import tempfile
from contextlib import closing
from .cgi import CGIClient
from .combine import CombineSVG
from .mapserv import MapServer, InternalError
from .tree import build_tree
def _recursive_add_layer(nodes, params, svg, mapserver, translations):
for node in nodes:
group_name = format_group_name(node, translations)
svg.push_group(group_name)
if node.layer:
params["layers"] = node.layer
params["format"] = "image/svg+xml"
resp = mapserver.get(params)
if resp.headers["Content-type"] != "image/svg+xml":
raise InternalError(
"received non SVG response for layer %s:\n%s\n%s"
% (node.layer, resp.headers, resp.read())
)
svg.add(resp)
if node.subs:
_recursive_add_layer(node.subs, params, svg, mapserver, translations)
svg.pop_group()
def format_group_name(node, translations):
if isinstance(node.name, tuple):
return ', '.join(translations.get(n, n) for n in node.name)
return translations.get(node.name, node.name)
def layered_svg(params, translations={}, mapserver_binary="mapserv", root_id='map'):
mapserver = MapServer(binary=mapserver_binary)
layers = mapserver.layer_names(params)
nodes = build_tree(layers)
root_id = translations.get(root_id, root_id)
f = tempfile.TemporaryFile()
try:
with CombineSVG(f, root_id=root_id) as svg:
_recursive_add_layer(
nodes,
params=params,
svg=svg,
mapserver=mapserver,
translations=translations,
)
f.seek(0)
return f
except:
# close to remove temporary file
f.close()
raise
def load_translations(filename):
if not filename:
return {}
translations = {}
with codecs.open(filename, encoding="utf8") as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
if '=' not in line:
continue
key, translation = line.split('=', 1)
translations[key.strip()] = translation.strip()
return translations
if __name__ == "__main__":
import os
import logging
logging.basicConfig(level=logging.DEBUG)
params = {
"service": "WMS",
"version": "1.1.1",
"request": "GetMap",
"width": 1234,
"height": 769,
"srs": "EPSG:3857",
"styles": "",
"format": "image/svg+xml",
"bbox": "775214.9923087133,6721788.224989068,776688.4414913012,6722705.993822992",
"map": os.path.abspath(os.path.dirname(__file__) + "/../tests/ms.map"),
}
with closing(layered_svg(params)) as f:
print(f.read())
| [((41, 8, 41, 32), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ({}, {}), '()', False, 'import tempfile\n'), ((81, 4, 81, 44), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((65, 9, 65, 47), 'codecs.open', 'codecs.open', (), '', False, 'import codecs\n'), ((93, 31, 93, 56), 'os.path.dirname', 'os.path.dirname', ({(93, 47, 93, 55): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
israillaky/ERPOSAPP11 | 11_app/script/purchase_order.py | 90dd26213fecce7f6301bfa2f2356d8f5d3a8086 | import frappe
@frappe.whitelist()
def filt_itemby_supplier(doctype, txt, searchfield, start, page_len, filters):
	return frappe.db.sql("""Select parent from `tabItem Supplier` where supplier= %s""", (filters.get("supplier"),))
@frappe.whitelist()
def filteritem(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select item_code, item_name, item_group, volume, item_type,stock_uom from `tabItem`"""); | [((4, 1, 4, 19), 'frappe.whitelist', 'frappe.whitelist', ({}, {}), '()', False, 'import frappe\n'), ((8, 1, 8, 19), 'frappe.whitelist', 'frappe.whitelist', ({}, {}), '()', False, 'import frappe\n'), ((10, 11, 10, 115), 'frappe.db.sql', 'frappe.db.sql', ({(10, 25, 10, 114): '"""select item_code, item_name, item_group, volume, item_type,stock_uom from `tabItem`"""'}, {}), "(\n 'select item_code, item_name, item_group, volume, item_type,stock_uom from `tabItem`'\n )", False, 'import frappe\n')] |
duttaprat/proteinGAN | src/common/bio/smiles.py | 92b32192ab959e327e1d713d09fc9b40dc01d757 | from common.bio.constants import SMILES_CHARACTER_TO_ID, ID_TO_SMILES_CHARACTER
def from_smiles_to_id(data, column):
"""Converts sequences from smiles to ids
Args:
data: data that contains characters that need to be converted to ids
column: a column of the dataframe that contains characters that need to be converted to ids
Returns:
array of ids
"""
return [[SMILES_CHARACTER_TO_ID[char] for char in val] for index, val in data[column].iteritems()]
def from_id_from_smiles(data, column):
"""Converts sequences from ids to smiles characters
Args:
data: data that contains ids that need to be converted to characters
column: a column of the dataframe that contains ids that need to be converted to characters
Returns:
array of characters
"""
return [[ID_TO_SMILES_CHARACTER[id] for id in val] for index, val in data[column].iteritems()]
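# A minimal usage sketch (not part of the original module). It assumes the
# input is a pandas DataFrame whose column holds SMILES strings, matching the
# .iteritems() calls above (pandas < 2.0); the column names and molecules
# below are illustrative only.
if __name__ == "__main__":
    import pandas as pd

    demo = pd.DataFrame({"smiles": ["CCO", "C=O"]})
    ids = from_smiles_to_id(demo, "smiles")                           # characters -> ids
    chars = from_id_from_smiles(pd.DataFrame({"ids": ids}), "ids")    # ids -> characters
    print(ids, chars)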
| [] |
yokoyama-flogics/ibp_monitor_2 | test/lib_config_test.py | 1a7df55a524ff3a7908df330e7e02c9f27e24ae0 | import os
import sys
import unittest
# Set Python search path to the parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.config import *
class TestLibConfig(unittest.TestCase):
def test_config_noconfigfile(self):
config = BeaconConfigParser('not_exist.cfg')
with self.assertRaises(ConfigParser.NoSectionError):
config.getpath('Test', 'dbdir')
def test_config_default(self):
import os
os.environ['HOME'] = 'notexist'
config = BeaconConfigParser()
with self.assertRaises(ConfigParser.NoSectionError):
config.get('Signal', 'samplerate')
def test_config_items(self):
config = BeaconConfigParser('test_config.cfg')
self.assertEqual(config.get('Test', 'dbdir'), 'nodb')
self.assertEqual(config.getpath('Test', 'dbdir'), 'nodb')
self.assertEqual(config.getint('Signal', 'samplerate'), 16000)
if __name__ == "__main__":
unittest.main(buffer=True)
| [((29, 4, 29, 30), 'unittest.main', 'unittest.main', (), '', False, 'import unittest\n'), ((6, 29, 6, 54), 'os.path.dirname', 'os.path.dirname', ({(6, 45, 6, 53): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
phdye/nimporter | tests/test_installation.py | 64eccc74950811e03efdde50649e84ca1fe87ae4 | """
Test to make sure that libraries built with Nimporter can be installed via Pip.
"""
import sys, os, subprocess, shutil, pkg_resources, json, warnings
from pathlib import Path
import pytest
import nimporter
PYTHON = 'python' if sys.platform == 'win32' else 'python3'
PIP = 'pip' if shutil.which('pip') else 'pip3'
@pytest.mark.integration_test
def test_ensure_nimporter_installed():
"Make sure that Nimporter is installed before running integration tests."
libs = {lib.key.lower() for lib in pkg_resources.working_set}
assert 'nimporter' in libs, (
f'Nimporter is not installed. Please install via:'
f'`{PIP} install .` before running the integration tests.'
)
@pytest.mark.integration_test
def test_create_sdist():
"Test the successful creation of a source distribution."
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py sdist'.split()).wait()
dist = Path('dist')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert egg.exists()
targets = list(dist.glob('project1*'))
assert len(targets) == 1
assert targets[0].exists()
# Make sure the appropriate compiler is being used
for extension in Path('nim-extensions').iterdir():
(nim_build_data_file,) = extension.glob('*json')
nim_build_data = json.loads(nim_build_data_file.read_text())
expected = nimporter.NimCompiler.get_compatible_compiler()
installed_ccs = nimporter.NimCompiler.get_installed_compilers()
if not expected:
warnings.warn(
f'No compatible C compiler installed: {installed_ccs}'
)
else:
cc_path = installed_ccs[expected]
actual = nim_build_data['linkcmd'].split()[0].strip()
if not actual.startswith(cc_path.stem):
warnings.warn(
f'Nim used a different C compiler than what Python '
f'expects. Python uses {cc_path.stem} and Nim used '
f'{actual}'
)
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(egg.absolute()))
@pytest.mark.integration_test
def test_create_bdist():
"Test the successful create of a wheel."
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py bdist_wheel'.split()).wait()
dist = Path('dist')
build = Path('build')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert build.exists()
assert egg.exists()
targets = list(Path('dist').glob('project1*.whl'))
assert len(targets) == 1
assert targets[0].exists()
# Make sure the appropriate compiler is being used
for extension in Path('nim-extensions').iterdir():
(nim_build_data_file,) = extension.glob('*json')
nim_build_data = json.loads(nim_build_data_file.read_text())
expected = nimporter.NimCompiler.get_compatible_compiler()
installed_ccs = nimporter.NimCompiler.get_installed_compilers()
if not expected:
warnings.warn(
f'No compatible C compiler installed: {installed_ccs}'
)
else:
cc_path = installed_ccs[expected]
actual = nim_build_data['linkcmd'].split()[0].strip()
if not actual.startswith(cc_path.stem):
warnings.warn(
f'Nim used a different C compiler than what Python '
f'expects. Python uses {cc_path.stem} and Nim used '
f'{actual}'
)
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(build.absolute()))
shutil.rmtree(str(egg.absolute()))
@pytest.mark.slow_integration_test
def test_install_sdist():
"Make sure that the project can be installed by Pip"
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py sdist'.split()).wait()
dist = Path('dist')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert egg.exists()
targets = list(dist.glob('project1*'))
assert len(targets) == 1
(target,) = targets
assert target.exists()
subprocess.Popen(f'{PIP} install {target}'.split()).wait()
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(egg.absolute()))
# Make sure that `tests/proj1` is not imported as a SimpleNamespace and that
# the installed library in `site-packages` is used.
with nimporter.cd('../..'):
try:
import proj1
assert proj1
import proj1.performance
assert proj1.performance
import proj1.lib1
assert proj1.lib1
assert proj1.foo
assert proj1.bar
assert proj1.baz
assert proj1.baz() == 1
except Exception as e:
warnings.warn(str(e))
# Cannot delete a DLL in use by another process on Windows
if sys.platform != 'win32':
subprocess.Popen(f'{PIP} uninstall project1 -y'.split()).wait()
@pytest.mark.slow_integration_test
def test_install_bdist():
"Make sure that the wheel can be installed by Pip"
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py bdist_wheel'.split()).wait()
dist = Path('dist')
build = Path('build')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert build.exists()
assert egg.exists()
targets = list(Path('dist').glob('project1*.whl'))
assert len(targets) == 1
wheel = targets[0]
assert wheel.exists()
subprocess.Popen(f'{PIP} install {wheel}'.split()).wait()
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(build.absolute()))
shutil.rmtree(str(egg.absolute()))
# Make sure that `tests/proj1` is not imported as a SimpleNamespace and that
# the installed library in `site-packages` is used.
with nimporter.cd('../..'):
try:
import proj1
assert proj1
import proj1.performance
assert proj1.performance
import proj1.lib1
assert proj1.lib1
assert proj1.foo
assert proj1.bar
assert proj1.baz
assert proj1.baz() == 1
except Exception as e:
warnings.warn(str(e))
# Cannot delete a DLL in use by another process on Windows
if sys.platform != 'win32':
subprocess.Popen(f'{PIP} uninstall project1 -y'.split()).wait()
| [((11, 15, 11, 34), 'shutil.which', 'shutil.which', ({(11, 28, 11, 33): '"""pip"""'}, {}), "('pip')", False, 'import sys, os, subprocess, shutil, pkg_resources, json, warnings\n'), ((25, 9, 25, 36), 'nimporter.cd', 'nimporter.cd', ({(25, 22, 25, 35): '"""tests/proj1"""'}, {}), "('tests/proj1')", False, 'import nimporter\n'), ((27, 15, 27, 27), 'pathlib.Path', 'Path', ({(27, 20, 27, 26): '"""dist"""'}, {}), "('dist')", False, 'from pathlib import Path\n'), ((28, 14, 28, 39), 'pathlib.Path', 'Path', ({(28, 19, 28, 38): '"""project1.egg-info"""'}, {}), "('project1.egg-info')", False, 'from pathlib import Path\n'), ((63, 9, 63, 36), 'nimporter.cd', 'nimporter.cd', ({(63, 22, 63, 35): '"""tests/proj1"""'}, {}), "('tests/proj1')", False, 'import nimporter\n'), ((65, 15, 65, 27), 'pathlib.Path', 'Path', ({(65, 20, 65, 26): '"""dist"""'}, {}), "('dist')", False, 'from pathlib import Path\n'), ((66, 16, 66, 29), 'pathlib.Path', 'Path', ({(66, 21, 66, 28): '"""build"""'}, {}), "('build')", False, 'from pathlib import Path\n'), ((67, 14, 67, 39), 'pathlib.Path', 'Path', ({(67, 19, 67, 38): '"""project1.egg-info"""'}, {}), "('project1.egg-info')", False, 'from pathlib import Path\n'), ((104, 9, 104, 36), 'nimporter.cd', 'nimporter.cd', ({(104, 22, 104, 35): '"""tests/proj1"""'}, {}), "('tests/proj1')", False, 'import nimporter\n'), ((106, 15, 106, 27), 'pathlib.Path', 'Path', ({(106, 20, 106, 26): '"""dist"""'}, {}), "('dist')", False, 'from pathlib import Path\n'), ((107, 14, 107, 39), 'pathlib.Path', 'Path', ({(107, 19, 107, 38): '"""project1.egg-info"""'}, {}), "('project1.egg-info')", False, 'from pathlib import Path\n'), ((122, 9, 122, 30), 'nimporter.cd', 'nimporter.cd', ({(122, 22, 122, 29): '"""../.."""'}, {}), "('../..')", False, 'import nimporter\n'), ((145, 9, 145, 36), 'nimporter.cd', 'nimporter.cd', ({(145, 22, 145, 35): '"""tests/proj1"""'}, {}), "('tests/proj1')", False, 'import nimporter\n'), ((147, 15, 147, 27), 'pathlib.Path', 'Path', ({(147, 20, 147, 26): '"""dist"""'}, {}), "('dist')", False, 'from pathlib import Path\n'), ((148, 16, 148, 29), 'pathlib.Path', 'Path', ({(148, 21, 148, 28): '"""build"""'}, {}), "('build')", False, 'from pathlib import Path\n'), ((149, 14, 149, 39), 'pathlib.Path', 'Path', ({(149, 19, 149, 38): '"""project1.egg-info"""'}, {}), "('project1.egg-info')", False, 'from pathlib import Path\n'), ((167, 9, 167, 30), 'nimporter.cd', 'nimporter.cd', ({(167, 22, 167, 29): '"""../.."""'}, {}), "('../..')", False, 'import nimporter\n'), ((40, 27, 40, 74), 'nimporter.NimCompiler.get_compatible_compiler', 'nimporter.NimCompiler.get_compatible_compiler', ({}, {}), '()', False, 'import nimporter\n'), ((41, 32, 41, 79), 'nimporter.NimCompiler.get_installed_compilers', 'nimporter.NimCompiler.get_installed_compilers', ({}, {}), '()', False, 'import nimporter\n'), ((80, 27, 80, 74), 'nimporter.NimCompiler.get_compatible_compiler', 'nimporter.NimCompiler.get_compatible_compiler', ({}, {}), '()', False, 'import nimporter\n'), ((81, 32, 81, 79), 'nimporter.NimCompiler.get_installed_compilers', 'nimporter.NimCompiler.get_installed_compilers', ({}, {}), '()', False, 'import nimporter\n'), ((133, 19, 133, 30), 'proj1.baz', 'proj1.baz', ({}, {}), '()', False, 'import proj1\n'), ((178, 19, 178, 30), 'proj1.baz', 'proj1.baz', ({}, {}), '()', False, 'import proj1\n'), ((37, 29, 37, 51), 'pathlib.Path', 'Path', ({(37, 34, 37, 50): '"""nim-extensions"""'}, {}), "('nim-extensions')", False, 'from pathlib import Path\n'), ((43, 20, 45, 21), 'warnings.warn', 'warnings.warn', ({(44, 
24, 44, 78): 'f"""No compatible C compiler installed: {installed_ccs}"""'}, {}), "(f'No compatible C compiler installed: {installed_ccs}')", False, 'import sys, os, subprocess, shutil, pkg_resources, json, warnings\n'), ((77, 29, 77, 51), 'pathlib.Path', 'Path', ({(77, 34, 77, 50): '"""nim-extensions"""'}, {}), "('nim-extensions')", False, 'from pathlib import Path\n'), ((83, 20, 85, 21), 'warnings.warn', 'warnings.warn', ({(84, 24, 84, 78): 'f"""No compatible C compiler installed: {installed_ccs}"""'}, {}), "(f'No compatible C compiler installed: {installed_ccs}')", False, 'import sys, os, subprocess, shutil, pkg_resources, json, warnings\n'), ((50, 24, 54, 25), 'warnings.warn', 'warnings.warn', ({(51, 28, 53, 39): 'f"""Nim used a different C compiler than what Python expects. Python uses {cc_path.stem} and Nim used {actual}"""'}, {}), "(\n f'Nim used a different C compiler than what Python expects. Python uses {cc_path.stem} and Nim used {actual}'\n )", False, 'import sys, os, subprocess, shutil, pkg_resources, json, warnings\n'), ((72, 27, 72, 39), 'pathlib.Path', 'Path', ({(72, 32, 72, 38): '"""dist"""'}, {}), "('dist')", False, 'from pathlib import Path\n'), ((90, 24, 94, 25), 'warnings.warn', 'warnings.warn', ({(91, 28, 93, 39): 'f"""Nim used a different C compiler than what Python expects. Python uses {cc_path.stem} and Nim used {actual}"""'}, {}), "(\n f'Nim used a different C compiler than what Python expects. Python uses {cc_path.stem} and Nim used {actual}'\n )", False, 'import sys, os, subprocess, shutil, pkg_resources, json, warnings\n'), ((154, 27, 154, 39), 'pathlib.Path', 'Path', ({(154, 32, 154, 38): '"""dist"""'}, {}), "('dist')", False, 'from pathlib import Path\n')] |
bvanaken/pytorch-pretrained-BERT | hotpot_sample_dict.py | 71c1660fb082fa5ebde4afd8c7db2bc96b80bb59 | samples = {
"2_brother_plays": {
"question_parts": [range(1, 13), range(13, 17)],
"sp_parts": [range(20, 43), range(50, 60)]
}
}
| [] |
alexander-sidorov/tms-z43 | src/applications/blog/migrations/0003_post_author.py | 61ecd204f5de4e97ff0300f6ef91c36c2bcda31c | # Generated by Django 3.1.7 on 2021-03-24 17:41
import django.db.models.deletion
from django.conf import settings
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("blog", "0002_auto_20210323_1834"),
]
operations = [
migrations.AddField(
model_name="post",
name="author",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
]
| [((12, 8, 12, 65), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', ({(12, 40, 12, 64): 'settings.AUTH_USER_MODEL'}, {}), '(settings.AUTH_USER_MODEL)', False, 'from django.db import migrations\n'), ((20, 18, 25, 13), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n')] |
mdop-wh/pulumi-aws | sdk/python/pulumi_aws/cloudformation/stack_set.py | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['StackSet']
class StackSet(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
administration_role_arn: Optional[pulumi.Input[str]] = None,
capabilities: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
execution_role_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a CloudFormation StackSet. StackSets allow CloudFormation templates to be easily deployed across multiple accounts and regions via StackSet Instances (`cloudformation.StackSetInstance` resource). Additional information about StackSets can be found in the [AWS CloudFormation User Guide](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/what-is-cfnstacksets.html).
> **NOTE:** All template parameters, including those with a `Default`, must be configured or ignored with the `lifecycle` configuration block `ignore_changes` argument.
> **NOTE:** All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
a_ws_cloud_formation_stack_set_administration_role_assume_role_policy = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=["sts:AssumeRole"],
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
identifiers=["cloudformation.amazonaws.com"],
type="Service",
)],
)])
a_ws_cloud_formation_stack_set_administration_role = aws.iam.Role("aWSCloudFormationStackSetAdministrationRole", assume_role_policy=a_ws_cloud_formation_stack_set_administration_role_assume_role_policy.json)
example = aws.cloudformation.StackSet("example",
administration_role_arn=a_ws_cloud_formation_stack_set_administration_role.arn,
parameters={
"VPCCidr": "10.0.0.0/16",
},
template_body=\"\"\"{
"Parameters" : {
"VPCCidr" : {
"Type" : "String",
"Default" : "10.0.0.0/16",
"Description" : "Enter the CIDR block for the VPC. Default is 10.0.0.0/16."
}
},
"Resources" : {
"myVpc": {
"Type" : "AWS::EC2::VPC",
"Properties" : {
"CidrBlock" : { "Ref" : "VPCCidr" },
"Tags" : [
{"Key": "Name", "Value": "Primary_CF_VPC"}
]
}
}
}
}
\"\"\")
a_ws_cloud_formation_stack_set_administration_role_execution_policy_policy_document = example.execution_role_name.apply(lambda execution_role_name: aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=["sts:AssumeRole"],
effect="Allow",
resources=[f"arn:aws:iam::*:role/{execution_role_name}"],
)]))
a_ws_cloud_formation_stack_set_administration_role_execution_policy_role_policy = aws.iam.RolePolicy("aWSCloudFormationStackSetAdministrationRoleExecutionPolicyRolePolicy",
policy=a_ws_cloud_formation_stack_set_administration_role_execution_policy_policy_document.json,
role=a_ws_cloud_formation_stack_set_administration_role.name)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] administration_role_arn: Amazon Resource Number (ARN) of the IAM Role in the administrator account.
:param pulumi.Input[List[pulumi.Input[str]]] capabilities: A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`.
:param pulumi.Input[str] description: Description of the StackSet.
:param pulumi.Input[str] execution_role_name: Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`.
:param pulumi.Input[str] name: Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified.
:param pulumi.Input[str] template_body: String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`.
:param pulumi.Input[str] template_url: String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if administration_role_arn is None:
raise TypeError("Missing required property 'administration_role_arn'")
__props__['administration_role_arn'] = administration_role_arn
__props__['capabilities'] = capabilities
__props__['description'] = description
__props__['execution_role_name'] = execution_role_name
__props__['name'] = name
__props__['parameters'] = parameters
__props__['tags'] = tags
__props__['template_body'] = template_body
__props__['template_url'] = template_url
__props__['arn'] = None
__props__['stack_set_id'] = None
super(StackSet, __self__).__init__(
'aws:cloudformation/stackSet:StackSet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
administration_role_arn: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
capabilities: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
execution_role_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
stack_set_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None) -> 'StackSet':
"""
Get an existing StackSet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] administration_role_arn: Amazon Resource Number (ARN) of the IAM Role in the administrator account.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the StackSet.
:param pulumi.Input[List[pulumi.Input[str]]] capabilities: A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`.
:param pulumi.Input[str] description: Description of the StackSet.
:param pulumi.Input[str] execution_role_name: Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`.
:param pulumi.Input[str] name: Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
:param pulumi.Input[str] stack_set_id: Unique identifier of the StackSet.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified.
:param pulumi.Input[str] template_body: String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`.
:param pulumi.Input[str] template_url: String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["administration_role_arn"] = administration_role_arn
__props__["arn"] = arn
__props__["capabilities"] = capabilities
__props__["description"] = description
__props__["execution_role_name"] = execution_role_name
__props__["name"] = name
__props__["parameters"] = parameters
__props__["stack_set_id"] = stack_set_id
__props__["tags"] = tags
__props__["template_body"] = template_body
__props__["template_url"] = template_url
return StackSet(resource_name, opts=opts, __props__=__props__)
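    # A hedged usage sketch (not from the provider docs; the resource name and
    # id value below are hypothetical placeholders): an existing StackSet can
    # be adopted into a program with the lookup above, e.g.
    #
    #     imported = StackSet.get("imported", id="my-existing-stack-set")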
@property
@pulumi.getter(name="administrationRoleArn")
def administration_role_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Number (ARN) of the IAM Role in the administrator account.
"""
return pulumi.get(self, "administration_role_arn")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the StackSet.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def capabilities(self) -> pulumi.Output[Optional[List[str]]]:
"""
A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`.
"""
return pulumi.get(self, "capabilities")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the StackSet.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="executionRoleName")
def execution_role_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`.
"""
return pulumi.get(self, "execution_role_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="stackSetId")
def stack_set_id(self) -> pulumi.Output[str]:
"""
Unique identifier of the StackSet.
"""
return pulumi.get(self, "stack_set_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="templateBody")
def template_body(self) -> pulumi.Output[str]:
"""
String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`.
"""
return pulumi.get(self, "template_body")
@property
@pulumi.getter(name="templateUrl")
def template_url(self) -> pulumi.Output[Optional[str]]:
"""
String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`.
"""
return pulumi.get(self, "template_url")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [((188, 5, 188, 48), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((220, 5, 220, 44), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((244, 5, 244, 37), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((260, 5, 260, 39), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((268, 5, 268, 38), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((193, 15, 193, 58), 'pulumi.get', 'pulumi.get', ({(193, 26, 193, 30): 'self', (193, 32, 193, 57): '"""administration_role_arn"""'}, {}), "(self, 'administration_role_arn')", False, 'import pulumi\n'), ((201, 15, 201, 38), 'pulumi.get', 'pulumi.get', ({(201, 26, 201, 30): 'self', (201, 32, 201, 37): '"""arn"""'}, {}), "(self, 'arn')", False, 'import pulumi\n'), ((209, 15, 209, 47), 'pulumi.get', 'pulumi.get', ({(209, 26, 209, 30): 'self', (209, 32, 209, 46): '"""capabilities"""'}, {}), "(self, 'capabilities')", False, 'import pulumi\n'), ((217, 15, 217, 46), 'pulumi.get', 'pulumi.get', ({(217, 26, 217, 30): 'self', (217, 32, 217, 45): '"""description"""'}, {}), "(self, 'description')", False, 'import pulumi\n'), ((225, 15, 225, 54), 'pulumi.get', 'pulumi.get', ({(225, 26, 225, 30): 'self', (225, 32, 225, 53): '"""execution_role_name"""'}, {}), "(self, 'execution_role_name')", False, 'import pulumi\n'), ((233, 15, 233, 39), 'pulumi.get', 'pulumi.get', ({(233, 26, 233, 30): 'self', (233, 32, 233, 38): '"""name"""'}, {}), "(self, 'name')", False, 'import pulumi\n'), ((241, 15, 241, 45), 'pulumi.get', 'pulumi.get', ({(241, 26, 241, 30): 'self', (241, 32, 241, 44): '"""parameters"""'}, {}), "(self, 'parameters')", False, 'import pulumi\n'), ((249, 15, 249, 47), 'pulumi.get', 'pulumi.get', ({(249, 26, 249, 30): 'self', (249, 32, 249, 46): '"""stack_set_id"""'}, {}), "(self, 'stack_set_id')", False, 'import pulumi\n'), ((257, 15, 257, 39), 'pulumi.get', 'pulumi.get', ({(257, 26, 257, 30): 'self', (257, 32, 257, 38): '"""tags"""'}, {}), "(self, 'tags')", False, 'import pulumi\n'), ((265, 15, 265, 48), 'pulumi.get', 'pulumi.get', ({(265, 26, 265, 30): 'self', (265, 32, 265, 47): '"""template_body"""'}, {}), "(self, 'template_body')", False, 'import pulumi\n'), ((273, 15, 273, 47), 'pulumi.get', 'pulumi.get', ({(273, 26, 273, 30): 'self', (273, 32, 273, 46): '"""template_url"""'}, {}), "(self, 'template_url')", False, 'import pulumi\n'), ((101, 12, 101, 87), 'warnings.warn', 'warnings.warn', ({(101, 26, 101, 66): '"""explicit use of __name__ is deprecated"""', (101, 68, 101, 86): 'DeprecationWarning'}, {}), "('explicit use of __name__ is deprecated', DeprecationWarning)", False, 'import warnings\n'), ((104, 12, 104, 107), 'warnings.warn', 'warnings.warn', ({(104, 26, 104, 86): '"""explicit use of __opts__ is deprecated, use \'opts\' instead"""', (104, 88, 104, 106): 'DeprecationWarning'}, {}), '("explicit use of __opts__ is deprecated, use \'opts\' instead",\n DeprecationWarning)', False, 'import warnings\n'), ((107, 19, 107, 43), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ({}, {}), '()', False, 'import pulumi\n'), ((170, 50, 170, 79), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', (), '', False, 'import pulumi\n')] |
farioso-fernando/cover-meu-beat | code/config/imports.py | b15a9c0c97086e51e42cee4dd40e7d0650130d0e | from kivy.uix.screenmanager import ScreenManager
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.animation import Animation
from kivy.core.window import Window
from kivymd.app import MDApp
import kivymd
import kivy
print(
)
def version():
kivy.require('2.0.0')
print(
) | [((15, 4, 15, 25), 'kivy.require', 'kivy.require', ({(15, 17, 15, 24): '"""2.0.0"""'}, {}), "('2.0.0')", False, 'import kivy\n')] |
kwalberg/claripy | claripy/vsa/valueset.py | b5cfa0a355eaa3cd5403e1d81f0b80bb3db20c90 | import functools
import itertools
import numbers
from ..backend_object import BackendObject
from ..annotation import Annotation
def normalize_types_two_args(f):
@functools.wraps(f)
def normalizer(self, region, o):
"""
Convert any object to an object that we can process.
"""
if isinstance(o, Base):
raise ClaripyValueError("BoolResult can't handle AST objects directly")
if not isinstance(o, StridedInterval):
raise ClaripyVSAOperationError('Unsupported operand type %s' % type(o))
return f(self, region, o)
return normalizer
def normalize_types_one_arg(f):
@functools.wraps(f)
def normalizer(self, o):
"""
Convert any object to an object that we can process.
"""
if isinstance(o, Base):
raise ClaripyValueError("BoolResult can't handle AST objects directly")
return f(self, o)
return normalizer
vs_id_ctr = itertools.count()
class RegionAnnotation(Annotation):
"""
Use RegionAnnotation to annotate ASTs. Normally, an AST annotated by RegionAnnotations is treated as a ValueSet.
Note that Annotation objects are immutable. Do not change properties of an Annotation object without creating a new
one.
"""
def __init__(self, region_id, region_base_addr, offset):
self.region_id = region_id
self.region_base_addr = region_base_addr
self.offset = offset
# Do necessary conversion here
if isinstance(self.region_base_addr, Base):
self.region_base_addr = self.region_base_addr._model_vsa
if isinstance(self.offset, Base):
self.offset = self.offset._model_vsa
@property
def eliminatable(self):
"""
A Region annotation is not eliminatable in simplifications.
:return: False
:rtype: bool
"""
return False
@property
def relocatable(self):
"""
A Region annotation is not relocatable in simplifications.
:return: False
:rtype: bool
"""
return False
#
# Public methods
#
def relocate(self, src, dst):
"""
Override Annotation.relocate().
:param src: The old AST
:param dst: The new AST, as the result of a simplification
:return: The new annotation that should be applied on the new AST
"""
raise ClaripyVSAError('RegionAnnotation is not relocatable')
#
# Overriding base methods
#
def __hash__(self):
return hash((self.region_id, self.region_base_addr, hash(self.offset)))
def __repr__(self):
return "<RegionAnnotation %s:%#08x>" % (self.region_id, self.offset)
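# A hedged usage sketch (not part of the original file; the region name and
# numbers are illustrative): a RegionAnnotation is normally folded into a
# ValueSet via ValueSet.apply_annotation() below, which merges the
# (region_id, region_base_addr, offset) triple into the region map.
#
#     offset = StridedInterval(bits=32, stride=4, lower_bound=0, upper_bound=0x20)
#     anno = RegionAnnotation('stack_0', 0x7fff0000, offset)
#     vs = ValueSet(bits=32).apply_annotation(anno)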
class ValueSet(BackendObject):
"""
ValueSet is a mapping between memory regions and corresponding offsets.
"""
def __init__(self, name=None, region=None, region_base_addr=None, bits=None, val=None):
"""
Constructor.
:param str name: Name of this ValueSet object. Only for debugging purposes.
:param str region: Region ID.
:param int region_base_addr: Base address of the region.
:param int bits: Size of the ValueSet.
:param val: an initial offset
"""
self._name = 'VS_%d' % next(vs_id_ctr) if name is None else name
if bits is None:
raise ClaripyVSAError('bits must be specified when creating a ValueSet.')
self._bits = bits
self._si = StridedInterval.empty(bits)
self._regions = {}
self._region_base_addrs = {}
self._reversed = False
# Shortcuts for initialization
# May not be useful though...
if region is not None and region_base_addr is not None and val is not None:
if isinstance(region_base_addr, numbers.Number):
# Convert it to a StridedInterval
region_base_addr = StridedInterval(bits=self._bits, stride=1,
lower_bound=region_base_addr,
upper_bound=region_base_addr)
if isinstance(val, numbers.Number):
val = StridedInterval(bits=bits, stride=0, lower_bound=val, upper_bound=val)
if isinstance(val, StridedInterval):
self._set_si(region, region_base_addr, val)
else:
raise ClaripyVSAError("Unsupported type '%s' for argument 'val'" % type(val))
else:
if region is not None or val is not None:
raise ClaripyVSAError("You must specify 'region' and 'val' at the same time.")
#
# Properties
#
@property
def name(self):
return self._name
@property
def bits(self):
return self._bits
@property
def regions(self):
return self._regions
@property
def reversed(self):
return self._reversed
@property
def unique(self):
        return len(self.regions) == 1 and next(iter(self.regions.values())).unique
@property
def cardinality(self):
card = 0
for region in self._regions:
card += self._regions[region].cardinality
return card
@property
def is_empty(self):
return len(self._regions) == 0
@property
def valueset(self):
return self
#
# Private methods
#
def _set_si(self, region, region_base_addr, si):
if isinstance(si, numbers.Number):
si = StridedInterval(bits=self.bits, stride=0, lower_bound=si, upper_bound=si)
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if not isinstance(si, StridedInterval):
raise ClaripyVSAOperationError('Unsupported type %s for si' % type(si))
self._regions[region] = si
self._region_base_addrs[region] = region_base_addr
self._si = self._si.union(region_base_addr + si)
def _merge_si(self, region, region_base_addr, si):
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if region not in self._regions:
self._set_si(region, region_base_addr, si)
else:
self._regions[region] = self._regions[region].union(si)
self._region_base_addrs[region] = self._region_base_addrs[region].union(region_base_addr)
self._si = self._si.union(region_base_addr + si)
#
# Public methods
#
@staticmethod
def empty(bits):
return ValueSet(bits=bits)
def items(self):
return self._regions.items()
def size(self):
return len(self)
def copy(self):
"""
Make a copy of self and return.
:return: A new ValueSet object.
:rtype: ValueSet
"""
vs = ValueSet(bits=self.bits)
vs._regions = self._regions.copy()
vs._region_base_addrs = self._region_base_addrs.copy()
vs._reversed = self._reversed
vs._si = self._si.copy()
return vs
def get_si(self, region):
if region in self._regions:
return self._regions[region]
# TODO: Should we return a None, or an empty SI instead?
return None
def stridedinterval(self):
return self._si
def apply_annotation(self, annotation):
"""
Apply a new annotation onto self, and return a new ValueSet object.
:param RegionAnnotation annotation: The annotation to apply.
:return: A new ValueSet object
:rtype: ValueSet
"""
vs = self.copy()
vs._merge_si(annotation.region_id, annotation.region_base_addr, annotation.offset)
return vs
def __repr__(self):
        s = ", ".join("%s: %s" % (region, si) for region, si in self._regions.items())
        return "(" + s + ")"
def __len__(self):
return self._bits
def __hash__(self):
return hash(tuple((r, hash(self._regions[r])) for r in self._regions))
#
# Arithmetic operations
#
@normalize_types_one_arg
def __add__(self, other):
"""
Binary operation: addition
        Note that even if "other" is a ValueSet object, we still treat it as a StridedInterval. Adding two ValueSets
together does not make sense (which is essentially adding two pointers together).
:param StridedInterval other: The other operand.
:return: A new ValueSet object
:rtype: ValueSet
"""
new_vs = ValueSet(bits=self.bits)
# Call __add__ on self._si
new_vs._si = self._si.__add__(other)
for region in self._regions:
new_vs._regions[region] = self._regions[region] + other
return new_vs
@normalize_types_one_arg
def __radd__(self, other):
return self.__add__(other)
@normalize_types_one_arg
def __sub__(self, other):
"""
Binary operation: subtraction
:param other: The other operand
:return: A StridedInterval or a ValueSet.
"""
deltas = [ ]
# TODO: Handle more cases
if isinstance(other, ValueSet):
# A subtraction between two ValueSets produces a StridedInterval
if self.regions.keys() == other.regions.keys():
for region in self._regions:
deltas.append(self._regions[region] - other._regions[region])
else:
# TODO: raise the proper exception here
raise NotImplementedError()
delta = StridedInterval.empty(self.bits)
for d in deltas:
delta = delta.union(d)
return delta
else:
# A subtraction between a ValueSet and a StridedInterval produces another ValueSet
new_vs = self.copy()
# Call __sub__ on the base class
new_vs._si = self._si.__sub__(other)
for region, si in new_vs._regions.items():
new_vs._regions[region] = si - other
return new_vs
@normalize_types_one_arg
def __and__(self, other):
"""
Binary operation: and
Note that even if `other` is a ValueSet object, it will be treated as a StridedInterval as well. Doing & between
        two pointers that are not the same does not make sense.
:param other: The other operand
:return: A ValueSet as the result
:rtype: ValueSet
"""
if type(other) is ValueSet:
# The only case where calling & between two points makes sense
if self.identical(other):
return self.copy()
if BoolResult.is_true(other == 0):
# Corner case: a & 0 = 0
return StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0)
if BoolResult.is_true(other < 0x100):
# Special case - sometimes (addr & mask) is used for testing whether the address is aligned or not
# We return a StridedInterval instead
ret = None
for region, si in self._regions.items():
r = si.__and__(other)
ret = r if ret is None else ret.union(r)
return ret
else:
# We should return a ValueSet here
new_vs = self.copy()
for region, si in self._regions.items():
r = si.__and__(other)
new_vs._regions[region] = r
return new_vs
def __eq__(self, other):
"""
Binary operation: ==
:param other: The other operand
:return: True/False/Maybe
"""
if isinstance(other, ValueSet):
same = False
different = False
for region, si in other.regions.items():
if region in self.regions:
comp_ret = self.regions[region] == si
if BoolResult.has_true(comp_ret):
same = True
if BoolResult.has_false(comp_ret):
different = True
else:
different = True
if same and not different:
return TrueResult()
if same and different:
return MaybeResult()
return FalseResult()
elif isinstance(other, StridedInterval):
if 'global' in self.regions:
return self.regions['global'] == other
else:
return FalseResult()
else:
return FalseResult()
def __ne__(self, other):
"""
        Binary operation: !=
:param other: The other operand
:return: True/False/Maybe
"""
return ~ (self == other)
#
# Backend operations
#
def eval(self, n, signed=False):
if signed:
# How are you going to deal with a negative pointer?
raise ClaripyVSAOperationError('`signed` cannot be True when calling ValueSet.eval().')
results = []
for _, si in self._regions.items():
if len(results) < n:
results.extend(si.eval(n))
return results
@property
def min(self):
"""
The minimum integer value of a value-set. It is only defined when there is exactly one region.
        :return: An integer that represents the minimum integer value of this value-set.
:rtype: int
"""
if len(self.regions) != 1:
            raise ClaripyVSAOperationError("'min()' only works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).min
@property
def max(self):
"""
The maximum integer value of a value-set. It is only defined when there is exactly one region.
        :return: An integer that represents the maximum integer value of this value-set.
:rtype: int
"""
if len(self.regions) != 1:
            raise ClaripyVSAOperationError("'max()' only works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).max
def reverse(self):
# TODO: obviously valueset.reverse is not properly implemented. I'm disabling the old annoying output line for
# TODO: now. I will implement the proper reversing support soon.
vs = self.copy()
vs._reversed = not vs._reversed
return vs
def extract(self, high_bit, low_bit):
"""
Operation extract
- A cheap hack is implemented: a copy of self is returned if (high_bit - low_bit + 1 == self.bits), which is a
ValueSet instance. Otherwise a StridedInterval is returned.
:param high_bit:
:param low_bit:
:return: A ValueSet or a StridedInterval
"""
if high_bit - low_bit + 1 == self.bits:
return self.copy()
if ('global' in self._regions and len(self._regions.keys()) > 1) or \
len(self._regions.keys()) > 0:
si_ret = StridedInterval.top(high_bit - low_bit + 1)
else:
if 'global' in self._regions:
si = self._regions['global']
si_ret = si.extract(high_bit, low_bit)
else:
si_ret = StridedInterval.empty(high_bit - low_bit + 1)
return si_ret
def concat(self, b):
new_vs = ValueSet(bits=self.bits + b.bits)
# TODO: This logic is obviously flawed. Correct it later :-(
if isinstance(b, StridedInterval):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b))
elif isinstance(b, ValueSet):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b.get_si(region)))
else:
raise ClaripyVSAOperationError('ValueSet.concat() got an unsupported operand %s (type %s)' % (b, type(b)))
return new_vs
@normalize_types_one_arg
def union(self, b):
merged_vs = self.copy()
if type(b) is ValueSet:
for region, si in b.regions.items():
if region not in merged_vs._regions:
merged_vs._regions[region] = si
else:
merged_vs._regions[region] = merged_vs._regions[region].union(si)
merged_vs._si = merged_vs._si.union(b._si)
else:
for region, si in merged_vs._regions.items():
merged_vs._regions[region] = merged_vs._regions[region].union(b)
merged_vs._si = merged_vs._si.union(b)
return merged_vs
@normalize_types_one_arg
def widen(self, b):
merged_vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in merged_vs.regions:
merged_vs.regions[region] = si
else:
merged_vs.regions[region] = merged_vs.regions[region].widen(si)
merged_vs._si = merged_vs._si.widen(b._si)
else:
for region in merged_vs._regions:
merged_vs._regions[region] = merged_vs._regions[region].widen(b)
merged_vs._si = merged_vs._si.widen(b)
return merged_vs
@normalize_types_one_arg
def intersection(self, b):
vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in vs.regions:
pass
else:
vs.regions[region] = vs.regions[region].intersection(si)
if vs.regions[region].is_empty:
del vs.regions[region]
vs._si = vs._si.intersection(b._si)
else:
for region in self._regions:
vs.regions[region] = vs.regions[region].intersection(b)
if vs.regions[region].is_empty:
del vs.regions[region]
vs._si = vs._si.intersection(b)
return vs
def identical(self, o):
"""
Used to make exact comparisons between two ValueSets.
:param o: The other ValueSet to compare with.
:return: True if they are exactly same, False otherwise.
"""
if self._reversed != o._reversed:
return False
for region, si in self.regions.items():
if region in o.regions:
o_si = o.regions[region]
if not si.identical(o_si):
return False
else:
return False
return True
from ..ast.base import Base
from .strided_interval import StridedInterval
from .bool_result import BoolResult, TrueResult, FalseResult, MaybeResult
from .errors import ClaripyVSAOperationError, ClaripyVSAError
from ..errors import ClaripyValueError
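# A minimal construction sketch (illustrative region and values, not part of the
# original module): a ValueSet ties a region name to a base address plus an
# offset interval; adding a StridedInterval shifts every region's offsets, and
# subtracting two ValueSets over the same regions yields a StridedInterval delta.
#
#     offsets = StridedInterval(bits=32, stride=4, lower_bound=0, upper_bound=0x40)
#     vs = ValueSet(region='global', region_base_addr=0x601000, bits=32, val=offsets)
#     shifted = vs + StridedInterval(bits=32, stride=0, lower_bound=8, upper_bound=8)
#     delta = shifted - vs    # same region keys, so __sub__ returns a StridedInterval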
| [((37, 12, 37, 29), 'itertools.count', 'itertools.count', ({}, {}), '()', False, 'import itertools\n'), ((9, 5, 9, 23), 'functools.wraps', 'functools.wraps', ({(9, 21, 9, 22): 'f'}, {}), '(f)', False, 'import functools\n'), ((25, 5, 25, 23), 'functools.wraps', 'functools.wraps', ({(25, 21, 25, 22): 'f'}, {}), '(f)', False, 'import functools\n')] |
sina-cb/fardaastationapi | fardaastationapi.py | 0e27afe05195f346e17fd52e1c30b853c954a3b0 | import logging
from episodes import find_updates, db, count_all
from logging import error as logi
from flask import Flask, jsonify, request
def create_app(config, debug=False, testing=False, config_overrides=None):
app = Flask(__name__)
app.config.from_object(config)
app.config['JSON_AS_ASCII'] = False
app.debug = debug
app.testing = testing
if config_overrides:
app.config.update(config_overrides)
# Configure logging
if not app.testing:
logging.basicConfig(level=logging.INFO)
@app.before_request
def before_request():
db.connect()
@app.after_request
def after_request(response):
db.close()
return response
@app.route('/get_new_episodes')
def get_new_episodes():
appengine_request = request.headers.get('X-Appengine-Cron')
if appengine_request == 'true':
from scraper import update_episodes
update_episodes()
return '<h1>Success</h1>'
else:
return '<h1>This is a crobjob and all the requests should come from appengine.</h1>'
@app.route('/get_updates')
def get_update():
timestamp = request.args.get('timestamp', '')
if timestamp == '':
logi('Default timestamp')
timestamp = 0
else:
            timestamp = int(timestamp)
result = find_updates(timestamp)
return jsonify(result)
@app.route('/')
def welcome():
message = '{}{}{}{}'.format('<h1>Welcome to FardaStationAPI WebService</h1>',
'<p>To get information about the latest episodes of Fardaa Station (by '
'RadioFarda.com) please send a GET request to '
'http://fardastationapi.appspot.com/get_updates URL.</p>',
'<p>A UNIX epoch timestamp can also be passed in as an argument to filter out the '
'episodes before that timestamp. Example: '
'https://fardastationapi.appspot.com/get_updates?timestamp=1512629949</p>',
'<h1>Current number of episodes: {}</h1>'.format(count_all()))
return message
# Add an error handler. This is useful for debugging the live application,
# however, you should disable the output of the exception for production
# applications.
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
return app
| [((9, 10, 9, 25), 'flask.Flask', 'Flask', ({(9, 16, 9, 24): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, jsonify, request\n'), ((21, 8, 21, 47), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((25, 8, 25, 20), 'episodes.db.connect', 'db.connect', ({}, {}), '()', False, 'from episodes import find_updates, db, count_all\n'), ((29, 8, 29, 18), 'episodes.db.close', 'db.close', ({}, {}), '()', False, 'from episodes import find_updates, db, count_all\n'), ((34, 28, 34, 67), 'flask.request.headers.get', 'request.headers.get', ({(34, 48, 34, 66): '"""X-Appengine-Cron"""'}, {}), "('X-Appengine-Cron')", False, 'from flask import Flask, jsonify, request\n'), ((44, 20, 44, 53), 'flask.request.args.get', 'request.args.get', ({(44, 37, 44, 48): '"""timestamp"""', (44, 50, 44, 52): '""""""'}, {}), "('timestamp', '')", False, 'from flask import Flask, jsonify, request\n'), ((52, 17, 52, 40), 'episodes.find_updates', 'find_updates', ({(52, 30, 52, 39): 'timestamp'}, {}), '(timestamp)', False, 'from episodes import find_updates, db, count_all\n'), ((54, 15, 54, 30), 'flask.jsonify', 'jsonify', ({(54, 23, 54, 29): 'result'}, {}), '(result)', False, 'from flask import Flask, jsonify, request\n'), ((37, 12, 37, 29), 'scraper.update_episodes', 'update_episodes', ({}, {}), '()', False, 'from scraper import update_episodes\n'), ((47, 12, 47, 37), 'logging.error', 'logi', ({(47, 17, 47, 36): '"""Default timestamp"""'}, {}), "('Default timestamp')", True, 'from logging import error as logi\n'), ((65, 85, 65, 96), 'episodes.count_all', 'count_all', ({}, {}), '()', False, 'from episodes import find_updates, db, count_all\n')] |
iTeam-co/pytglib | pytglib/api/types/can_transfer_ownership_result_password_too_fresh.py | e5e75e0a85f89b77762209b32a61b0a883c0ae61 |
from ..utils import Object
class CanTransferOwnershipResultPasswordTooFresh(Object):
"""
The 2-step verification was enabled recently, user needs to wait
Attributes:
ID (:obj:`str`): ``CanTransferOwnershipResultPasswordTooFresh``
Args:
retry_after (:obj:`int`):
Time left before the session can be used to transfer ownership of a chat, in seconds
Returns:
CanTransferOwnershipResult
Raises:
:class:`telegram.Error`
"""
ID = "canTransferOwnershipResultPasswordTooFresh"
def __init__(self, retry_after, **kwargs):
self.retry_after = retry_after # int
@staticmethod
def read(q: dict, *args) -> "CanTransferOwnershipResultPasswordTooFresh":
retry_after = q.get('retry_after')
return CanTransferOwnershipResultPasswordTooFresh(retry_after)
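# A small illustrative sketch (not from the library docs; the dict below is a
# hypothetical deserialised update): instances are normally built from a raw
# TDLib dict via read().
#
#     result = CanTransferOwnershipResultPasswordTooFresh.read({"retry_after": 3600})
#     assert result.retry_after == 3600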
| [] |
spraakbanken/sparv-catapult | catapult.py | 03273985ceea6feef47a56084c595580d0338f7d | # -*- coding: utf-8 -*-
# catapult: runs python scripts in already running processes to eliminate the
# python interpreter startup time.
#
# The lexicon for sparv.saldo.annotate and sparv.saldo.compound can be pre-loaded and
# shared between processes. See the variable annotators in handle and start.
#
# Run scripts in the catapult with the C program catalaunch.
from builtins import range, object
from multiprocessing import Process, cpu_count
from decorator import decorator
import logging
import os
import re
import runpy
import socket
import sys
import traceback
import sparv.util as util
RECV_LEN = 4096
# Important to preload all modules otherwise processes will need to do
# it upon request, introducing new delays.
#
# These imports uses the __all__ variables in the __init__ files.
from sparv.util import *
from sparv import *
logging.basicConfig(format="%(process)d %(asctime)-15s %(message)s")
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
"""
Splits at every space that is not preceded by a backslash.
"""
splitter = re.compile('(?<!\\\\) ')
def set_last_argument(*values):
"""
Decorates a function f, setting its last argument(s) to the given value(s).
Used for setting the saldo lexicons to sparv.saldo.annotate and
sparv.saldo.compound, and the process "dictionary" to sparv.malt.maltparse.
The decorator module is used to give the same signature and
docstring to the function, which is exploited in sparv.util.run.
"""
@decorator
def inner(f, *args, **kwargs):
args = list(args)
for v in values:
args.pop()
for v in values:
args.append(v)
f(*args, **kwargs)
return inner
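# A brief illustration of the pattern (mirroring how worker() wires up malt and
# swener below; nothing new is defined here): pre-binding the trailing argument
# lets one long-lived resource be shared by every request the process handles.
#
#     annotators['sparv.malt'] = set_last_argument(process_dict)(malt.maltparse)
#
# The wrapped maltparse then always receives process_dict (the running parser
# process) as its final argument, whatever the caller supplied in that position.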
def handle(client_sock, verbose, annotators):
"""
Handle a client: parse the arguments, change to the relevant
directory, then run the script. Stdout and stderr are directed
to /dev/null or to the client socket.
"""
def chunk_send(msg):
"""
        Sends a message chunk until it is fully received at the other end
"""
msg = msg.encode(util.UTF8)
while len(msg) > 0:
sent = client_sock.send(msg)
if sent == 0:
raise RuntimeError("socket connection broken")
msg = msg[sent:]
def set_stdout_stderr():
"""
Put stdout and stderr to the client_sock, if verbose.
Returns the clean-up handler.
"""
class Writer(object):
def write(self, msg):
log.debug(msg)
if verbose:
chunk_send(msg)
def flush(self):
pass
orig_stds = sys.stdout, sys.stderr
w = Writer()
sys.stdout = w
sys.stderr = w
def cleanup():
"""
Restores stdout and stderr
"""
sys.stdout = orig_stds[0]
sys.stderr = orig_stds[1]
client_sock.close()
return cleanup
# Receive data
data = b""
new_data = None
# Message is terminated with a lone \
while new_data is None or not new_data.endswith(b'\\'):
new_data = client_sock.recv(RECV_LEN)
log.debug("Received %s", new_data)
data += new_data
if len(new_data) == 0:
log.warning("Received null!")
chunk_send("Error when receiving: got an empty message")
return
# Drop the terminating \
data = data[0:-1]
# Split arguments on spaces, and replace '\ ' to ' ' and \\ to \
args = [arg.replace('\\ ', ' ').replace('\\\\', '\\')
for arg in re.split(splitter, data.decode(util.UTF8))]
log.debug("Args: %s", args)
### PING? ###
if len(args) == 2 and args[1] == "PING":
log.info("Ping requested")
chunk_send("PONG")
return
# If the first argument is -m, the following argument is a module
# name instead of a script name
module_flag = len(args) > 2 and args[1] == '-m'
if module_flag:
args.pop(1)
if len(args) > 1:
# First argument is the pwd of the caller
old_pwd = os.getcwd()
pwd = args.pop(0)
log.info('Running %s', args[0])
log.debug('with arguments: %s', ' '.join(args[1:]))
log.debug('in directory %s', pwd)
        # Redirect stdout and stderr; set_stdout_stderr returns the cleanup function
cleanup = set_stdout_stderr()
# Run the command
try:
sys.argv = args
os.chdir(pwd)
if module_flag:
annotator = annotators.get(args[0], None)
if not annotator:
# some of the annotators require two arguments
annotator = annotators.get((args[0], args[1]), None)
if annotator:
# skip the first argument now
                        sys.argv = [args[0]]
sys.argv.extend(args[2:])
if annotator:
util.run.main(annotator)
else:
runpy.run_module(args[0], run_name='__main__')
else:
runpy.run_path(args[0], run_name='__main__')
except (ImportError, IOError):
# If file does not exist, send the error message
chunk_send("%s\n" % sys.exc_info()[1])
cleanup()
log.exception("File does not exist")
except:
# Send other errors, and if verbose, send tracebacks
chunk_send("%s\n" % sys.exc_info()[1])
traceback.print_exception(*sys.exc_info())
cleanup()
log.exception("Unknown error")
else:
cleanup()
os.chdir(old_pwd)
        # Run the cleanup function if there is one (registered by malt and SweNER)
annotators.get((args[0], 'cleanup'), lambda: None)()
log.info('Completed %s', args[0])
else:
log.info('Cannot handle %s', data)
chunk_send('Cannot handle %s\n' % data)
def worker(server_socket, verbose, annotators, malt_args=None, swener_args=None):
"""
Workers listen to the socket server, and handle incoming requests
    Each worker starts its own maltparser process, because maltparser
    processes are cheap and cannot serve multiple clients at the same time.
"""
if malt_args:
process_dict = dict(process=None, restart=True)
def start_malt():
if process_dict['process'] is None or process_dict['restart']:
old_process = process_dict['process']
old_process and util.system.kill_process(old_process)
malt_process = malt.maltstart(**malt_args)
if verbose:
log.info('(Re)started malt process: %s', malt_process)
process_dict['process'] = malt_process
annotators['sparv.malt'] = set_last_argument(process_dict)(malt.maltparse)
elif verbose:
log.info("Not restarting malt this time")
start_malt()
annotators['sparv.malt', 'cleanup'] = start_malt
if swener_args:
process_dict = dict(process=None, restart=True)
def start_swener():
if process_dict['process'] is None or process_dict['restart']:
old_process = process_dict['process']
old_process and util.system.kill_process(old_process)
swener_process = swener.swenerstart(**swener_args)
if verbose:
log.info('(Re)started SweNER process: %s', swener_process)
process_dict['process'] = swener_process
annotators['sparv.swener'] = set_last_argument(process_dict)(swener.tag_ne)
elif verbose:
log.info("Not restarting SweNER this time")
start_swener()
annotators['sparv.swener', 'cleanup'] = start_swener
if verbose:
log.info("Worker running!")
while True:
client_sock, addr = server_socket.accept()
try:
handle(client_sock, verbose, annotators)
except:
log.exception('Error in handling code')
traceback.print_exception(*sys.exc_info())
client_sock.close()
def start(socket_path, processes=1, verbose='false',
saldo_model=None, compound_model=None, stats_model=None,
dalin_model=None, swedberg_model=None, blingbring_model=None,
malt_jar=None, malt_model=None, malt_encoding=util.UTF8,
sentiment_model=None, swefn_model=None, swener=False,
swener_encoding=util.UTF8):
"""
Starts a catapult on a socket file, using a number of processes.
    If verbose is false, all stdout and stderr that programs produce is
    piped to /dev/null, otherwise it is sent to the client. The
computation is done by the catapult processes, however.
Regardless of what verbose is, client errors should be reported
both in the catapult and to the client.
The saldo model and compound model can be pre-loaded and shared in
memory between processes.
Start processes using catalaunch.
"""
if os.path.exists(socket_path):
log.error('socket %s already exists', socket_path)
exit(1)
verbose = verbose.lower() == 'true'
log.info('Verbose: %s', verbose)
    # If processes cannot be parsed as an int, default to the number of processors
try:
processes = int(processes)
except:
processes = cpu_count()
# Start the socket
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_socket.bind(socket_path)
server_socket.listen(processes)
# The dictionary of functions with saved lexica, indexed by module name strings
annotators = {}
# Load Saldo and older lexicons
lexicons = [m for m in [saldo_model, dalin_model, swedberg_model] if m]
if lexicons:
lexicon_dict = {}
for lexicon in lexicons:
lexicon_dict[os.path.basename(lexicon).rstrip(".pickle")] = saldo.SaldoLexicon(lexicon)
annotators['sparv.saldo'] = set_last_argument(lexicon_dict)(saldo.annotate)
if stats_model and compound_model:
annotators['sparv.compound'] = set_last_argument(
compound.SaldoCompLexicon(compound_model),
compound.StatsLexicon(stats_model))(compound.annotate)
elif compound_model:
annotators['sparv.compound_simple'] = set_last_argument(
compound_simple.SaldoLexicon(compound_model))(compound_simple.annotate)
# if blingbring_model:
# annotators['sparv.lexical_classes'] = set_last_argument(
# util.PickledLexicon(blingbring_model))(lexical_classes.annotate_bb_words)
# if swefn_model:
# annotators['sparv.lexical_classes'] = set_last_argument(
# util.PickledLexicon(swefn_model))(lexical_classes.annotate_swefn_words)
if sentiment_model:
annotators['sparv.sentiment'] = set_last_argument(
util.PickledLexicon(sentiment_model))(sentiment.sentiment)
# if models_1700s:
# models = models_1700s.split()
# lexicons = [saldo.SaldoLexicon(lex) for lex in models]
# annotators[('sparv.fsv', '--annotate_fallback')] = set_last_argument(lexicons)(fsv.annotate_fallback)
# annotators[('sparv.fsv', '--annotate_full')] = set_last_argument(lexicons)(fsv.annotate_full)
if verbose:
log.info('Loaded annotators: %s', list(annotators.keys()))
if malt_jar and malt_model:
malt_args = dict(maltjar=malt_jar, model=malt_model,
encoding=malt_encoding, send_empty_sentence=True)
else:
malt_args = None
if swener:
swener_args = dict(stdin="", encoding=swener_encoding, verbose=True)
else:
swener_args = None
# Start processes-1 workers
    workers = [Process(target=worker, args=[server_socket, verbose, annotators, malt_args, swener_args])
for i in range(processes - 1)]
for p in workers:
p.start()
# Additionally, let this thread be worker 0
worker(server_socket, verbose, annotators, malt_args, swener_args)
if __name__ == '__main__':
util.run.main(start)
| [((32, 0, 32, 68), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((33, 6, 33, 33), 'logging.getLogger', 'logging.getLogger', ({(33, 24, 33, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((39, 11, 39, 35), 're.compile', 're.compile', ({(39, 22, 39, 34): '"""(?<!\\\\\\\\) """'}, {}), "('(?<!\\\\\\\\) ')", False, 'import re\n'), ((291, 7, 291, 34), 'os.path.exists', 'os.path.exists', ({(291, 22, 291, 33): 'socket_path'}, {}), '(socket_path)', False, 'import os\n'), ((306, 20, 306, 69), 'socket.socket', 'socket.socket', ({(306, 34, 306, 48): 'socket.AF_UNIX', (306, 50, 306, 68): 'socket.SOCK_STREAM'}, {}), '(socket.AF_UNIX, socket.SOCK_STREAM)', False, 'import socket\n'), ((373, 4, 373, 24), 'sparv.util.run.main', 'util.run.main', ({(373, 18, 373, 23): 'start'}, {}), '(start)', True, 'import sparv.util as util\n'), ((148, 18, 148, 29), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((194, 8, 194, 25), 'os.chdir', 'os.chdir', ({(194, 17, 194, 24): 'old_pwd'}, {}), '(old_pwd)', False, 'import os\n'), ((363, 15, 363, 91), 'multiprocessing.Process', 'Process', (), '', False, 'from multiprocessing import Process, cpu_count\n'), ((161, 12, 161, 25), 'os.chdir', 'os.chdir', ({(161, 21, 161, 24): 'pwd'}, {}), '(pwd)', False, 'import os\n'), ((303, 20, 303, 31), 'multiprocessing.cpu_count', 'cpu_count', ({}, {}), '()', False, 'from multiprocessing import Process, cpu_count\n'), ((364, 24, 364, 44), 'builtins.range', 'range', ({(364, 30, 364, 43): '(processes - 1)'}, {}), '(processes - 1)', False, 'from builtins import range, object\n'), ((179, 16, 179, 60), 'runpy.run_path', 'runpy.run_path', (), '', False, 'import runpy\n'), ((340, 12, 340, 48), 'sparv.util.PickledLexicon', 'util.PickledLexicon', ({(340, 32, 340, 47): 'sentiment_model'}, {}), '(sentiment_model)', True, 'import sparv.util as util\n'), ((175, 20, 175, 44), 'sparv.util.run.main', 'util.run.main', ({(175, 34, 175, 43): 'annotator'}, {}), '(annotator)', True, 'import sparv.util as util\n'), ((177, 20, 177, 66), 'runpy.run_module', 'runpy.run_module', (), '', False, 'import runpy\n'), ((222, 32, 222, 69), 'sparv.util.system.kill_process', 'util.system.kill_process', ({(222, 57, 222, 68): 'old_process'}, {}), '(old_process)', True, 'import sparv.util as util\n'), ((243, 32, 243, 69), 'sparv.util.system.kill_process', 'util.system.kill_process', ({(243, 57, 243, 68): 'old_process'}, {}), '(old_process)', True, 'import sparv.util as util\n'), ((172, 24, 172, 49), 'sys.argv.extend', 'sys.argv.extend', ({(172, 40, 172, 48): 'args[2:]'}, {}), '(args[2:])', False, 'import sys\n'), ((188, 39, 188, 53), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((266, 39, 266, 53), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((182, 32, 182, 46), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((187, 32, 187, 46), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((318, 25, 318, 50), 'os.path.basename', 'os.path.basename', ({(318, 42, 318, 49): 'lexicon'}, {}), '(lexicon)', False, 'import os\n')] |
rajeshkumargp/TextBlob | tests/test_sentiments.py | a8709368f2a8a8ba4d87730111f8b6675d0735cd | from __future__ import unicode_literals
import unittest
from nose.tools import * # PEP8 asserts
from nose.plugins.attrib import attr
from textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer, DISCRETE, CONTINUOUS
class TestPatternSentiment(unittest.TestCase):
def setUp(self):
self.analyzer = PatternAnalyzer()
def test_kind(self):
assert_equal(self.analyzer.kind, CONTINUOUS)
def test_analyze(self):
p1 = "I feel great this morning."
n1 = "This is a terrible car."
p1_result = self.analyzer.analyze(p1)
n1_result = self.analyzer.analyze(n1)
assert_true(p1_result[0] > 0)
assert_true(n1_result[0] < 0)
assert_equal(p1_result.polarity, p1_result[0])
assert_equal(p1_result.subjectivity, p1_result[1])
def test_analyze_assessments(self):
p1 = "I feel great this morning."
n1 = "This is a terrible car."
p1_result = self.analyzer.analyze(p1,keep_assessments=True)
n1_result = self.analyzer.analyze(n1,keep_assessments=True)
p1_assessment = p1_result.assessments[0]
n1_assessment = n1_result.assessments[0]
assert_true(p1_assessment[1] > 0)
assert_true(n1_assessment[1] < 0)
assert_equal(p1_result.polarity, p1_assessment[1])
assert_equal(p1_result.subjectivity, p1_assessment[2])
class TestNaiveBayesAnalyzer(unittest.TestCase):
def setUp(self):
self.analyzer = NaiveBayesAnalyzer()
def test_kind(self):
assert_equal(self.analyzer.kind, DISCRETE)
@attr('slow')
def test_analyze(self):
p1 = 'I feel great this morning.'
n1 = 'This is a terrible car.'
p1_result = self.analyzer.analyze(p1)
assert_equal(p1_result[0], 'pos')
assert_equal(self.analyzer.analyze(n1)[0], 'neg')
# The 2nd item should be the probability that it is positive
assert_true(isinstance(p1_result[1], float))
# 3rd item is probability that it is negative
assert_true(isinstance(p1_result[2], float))
assert_about_equal(p1_result[1] + p1_result[2], 1)
assert_equal(p1_result.classification, p1_result[0])
assert_equal(p1_result.p_pos, p1_result[1])
assert_equal(p1_result.p_neg, p1_result[2])
def assert_about_equal(first, second, places=4):
return assert_equal(round(first, places), second)
if __name__ == '__main__':
unittest.main()
| [((47, 5, 47, 17), 'nose.plugins.attrib.attr', 'attr', ({(47, 10, 47, 16): '"""slow"""'}, {}), "('slow')", False, 'from nose.plugins.attrib import attr\n'), ((68, 4, 68, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((12, 24, 12, 41), 'textblob.sentiments.PatternAnalyzer', 'PatternAnalyzer', ({}, {}), '()', False, 'from textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer, DISCRETE, CONTINUOUS\n'), ((42, 24, 42, 44), 'textblob.sentiments.NaiveBayesAnalyzer', 'NaiveBayesAnalyzer', ({}, {}), '()', False, 'from textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer, DISCRETE, CONTINUOUS\n')] |
unicef/unicef-security | src/unicef_security/apps.py | cc51ba52cddb845b8174cf3dc94706f0334453b2 | from django.apps import AppConfig
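# Django AppConfig describing the unicef_security application.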
class Config(AppConfig):
name = 'unicef_security'
verbose_name = "UNICEF Security"
| [] |
isJuhn/pcsx2_ipc | utils/pretty-tests.py | 51f92d51aec05dffa82d418c97fc1d628b2ed40f | import json
import sys
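# Pretty-print a JSON test report: the report path is passed as the first
# command-line argument and is expected to contain "result", "duration" and "stdout" fields.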
with open(sys.argv[1]) as f:
    y = json.loads(f.read())
print("Tests results: " + str(y["result"]))
print("Tests duration: " + str(y["duration"]))
print("Tests output:\n~~~~~~~~~~~~~~~~~~~~\n" + str(y["stdout"]))
| [] |
AdityaHPatwardhan/openthread | tests/scripts/thread-cert/test_network_layer.py | a201e9d5d0273bb51fa20efc8758be20a725018e | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import random
import struct
import unittest
import common
import network_layer
def any_eid():
return bytearray([random.getrandbits(8) for _ in range(16)])
def any_mac_extended_address():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_rloc16():
return random.getrandbits(16)
def any_ml_eid():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_status():
return random.getrandbits(1)
def any_seconds():
return random.getrandbits(32)
def any_id_sequence():
return random.getrandbits(8)
def any_router_id_mask():
return random.getrandbits(64)
def any_options(count=None):
count = count if count is not None else random.randint(0, 255)
return [random.getrandbits(8) for _ in range(count)]
def any_tlv_data(length=None):
_type = random.getrandbits(8)
length = length if length is not None else random.getrandbits(8)
value = bytearray([random.getrandbits(8) for _ in range(length)])
return bytearray([_type, length]) + value
def any_tlvs_data(count=None):
count = count if count is not None else random.randint(0, 16)
data = bytearray()
for _ in range(count):
data += any_tlv_data(random.randint(1, 15))
return data
class TestTargetEid(unittest.TestCase):
def test_should_return_eid_value_when_eid_property_is_called(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# WHEN
actual_eid = target_eid.eid
# THEN
self.assertEqual(eid, actual_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# THEN
self.assertEqual(target_eid, network_layer.TargetEid(eid))
class TestTargetEidFactory(unittest.TestCase):
def test_should_create_TargetEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
eid = any_eid()
factory = network_layer.TargetEidFactory()
# WHEN
target_eid = factory.parse(io.BytesIO(eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(target_eid, network_layer.TargetEid))
self.assertEqual(eid, target_eid.eid)
class TestMacExtendedAddress(unittest.TestCase):
def test_should_return_mac_address_value_when_mac_address_property_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# WHEN
actual_mac_address = mac_extended_address.mac_address
# THEN
self.assertEqual(mac_address, actual_mac_address)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# THEN
self.assertEqual(mac_extended_address, network_layer.MacExtendedAddress(mac_address))
class TestMacExtendedAddressFactory(unittest.TestCase):
def test_should_create_MacExtendedAddress_from_bytearray_when_parse_method_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
factory = network_layer.MacExtendedAddressFactory()
# WHEN
mac_extended_address = factory.parse(io.BytesIO(mac_address), common.MessageInfo())
# THEN
self.assertTrue(isinstance(mac_extended_address, network_layer.MacExtendedAddress))
self.assertEqual(mac_address, mac_extended_address.mac_address)
class TestRloc16(unittest.TestCase):
def test_should_return_rloc16_value_when_rloc16_property_is_called(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# WHEN
actual_rloc16 = rloc16_obj.rloc16
# THEN
self.assertEqual(rloc16, actual_rloc16)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# THEN
self.assertEqual(rloc16_obj, network_layer.Rloc16(rloc16))
class TestRloc16Factory(unittest.TestCase):
def test_should_create_Rloc16_from_bytearray_when_parse_method_is_called(self):
# GIVEN
rloc16 = any_rloc16()
factory = network_layer.Rloc16Factory()
data = bytearray(struct.pack(">H", rloc16))
# WHEN
rloc16_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(rloc16_obj, network_layer.Rloc16))
self.assertEqual(rloc16, rloc16_obj.rloc16)
class TestMlEid(unittest.TestCase):
def test_should_return_ml_eid_value_when_ml_eid_property_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# WHEN
actual_ml_eid = ml_eid_obj.ml_eid
# THEN
self.assertEqual(ml_eid, actual_ml_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# THEN
self.assertEqual(ml_eid_obj, network_layer.MlEid(ml_eid))
class TestMlEidFactory(unittest.TestCase):
def test_should_create_MlEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
factory = network_layer.MlEidFactory()
# WHEN
ml_eid_obj = factory.parse(io.BytesIO(ml_eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(ml_eid_obj, network_layer.MlEid))
self.assertEqual(ml_eid, ml_eid_obj.ml_eid)
class TestStatus(unittest.TestCase):
def test_should_return_status_value_when_status_property_is_called(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# WHEN
actual_status = status_obj.status
# THEN
self.assertEqual(status, actual_status)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# THEN
self.assertEqual(status_obj, network_layer.Status(status))
class TestStatusFactory(unittest.TestCase):
def test_should_create_Status_from_bytearray_when_parse_method_is_called(self):
# GIVEN
status = any_status()
factory = network_layer.StatusFactory()
data = bytearray([status])
# WHEN
status_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(status_obj, network_layer.Status))
self.assertEqual(status, status_obj.status)
class TestTimeSinceLastTransaction(unittest.TestCase):
def test_should_return_seconds_value_when_seconds_property_is_called(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# WHEN
actual_seconds = time_since_last_transaction.seconds
# THEN
self.assertEqual(seconds, actual_seconds)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# THEN
self.assertEqual(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction(seconds),
)
class TestTimeSinceLastTransactionFactory(unittest.TestCase):
def test_should_create_TimeSinceLastTransaction_from_bytearray_when_parse_method_is_called(self):
# GIVEN
seconds = any_seconds()
factory = network_layer.TimeSinceLastTransactionFactory()
data = bytearray(struct.pack(">L", seconds))
# WHEN
time_since_last_transaction = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction,
))
self.assertEqual(seconds, time_since_last_transaction.seconds)
class TestRouterMask(unittest.TestCase):
def test_should_return_id_sequence_value_when_id_sequence_property_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_mask = network_layer.RouterMask(id_sequence, any_router_id_mask())
# WHEN
actual_id_sequence = router_mask.id_sequence
# THEN
self.assertEqual(id_sequence, actual_id_sequence)
def test_should_return_router_id_mask_value_when_router_id_mask_property_is_called(self):
# GIVEN
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(any_id_sequence(), router_id_mask)
# WHEN
actual_router_id_mask = router_mask.router_id_mask
# THEN
self.assertEqual(router_id_mask, actual_router_id_mask)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(id_sequence, router_id_mask)
# THEN
self.assertEqual(router_mask, network_layer.RouterMask(id_sequence, router_id_mask))
class TestRouterMaskFactory(unittest.TestCase):
def test_should_create_RouterMask_from_bytearray_when_parse_method_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
factory = network_layer.RouterMaskFactory()
data = bytearray([id_sequence]) + struct.pack(">Q", router_id_mask)
# WHEN
router_mask = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(router_mask, network_layer.RouterMask))
self.assertEqual(id_sequence, router_mask.id_sequence)
self.assertEqual(router_id_mask, router_mask.router_id_mask)
class TestNdOption(unittest.TestCase):
def test_should_return_options_value_when_options_property_is_called(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# WHEN
actual_options = nd_option.options
# THEN
self.assertEqual(options, actual_options)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# THEN
self.assertEqual(nd_option, network_layer.NdOption(options))
class TestNdOptionFactory(unittest.TestCase):
def test_should_create_NdOption_from_bytearray_when_parse_method_is_called(self):
# GIVEN
options = any_options()
factory = network_layer.NdOptionFactory()
data = bytearray(options)
# WHEN
nd_option = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(nd_option, network_layer.NdOption))
self.assertEqual(options, nd_option.options)
class TestThreadNetworkData(unittest.TestCase):
def test_should_return_options_value_when_options_property_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# WHEN
actual_tlvs = thread_network_data.tlvs
# THEN
self.assertEqual(tlvs, actual_tlvs)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# THEN
self.assertEqual(thread_network_data, network_layer.ThreadNetworkData(tlvs))
class TestThreadNetworkDataFactory(unittest.TestCase):
def test_should_create_ThreadNetworkData_from_bytearray_when_parse_method_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
class DummyNetworkDataTlvsFactory:
def parse(self, data, message_info):
return bytearray(data.read())
factory = network_layer.ThreadNetworkDataFactory(DummyNetworkDataTlvsFactory())
# WHEN
thread_network_data = factory.parse(io.BytesIO(tlvs), common.MessageInfo())
# THEN
self.assertTrue(isinstance(thread_network_data, network_layer.ThreadNetworkData))
self.assertEqual(tlvs, thread_network_data.tlvs)
if __name__ == "__main__":
unittest.main()
| [((48, 11, 48, 33), 'random.getrandbits', 'random.getrandbits', ({(48, 30, 48, 32): '(16)'}, {}), '(16)', False, 'import random\n'), ((56, 11, 56, 32), 'random.getrandbits', 'random.getrandbits', ({(56, 30, 56, 31): '(1)'}, {}), '(1)', False, 'import random\n'), ((60, 11, 60, 33), 'random.getrandbits', 'random.getrandbits', ({(60, 30, 60, 32): '(32)'}, {}), '(32)', False, 'import random\n'), ((64, 11, 64, 32), 'random.getrandbits', 'random.getrandbits', ({(64, 30, 64, 31): '(8)'}, {}), '(8)', False, 'import random\n'), ((68, 11, 68, 33), 'random.getrandbits', 'random.getrandbits', ({(68, 30, 68, 32): '(64)'}, {}), '(64)', False, 'import random\n'), ((77, 12, 77, 33), 'random.getrandbits', 'random.getrandbits', ({(77, 31, 77, 32): '8'}, {}), '(8)', False, 'import random\n'), ((491, 4, 491, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((72, 44, 72, 66), 'random.randint', 'random.randint', ({(72, 59, 72, 60): '(0)', (72, 62, 72, 65): '(255)'}, {}), '(0, 255)', False, 'import random\n'), ((73, 12, 73, 33), 'random.getrandbits', 'random.getrandbits', ({(73, 31, 73, 32): '(8)'}, {}), '(8)', False, 'import random\n'), ((78, 47, 78, 68), 'random.getrandbits', 'random.getrandbits', ({(78, 66, 78, 67): '(8)'}, {}), '(8)', False, 'import random\n'), ((85, 44, 85, 65), 'random.randint', 'random.randint', ({(85, 59, 85, 60): '(0)', (85, 62, 85, 64): '(16)'}, {}), '(0, 16)', False, 'import random\n'), ((100, 21, 100, 49), 'network_layer.TargetEid', 'network_layer.TargetEid', ({(100, 45, 100, 48): 'eid'}, {}), '(eid)', False, 'import network_layer\n'), ((112, 21, 112, 49), 'network_layer.TargetEid', 'network_layer.TargetEid', ({(112, 45, 112, 48): 'eid'}, {}), '(eid)', False, 'import network_layer\n'), ((124, 18, 124, 50), 'network_layer.TargetEidFactory', 'network_layer.TargetEidFactory', ({}, {}), '()', False, 'import network_layer\n'), ((140, 31, 140, 76), 'network_layer.MacExtendedAddress', 'network_layer.MacExtendedAddress', ({(140, 64, 140, 75): 'mac_address'}, {}), '(mac_address)', False, 'import network_layer\n'), ((152, 31, 152, 76), 'network_layer.MacExtendedAddress', 'network_layer.MacExtendedAddress', ({(152, 64, 152, 75): 'mac_address'}, {}), '(mac_address)', False, 'import network_layer\n'), ((164, 18, 164, 59), 'network_layer.MacExtendedAddressFactory', 'network_layer.MacExtendedAddressFactory', ({}, {}), '()', False, 'import network_layer\n'), ((180, 21, 180, 49), 'network_layer.Rloc16', 'network_layer.Rloc16', ({(180, 42, 180, 48): 'rloc16'}, {}), '(rloc16)', False, 'import network_layer\n'), ((192, 21, 192, 49), 'network_layer.Rloc16', 'network_layer.Rloc16', ({(192, 42, 192, 48): 'rloc16'}, {}), '(rloc16)', False, 'import network_layer\n'), ((204, 18, 204, 47), 'network_layer.Rloc16Factory', 'network_layer.Rloc16Factory', ({}, {}), '()', False, 'import network_layer\n'), ((222, 21, 222, 48), 'network_layer.MlEid', 'network_layer.MlEid', ({(222, 41, 222, 47): 'ml_eid'}, {}), '(ml_eid)', False, 'import network_layer\n'), ((234, 21, 234, 48), 'network_layer.MlEid', 'network_layer.MlEid', ({(234, 41, 234, 47): 'ml_eid'}, {}), '(ml_eid)', False, 'import network_layer\n'), ((246, 18, 246, 46), 'network_layer.MlEidFactory', 'network_layer.MlEidFactory', ({}, {}), '()', False, 'import network_layer\n'), ((262, 21, 262, 49), 'network_layer.Status', 'network_layer.Status', ({(262, 42, 262, 48): 'status'}, {}), '(status)', False, 'import network_layer\n'), ((274, 21, 274, 49), 'network_layer.Status', 'network_layer.Status', ({(274, 42, 274, 48): 'status'}, 
{}), '(status)', False, 'import network_layer\n'), ((286, 18, 286, 47), 'network_layer.StatusFactory', 'network_layer.StatusFactory', ({}, {}), '()', False, 'import network_layer\n'), ((304, 38, 304, 85), 'network_layer.TimeSinceLastTransaction', 'network_layer.TimeSinceLastTransaction', ({(304, 77, 304, 84): 'seconds'}, {}), '(seconds)', False, 'import network_layer\n'), ((316, 38, 316, 85), 'network_layer.TimeSinceLastTransaction', 'network_layer.TimeSinceLastTransaction', ({(316, 77, 316, 84): 'seconds'}, {}), '(seconds)', False, 'import network_layer\n'), ((331, 18, 331, 65), 'network_layer.TimeSinceLastTransactionFactory', 'network_layer.TimeSinceLastTransactionFactory', ({}, {}), '()', False, 'import network_layer\n'), ((377, 22, 377, 75), 'network_layer.RouterMask', 'network_layer.RouterMask', ({(377, 47, 377, 58): 'id_sequence', (377, 60, 377, 74): 'router_id_mask'}, {}), '(id_sequence, router_id_mask)', False, 'import network_layer\n'), ((390, 18, 390, 51), 'network_layer.RouterMaskFactory', 'network_layer.RouterMaskFactory', ({}, {}), '()', False, 'import network_layer\n'), ((409, 20, 409, 51), 'network_layer.NdOption', 'network_layer.NdOption', ({(409, 43, 409, 50): 'options'}, {}), '(options)', False, 'import network_layer\n'), ((421, 20, 421, 51), 'network_layer.NdOption', 'network_layer.NdOption', ({(421, 43, 421, 50): 'options'}, {}), '(options)', False, 'import network_layer\n'), ((433, 18, 433, 49), 'network_layer.NdOptionFactory', 'network_layer.NdOptionFactory', ({}, {}), '()', False, 'import network_layer\n'), ((451, 30, 451, 67), 'network_layer.ThreadNetworkData', 'network_layer.ThreadNetworkData', ({(451, 62, 451, 66): 'tlvs'}, {}), '(tlvs)', False, 'import network_layer\n'), ((463, 30, 463, 67), 'network_layer.ThreadNetworkData', 'network_layer.ThreadNetworkData', ({(463, 62, 463, 66): 'tlvs'}, {}), '(tlvs)', False, 'import network_layer\n'), ((40, 22, 40, 43), 'random.getrandbits', 'random.getrandbits', ({(40, 41, 40, 42): '(8)'}, {}), '(8)', False, 'import random\n'), ((44, 22, 44, 43), 'random.getrandbits', 'random.getrandbits', ({(44, 41, 44, 42): '(8)'}, {}), '(8)', False, 'import random\n'), ((52, 22, 52, 43), 'random.getrandbits', 'random.getrandbits', ({(52, 41, 52, 42): '(8)'}, {}), '(8)', False, 'import random\n'), ((79, 23, 79, 44), 'random.getrandbits', 'random.getrandbits', ({(79, 42, 79, 43): '8'}, {}), '(8)', False, 'import random\n'), ((89, 29, 89, 50), 'random.randint', 'random.randint', ({(89, 44, 89, 45): '(1)', (89, 47, 89, 49): '(15)'}, {}), '(1, 15)', False, 'import random\n'), ((115, 37, 115, 65), 'network_layer.TargetEid', 'network_layer.TargetEid', ({(115, 61, 115, 64): 'eid'}, {}), '(eid)', False, 'import network_layer\n'), ((127, 35, 127, 50), 'io.BytesIO', 'io.BytesIO', ({(127, 46, 127, 49): 'eid'}, {}), '(eid)', False, 'import io\n'), ((127, 52, 127, 72), 'common.MessageInfo', 'common.MessageInfo', ({}, {}), '()', False, 'import common\n'), ((155, 47, 155, 92), 'network_layer.MacExtendedAddress', 'network_layer.MacExtendedAddress', ({(155, 80, 155, 91): 'mac_address'}, {}), '(mac_address)', False, 'import network_layer\n'), ((167, 45, 167, 68), 'io.BytesIO', 'io.BytesIO', ({(167, 56, 167, 67): 'mac_address'}, {}), '(mac_address)', False, 'import io\n'), ((167, 70, 167, 90), 'common.MessageInfo', 'common.MessageInfo', ({}, {}), '()', False, 'import common\n'), ((195, 37, 195, 65), 'network_layer.Rloc16', 'network_layer.Rloc16', ({(195, 58, 195, 64): 'rloc16'}, {}), '(rloc16)', False, 'import network_layer\n'), ((206, 25, 206, 50), 
'struct.pack', 'struct.pack', ({(206, 37, 206, 41): '""">H"""', (206, 43, 206, 49): 'rloc16'}, {}), "('>H', rloc16)", False, 'import struct\n'), ((209, 35, 209, 51), 'io.BytesIO', 'io.BytesIO', ({(209, 46, 209, 50): 'data'}, {}), '(data)', False, 'import io\n'), ((209, 53, 209, 73), 'common.MessageInfo', 'common.MessageInfo', ({}, {}), '()', False, 'import common\n'), ((237, 37, 237, 64), 'network_layer.MlEid', 'network_layer.MlEid', ({(237, 57, 237, 63): 'ml_eid'}, {}), '(ml_eid)', False, 'import network_layer\n'), ((249, 35, 249, 53), 'io.BytesIO', 'io.BytesIO', ({(249, 46, 249, 52): 'ml_eid'}, {}), '(ml_eid)', False, 'import io\n'), ((249, 55, 249, 75), 'common.MessageInfo', 'common.MessageInfo', ({}, {}), '()', False, 'import common\n'), ((277, 37, 277, 65), 'network_layer.Status', 'network_layer.Status', ({(277, 58, 277, 64): 'status'}, {}), '(status)', False, 'import network_layer\n'), ((291, 35, 291, 51), 'io.BytesIO', 'io.BytesIO', ({(291, 46, 291, 50): 'data'}, {}), '(data)', False, 'import io\n'), ((291, 53, 291, 73), 'common.MessageInfo', 'common.MessageInfo', ({}, {}), '()', False, 'import common\n'), ((321, 12, 321, 59), 'network_layer.TimeSinceLastTransaction', 'network_layer.TimeSinceLastTransaction', ({(321, 51, 321, 58): 'seconds'}, {}), '(seconds)', False, 'import network_layer\n'), ((333, 25, 333, 51), 'struct.pack', 'struct.pack', ({(333, 37, 333, 41): '""">L"""', (333, 43, 333, 50): 'seconds'}, {}), "('>L', seconds)", False, 'import struct\n'), ((336, 52, 336, 68), 'io.BytesIO', 'io.BytesIO', ({(336, 63, 336, 67): 'data'}, {}), '(data)', False, 'import io\n'), ((336, 70, 336, 90), 'common.MessageInfo', 'common.MessageInfo', ({}, {}), '()', False, 'import common\n'), ((380, 38, 380, 91), 'network_layer.RouterMask', 'network_layer.RouterMask', ({(380, 63, 380, 74): 'id_sequence', (380, 76, 380, 90): 'router_id_mask'}, {}), '(id_sequence, router_id_mask)', False, 'import network_layer\n'), ((392, 42, 392, 75), 'struct.pack', 'struct.pack', ({(392, 54, 392, 58): '""">Q"""', (392, 60, 392, 74): 'router_id_mask'}, {}), "('>Q', router_id_mask)", False, 'import struct\n'), ((395, 36, 395, 52), 'io.BytesIO', 'io.BytesIO', ({(395, 47, 395, 51): 'data'}, {}), '(data)', False, 'import io\n'), ((395, 54, 395, 74), 'common.MessageInfo', 'common.MessageInfo', ({}, {}), '()', False, 'import common\n'), ((424, 36, 424, 67), 'network_layer.NdOption', 'network_layer.NdOption', ({(424, 59, 424, 66): 'options'}, {}), '(options)', False, 'import network_layer\n'), ((438, 34, 438, 50), 'io.BytesIO', 'io.BytesIO', ({(438, 45, 438, 49): 'data'}, {}), '(data)', False, 'import io\n'), ((438, 52, 438, 72), 'common.MessageInfo', 'common.MessageInfo', ({}, {}), '()', False, 'import common\n'), ((466, 46, 466, 83), 'network_layer.ThreadNetworkData', 'network_layer.ThreadNetworkData', ({(466, 78, 466, 82): 'tlvs'}, {}), '(tlvs)', False, 'import network_layer\n'), ((483, 44, 483, 60), 'io.BytesIO', 'io.BytesIO', ({(483, 55, 483, 59): 'tlvs'}, {}), '(tlvs)', False, 'import io\n'), ((483, 62, 483, 82), 'common.MessageInfo', 'common.MessageInfo', ({}, {}), '()', False, 'import common\n')] |
markgras/salt | salt/modules/kernelpkg_linux_apt.py | d66cd3c935533c63870b83228b978ce43e0ef70d | """
Manage Linux kernel packages on APT-based systems
"""
import functools
import logging
import re
try:
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.exceptions import CommandExecutionError
HAS_REQUIRED_LIBS = True
except ImportError:
HAS_REQUIRED_LIBS = False
log = logging.getLogger(__name__)
__virtualname__ = "kernelpkg"
def __virtual__():
"""
Load this module on Debian-based systems only
"""
if not HAS_REQUIRED_LIBS:
return (False, "Required library could not be imported")
if __grains__.get("os_family", "") in ("Kali", "Debian"):
return __virtualname__
elif __grains__.get("os_family", "") == "Cumulus":
return __virtualname__
return (False, "Module kernelpkg_linux_apt: no APT based system detected")
def active():
"""
Return the version of the running kernel.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.active
"""
if "pkg.normalize_name" in __salt__:
return __salt__["pkg.normalize_name"](__grains__["kernelrelease"])
return __grains__["kernelrelease"]
def list_installed():
"""
Return a list of all installed kernels.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.list_installed
"""
pkg_re = re.compile(r"^{}-[\d.-]+-{}$".format(_package_prefix(), _kernel_type()))
pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True)
if pkgs is None:
pkgs = []
result = list(filter(pkg_re.match, pkgs))
if result is None:
return []
prefix_len = len(_package_prefix()) + 1
return sorted(
[pkg[prefix_len:] for pkg in result], key=functools.cmp_to_key(_cmp_version)
)
def latest_available():
"""
Return the version of the latest kernel from the package repositories.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.latest_available
"""
result = __salt__["pkg.latest_version"](
"{}-{}".format(_package_prefix(), _kernel_type())
)
if result == "":
return latest_installed()
version = re.match(r"^(\d+\.\d+\.\d+)\.(\d+)", result)
return "{}-{}-{}".format(version.group(1), version.group(2), _kernel_type())
def latest_installed():
"""
Return the version of the latest installed kernel.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.latest_installed
.. note::
This function may not return the same value as
:py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel
has been installed and the system has not yet been rebooted.
The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function
exists to detect this condition.
"""
pkgs = list_installed()
if pkgs:
return pkgs[-1]
return None
def needs_reboot():
"""
Detect if a new kernel version has been installed but is not running.
Returns True if a new kernel is installed, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.needs_reboot
"""
return _LooseVersion(active()) < _LooseVersion(latest_installed())
def upgrade(reboot=False, at_time=None):
"""
Upgrade the kernel and optionally reboot the system.
reboot : False
Request a reboot if a new kernel is available.
at_time : immediate
Schedule the reboot at some point in the future. This argument
is ignored if ``reboot=False``. See
:py:func:`~salt.modules.system.reboot` for more details
on this argument.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade
salt '*' kernelpkg.upgrade reboot=True at_time=1
.. note::
An immediate reboot often shuts down the system before the minion has a
chance to return, resulting in errors. A minimal delay (1 minute) is
useful to ensure the result is delivered to the master.
"""
result = __salt__["pkg.install"](
name="{}-{}".format(_package_prefix(), latest_available())
)
_needs_reboot = needs_reboot()
ret = {
"upgrades": result,
"active": active(),
"latest_installed": latest_installed(),
"reboot_requested": reboot,
"reboot_required": _needs_reboot,
}
if reboot and _needs_reboot:
log.warning("Rebooting system due to kernel upgrade")
__salt__["system.reboot"](at_time=at_time)
return ret
def upgrade_available():
"""
Detect if a new kernel version is available in the repositories.
Returns True if a new kernel is available, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade_available
"""
return _LooseVersion(latest_available()) > _LooseVersion(latest_installed())
def remove(release):
"""
Remove a specific version of the kernel.
release
The release number of an installed kernel. This must be the entire release
number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`,
not the package name.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.remove 4.4.0-70-generic
"""
if release not in list_installed():
raise CommandExecutionError(
"Kernel release '{}' is not installed".format(release)
)
if release == active():
raise CommandExecutionError("Active kernel cannot be removed")
target = "{}-{}".format(_package_prefix(), release)
log.info("Removing kernel package %s", target)
__salt__["pkg.purge"](target)
return {"removed": [target]}
def cleanup(keep_latest=True):
"""
Remove all unused kernel packages from the system.
keep_latest : True
In the event that the active kernel is not the latest one installed, setting this to True
will retain the latest kernel package, in addition to the active one. If False, all kernel
packages other than the active one will be removed.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.cleanup
"""
removed = []
# Loop over all installed kernel packages
for kernel in list_installed():
# Keep the active kernel package
if kernel == active():
continue
# Optionally keep the latest kernel package
if keep_latest and kernel == latest_installed():
continue
# Remove the kernel package
removed.extend(remove(kernel)["removed"])
return {"removed": removed}
def _package_prefix():
"""
Return static string for the package prefix
"""
return "linux-image"
def _kernel_type():
"""
Parse the kernel name and return its type
"""
return re.match(r"^[\d.-]+-(.+)$", active()).group(1)
def _cmp_version(item1, item2):
"""
Compare function for package version sorting
"""
vers1 = _LooseVersion(item1)
vers2 = _LooseVersion(item2)
if vers1 < vers2:
return -1
if vers1 > vers2:
return 1
return 0
| [((17, 6, 17, 33), 'logging.getLogger', 'logging.getLogger', ({(17, 24, 17, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((97, 14, 97, 58), 're.match', 're.match', ({(97, 23, 97, 49): '"""^(\\\\d+\\\\.\\\\d+\\\\.\\\\d+)\\\\.(\\\\d+)"""', (97, 51, 97, 57): 'result'}, {}), "('^(\\\\d+\\\\.\\\\d+\\\\.\\\\d+)\\\\.(\\\\d+)', result)", False, 'import re\n'), ((281, 12, 281, 32), 'salt.utils.versions.LooseVersion', '_LooseVersion', ({(281, 26, 281, 31): 'item1'}, {}), '(item1)', True, 'from salt.utils.versions import LooseVersion as _LooseVersion\n'), ((282, 12, 282, 32), 'salt.utils.versions.LooseVersion', '_LooseVersion', ({(282, 26, 282, 31): 'item2'}, {}), '(item2)', True, 'from salt.utils.versions import LooseVersion as _LooseVersion\n'), ((219, 14, 219, 70), 'salt.exceptions.CommandExecutionError', 'CommandExecutionError', ({(219, 36, 219, 69): '"""Active kernel cannot be removed"""'}, {}), "('Active kernel cannot be removed')", False, 'from salt.exceptions import CommandExecutionError\n'), ((77, 50, 77, 84), 'functools.cmp_to_key', 'functools.cmp_to_key', ({(77, 71, 77, 83): '_cmp_version'}, {}), '(_cmp_version)', False, 'import functools\n')] |
david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU | main.py | 971b911efee8f52c5950ba777b79e58a4f840024 | import json
import numpy as np
from numba import jit
from timeit import default_timer as timer
# Constant, used in the formula.
# Defined here to speed up the calculation, i.e. it's calculated only once
# and then placed in the formula.
SQRT_2PI = np.float32(np.sqrt(2 * np.pi))
# This function will run on the CPU.
def gaussian_cpu(values, mean, sigma):
"""Calculate values of the Gaussian function.
:param values: list, function input parameters.
:param mean: float, arithmetic mean.
:param sigma: float, standard deviation.
:return: list.
"""
result = np.zeros_like(values)
for index, item in enumerate(values):
result[index] = (1 / (sigma * SQRT_2PI)) * (np.e ** (-0.5 * ((item - mean) / sigma) ** 2))
return result
# This function will run on the GPU.
gaussian_gpu = jit(gaussian_cpu)
def write_to_file(name, values):
"""Write results to a file.
:param name: string, file name, only prefix.
:param values: dictionary, values to write.
"""
with open(name + ".json", 'w') as f:
json.dump(values, f, indent=4)
if __name__ == "__main__":
# Randomly generated values.
x = np.random.uniform(-3, 3, size=1000000).astype(np.float32)
# Randomly generated mean.
m = np.random.uniform(1, 10)
# Randomly generated standard deviation.
s = np.random.uniform(1, 10)
# The number of rounds.
n = 1
# Used to store execution time.
time_results = {}
for i in range(n):
start = timer()
gaussian_cpu(x, m, s)
end = timer() - start
time_results[i] = end
write_to_file("cpu", time_results)
for i in range(n):
start = timer()
gaussian_gpu(x, m, s)
end = timer() - start
time_results[i] = end
write_to_file("gpu", time_results)
| [((28, 15, 28, 32), 'numba.jit', 'jit', ({(28, 19, 28, 31): 'gaussian_cpu'}, {}), '(gaussian_cpu)', False, 'from numba import jit\n'), ((9, 22, 9, 40), 'numpy.sqrt', 'np.sqrt', ({(9, 30, 9, 39): '2 * np.pi'}, {}), '(2 * np.pi)', True, 'import numpy as np\n'), ((21, 13, 21, 34), 'numpy.zeros_like', 'np.zeros_like', ({(21, 27, 21, 33): 'values'}, {}), '(values)', True, 'import numpy as np\n'), ((45, 8, 45, 32), 'numpy.random.uniform', 'np.random.uniform', ({(45, 26, 45, 27): '1', (45, 29, 45, 31): '10'}, {}), '(1, 10)', True, 'import numpy as np\n'), ((47, 8, 47, 32), 'numpy.random.uniform', 'np.random.uniform', ({(47, 26, 47, 27): '1', (47, 29, 47, 31): '10'}, {}), '(1, 10)', True, 'import numpy as np\n'), ((38, 8, 38, 38), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((56, 16, 56, 23), 'timeit.default_timer', 'timer', ({}, {}), '()', True, 'from timeit import default_timer as timer\n'), ((64, 16, 64, 23), 'timeit.default_timer', 'timer', ({}, {}), '()', True, 'from timeit import default_timer as timer\n'), ((43, 8, 43, 46), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n'), ((58, 14, 58, 21), 'timeit.default_timer', 'timer', ({}, {}), '()', True, 'from timeit import default_timer as timer\n'), ((66, 14, 66, 21), 'timeit.default_timer', 'timer', ({}, {}), '()', True, 'from timeit import default_timer as timer\n')] |
ninetymiles/jj-logcat-analyzer | src/jj_analyzer/__init__.py | d4ae0fddfefc303ae9c17e6c9e08aad6a231e036 | #! /usr/bin/python
import sys
if sys.version_info[0] == 3:
from .__main__ import *
else:
pass | [] |
Team-501-The-PowerKnights/Powerknights-Slack-Bot | utility_functions.py | 1ce25c954aa0c089aa93a3d63bd475d585d39bb6 | import datetime
def iso_extract_info(string):
"""
Will get all of the info and return it as an array
:param string: ISO formatted string that will be used for extraction
    :return: array [year, month, day, minutes, hours]
:note: every item is an int except for minutes
    :note: hours is only set if military_time_hour is greater than 12
"""
elements = []
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
military_time_hours_int = int("".join(characters[11:13]))
minutes_int = "".join(characters[14:16])
hours = 0
elements.append(year_int)
elements.append(month_int)
elements.append(day_int)
elements.append(minutes_int)
if military_time_hours_int > 12:
hours += military_time_hours_int - 12
elements.append(hours)
return elements
# # Testing:
# print("[year, month, day, military_time_hour, minutes, hours]")
# print(iso_extract_info('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def iso_format_to_regular(string):
"""
Will take a string that is an iso formatted string and make it look readable
:param string: the iso formatted string
:return: str
"""
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
military_time_hours_int = int("".join(characters[11:13]))
minutes_int = "".join(characters[14:16])
if military_time_hours_int > 12:
hours = military_time_hours_int - 12
final_string = "{month}/{day}/{year} {hour}:{minute}PM".format(
month=month_int, day=day_int, year=year_int, hour=hours, minute=minutes_int)
return final_string
else:
final_string = "{month}/{day}/{year} {hour}:{minute}AM".format(
month=month_int, day=day_int, year=year_int, hour=military_time_hours_int, minute=minutes_int)
return final_string
# Testing:
# print(iso_format_to_regular('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def fix_time(strange_date):
"""
    Will rearrange the strange date that Google gives and replace it with a normal string.
:param strange_date: strange time that google gives when an event is marked as "all day"
:return: str
"""
items = strange_date.split("-")
year_int = int(items[0])
month_int = int(items[1])
day_int = int(items[2])
new_str = "{month}/{day}/{year}".format(
month=month_int, day=day_int, year=year_int)
return new_str
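# Testing:
# print(fix_time("2019-04-21"))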
# Doesn't use the "iso_extract_info" function
def multiday_checker_STRANGE(start_date, end_date):
"""
    Will check if an event is more than a day long
:param start_date: Strange Google formatted date of the start of the event
:param end_date: Strange Google formatted date of the end of the event
:return: Boolean
"""
start_date_items = start_date.split("-")
end_date_items = end_date.split("-")
start_date_sum = 0
end_date_sum = 0
for string in start_date_items:
number = int(string)
start_date_sum += number
for string in end_date_items:
number = int(string)
end_date_sum += number
date_dif = start_date_sum - end_date_sum
if date_dif > 2:
return True
else:
return False
# Testing:
# print(multiday_checker_STRANGE('2019-04-21', '2019-04-22'))
# Doesn't use the "iso_extract_info" function
def STRANGE_string_weekday(string):
"""
Will take a string that is a date formatted in the Google format and find what day of the week it is
:param string: Google formatted string for the date
:return: string
"""
items = string.split("/")
year_int = int(items[2])
month_int = int(items[0])
day_int = int(items[1])
datetime_instance = datetime.date(year_int, month_int, day_int)
week_day_number = datetime_instance.weekday()
if week_day_number == 0:
return "Monday"
elif week_day_number == 1:
return "Tuesday"
elif week_day_number == 2:
return "Wendsday"
elif week_day_number == 3:
return "Thursday"
elif week_day_number == 4:
return "Friday"
elif week_day_number == 5:
return "Saturday"
elif week_day_number == 6:
return "Sunday"
else:
return "Error"
# Testing:
# print(STRANGE_string_weekday("2019-04-27"))
# Doesn't use the "iso_extract_info" function
def ISO_string_weekday(string):
"""
Will take a string that is a date formatted in the ISO format and find what day of the week it is
:param string: ISO formatted string for the date
:return: string
"""
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
datetime_instance = datetime.date(year_int, month_int, day_int)
week_day_number = datetime_instance.weekday()
if week_day_number == 0:
return "Monday"
elif week_day_number == 1:
return "Tuesday"
elif week_day_number == 2:
return "Wendsday"
elif week_day_number == 3:
return "Thursday"
elif week_day_number == 4:
return "Friday"
elif week_day_number == 5:
return "Saturday"
elif week_day_number == 6:
return "Sunday"
else:
return "Error"
# Testing:
# print(ISO_string_weekday('2019-06-28T16:00:00-04:00'))
| [((119, 24, 119, 67), 'datetime.date', 'datetime.date', ({(119, 38, 119, 46): 'year_int', (119, 48, 119, 57): 'month_int', (119, 59, 119, 66): 'day_int'}, {}), '(year_int, month_int, day_int)', False, 'import datetime\n'), ((154, 24, 154, 67), 'datetime.date', 'datetime.date', ({(154, 38, 154, 46): 'year_int', (154, 48, 154, 57): 'month_int', (154, 59, 154, 66): 'day_int'}, {}), '(year_int, month_int, day_int)', False, 'import datetime\n')] |
tallamjr/mbms | python/ch_06_Animatronic_Head.py | 6763faa870d1a16f272b3eade70b433ed3df0e51 | from microbit import *
import random, speech, radio
eye_angles = [50, 140, 60, 90, 140]
radio.off()
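# The radio is not used in this project, so switch it off.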
sentences = [
"Hello my name is Mike",
"What is your name",
"I am looking at you",
"Exterminate exterminate exterminate",
"Number Five is alive",
"I cant do that Dave",
"daisee daisee give me your answer do"
]
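# Mouth shapes shown while speaking: closed, half-open and open.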
lips0 = Image("00000:"
"00000:"
"99999:"
"00000:"
"00000")
lips1 = Image("00000:"
"00900:"
"99099:"
"00900:"
"00000")
lips2 = Image("00000:"
"09990:"
"99099:"
"09990:"
"00000")
lips = [lips0, lips1, lips2]
def set_servo_angle(pin, angle):
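    # Convert an angle in degrees into a PWM duty value for write_analog (0-1023 scale).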
duty = 26 + (angle * 51) / 90
pin.write_analog(duty)
def speak(sentence):
words = sentence.split()
for i in range(0, len(words)):
display.show(random.choice(lips))
speech.say(words[i])
display.show(lips0)
def act():
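    # Glance at a random eye position, say a random sentence, then re-center the eyes.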
set_servo_angle(pin2, random.choice(eye_angles))
sleep(300)
speak(random.choice(sentences))
set_servo_angle(pin2, 90)
base_z = 0
while True:
new_z = abs(accelerometer.get_z())
if abs(new_z - base_z) > 20:
base_z = new_z
act()
if random.randint(0, 1000) == 0: # say something 1 time in 1000
act()
sleep(200)
| [((6, 0, 6, 11), 'radio.off', 'radio.off', ({}, {}), '()', False, 'import random, speech, radio\n'), ((46, 8, 46, 28), 'speech.say', 'speech.say', ({(46, 19, 46, 27): 'words[i]'}, {}), '(words[i])', False, 'import random, speech, radio\n'), ((50, 26, 50, 51), 'random.choice', 'random.choice', ({(50, 40, 50, 50): 'eye_angles'}, {}), '(eye_angles)', False, 'import random, speech, radio\n'), ((52, 10, 52, 34), 'random.choice', 'random.choice', ({(52, 24, 52, 33): 'sentences'}, {}), '(sentences)', False, 'import random, speech, radio\n'), ((63, 7, 63, 30), 'random.randint', 'random.randint', ({(63, 22, 63, 23): '(0)', (63, 25, 63, 29): '(1000)'}, {}), '(0, 1000)', False, 'import random, speech, radio\n'), ((45, 21, 45, 40), 'random.choice', 'random.choice', ({(45, 35, 45, 39): 'lips'}, {}), '(lips)', False, 'import random, speech, radio\n')] |
caoyp2/HRunDemo | debugtalk.py | 41810a2fd366c780ea8f2bf9b4328fdd60aba171 | import datetime
import time
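# Helper functions exposed to the test cases (HttpRunner's debugtalk.py convention).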
def sleep(n_secs):
time.sleep(n_secs)
def get_timestamp():
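    # Return the current time as a Unix timestamp (seconds), formatted as a string.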
dtime = datetime.datetime.now()
un_time = time.mktime(dtime.timetuple())
return str(un_time)
def print_docId(docId):
print(docId)
def print_phonepass(phone,password):
print(phone + "---------" + password)
| [((5, 4, 5, 22), 'time.sleep', 'time.sleep', ({(5, 15, 5, 21): 'n_secs'}, {}), '(n_secs)', False, 'import time\n'), ((8, 12, 8, 35), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n')] |
aleronupe/2019.1-hubcare-api | hubcare/metrics/community_metrics/issue_template/urls.py | 3f031eac9559a10fdcf70a88ee4c548cf93e4ac2 | from django.urls import path
from issue_template.views import IssueTemplateView
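# Single route: owner, repo and auth token are captured as string path
# parameters and passed to IssueTemplateView as keyword arguments.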
urlpatterns = [
path(
'<str:owner>/<str:repo>/<str:token_auth>/',
IssueTemplateView.as_view()
),
]
| [((8, 8, 8, 35), 'issue_template.views.IssueTemplateView.as_view', 'IssueTemplateView.as_view', ({}, {}), '()', False, 'from issue_template.views import IssueTemplateView\n')] |
httpsgithu/hammer | src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py | 6099f4169a49f71cee2e24bb1052f273039505cd |
import os, tempfile, subprocess
from hammer_vlsi import MMMCCorner, MMMCCornerType, HammerTool, HammerToolStep, HammerSRAMGeneratorTool, SRAMParameters
from hammer_vlsi.units import VoltageValue, TemperatureValue
from hammer_tech import Library, ExtraLibrary
from typing import NamedTuple, Dict, Any, List
from abc import ABCMeta, abstractmethod
class SKY130SRAMGenerator(HammerSRAMGeneratorTool):
def tool_config_prefix(self) -> str:
return "sram_generator.sky130"
def version_number(self, version: str) -> int:
return 0
# Run generator for a single sram and corner
def generate_sram(self, params: SRAMParameters, corner: MMMCCorner) -> ExtraLibrary:
tech_cache_dir = os.path.abspath(self.technology.cache_dir)
#TODO: this is really an abuse of the corner stuff
if corner.type == MMMCCornerType.Setup:
speed_name = "slow"
speed = "SS"
elif corner.type == MMMCCornerType.Hold:
speed_name = "fast"
speed = "FF"
elif corner.type == MMMCCornerType.Extra:
speed_name = "typical"
speed = "TT"
# Different target memories based on port count
# if params.family == "1rw":
# self.logger.info("Compiling 1rw memories to DFFRAM instances")
# base_dir = self.get_setting("technology.sky130.dffram_lib")
# fam_code = params.family
# sram_name = "RAM{d}x{w}".format(
# d=params.depth,
# w=params.width)
# #TODO: need real libs (perhaps run Liberate here?)
# #For now, use the dummy lib for all corners
# corner_str = "" #
# lib_path = "{b}/{n}.lib".format(
# b=base_dir,
# n=sram_name)
# if not os.path.exists(lib_path):
# self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))
# return ExtraLibrary(prefix=None, library=Library(
# name=sram_name,
# nldm_liberty_file=lib_path,
# lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name),
# #TODO: GDS not generated. Unclear which DEF to use?
# #gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name),
# spice_file="{b}/{n}/{n}.spice".format(b=base_dir,n=sram_name),
# #TODO: Will not work as-is for behav. sim (this is a structural netlist referencing std. cells)
# #Need to add std cell behavioral Verilog to sim.inputs.input_files
# verilog_sim="{b}/{n}/{n}.nl.v".format(b=base_dir,n=sram_name),
# corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"},
# supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"},
# provides=[{'lib_type': "sram", 'vt': params.vt}]))
# elif params.family == "1rw1r":
if params.family == "1rw":
self.logger.info("Compiling 1rw1r memories to OpenRAM instances")
base_dir = self.get_setting("technology.sky130.openram_lib")
fam_code = params.family
s=round(round(params.width*params.depth/8, -3)/1000) # size in kiB
w=params.width
d=params.depth
m=8
sram_name = f"sky130_sram_{s}kbyte_1rw1r_{w}x{d}_{m}"
print(f"SRAM_NAME: {sram_name}")
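            # Worked example with illustrative numbers (an assumption, not a value from the
            # config): width=32, depth=1024 gives s = round(round(32*1024/8, -3)/1000) = 4,
            # so sram_name would be "sky130_sram_4kbyte_1rw1r_32x1024_8".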
#TODO: Hammer SRAMParameters doesn't have this info
#TODO: replace this if OpenRAM characterization done for other corners
#For now, use typical lib for all corners
corner_str = "TT_1p8V_25C"
#corner_str = "{speed}_{volt}V_{temp}C".format(
# speed = speed,
# volt = str(corner.voltage.value_in_units("V")).replace(".","p"),
# temp = str(int(corner.temp.value_in_units("C"))).replace(".","p"))
lib_path = "{b}/{n}/{n}_{c}.lib".format(
b=base_dir,
n=sram_name,
c=corner_str)
if not os.path.exists(lib_path):
self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))
return ExtraLibrary(prefix=None, library=Library(
name=sram_name,
nldm_liberty_file=lib_path,
lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name),
gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name),
spice_file="{b}/{n}/{n}.lvs.sp".format(b=base_dir,n=sram_name),
verilog_sim="{b}/{n}/{n}.v".format(b=base_dir,n=sram_name),
corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"},
supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"},
provides=[{'lib_type': "sram", 'vt': params.vt}]))
else:
self.logger.error("SKY130 SRAM cache does not support family:{f}".format(f=params.family))
return ExtraLibrary(prefix=None, library=None)
tool=SKY130SRAMGenerator
| [((19, 25, 19, 67), 'os.path.abspath', 'os.path.abspath', ({(19, 41, 19, 66): 'self.technology.cache_dir'}, {}), '(self.technology.cache_dir)', False, 'import os, tempfile, subprocess\n'), ((98, 19, 98, 58), 'hammer_tech.ExtraLibrary', 'ExtraLibrary', (), '', False, 'from hammer_tech import Library, ExtraLibrary\n'), ((84, 19, 84, 43), 'os.path.exists', 'os.path.exists', ({(84, 34, 84, 42): 'lib_path'}, {}), '(lib_path)', False, 'import os, tempfile, subprocess\n')] |
PacktPublishing/Hands-on-NLP-with-NLTK-and-scikit-learn- | Section 4/nlp-4-ngrams.py | 8bb2095093a822363675368a4216d30d14cac501 | import collections
import nltk
import os
from sklearn import (
datasets, model_selection, feature_extraction, linear_model, naive_bayes,
ensemble
)
def extract_features(corpus):
'''Extract TF-IDF features from corpus'''
sa_stop_words = nltk.corpus.stopwords.words("english")
# words that might invert a sentence's meaning
white_list = [
'what', 'but', 'if', 'because', 'as', 'until', 'against',
'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
'further', 'then', 'once', 'here', 'there', 'why', 'how', 'all', 'any',
'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own',
'same', 'so', 'than', 'too', 'can', 'will', 'just', 'don', 'should']
# take these out of the standard NLTK stop word list
sa_stop_words = [sw for sw in sa_stop_words if sw not in white_list]
# vectorize means we turn non-numerical data into an array of numbers
count_vectorizer = feature_extraction.text.CountVectorizer(
lowercase=True, # for demonstration, True by default
tokenizer=nltk.word_tokenize, # use the NLTK tokenizer
min_df=2, # minimum document frequency, i.e. the word must appear more than once.
ngram_range=(1, 2),
stop_words=sa_stop_words
)
processed_corpus = count_vectorizer.fit_transform(corpus)
processed_corpus = feature_extraction.text.TfidfTransformer().fit_transform(
processed_corpus)
return processed_corpus
data_directory = 'movie_reviews'
movie_sentiment_data = datasets.load_files(data_directory, shuffle=True)
print('{} files loaded.'.format(len(movie_sentiment_data.data)))
print('They contain the following classes: {}.'.format(
movie_sentiment_data.target_names))
movie_tfidf = extract_features(movie_sentiment_data.data)
X_train, X_test, y_train, y_test = model_selection.train_test_split(
movie_tfidf, movie_sentiment_data.target, test_size=0.30, random_state=42)
# similar to nltk.NaiveBayesClassifier.train()
clf1 = linear_model.LogisticRegression()
clf1.fit(X_train, y_train)
print('Logistic Regression performance: {}'.format(clf1.score(X_test, y_test)))
clf2 = linear_model.SGDClassifier()
clf2.fit(X_train, y_train)
print('SGDClassifier performance: {}'.format(clf2.score(X_test, y_test)))
clf3 = naive_bayes.MultinomialNB()
clf3.fit(X_train, y_train)
print('MultinomialNB performance: {}'.format(clf3.score(X_test, y_test)))
clf4 = naive_bayes.BernoulliNB()
clf4.fit(X_train, y_train)
print('BernoulliNB performance: {}'.format(clf4.score(X_test, y_test)))
voting_model = ensemble.VotingClassifier(
estimators=[('lr', clf1), ('sgd', clf2), ('mnb', clf3), ('bnb', clf4)],
voting='hard')
voting_model.fit(X_train, y_train)
print('Voting classifier performance: {}'.format(
voting_model.score(X_test, y_test)))
| [((42, 23, 42, 72), 'sklearn.datasets.load_files', 'datasets.load_files', (), '', False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((49, 35, 50, 78), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (), '', False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((53, 7, 53, 40), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ({}, {}), '()', False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((57, 7, 57, 35), 'sklearn.linear_model.SGDClassifier', 'linear_model.SGDClassifier', ({}, {}), '()', False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((61, 7, 61, 34), 'sklearn.naive_bayes.MultinomialNB', 'naive_bayes.MultinomialNB', ({}, {}), '()', False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((65, 7, 65, 32), 'sklearn.naive_bayes.BernoulliNB', 'naive_bayes.BernoulliNB', ({}, {}), '()', False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((70, 15, 72, 18), 'sklearn.ensemble.VotingClassifier', 'ensemble.VotingClassifier', (), '', False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((13, 20, 13, 58), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', ({(13, 48, 13, 57): '"""english"""'}, {}), "('english')", False, 'import nltk\n'), ((27, 23, 33, 5), 'sklearn.feature_extraction.text.CountVectorizer', 'feature_extraction.text.CountVectorizer', (), '', False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n'), ((35, 23, 35, 65), 'sklearn.feature_extraction.text.TfidfTransformer', 'feature_extraction.text.TfidfTransformer', ({}, {}), '()', False, 'from sklearn import datasets, model_selection, feature_extraction, linear_model, naive_bayes, ensemble\n')] |
bhavinjawade/project-euler-solutions | code/gcd_sequence/sol_443.py | 56bf6a282730ed4b9b875fa081cf4509d9939d98 |
# -*- coding: utf-8 -*-
'''
File name: code\gcd_sequence\sol_443.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #443 :: GCD sequence
#
# For more information see:
# https://projecteuler.net/problem=443
# Problem Statement
'''
Let g(n) be a sequence defined as follows:
g(4) = 13,
g(n) = g(n-1) + gcd(n, g(n-1)) for n > 4.
The first few values are:
    n    :  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 ...
    g(n) : 13 14 16 17 18 27 28 29 30 31 32 33 34 51 54 55 60 ...
You are given that g(1 000) = 2524 and g(1 000 000) = 2624152.
Find g(10^15).
'''
# Solution
# Solution Approach
'''
'''
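# A minimal brute-force sketch of the recurrence above (an assumed reference only:
# it reproduces the quoted values g(20) = 60 and g(1000) = 2524, but it is far too
# slow to reach n = 10**15).
from math import gcd

def g_naive(n):
    g = 13  # g(4) = 13
    for k in range(5, n + 1):
        g += gcd(k, g)
    return g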
| [] |
lreed/Diamond | src/collectors/rabbitmq/rabbitmq.py | 2772cdbc27a7ba3fedeb6d4241aeee9d2fcbdb80 | # coding=utf-8
"""
Collects data from RabbitMQ through the admin interface
#### Notes
* if two vhosts have the queues with the same name, the metrics will collide
#### Dependencies
* pyrabbit
"""
import diamond.collector
try:
from numbers import Number
Number # workaround for pyflakes issue #13
import pyrabbit.api
except ImportError:
Number = None
class RabbitMQCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(RabbitMQCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname and port to collect from',
'user': 'Username',
'password': 'Password',
'queues': 'Queues to publish. Leave empty to publish all.',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(RabbitMQCollector, self).get_default_config()
config.update({
'path': 'rabbitmq',
'host': 'localhost:55672',
'user': 'guest',
'password': 'guest',
})
return config
def collect(self):
if Number is None:
self.log.error('Unable to import either Number or pyrabbit.api')
return {}
queues = []
if 'queues' in self.config:
queues = self.config['queues'].split()
try:
client = pyrabbit.api.Client(self.config['host'],
self.config['user'],
self.config['password'])
for queue in client.get_queues():
# skip queues we don't want to publish
if queues and queue['name'] not in queues:
continue
for key in queue:
name = '{0}.{1}'.format('queues', queue['name'])
self._publish_metrics(name, [], key, queue)
overview = client.get_overview()
for key in overview:
self._publish_metrics('', [], key, overview)
except Exception, e:
            self.log.error("Couldn't connect to rabbitmq %s", e)
return {}
def _publish_metrics(self, name, prev_keys, key, data):
"""Recursively publish keys"""
value = data[key]
keys = prev_keys + [key]
if isinstance(value, dict):
for new_key in value:
self._publish_metrics(name, keys, new_key, value)
elif isinstance(value, Number):
joined_keys = '.'.join(keys)
if name:
publish_key = '{0}.{1}'.format(name, joined_keys)
else:
publish_key = joined_keys
self.publish(publish_key, value)
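# A sketch of a collector config using the options exposed above (an assumed
# ini-style snippet with illustrative values; adjust host/credentials to match
# your RabbitMQ management interface):
#
#     enabled = True
#     host = localhost:55672
#     user = guest
#     password = guest
#     queues = tasks notifications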
| [] |
MalikIdreesHasanKhan/NeMo | nemo/collections/tts/torch/data.py | 984fd34921e81659c4594a22ab142311808b3bb7 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pickle
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import librosa
import torch
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.tts.torch.helpers import (
BetaBinomialInterpolator,
beta_binomial_prior_distribution,
general_padding,
)
from nemo.collections.tts.torch.tts_data_types import (
DATA_STR2DATA_CLASS,
MAIN_DATA_TYPES,
VALID_SUPPLEMENTARY_DATA_TYPES,
DurationPrior,
Durations,
Energy,
LMTokens,
LogMel,
Pitch,
SpeakerID,
WithLens,
)
from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer
from nemo.core.classes import Dataset
from nemo.utils import logging
class TTSDataset(Dataset):
def __init__(
self,
manifest_filepath: str,
sample_rate: int,
text_tokenizer: Union[BaseTokenizer, Callable[[str], List[int]]],
tokens: Optional[List[str]] = None,
text_normalizer: Optional[Union[Normalizer, Callable[[str], str]]] = None,
text_normalizer_call_args: Optional[Dict] = None,
text_tokenizer_pad_id: Optional[int] = None,
sup_data_types: Optional[List[str]] = None,
sup_data_path: Optional[Union[Path, str]] = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
ignore_file: Optional[str] = None,
trim: bool = False,
n_fft=1024,
win_length=None,
hop_length=None,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=None,
**kwargs,
):
"""Dataset that loads main data types (audio and text) and specified supplementary data types (e.g. log mel, durations, pitch).
        Most supplementary data types will be computed on the fly and saved under sup_data_path if they do not already exist.
        Arguments for supplementary data should also be specified in this class and will be taken from kwargs (see the keyword args section).
Args:
manifest_filepath (str, Path, List[str, Path]): Path(s) to the .json manifests containing information on the
dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid
json. Each line should contain the following:
"audio_filepath": <PATH_TO_WAV>
"mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional)
"duration": <Duration of audio clip in seconds> (Optional)
"text": <THE_TRANSCRIPT> (Optional)
sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to.
text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer.
tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer.
text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer.
text_normalizer_call_args (Optional[Dict]): Additional arguments for text_normalizer function.
text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer.
sup_data_types (Optional[List[str]]): List of supplementary data types.
sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch).
max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
ignore_file (Optional[str, Path]): The location of a pickle-saved list of audio_ids (the stem of the audio
files) that will be pruned prior to training. Defaults to None which does not prune.
trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False.
n_fft (Optional[int]): The number of fft samples. Defaults to 1024
win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft.
            hop_length (Optional[int]): The hop length between fft computations. Defaults to None which uses n_fft//4.
            window (Optional[str]): One of 'hann', 'hamming', 'blackman', 'bartlett', 'none'. Which corresponds to the
equivalent torch window function.
n_mels (Optional[int]): The number of mel filters. Defaults to 80.
lowfreq (Optional[int]): The lowfreq input to the mel filter calculation. Defaults to 0.
highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None.
Keyword Args:
durs_file (Optional[str]): String path to pickled durations location.
durs_type (Optional[str]): Type of durations. Currently supported only "aligned-based".
use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator. Defaults to False.
pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2').
pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7').
pitch_avg (Optional[float]): The mean that we use to normalize the pitch.
pitch_std (Optional[float]): The std that we use to normalize the pitch.
pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_avg and pitch_std) or not.
"""
super().__init__()
self.text_normalizer = text_normalizer
self.text_normalizer_call = (
self.text_normalizer.normalize if isinstance(self.text_normalizer, Normalizer) else self.text_normalizer
)
self.text_normalizer_call_args = text_normalizer_call_args if text_normalizer_call_args is not None else {}
self.text_tokenizer = text_tokenizer
if isinstance(self.text_tokenizer, BaseTokenizer):
self.text_tokenizer_pad_id = text_tokenizer.pad
self.tokens = text_tokenizer.tokens
else:
if text_tokenizer_pad_id is None:
raise ValueError(f"text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer")
if tokens is None:
raise ValueError(f"tokens must be specified if text_tokenizer is not BaseTokenizer")
self.text_tokenizer_pad_id = text_tokenizer_pad_id
self.tokens = tokens
if isinstance(manifest_filepath, str):
manifest_filepath = [manifest_filepath]
self.manifest_filepath = manifest_filepath
if sup_data_path is not None:
Path(sup_data_path).mkdir(parents=True, exist_ok=True)
self.sup_data_path = sup_data_path
self.sup_data_types = (
[DATA_STR2DATA_CLASS[d_as_str] for d_as_str in sup_data_types] if sup_data_types is not None else []
)
self.sup_data_types_set = set(self.sup_data_types)
self.data = []
audio_files = []
total_duration = 0
for manifest_file in self.manifest_filepath:
with open(Path(manifest_file).expanduser(), 'r') as f:
logging.info(f"Loading dataset from {manifest_file}.")
for line in tqdm(f):
item = json.loads(line)
file_info = {
"audio_filepath": item["audio_filepath"],
"mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None,
"duration": item["duration"] if "duration" in item else None,
"text_tokens": None,
"speaker_id": item["speaker"] if "speaker" in item else None,
}
if "text" in item:
text = item["text"]
if self.text_normalizer is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_args)
text_tokens = self.text_tokenizer(text)
file_info["raw_text"] = item["text"]
file_info["text_tokens"] = text_tokens
audio_files.append(file_info)
if file_info["duration"] is None:
logging.info(
"Not all audio files have duration information. Duration logging will be disabled."
)
total_duration = None
if total_duration is not None:
total_duration += item["duration"]
logging.info(f"Loaded dataset with {len(audio_files)} files.")
if total_duration is not None:
logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.")
if ignore_file:
logging.info(f"using {ignore_file} to prune dataset.")
with open(Path(ignore_file).expanduser(), "rb") as f:
wavs_to_ignore = set(pickle.load(f))
pruned_duration = 0 if total_duration is not None else None
pruned_items = 0
for item in audio_files:
audio_path = item['audio_filepath']
audio_id = Path(audio_path).stem
# Prune data according to min/max_duration & the ignore file
if total_duration is not None:
if (min_duration and item["duration"] < min_duration) or (
max_duration and item["duration"] > max_duration
):
pruned_duration += item["duration"]
pruned_items += 1
continue
if ignore_file and (audio_id in wavs_to_ignore):
pruned_items += 1
pruned_duration += item["duration"]
wavs_to_ignore.remove(audio_id)
continue
self.data.append(item)
logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(self.data)} files")
if pruned_duration is not None:
logging.info(
f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains "
f"{(total_duration - pruned_duration) / 3600:.2f} hours."
)
self.sample_rate = sample_rate
self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate)
self.trim = trim
self.n_fft = n_fft
self.n_mels = n_mels
self.lowfreq = lowfreq
self.highfreq = highfreq
self.window = window
self.win_length = win_length or self.n_fft
self.hop_length = hop_length
self.hop_len = self.hop_length or self.n_fft // 4
self.fb = torch.tensor(
librosa.filters.mel(
self.sample_rate, self.n_fft, n_mels=self.n_mels, fmin=self.lowfreq, fmax=self.highfreq
),
dtype=torch.float,
).unsqueeze(0)
window_fn = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}.get(self.window, None)
self.stft = lambda x: torch.stft(
input=x,
n_fft=self.n_fft,
hop_length=self.hop_len,
win_length=self.win_length,
window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None,
)
for data_type in self.sup_data_types:
if data_type not in VALID_SUPPLEMENTARY_DATA_TYPES:
raise NotImplementedError(f"Current implementation of TTSDataset doesn't support {data_type} type.")
getattr(self, f"add_{data_type.name}")(**kwargs)
def add_log_mel(self, **kwargs):
pass
def add_durations(self, **kwargs):
durs_file = kwargs.pop('durs_file')
durs_type = kwargs.pop('durs_type')
audio_stem2durs = torch.load(durs_file)
self.durs = []
for tag in [Path(d["audio_filepath"]).stem for d in self.data]:
durs = audio_stem2durs[tag]
if durs_type == "aligner-based":
self.durs.append(durs)
else:
                raise NotImplementedError(
                    f"{durs_type} duration type is not supported. Only aligner-based is supported at this moment."
                )
def add_duration_prior(self, **kwargs):
self.use_beta_binomial_interpolator = kwargs.pop('use_beta_binomial_interpolator', False)
if self.use_beta_binomial_interpolator:
self.beta_binomial_interpolator = BetaBinomialInterpolator()
def add_pitch(self, **kwargs):
self.pitch_fmin = kwargs.pop("pitch_fmin", librosa.note_to_hz('C2'))
self.pitch_fmax = kwargs.pop("pitch_fmax", librosa.note_to_hz('C7'))
self.pitch_avg = kwargs.pop("pitch_avg", None)
self.pitch_std = kwargs.pop("pitch_std", None)
self.pitch_norm = kwargs.pop("pitch_norm", False)
def add_energy(self, **kwargs):
pass
def add_speaker_id(self, **kwargs):
pass
def get_spec(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.stft(audio)
if spec.dtype in [torch.cfloat, torch.cdouble]:
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9)
return spec
def get_log_mel(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.get_spec(audio)
mel = torch.matmul(self.fb.to(spec.dtype), spec)
log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny))
return log_mel
def __getitem__(self, index):
sample = self.data[index]
audio_stem = Path(sample["audio_filepath"]).stem
features = self.featurizer.process(sample["audio_filepath"], trim=self.trim)
audio, audio_length = features, torch.tensor(features.shape[0]).long()
text = torch.tensor(sample["text_tokens"]).long()
text_length = torch.tensor(len(sample["text_tokens"])).long()
log_mel, log_mel_length = None, None
if LogMel in self.sup_data_types_set:
mel_path = sample["mel_filepath"]
if mel_path is not None and Path(mel_path).exists():
log_mel = torch.load(mel_path)
else:
mel_path = Path(self.sup_data_path) / f"mel_{audio_stem}.pt"
if mel_path.exists():
log_mel = torch.load(mel_path)
else:
log_mel = self.get_log_mel(audio)
torch.save(log_mel, mel_path)
log_mel = log_mel.squeeze(0)
log_mel_length = torch.tensor(log_mel.shape[1]).long()
durations = None
if Durations in self.sup_data_types_set:
durations = self.durs[index]
duration_prior = None
if DurationPrior in self.sup_data_types_set:
if self.use_beta_binomial_interpolator:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item()))
else:
prior_path = Path(self.sup_data_path) / f"pr_{audio_stem}.pt"
if prior_path.exists():
duration_prior = torch.load(prior_path)
else:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = beta_binomial_prior_distribution(text_length, mel_len)
duration_prior = torch.from_numpy(duration_prior)
torch.save(duration_prior, prior_path)
pitch, pitch_length = None, None
if Pitch in self.sup_data_types_set:
pitch_name = (
f"{audio_stem}_pitch_pyin_"
f"fmin{self.pitch_fmin}_fmax{self.pitch_fmax}_"
f"fl{self.win_length}_hs{self.hop_len}.pt"
)
pitch_path = Path(self.sup_data_path) / pitch_name
if pitch_path.exists():
pitch = torch.load(pitch_path).float()
else:
pitch, _, _ = librosa.pyin(
audio.numpy(),
fmin=self.pitch_fmin,
fmax=self.pitch_fmax,
frame_length=self.win_length,
sr=self.sample_rate,
fill_na=0.0,
)
pitch = torch.from_numpy(pitch).float()
torch.save(pitch, pitch_path)
if self.pitch_avg is not None and self.pitch_std is not None and self.pitch_norm:
pitch -= self.pitch_avg
                pitch[pitch == -self.pitch_avg] = 0.0  # Zero out values that were previously zero
pitch /= self.pitch_std
pitch_length = torch.tensor(len(pitch)).long()
energy, energy_length = None, None
if Energy in self.sup_data_types_set:
energy_path = Path(self.sup_data_path) / f"{audio_stem}_energy_wl{self.win_length}_hs{self.hop_len}.pt"
if energy_path.exists():
energy = torch.load(energy_path).float()
else:
spec = self.get_spec(audio)
energy = torch.linalg.norm(spec.squeeze(0), axis=0).float()
torch.save(energy, energy_path)
energy_length = torch.tensor(len(energy)).long()
speaker_id = None
if SpeakerID in self.sup_data_types_set:
speaker_id = torch.tensor(sample["speaker_id"]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
)
def __len__(self):
return len(self.data)
def join_data(self, data_dict):
result = []
for data_type in MAIN_DATA_TYPES + self.sup_data_types:
result.append(data_dict[data_type.name])
if issubclass(data_type, WithLens):
result.append(data_dict[f"{data_type.name}_lens"])
return tuple(result)
def general_collate_fn(self, batch):
(
_,
audio_lengths,
_,
tokens_lengths,
_,
log_mel_lengths,
durations_list,
duration_priors_list,
pitches,
pitches_lengths,
energies,
energies_lengths,
_,
) = zip(*batch)
max_audio_len = max(audio_lengths).item()
max_tokens_len = max(tokens_lengths).item()
max_log_mel_len = max(log_mel_lengths) if LogMel in self.sup_data_types_set else None
max_durations_len = max([len(i) for i in durations_list]) if Durations in self.sup_data_types_set else None
max_pitches_len = max(pitches_lengths).item() if Pitch in self.sup_data_types_set else None
max_energies_len = max(energies_lengths).item() if Energy in self.sup_data_types_set else None
if LogMel in self.sup_data_types_set:
log_mel_pad = torch.finfo(batch[0][2].dtype).tiny
duration_priors = (
torch.zeros(
len(duration_priors_list),
max([prior_i.shape[0] for prior_i in duration_priors_list]),
max([prior_i.shape[1] for prior_i in duration_priors_list]),
)
if DurationPrior in self.sup_data_types_set
else []
)
audios, tokens, log_mels, durations_list, pitches, energies, speaker_ids = [], [], [], [], [], [], []
for i, sample_tuple in enumerate(batch):
(
audio,
audio_len,
token,
token_len,
log_mel,
log_mel_len,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = sample_tuple
audio = general_padding(audio, audio_len.item(), max_audio_len)
audios.append(audio)
token = general_padding(token, token_len.item(), max_tokens_len, pad_value=self.text_tokenizer_pad_id)
tokens.append(token)
if LogMel in self.sup_data_types_set:
log_mels.append(general_padding(log_mel, log_mel_len, max_log_mel_len, pad_value=log_mel_pad))
if Durations in self.sup_data_types_set:
durations_list.append(general_padding(durations, len(durations), max_durations_len))
if DurationPrior in self.sup_data_types_set:
duration_priors[i, : duration_prior.shape[0], : duration_prior.shape[1]] = duration_prior
if Pitch in self.sup_data_types_set:
pitches.append(general_padding(pitch, pitch_length.item(), max_pitches_len))
if Energy in self.sup_data_types_set:
energies.append(general_padding(energy, energy_length.item(), max_energies_len))
if SpeakerID in self.sup_data_types_set:
speaker_ids.append(speaker_id)
data_dict = {
"audio": torch.stack(audios),
"audio_lens": torch.stack(audio_lengths),
"text": torch.stack(tokens),
"text_lens": torch.stack(tokens_lengths),
"log_mel": torch.stack(log_mels) if LogMel in self.sup_data_types_set else None,
"log_mel_lens": torch.stack(log_mel_lengths) if LogMel in self.sup_data_types_set else None,
"durations": torch.stack(durations_list) if Durations in self.sup_data_types_set else None,
"duration_prior": duration_priors if DurationPrior in self.sup_data_types_set else None,
"pitch": torch.stack(pitches) if Pitch in self.sup_data_types_set else None,
"pitch_lens": torch.stack(pitches_lengths) if Pitch in self.sup_data_types_set else None,
"energy": torch.stack(energies) if Energy in self.sup_data_types_set else None,
"energy_lens": torch.stack(energies_lengths) if Energy in self.sup_data_types_set else None,
"speaker_id": torch.stack(speaker_ids) if SpeakerID in self.sup_data_types_set else None,
}
return data_dict
def _collate_fn(self, batch):
data_dict = self.general_collate_fn(batch)
joined_data = self.join_data(data_dict)
return joined_data
class MixerTTSDataset(TTSDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _albert(self):
from transformers import AlbertTokenizer # noqa pylint: disable=import-outside-toplevel
self.lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
self.lm_padding_value = self.lm_model_tokenizer._convert_token_to_id('<pad>')
space_value = self.lm_model_tokenizer._convert_token_to_id('▁')
self.id2lm_tokens = {}
for i, d in enumerate(self.data):
raw_text = d["raw_text"]
assert isinstance(self.text_tokenizer, EnglishPhonemesTokenizer) or isinstance(
self.text_tokenizer, EnglishCharsTokenizer
)
preprocess_text_as_tts_input = self.text_tokenizer.text_preprocessing_func(raw_text)
lm_tokens_as_ids = self.lm_model_tokenizer.encode(preprocess_text_as_tts_input, add_special_tokens=False)
if self.text_tokenizer.pad_with_space:
lm_tokens_as_ids = [space_value] + lm_tokens_as_ids + [space_value]
self.id2lm_tokens[i] = lm_tokens_as_ids
def add_lm_tokens(self, **kwargs):
lm_model = kwargs.pop('lm_model')
if lm_model == "albert":
self._albert()
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def __getitem__(self, index):
(
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = super().__getitem__(index)
lm_tokens = None
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.tensor(self.id2lm_tokens[index]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
lm_tokens,
)
def _collate_fn(self, batch):
batch = list(zip(*batch))
data_dict = self.general_collate_fn(list(zip(*batch[:13])))
lm_tokens_list = batch[13]
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.full(
(len(lm_tokens_list), max([lm_tokens.shape[0] for lm_tokens in lm_tokens_list])),
fill_value=self.lm_padding_value,
)
for i, lm_tokens_i in enumerate(lm_tokens_list):
lm_tokens[i, : lm_tokens_i.shape[0]] = lm_tokens_i
data_dict[LMTokens.name] = lm_tokens
joined_data = self.join_data(data_dict)
return joined_data
| [((235, 26, 235, 74), 'nemo.collections.asr.parts.preprocessing.features.WaveformFeaturizer', 'WaveformFeaturizer', (), '', False, 'from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer\n'), ((282, 26, 282, 47), 'torch.load', 'torch.load', ({(282, 37, 282, 46): 'durs_file'}, {}), '(durs_file)', False, 'import torch\n'), ((556, 34, 556, 83), 'transformers.AlbertTokenizer.from_pretrained', 'AlbertTokenizer.from_pretrained', ({(556, 66, 556, 82): '"""albert-base-v2"""'}, {}), "('albert-base-v2')", False, 'from transformers import AlbertTokenizer\n'), ((197, 12, 197, 80), 'nemo.utils.logging.info', 'logging.info', ({(197, 25, 197, 79): 'f"""Dataset contains {total_duration / 3600:.2f} hours."""'}, {}), "(f'Dataset contains {total_duration / 3600:.2f} hours.')", False, 'from nemo.utils import logging\n'), ((200, 12, 200, 66), 'nemo.utils.logging.info', 'logging.info', ({(200, 25, 200, 65): 'f"""using {ignore_file} to prune dataset."""'}, {}), "(f'using {ignore_file} to prune dataset.')", False, 'from nemo.utils import logging\n'), ((229, 12, 232, 13), 'nemo.utils.logging.info', 'logging.info', ({(230, 16, 231, 73): 'f"""Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains {(total_duration - pruned_duration) / 3600:.2f} hours."""'}, {}), "(\n f'Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains {(total_duration - pruned_duration) / 3600:.2f} hours.'\n )", False, 'from nemo.utils import logging\n'), ((298, 46, 298, 72), 'nemo.collections.tts.torch.helpers.BetaBinomialInterpolator', 'BetaBinomialInterpolator', ({}, {}), '()', False, 'from nemo.collections.tts.torch.helpers import BetaBinomialInterpolator, beta_binomial_prior_distribution, general_padding\n'), ((301, 51, 301, 75), 'librosa.note_to_hz', 'librosa.note_to_hz', ({(301, 70, 301, 74): '"""C2"""'}, {}), "('C2')", False, 'import librosa\n'), ((302, 51, 302, 75), 'librosa.note_to_hz', 'librosa.note_to_hz', ({(302, 70, 302, 74): '"""C7"""'}, {}), "('C7')", False, 'import librosa\n'), ((314, 13, 314, 51), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', (), '', False, 'import torch\n'), ((322, 13, 322, 51), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', (), '', False, 'import torch\n'), ((330, 21, 330, 51), 'pathlib.Path', 'Path', ({(330, 26, 330, 50): "sample['audio_filepath']"}, {}), "(sample['audio_filepath'])", False, 'from pathlib import Path\n'), ((526, 21, 526, 40), 'torch.stack', 'torch.stack', ({(526, 33, 526, 39): 'audios'}, {}), '(audios)', False, 'import torch\n'), ((527, 26, 527, 52), 'torch.stack', 'torch.stack', ({(527, 38, 527, 51): 'audio_lengths'}, {}), '(audio_lengths)', False, 'import torch\n'), ((528, 20, 528, 39), 'torch.stack', 'torch.stack', ({(528, 32, 528, 38): 'tokens'}, {}), '(tokens)', False, 'import torch\n'), ((529, 25, 529, 52), 'torch.stack', 'torch.stack', ({(529, 37, 529, 51): 'tokens_lengths'}, {}), '(tokens_lengths)', False, 'import torch\n'), ((162, 16, 162, 70), 'nemo.utils.logging.info', 'logging.info', ({(162, 29, 162, 69): 'f"""Loading dataset from {manifest_file}."""'}, {}), "(f'Loading dataset from {manifest_file}.')", False, 'from nemo.utils import logging\n'), ((163, 28, 163, 35), 'tqdm.tqdm', 'tqdm', ({(163, 33, 163, 34): 'f'}, {}), '(f)', False, 'from tqdm import tqdm\n'), ((208, 23, 208, 39), 'pathlib.Path', 'Path', ({(208, 28, 208, 38): 'audio_path'}, {}), '(audio_path)', False, 'from pathlib import Path\n'), ((285, 20, 285, 45), 'pathlib.Path', 'Path', ({(285, 25, 285, 44): "d['audio_filepath']"}, {}), 
"(d['audio_filepath'])", False, 'from pathlib import Path\n'), ((317, 23, 317, 47), 'torch.view_as_real', 'torch.view_as_real', ({(317, 42, 317, 46): 'spec'}, {}), '(spec)', False, 'import torch\n'), ((335, 15, 335, 50), 'torch.tensor', 'torch.tensor', ({(335, 28, 335, 49): "sample['text_tokens']"}, {}), "(sample['text_tokens'])", False, 'import torch\n'), ((343, 26, 343, 46), 'torch.load', 'torch.load', ({(343, 37, 343, 45): 'mel_path'}, {}), '(mel_path)', False, 'import torch\n'), ((384, 25, 384, 49), 'pathlib.Path', 'Path', ({(384, 30, 384, 48): 'self.sup_data_path'}, {}), '(self.sup_data_path)', False, 'from pathlib import Path\n'), ((397, 16, 397, 45), 'torch.save', 'torch.save', ({(397, 27, 397, 32): 'pitch', (397, 34, 397, 44): 'pitch_path'}, {}), '(pitch, pitch_path)', False, 'import torch\n'), ((408, 26, 408, 50), 'pathlib.Path', 'Path', ({(408, 31, 408, 49): 'self.sup_data_path'}, {}), '(self.sup_data_path)', False, 'from pathlib import Path\n'), ((414, 16, 414, 47), 'torch.save', 'torch.save', ({(414, 27, 414, 33): 'energy', (414, 35, 414, 46): 'energy_path'}, {}), '(energy, energy_path)', False, 'import torch\n'), ((476, 26, 476, 56), 'torch.finfo', 'torch.finfo', ({(476, 38, 476, 55): 'batch[0][2].dtype'}, {}), '(batch[0][2].dtype)', False, 'import torch\n'), ((530, 23, 530, 44), 'torch.stack', 'torch.stack', ({(530, 35, 530, 43): 'log_mels'}, {}), '(log_mels)', False, 'import torch\n'), ((531, 28, 531, 56), 'torch.stack', 'torch.stack', ({(531, 40, 531, 55): 'log_mel_lengths'}, {}), '(log_mel_lengths)', False, 'import torch\n'), ((532, 25, 532, 52), 'torch.stack', 'torch.stack', ({(532, 37, 532, 51): 'durations_list'}, {}), '(durations_list)', False, 'import torch\n'), ((534, 21, 534, 41), 'torch.stack', 'torch.stack', ({(534, 33, 534, 40): 'pitches'}, {}), '(pitches)', False, 'import torch\n'), ((535, 26, 535, 54), 'torch.stack', 'torch.stack', ({(535, 38, 535, 53): 'pitches_lengths'}, {}), '(pitches_lengths)', False, 'import torch\n'), ((536, 22, 536, 43), 'torch.stack', 'torch.stack', ({(536, 34, 536, 42): 'energies'}, {}), '(energies)', False, 'import torch\n'), ((537, 27, 537, 56), 'torch.stack', 'torch.stack', ({(537, 39, 537, 55): 'energies_lengths'}, {}), '(energies_lengths)', False, 'import torch\n'), ((538, 26, 538, 50), 'torch.stack', 'torch.stack', ({(538, 38, 538, 49): 'speaker_ids'}, {}), '(speaker_ids)', False, 'import torch\n'), ((149, 12, 149, 31), 'pathlib.Path', 'Path', ({(149, 17, 149, 30): 'sup_data_path'}, {}), '(sup_data_path)', False, 'from pathlib import Path\n'), ((164, 27, 164, 43), 'json.loads', 'json.loads', ({(164, 38, 164, 42): 'line'}, {}), '(line)', False, 'import json\n'), ((202, 37, 202, 51), 'pickle.load', 'pickle.load', ({(202, 49, 202, 50): 'f'}, {}), '(f)', False, 'import pickle\n'), ((247, 12, 249, 13), 'librosa.filters.mel', 'librosa.filters.mel', (), '', False, 'import librosa\n'), ((333, 40, 333, 71), 'torch.tensor', 'torch.tensor', ({(333, 53, 333, 70): 'features.shape[0]'}, {}), '(features.shape[0])', False, 'import torch\n'), ((345, 27, 345, 51), 'pathlib.Path', 'Path', ({(345, 32, 345, 50): 'self.sup_data_path'}, {}), '(self.sup_data_path)', False, 'from pathlib import Path\n'), ((348, 30, 348, 50), 'torch.load', 'torch.load', ({(348, 41, 348, 49): 'mel_path'}, {}), '(mel_path)', False, 'import torch\n'), ((351, 20, 351, 49), 'torch.save', 'torch.save', ({(351, 31, 351, 38): 'log_mel', (351, 40, 351, 48): 'mel_path'}, {}), '(log_mel, mel_path)', False, 'import torch\n'), ((354, 29, 354, 59), 'torch.tensor', 'torch.tensor', 
({(354, 42, 354, 58): 'log_mel.shape[1]'}, {}), '(log_mel.shape[1])', False, 'import torch\n'), ((366, 29, 366, 53), 'pathlib.Path', 'Path', ({(366, 34, 366, 52): 'self.sup_data_path'}, {}), '(self.sup_data_path)', False, 'from pathlib import Path\n'), ((369, 37, 369, 59), 'torch.load', 'torch.load', ({(369, 48, 369, 58): 'prior_path'}, {}), '(prior_path)', False, 'import torch\n'), ((372, 37, 372, 91), 'nemo.collections.tts.torch.helpers.beta_binomial_prior_distribution', 'beta_binomial_prior_distribution', ({(372, 70, 372, 81): 'text_length', (372, 83, 372, 90): 'mel_len'}, {}), '(text_length, mel_len)', False, 'from nemo.collections.tts.torch.helpers import BetaBinomialInterpolator, beta_binomial_prior_distribution, general_padding\n'), ((373, 37, 373, 69), 'torch.from_numpy', 'torch.from_numpy', ({(373, 54, 373, 68): 'duration_prior'}, {}), '(duration_prior)', False, 'import torch\n'), ((374, 20, 374, 58), 'torch.save', 'torch.save', ({(374, 31, 374, 45): 'duration_prior', (374, 47, 374, 57): 'prior_path'}, {}), '(duration_prior, prior_path)', False, 'import torch\n'), ((420, 25, 420, 59), 'torch.tensor', 'torch.tensor', ({(420, 38, 420, 58): "sample['speaker_id']"}, {}), "(sample['speaker_id'])", False, 'import torch\n'), ((513, 32, 513, 109), 'nemo.collections.tts.torch.helpers.general_padding', 'general_padding', (), '', False, 'from nemo.collections.tts.torch.helpers import BetaBinomialInterpolator, beta_binomial_prior_distribution, general_padding\n'), ((605, 24, 605, 62), 'torch.tensor', 'torch.tensor', ({(605, 37, 605, 61): 'self.id2lm_tokens[index]'}, {}), '(self.id2lm_tokens[index])', False, 'import torch\n'), ((187, 24, 189, 25), 'nemo.utils.logging.info', 'logging.info', ({(188, 28, 188, 111): '"""Not all audio files have duration information. Duration logging will be disabled."""'}, {}), "(\n 'Not all audio files have duration information. Duration logging will be disabled.'\n )", False, 'from nemo.utils import logging\n'), ((342, 40, 342, 54), 'pathlib.Path', 'Path', ({(342, 45, 342, 53): 'mel_path'}, {}), '(mel_path)', False, 'from pathlib import Path\n'), ((386, 24, 386, 46), 'torch.load', 'torch.load', ({(386, 35, 386, 45): 'pitch_path'}, {}), '(pitch_path)', False, 'import torch\n'), ((396, 24, 396, 47), 'torch.from_numpy', 'torch.from_numpy', ({(396, 41, 396, 46): 'pitch'}, {}), '(pitch)', False, 'import torch\n'), ((410, 25, 410, 48), 'torch.load', 'torch.load', ({(410, 36, 410, 47): 'energy_path'}, {}), '(energy_path)', False, 'import torch\n'), ((161, 22, 161, 41), 'pathlib.Path', 'Path', ({(161, 27, 161, 40): 'manifest_file'}, {}), '(manifest_file)', False, 'from pathlib import Path\n'), ((201, 22, 201, 39), 'pathlib.Path', 'Path', ({(201, 27, 201, 38): 'ignore_file'}, {}), '(ignore_file)', False, 'from pathlib import Path\n'), ((325, 53, 325, 75), 'torch.finfo', 'torch.finfo', ({(325, 65, 325, 74): 'mel.dtype'}, {}), '(mel.dtype)', False, 'import torch\n')] |
MarkWengSTR/ansys-maxwell-online | anmotordesign/server.py | f9bbc535c7637d8f34abb241acfb97d1bdbe4103 | from flask import Flask, request, jsonify
from flask_cors import CORS
from run import run_ansys
from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check
ansys_processing_count = 0
# debug
# import ipdb; ipdb.set_trace()
app = Flask(__name__)
CORS(app) # local development cors
@app.route('/run_simu', methods=["POST"])
def run_simulation():
global ansys_processing_count
ansys_processing_count += 1
ctx = {
"request": request.get_json(),
"allow_run": True,
"process": {
"limit": 4,
"count": ansys_processing_count,
},
"start_run_response": {"msg": "start run at background"},
"error": {
"validate": {"msg": ""}
}
}
if spec_present(ctx) and \
data_type_validate(ctx) and \
spec_keys_validate(ctx) and \
ansys_overload_check(ctx):
        ctx = run_ansys(ctx)
else:
return jsonify(ctx["error"]["validate"])
return jsonify(ctx["response"])
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| [((10, 6, 10, 21), 'flask.Flask', 'Flask', ({(10, 12, 10, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, request, jsonify\n'), ((11, 0, 11, 9), 'flask_cors.CORS', 'CORS', ({(11, 5, 11, 8): 'app'}, {}), '(app)', False, 'from flask_cors import CORS\n'), ((39, 11, 39, 35), 'flask.jsonify', 'jsonify', ({(39, 19, 39, 34): "ctx['response']"}, {}), "(ctx['response'])", False, 'from flask import Flask, request, jsonify\n'), ((19, 19, 19, 37), 'flask.request.get_json', 'request.get_json', ({}, {}), '()', False, 'from flask import Flask, request, jsonify\n'), ((31, 7, 31, 24), 'api.validate.spec_present', 'spec_present', ({(31, 20, 31, 23): 'ctx'}, {}), '(ctx)', False, 'from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check\n'), ((32, 12, 32, 35), 'api.validate.data_type_validate', 'data_type_validate', ({(32, 31, 32, 34): 'ctx'}, {}), '(ctx)', False, 'from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check\n'), ((33, 12, 33, 35), 'api.validate.spec_keys_validate', 'spec_keys_validate', ({(33, 31, 33, 34): 'ctx'}, {}), '(ctx)', False, 'from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check\n'), ((34, 12, 34, 37), 'api.validate.ansys_overload_check', 'ansys_overload_check', ({(34, 33, 34, 36): 'ctx'}, {}), '(ctx)', False, 'from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check\n'), ((35, 14, 35, 33), 'run.run_ansys', 'run_ansys', ({(35, 24, 35, 32): 'self.ctx'}, {}), '(self.ctx)', False, 'from run import run_ansys\n'), ((37, 15, 37, 48), 'flask.jsonify', 'jsonify', ({(37, 23, 37, 47): "ctx['error']['validate']"}, {}), "(ctx['error']['validate'])", False, 'from flask import Flask, request, jsonify\n')] |
eric8607242/darts | cnn/donas_utils/dataset/__init__.py | 34c79a0956039f56a6a87bfb7f4b1ae2af615bea | from .dataset import get_cifar100, get_cifar10, get_imagenet_lmdb, get_imagenet
__all__ = ["get_cifar100", "get_cifar10", "get_imagenet_lmdb", "get_imagenet"]
| [] |
ahmdrz/spam-classifier | classifier/cross_validation.py | a9cc3916a7c22545c82f0bfae7e4b95f3b36248f | from sklearn.model_selection import KFold
def kfold_cross_validation(data, k=10):
kfold = KFold(n_splits=k)
for train, test in kfold.split(data):
yield data[train], data[test] | [((4, 12, 4, 29), 'sklearn.model_selection.KFold', 'KFold', (), '', False, 'from sklearn.model_selection import KFold\n')] |
captainxavier/AutoBlog | category/models.py | 44fb23628fe0210a3dcec80b91e1217d27ee9462 | from django.db import models
class Category(models.Model):
title = models.CharField(max_length=20)
class Meta:
db_table = 'category'
verbose_name = ("Category")
verbose_name_plural = ("Categories")
def __str__(self):
return self.title
| [((5, 12, 5, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n')] |
aucoeur/WeVoteServer | admin_tools/urls.py | 7b30bdbb59d6e0c19abc81237aa42fba7de1a432 | # admin_tools/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import re_path
from . import views
urlpatterns = [
re_path(r'^$', views.admin_home_view, name='admin_home',),
re_path(r'^data_cleanup/$', views.data_cleanup_view, name='data_cleanup'),
re_path(r'^data_cleanup_organization_analysis/$',
views.data_cleanup_organization_analysis_view, name='data_cleanup_organization_analysis'),
re_path(r'^data_cleanup_organization_list_analysis/$',
views.data_cleanup_organization_list_analysis_view, name='data_cleanup_organization_list_analysis'),
re_path(r'^data_cleanup_position_list_analysis/$',
views.data_cleanup_position_list_analysis_view, name='data_cleanup_position_list_analysis'),
re_path(r'^data_cleanup_voter_hanging_data_process/$',
views.data_cleanup_voter_hanging_data_process_view, name='data_cleanup_voter_hanging_data_process'),
re_path(r'^data_cleanup_voter_list_analysis/$',
views.data_cleanup_voter_list_analysis_view, name='data_cleanup_voter_list_analysis'),
re_path(r'^data_voter_statistics/$', views.data_voter_statistics_view, name='data_voter_statistics'),
re_path(r'^import_sample_data/$', views.import_sample_data_view, name='import_sample_data'),
re_path(r'^statistics/$', views.statistics_summary_view, name='statistics_summary'),
re_path(r'^sync_dashboard/$', views.sync_data_with_master_servers_view, name='sync_dashboard'),
]
| [((10, 4, 10, 61), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n'), ((11, 4, 11, 77), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n'), ((12, 4, 13, 97), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n'), ((14, 4, 15, 107), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n'), ((16, 4, 17, 99), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n'), ((18, 4, 19, 107), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n'), ((20, 4, 21, 93), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n'), ((22, 4, 22, 104), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n'), ((23, 4, 23, 95), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n'), ((24, 4, 24, 87), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n'), ((25, 4, 25, 98), 'django.conf.urls.re_path', 're_path', (), '', False, 'from django.conf.urls import re_path\n')] |
tautomer/hippynn | hippynn/graphs/nodes/base/multi.py | df4504a5ea4680cfc61f490984dcddeac7ed99ee | """
A base node that provides several output tensors.
"""
from ....layers.algebra import Idx
from .base import SingleNode, Node
from .. import _debprint
from ...indextypes import IdxType
class IndexNode(SingleNode):
_input_names = ("parent",)
def __init__(self, name, parents, index, index_state=None):
if len(parents) != 1:
raise TypeError("Index node takes exactly one parent.")
par = parents[0]
iname = par._output_names[index] if hasattr(par, "_output_names") else "<{index}>".format(index=index)
repr_info = {"parent_name": par.name, "index": iname}
module = Idx(index, repr_info=repr_info)
self.index = index
self._index_state = IdxType.NotFound if index_state is None else index_state
super().__init__(name, parents, module=module)
class MultiNode(Node): # Multinode
_output_names = NotImplemented
_output_index_states = NotImplemented # optional?
_main_output = NotImplemented
def __init__(self, name, parents, module="auto", *args, db_name=None, **kwargs):
super().__init__(name, parents, *args, module=module, **kwargs)
self.children = tuple(
IndexNode(name + "." + cn, (self,), index=i, index_state=cidx)
for i, (cn, cidx) in enumerate(zip(self._output_names, self._output_index_states))
)
self.main_output.db_name = db_name
def set_dbname(self, db_name):
self.main_output.set_dbname(db_name)
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# Enforce _child_index_states has same length as _output_names
if cls._output_index_states is not NotImplemented:
if len(cls._output_index_states) != len(cls._output_names):
raise AssertionError(
"Lengths of _child_index_states {} doesn't match lengths of ouput_names {}".format(
cls._output_index_states, cls._output_names
)
)
# Enforce no name conflict between input names and output names
if cls._input_names is not NotImplemented:
try:
assert all(o not in cls._input_names for o in cls._output_names)
except AssertionError as ae:
raise ValueError(
"Multi-node output names {} conflict with input names {}".format(
cls._output_names, cls._input_names
)
) from ae
def __dir__(self):
dir_ = super().__dir__()
if self._output_names is not NotImplemented:
dir_ = dir_ + list(self._output_names)
return dir_
def __getattr__(self, item):
if item in ("children", "_output_names"): # Guard against recursion
raise AttributeError("Attribute {} not yet present.".format(item))
try:
return super().__getattr__(item) # Defer to BaseNode first
except AttributeError:
pass
try:
return self.children[self._output_names.index(item)]
except (AttributeError, ValueError):
raise AttributeError("{} object has no attribute '{}'".format(self.__class__, item))
@property
def main_output(self):
if self._main_output is NotImplemented:
return super().main_output
return getattr(self, self._main_output)
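# A minimal sketch of what a concrete subclass is expected to declare so that
# __init_subclass__ above passes (the names here are hypothetical, not part of
# hippynn):
#
#     class PairOutputNode(MultiNode):
#         _input_names = ("features",)
#         _output_names = ("first", "second")
#         _output_index_states = (IdxType.NotFound, IdxType.NotFound)
#         _main_output = "first"
#
# __init_subclass__ checks that _output_index_states matches _output_names in
# length and that no output name collides with an input name.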
| [] |
JohanNicander/python-test-architecture | main_module/__init__.py | 2418f861cb46c3fccaa21be94ee92c5862985a15 | from .zero import zero
from main_module._unittester import UnitTester
test = UnitTester(__name__)
del UnitTester | [((4, 7, 4, 27), 'main_module._unittester.UnitTester', 'UnitTester', ({(4, 18, 4, 26): '__name__'}, {}), '(__name__)', False, 'from main_module._unittester import UnitTester\n')] |
LSSTDESC/barber | barber/cutter.py | 9dbe69e69a078ef3b70a316807517e2a4d4e60cd | import numpy as np
import numpy.random as npr
import scipy.optimize as spo
import tomo_challenge.metrics as tcm
# custom data type, could be replaced with/tie in to tree.py class
# cut_vals is (nfeat, nbins - 1) numpy array, float
# tree_ids is ((nbins,) * nfeat) numpy array, int
TreePars = namedtuple('TreePars', ['cut_vals', 'tree_ids'])
# should maybe put this function in a class so we can call TreePars.to_array
def treepars_to_array(treepars):
"""
Flattens cut_vals and tree_ids for optimizer
"""
    cuts = np.ravel(treepars.cut_vals)
    ids = np.ravel(treepars.tree_ids)
arr = np.concatenate((cuts, ids))
return(arr)
# should maybe put this function in a class so we can call TreePars.from_array
def array_to_treepars(arr):
"""
Converts optimizer format of 1D array back into namedtuple of arrays
"""
flat_cuts = arr[type(arr) == float]
flat_ids = arr[type(arr) == int]
nbins = len(np.unique(flat_ids))
nfeat = len(flat_cuts) / (nbins - 1)
# maybe do some assert checks with these just in case types have problems
# cuts = arr[0:nfeat*(nbins-1)].reshape((nfeat, nbins-1))
# ids = arr[feat*(nbins-1):].reshape((nbins,) * nfeat)
cuts = flat_cuts.reshape((nfeat, nbins-1))
ids = flat_ids.reshape((nbins,) * nfeat)
treepars = TreePars(cuts, ids)
return(treepars)
def get_cuts(galaxies, ival_treepars=None, nbins=3):
"""
Obtains simplest possible bin definitions: cuts in the space of observables given number of bins
Parameters
----------
galaxies: numpy.ndarray, float
observables (magnitudes and/or colors and/or errors) to serve as features for set of galaxies
shape(galaxies) = (ngals, nfeat)
ival_treepars: namedtuple, numpy.ndarray, float and int, optional
initial values for decision tree parameters
shape(ivals.cut_vals) = (nfeat, (nbins - 1))
shape(tree_ids) = ((nbins,) * nfeat)
nbins: int, optional
number of bins for which to obtain cuts
Returns
-------
assignments: numpy.ndarray, int
bin assignment for each galaxy
shape(assignments) = (ngals, 1)
Notes
-----
`sort_gals` does the heavy lifting.
`eval_metric` will call one of the metrics from [tomo_challenge](https://github.com/LSSTDESC/tomo_challenge/blob/master/tomo_challenge/metrics.py).
The original idea for a general, non-cut-based optimizer was to have parameters equal to the (ngals) length array of ints representing the bin assignments, but that's not necessary for the simple cut-and-sweep barber and would probably break `spo.minimize`.
"""
(ngals, nfeat) = np.shape(galaxies)
if ival_treepars is None:
cut_ivals = np.quantile(galaxies, np.linspace(0., 1., nbins), axis=1)
assert(len(np.flatten(ivals)) == nbins**nfeat)
# need structure and way of making dumb version of these
tree_ids = npr.random_integers(0, nbins, nbins**nfeat)
assert(len(np.unique(tree_ids)) == nbins)
tree_ids.reshape((nfeat, nbins))
ival_treepars = TreePars(cut_ivals, tree_ids)
ivals = treepars_to_array(ival_treepars)
opt_res = spo.minimize(eval_metric, ivals, args=galaxies)
treepars = array_to_treepars(opt_res.x)
assignments = sort_gals(galaxies, treepars)
return(assignments)
def sort_gals(galaxies, tree_pars):
"""
Divides available galaxies into subsets according to a given decision tree on their observables
Parameters
----------
galaxies: nfeature x n_gal array
tree: tree object
Notes
-----
could be based on bisect, or maybe a sklearn object?
"""
pass
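# One possible shape for the step sort_gals leaves open, assuming a pure
# cut-based tree: digitize each feature against its cut values, then look the
# resulting cell up in tree_ids. An illustrative sketch only, not the intended
# implementation.
def sort_gals_sketch(galaxies, treepars):
    cell_index = tuple(
        np.digitize(galaxies[:, f], treepars.cut_vals[f])
        for f in range(galaxies.shape[1])
    )
    # each per-feature index addresses one axis of the (nbins,) * nfeat grid
    return treepars.tree_ids[cell_index]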
def eval_metric(arr, galaxies):
"""
Just calls a metric from tomo_challenge wrapped for the `spo.minimize` API
Notes
-----
Replace `tcm.metric` with actual call to one of the tomo_challenge metrics
Actually, there's a problem in that the current tomo_challenge metrics require the true redshifts...
"""
treepars = array_to_treepars(arr)
assignments = sort_gals(galaxies, treepars)
metval = tcm.metric(assignments)
return metval
| [((16, 11, 16, 40), 'numpy.flatten', 'np.flatten', ({(16, 22, 16, 39): 'treepars.cut_vals'}, {}), '(treepars.cut_vals)', True, 'import numpy as np\n'), ((17, 10, 17, 39), 'numpy.flatten', 'np.flatten', ({(17, 21, 17, 38): 'treepars.tree_ids'}, {}), '(treepars.tree_ids)', True, 'import numpy as np\n'), ((18, 10, 18, 37), 'numpy.concatenate', 'np.concatenate', ({(18, 25, 18, 36): '(cuts, ids)'}, {}), '((cuts, ids))', True, 'import numpy as np\n'), ((66, 21, 66, 39), 'numpy.shape', 'np.shape', ({(66, 30, 66, 38): 'galaxies'}, {}), '(galaxies)', True, 'import numpy as np\n'), ((80, 14, 80, 61), 'scipy.optimize.minimize', 'spo.minimize', (), '', True, 'import scipy.optimize as spo\n'), ((115, 13, 115, 36), 'tomo_challenge.metrics.metric', 'tcm.metric', ({(115, 24, 115, 35): 'assignments'}, {}), '(assignments)', True, 'import tomo_challenge.metrics as tcm\n'), ((28, 16, 28, 35), 'numpy.unique', 'np.unique', ({(28, 26, 28, 34): 'flat_ids'}, {}), '(flat_ids)', True, 'import numpy as np\n'), ((73, 19, 73, 62), 'numpy.random.random_integers', 'npr.random_integers', ({(73, 39, 73, 40): '0', (73, 42, 73, 47): 'nbins', (73, 49, 73, 61): 'nbins ** nfeat'}, {}), '(0, nbins, nbins ** nfeat)', True, 'import numpy.random as npr\n'), ((69, 42, 69, 68), 'numpy.linspace', 'np.linspace', ({(69, 54, 69, 56): '0.0', (69, 58, 69, 60): '1.0', (69, 62, 69, 67): 'nbins'}, {}), '(0.0, 1.0, nbins)', True, 'import numpy as np\n'), ((70, 19, 70, 36), 'numpy.flatten', 'np.flatten', ({(70, 30, 70, 35): 'ivals'}, {}), '(ivals)', True, 'import numpy as np\n'), ((74, 19, 74, 38), 'numpy.unique', 'np.unique', ({(74, 29, 74, 37): 'tree_ids'}, {}), '(tree_ids)', True, 'import numpy as np\n')] |
coding-world/matrix_max7219 | examples/transfer/highscore.py | 3126604ee400a9ec1d25797f6957a2eae8a3f33c | import shelve
regal = shelve.open('score.txt')
def updateScore(neuerScore):
if('score' in regal):
score = regal['score']
        if(neuerScore not in score):
            score.insert(0, neuerScore)
            score.sort()
        ranking = score.index(neuerScore)
        ranking = len(score)-ranking
else:
score = [neuerScore]
ranking = 1
print(score)
print(ranking)
regal['score'] = score
return ranking
neuerScore = int(input("Neuer HighScore: \n"))
updateScore(neuerScore) | [((3, 8, 3, 32), 'shelve.open', 'shelve.open', ({(3, 20, 3, 31): '"""score.txt"""'}, {}), "('score.txt')", False, 'import shelve\n')] |
enfold/node.ext.ldap | src/node/ext/ldap/scope.py | 28127057be6ba3092389f3c920575292d43d9f94 | # -*- coding: utf-8 -*-
import ldap
BASE = ldap.SCOPE_BASE
ONELEVEL = ldap.SCOPE_ONELEVEL
SUBTREE = ldap.SCOPE_SUBTREE
SCOPES = [BASE, ONELEVEL, SUBTREE]
del ldap
| [] |
tensorflow-korea/tfk-notebooks | urban-sound-classification/feature_merge.py | 67831acce7f435500377bf03e6bd9d15fdd5f1bc | import glob
import numpy as np
X = np.empty((0, 193))
y = np.empty((0, 10))
groups = np.empty((0, 1))
npz_files = glob.glob('./urban_sound_?.npz')
for fn in npz_files:
print(fn)
data = np.load(fn)
X = np.append(X, data['X'], axis=0)
y = np.append(y, data['y'], axis=0)
groups = np.append(groups, data['groups'], axis=0)
print(groups[groups>0])
print(X.shape, y.shape)
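# sanity check: print any label rows that have more than one class set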
for r in y:
if np.sum(r) > 1.5:
print(r)
np.savez('urban_sound', X=X, y=y, groups=groups)
| [((4, 4, 4, 22), 'numpy.empty', 'np.empty', ({(4, 13, 4, 21): '(0, 193)'}, {}), '((0, 193))', True, 'import numpy as np\n'), ((5, 4, 5, 21), 'numpy.empty', 'np.empty', ({(5, 13, 5, 20): '(0, 10)'}, {}), '((0, 10))', True, 'import numpy as np\n'), ((6, 9, 6, 25), 'numpy.empty', 'np.empty', ({(6, 18, 6, 24): '(0, 1)'}, {}), '((0, 1))', True, 'import numpy as np\n'), ((7, 12, 7, 44), 'glob.glob', 'glob.glob', ({(7, 22, 7, 43): '"""./urban_sound_?.npz"""'}, {}), "('./urban_sound_?.npz')", False, 'import glob\n'), ((21, 0, 21, 48), 'numpy.savez', 'np.savez', (), '', True, 'import numpy as np\n'), ((10, 11, 10, 22), 'numpy.load', 'np.load', ({(10, 19, 10, 21): 'fn'}, {}), '(fn)', True, 'import numpy as np\n'), ((11, 8, 11, 39), 'numpy.append', 'np.append', (), '', True, 'import numpy as np\n'), ((12, 8, 12, 39), 'numpy.append', 'np.append', (), '', True, 'import numpy as np\n'), ((13, 13, 13, 54), 'numpy.append', 'np.append', (), '', True, 'import numpy as np\n'), ((19, 7, 19, 16), 'numpy.sum', 'np.sum', ({(19, 14, 19, 15): 'r'}, {}), '(r)', True, 'import numpy as np\n')] |
JankaSvK/thesis | program/program/trackers/TrackerCorrelation.py | c440ab8242b058f580fdf9d5a1d00708a1696561 | import dlib
class CorrelationTracker(object):
def init(self, image, bbox):
self.tracker = dlib.correlation_tracker()
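        # bbox arrives as (x, y, width, height); dlib.rectangle expects corner coordinates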
x, y, x2, y2 = bbox
x2 += x
y2 += y
self.tracker.start_track(image, dlib.rectangle(x, y, x2, y2))
return True
def update(self, image):
self.tracker.update(image)
out = self.tracker.get_position()
return True, (out.left(), out.top(), out.right() - out.left(), out.bottom() - out.top())
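# Hypothetical usage sketch (frame arrays and bbox values are assumptions, not
# part of this module):
#
#     tracker = CorrelationTracker()
#     tracker.init(first_frame, (x, y, w, h))
#     ok, (x, y, w, h) = tracker.update(next_frame)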
| [((6, 23, 6, 49), 'dlib.correlation_tracker', 'dlib.correlation_tracker', ({}, {}), '()', False, 'import dlib\n'), ((10, 40, 10, 68), 'dlib.rectangle', 'dlib.rectangle', ({(10, 55, 10, 56): 'x', (10, 58, 10, 59): 'y', (10, 61, 10, 63): 'x2', (10, 65, 10, 67): 'y2'}, {}), '(x, y, x2, y2)', False, 'import dlib\n')] |
rilango/NeMo | examples/nlp/language_modeling/megatron_gpt_ckpt_to_nemo.py | 6f23ff725c596f25fab6043d95e7c0b4a5f56331 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
import torch.multiprocessing as mp
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.utils import AppState, logging
def get_args():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint_folder",
type=str,
default=None,
required=True,
help="Path to PTL checkpoints saved during training. Ex: /raid/nemo_experiments/megatron_gpt/checkpoints",
)
parser.add_argument(
"--checkpoint_name",
type=str,
default=None,
required=True,
help="Name of checkpoint to be used. Ex: megatron_gpt--val_loss=6.34-step=649-last.ckpt",
)
parser.add_argument(
"--hparams_file",
type=str,
default=None,
required=False,
help="Path config for restoring. It's created during training and may need to be modified during restore if restore environment is different than training. Ex: /raid/nemo_experiments/megatron_gpt/hparams.yaml",
)
parser.add_argument("--nemo_file_path", type=str, default=None, required=True, help="Path to output .nemo file.")
parser.add_argument("--tensor_model_parallel_size", type=int, required=True, default=None)
args = parser.parse_args()
return args
def convert(rank, world_size, args):
app_state = AppState()
app_state.data_parallel_rank = 0
trainer = Trainer(gpus=args.tensor_model_parallel_size)
# TODO: reach out to PTL For an API-safe local rank override
trainer.accelerator.training_type_plugin._local_rank = rank
if args.tensor_model_parallel_size is not None and args.tensor_model_parallel_size > 1:
# inject model parallel rank
checkpoint_path = os.path.join(args.checkpoint_folder, f'mp_rank_{rank:02d}', args.checkpoint_name)
else:
checkpoint_path = os.path.join(args.checkpoint_folder, args.checkpoint_name)
model = MegatronGPTModel.load_from_checkpoint(checkpoint_path, hparams_file=args.hparams_file, trainer=trainer)
model._save_restore_connector = NLPSaveRestoreConnector()
model.save_to(args.nemo_file_path)
logging.info(f'NeMo model saved to: {args.nemo_file_path}')
def main() -> None:
args = get_args()
world_size = args.tensor_model_parallel_size
mp.spawn(convert, args=(world_size, args), nprocs=world_size, join=True)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| [((28, 13, 28, 29), 'argparse.ArgumentParser', 'ArgumentParser', ({}, {}), '()', False, 'from argparse import ArgumentParser\n'), ((61, 16, 61, 26), 'nemo.utils.AppState', 'AppState', ({}, {}), '()', False, 'from nemo.utils import AppState, logging\n'), ((63, 14, 63, 59), 'pytorch_lightning.trainer.trainer.Trainer', 'Trainer', (), '', False, 'from pytorch_lightning.trainer.trainer import Trainer\n'), ((73, 12, 73, 115), 'nemo.collections.nlp.models.language_modeling.megatron_gpt_model.MegatronGPTModel.load_from_checkpoint', 'MegatronGPTModel.load_from_checkpoint', (), '', False, 'from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel\n'), ((74, 36, 74, 61), 'nemo.collections.nlp.parts.nlp_overrides.NLPSaveRestoreConnector', 'NLPSaveRestoreConnector', ({}, {}), '()', False, 'from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector\n'), ((76, 4, 76, 63), 'nemo.utils.logging.info', 'logging.info', ({(76, 17, 76, 62): 'f"""NeMo model saved to: {args.nemo_file_path}"""'}, {}), "(f'NeMo model saved to: {args.nemo_file_path}')", False, 'from nemo.utils import AppState, logging\n'), ((82, 4, 82, 76), 'torch.multiprocessing.spawn', 'mp.spawn', (), '', True, 'import torch.multiprocessing as mp\n'), ((69, 26, 69, 107), 'os.path.join', 'os.path.join', ({(69, 39, 69, 61): 'args.checkpoint_folder', (69, 63, 69, 84): 'f"""mp_rank_{rank:02d}"""', (69, 86, 69, 106): 'args.checkpoint_name'}, {}), "(args.checkpoint_folder, f'mp_rank_{rank:02d}', args.\n checkpoint_name)", False, 'import os\n'), ((71, 26, 71, 84), 'os.path.join', 'os.path.join', ({(71, 39, 71, 61): 'args.checkpoint_folder', (71, 63, 71, 83): 'args.checkpoint_name'}, {}), '(args.checkpoint_folder, args.checkpoint_name)', False, 'import os\n')] |
dixler/pulumi-aws | sdk/python/pulumi_aws/apigateway/api_key.py | 88838ed6d412c092717a916b0b5b154f68226c3a | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class ApiKey(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
Amazon Resource Name (ARN)
"""
created_date: pulumi.Output[str]
"""
The creation date of the API key
"""
description: pulumi.Output[str]
"""
The API key description. Defaults to "Managed by Pulumi".
"""
enabled: pulumi.Output[bool]
"""
Specifies whether the API key can be used by callers. Defaults to `true`.
"""
last_updated_date: pulumi.Output[str]
"""
The last update date of the API key
"""
name: pulumi.Output[str]
"""
The name of the API key
"""
tags: pulumi.Output[dict]
"""
Key-value mapping of resource tags
"""
value: pulumi.Output[str]
"""
The value of the API key. If not specified, it will be automatically generated by AWS on creation.
"""
def __init__(__self__, resource_name, opts=None, description=None, enabled=None, name=None, tags=None, value=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an API Gateway API Key.
> **NOTE:** Since the API Gateway usage plans feature was launched on August 11, 2016, usage plans are now **required** to associate an API key with an API stage.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The API key description. Defaults to "Managed by Pulumi".
:param pulumi.Input[bool] enabled: Specifies whether the API key can be used by callers. Defaults to `true`.
:param pulumi.Input[str] name: The name of the API key
:param pulumi.Input[dict] tags: Key-value mapping of resource tags
:param pulumi.Input[str] value: The value of the API key. If not specified, it will be automatically generated by AWS on creation.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/api_gateway_api_key.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if description is None:
description = 'Managed by Pulumi'
__props__['description'] = description
__props__['enabled'] = enabled
__props__['name'] = name
__props__['tags'] = tags
__props__['value'] = value
__props__['arn'] = None
__props__['created_date'] = None
__props__['last_updated_date'] = None
super(ApiKey, __self__).__init__(
'aws:apigateway/apiKey:ApiKey',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, created_date=None, description=None, enabled=None, last_updated_date=None, name=None, tags=None, value=None):
"""
Get an existing ApiKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] created_date: The creation date of the API key
:param pulumi.Input[str] description: The API key description. Defaults to "Managed by Pulumi".
:param pulumi.Input[bool] enabled: Specifies whether the API key can be used by callers. Defaults to `true`.
:param pulumi.Input[str] last_updated_date: The last update date of the API key
:param pulumi.Input[str] name: The name of the API key
:param pulumi.Input[dict] tags: Key-value mapping of resource tags
:param pulumi.Input[str] value: The value of the API key. If not specified, it will be automatically generated by AWS on creation.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/api_gateway_api_key.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["created_date"] = created_date
__props__["description"] = description
__props__["enabled"] = enabled
__props__["last_updated_date"] = last_updated_date
__props__["name"] = name
__props__["tags"] = tags
__props__["value"] = value
return ApiKey(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [((62, 12, 62, 87), 'warnings.warn', 'warnings.warn', ({(62, 26, 62, 66): '"""explicit use of __name__ is deprecated"""', (62, 68, 62, 86): 'DeprecationWarning'}, {}), "('explicit use of __name__ is deprecated', DeprecationWarning)", False, 'import warnings\n'), ((65, 12, 65, 107), 'warnings.warn', 'warnings.warn', ({(65, 26, 65, 86): '"""explicit use of __opts__ is deprecated, use \'opts\' instead"""', (65, 88, 65, 106): 'DeprecationWarning'}, {}), '("explicit use of __opts__ is deprecated, use \'opts\' instead",\n DeprecationWarning)', False, 'import warnings\n'), ((68, 19, 68, 43), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ({}, {}), '()', False, 'import pulumi\n'), ((114, 50, 114, 79), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', (), '', False, 'import pulumi\n')] |
jwarner308/SROMPy | SROMPy/optimize/ObjectiveFunction.py | 12007e4cd99c88446f10974a93050405c5cd925b | # Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.
# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
from SROMPy.target import RandomVector
from SROMPy.target.RandomEntity import RandomEntity
class ObjectiveFunction:
"""
Defines the objective function for optimizing SROM parameters. Calculates
errors between the statistics of the SROM and the target random vector
being model by it.
Will create objective function for optimization library (e.g. scipy) that
essentially wraps this class's evaluate function
"""
def __init__(self, srom, target, obj_weights=None, error='mean',
max_moment=5, num_cdf_grid_points=100):
"""
Initialize objective function. Pass in SROM & target random vector
objects that have been previously initialized. Objective function
calculates the errors between the statistics of this SROM and the
target random vector (these objects must have compute_moments,CDF,
corr_mat functions defined).
inputs:
-SROM - initialized SROM object
-targetRV - initialized RandomVector object (either
AnalyticRandomVector or SampleRandomVector) with same
dimension as SROM
-obj_weights - array of floats defining the relative weight of the
terms in the objective function. Terms are error in moments,
CDFs, and correlation matrix in that order. Default is equal
weights ([1.0,1.0,1.0])
-error - string 'mean','max', or 'sse' defining how error is defined
between the statistics of the SROM & target
-max_moment - int, max order to evaluate moment errors up to
-num_cdf_grid_points - int, # pts to evaluate CDF errors on
"""
self.__test_init_params(srom, target, obj_weights, error,
max_moment, num_cdf_grid_points)
self._SROM = srom
self._target = target
self._x_grid = None
# Generate grids for evaluating CDFs based on target RV's range
self.generate_cdf_grids(num_cdf_grid_points)
self._metric = error.upper()
self._max_moment = max_moment
def get_moment_error(self, samples, probabilities):
"""
Returns moment error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_moment_error()
def get_cdf_error(self, samples, probabilities):
"""
Returns CDF error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_cdf_error()
def get_corr_error(self, samples, probabilities):
"""
Returns correlation error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_correlation_error()
def evaluate(self, samples, probabilities):
"""
Evaluates the objective function for the specified SROM samples &
probabilities. Calculates errrors in statistics between SROM/target
"""
error = 0.0
        # Set the SROM to the current samples/probabilities before computing stats.
self._SROM.set_params(samples, probabilities)
if self._weights[0] > 0.0:
cdf_error = self.compute_cdf_error()
error += cdf_error * self._weights[0]
if self._weights[1] > 0.0:
moment_error = self.compute_moment_error()
error += moment_error * self._weights[1]
if self._weights[2] > 0.0:
corr_error = self.compute_correlation_error()
error += corr_error * self._weights[2]
return error
def compute_moment_error(self):
"""
Calculate error in moments between SROM & target
"""
srom_moments = self._SROM.compute_moments(self._max_moment)
target_moments = self._target.compute_moments(self._max_moment)
# Reshape to 2D if returned as 1D for scalar RV.
if len(target_moments.shape) == 1:
target_moments = target_moments.reshape((self._max_moment, 1))
# Prevent divide by zero.
zero_indices = np.where(np.abs(target_moments) <= 1e-12)[0]
target_moments[zero_indices] = 1.0
# Squared relative difference:
if self._metric == "SSE":
rel_diffs = ((srom_moments-target_moments)/target_moments)**2.0
error = 0.5*np.sum(rel_diffs)
# Max absolute value:
elif self._metric == "MAX":
diffs = np.abs(srom_moments - target_moments)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_moments - target_moments)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def compute_cdf_error(self):
"""
Calculate error in CDFs between SROM & target at pts in x_grid
"""
srom_cdfs = self._SROM.compute_cdf(self._x_grid)
target_cdfs = self._target.compute_cdf(self._x_grid)
# Check for 0 cdf values to prevent divide by zero.
nonzero_indices = np.where(target_cdfs[:, 0] > 0)[0]
srom_cdfs = srom_cdfs[nonzero_indices, :]
target_cdfs = target_cdfs[nonzero_indices, :]
if self._metric == "SSE":
squared_diffs = (srom_cdfs - target_cdfs)**2.0
rel_diffs = squared_diffs / target_cdfs**2.0
error = 0.5*np.sum(rel_diffs)
elif self._metric == "MAX":
diffs = np.abs(srom_cdfs - target_cdfs)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_cdfs - target_cdfs)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def compute_correlation_error(self):
"""
Calculate error in correlation matrix between SROM & target
"""
# Neglect for 1D random variable:
if self._target._dim == 1:
return 0.0
srom_corr = self._SROM.compute_corr_mat()
target_corr = self._target.compute_correlation_matrix()
if self._metric == "SSE":
squared_diffs = (srom_corr - target_corr)**2.0
rel_diffs = squared_diffs / target_corr**2.0
error = 0.5*np.sum(rel_diffs)
elif self._metric == "MAX":
diffs = np.abs(srom_corr - target_corr)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_corr - target_corr)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def generate_cdf_grids(self, num_cdf_grid_points):
"""
Generate numerical grids for evaluating the CDF errors based on the
range of the target random vector. Create x_grid member variable with
num_cdf_grid_points along each dimension of the random vector.
"""
self._x_grid = np.zeros((num_cdf_grid_points, self._target._dim))
for i in range(self._target._dim):
grid = np.linspace(self._target.mins[i],
self._target.maxs[i],
num_cdf_grid_points)
self._x_grid[:, i] = grid
def __test_init_params(self, srom, target, obj_weights, error, max_moment,
num_cdf_grid_points):
"""
Due to the large numbers of parameters passed into __init__() that
need to be tested, the testing is done in this utility function
instead of __init__().
"""
# Test target.
if not (isinstance(target, RandomEntity)):
raise TypeError("target must inherit from RandomEntity.")
# Test srom.
from SROMPy.srom import SROM
if not isinstance(srom, SROM):
raise TypeError("srom must be of type SROM.")
# Ensure srom and target have same dimensions if target is RandomVector.
if isinstance(target, RandomVector):
if target._dim != srom._dim:
raise ValueError("target and srom must have same dimensions.")
# Test obj_weights.
if obj_weights is not None:
if isinstance(obj_weights, list):
obj_weights = np.array(obj_weights)
if not isinstance(obj_weights, np.ndarray):
raise TypeError("obj_weights must be of type ndarray or list.")
if len(obj_weights.shape) != 1:
raise ValueError("obj_weights must be a one dimensional array.")
if obj_weights.shape[0] != 3:
raise ValueError("obj_weights must have exactly 3 elements.")
if np.min(obj_weights) < 0.:
raise ValueError("obj_weights cannot be less than zero.")
self._weights = obj_weights
else:
self._weights = np.ones((3,))
# Test error function name.
if not isinstance(error, str):
raise TypeError("error must be a string: 'MEAN', 'MAX', or 'SSE'.")
if error.upper() not in ["MEAN", "MAX", "SSE"]:
raise ValueError("error must be either 'mean', 'max', or 'SSE'.")
# Test max_moment.
if not isinstance(max_moment, int):
raise TypeError("max_moment must be a positive integer.")
if max_moment < 1:
raise ValueError("max_moment must be a positive integer.")
# Test num_cdf_grid_points.
if not isinstance(num_cdf_grid_points, int):
raise TypeError("cf_grid_pts must be a positive integer.")
if num_cdf_grid_points < 1:
raise ValueError("num_cdf_grid_points must be a positive integer.")
| [((214, 23, 214, 73), 'numpy.zeros', 'np.zeros', ({(214, 32, 214, 72): '(num_cdf_grid_points, self._target._dim)'}, {}), '((num_cdf_grid_points, self._target._dim))', True, 'import numpy as np\n'), ((161, 26, 161, 57), 'numpy.where', 'np.where', ({(161, 35, 161, 56): '(target_cdfs[:, (0)] > 0)'}, {}), '(target_cdfs[:, (0)] > 0)', True, 'import numpy as np\n'), ((217, 19, 219, 51), 'numpy.linspace', 'np.linspace', ({(217, 31, 217, 51): 'self._target.mins[i]', (218, 31, 218, 51): 'self._target.maxs[i]', (219, 31, 219, 50): 'num_cdf_grid_points'}, {}), '(self._target.mins[i], self._target.maxs[i], num_cdf_grid_points)', True, 'import numpy as np\n'), ((264, 28, 264, 41), 'numpy.ones', 'np.ones', ({(264, 36, 264, 40): '(3,)'}, {}), '((3,))', True, 'import numpy as np\n'), ((138, 24, 138, 41), 'numpy.sum', 'np.sum', ({(138, 31, 138, 40): 'rel_diffs'}, {}), '(rel_diffs)', True, 'import numpy as np\n'), ((142, 20, 142, 57), 'numpy.abs', 'np.abs', ({(142, 27, 142, 56): 'srom_moments - target_moments'}, {}), '(srom_moments - target_moments)', True, 'import numpy as np\n'), ((143, 20, 143, 33), 'numpy.max', 'np.max', ({(143, 27, 143, 32): 'diffs'}, {}), '(diffs)', True, 'import numpy as np\n'), ((168, 24, 168, 41), 'numpy.sum', 'np.sum', ({(168, 31, 168, 40): 'rel_diffs'}, {}), '(rel_diffs)', True, 'import numpy as np\n'), ((170, 20, 170, 51), 'numpy.abs', 'np.abs', ({(170, 27, 170, 50): 'srom_cdfs - target_cdfs'}, {}), '(srom_cdfs - target_cdfs)', True, 'import numpy as np\n'), ((171, 20, 171, 33), 'numpy.max', 'np.max', ({(171, 27, 171, 32): 'diffs'}, {}), '(diffs)', True, 'import numpy as np\n'), ((195, 24, 195, 41), 'numpy.sum', 'np.sum', ({(195, 31, 195, 40): 'rel_diffs'}, {}), '(rel_diffs)', True, 'import numpy as np\n'), ((197, 20, 197, 51), 'numpy.abs', 'np.abs', ({(197, 27, 197, 50): 'srom_corr - target_corr'}, {}), '(srom_corr - target_corr)', True, 'import numpy as np\n'), ((198, 20, 198, 33), 'numpy.max', 'np.max', ({(198, 27, 198, 32): 'diffs'}, {}), '(diffs)', True, 'import numpy as np\n'), ((249, 30, 249, 51), 'numpy.array', 'np.array', ({(249, 39, 249, 50): 'obj_weights'}, {}), '(obj_weights)', True, 'import numpy as np\n'), ((260, 15, 260, 34), 'numpy.min', 'np.min', ({(260, 22, 260, 33): 'obj_weights'}, {}), '(obj_weights)', True, 'import numpy as np\n'), ((132, 32, 132, 54), 'numpy.abs', 'np.abs', ({(132, 39, 132, 53): 'target_moments'}, {}), '(target_moments)', True, 'import numpy as np\n'), ((145, 20, 145, 57), 'numpy.abs', 'np.abs', ({(145, 27, 145, 56): 'srom_moments - target_moments'}, {}), '(srom_moments - target_moments)', True, 'import numpy as np\n'), ((146, 20, 146, 34), 'numpy.mean', 'np.mean', ({(146, 28, 146, 33): 'diffs'}, {}), '(diffs)', True, 'import numpy as np\n'), ((173, 20, 173, 51), 'numpy.abs', 'np.abs', ({(173, 27, 173, 50): 'srom_cdfs - target_cdfs'}, {}), '(srom_cdfs - target_cdfs)', True, 'import numpy as np\n'), ((174, 20, 174, 34), 'numpy.mean', 'np.mean', ({(174, 28, 174, 33): 'diffs'}, {}), '(diffs)', True, 'import numpy as np\n'), ((200, 20, 200, 51), 'numpy.abs', 'np.abs', ({(200, 27, 200, 50): 'srom_corr - target_corr'}, {}), '(srom_corr - target_corr)', True, 'import numpy as np\n'), ((201, 20, 201, 34), 'numpy.mean', 'np.mean', ({(201, 28, 201, 33): 'diffs'}, {}), '(diffs)', True, 'import numpy as np\n')] |
vasili-v/distcovery | test/utils.py | e07882d55ebe2e4fd78a720764803e6b3e8cbc7d | import os
import errno
import sys
def mock_directory_tree(tree):
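    # `tree` maps path-component tuples to either a tuple of child names (a
    # directory) or None (a file); keys are re-joined with os.path.join below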
tree = dict([(os.path.join(*key), value) \
for key, value in tree.iteritems()])
def listdir(path):
try:
names = tree[path]
except KeyError:
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)
if names is None:
raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
return names
def isfile(path):
try:
item = tree[path]
except KeyError:
return False
return item is None
def isdir(path):
try:
item = tree[path]
except KeyError:
return False
return item is not None
return listdir, isfile, isdir
class PreserveOs(object):
def setUp(self):
super(PreserveOs, self).setUp()
self.__listdir = os.listdir
self.__isfile = os.path.isfile
self.__isdir = os.path.isdir
def tearDown(self):
os.path.isdir = self.__isdir
os.path.isfile = self.__isfile
os.listdir = self.__listdir
super(PreserveOs, self).tearDown()
def full_test_tree(self):
tree = {('.',): ('__init__.py', 'test_first.py', 'test_second.py',
'test_sub_first', 't_sub_first', 'test_sub_third'),
('.', '__init__.py'): None,
('.', 'test_first.py'): None,
('.', 'test_second.py'): None,
('.', 'test_sub_first'): ('__init__.py', 'test_sub_first.py'),
('.', 'test_sub_first', '__init__.py'): None,
('.', 'test_sub_first', 'test_sub_first.py'): None,
('.', 't_sub_first'): ('__init__.py', 'test_sub_first.py'),
('.', 't_sub_first', '__init__.py'): None,
('.', 't_sub_first', 'test_sub_first.py'): None,
('.', 'test_sub_second'): ('test_sub_first.py',),
('.', 'test_sub_second', 'test_sub_first.py'): None,
('.', 'test_sub_third'): ('__init__.py', 'test_sub_first.py',
'test_sub_second'),
('.', 'test_sub_third', '__init__.py'): None,
('.', 'test_sub_third', 'test_sub_first.py'): None,
('.', 'test_sub_third', 'test_sub_second'): \
('__init__.py', 'test_sub_first.py', 't_sub_second.py'),
('.', 'test_sub_third', 'test_sub_second', '__init__.py'): None,
('.', 'test_sub_third', 'test_sub_second',
'test_sub_first.py'): None,
('.', 'test_sub_third', 'test_sub_second',
't_sub_second.py'): None}
os.listdir, os.path.isfile, os.path.isdir = mock_directory_tree(tree)
self.expected_content = {'first': 'test_first',
'second': 'test_second',
'sub_first': 'test_sub_first',
'sub_first.sub_first': \
'test_sub_first.test_sub_first',
'sub_third': 'test_sub_third',
'sub_third.sub_first': \
'test_sub_third.test_sub_first',
'sub_third.sub_second': \
'test_sub_third.test_sub_second',
'sub_third.sub_second.sub_first': \
'test_sub_third.test_sub_second.' \
'test_sub_first'}
class ImportTrash(object):
def setUp(self):
self.modules_trash = []
self.meta_path_trash = []
def tearDown(self):
for item in self.meta_path_trash:
if item in sys.meta_path:
sys.meta_path.remove(item)
for name in self.modules_trash:
if name in sys.modules:
del sys.modules[name]
| [((6, 18, 6, 36), 'os.path.join', 'os.path.join', ({(6, 31, 6, 35): '*key'}, {}), '(*key)', False, 'import os\n'), ((16, 41, 16, 67), 'os.strerror', 'os.strerror', ({(16, 53, 16, 66): 'errno.ENOTDIR'}, {}), '(errno.ENOTDIR)', False, 'import os\n'), ((103, 16, 103, 42), 'sys.meta_path.remove', 'sys.meta_path.remove', ({(103, 37, 103, 41): 'item'}, {}), '(item)', False, 'import sys\n'), ((13, 40, 13, 65), 'os.strerror', 'os.strerror', ({(13, 52, 13, 64): 'errno.ENOENT'}, {}), '(errno.ENOENT)', False, 'import os\n')] |
adrianjhpc/spack | var/spack/repos/builtin/packages/perl-ipc-run/package.py | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlIpcRun(PerlPackage):
"""IPC::Run allows you to run and interact with child processes using
files, pipes, and pseudo-ttys. Both system()-style and scripted usages are
supported and may be mixed. Likewise, functional and OO API styles are both
supported and may be mixed."""
homepage = "https://metacpan.org/pod/IPC::Run"
url = "https://cpan.metacpan.org/authors/id/T/TO/TODDR/IPC-Run-20180523.0.tar.gz"
version('20180523.0', sha256='3850d7edf8a4671391c6e99bb770698e1c45da55b323b31c76310913349b6c2f')
depends_on('perl-io-tty', type=('build', 'run'))
depends_on('perl-readonly', type='build')
| [] |
WillAyd/tabcmd | tests/test_parser_create_site_users.py | 1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15 | import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
import argparse
from tabcmd.parsers.create_site_users_parser import CreateSiteUsersParser
from .common_setup import *
commandname = 'createsiteusers'
class CreateSiteUsersParserTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser_under_test, manager, mock_command = initialize_test_pieces(commandname)
CreateSiteUsersParser.create_site_user_parser(manager, mock_command)
def test_create_site_users_parser_users_file(self):
with mock.patch('builtins.open', mock.mock_open(read_data='test')) as open_file:
mock_args = [commandname, "users.csv"]
args = self.parser_under_test.parse_args(mock_args)
open_file.assert_called_with('users.csv', 'r', -1, None, None)
def test_create_site_user_parser_missing_arguments(self):
mock_args = [commandname]
with self.assertRaises(SystemExit):
args = self.parser_under_test.parse_args(mock_args)
def test_create_site_user_parser_role(self):
with mock.patch('builtins.open', mock.mock_open(read_data='test')):
mock_args = [commandname, "users.csv", '--site', 'site-name']
args = self.parser_under_test.parse_args(mock_args)
assert args.site == 'site-name', args
| [((19, 8, 19, 76), 'tabcmd.parsers.create_site_users_parser.CreateSiteUsersParser.create_site_user_parser', 'CreateSiteUsersParser.create_site_user_parser', ({(19, 54, 19, 61): 'manager', (19, 63, 19, 75): 'mock_command'}, {}), '(manager, mock_command)', False, 'from tabcmd.parsers.create_site_users_parser import CreateSiteUsersParser\n'), ((22, 41, 22, 73), 'mock.mock_open', 'mock.mock_open', (), '', False, 'import mock\n'), ((33, 41, 33, 73), 'mock.mock_open', 'mock.mock_open', (), '', False, 'import mock\n')] |
iarlyy/secretsmanager-env | secretsmanager_env.py | 3a34a4e9561e4651fa2975ff6f32b00ef0c0ca73 | #!/usr/bin/env python
import argparse
import json
import os
import boto3
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''\
Output following the defined format.
Options are:
dotenv - dotenv style [default]
export - shell export style
stdout - secret plain value style'''
)
parser.add_argument(
'--output',
default='dotenv',
choices=['stdout', 'dotenv', 'export'],
)
args = parser.parse_args()
try:
secret_id = os.environ.get("ENV_SECRET_NAME")
secretsmanager = boto3.client('secretsmanager')
secret_values = json.loads(secretsmanager.get_secret_value(SecretId=secret_id)['SecretString'])
except:
print('Error getting secret')
raise
if args.output == 'export':
prefix = 'export '
else:
prefix = ''
if args.output != 'stdout':
for envvar in secret_values:
print(prefix+envvar+"=$'"+secret_values[envvar].replace('\\n', '\n')+"'")
else:
    print(json.dumps(secret_values, indent=2, sort_keys=True))
| [((9, 9, 17, 6), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((27, 16, 27, 49), 'os.environ.get', 'os.environ.get', ({(27, 31, 27, 48): '"""ENV_SECRET_NAME"""'}, {}), "('ENV_SECRET_NAME')", False, 'import os\n'), ((28, 21, 28, 51), 'boto3.client', 'boto3.client', ({(28, 34, 28, 50): '"""secretsmanager"""'}, {}), "('secretsmanager')", False, 'import boto3\n')] |
juandarr/ProjectEuler | 109.py | 951705ac62f550d7fbecdc3f35ab8c38b53b9225 | """
Finds the number of distinct ways a player can checkout a score less than 100
Author: Juan Rios
"""
import math
def checkout_solutions(checkout,sequence,idx_sq,d):
'''
returns the number of solution for a given checkout value
'''
counter = 0
for double in d:
if double>checkout:
break
res = checkout-double
if res==0:
counter +=1
continue
if res<=60:
if res in idx_sq:
index = idx_sq[res]
else:
index = len(sequence)-1
while res>sequence[index]:
index -=1
else:
index = len(sequence)-1
for idx in range(index,-1,-1):
a = sequence[idx]
if a==res:
counter+=1
continue
for idx2 in range(idx,-1,-1):
if a+sequence[idx2]==res:
counter +=1
elif a+sequence[idx2]<res:
break
return counter
def darts_checkout(limit_value):
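    # s: single segments 1-20 plus the outer bull (25); d: doubles plus the
    # bull's-eye (50); t: trebles; `sequence` is every score one dart can make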
s = [i for i in range(1,21)]+[25]
d = [2*i for i in range(1,21)]+[50]
t = [3*i for i in range(1,21)]
sequence = sorted(s+d+t)
idx_sq = {}
for idx in range(len(sequence)-1):
if sequence[idx]!=sequence[idx+1]:
idx_sq[sequence[idx]]=idx
idx_sq[sequence[-1]]=len(sequence)-1
n = limit_value
total = 0
for checkout in range(1,limit_value+1):
total += checkout_solutions(checkout,sequence,idx_sq,d)
return total
if __name__ == "__main__":
limit_value=99
print('The number of distinct ways a player can checkout a score less than 100 is {0}'.format(darts_checkout(limit_value))) | [] |
vjeronymo2/tevatron | src/tevatron/tevax/loss.py | 7235b0823b5c3cdf1c8ce8f67cb5f1209218086a | import jax.numpy as jnp
from jax import lax
import optax
import chex
def _onehot(labels: chex.Array, num_classes: int) -> chex.Array:
x = labels[..., None] == jnp.arange(num_classes).reshape((1,) * labels.ndim + (-1,))
x = lax.select(x, jnp.ones(x.shape), jnp.zeros(x.shape))
return x.astype(jnp.float32)
def p_contrastive_loss(ss: chex.Array, tt: chex.Array, axis: str = 'device') -> chex.Array:
per_shard_targets = tt.shape[0]
per_sample_targets = int(tt.shape[0] / ss.shape[0])
labels = jnp.arange(0, per_shard_targets, per_sample_targets) + per_shard_targets * lax.axis_index(axis)
tt = lax.all_gather(tt, axis).reshape((-1, ss.shape[-1]))
scores = jnp.dot(ss, jnp.transpose(tt))
return optax.softmax_cross_entropy(scores, _onehot(labels, scores.shape[-1]))
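# Hypothetical usage sketch: the all_gather above only makes sense when this
# function runs per device under pmap with a matching axis name (array names
# below are assumptions):
#
#     loss = jax.pmap(p_contrastive_loss, axis_name='device')(q_reps, p_reps)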
| [((9, 22, 9, 39), 'jax.numpy.ones', 'jnp.ones', ({(9, 31, 9, 38): 'x.shape'}, {}), '(x.shape)', True, 'import jax.numpy as jnp\n'), ((9, 41, 9, 59), 'jax.numpy.zeros', 'jnp.zeros', ({(9, 51, 9, 58): 'x.shape'}, {}), '(x.shape)', True, 'import jax.numpy as jnp\n'), ((16, 13, 16, 65), 'jax.numpy.arange', 'jnp.arange', ({(16, 24, 16, 25): '(0)', (16, 27, 16, 44): 'per_shard_targets', (16, 46, 16, 64): 'per_sample_targets'}, {}), '(0, per_shard_targets, per_sample_targets)', True, 'import jax.numpy as jnp\n'), ((19, 25, 19, 42), 'jax.numpy.transpose', 'jnp.transpose', ({(19, 39, 19, 41): 'tt'}, {}), '(tt)', True, 'import jax.numpy as jnp\n'), ((16, 88, 16, 108), 'jax.lax.axis_index', 'lax.axis_index', ({(16, 103, 16, 107): 'axis'}, {}), '(axis)', False, 'from jax import lax\n'), ((18, 9, 18, 33), 'jax.lax.all_gather', 'lax.all_gather', ({(18, 24, 18, 26): 'tt', (18, 28, 18, 32): 'axis'}, {}), '(tt, axis)', False, 'from jax import lax\n'), ((8, 29, 8, 52), 'jax.numpy.arange', 'jnp.arange', ({(8, 40, 8, 51): 'num_classes'}, {}), '(num_classes)', True, 'import jax.numpy as jnp\n')] |
kinnala/gammy | setup.py | 85237d424001f77f296d724c95c8dec5803a8e1e | import os
from setuptools import setup, find_packages
import versioneer
if __name__ == "__main__":
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
meta = {}
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, 'gammy', '_meta.py')) as fp:
exec(fp.read(), meta)
setup(
name = "gammy",
version = versioneer.get_version(),
author = meta["__author__"],
author_email = meta["__contact__"],
description = "Generalized additive models with a Bayesian twist",
url = "https://github.com/malmgrek/Gammy",
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
install_requires = [
"attrs",
"bayespy",
"h5py",
"matplotlib",
"numpy",
"scipy"
],
extras_require = {
"dev": [
"versioneer",
"pytest",
"hypothesis",
],
},
keywords = [
"Statistical modeling",
"Bayesian statistics",
"Machine learning",
],
classifiers = [
"Programming Language :: Python :: 3 :: Only",
"Development Status :: 1 - Planning",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: {0}".format(meta["__license__"]),
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
long_description = read('README.md'),
long_description_content_type = "text/markdown",
) | [((12, 31, 12, 56), 'os.path.abspath', 'os.path.abspath', ({(12, 47, 12, 55): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((13, 14, 13, 57), 'os.path.join', 'os.path.join', ({(13, 27, 13, 35): 'base_dir', (13, 37, 13, 44): '"""gammy"""', (13, 46, 13, 56): '"""_meta.py"""'}, {}), "(base_dir, 'gammy', '_meta.py')", False, 'import os\n'), ((19, 23, 19, 47), 'versioneer.get_version', 'versioneer.get_version', ({}, {}), '()', False, 'import versioneer\n'), ((24, 23, 24, 48), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ({}, {}), '()', False, 'import versioneer\n'), ((25, 23, 25, 38), 'setuptools.find_packages', 'find_packages', ({}, {}), '()', False, 'from setuptools import setup, find_packages\n'), ((9, 33, 9, 58), 'os.path.dirname', 'os.path.dirname', ({(9, 49, 9, 57): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
gabrielstork/fast-ml | fast-ml/main.py | ce93c1263970ce7b958e1c3e932c70909bcc0e31 | import root
if __name__ == '__main__':
window = root.Root()
window.mainloop()
| [((5, 13, 5, 24), 'root.Root', 'root.Root', ({}, {}), '()', False, 'import root\n')] |
QualiChain/qualichain_backend | application/recommendations/__init__.py | cc6dbf1ae5d09e8d01cccde94326563b25d28b58 | from flask import Blueprint
recommendation_blueprint = Blueprint('recommendations', __name__)
from application.recommendations import routes | [((4, 27, 4, 65), 'flask.Blueprint', 'Blueprint', ({(4, 37, 4, 54): '"""recommendations"""', (4, 56, 4, 64): '__name__'}, {}), "('recommendations', __name__)", False, 'from flask import Blueprint\n')] |
XenonLamb/higan | predictors/scene_predictor.py | 6e7b47f91df23d8d6075d95921e664c9fa4f1306 | # python 3.7
"""Predicts the scene category, attribute."""
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from .base_predictor import BasePredictor
from .scene_wideresnet import resnet18
__all__ = ['ScenePredictor']
NUM_CATEGORIES = 365
NUM_ATTRIBUTES = 102
FEATURE_DIM = 512
class ScenePredictor(BasePredictor):
"""Defines the predictor class for scene analysis."""
def __init__(self):
super().__init__('scene')
def build(self):
self.net = resnet18(num_classes=NUM_CATEGORIES)
def load(self):
# Load category labels.
self.check_attr('category_anno_path')
self.category_name_to_idx = {}
self.category_idx_to_name = {}
with open(self.category_anno_path, 'r') as f:
for line in f:
name, idx = line.strip().split(' ')
name = name[3:].replace('/', '__')
idx = int(idx)
self.category_name_to_idx[name] = idx
self.category_idx_to_name[idx] = name
assert len(self.category_name_to_idx) == NUM_CATEGORIES
assert len(self.category_idx_to_name) == NUM_CATEGORIES
# Load attribute labels.
self.check_attr('attribute_anno_path')
self.attribute_name_to_idx = {}
self.attribute_idx_to_name = {}
with open(self.attribute_anno_path, 'r') as f:
for idx, line in enumerate(f):
name = line.strip().replace(' ', '_')
self.attribute_name_to_idx[name] = idx
self.attribute_idx_to_name[idx] = name
assert len(self.attribute_name_to_idx) == NUM_ATTRIBUTES
assert len(self.attribute_idx_to_name) == NUM_ATTRIBUTES
# Transform for input images.
self.transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Load pre-trained weights for category prediction.
checkpoint = torch.load(self.weight_path,
map_location=lambda storage, loc: storage)
state_dict = {k.replace('module.', ''): v
for k, v in checkpoint['state_dict'].items()}
self.net.load_state_dict(state_dict)
fc_weight = list(self.net.parameters())[-2].data.numpy()
fc_weight[fc_weight < 0] = 0
# Load additional weights for attribute prediction.
self.check_attr('attribute_additional_weight_path')
self.attribute_weight = np.load(self.attribute_additional_weight_path)
assert self.attribute_weight.shape == (NUM_ATTRIBUTES, FEATURE_DIM)
def _predict(self, images):
if not isinstance(images, np.ndarray):
raise ValueError(f'Images should be with type `numpy.ndarray`!')
if images.dtype != np.uint8:
raise ValueError(f'Images should be with dtype `numpy.uint8`!')
if not (len(images.shape) == 4 and
0 < images.shape[0] <= self.batch_size and
images.shape[3] == self.image_channels):
      raise ValueError(f'Images should be with shape [batch_size, height, '
                       f'width, channel], where `batch_size` is no larger than '
f'{self.batch_size}, and `channel` equals to '
f'{self.image_channels}!\n'
f'But {images.shape} received!')
xs = [self.transform(Image.fromarray(img)).unsqueeze(0) for img in images]
xs = torch.cat(xs, dim=0).to(self.run_device)
logits, features = self.net(xs)
category_scores = self.get_value(F.softmax(logits, dim=1))
features = self.get_value(features).squeeze(axis=(2, 3))
attribute_scores = features.dot(self.attribute_weight.T)
assert (len(category_scores.shape) == 2 and
category_scores.shape[1] == NUM_CATEGORIES)
assert (len(attribute_scores.shape) == 2 and
attribute_scores.shape[1] == NUM_ATTRIBUTES)
results = {
'category': category_scores,
'attribute': attribute_scores,
}
if self.use_cuda:
torch.cuda.empty_cache()
return results
def predict(self, images, **kwargs):
return self.batch_run(images, self._predict)
| [((64, 17, 65, 70), 'torch.load', 'torch.load', (), '', False, 'import torch\n'), ((74, 28, 74, 74), 'numpy.load', 'np.load', ({(74, 36, 74, 73): 'self.attribute_additional_weight_path'}, {}), '(self.attribute_additional_weight_path)', True, 'import numpy as np\n'), ((95, 37, 95, 61), 'torch.nn.functional.softmax', 'F.softmax', (), '', True, 'import torch.nn.functional as F\n'), ((110, 6, 110, 30), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ({}, {}), '()', False, 'import torch\n'), ((58, 8, 58, 37), 'torchvision.transforms.Resize', 'transforms.Resize', ({(58, 26, 58, 36): '(224, 224)'}, {}), '((224, 224))', True, 'import torchvision.transforms as transforms\n'), ((59, 8, 59, 29), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', True, 'import torchvision.transforms as transforms\n'), ((60, 8, 60, 74), 'torchvision.transforms.Normalize', 'transforms.Normalize', ({(60, 29, 60, 50): '[0.485, 0.456, 0.406]', (60, 52, 60, 73): '[0.229, 0.224, 0.225]'}, {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])', True, 'import torchvision.transforms as transforms\n'), ((92, 9, 92, 29), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((91, 25, 91, 45), 'PIL.Image.fromarray', 'Image.fromarray', ({(91, 41, 91, 44): 'img'}, {}), '(img)', False, 'from PIL import Image\n')] |
jackKiZhu/mypython | python_test.py | 43eac97bec07338ed3b8b9473d4e4fae26f7140c | from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql://root:[email protected]:3306/python_github"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_name = db.Column(db.String(64), unique=True)
user_password = db.Column(db.String(32))
def __repr__(self):
return "用户id:%s 用户名:%s" % (self.id, self.user_name)
@app.route("/", methods=["post", "get"])
def index():
index_meg = ""
if request.method == "POST":
user_name = request.form.get("user_name", "")
user_pwd = request.form.get("user_pwd", "")
if not all([user_name, user_pwd]):
index_meg = "请正确输入信息"
else:
print(request.get_data())
user_name_is_exits = User.query.filter(User.user_name == user_name).first()
if user_name_is_exits:
index_meg = "用户名已存在"
else:
user_obj = User(user_name=user_name, user_password=user_pwd)
db.session.add(user_obj)
db.session.commit()
index_meg = "注册成功"
print("注册成功")
# user_name = request.args.get("user_name", "")
# user_pwd = request.args.get("user_pwd", "")
# user_is_login = User.query.filter_by(user_name=user_name, user_password=user_pwd).first()
# if user_is_login:
# index_meg = "登陆成功"
# print("登陆成功")
# return render_template("login_ok.html", index_meg=index_meg)
# else:
# # index_meg = "登陆失败"
# print("登陆失败")
return render_template("index.html", index_meg=index_meg)
if __name__ == "__main__":
db.drop_all()
db.create_all()
app.run(debug=True)
| [((3, 6, 3, 21), 'flask.Flask', 'Flask', ({(3, 12, 3, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, render_template, request\n'), ((6, 5, 6, 20), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ({(6, 16, 6, 19): 'app'}, {}), '(app)', False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((48, 11, 48, 61), 'flask.render_template', 'render_template', (), '', False, 'from flask import Flask, render_template, request\n'), ((22, 20, 22, 53), 'flask.request.form.get', 'request.form.get', ({(22, 37, 22, 48): '"""user_name"""', (22, 50, 22, 52): '""""""'}, {}), "('user_name', '')", False, 'from flask import Flask, render_template, request\n'), ((23, 19, 23, 51), 'flask.request.form.get', 'request.form.get', ({(23, 36, 23, 46): '"""user_pwd"""', (23, 48, 23, 50): '""""""'}, {}), "('user_pwd', '')", False, 'from flask import Flask, render_template, request\n'), ((27, 18, 27, 36), 'flask.request.get_data', 'request.get_data', ({}, {}), '()', False, 'from flask import Flask, render_template, request\n')] |
iml1111/algorithm-study | src/etc/gec/3.py | f21f6f9f43235248f3496f034a899f2314ab6fcc | from collections import deque
def solution(N, bus_stop):
answer = [[1300 for _ in range(N)] for _ in range(N)]
bus_stop = [(x-1, y-1) for x,y in bus_stop]
q = deque(bus_stop)
for x,y in bus_stop:
answer[x][y] = 0
while q:
x, y = q.popleft()
for nx, ny in ((x-1, y), (x+1, y), (x, y+1), (x, y-1)):
if (
0 <= nx < N and 0 <= ny < N
and answer[nx][ny] > answer[x][y]
):
answer[nx][ny] = answer[x][y] + 1
q.append((nx, ny))
return answer
if __name__ == '__main__':
print(solution(
3, [[1,2],[3,3]],
)) | [((6, 8, 6, 23), 'collections.deque', 'deque', ({(6, 14, 6, 22): 'bus_stop'}, {}), '(bus_stop)', False, 'from collections import deque\n')] |
linshaoyong/leetcode | python/tree/0103_binary_tree_zigzag_level_order_traversal.py | ea052fad68a2fe0cbfa5469398508ec2b776654f | class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def zigzagLevelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
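        # a: nodes on the current level, b/c: nodes and values of the next level,
        # r: collected result, i: level index (odd levels are reversed for zigzag)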
a = [root]
b = []
c = []
r = [[root.val]]
i = 1
while True:
for n in a:
if n.left:
b.append(n.left)
c.append(n.left.val)
if n.right:
b.append(n.right)
c.append(n.right.val)
if not b:
break
else:
a = b
if i & 1 == 1:
c.reverse()
r.append(c)
b = []
c = []
i += 1
return r
def test_zigzag_level_order():
a = TreeNode(3)
b = TreeNode(9)
c = TreeNode(20)
d = TreeNode(15)
e = TreeNode(7)
a.left = b
a.right = c
c.left = d
c.right = e
assert Solution().zigzagLevelOrder(a) == [
[3],
[20, 9],
[15, 7]
]
| [] |
berggren/plaso | plaso/parsers/winreg_plugins/usbstor.py | 2658c80c5076f97a9a27272e73997bde8c39e875 | # -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the USBStor key."""
from __future__ import unicode_literals
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import logger
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
class USBStorEventData(events.EventData):
"""USBStor event data attribute container.
Attributes:
device_type (str): type of USB device.
display_name (str): display name of the USB device.
key_path (str): Windows Registry key path.
parent_id_prefix (str): parent identifier prefix of the USB device.
product (str): product of the USB device.
serial (str): serial number of the USB device.
revision (str): revision number of the USB device.
subkey_name (str): name of the Windows Registry subkey.
vendor (str): vendor of the USB device.
"""
DATA_TYPE = 'windows:registry:usbstor'
def __init__(self):
"""Initializes event data."""
super(USBStorEventData, self).__init__(data_type=self.DATA_TYPE)
self.device_type = None
self.display_name = None
self.key_path = None
self.parent_id_prefix = None
self.product = None
self.revision = None
self.serial = None
# TODO: rename subkey_name to something that closer matches its purpose.
self.subkey_name = None
self.vendor = None
class USBStorPlugin(interface.WindowsRegistryPlugin):
"""USBStor key plugin.
Also see:
http://www.forensicswiki.org/wiki/USB_History_Viewing
"""
NAME = 'windows_usbstor_devices'
DESCRIPTION = 'Parser for USB Plug And Play Manager USBStor Registry Key.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Enum\\USBSTOR')])
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
for subkey in registry_key.GetSubkeys():
subkey_name = subkey.name
name_values = subkey_name.split('&')
number_of_name_values = len(name_values)
# Normally we expect 4 fields here however that is not always the case.
if number_of_name_values != 4:
logger.warning(
'Expected 4 &-separated values in: {0:s}'.format(subkey_name))
event_data = USBStorEventData()
event_data.key_path = registry_key.path
event_data.subkey_name = subkey_name
if number_of_name_values >= 1:
event_data.device_type = name_values[0]
if number_of_name_values >= 2:
event_data.vendor = name_values[1]
if number_of_name_values >= 3:
event_data.product = name_values[2]
if number_of_name_values >= 4:
event_data.revision = name_values[3]
if subkey.number_of_subkeys == 0:
# Time last USB device of this class was first inserted.
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
continue
for device_key in subkey.GetSubkeys():
event_data.serial = device_key.name
friendly_name_value = device_key.GetValueByName('FriendlyName')
if friendly_name_value:
event_data.display_name = friendly_name_value.GetDataAsObject()
# ParentIdPrefix applies to Windows XP Only.
parent_id_prefix_value = device_key.GetValueByName('ParentIdPrefix')
if parent_id_prefix_value:
event_data.parent_id_prefix = parent_id_prefix_value.GetDataAsObject()
# Time last USB device of this class was first inserted.
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
# Win7 - Last Connection.
# Vista/XP - Time of an insert.
event = time_events.DateTimeValuesEvent(
device_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
device_parameter_key = device_key.GetSubkeyByName('Device Parameters')
if device_parameter_key:
event = time_events.DateTimeValuesEvent(
device_parameter_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
log_configuration_key = device_key.GetSubkeyByName('LogConf')
if log_configuration_key:
event = time_events.DateTimeValuesEvent(
log_configuration_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
properties_key = device_key.GetSubkeyByName('Properties')
if properties_key:
event = time_events.DateTimeValuesEvent(
properties_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg.WinRegistryParser.RegisterPlugin(USBStorPlugin)
| [((144, 0, 144, 54), 'plaso.parsers.winreg.WinRegistryParser.RegisterPlugin', 'winreg.WinRegistryParser.RegisterPlugin', ({(144, 40, 144, 53): 'USBStorPlugin'}, {}), '(USBStorPlugin)', False, 'from plaso.parsers import winreg\n'), ((57, 6, 58, 73), 'plaso.parsers.winreg_plugins.interface.WindowsRegistryKeyPathFilter', 'interface.WindowsRegistryKeyPathFilter', ({(58, 10, 58, 72): '"""HKEY_LOCAL_MACHINE\\\\System\\\\CurrentControlSet\\\\Enum\\\\USBSTOR"""'}, {}), "(\n 'HKEY_LOCAL_MACHINE\\\\System\\\\CurrentControlSet\\\\Enum\\\\USBSTOR')", False, 'from plaso.parsers.winreg_plugins import interface\n'), ((94, 16, 95, 75), 'plaso.containers.time_events.DateTimeValuesEvent', 'time_events.DateTimeValuesEvent', ({(95, 12, 95, 36): 'subkey.last_written_time', (95, 38, 95, 74): 'definitions.TIME_DESCRIPTION_WRITTEN'}, {}), '(subkey.last_written_time, definitions.\n TIME_DESCRIPTION_WRITTEN)', False, 'from plaso.containers import time_events\n'), ((112, 16, 113, 75), 'plaso.containers.time_events.DateTimeValuesEvent', 'time_events.DateTimeValuesEvent', ({(113, 12, 113, 36): 'subkey.last_written_time', (113, 38, 113, 74): 'definitions.TIME_DESCRIPTION_WRITTEN'}, {}), '(subkey.last_written_time, definitions.\n TIME_DESCRIPTION_WRITTEN)', False, 'from plaso.containers import time_events\n'), ((118, 16, 119, 79), 'plaso.containers.time_events.DateTimeValuesEvent', 'time_events.DateTimeValuesEvent', ({(119, 12, 119, 40): 'device_key.last_written_time', (119, 42, 119, 78): 'definitions.TIME_DESCRIPTION_WRITTEN'}, {}), '(device_key.last_written_time, definitions.\n TIME_DESCRIPTION_WRITTEN)', False, 'from plaso.containers import time_events\n'), ((124, 18, 126, 51), 'plaso.containers.time_events.DateTimeValuesEvent', 'time_events.DateTimeValuesEvent', ({(125, 14, 125, 52): 'device_parameter_key.last_written_time', (126, 14, 126, 50): 'definitions.TIME_DESCRIPTION_WRITTEN'}, {}), '(device_parameter_key.last_written_time,\n definitions.TIME_DESCRIPTION_WRITTEN)', False, 'from plaso.containers import time_events\n'), ((131, 18, 133, 51), 'plaso.containers.time_events.DateTimeValuesEvent', 'time_events.DateTimeValuesEvent', ({(132, 14, 132, 53): 'log_configuration_key.last_written_time', (133, 14, 133, 50): 'definitions.TIME_DESCRIPTION_WRITTEN'}, {}), '(log_configuration_key.last_written_time,\n definitions.TIME_DESCRIPTION_WRITTEN)', False, 'from plaso.containers import time_events\n'), ((138, 18, 140, 51), 'plaso.containers.time_events.DateTimeValuesEvent', 'time_events.DateTimeValuesEvent', ({(139, 14, 139, 46): 'properties_key.last_written_time', (140, 14, 140, 50): 'definitions.TIME_DESCRIPTION_WRITTEN'}, {}), '(properties_key.last_written_time,\n definitions.TIME_DESCRIPTION_WRITTEN)', False, 'from plaso.containers import time_events\n')] |
CodyKochmann/damn_vulnerable_python | damn_vulnerable_python/evil.py | 8a90ee3b70dddae96f9f0a8500ed9ba5693f3082 | ''' static analyzers are annoying so lets rename eval '''
evil = eval
| [] |
MIMUW-RL/spp-rl | rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py | 86b96cdd220cc4eae86f7cfd26924c69b498dcc6 | import numpy as np
import torch
from torch.nn import functional as F
from rltoolkit.acm.off_policy import AcMOffPolicy
from rltoolkit.algorithms import DDPG
from rltoolkit.algorithms.ddpg.models import Actor, Critic
class DDPG_AcM(AcMOffPolicy, DDPG):
def __init__(
self, unbiased_update: bool = False, custom_loss: float = 0.0,
custom_loss_target: float = 0.0, custom_loss_lr: float = 0.0001,
refill_buffer: bool = False,
lagrangian_custom_loss: bool = False, separate_custom_loss: bool = False,
cw_cl_targets: list = None, custom_loss_target_decay: int = None,
custom_loss_target_dfactor: float = None,
*args, **kwargs,
):
f"""DDPG with AcM class
Args:
unbiased_update (bool, optional): Use next_obs as action for update.
Defaults to { False }.
        refill_buffer (bool, optional): whether the buffer should be refilled with new observations when it is full.
            Defaults to {False}.
"""
super().__init__(*args, **kwargs)
self.unbiased_update = unbiased_update
self.actor = Actor(
self.ob_dim, ac_lim=self.actor_ac_lim, ac_dim=self.actor_output_dim
)
if not self.acm_critic:
self.critic = Critic(self.ob_dim, ac_dim=self.actor_output_dim)
self.custom_loss = custom_loss
custom_loss_scaled = np.log(np.exp(custom_loss) - 1)
self.custom_loss_param = torch.tensor(custom_loss_scaled) if not separate_custom_loss else torch.Tensor([custom_loss_scaled] * self.actor_output_dim)
self.custom_loss_param.requires_grad = lagrangian_custom_loss
self.custom_loss_target = custom_loss_target
self.cw_cl_targets = cw_cl_targets
if lagrangian_custom_loss and cw_cl_targets:
self.custom_loss_target = cw_cl_targets
self.lagrangian_custom_loss = lagrangian_custom_loss
self.custom_loss_lr = custom_loss_lr
self.separate_custom_loss = separate_custom_loss
self.custom_loss_optimizer = self.opt([self.custom_loss_param], lr=custom_loss_lr)
self.refill_buffer = refill_buffer
self.custom_loss_target_decay = custom_loss_target_decay
self.custom_loss_target_dfactor = custom_loss_target_dfactor
if self.custom_loss:
self.loss["ddpg"] = 0.0
self.loss["dist"] = 0.0
if lagrangian_custom_loss:
if self.separate_custom_loss:
self.distances = []
for i in range(self.actor_output_dim):
self.loss[f"custom_loss_param/{i}"] = 0.0
else:
self.loss["custom_loss_param"] = 0.0
new_hparams = {
"hparams/unbiased_update": self.unbiased_update,
"hparams/custom_loss": self.custom_loss,
"hparams/lagrangian_cl": self.lagrangian_custom_loss,
"hparams/custom_loss_target_decay": self.custom_loss_target_decay,
"hparams/custom_loss_target_dfactor": self.custom_loss_target_dfactor,
}
if self.lagrangian_custom_loss:
if self.cw_cl_targets is None:
new_hparams["hparams/cl_target"] = self.custom_loss_target
new_hparams["hparams/cl_lr"] = self.custom_loss_lr
self.hparams_acm.update(new_hparams)
self.hparams.update(self.hparams_acm)
def noise_action(self, obs, act_noise, deterministic=False):
action, _ = self._actor.act(obs, deterministic)
noise = act_noise * torch.randn(self.actor_output_dim, device=self.device)
action += noise * self.actor_ac_lim
action = np.clip(
action.cpu(), -1.1 * self.actor_ac_lim.cpu(), 1.1 * self.actor_ac_lim.cpu()
)
action = action.to(self.device)
if self.denormalize_actor_out:
action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
return action
def custom_loss_target_decay_condition(self):
return(
self.custom_loss_target_decay is not None
and self.custom_loss_target_dfactor is not None
and self.iterations > 0
and self.stats_logger.frames % self.custom_loss_target_decay == 0
)
def acm_update_condition(self):
return (
self.iteration > 0
and self.acm_epochs > 0
and self.stats_logger.frames % self.acm_update_freq == 0
)
def make_unbiased_update(self):
if self.update_condition():
for _ in range(self.grad_steps):
batch = self.replay_buffer.sample_batch(
self.update_batch_size, self.device
)
obs, next_obs, _, reward, done, acm_action = batch
self.update(
obs=obs,
next_obs=next_obs,
action=next_obs,
reward=reward,
done=done,
acm_action=acm_action,
)
def make_update(self):
if self.unbiased_update:
self.make_unbiased_update()
else:
super().make_update()
if self.custom_loss_target_decay_condition():
self.custom_loss_target *= self.custom_loss_target_dfactor
            print(f"CUSTOM LOSS TARGET DECAY, CURRENT VALUE {self.custom_loss_target}")
if self.acm_update_condition():
if self.acm_update_batches:
self.update_acm_batches(self.acm_update_batches)
else:
self.update_acm(self.acm_epochs)
def collect_params_dict(self):
params_dict = super().collect_params_dict()
params_dict["acm"] = self.acm.state_dict()
return params_dict
def apply_params_dict(self, params_dict):
super().apply_params_dict(params_dict)
self.acm.load_state_dict(params_dict["acm"])
def save_model(self, save_path=None):
save_path = DDPG.save_model(self, save_path)
torch.save(self.acm.state_dict(), save_path + "_acm_model.pt")
def compute_qfunc_targ(
self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor
):
"""Compute targets for Q-functions
Args:
reward (torch.Tensor): batch of rewards
next_obs (torch.Tensor): batch of next observations
done (torch.Tensor): batch of done
Returns:
torch.Tensor: Q-function targets for the batch
"""
with torch.no_grad():
next_action, _ = self.actor_targ(next_obs)
next_action = self.replay_buffer.denormalize(next_action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([next_obs, next_action], axis=1)
next_action = self.acm(acm_obs)
q_target = self.critic_targ(next_obs, next_action)
qfunc_target = reward + self.gamma * (1 - done) * q_target
return qfunc_target
def add_custom_loss(self, loss, action, denorm_action, next_obs):
if self.custom_loss:
self.loss["ddpg"] = loss.item()
if self.norm_closs:
next_obs = self.replay_buffer.normalize(next_obs, force=True)
else:
action = denorm_action
if not self.separate_custom_loss:
loss_dist = F.mse_loss(action, self.cut_obs(next_obs))
self.loss["dist"] = loss_dist.item()
if self.lagrangian_custom_loss:
loss += F.softplus(self.custom_loss_param) * (loss_dist - self.custom_loss_target)
else:
loss += self.custom_loss * loss_dist
if self.custom_loss_target_decay is not None:
self.loss["custom_loss_target"] = self.custom_loss_target
else:
distances = torch.mean(F.mse_loss(action, self.cut_obs(next_obs), reduction='none'), dim=0)
if self.cw_cl_targets is None:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - self.custom_loss_target))
else:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - torch.Tensor(self.custom_loss_target)))
self.loss["dist"] = distances.detach()
if self.debug_mode:
for j in range(distances.shape[0]):
self.loss[f"dist/cw/{j}"] = distances[j]
return loss
def compute_pi_loss(self, obs, next_obs):
action, _ = self._actor(obs)
denorm_action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([obs, denorm_action], axis=1)
critic_action = self.acm(acm_obs)
else:
critic_action = denorm_action
loss = -self._critic(obs, critic_action).mean()
return self.add_custom_loss(loss, action, denorm_action, next_obs)
def update_custom_loss_param_loss(self):
if not self.lagrangian_custom_loss:
return
dist_loss = self.loss["dist"]
if self.cw_cl_targets is None:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - self.custom_loss_target)
else:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - torch.Tensor(self.custom_loss_target))
if self.separate_custom_loss:
for i in range(len(loss)):
self.loss[f"custom_loss_param/{i}"] = loss[i].item()
self.loss["dist"] = torch.mean(self.loss["dist"]).item()
loss = torch.sum(loss)
else:
self.loss["custom_loss_param"] = loss.item()
self.custom_loss_optimizer.zero_grad()
loss.backward()
self.custom_loss_optimizer.step()
    def copy_offline_dataset(self, dataset, size):
        """Copies the provided offline RL dataset into the replay buffer.
        For the moment this assumes the D4RL dataset format (a dictionary)
        and copies elements one by one.
        """
i = 0
traj = 0
while i < size:
traj += 1
done = torch.tensor(dataset['timeouts'][i] or dataset['terminals'][i])
obs = torch.tensor(dataset['observations'][i])
prev_idx = self.replay_buffer.add_obs(obs)
i += 1
ep_len = 0
while(not done and i < size):
nextobs = torch.tensor(dataset['observations'][i])
rew = torch.tensor( dataset['rewards'][i] )
done = torch.tensor( dataset['timeouts'][i] or dataset['terminals'][i] )
action = torch.tensor( dataset['actions'][i] )
end = torch.tensor( dataset['terminals'][i] )
next_idx = self.replay_buffer.add_obs(nextobs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, nextobs, rew, done, end
)
self.replay_buffer.add_acm_action(action)
prev_idx = next_idx
i += 1
ep_len += 1
print(f"copied offline dataset with {i} samples, contains {traj} trajectories")
#sets the internal variables according to the provided offline dataset
self.acm_pre_train_samples = i
self.buffer_size = i
self.max_frames = i
self.iterations = i / self.steps_per_epoch
#updates std/dev/min/max parameters of the dataset
self.update_obs_mean_std(self.replay_buffer)
    def collect_batch_and_train(self, steps_per_epoch: int, *args, **kwargs):
        """SPP variant of rollout and sample collection. If there are enough samples
        in the replay buffer, use existing samples to perform the actor/critic update;
        otherwise generate new samples until steps_per_epoch steps
        have been added to the replay buffer.
Args:
steps_per_epoch (int): number of samples to collect and train
*args, **kwargs: arguments for make_update
"""
collected = 0
while collected < steps_per_epoch:
            # important part:
            # when the replay buffer is filled, stop generating new frames and just use the existing buffer,
            # so that the amount of experience used for learning is counted correctly
if (self.stats_logger.frames >= self.buffer_size - self.acm_pre_train_samples) and not self.refill_buffer:
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
continue
self.stats_logger.rollouts += 1
obs = self.env.reset()
# end - end of the episode from perspective of the simulation
# done - end of the episode from perspective of the model
end = False
obs = self.process_obs(obs)
prev_idx = self.replay_buffer.add_obs(obs)
ep_len = 0
while not end:
obs = self.replay_buffer.normalize(obs)
if (self.stats_logger.frames > self.acm_pre_train_samples) and (self.stats_logger.frames <= self.acm_pre_train_samples + self.random_frames):
action = self.initial_act(obs)
else:
action = self.noise_action(obs, self.act_noise)
action_proc = self.process_action(action, obs)
prev_obs = obs
obs, rew, done, _ = self.env.step(action_proc)
ep_len += 1
end = True if ep_len == self.max_ep_len else done
done = False if ep_len == self.max_ep_len else done
obs = self.process_obs(obs)
if self.next_obs_diff is not None:
obs = self.compute_next_obs_diff(prev_obs, obs)
next_idx = self.replay_buffer.add_obs(obs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, action, rew, done, end
)
prev_idx = next_idx
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
def update(
self,
obs: torch.Tensor,
next_obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
done: torch.Tensor,
acm_action: torch.Tensor,
):
"""DDPG update step
Args:
obs (torch.Tensor): observations tensor
next_obs (torch.Tensor): next observations tensor
action (torch.Tensor): actions tensor
reward (torch.Tensor): rewards tensor
done (torch.Tensor): dones tensor
acm_action (torch.Tensor): tensor of acm actions
"""
for param in self.acm.parameters():
param.requires_grad = False
if self.acm_critic:
action = acm_action
y = self.compute_qfunc_targ(reward, next_obs, done)
# Update Q-function by one step
y_q = self._critic(obs, action)
loss_q = F.mse_loss(y_q, y)
self.loss["critic"] = loss_q.item()
self.critic_optimizer.zero_grad()
loss_q.backward()
self.critic_optimizer.step()
# Update policy by one step
self._critic.eval()
loss = self.compute_pi_loss(obs, next_obs)
self.loss["actor"] = loss.item()
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
#update temperature of Lagrangian optimization obj
self.update_custom_loss_param_loss()
# Update target networks
self.update_target_nets()
self._critic.train()
for param in self.acm.parameters():
param.requires_grad = True
def add_tensorboard_logs(self, buffer, done):
super().add_tensorboard_logs(buffer, done)
if self.lagrangian_custom_loss:
self.tensorboard_writer.log_custom_loss_param(
self.iteration, self.custom_loss_param)
if __name__ == "__main__":
#with torch.cuda.device(0):
model = DDPG_AcM(
# unbiased_update=True,
# custom_loss=True,
# acm_update_batches=50,
# denormalize_actor_out=True,
env_name="Pendulum-v0",
buffer_size=50000,
act_noise=0.05,
iterations=100,
gamma=0.99,
steps_per_epoch=200,
stats_freq=5,
test_episodes=3,
custom_loss=1,
lagrangian_custom_loss=False,
# tensorboard_dir="logs_ddpg",
# tensorboard_comment="",
acm_update_freq=200,
acm_epochs=1,
acm_pre_train_epochs=10,
acm_pre_train_samples=10000,
use_gpu=False,
render=False,
)
model.pre_train()
model.train()
| [((31, 21, 33, 9), 'rltoolkit.algorithms.ddpg.models.Actor', 'Actor', (), '', False, 'from rltoolkit.algorithms.ddpg.models import Actor, Critic\n'), ((148, 20, 148, 52), 'rltoolkit.algorithms.DDPG.save_model', 'DDPG.save_model', ({(148, 36, 148, 40): 'self', (148, 42, 148, 51): 'save_path'}, {}), '(self, save_path)', False, 'from rltoolkit.algorithms import DDPG\n'), ((378, 17, 378, 35), 'torch.nn.functional.mse_loss', 'F.mse_loss', ({(378, 28, 378, 31): 'y_q', (378, 33, 378, 34): 'y'}, {}), '(y_q, y)', True, 'from torch.nn import functional as F\n'), ((35, 26, 35, 75), 'rltoolkit.algorithms.ddpg.models.Critic', 'Critic', (), '', False, 'from rltoolkit.algorithms.ddpg.models import Actor, Critic\n'), ((39, 33, 39, 65), 'torch.tensor', 'torch.tensor', ({(39, 46, 39, 64): 'custom_loss_scaled'}, {}), '(custom_loss_scaled)', False, 'import torch\n'), ((39, 99, 39, 157), 'torch.Tensor', 'torch.Tensor', ({(39, 112, 39, 156): '([custom_loss_scaled] * self.actor_output_dim)'}, {}), '([custom_loss_scaled] * self.actor_output_dim)', False, 'import torch\n'), ((81, 28, 81, 82), 'torch.randn', 'torch.randn', (), '', False, 'import torch\n'), ((164, 13, 164, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((213, 22, 213, 61), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((233, 19, 233, 34), 'torch.sum', 'torch.sum', ({(233, 29, 233, 33): 'loss'}, {}), '(loss)', False, 'import torch\n'), ((250, 20, 250, 83), 'torch.tensor', 'torch.tensor', ({(250, 33, 250, 82): "dataset['timeouts'][i] or dataset['terminals'][i]"}, {}), "(dataset['timeouts'][i] or dataset['terminals'][i])", False, 'import torch\n'), ((251, 18, 251, 58), 'torch.tensor', 'torch.tensor', ({(251, 31, 251, 57): "dataset['observations'][i]"}, {}), "(dataset['observations'][i])", False, 'import torch\n'), ((38, 36, 38, 55), 'numpy.exp', 'np.exp', ({(38, 43, 38, 54): 'custom_loss'}, {}), '(custom_loss)', True, 'import numpy as np\n'), ((168, 26, 168, 68), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((257, 26, 257, 66), 'torch.tensor', 'torch.tensor', ({(257, 39, 257, 65): "dataset['observations'][i]"}, {}), "(dataset['observations'][i])", False, 'import torch\n'), ((259, 22, 259, 59), 'torch.tensor', 'torch.tensor', ({(259, 36, 259, 57): "dataset['rewards'][i]"}, {}), "(dataset['rewards'][i])", False, 'import torch\n'), ((260, 23, 260, 88), 'torch.tensor', 'torch.tensor', ({(260, 37, 260, 86): "dataset['timeouts'][i] or dataset['terminals'][i]"}, {}), "(dataset['timeouts'][i] or dataset['terminals'][i])", False, 'import torch\n'), ((261, 25, 261, 62), 'torch.tensor', 'torch.tensor', ({(261, 39, 261, 60): "dataset['actions'][i]"}, {}), "(dataset['actions'][i])", False, 'import torch\n'), ((262, 22, 262, 61), 'torch.tensor', 'torch.tensor', ({(262, 36, 262, 59): "dataset['terminals'][i]"}, {}), "(dataset['terminals'][i])", False, 'import torch\n'), ((226, 20, 226, 54), 'torch.nn.functional.softplus', 'F.softplus', ({(226, 31, 226, 53): 'self.custom_loss_param'}, {}), '(self.custom_loss_param)', True, 'from torch.nn import functional as F\n'), ((228, 20, 228, 54), 'torch.nn.functional.softplus', 'F.softplus', ({(228, 31, 228, 53): 'self.custom_loss_param'}, {}), '(self.custom_loss_param)', True, 'from torch.nn import functional as F\n'), ((228, 70, 228, 107), 'torch.Tensor', 'torch.Tensor', ({(228, 83, 228, 106): 'self.custom_loss_target'}, {}), '(self.custom_loss_target)', False, 'import torch\n'), ((232, 32, 232, 61), 'torch.mean', 'torch.mean', ({(232, 43, 232, 60): 
"self.loss['dist']"}, {}), "(self.loss['dist'])", False, 'import torch\n'), ((187, 28, 187, 62), 'torch.nn.functional.softplus', 'F.softplus', ({(187, 39, 187, 61): 'self.custom_loss_param'}, {}), '(self.custom_loss_param)', True, 'from torch.nn import functional as F\n'), ((197, 38, 197, 72), 'torch.nn.functional.softplus', 'F.softplus', ({(197, 49, 197, 71): 'self.custom_loss_param'}, {}), '(self.custom_loss_param)', True, 'from torch.nn import functional as F\n'), ((199, 38, 199, 72), 'torch.nn.functional.softplus', 'F.softplus', ({(199, 49, 199, 71): 'self.custom_loss_param'}, {}), '(self.custom_loss_param)', True, 'from torch.nn import functional as F\n'), ((199, 88, 199, 125), 'torch.Tensor', 'torch.Tensor', ({(199, 101, 199, 124): 'self.custom_loss_target'}, {}), '(self.custom_loss_target)', False, 'import torch\n')] |
ftrimble/route-grower | pyroute/poi_osm.py | d4343ecc9b13a3e1701c8460c8a1792d08b74567 | #!/usr/bin/python
#----------------------------------------------------------------
# OSM POI handler for pyroute
#
#------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------
from xml.sax import make_parser, handler
from poi_base import *
import os
from xml.sax._exceptions import SAXParseException
import urllib
class osmPoiModule(poiModule, handler.ContentHandler):
def __init__(self, modules):
poiModule.__init__(self, modules)
self.draw = False
self.loadPOIs("all", "amenity|shop=*")
def loadPOIs(self, name, search):
filename = os.path.join(os.path.dirname(__file__),
"data", "poi_%s.osm" % name)
url = "http://www.informationfreeway.org/api/0.5/node[%s][%s]" %(search,
self.bbox())
if(not os.path.exists(filename)):
print "Downloading POIs from OSM"
urllib.urlretrieve(url, filename)
self.load(filename, os.path.join(os.path.dirname(__file__),
"Setup", "poi.txt"))
def bbox(self):
# TODO: based on location!
return "bbox=-6,48,2.5,61"
def load(self, filename, listfile):
self.filters = []
print "Loading POIs from %s" % listfile
f = open(listfile,"r")
try:
for line in f:
if(len(line) > 1):
text = line.rstrip()
name, filter = text.split('|')
group = poiGroup(name)
self.groups.append(group)
self.filters.append({'name':name,'filter':filter,'group':group})
finally:
f.close()
if(not os.path.exists(filename)):
print "Can't load %s"%filename
return
elif not os.path.getsize(filename):
print "%s is empty"%filename
self.inNode = False
parser = make_parser()
parser.setContentHandler(self)
try:
parser.parse(filename)
except SAXParseException:
print "Error while parsing file"
      # TODO: what should happen now?
def startElement(self, name, attrs):
if name == "node":
self.currentNode = { \
'lat': float(attrs.get('lat')),
'lon': float(attrs.get('lon'))}
self.inNode = True
if name == "tag" and self.inNode:
self.currentNode[attrs.get('k')] = attrs.get('v')
def endElement(self, name):
if(name == "node"):
self.storeNode(self.currentNode)
self.inNode = False
def passesFilter(self,n,f):
parts = f.split(';')
matched = True
for part in parts:
k,v = part.split('=',1)
if(n.get(k,'') != v):
matched = False
return(matched)
def storeNode(self, n):
for f in self.filters:
if(self.passesFilter(n,f['filter'])):
x = poi(n['lat'], n['lon'])
x.title = n.get('amenity','') + ': ' + n.get('name', '?')
#print "%s matches %s" % (x.title, f['name'])
f['group'].items.append(x)
def save(self):
# Default filename if none was loaded
if(self.filename == None):
self.filename = os.path.join(os.path.dirname(__file__),
"data", "poi.osm")
self.saveAs(self.filename)
def saveAs(self,filename):
if(filename == None):
return
pass
if __name__ == "__main__":
nodes = osmPoiModule(None)
nodes.sort({'valid':True,'lat':51.3,'lon':-0.2})
#nodes.report()
| [] |
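As a quick illustration of the 'key=value;key=value' filter strings consumed by passesFilter in poi_osm.py above, here is a standalone Python 3 re-implementation of the same matching rule with made-up tag values (the real filters live in Setup/poi.txt, which is not shown here).

def passes_filter(node_tags, filter_string):
    # a node matches only if every 'k=v' part of the filter agrees with its tags
    return all(node_tags.get(k, '') == v
               for k, v in (part.split('=', 1) for part in filter_string.split(';')))

print(passes_filter({'amenity': 'pub', 'name': 'The Crown'}, 'amenity=pub'))    # True
print(passes_filter({'amenity': 'pub'}, 'amenity=restaurant;cuisine=italian'))  # False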
peleiden/pelutils | pelutils/logger.py | 9860734c0e06481aa58a9f767a4cfb5129cb48ec | from __future__ import annotations
import os
import traceback as tb
from collections import defaultdict
from enum import IntEnum
from functools import update_wrapper
from itertools import chain
from typing import Any, Callable, DefaultDict, Generator, Iterable, Optional
from pelutils import get_timestamp, get_repo
from .format import RichString
class LogLevels(IntEnum):
""" Logging levels by priority. Don't set any to 0, as falsiness is used in the code """
SECTION = 6
CRITICAL = 5
ERROR = 4
WARNING = 3
INFO = 2
DEBUG = 1
_STDERR_LEVELS = { LogLevels.CRITICAL, LogLevels.ERROR, LogLevels.WARNING }
# https://rich.readthedocs.io/en/stable/appendix/colors.html
_TIMESTAMP_COLOR = "#72b9e0"
_LEVEL_FORMAT = {
LogLevels.SECTION: "bright_yellow",
LogLevels.CRITICAL: "red1",
LogLevels.ERROR: "red3",
LogLevels.WARNING: "gold3",
LogLevels.INFO: "chartreuse3",
LogLevels.DEBUG: "deep_sky_blue1",
}
class _LevelManager:
"""
Used for disabling logging below a certain level
Example:
with log.level(Levels.WARNING):
log.error("This will be logged")
log.info("This will not be logged")
"""
level: LogLevels
is_active = False
def with_level(self, level: LogLevels | int) -> _LevelManager:
self.level = level
return self
def __enter__(self):
self.is_active = True
def __exit__(self, *args):
self.is_active = False
del self.level # Prevent silent failures by having level accidentally set
class _LogErrors:
"""
    Used for catching exceptions with the logger and logging them before re-raising them
"""
def __init__(self, log):
self._log = log
def __enter__(self):
pass
def __exit__(self, et, ev, tb_):
if et and self._log._collect:
self._log.log_collected()
if et:
self._log._throw(ev, tb_)
class LoggingException(RuntimeError):
pass
class _Logger:
"""
    A simple logger which creates a log file and pushes strings to both stdout and the log file.
    Sections, verbosity, and error logging are supported.
"""
_loggers: DefaultDict[str, dict[str, Any]]
_selected_logger: str
_maxlen = max(len(l.name) for l in LogLevels)
_spacing = 4 * " "
_yes = { "j", "y" }
_no = { "n" }
@property
def _logger(self) -> dict:
return self._loggers[self._selected_logger]
@property
def _fpath(self) -> str:
return self._logger["fpath"]
@property
def _default_sep(self) -> str:
return self._logger["default_sep"]
@property
def _include_micros(self) -> bool:
return self._logger["include_micros"]
@property
def _print_level(self) -> LogLevels:
return self._logger["print_level"]
@property
def _level_mgr(self) -> _LevelManager:
return self._logger["level_mgr"]
@property
def _level(self) -> LogLevels:
return self._level_mgr.level
def __init__(self):
self._log_errors = _LogErrors(self)
self._collect = False
self._collected_log: list[RichString] = list()
self._collected_print: list[RichString] = list()
self._loggers = defaultdict(dict)
self.clean()
self.configure(logger_name="print_only", print_level=LogLevels.DEBUG)
def configure(
self,
fpath: Optional[str] = None, # Path to place logger. Any missing directories are created
title: Optional[str] = None, # Title on first line of logfile
default_seperator = "\n",
include_micros = False, # Include microseconds in timestamps
log_commit = False, # Log commit of git repository
logger_name = "default", # Name of logger
append = False, # Set to True to append to old log file instead of overwriting it
print_level = LogLevels.INFO, # Highest level that will be printed. All will be logged. None for no print
):
""" Configure a logger. If not called, the logger will act like a print statement """
if logger_name in self._loggers:
raise LoggingException("Logger '%s' already exists. Did you call log.configure(...) twice?" % logger_name)
if self._collect:
raise LoggingException("Cannot configure a new logger while using collect_logs")
self._selected_logger = logger_name
self._loggers[logger_name]["fpath"] = os.path.realpath(fpath) if fpath else None
self._loggers[logger_name]["default_sep"] = default_seperator
self._loggers[logger_name]["include_micros"] = include_micros
self._loggers[logger_name]["level_mgr"] = _LevelManager()
self._loggers[logger_name]["print_level"] = print_level or len(LogLevels) + 1
if fpath is not None:
dirs = os.path.split(fpath)[0]
if dirs:
os.makedirs(dirs, exist_ok=True)
exists = os.path.exists(fpath)
with open(fpath, "a" if append else "w", encoding="utf-8") as logfile:
logfile.write("\n\n" if append and exists else "")
if title is not None:
self.section(title + "\n")
if log_commit:
repo, commit = get_repo()
if repo is not None:
self.debug(
"Executing in repository %s" % repo,
"Commit: %s\n" % commit,
)
else:
self.debug("Unable to find repository that code was executed in")
def set_logger(self, logger: str):
if logger not in self._loggers:
raise LoggingException("Logger '%s' does not exist. Available loggers: %s" % (logger, list(self._loggers)))
if self._collect:
raise LoggingException("Cannot configure a new logger while using collect_logs")
self._selected_logger = logger
def level(self, level: LogLevels):
""" Log only at given level and above. Use with a with block """
return self._level_mgr.with_level(level)
@property
def no_log(self):
""" Disable logging inside a with block """
return self._level_mgr.with_level(max(LogLevels)+1)
@property
def log_errors(self):
return self._log_errors
def __call__(self, *tolog, with_info=True, sep=None, with_print=None, level: LogLevels=LogLevels.INFO):
self._log(*tolog, level=level, with_info=with_info, sep=sep, with_print=with_print)
def _write_to_log(self, content: RichString):
if self._fpath is not None:
with open(self._fpath, "a", encoding="utf-8") as logfile:
logfile.write(f"{content}\n")
@staticmethod
def _format(s: str, format: str) -> str:
return f"[{format}]{s}[/]"
def _log(self, *tolog, level=LogLevels.INFO, with_info=True, sep=None, with_print=None):
if not self._loggers:
return
if self._level_mgr.is_active and level < self._level_mgr.level:
return
sep = sep or self._default_sep
with_print = level >= self._print_level if with_print is None else with_print
time = get_timestamp()
tolog = sep.join([str(x) for x in tolog])
time_spaces = len(time) * " "
level_format = level.name + (self._maxlen - len(level.name)) * " "
space = self._spacing + self._maxlen * " " + self._spacing
logs = tolog.split("\n")
rs = RichString(stderr=level in _STDERR_LEVELS) # Send warning
if with_info and tolog:
rs.add_string(
f"{time}{self._spacing}{level_format}{self._spacing}",
self._format(time, _TIMESTAMP_COLOR) +\
self._spacing +\
self._format(level_format, _LEVEL_FORMAT[level]) +\
self._spacing,
)
rs.add_string(logs[0])
else:
rs.add_string(f"{time_spaces}{space}{logs[0]}".rstrip())
for i in range(1, len(logs)):
s = f"\n{time_spaces}{space}{logs[i]}".rstrip()
rs.add_string(
s if s.strip() else "\n"
)
if not self._collect:
self._write_to_log(rs)
if with_print:
rs.print()
else:
self._collected_log.append(rs)
if with_print:
self._collected_print.append(rs)
def _format_tb(self, error: Exception, tb_) -> list[str]:
stack = list(chain.from_iterable([elem.split("\n") for elem in tb.format_tb(tb_)]))
stack = [line for line in stack if line.strip()]
return [
"ERROR: %s thrown with stacktrace" % type(error).__name__,
*stack,
"%s: %s" % (type(error).__name__, error),
]
def _throw(self, error: Exception, tb_=None):
stack = list()
has_cause = error.__cause__ is not None
cur_error = error.__context__
while cur_error:
stack += self._format_tb(cur_error, cur_error.__traceback__)
if has_cause:
stack += ["", "The above exception was the direct cause of the following exception:", ""]
else:
stack += ["", "During handling of the above exception, another exception occurred:", ""]
has_cause = cur_error.__cause__ is not None
cur_error = cur_error.__context__
stack += self._format_tb(error, tb_)
self.critical(*stack, with_print=False)
raise error
def _input(self, prompt: str) -> str:
self.info("Prompt: '%s'" % prompt, with_print=False)
response = input(prompt)
self.info("Input: '%s'" % response, with_print=False)
return response
def input(self, prompt: str | Iterable[str] = "") -> str | Generator[str]:
"""
        Get user input and log both the prompt and the input
If prompt is an iterable, a generator of user inputs will be returned
"""
self._log("Waiting for user input", with_print=False)
if isinstance(prompt, str):
return self._input(prompt)
else:
return (self._input(p) for p in prompt)
@classmethod
def bool_input(cls, inp: str, default=True) -> bool:
""" Parse a yes/no user input """
inp = inp.lower()
if default:
return inp[0] not in cls._no if inp else True
else:
return inp[0] in cls._yes if inp else False
def _reset_collected(self):
self._collected_log = list()
self._collected_print = list()
def set_collect_mode(self, collect: bool):
self._collect = collect
if not collect:
self._reset_collected()
def log_collected(self):
if self._collected_log:
logs = "\n".join(str(log) for log in self._collected_log)
self._write_to_log(logs)
if self._collected_print:
RichString.multiprint(self._collected_print)
def clean(self):
""" Resets the loggers and removes all existing logger configurations """
self._loggers = defaultdict(dict)
self._selected_logger = "default"
def section(self, *tolog, with_info=True, sep=None, with_print=None, newline=True):
if newline:
self._log("")
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.SECTION)
def critical(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.CRITICAL)
def error(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.ERROR)
def warning(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.WARNING)
def info(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.INFO)
def debug(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.DEBUG)
log = _Logger()
class collect_logs:
"""
Wrap functions with this class to have them output all their output at once
Useful with multiprocessing, e.g.
```
with mp.Pool() as p:
p.map(collect_logs(fun), ...)
```
Loggers cannot be changed or configured during this
"""
def __init__(self, fun: Callable):
self.fun = fun
update_wrapper(self, fun)
def __call__(self, *args, **kwargs):
log.set_collect_mode(True)
return_value = self.fun(*args, **kwargs)
log.log_collected()
log.set_collect_mode(False)
return return_value
| [((125, 24, 125, 41), 'collections.defaultdict', 'defaultdict', ({(125, 36, 125, 40): 'dict'}, {}), '(dict)', False, 'from collections import defaultdict\n'), ((213, 15, 213, 30), 'pelutils.get_timestamp', 'get_timestamp', ({}, {}), '()', False, 'from pelutils import get_timestamp, get_repo\n'), ((314, 24, 314, 41), 'collections.defaultdict', 'defaultdict', ({(314, 36, 314, 40): 'dict'}, {}), '(dict)', False, 'from collections import defaultdict\n'), ((353, 8, 353, 33), 'functools.update_wrapper', 'update_wrapper', ({(353, 23, 353, 27): 'self', (353, 29, 353, 32): 'fun'}, {}), '(self, fun)', False, 'from functools import update_wrapper\n'), ((147, 46, 147, 69), 'os.path.realpath', 'os.path.realpath', ({(147, 63, 147, 68): 'fpath'}, {}), '(fpath)', False, 'import os\n'), ((157, 21, 157, 42), 'os.path.exists', 'os.path.exists', ({(157, 36, 157, 41): 'fpath'}, {}), '(fpath)', False, 'import os\n'), ((165, 27, 165, 37), 'pelutils.get_repo', 'get_repo', ({}, {}), '()', False, 'from pelutils import get_timestamp, get_repo\n'), ((154, 19, 154, 39), 'os.path.split', 'os.path.split', ({(154, 33, 154, 38): 'fpath'}, {}), '(fpath)', False, 'import os\n'), ((156, 16, 156, 48), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((246, 71, 246, 88), 'traceback.format_tb', 'tb.format_tb', ({(246, 84, 246, 87): 'tb_'}, {}), '(tb_)', True, 'import traceback as tb\n')] |
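A short usage sketch of the logger module above, assuming the pelutils package is installed; the file path is hypothetical, while every call shown (configure, section, info, debug, warning, level, log_errors) is defined in the class.

from pelutils.logger import log, LogLevels

log.configure(fpath="logs/run.log", title="Example run", print_level=LogLevels.INFO)
log.section("Setup")
log.info("Printed to stdout and written to the log file")
log.debug("Only written to the log file, since DEBUG is below the print level")

with log.level(LogLevels.WARNING):
    log.info("Suppressed entirely while the level manager is active")
    log.warning("Still logged")

with log.log_errors:
    pass  # an exception raised here would be logged with its stack trace, then re-raised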
aaxelb/django-elasticsearch-metrics | tests/test_metrics.py | 8a02ffc57f57257843834d4f84c41480f4e27fbd | import mock
import pytest
import datetime as dt
from django.utils import timezone
from elasticsearch_metrics import metrics
from elasticsearch_dsl import IndexTemplate
from elasticsearch_metrics import signals
from elasticsearch_metrics.exceptions import (
IndexTemplateNotFoundError,
IndexTemplateOutOfSyncError,
)
from tests.dummyapp.metrics import (
DummyMetric,
DummyMetricWithExplicitTemplateName,
DummyMetricWithExplicitTemplatePattern,
)
class PreprintView(metrics.Metric):
provider_id = metrics.Keyword(index=True)
user_id = metrics.Keyword(index=True)
preprint_id = metrics.Keyword(index=True)
class Index:
settings = {"refresh_interval": "-1"}
class Meta:
app_label = "dummyapp"
template_name = "osf_metrics_preprintviews"
template = "osf_metrics_preprintviews-*"
class TestGetIndexName:
def test_get_index_name(self):
date = dt.date(2020, 2, 14)
assert (
PreprintView.get_index_name(date=date)
== "osf_metrics_preprintviews_2020.02.14"
)
def test_get_index_name_respects_date_format_setting(self, settings):
settings.ELASTICSEARCH_METRICS_DATE_FORMAT = "%Y-%m-%d"
date = dt.date(2020, 2, 14)
assert (
PreprintView.get_index_name(date=date)
== "osf_metrics_preprintviews_2020-02-14"
)
def test_get_index_name_gets_index_for_today_by_default(self):
today = timezone.now().date()
today_formatted = today.strftime("%Y.%m.%d")
assert PreprintView.get_index_name() == "osf_metrics_preprintviews_{}".format(
today_formatted
)
class TestGetIndexTemplate:
def test_get_index_template_returns_template_with_correct_name_and_pattern(self):
template = PreprintView.get_index_template()
assert isinstance(template, IndexTemplate)
assert template._template_name == "osf_metrics_preprintviews"
assert "osf_metrics_preprintviews-*" in template.to_dict()["index_patterns"]
def test_get_index_template_respects_index_settings(self):
template = PreprintView.get_index_template()
assert template._index.to_dict()["settings"] == {"refresh_interval": "-1"}
def test_get_index_template_creates_template_with_mapping(self):
template = PreprintView.get_index_template()
mappings = template.to_dict()["mappings"]
assert mappings["doc"]["_source"]["enabled"] is False
properties = mappings["doc"]["properties"]
assert "timestamp" in properties
assert properties["timestamp"] == {"doc_values": True, "type": "date"}
assert properties["provider_id"] == {"type": "keyword", "index": True}
assert properties["user_id"] == {"type": "keyword", "index": True}
assert properties["preprint_id"] == {"type": "keyword", "index": True}
# regression test
def test_mappings_are_not_shared(self):
template1 = DummyMetric.get_index_template()
template2 = DummyMetricWithExplicitTemplateName.get_index_template()
assert "my_int" in template1.to_dict()["mappings"]["doc"]["properties"]
assert "my_keyword" not in template1.to_dict()["mappings"]["doc"]["properties"]
assert "my_int" not in template2.to_dict()["mappings"]["doc"]["properties"]
assert "my_keyword" in template2.to_dict()["mappings"]["doc"]["properties"]
def test_declaring_metric_with_no_app_label_or_template_name_errors(self):
with pytest.raises(RuntimeError):
class BadMetric(metrics.Metric):
pass
with pytest.raises(RuntimeError):
class MyMetric(metrics.Metric):
class Meta:
template_name = "osf_metrics_preprintviews"
def test_get_index_template_default_template_name(self):
template = DummyMetric.get_index_template()
assert isinstance(template, IndexTemplate)
assert template._template_name == "dummyapp_dummymetric"
assert "dummyapp_dummymetric_*" in template.to_dict()["index_patterns"]
def test_get_index_template_uses_app_label_in_class_meta(self):
class MyMetric(metrics.Metric):
class Meta:
app_label = "myapp"
template = MyMetric.get_index_template()
assert template._template_name == "myapp_mymetric"
def test_template_name_defined_with_no_template_falls_back_to_default_template(
self
):
template = DummyMetricWithExplicitTemplateName.get_index_template()
# template name specified in class Meta
assert template._template_name == "dummymetric"
# template is not specified, so it's generated
assert (
"dummyapp_dummymetricwithexplicittemplatename_*"
in template.to_dict()["index_patterns"]
)
def test_template_defined_with_no_template_name_falls_back_to_default_name(self):
template = DummyMetricWithExplicitTemplatePattern.get_index_template()
# template name specified in class Meta
assert (
template._template_name == "dummyapp_dummymetricwithexplicittemplatepattern"
)
# template is not specified, so it's generated
assert "dummymetric-*" in template.to_dict()["index_patterns"]
def test_inheritance(self):
class MyBaseMetric(metrics.Metric):
user_id = metrics.Keyword(index=True)
class Index:
settings = {"number_of_shards": 2}
class Meta:
abstract = True
class ConcreteMetric(MyBaseMetric):
class Meta:
app_label = "dummyapp"
template = ConcreteMetric.get_index_template()
assert template._template_name == "dummyapp_concretemetric"
assert template._index.to_dict()["settings"] == {"number_of_shards": 2}
def test_source_may_be_enabled(self):
class MyMetric(metrics.Metric):
class Meta:
app_label = "dummyapp"
template_name = "mymetric"
template = "mymetric-*"
source = metrics.MetaField(enabled=True)
template = MyMetric.get_index_template()
template_dict = template.to_dict()
doc = template_dict["mappings"]["doc"]
assert doc["_source"]["enabled"] is True
class TestRecord:
def test_calls_save(self, mock_save):
timestamp = dt.datetime(2017, 8, 21)
p = PreprintView.record(timestamp=timestamp, provider_id="abc12")
assert mock_save.call_count == 1
assert p.timestamp == timestamp
assert p.provider_id == "abc12"
@mock.patch.object(timezone, "now")
def test_defaults_timestamp_to_now(self, mock_now, mock_save):
fake_now = dt.datetime(2016, 8, 21)
mock_now.return_value = fake_now
p = PreprintView.record(provider_id="abc12")
assert mock_save.call_count == 1
assert p.timestamp == fake_now
class TestSignals:
@mock.patch.object(PreprintView, "get_index_template")
def test_create_metric_sends_signals(self, mock_get_index_template):
mock_pre_index_template_listener = mock.Mock()
mock_post_index_template_listener = mock.Mock()
signals.pre_index_template_create.connect(mock_pre_index_template_listener)
signals.post_index_template_create.connect(mock_post_index_template_listener)
PreprintView.sync_index_template()
assert mock_pre_index_template_listener.call_count == 1
assert mock_post_index_template_listener.call_count == 1
pre_call_kwargs = mock_pre_index_template_listener.call_args[1]
assert "index_template" in pre_call_kwargs
assert "using" in pre_call_kwargs
post_call_kwargs = mock_pre_index_template_listener.call_args[1]
assert "index_template" in post_call_kwargs
assert "using" in post_call_kwargs
def test_save_sends_signals(self, mock_save):
mock_pre_save_listener = mock.Mock()
mock_post_save_listener = mock.Mock()
signals.pre_save.connect(mock_pre_save_listener, sender=PreprintView)
signals.post_save.connect(mock_post_save_listener, sender=PreprintView)
provider_id = "12345"
user_id = "abcde"
preprint_id = "zyxwv"
doc = PreprintView(
provider_id=provider_id, user_id=user_id, preprint_id=preprint_id
)
doc.save()
assert mock_pre_save_listener.call_count == 1
pre_save_kwargs = mock_pre_save_listener.call_args[1]
assert isinstance(pre_save_kwargs["instance"], PreprintView)
assert "index" in pre_save_kwargs
assert "using" in pre_save_kwargs
assert pre_save_kwargs["sender"] is PreprintView
assert mock_post_save_listener.call_count == 1
post_save_kwargs = mock_pre_save_listener.call_args[1]
assert isinstance(post_save_kwargs["instance"], PreprintView)
assert "index" in post_save_kwargs
assert "using" in post_save_kwargs
assert post_save_kwargs["sender"] is PreprintView
@pytest.mark.es
class TestIntegration:
def test_init(self, client):
PreprintView.init()
name = PreprintView.get_index_name()
mapping = client.indices.get_mapping(index=name)
properties = mapping[name]["mappings"]["doc"]["properties"]
assert properties["timestamp"] == {"type": "date"}
assert properties["provider_id"] == {"type": "keyword"}
assert properties["user_id"] == {"type": "keyword"}
assert properties["preprint_id"] == {"type": "keyword"}
def test_create_document(self, client):
provider_id = "12345"
user_id = "abcde"
preprint_id = "zyxwv"
doc = PreprintView(
provider_id=provider_id, user_id=user_id, preprint_id=preprint_id
)
doc.save()
document = PreprintView.get(id=doc.meta.id, index=PreprintView.get_index_name())
# TODO flesh out this test more. Try to query ES?
assert document is not None
def test_check_index_template(self):
with pytest.raises(IndexTemplateNotFoundError):
assert PreprintView.check_index_template() is False
PreprintView.sync_index_template()
assert PreprintView.check_index_template() is True
# When settings change, template is out of sync
PreprintView._index.settings(
**{"refresh_interval": "1s", "number_of_shards": 1, "number_of_replicas": 2}
)
with pytest.raises(IndexTemplateOutOfSyncError) as excinfo:
assert PreprintView.check_index_template() is False
error = excinfo.value
assert error.settings_in_sync is False
assert error.mappings_in_sync is True
assert error.patterns_in_sync is True
PreprintView.sync_index_template()
assert PreprintView.check_index_template() is True
| [((21, 18, 21, 45), 'elasticsearch_metrics.metrics.Keyword', 'metrics.Keyword', (), '', False, 'from elasticsearch_metrics import metrics\n'), ((22, 14, 22, 41), 'elasticsearch_metrics.metrics.Keyword', 'metrics.Keyword', (), '', False, 'from elasticsearch_metrics import metrics\n'), ((23, 18, 23, 45), 'elasticsearch_metrics.metrics.Keyword', 'metrics.Keyword', (), '', False, 'from elasticsearch_metrics import metrics\n'), ((177, 5, 177, 39), 'mock.patch.object', 'mock.patch.object', ({(177, 23, 177, 31): 'timezone', (177, 33, 177, 38): '"""now"""'}, {}), "(timezone, 'now')", False, 'import mock\n'), ((188, 5, 188, 58), 'mock.patch.object', 'mock.patch.object', ({(188, 23, 188, 35): 'PreprintView', (188, 37, 188, 57): '"""get_index_template"""'}, {}), "(PreprintView, 'get_index_template')", False, 'import mock\n'), ((36, 15, 36, 35), 'datetime.date', 'dt.date', ({(36, 23, 36, 27): '2020', (36, 29, 36, 30): '2', (36, 32, 36, 34): '14'}, {}), '(2020, 2, 14)', True, 'import datetime as dt\n'), ((44, 15, 44, 35), 'datetime.date', 'dt.date', ({(44, 23, 44, 27): '2020', (44, 29, 44, 30): '2', (44, 32, 44, 34): '14'}, {}), '(2020, 2, 14)', True, 'import datetime as dt\n'), ((82, 20, 82, 52), 'tests.dummyapp.metrics.DummyMetric.get_index_template', 'DummyMetric.get_index_template', ({}, {}), '()', False, 'from tests.dummyapp.metrics import DummyMetric, DummyMetricWithExplicitTemplateName, DummyMetricWithExplicitTemplatePattern\n'), ((83, 20, 83, 76), 'tests.dummyapp.metrics.DummyMetricWithExplicitTemplateName.get_index_template', 'DummyMetricWithExplicitTemplateName.get_index_template', ({}, {}), '()', False, 'from tests.dummyapp.metrics import DummyMetric, DummyMetricWithExplicitTemplateName, DummyMetricWithExplicitTemplatePattern\n'), ((102, 19, 102, 51), 'tests.dummyapp.metrics.DummyMetric.get_index_template', 'DummyMetric.get_index_template', ({}, {}), '()', False, 'from tests.dummyapp.metrics import DummyMetric, DummyMetricWithExplicitTemplateName, DummyMetricWithExplicitTemplatePattern\n'), ((118, 19, 118, 75), 'tests.dummyapp.metrics.DummyMetricWithExplicitTemplateName.get_index_template', 'DummyMetricWithExplicitTemplateName.get_index_template', ({}, {}), '()', False, 'from tests.dummyapp.metrics import DummyMetric, DummyMetricWithExplicitTemplateName, DummyMetricWithExplicitTemplatePattern\n'), ((128, 19, 128, 78), 'tests.dummyapp.metrics.DummyMetricWithExplicitTemplatePattern.get_index_template', 'DummyMetricWithExplicitTemplatePattern.get_index_template', ({}, {}), '()', False, 'from tests.dummyapp.metrics import DummyMetric, DummyMetricWithExplicitTemplateName, DummyMetricWithExplicitTemplatePattern\n'), ((171, 20, 171, 44), 'datetime.datetime', 'dt.datetime', ({(171, 32, 171, 36): '2017', (171, 38, 171, 39): '8', (171, 41, 171, 43): '21'}, {}), '(2017, 8, 21)', True, 'import datetime as dt\n'), ((179, 19, 179, 43), 'datetime.datetime', 'dt.datetime', ({(179, 31, 179, 35): '2016', (179, 37, 179, 38): '8', (179, 40, 179, 42): '21'}, {}), '(2016, 8, 21)', True, 'import datetime as dt\n'), ((190, 43, 190, 54), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((191, 44, 191, 55), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((192, 8, 192, 83), 'elasticsearch_metrics.signals.pre_index_template_create.connect', 'signals.pre_index_template_create.connect', ({(192, 50, 192, 82): 'mock_pre_index_template_listener'}, {}), '(mock_pre_index_template_listener)', False, 'from elasticsearch_metrics import signals\n'), ((193, 8, 193, 85), 
'elasticsearch_metrics.signals.post_index_template_create.connect', 'signals.post_index_template_create.connect', ({(193, 51, 193, 84): 'mock_post_index_template_listener'}, {}), '(mock_post_index_template_listener)', False, 'from elasticsearch_metrics import signals\n'), ((206, 33, 206, 44), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((207, 34, 207, 45), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((208, 8, 208, 77), 'elasticsearch_metrics.signals.pre_save.connect', 'signals.pre_save.connect', (), '', False, 'from elasticsearch_metrics import signals\n'), ((209, 8, 209, 79), 'elasticsearch_metrics.signals.post_save.connect', 'signals.post_save.connect', (), '', False, 'from elasticsearch_metrics import signals\n'), ((90, 13, 90, 40), 'pytest.raises', 'pytest.raises', ({(90, 27, 90, 39): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((95, 13, 95, 40), 'pytest.raises', 'pytest.raises', ({(95, 27, 95, 39): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((138, 22, 138, 49), 'elasticsearch_metrics.metrics.Keyword', 'metrics.Keyword', (), '', False, 'from elasticsearch_metrics import metrics\n'), ((259, 13, 259, 54), 'pytest.raises', 'pytest.raises', ({(259, 27, 259, 53): 'IndexTemplateNotFoundError'}, {}), '(IndexTemplateNotFoundError)', False, 'import pytest\n'), ((268, 13, 268, 55), 'pytest.raises', 'pytest.raises', ({(268, 27, 268, 54): 'IndexTemplateOutOfSyncError'}, {}), '(IndexTemplateOutOfSyncError)', False, 'import pytest\n'), ((51, 16, 51, 30), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((160, 25, 160, 56), 'elasticsearch_metrics.metrics.MetaField', 'metrics.MetaField', (), '', False, 'from elasticsearch_metrics import metrics\n')] |
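A minimal sketch of defining and using a metric, mirroring the PreprintView fixture in the tests above; it assumes a configured Django project with Elasticsearch available, so the calls that touch the cluster are left commented out.

from elasticsearch_metrics import metrics

class PageView(metrics.Metric):
    user_id = metrics.Keyword(index=True)

    class Meta:
        app_label = "dummyapp"
        template_name = "dummyapp_pageviews"
        template = "dummyapp_pageviews-*"

# PageView.sync_index_template()     # push the index template to Elasticsearch
# PageView.record(user_id="abc123")  # index one document, timestamped "now" by default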
mwk0408/codewars_solutions | 6 kyu/SumFibs.py | 9b4f502b5f159e68024d494e19a96a226acad5e5 | from functools import lru_cache
@lru_cache
def fib(n):
return n if n<2 else fib(n-1)+fib(n-2)
def sum_fibs(n):
return sum(j for j in (fib(i) for i in range(n+1)) if j%2==0) | [] |
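A quick worked check of the solution above (run with the two functions already in scope): the Fibonacci numbers up to index 8 are 0, 1, 1, 2, 3, 5, 8, 13, 21, and the even ones among them sum to 10.

assert [fib(i) for i in range(9)] == [0, 1, 1, 2, 3, 5, 8, 13, 21]
assert sum_fibs(8) == 0 + 2 + 8 == 10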
jvegreg/ESMValCore | tests/unit/test_iris_helpers.py | 03eb1c942bf1dc3be98cb30c3592b42e82a94f16 | """Tests for :mod:`esmvalcore.iris_helpers`."""
import datetime
import iris
import numpy as np
import pytest
from cf_units import Unit
from esmvalcore.iris_helpers import date2num, var_name_constraint
@pytest.fixture
def cubes():
"""Test cubes."""
cubes = iris.cube.CubeList([
iris.cube.Cube(0.0, var_name='a', long_name='a'),
iris.cube.Cube(0.0, var_name='a', long_name='b'),
iris.cube.Cube(0.0, var_name='c', long_name='d'),
])
return cubes
@pytest.fixture
def units():
return Unit('days since 0001-01-01', calendar='proleptic_gregorian')
@pytest.mark.parametrize("date, dtype, expected", [
(datetime.datetime(1, 1, 1), np.float64, 0.0),
(datetime.datetime(1, 1, 1), int, 0.0),
(datetime.datetime(1, 1, 2, 12), np.float64, 1.5),
])
def test_date2num_scalar(date, dtype, expected, units):
num = date2num(date, units, dtype=dtype)
assert num == expected
assert num.dtype == dtype
def test_var_name_constraint(cubes):
"""Test :func:`esmvalcore.iris_helpers.var_name_constraint`."""
out_cubes = cubes.extract(var_name_constraint('a'))
assert out_cubes == iris.cube.CubeList([
iris.cube.Cube(0.0, var_name='a', long_name='a'),
iris.cube.Cube(0.0, var_name='a', long_name='b'),
])
out_cubes = cubes.extract(var_name_constraint('b'))
assert out_cubes == iris.cube.CubeList([])
out_cubes = cubes.extract(var_name_constraint('c'))
assert out_cubes == iris.cube.CubeList([
iris.cube.Cube(0.0, var_name='c', long_name='d'),
])
with pytest.raises(iris.exceptions.ConstraintMismatchError):
cubes.extract_cube(var_name_constraint('a'))
with pytest.raises(iris.exceptions.ConstraintMismatchError):
cubes.extract_cube(var_name_constraint('b'))
out_cube = cubes.extract_cube(var_name_constraint('c'))
assert out_cube == iris.cube.Cube(0.0, var_name='c', long_name='d')
| [((25, 11, 25, 72), 'cf_units.Unit', 'Unit', (), '', False, 'from cf_units import Unit\n'), ((34, 10, 34, 44), 'esmvalcore.iris_helpers.date2num', 'date2num', (), '', False, 'from esmvalcore.iris_helpers import date2num, var_name_constraint\n'), ((41, 30, 41, 54), 'esmvalcore.iris_helpers.var_name_constraint', 'var_name_constraint', ({(41, 50, 41, 53): '"""a"""'}, {}), "('a')", False, 'from esmvalcore.iris_helpers import date2num, var_name_constraint\n'), ((46, 30, 46, 54), 'esmvalcore.iris_helpers.var_name_constraint', 'var_name_constraint', ({(46, 50, 46, 53): '"""b"""'}, {}), "('b')", False, 'from esmvalcore.iris_helpers import date2num, var_name_constraint\n'), ((47, 24, 47, 46), 'iris.cube.CubeList', 'iris.cube.CubeList', ({(47, 43, 47, 45): '[]'}, {}), '([])', False, 'import iris\n'), ((48, 30, 48, 54), 'esmvalcore.iris_helpers.var_name_constraint', 'var_name_constraint', ({(48, 50, 48, 53): '"""c"""'}, {}), "('c')", False, 'from esmvalcore.iris_helpers import date2num, var_name_constraint\n'), ((52, 9, 52, 63), 'pytest.raises', 'pytest.raises', ({(52, 23, 52, 62): 'iris.exceptions.ConstraintMismatchError'}, {}), '(iris.exceptions.ConstraintMismatchError)', False, 'import pytest\n'), ((54, 9, 54, 63), 'pytest.raises', 'pytest.raises', ({(54, 23, 54, 62): 'iris.exceptions.ConstraintMismatchError'}, {}), '(iris.exceptions.ConstraintMismatchError)', False, 'import pytest\n'), ((56, 34, 56, 58), 'esmvalcore.iris_helpers.var_name_constraint', 'var_name_constraint', ({(56, 54, 56, 57): '"""c"""'}, {}), "('c')", False, 'from esmvalcore.iris_helpers import date2num, var_name_constraint\n'), ((57, 23, 57, 71), 'iris.cube.Cube', 'iris.cube.Cube', (), '', False, 'import iris\n'), ((16, 8, 16, 56), 'iris.cube.Cube', 'iris.cube.Cube', (), '', False, 'import iris\n'), ((17, 8, 17, 56), 'iris.cube.Cube', 'iris.cube.Cube', (), '', False, 'import iris\n'), ((18, 8, 18, 56), 'iris.cube.Cube', 'iris.cube.Cube', (), '', False, 'import iris\n'), ((29, 5, 29, 31), 'datetime.datetime', 'datetime.datetime', ({(29, 23, 29, 24): '(1)', (29, 26, 29, 27): '(1)', (29, 29, 29, 30): '(1)'}, {}), '(1, 1, 1)', False, 'import datetime\n'), ((30, 5, 30, 31), 'datetime.datetime', 'datetime.datetime', ({(30, 23, 30, 24): '(1)', (30, 26, 30, 27): '(1)', (30, 29, 30, 30): '(1)'}, {}), '(1, 1, 1)', False, 'import datetime\n'), ((31, 5, 31, 35), 'datetime.datetime', 'datetime.datetime', ({(31, 23, 31, 24): '(1)', (31, 26, 31, 27): '(1)', (31, 29, 31, 30): '(2)', (31, 32, 31, 34): '(12)'}, {}), '(1, 1, 2, 12)', False, 'import datetime\n'), ((53, 27, 53, 51), 'esmvalcore.iris_helpers.var_name_constraint', 'var_name_constraint', ({(53, 47, 53, 50): '"""a"""'}, {}), "('a')", False, 'from esmvalcore.iris_helpers import date2num, var_name_constraint\n'), ((55, 27, 55, 51), 'esmvalcore.iris_helpers.var_name_constraint', 'var_name_constraint', ({(55, 47, 55, 50): '"""b"""'}, {}), "('b')", False, 'from esmvalcore.iris_helpers import date2num, var_name_constraint\n'), ((43, 8, 43, 56), 'iris.cube.Cube', 'iris.cube.Cube', (), '', False, 'import iris\n'), ((44, 8, 44, 56), 'iris.cube.Cube', 'iris.cube.Cube', (), '', False, 'import iris\n'), ((50, 8, 50, 56), 'iris.cube.Cube', 'iris.cube.Cube', (), '', False, 'import iris\n')] |
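A brief illustration of the two helpers exercised above, assuming iris, cf_units and esmvalcore are installed; the values mirror the parametrized test cases.

import datetime as dt

import iris
import numpy as np
from cf_units import Unit

from esmvalcore.iris_helpers import date2num, var_name_constraint

units = Unit("days since 0001-01-01", calendar="proleptic_gregorian")
print(date2num(dt.datetime(1, 1, 2, 12), units, dtype=np.float64))  # 1.5

cubes = iris.cube.CubeList([iris.cube.Cube(0.0, var_name="a"),
                            iris.cube.Cube(0.0, var_name="c")])
print(cubes.extract(var_name_constraint("a")))  # only the cube with var_name 'a'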
saeed-moghimi-noaa/Maxelev_plot | geo_regions.py | 5bb701d8cb7d64db4c89ea9d7993a8269e57e504 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Geo regions for map plot
"""
__author__ = "Saeed Moghimi"
__copyright__ = "Copyright 2017, UCAR/NOAA"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "[email protected]"
import matplotlib.pyplot as plt
from collections import defaultdict
defs = defaultdict(dict)
defs['elev']['var'] = 'elev'
defs['elev']['vmin'] = -1
defs['elev']['vmax'] = 1
defs['elev']['label'] = 'Elev. [m]'
defs['elev']['format']= '%3.1g'
defs['elev']['cmap'] = plt.cm.jet_r
def get_region_extent(region = 'hsofs_region'):
if region == 'hsofs_region':
defs['lim']['xmin'] = -99.0
defs['lim']['xmax'] = -52.8
defs['lim']['ymin'] = 5.0
defs['lim']['ymax'] = 46.3
##IKE
elif region == 'caribbean':
defs['lim']['xmin'] = -78.
defs['lim']['xmax'] = -74.
defs['lim']['ymin'] = 20.
defs['lim']['ymax'] = 24.
defs['lim']['xmin'] = -82.
defs['lim']['xmax'] = -71.
defs['lim']['ymin'] = 18.
defs['lim']['ymax'] = 26.
elif region == 'ike_region':
defs['lim']['xmin'] = -98.5
defs['lim']['xmax'] = -84.5
defs['lim']['ymin'] = 24.
defs['lim']['ymax'] = 31.5
elif region == 'caribbean_bigger':
defs['lim']['xmin'] = -78.0
defs['lim']['xmax'] = -58
defs['lim']['ymin'] = 10.0
defs['lim']['ymax'] = 28.
elif region == 'ike_local':
defs['lim']['xmin'] = -96
defs['lim']['xmax'] = -92
defs['lim']['ymin'] = 28.5
defs['lim']['ymax'] = 30.6
elif region == 'ike_wave':
defs['lim']['xmin'] = -95.63
defs['lim']['xmax'] = -88.0
defs['lim']['ymin'] = 28.37
defs['lim']['ymax'] = 30.50
elif region == 'ike_hwm':
defs['lim']['xmin'] = -96.15
defs['lim']['xmax'] = -88.5
defs['lim']['ymin'] = 28.45
defs['lim']['ymax'] = 30.7
elif region == 'ike_galv_bay':
defs['lim']['xmin'] = -95.92
defs['lim']['xmax'] = -94.81
defs['lim']['ymin'] = 29.37
defs['lim']['ymax'] = 29.96
elif region == 'ike_galv_nwm':
defs['lim']['xmin'] = -95.4
defs['lim']['xmax'] = -94.2
defs['lim']['ymin'] = 28.66
defs['lim']['ymax'] = 30.4
elif region == 'ike_wav_break':
defs['lim']['xmin'] = -95
defs['lim']['xmax'] = -94.5
defs['lim']['ymin'] = 28.7 + 0.6
defs['lim']['ymax'] = 30.4 - 0.6
elif region == 'ike_f63_timeseries':
defs['lim']['xmin'] = -94.2579 - 0.1
defs['lim']['xmax'] = -94.2579 + 0.1
defs['lim']['ymin'] = 29.88642 - 0.1
defs['lim']['ymax'] = 29.88642 + 0.1
elif region == 'ike_f63_timeseries_det':
defs['lim']['xmin'] = -94.2300
defs['lim']['xmax'] = -94.1866
defs['lim']['ymin'] = 29.82030
defs['lim']['ymax'] = 29.84397+0.05
elif region == 'ike_cpl_paper':
defs['lim']['xmin'] = -95.127481
defs['lim']['xmax'] = -93.233053
defs['lim']['ymin'] = 29.198490
defs['lim']['ymax'] = 30.132224
##IRMA
elif region == 'carib_irma':
defs['lim']['xmin'] = -84.0
defs['lim']['xmax'] = -60.
defs['lim']['ymin'] = 15.0
defs['lim']['ymax'] = 29.
elif region == 'burbuda':
defs['lim']['xmin'] = -65.0
defs['lim']['xmax'] = -60.
defs['lim']['ymin'] = 15.0
defs['lim']['ymax'] = 19.
elif region == 'burbuda_zoom':
defs['lim']['xmin'] = -63.8
defs['lim']['xmax'] = -60.8
defs['lim']['ymin'] = 16.8
defs['lim']['ymax'] = 18.65
elif region == 'puertorico':
defs['lim']['xmin'] = -67.35
defs['lim']['xmax'] = -66.531
defs['lim']['ymin'] = 18.321
defs['lim']['ymax'] = 18.674
elif region == 'puertorico_shore':
defs['lim']['xmin'] = -67.284
defs['lim']['xmax'] = -66.350
defs['lim']['ymin'] = 18.360
defs['lim']['ymax'] = 18.890
elif region == 'key_west':
defs['lim']['xmin'] = -82.7
defs['lim']['xmax'] = -74.5
defs['lim']['ymin'] = 21.3
defs['lim']['ymax'] = 27.2
elif region == 'key_west_zoom':
defs['lim']['xmin'] = -82.2
defs['lim']['xmax'] = -79.4
defs['lim']['ymin'] = 24.1
defs['lim']['ymax'] = 26.1
elif region == 'cuba_zoom':
defs['lim']['xmin'] = -82.
defs['lim']['xmax'] = -77.
defs['lim']['ymin'] = 21.5
defs['lim']['ymax'] = 23.5
elif region == 'key_west_timeseries':
defs['lim']['xmin'] = -84.62
defs['lim']['xmax'] = -79.2
defs['lim']['ymin'] = 23.6
defs['lim']['ymax'] = 30.0
elif region == 'pr_timeseries':
defs['lim']['xmin'] = -68
defs['lim']['xmax'] = -64
defs['lim']['ymin'] = 17.3
defs['lim']['ymax'] = 19.2
elif region == 'key_west_anim':
defs['lim']['xmin'] = -85.5
defs['lim']['xmax'] = -74.5
defs['lim']['ymin'] = 21.0
defs['lim']['ymax'] = 31.5
## ISABEL
elif region == 'isa_region':
defs['lim']['xmin'] = -80.2
defs['lim']['xmax'] = -71.6
defs['lim']['ymin'] = 31.9
defs['lim']['ymax'] = 41.9
elif region == 'isa_local':
defs['lim']['xmin'] = -77.5
defs['lim']['xmax'] = -74
defs['lim']['ymin'] = 34.5
defs['lim']['ymax'] = 40.0
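        # NOTE: the four assignments below immediately overwrite the values above;
        # only the second set of limits takes effect for 'isa_local'.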
defs['lim']['xmin'] = -78.5
defs['lim']['xmax'] = -74
defs['lim']['ymin'] = 33.5
defs['lim']['ymax'] = 39.5
elif region == 'isa_hwm':
defs['lim']['xmin'] = -76.01
defs['lim']['xmax'] = -75.93
defs['lim']['ymin'] = 36.74
defs['lim']['ymax'] = 36.93
elif region == 'isa_landfall':
defs['lim']['xmin'] = -77.8
defs['lim']['xmax'] = -75.2
defs['lim']['ymin'] = 34.2
defs['lim']['ymax'] = 37.5
elif region == 'isa_landfall_zoom':
defs['lim']['xmin'] = -77.8
defs['lim']['xmax'] = -75.2
defs['lim']['ymin'] = 34.2
defs['lim']['ymax'] = 36.0
## SANDY
elif region == 'san_track':
defs['lim']['xmin'] = -82.0
defs['lim']['xmax'] = -67.0
defs['lim']['ymin'] = 23.0
defs['lim']['ymax'] = 43.6
elif region == 'san_area':
defs['lim']['xmin'] = -77.0
defs['lim']['xmax'] = -70.0
defs['lim']['ymin'] = 37.0
defs['lim']['ymax'] = 42.0
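    # NOTE: the next two branches duplicate 'san_track' and 'san_area' above and
    # are therefore never reached.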
elif region == 'san_track':
defs['lim']['xmin'] = -82.0
defs['lim']['xmax'] = -67.0
defs['lim']['ymin'] = 23.0
defs['lim']['ymax'] = 43.6
elif region == 'san_area':
defs['lim']['xmin'] = -77.0
defs['lim']['xmax'] = -70.0
defs['lim']['ymin'] = 37.0
defs['lim']['ymax'] = 42.0
elif region == 'san_area2':
defs['lim']['xmin'] = -75.9
defs['lim']['xmax'] = -73.3
defs['lim']['ymin'] = 38.5
defs['lim']['ymax'] = 41.3
elif region == 'san_newyork':
defs['lim']['xmin'] = -74.5
defs['lim']['xmax'] = -73.55
defs['lim']['ymin'] = 40.35
defs['lim']['ymax'] = 41.2
elif region == 'san_delaware':
defs['lim']['xmin'] = -75.87
defs['lim']['xmax'] = -74.31
defs['lim']['ymin'] = 38.26
defs['lim']['ymax'] = 40.51
elif region == 'san_jamaica_bay':
defs['lim']['xmin'] = -73.963520
defs['lim']['xmax'] = -73.731455
defs['lim']['ymin'] = 40.518074
defs['lim']['ymax'] = 40.699618
elif region == 'irn_region':
defs['lim']['xmin'] = -78.41
defs['lim']['xmax'] = -73.48
defs['lim']['ymin'] = 33.55
defs['lim']['ymax'] = 41.31
elif region == 'irn_hwm':
defs['lim']['xmin'] = -78.64
defs['lim']['xmax'] = -69.54
defs['lim']['ymin'] = 33.80
defs['lim']['ymax'] = 41.82
## ANDREW
elif region == 'and_region':
defs['lim']['xmin'] = -98.5
defs['lim']['xmax'] = -77.5
defs['lim']['ymin'] = 23.
defs['lim']['ymax'] = 32.
elif region == 'and_fl_lu':
defs['lim']['xmin'] = -98.5
defs['lim']['xmax'] = -76.5
defs['lim']['ymin'] = 21.
defs['lim']['ymax'] = 32.
elif region == 'and_local_lu':
defs['lim']['xmin'] = -95
defs['lim']['xmax'] = -86
defs['lim']['ymin'] = 28.
defs['lim']['ymax'] = 32
elif region == 'and_local_fl':
defs['lim']['xmin'] = -86
defs['lim']['xmax'] = -79.5
defs['lim']['ymin'] = 24.
defs['lim']['ymax'] = 34
elif region == 'and_local_lu_landfall':
defs['lim']['xmin'] = -92.4
defs['lim']['xmax'] = -87.5
defs['lim']['ymin'] = 28.
defs['lim']['ymax'] = 31.
elif region == 'and_local_fl_landfall':
defs['lim']['xmin'] = -80.0
defs['lim']['xmax'] = -80.5
defs['lim']['ymin'] = 25.34
defs['lim']['ymax'] = 25.8
## operational upgrade
# NYC area: -74.027725,40.596099
elif region == 'NYC_area':
defs['lim']['xmin'] = -74.027725 - 0.25
defs['lim']['xmax'] = -74.027725 + 0.25
defs['lim']['ymin'] = 40.596099 - 0.2
defs['lim']['ymax'] = 40.596099 + 0.2
# Tampa area: -82.455511,27.921438
elif region == 'Tampa_area':
defs['lim']['xmin'] = -82.455511 - 0.25
defs['lim']['xmax'] = -82.455511 + 0.25
defs['lim']['ymin'] = 27.921438 - 0.2
defs['lim']['ymax'] = 27.921438 + 0.2
# Marshall Islands: 169.107299,7.906637
elif region == 'Marshall':
defs['lim']['xmin'] = 169.107299 - 0.25
defs['lim']['xmax'] = 169.107299 + 0.25
defs['lim']['ymin'] = 7.906637 - 0.2
defs['lim']['ymax'] = 7.906637 + 0.2
# Palau: 134.461436,7.436438
elif region == 'Palau':
defs['lim']['xmin'] = 134.461436 - 0.25
defs['lim']['xmax'] = 134.461436 + 0.25
defs['lim']['ymin'] = 7.436438 - 0.2
defs['lim']['ymax'] = 7.436438 + 0.2
elif region == 'NYC_Area_m':
defs['lim']['xmin'] = -73.55
defs['lim']['xmax'] = -74.26
defs['lim']['ymin'] = 40.55
defs['lim']['ymax'] = 40.91
elif region == 'Tampa_Area_m':
defs['lim']['xmin'] = -82.37
defs['lim']['xmax'] = -82.75
defs['lim']['ymin'] = 27.63
defs['lim']['ymax'] = 28.05
elif region == 'Marshall_Islands_m':
defs['lim']['xmin'] = 164.92
defs['lim']['xmax'] = 173.45
defs['lim']['ymin'] = 5.10
defs['lim']['ymax'] = 11.90
elif region == 'Palau_m':
defs['lim']['xmin'] = 134.01
defs['lim']['xmax'] = 134.78
defs['lim']['ymin'] = 6.78
defs['lim']['ymax'] = 8.52
elif region == 'Port_Arthur_m':
defs['lim']['xmin'] = -93.60
defs['lim']['xmax'] = -94.24
defs['lim']['ymin'] = 29.62
defs['lim']['ymax'] = 30.14
return defs['lim']
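# Usage sketch (illustrative, not part of the original module): apply a region's
# bounding box to a matplotlib Axes.
#
#     lim = get_region_extent(region='ike_region')
#     fig, ax = plt.subplots()
#     ax.set_xlim(lim['xmin'], lim['xmax'])
#     ax.set_ylim(lim['ymin'], lim['ymax'])
#     ax.set_title(defs['elev']['label'])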
| [((16, 8, 16, 25), 'collections.defaultdict', 'defaultdict', ({(16, 20, 16, 24): 'dict'}, {}), '(dict)', False, 'from collections import defaultdict\n')] |
davidADSP/deepAI_paper | figures/plot_log_figure_paper.py | f612e80aa0e8507444228940c54554a83bc16119 | import numpy
import matplotlib.pyplot as plt
fig_convergence = plt.figure(1,figsize=(12,6))
x = numpy.loadtxt('log_deepAI_paper_nonlin_action_long.txt')
plt.subplot(122)
plt.plot(x[:,0])
plt.xlim([0,500])
plt.ylim([-10,200])
plt.xlabel('Steps')
plt.ylabel('Free Action')
plt.axvline(x=230.0,linestyle=':')
plt.axvline(x=250.0,linestyle=':')
plt.axvline(x=270.0,linestyle=':')
ax = plt.subplot(121)
plt.plot(x[:,0])
plt.ylim([-10,200])
ax.axvspan(0, 500, alpha=0.3, color='red')
plt.xlim([0,30000])
plt.xlabel('Steps')
plt.ylabel('Free Action')
fig_convergence.subplots_adjust(left=0.07, bottom=0.1, right=0.95, top=0.95,
wspace=0.2, hspace=0.15)
fig_convergence.savefig('fig_convergence.pdf')
plt.show()
| [((4, 18, 4, 46), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((6, 4, 6, 60), 'numpy.loadtxt', 'numpy.loadtxt', ({(6, 18, 6, 59): '"""log_deepAI_paper_nonlin_action_long.txt"""'}, {}), "('log_deepAI_paper_nonlin_action_long.txt')", False, 'import numpy\n'), ((8, 0, 8, 16), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(8, 12, 8, 15): '(122)'}, {}), '(122)', True, 'import matplotlib.pyplot as plt\n'), ((9, 0, 9, 16), 'matplotlib.pyplot.plot', 'plt.plot', ({(9, 9, 9, 15): 'x[:, (0)]'}, {}), '(x[:, (0)])', True, 'import matplotlib.pyplot as plt\n'), ((10, 0, 10, 17), 'matplotlib.pyplot.xlim', 'plt.xlim', ({(10, 9, 10, 16): '[0, 500]'}, {}), '([0, 500])', True, 'import matplotlib.pyplot as plt\n'), ((11, 0, 11, 19), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(11, 9, 11, 18): '[-10, 200]'}, {}), '([-10, 200])', True, 'import matplotlib.pyplot as plt\n'), ((12, 0, 12, 19), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(12, 11, 12, 18): '"""Steps"""'}, {}), "('Steps')", True, 'import matplotlib.pyplot as plt\n'), ((13, 0, 13, 25), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(13, 11, 13, 24): '"""Free Action"""'}, {}), "('Free Action')", True, 'import matplotlib.pyplot as plt\n'), ((14, 0, 14, 34), 'matplotlib.pyplot.axvline', 'plt.axvline', (), '', True, 'import matplotlib.pyplot as plt\n'), ((15, 0, 15, 34), 'matplotlib.pyplot.axvline', 'plt.axvline', (), '', True, 'import matplotlib.pyplot as plt\n'), ((16, 0, 16, 34), 'matplotlib.pyplot.axvline', 'plt.axvline', (), '', True, 'import matplotlib.pyplot as plt\n'), ((18, 5, 18, 21), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(18, 17, 18, 20): '121'}, {}), '(121)', True, 'import matplotlib.pyplot as plt\n'), ((19, 0, 19, 16), 'matplotlib.pyplot.plot', 'plt.plot', ({(19, 9, 19, 15): 'x[:, (0)]'}, {}), '(x[:, (0)])', True, 'import matplotlib.pyplot as plt\n'), ((20, 0, 20, 19), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(20, 9, 20, 18): '[-10, 200]'}, {}), '([-10, 200])', True, 'import matplotlib.pyplot as plt\n'), ((22, 0, 22, 19), 'matplotlib.pyplot.xlim', 'plt.xlim', ({(22, 9, 22, 18): '[0, 30000]'}, {}), '([0, 30000])', True, 'import matplotlib.pyplot as plt\n'), ((23, 0, 23, 19), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(23, 11, 23, 18): '"""Steps"""'}, {}), "('Steps')", True, 'import matplotlib.pyplot as plt\n'), ((24, 0, 24, 25), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(24, 11, 24, 24): '"""Free Action"""'}, {}), "('Free Action')", True, 'import matplotlib.pyplot as plt\n'), ((31, 0, 31, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n')] |
matiasgrana/nagios_sql | setup.py | 7858b852cf539da418a1a289e8c06e386b62287a | #! python3
# Help from: http://www.scotttorborg.com/python-packaging/minimal.html
# https://docs.python.org/3/distutils/commandref.html#sdist-cmd
# https://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# https://docs.python.org/3.4/tutorial/modules.html
# Install it with python setup.py install
# Or use: python setup.py develop (changes to the source files will be
# immediately available)
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
from setuptools import setup, find_packages
import os
from os import path
import rstcheck
exec(open('src/version.py').read())
# __version__ is defined by executing src/version.py above
version = __version__
here = path.abspath(path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as f:
requires = [x.strip() for x in f if x.strip()]
def check_readme(file='README.rst'):
"""
Checks readme rst file, to ensure it will upload to pypi and be formatted
correctly.
    :param file: path to the README (reStructuredText) file to validate
    :return: None; raises SystemExit if rstcheck reports errors
"""
# Get the long description from the relevant file
with open(file, encoding='utf-8') as f:
readme_content = f.read()
errors = list(rstcheck.check(readme_content))
if errors:
msg = 'There are errors in {}, errors \n {}'.format(file,
errors[0].message)
raise SystemExit(msg)
else:
msg = 'No errors in {}'.format(file)
print(msg)
readme_file = path.join(here, 'README.rst')
# Get the long description from the relevant file
with open(readme_file, encoding='utf-8') as f:
long_description = f.read()
check_readme(readme_file)
# Define setuptools specifications
setup(name='nagios_sql',
version=version,
description='Nagios plugin with sqlchecks',
long_description=long_description, # this is the file README.rst
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: SQL',
'Topic :: System :: Monitoring',
'Topic :: Database :: Database Engines/Servers',
'Topic :: System :: Systems Administration'
],
url='https://github.com/pablodav/nagios_sql',
author='Pablo Estigarribia',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
#include_package_data=True,
#package_data={
# 'data': 'src/data/*',
#},
#data_files=[('VERSION', ['src/VERSION'])],
entry_points={
'console_scripts': [
'nagios_sql = src.nagios_sql:main'
]
},
install_requires=requires,
tests_require=['pytest',
'pytest-cov'],
zip_safe=False)
| [((48, 14, 48, 43), 'os.path.join', 'path.join', ({(48, 24, 48, 28): 'here', (48, 30, 48, 42): '"""README.rst"""'}, {}), "(here, 'README.rst')", False, 'from os import path\n'), ((22, 20, 22, 42), 'os.path.dirname', 'path.dirname', ({(22, 33, 22, 41): '__file__'}, {}), '(__file__)', False, 'from os import path\n'), ((24, 10, 24, 48), 'os.path.join', 'os.path.join', ({(24, 23, 24, 27): 'here', (24, 29, 24, 47): '"""requirements.txt"""'}, {}), "(here, 'requirements.txt')", False, 'import os\n'), ((39, 18, 39, 48), 'rstcheck.check', 'rstcheck.check', ({(39, 33, 39, 47): 'readme_content'}, {}), '(readme_content)', False, 'import rstcheck\n'), ((76, 15, 76, 30), 'setuptools.find_packages', 'find_packages', ({}, {}), '()', False, 'from setuptools import setup, find_packages\n')] |
tongr/TextNN | textnn/utils/test/test_progress_iterator.py | a0294a197d3be284177214e8f019e9fed13dff1a | import io
import sys
from textnn.utils import ProgressIterator
#inspired by https://stackoverflow.com/a/34738440
def capture_sysout(cmd):
capturedOutput = io.StringIO() # Create StringIO object
sys.stdout = capturedOutput # and redirect stdout.
cmd() # Call function.
sys.stdout = sys.__stdout__ # Reset redirect.
return capturedOutput.getvalue() # Now works as before.
def test_progress_iterator():
def progress_generator():
sum(ProgressIterator([1, 2, 3], interval=0, description=""))
report = capture_sysout(cmd=progress_generator)
lines = report.strip().split("\n")
# expected result (with changing numbers):
# 1/3 [=========>....................] - ETA: 7s
# 2/3 [===================>..........] - ETA: 1s
# 3/3 [==============================] - 4s 1s/step
assert lines[0].startswith("1/3")
assert "ETA: " in lines[0]
assert lines[1].startswith("2/3")
assert "ETA: " in lines[1]
assert lines[2].startswith("3/3")
assert lines[2].endswith("s/step")
def test_progress_iterator_with_statement():
def progress_generator():
with ProgressIterator([1,2,3], interval=0, description="") as it:
sum(it)
report = capture_sysout(cmd=progress_generator)
lines = report.strip().split("\n")
# expected result (with changing numbers):
# 1/3 [=========>....................] - ETA: 7s
# 2/3 [===================>..........] - ETA: 1s
# 3/3 [==============================] - 4s 1s/step
assert lines[0].startswith("1/3")
assert "ETA: " in lines[0]
assert lines[1].startswith("2/3")
assert "ETA: " in lines[1]
assert lines[2].startswith("3/3")
assert lines[2].endswith("s/step")
| [((8, 21, 8, 34), 'io.StringIO', 'io.StringIO', ({}, {}), '()', False, 'import io\n'), ((17, 12, 17, 67), 'textnn.utils.ProgressIterator', 'ProgressIterator', (), '', False, 'from textnn.utils import ProgressIterator\n'), ((36, 13, 36, 66), 'textnn.utils.ProgressIterator', 'ProgressIterator', (), '', False, 'from textnn.utils import ProgressIterator\n')] |
NIKH0610/class5-homework | reach.py | d4cfb1b28656a37002dff6b1b20bae1253b2ae80 | import os
import numpy as np
import pandas as pd
housing_df = pd.read_csv(filepath_or_buffer=r'C:\Users\nikhi\NIKH0610\class5-homework\toys-datasets\boston') | []
souparvo/airflow-plugins | queries/general_queries.py | 0ca7fa634335145b69671054680d5d67de329644 |
def insert_metatable():
"""SQL query to insert records from table insert into a table on a DB
"""
return """
INSERT INTO TABLE {{ params.target_schema }}.{{ params.target_table }} VALUES
('{{ params.schema }}', '{{ params.table }}', {{ ti.xcom_pull(key='hive_res', task_ids=params.count_inserts)[0][0] }}, current_timestamp(), '{{ params.type }}');
""" | [] |
Handfeger/pyvisa-py | pyvisa_py/highlevel.py | fcfb45895cd44dd922985c3a9d8f3372c8318d63 | # -*- coding: utf-8 -*-
"""Highlevel wrapper of the VISA Library.
:copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import random
from collections import OrderedDict
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast
from pyvisa import constants, highlevel, rname
from pyvisa.constants import StatusCode
from pyvisa.typing import VISAEventContext, VISARMSession, VISASession
from pyvisa.util import LibraryPath
from . import sessions
from .common import logger
class PyVisaLibrary(highlevel.VisaLibraryBase):
"""A pure Python backend for PyVISA.
The object is basically a dispatcher with some common functions implemented.
When a new resource object is requested to pyvisa, the library creates a
Session object (that knows how to perform low-level communication operations)
    associated with a session handle (a number, usually referred to simply as the session).
A call to a library function is handled by PyVisaLibrary if it involves a
resource agnostic function or dispatched to the correct session object
(obtained from the session id).
Importantly, the user is unaware of this. PyVisaLibrary behaves for
the user just as NIVisaLibrary.
"""
    #: Live session object identified by a random session ID
sessions: Dict[int, sessions.Session]
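    # Usage sketch (illustrative, not part of the original file): this backend is
    # normally reached through PyVISA's ResourceManager rather than instantiated
    # directly, e.g.
    #
    #     import pyvisa
    #     rm = pyvisa.ResourceManager("@py")      # selects the pure-Python backend
    #     inst = rm.open_resource("TCPIP::192.168.0.100::INSTR")
    #     print(inst.query("*IDN?"))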
# Try to import packages implementing lower level functionality.
try:
from .serial import SerialSession
logger.debug("SerialSession was correctly imported.")
except Exception as e:
logger.debug("SerialSession was not imported %s." % e)
try:
from .usb import USBRawSession, USBSession
logger.debug("USBSession and USBRawSession were correctly imported.")
except Exception as e:
logger.debug("USBSession and USBRawSession were not imported %s." % e)
try:
from .tcpip import TCPIPInstrSession, TCPIPSocketSession
logger.debug("TCPIPSession was correctly imported.")
except Exception as e:
logger.debug("TCPIPSession was not imported %s." % e)
try:
from .gpib import GPIBSession
logger.debug("GPIBSession was correctly imported.")
except Exception as e:
logger.debug("GPIBSession was not imported %s." % e)
@staticmethod
def get_library_paths() -> Iterable[LibraryPath]:
"""List a dummy library path to allow to create the library."""
return (LibraryPath("py"),)
@staticmethod
def get_debug_info() -> Dict[str, Union[str, List[str], Dict[str, str]]]:
"""Return a list of lines with backend info."""
from . import __version__
d: OrderedDict[str, Union[str, List[str], Dict[str, str]]] = OrderedDict()
d["Version"] = "%s" % __version__
for key, val in sessions.Session.iter_valid_session_classes():
key_name = "%s %s" % (key[0].name.upper(), key[1])
d[key_name] = "Available " + val.get_low_level_info()
for key, issue in sessions.Session.iter_session_classes_issues():
key_name = "%s %s" % (key[0].name.upper(), key[1])
d[key_name] = issue.split("\n")
return d
def _init(self) -> None:
"""Custom initialization code."""
# Map session handle to session object.
self.sessions = {}
def _register(self, obj: object) -> VISASession:
"""Creates a random but unique session handle for a session object.
Register it in the sessions dictionary and return the value.
"""
session = None
while session is None or session in self.sessions:
session = random.randint(1000000, 9999999)
self.sessions[session] = obj
return session
def open(
self,
session: VISARMSession,
resource_name: str,
access_mode: constants.AccessModes = constants.AccessModes.no_lock,
open_timeout: int = constants.VI_TMO_IMMEDIATE,
) -> Tuple[VISASession, StatusCode]:
"""Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
Parameters
----------
session : VISARMSession
Resource Manager session (should always be a session returned from
open_default_resource_manager()).
resource_name : str
Unique symbolic name of a resource.
access_mode : constants.AccessModes, optional
Specifies the mode by which the resource is to be accessed.
open_timeout : int
Specifies the maximum time period (in milliseconds) that this
operation waits before returning an error. constants.VI_TMO_IMMEDIATE
and constants.VI_TMO_INFINITE are used as min and max.
Returns
-------
VISASession
Unique logical identifier reference to a session
StatusCode
Return value of the library call.
"""
try:
open_timeout = int(open_timeout)
except ValueError:
raise ValueError(
"open_timeout (%r) must be an integer (or compatible type)"
% open_timeout
)
try:
parsed = rname.parse_resource_name(resource_name)
except rname.InvalidResourceName:
return (
VISASession(0),
self.handle_return_value(None, StatusCode.error_invalid_resource_name),
)
cls = sessions.Session.get_session_class(
parsed.interface_type_const, parsed.resource_class
)
sess = cls(session, resource_name, parsed, open_timeout)
return self._register(sess), StatusCode.success
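    # Example for the open() method above (illustrative): 'ASRL1::INSTR' resolves
    # to SerialSession, 'TCPIP0::1.2.3.4::5025::SOCKET' to TCPIPSocketSession, and
    # the returned integer handle becomes the key into self.sessions.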
def clear(self, session: VISASession) -> StatusCode:
"""Clears a device.
Corresponds to viClear function of the VISA library.
Parameters
----------
        session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.clear())
def flush(
self, session: VISASession, mask: constants.BufferOperation
) -> StatusCode:
"""Flush the specified buffers.
The buffers can be associated with formatted I/O operations and/or
serial communication.
Corresponds to viFlush function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mask : constants.BufferOperation
Specifies the action to be taken with flushing the buffer.
The values can be combined using the | operator. However multiple
operations on a single buffer cannot be combined.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.flush(mask))
def gpib_command(
self, session: VISASession, command_byte: bytes
) -> Tuple[int, StatusCode]:
"""Write GPIB command bytes on the bus.
Corresponds to viGpibCommand function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
command_byte : bytes
Data to write.
Returns
-------
int
Number of written bytes
StatusCode
Return value of the library call.
"""
try:
written, st = self.sessions[session].gpib_command(command_byte)
return written, self.handle_return_value(session, st)
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
def assert_trigger(
self, session: VISASession, protocol: constants.TriggerProtocol
) -> StatusCode:
"""Assert software or hardware trigger.
Corresponds to viAssertTrigger function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
protocol : constants.TriggerProtocol
Trigger protocol to use during assertion.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].assert_trigger(protocol)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_send_ifc(self, session: VISASession) -> StatusCode:
"""Pulse the interface clear line (IFC) for at least 100 microseconds.
Corresponds to viGpibSendIFC function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_send_ifc()
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_control_ren(
self, session: VISASession, mode: constants.RENLineOperation
) -> StatusCode:
"""Controls the state of the GPIB Remote Enable (REN) interface line.
Optionally the remote/local state of the device can also be set.
Corresponds to viGpibControlREN function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mode : constants.RENLineOperation
State of the REN line and optionally the device remote/local state.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_control_ren(mode)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_control_atn(
self, session: VISASession, mode: constants.ATNLineOperation
) -> StatusCode:
"""Specifies the state of the ATN line and the local active controller state.
Corresponds to viGpibControlATN function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mode : constants.ATNLineOperation
State of the ATN line and optionally the local active controller state.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_control_atn(mode)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_pass_control(
self, session: VISASession, primary_address: int, secondary_address: int
) -> StatusCode:
"""Tell a GPIB device to become controller in charge (CIC).
Corresponds to viGpibPassControl function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
primary_address : int
Primary address of the GPIB device to which you want to pass control.
secondary_address : int
Secondary address of the targeted GPIB device.
If the targeted device does not have a secondary address, this parameter
should contain the value Constants.VI_NO_SEC_ADDR.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].gpib_pass_control(
primary_address, secondary_address
),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def read_stb(self, session: VISASession) -> Tuple[int, StatusCode]:
"""Reads a status byte of the service request.
Corresponds to viReadSTB function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
int
Service request status byte
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
stb, status_code = sess.read_stb()
return stb, self.handle_return_value(session, status_code)
def close(
self, session: Union[VISASession, VISAEventContext, VISARMSession]
) -> StatusCode:
"""Closes the specified session, event, or find list.
Corresponds to viClose function of the VISA library.
Parameters
        ----------
session : Union[VISASession, VISAEventContext, VISARMSession]
Unique logical identifier to a session, event, resource manager.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
# The RM session directly references the library.
if sess is not self:
return self.handle_return_value(session, sess.close())
else:
return self.handle_return_value(session, StatusCode.success)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def open_default_resource_manager(self) -> Tuple[VISARMSession, StatusCode]:
"""This function returns a session to the Default Resource Manager resource.
Corresponds to viOpenDefaultRM function of the VISA library.
Returns
-------
VISARMSession
Unique logical identifier to a Default Resource Manager session
StatusCode
Return value of the library call.
"""
return (
cast(VISARMSession, self._register(self)),
self.handle_return_value(None, StatusCode.success),
)
def list_resources(
self, session: VISARMSession, query: str = "?*::INSTR"
) -> Tuple[str, ...]:
"""Return a tuple of all connected devices matching query.
Parameters
----------
session : VISARMSession
Unique logical identifier to the resource manager session.
query : str
Regular expression used to match devices.
Returns
-------
Tuple[str, ...]
Resource names of all the connected devices matching the query.
"""
# For each session type, ask for the list of connected resources and
# merge them into a single list.
# HINT: the cast should not be necessary here
resources: List[str] = []
for key, st in sessions.Session.iter_valid_session_classes():
resources += st.list_resources()
return rname.filter(resources, query)
def read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
"""Reads data from device or interface synchronously.
Corresponds to viRead function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
            Data read
StatusCode
Return value of the library call.
"""
# from the session handle, dispatch to the read method of the session object.
try:
data, status_code = self.sessions[session].read(count)
except KeyError:
return (
b"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
return data, self.handle_return_value(session, status_code)
def write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
"""Write data to device or interface synchronously.
Corresponds to viWrite function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
Number of bytes actually transferred
StatusCode
Return value of the library call.
"""
# from the session handle, dispatch to the write method of the session object.
try:
written, status_code = self.sessions[session].write(data)
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
return written, self.handle_return_value(session, status_code)
def buffer_read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
"""Reads data through the use of a formatted I/O read buffer.
The data can be read from a device or an interface.
Corresponds to viBufRead function of the VISA library.
Parameters
----------
        session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
Data read
StatusCode
Return value of the library call.
"""
return self.read(session, count)
def buffer_write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
"""Writes data to a formatted I/O write buffer synchronously.
Corresponds to viBufWrite function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
number of written bytes
StatusCode
return value of the library call.
"""
return self.write(session, data)
def get_attribute(
self,
session: Union[VISASession, VISAEventContext, VISARMSession],
attribute: Union[constants.ResourceAttribute, constants.EventAttribute],
) -> Tuple[Any, StatusCode]:
"""Retrieves the state of an attribute.
Corresponds to viGetAttribute function of the VISA library.
Parameters
----------
session : Union[VISASession, VISAEventContext]
Unique logical identifier to a session, event, or find list.
attribute : Union[constants.ResourceAttribute, constants.EventAttribute]
Resource or event attribute for which the state query is made.
Returns
-------
Any
State of the queried attribute for a specified resource
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
None,
self.handle_return_value(session, StatusCode.error_invalid_object),
)
state, status_code = sess.get_attribute(
cast(constants.ResourceAttribute, attribute)
)
return state, self.handle_return_value(session, status_code)
def set_attribute(
self,
session: VISASession,
attribute: constants.ResourceAttribute,
attribute_state: Any,
) -> StatusCode:
"""Set the state of an attribute.
Corresponds to viSetAttribute function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
attribute : constants.ResourceAttribute
Attribute for which the state is to be modified.
attribute_state : Any
The state of the attribute to be set for the specified object.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].set_attribute(attribute, attribute_state),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def lock(
self,
session: VISASession,
lock_type: constants.Lock,
timeout: int,
requested_key: Optional[str] = None,
) -> Tuple[str, StatusCode]:
"""Establishes an access mode to the specified resources.
Corresponds to viLock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
lock_type : constants.Lock
Specifies the type of lock requested.
timeout : int
Absolute time period (in milliseconds) that a resource waits to get
unlocked by the locking session before returning an error.
requested_key : Optional[str], optional
Requested locking key in the case of a shared lock. For an exclusive
lock it should be None.
Returns
-------
str
Key that can then be passed to other sessions to share the lock, or
None for an exclusive lock.
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
key, status_code = sess.lock(lock_type, timeout, requested_key)
return key, self.handle_return_value(session, status_code)
def unlock(self, session: VISASession) -> StatusCode:
"""Relinquish a lock for the specified resource.
Corresponds to viUnlock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.unlock())
def disable_event(
self,
session: VISASession,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
) -> StatusCode:
"""Disable notification for an event type(s) via the specified mechanism(s).
Corresponds to viDisableEvent function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Event type.
mechanism : constants.EventMechanism
Event handling mechanisms to be disabled.
Returns
-------
StatusCode
Return value of the library call.
"""
pass
def discard_events(
self,
session: VISASession,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
) -> StatusCode:
"""Discard event occurrences for a given type and mechanisms in a session.
Corresponds to viDiscardEvents function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
        event_type : constants.EventType
Logical event identifier.
mechanism : constants.EventMechanism
Specifies event handling mechanisms to be discarded.
Returns
-------
StatusCode
Return value of the library call.
"""
pass
| [((82, 69, 82, 82), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((488, 15, 488, 45), 'pyvisa.rname.filter', 'rname.filter', ({(488, 28, 488, 37): 'resources', (488, 39, 488, 44): 'query'}, {}), '(resources, query)', False, 'from pyvisa import constants, highlevel, rname\n'), ((75, 16, 75, 33), 'pyvisa.util.LibraryPath', 'LibraryPath', ({(75, 28, 75, 32): '"""py"""'}, {}), "('py')", False, 'from pyvisa.util import LibraryPath\n'), ((109, 22, 109, 54), 'random.randint', 'random.randint', ({(109, 37, 109, 44): '1000000', (109, 46, 109, 53): '9999999'}, {}), '(1000000, 9999999)', False, 'import random\n'), ((156, 21, 156, 61), 'pyvisa.rname.parse_resource_name', 'rname.parse_resource_name', ({(156, 47, 156, 60): 'resource_name'}, {}), '(resource_name)', False, 'from pyvisa import constants, highlevel, rname\n'), ((628, 12, 628, 56), 'typing.cast', 'cast', ({(628, 17, 628, 44): 'constants.ResourceAttribute', (628, 46, 628, 55): 'attribute'}, {}), '(constants.ResourceAttribute, attribute)', False, 'from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast\n'), ((159, 16, 159, 30), 'pyvisa.typing.VISASession', 'VISASession', ({(159, 28, 159, 29): '(0)'}, {}), '(0)', False, 'from pyvisa.typing import VISAEventContext, VISARMSession, VISASession\n')] |
sisrfeng/NA-fWebSOD | detectron/utils/webly_vis.py | 49cb75a9a0d557b05968c6b11b0f17a7043f2077 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import numpy as np
import os
import math
from PIL import Image, ImageDraw, ImageFont
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import get_output_dir
def vis_training(cur_iter):
prefix = ''
if cfg.WEBLY.MINING:
prefix = 'mining_'
if not (cfg.WSL.DEBUG or
(cfg.WSL.SAMPLE and cur_iter % cfg.WSL.SAMPLE_ITER == 0)):
return
output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
sample_dir = os.path.join(output_dir, 'webly_sample')
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
for gpu_id in range(cfg.NUM_GPUS):
data_ids = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data_ids'))
ims = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data'))
labels_oh = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, 'labels_oh'))
im_score = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'cls_prob'))
roi_score = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred'))
# roi_score_softmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, prefix + 'rois_pred_softmax'))
rois = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, prefix + 'rois'))
# anchor_argmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, 'anchor_argmax'))
preffix = 'iter_' + str(cur_iter) + '_gpu_' + str(gpu_id)
save_im(labels_oh, im_score, ims, cfg.PIXEL_MEANS, preffix, sample_dir)
save_rois(labels_oh, im_score, roi_score, ims, rois, cfg.PIXEL_MEANS,
preffix, '', sample_dir)
# continue
if cfg.WEBLY.ENTROPY:
pass
else:
continue
class_weight = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_class_weight'))
rois_pred_hatE = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_hatE'))
rois_pred_E = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_E'))
y_logN__logy = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_y_logN__logy'))
save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
cfg.PIXEL_MEANS, preffix, '', sample_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy)
def save_im(labels_oh, im_score, ims, pixel_means, prefix, output_dir):
batch_size, num_classes = im_score.shape
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '.png')
cv2.imwrite(file_name, im)
def save_rois(labels_oh, im_score, roi_score, ims, rois, pixel_means, prefix,
suffix, output_dir):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
has_bg = False
num_rois_this = min(500, num_rois)
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
if has_bg:
continue
has_bg = True
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
argsort = np.argsort(-np.abs(roi_score[:, c]))
argsort = argsort[:num_rois_this]
argsort = argsort[::-1]
if im_score[b][c] < 0.1:
scale_p = 1.0
else:
scale_p = 1.0 / roi_score[:, c].max()
for n in range(num_rois_this):
roi = rois[argsort[n]]
if roi[0] != b:
continue
if roi_score[argsort[n]][c] * scale_p < 0.4:
thickness = 3
else:
thickness = 6
jet = gray2jet(roi_score[argsort[n]][c] * scale_p)
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]), jet, thickness)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_' +
suffix + '.png')
cv2.imwrite(file_name, im_S)
continue
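            # NOTE: the statements below are unreachable because of the
            # unconditional 'continue' above; they also rely on 'anchor_argmax',
            # which is only produced by the commented-out FetchBlob call in
            # vis_training().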
num_anchors = anchor_argmax.shape[0]
for n in range(num_rois):
roi = rois[n]
if roi[0] != b:
continue
for a in range(num_anchors):
if anchor_argmax[a][n] == 1.0:
break
jet = gray2jet(1.0 * a / num_anchors)
cv2.rectangle(im_A, (roi[1], roi[2]), (roi[3], roi[4]), jet, 1)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_A_' +
suffix + '.png')
cv2.imwrite(file_name, im_A)
def save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
pixel_means, prefix, suffix, output_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
rois_pred_E_sum = np.sum(rois_pred_E, axis=0).reshape(1, -1)
E_sum_norm = np.true_divide(rois_pred_E_sum, y_logN__logy)
E_sum_norm = np.where(E_sum_norm > 1., 1., E_sum_norm)
E_class_weight = 1 - E_sum_norm
for b in range(batch_size):
for c in range(num_classes):
if labels_oh[b][c] == 0.0 and im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
im_hatE = im.copy()
im_E = im.copy()
_NUM = 10
argsort_roi = np.argsort(roi_score[:, c])[::-1]
argsort_hatE = np.argsort(rois_pred_hatE[:, c])[::-1]
argsort_E = np.argsort(rois_pred_E[:, c])[::-1]
if len(argsort_roi) >= _NUM:
_NUM = 10
else:
_NUM = len(argsort_roi)
argsort_roi = argsort_roi[:_NUM][::-1]
argsort_hatE = argsort_hatE[:_NUM][::-1]
argsort_E = argsort_E[:_NUM][::-1]
argsort_hatE = argsort_roi
argsort_E = argsort_roi
scale_p = 1.0 / roi_score[:, c].max()
scale_p = 1.0
for n in range(_NUM):
roi = rois[argsort_roi[n]]
hatE_roi = rois[argsort_hatE[n]]
E_roi = rois[argsort_E[n]]
if roi[0] != b:
continue
# draw roi
jet = gray2jet(roi_score[argsort_roi[n]][c] * scale_p)
bgr = jet
rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
text = "{:.4f}".format(roi_score[argsort_roi[n]][c])
im_S = putText_with_TNR(im_S, int(roi[1]), int(roi[2]), 15,
jet, rgb, text)
if hatE_roi[0] != b:
continue
# draw rois_pred_hatE
# jet = gray2jet(rois_pred_hatE[argsort_hatE[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_hatE, (hatE_roi[1], hatE_roi[2]),
(hatE_roi[3], hatE_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text hat_E
text = "{:.4f}".format(rois_pred_hatE[argsort_hatE[n]][c])
im_hatE = putText_with_TNR(im_hatE, int(hatE_roi[1]),
int(hatE_roi[2]), 15, jet, rgb,
text)
if E_roi[0] != b:
continue
# draw rois_pred_E
# jet = gray2jet(rois_pred_E[argsort_E[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_E, (E_roi[1], E_roi[2]), (E_roi[3], E_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text E
text = "{:.4f}".format(rois_pred_E[argsort_E[n]][c])
im_E = putText_with_TNR(im_E, int(E_roi[1]), int(E_roi[2]), 15,
jet, rgb, text)
# write im_score
text = "{:.4f}".format(im_score[b][c])
im_S = putText_with_TNR(im_S, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(class_weight[b][c])
im_hatE = putText_with_TNR(im_hatE, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(E_class_weight[b][c])
im_E = putText_with_TNR(im_E, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
file_name_roi = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_roi' +
suffix + '.png')
cv2.imwrite(file_name_roi, im_S)
file_name_hatE = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) +
'_hatE' + suffix + '.png')
cv2.imwrite(file_name_hatE, im_hatE)
file_name_E = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_E' +
suffix + '.png')
cv2.imwrite(file_name_E, im_E)
def dump_proto_files(model, output_dir):
"""Save prototxt descriptions of the training network and parameter
initialization network."""
with open(os.path.join(output_dir, model.net.Proto().name), 'w') as fid:
fid.write(str(model.net.Proto()))
with open(os.path.join(output_dir,
model.param_init_net.Proto().name), 'w') as fid:
fid.write(str(model.param_init_net.Proto()))
def gray2jet(f):
# plot short rainbow RGB
a = f / 0.25 # invert and group
X = math.floor(a) # this is the integer part
Y = math.floor(255 * (a - X)) # fractional part from 0 to 255
Z = math.floor(128 * (a - X)) # fractional part from 0 to 128
if X == 0:
r = 0
g = Y
b = 128 - Z
elif X == 1:
r = Y
g = 255
b = 0
elif X == 2:
r = 255
g = 255 - Z
b = 0
elif X == 3:
r = 255
g = 128 - Z
b = 0
elif X == 4:
r = 255
g = 0
b = 0
# opencv is bgr, not rgb
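    # Example values (illustrative): gray2jet(0.0) -> (128, 0, 0) (dark blue in
    # BGR) and gray2jet(1.0) -> (0, 0, 255) (red in BGR).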
return (b, g, r)
def putText_with_TNR(img, x, y, size, fontColor, bgColor, string):
thickness = 2
font_scale = 1.1
font = cv2.FONT_HERSHEY_SIMPLEX
s = cv2.getTextSize(string, font, font_scale, thickness)
cv2.rectangle(
img,
(x + thickness, y + thickness),
(x + thickness + s[0][0] + 2, y + thickness + s[0][1] + 2),
# (0, 140, 255),
fontColor,
cv2.FILLED,
lineType=cv2.LINE_AA)
position = (x + thickness + 1, y + thickness + s[0][1] + 1)
cv2.putText(img, string, position, font, font_scale, (255, 255, 255),
thickness, cv2.LINE_AA)
return img
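    # NOTE: everything below the 'return img' above is unreachable; it is an
    # alternative PIL/TrueType rendering path kept for reference.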
# from OpenCV to PIL
font = "/home/chenzhiwei/Documents/myFonts/timesnewroman.ttf"
img_PIL = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
font = ImageFont.truetype(font, size)
position = (x + 3, y - 2)
draw = ImageDraw.Draw(img_PIL)
offsetx, offsety = font.getoffset(string)
width, height = font.getsize(string)
draw.rectangle((offsetx + x + 2, offsety + y - 3, offsetx + x + width + 3,
offsety + y + height - 3),
fill=bgColor)
draw.text(position, string, font=font, fill=fontColor)
# back to OpenCV type
img_OpenCV = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)
return img_OpenCV
| [((26, 17, 26, 66), 'detectron.core.config.get_output_dir', 'get_output_dir', (), '', False, 'from detectron.core.config import get_output_dir\n'), ((27, 17, 27, 57), 'os.path.join', 'os.path.join', ({(27, 30, 27, 40): 'output_dir', (27, 42, 27, 56): '"""webly_sample"""'}, {}), "(output_dir, 'webly_sample')", False, 'import os\n'), ((159, 17, 159, 62), 'numpy.true_divide', 'np.true_divide', ({(159, 32, 159, 47): 'rois_pred_E_sum', (159, 49, 159, 61): 'y_logN__logy'}, {}), '(rois_pred_E_sum, y_logN__logy)', True, 'import numpy as np\n'), ((160, 17, 160, 58), 'numpy.where', 'np.where', ({(160, 26, 160, 41): 'E_sum_norm > 1.0', (160, 43, 160, 45): '1.0', (160, 47, 160, 57): 'E_sum_norm'}, {}), '(E_sum_norm > 1.0, 1.0, E_sum_norm)', True, 'import numpy as np\n'), ((290, 8, 290, 21), 'math.floor', 'math.floor', ({(290, 19, 290, 20): 'a'}, {}), '(a)', False, 'import math\n'), ((291, 8, 291, 33), 'math.floor', 'math.floor', ({(291, 19, 291, 32): '255 * (a - X)'}, {}), '(255 * (a - X))', False, 'import math\n'), ((292, 8, 292, 33), 'math.floor', 'math.floor', ({(292, 19, 292, 32): '128 * (a - X)'}, {}), '(128 * (a - X))', False, 'import math\n'), ((322, 8, 322, 60), 'cv2.getTextSize', 'cv2.getTextSize', ({(322, 24, 322, 30): 'string', (322, 32, 322, 36): 'font', (322, 38, 322, 48): 'font_scale', (322, 50, 322, 59): 'thickness'}, {}), '(string, font, font_scale, thickness)', False, 'import cv2\n'), ((324, 4, 331, 29), 'cv2.rectangle', 'cv2.rectangle', (), '', False, 'import cv2\n'), ((334, 4, 335, 39), 'cv2.putText', 'cv2.putText', ({(334, 16, 334, 19): 'img', (334, 21, 334, 27): 'string', (334, 29, 334, 37): 'position', (334, 39, 334, 43): 'font', (334, 45, 334, 55): 'font_scale', (334, 57, 334, 72): '(255, 255, 255)', (335, 16, 335, 25): 'thickness', (335, 27, 335, 38): 'cv2.LINE_AA'}, {}), '(img, string, position, font, font_scale, (255, 255, 255),\n thickness, cv2.LINE_AA)', False, 'import cv2\n'), ((342, 11, 342, 41), 'PIL.ImageFont.truetype', 'ImageFont.truetype', ({(342, 30, 342, 34): 'font', (342, 36, 342, 40): 'size'}, {}), '(font, size)', False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((344, 11, 344, 34), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', ({(344, 26, 344, 33): 'img_PIL'}, {}), '(img_PIL)', False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((28, 11, 28, 37), 'os.path.exists', 'os.path.exists', ({(28, 26, 28, 36): 'sample_dir'}, {}), '(sample_dir)', False, 'import os\n'), ((29, 8, 29, 31), 'os.makedirs', 'os.makedirs', ({(29, 20, 29, 30): 'sample_dir'}, {}), '(sample_dir)', False, 'import os\n'), ((341, 30, 341, 66), 'cv2.cvtColor', 'cv2.cvtColor', ({(341, 43, 341, 46): 'img', (341, 48, 341, 65): 'cv2.COLOR_BGR2RGB'}, {}), '(img, cv2.COLOR_BGR2RGB)', False, 'import cv2\n'), ((352, 30, 352, 49), 'numpy.asarray', 'np.asarray', ({(352, 41, 352, 48): 'img_PIL'}, {}), '(img_PIL)', True, 'import numpy as np\n'), ((85, 12, 85, 38), 'cv2.imwrite', 'cv2.imwrite', ({(85, 24, 85, 33): 'file_name', (85, 35, 85, 37): 'im'}, {}), '(file_name, im)', False, 'import cv2\n'), ((132, 12, 132, 40), 'cv2.imwrite', 'cv2.imwrite', ({(132, 24, 132, 33): 'file_name', (132, 35, 132, 39): 'im_S'}, {}), '(file_name, im_S)', False, 'import cv2\n'), ((150, 12, 150, 40), 'cv2.imwrite', 'cv2.imwrite', ({(150, 24, 150, 33): 'file_name', (150, 35, 150, 39): 'im_A'}, {}), '(file_name, im_A)', False, 'import cv2\n'), ((158, 22, 158, 49), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((264, 12, 264, 44), 'cv2.imwrite', 'cv2.imwrite', ({(264, 24, 264, 37): 'file_name_roi', (264, 39, 264, 
43): 'im_S'}, {}), '(file_name_roi, im_S)', False, 'import cv2\n'), ((269, 12, 269, 48), 'cv2.imwrite', 'cv2.imwrite', ({(269, 24, 269, 38): 'file_name_hatE', (269, 40, 269, 47): 'im_hatE'}, {}), '(file_name_hatE, im_hatE)', False, 'import cv2\n'), ((274, 12, 274, 42), 'cv2.imwrite', 'cv2.imwrite', ({(274, 24, 274, 35): 'file_name_E', (274, 37, 274, 41): 'im_E'}, {}), '(file_name_E, im_E)', False, 'import cv2\n'), ((128, 16, 128, 87), 'cv2.rectangle', 'cv2.rectangle', ({(128, 30, 128, 34): 'im_S', (128, 36, 128, 52): '(roi[1], roi[2])', (128, 54, 128, 70): '(roi[3], roi[4])', (128, 72, 128, 75): 'jet', (128, 77, 128, 86): 'thickness'}, {}), '(im_S, (roi[1], roi[2]), (roi[3], roi[4]), jet, thickness)', False, 'import cv2\n'), ((146, 16, 146, 79), 'cv2.rectangle', 'cv2.rectangle', ({(146, 30, 146, 34): 'im_A', (146, 36, 146, 52): '(roi[1], roi[2])', (146, 54, 146, 70): '(roi[3], roi[4])', (146, 72, 146, 75): 'jet', (146, 77, 146, 78): '(1)'}, {}), '(im_A, (roi[1], roi[2]), (roi[3], roi[4]), jet, 1)', False, 'import cv2\n'), ((176, 26, 176, 53), 'numpy.argsort', 'np.argsort', ({(176, 37, 176, 52): 'roi_score[:, (c)]'}, {}), '(roi_score[:, (c)])', True, 'import numpy as np\n'), ((177, 27, 177, 59), 'numpy.argsort', 'np.argsort', ({(177, 38, 177, 58): 'rois_pred_hatE[:, (c)]'}, {}), '(rois_pred_hatE[:, (c)])', True, 'import numpy as np\n'), ((178, 24, 178, 53), 'numpy.argsort', 'np.argsort', ({(178, 35, 178, 52): 'rois_pred_E[:, (c)]'}, {}), '(rois_pred_E[:, (c)])', True, 'import numpy as np\n'), ((203, 16, 206, 51), 'cv2.rectangle', 'cv2.rectangle', (), '', False, 'import cv2\n'), ((219, 16, 223, 51), 'cv2.rectangle', 'cv2.rectangle', (), '', False, 'import cv2\n'), ((237, 16, 240, 51), 'cv2.rectangle', 'cv2.rectangle', (), '', False, 'import cv2\n'), ((112, 34, 112, 57), 'numpy.abs', 'np.abs', ({(112, 41, 112, 56): 'roi_score[:, (c)]'}, {}), '(roi_score[:, (c)])', True, 'import numpy as np\n')] |
StepOneInc/salt | salt/runner.py | ee210172c37bf0cee224794cd696b38e288e4073 | # -*- coding: utf-8 -*-
'''
Execute salt convenience routines
'''
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import collections
import logging
import time
import sys
import multiprocessing
# Import salt libs
import salt.exceptions
import salt.loader
import salt.minion
import salt.utils
import salt.utils.args
import salt.utils.event
from salt.client import mixins
from salt.output import display_output
from salt.utils.error import raise_error
from salt.utils.event import tagify
import salt.ext.six as six
log = logging.getLogger(__name__)
class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
'''
The interface used by the :command:`salt-run` CLI tool on the Salt Master
It executes :ref:`runner modules <all-salt.runners>` which run on the Salt
Master.
Importing and using ``RunnerClient`` must be done on the same machine as
the Salt Master and it must be done using the same user that the Salt
Master is running as.
Salt's :conf_master:`external_auth` can be used to authenticate calls. The
eauth user must be authorized to execute runner modules: (``@runner``).
Only the :py:meth:`master_call` below supports eauth.
'''
client = 'runner'
tag_prefix = 'run'
def __init__(self, opts):
self.opts = opts
self.functions = salt.loader.runner(opts) # Must be self.functions for mixin to work correctly :-/
self.returners = salt.loader.returners(opts, self.functions)
self.outputters = salt.loader.outputters(opts)
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
def cmd(self, fun, arg, pub_data=None, kwarg=None):
'''
Execute a runner function
.. code-block:: python
>>> opts = salt.config.master_config('/etc/salt/master')
>>> runner = salt.runner.RunnerClient(opts)
>>> runner.cmd('jobs.list_jobs', [])
{
'20131219215650131543': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:56:50.131543',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
'20131219215921857715': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:59:21.857715',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
}
'''
if kwarg is None:
kwarg = {}
if not isinstance(kwarg, dict):
raise salt.exceptions.SaltInvocationError(
'kwarg must be formatted as a dictionary'
)
if pub_data is None:
pub_data = {}
if not isinstance(pub_data, dict):
raise salt.exceptions.SaltInvocationError(
'pub_data must be formatted as a dictionary'
)
arglist = salt.utils.args.parse_input(arg)
def _append_kwarg(arglist, kwarg):
'''
Append the kwarg dict to the arglist
'''
kwarg['__kwarg__'] = True
arglist.append(kwarg)
if kwarg:
try:
if isinstance(arglist[-1], dict) \
and '__kwarg__' in arglist[-1]:
for key, val in six.iteritems(kwarg):
if key in arglist[-1]:
log.warning(
'Overriding keyword argument {0!r}'.format(key)
)
arglist[-1][key] = val
else:
# No kwargs yet present in arglist
_append_kwarg(arglist, kwarg)
except IndexError:
# arglist is empty, just append
_append_kwarg(arglist, kwarg)
self._verify_fun(fun)
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[fun], arglist, pub_data
)
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
jid = self.returners[fstr]()
log.debug('Runner starting with jid {0}'.format(jid))
self.event.fire_event({'runner_job': fun}, tagify([jid, 'new'], 'job'))
target = RunnerClient._thread_return
data = {'fun': fun, 'jid': jid, 'args': args, 'kwargs': kwargs}
args = (self, self.opts, data)
ret = jid
if self.opts.get('async', False):
process = multiprocessing.Process(
target=target, args=args
)
process.start()
else:
ret = target(*args)
return ret
@classmethod
def _thread_return(cls, instance, opts, data):
'''
The multiprocessing process calls back here
to stream returns
'''
# Runners modules runtime injection:
# - the progress event system with the correct jid
# - Provide JID if the runner wants to access it directly
done = {}
progress = salt.utils.event.get_runner_event(opts, data['jid']).fire_progress
for func_name, func in instance.functions.items():
if func.__module__ in done:
continue
mod = sys.modules[func.__module__]
mod.__jid__ = data['jid']
mod.__progress__ = progress
done[func.__module__] = mod
ret = instance.functions[data['fun']](*data['args'], **data['kwargs'])
# Sleep for just a moment to let any progress events return
time.sleep(0.1)
ret_load = {'return': ret, 'fun': data['fun'], 'fun_args': data['args']}
# Don't use the invoking processes' event socket because it could be closed down by the time we arrive here.
# Create another, for safety's sake.
salt.utils.event.MasterEvent(opts['sock_dir']).fire_event(ret_load, tagify([data['jid'], 'return'], 'runner'))
try:
fstr = '{0}.save_runner_load'.format(opts['master_job_cache'])
instance.returners[fstr](data['jid'], ret_load)
except KeyError:
log.debug(
'The specified returner used for the master job cache '
'"{0}" does not have a save_runner_load function! The results '
'of this runner execution will not be stored.'.format(
opts['master_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
if opts.get('async', False):
return data['jid']
else:
return ret
def master_call(self, **kwargs):
'''
Execute a runner function through the master network interface (eauth).
'''
load = kwargs
load['cmd'] = 'runner'
sreq = salt.transport.Channel.factory(self.opts,
crypt='clear',
usage='master_call')
ret = sreq.send(load)
if isinstance(ret, collections.Mapping):
if 'error' in ret:
raise_error(**ret['error'])
return ret
def _reformat_low(self, low):
'''
Format the low data for RunnerClient()'s master_call() function
The master_call function here has a different function signature than
on WheelClient. So extract all the eauth keys and the fun key and
assume everything else is a kwarg to pass along to the runner function
to be called.
'''
auth_creds = dict([(i, low.pop(i)) for i in [
'username', 'password', 'eauth', 'token', 'client',
] if i in low])
reformatted_low = {'fun': low.pop('fun')}
reformatted_low.update(auth_creds)
reformatted_low['kwarg'] = low
return reformatted_low
def cmd_async(self, low):
'''
Execute a runner function asynchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
            runner.cmd_async({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'pam',
})
'''
reformatted_low = self._reformat_low(low)
return self.master_call(**reformatted_low)
def cmd_sync(self, low, timeout=None):
'''
Execute a runner function synchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
            runner.cmd_sync({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'pam',
})
'''
sevent = salt.utils.event.get_event('master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts)
reformatted_low = self._reformat_low(low)
job = self.master_call(**reformatted_low)
ret_tag = tagify('ret', base=job['tag'])
timelimit = time.time() + (timeout or 300)
while True:
ret = sevent.get_event(full=True)
if ret is None:
if time.time() > timelimit:
raise salt.exceptions.SaltClientTimeout(
"RunnerClient job '{0}' timed out".format(job['jid']),
jid=job['jid'])
else:
continue
if ret['tag'] == ret_tag:
return ret['data']['return']
class Runner(RunnerClient):
'''
Execute the salt runner interface
'''
def print_docs(self):
'''
Print out the documentation!
'''
arg = self.opts.get('fun', None)
docs = super(Runner, self).get_docs(arg)
for fun in sorted(docs):
display_output('{0}:'.format(fun), 'text', self.opts)
print(docs[fun])
def run(self):
'''
Execute the runner sequence
'''
ret = {}
if self.opts.get('doc', False):
self.print_docs()
else:
try:
# Run the runner!
jid = super(Runner, self).cmd(
self.opts['fun'], self.opts['arg'], self.opts)
if self.opts.get('async', False):
log.info('Running in async mode. Results of this execution may '
'be collected by attaching to the master event bus or '
                             'by examining the master job cache, if configured.')
sys.exit(0)
rets = self.get_runner_returns(jid)
else:
rets = [jid]
# Gather the returns
for ret in rets:
if not self.opts.get('quiet', False):
if isinstance(ret, dict) and 'outputter' in ret and ret['outputter'] is not None:
print(self.outputters[ret['outputter']](ret['data']))
else:
salt.output.display_output(ret, '', self.opts)
except salt.exceptions.SaltException as exc:
ret = str(exc)
print(ret)
return ret
log.debug('Runner return: {0}'.format(ret))
return ret
def get_runner_returns(self, jid, timeout=None):
'''
Gather the return data from the event system, break hard when timeout
is reached.
'''
if timeout is None:
timeout = self.opts['timeout'] * 2
timeout_at = time.time() + timeout
last_progress_timestamp = time.time()
while True:
raw = self.event.get_event(timeout, full=True)
time.sleep(0.1)
# If we saw no events in the event bus timeout
# OR
# we have reached the total timeout
# AND
# have not seen any progress events for the length of the timeout.
if raw is None and (time.time() > timeout_at and
time.time() - last_progress_timestamp > timeout):
# Timeout reached
break
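            # Runner events are tagged 'salt/runner/<jid>/<progress|return>' (see the
            # tagify calls above), so the split('/') indices below pick out the event
            # type and the originating jid.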
try:
if not raw['tag'].split('/')[1] == 'runner' and raw['tag'].split('/')[2] == jid:
continue
elif raw['tag'].split('/')[3] == 'progress' and raw['tag'].split('/')[2] == jid:
last_progress_timestamp = time.time()
yield {'data': raw['data']['data'], 'outputter': raw['data']['outputter']}
elif raw['tag'].split('/')[3] == 'return' and raw['tag'].split('/')[2] == jid:
yield raw['data']['return']
break
# Handle a findjob that might have been kicked off under the covers
elif raw['data']['fun'] == 'saltutil.findjob':
timeout_at = timeout_at + 10
continue
except (IndexError, KeyError):
continue
| [((28, 6, 28, 33), 'logging.getLogger', 'logging.getLogger', ({(28, 24, 28, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((166, 8, 166, 23), 'time.sleep', 'time.sleep', ({(166, 19, 166, 22): '(0.1)'}, {}), '(0.1)', False, 'import time\n'), ((266, 18, 266, 48), 'salt.utils.event.tagify', 'tagify', (), '', False, 'from salt.utils.event import tagify\n'), ((341, 34, 341, 45), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((132, 51, 132, 78), 'salt.utils.event.tagify', 'tagify', ({(132, 58, 132, 70): "[jid, 'new']", (132, 72, 132, 77): '"""job"""'}, {}), "([jid, 'new'], 'job')", False, 'from salt.utils.event import tagify\n'), ((138, 22, 140, 13), 'multiprocessing.Process', 'multiprocessing.Process', (), '', False, 'import multiprocessing\n'), ((170, 76, 170, 117), 'salt.utils.event.tagify', 'tagify', ({(170, 83, 170, 106): "[data['jid'], 'return']", (170, 108, 170, 116): '"""runner"""'}, {}), "([data['jid'], 'return'], 'runner')", False, 'from salt.utils.event import tagify\n'), ((268, 20, 268, 31), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((340, 21, 340, 32), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((345, 12, 345, 27), 'time.sleep', 'time.sleep', ({(345, 23, 345, 26): '(0.1)'}, {}), '(0.1)', False, 'import time\n'), ((204, 16, 204, 43), 'salt.utils.error.raise_error', 'raise_error', ({}, {}), "(**ret['error'])", False, 'from salt.utils.error import raise_error\n'), ((112, 36, 112, 56), 'salt.ext.six.iteritems', 'six.iteritems', ({(112, 50, 112, 55): 'kwarg'}, {}), '(kwarg)', True, 'import salt.ext.six as six\n'), ((272, 19, 272, 30), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((313, 20, 313, 31), 'sys.exit', 'sys.exit', ({(313, 29, 313, 30): '(0)'}, {}), '(0)', False, 'import sys\n'), ((351, 32, 351, 43), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((359, 46, 359, 57), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((352, 32, 352, 43), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
RivtLib/replit01 | .venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | # For usage of lark with PyInstaller. See https://pyinstaller-sample-hook.readthedocs.io/en/latest/index.html
import os
def get_hook_dirs():
return [os.path.dirname(__file__)] | [((6, 12, 6, 37), 'os.path.dirname', 'os.path.dirname', ({(6, 28, 6, 36): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
s-gv/pong-keras | pong-pg.py | 38a0f25ae0e628f357512d085dc957720d83ece2 | # Copyright (c) 2019 Sagar Gubbi. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import numpy as np
import gym
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Lambda, Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout
from tensorflow.keras.optimizers import RMSprop, Adam
import tensorflow.keras.backend as K
env = gym.make('PongDeterministic-v4')
UP_ACTION = 2
DOWN_ACTION = 3
ACTIONS = [UP_ACTION, DOWN_ACTION]
# Neural net policy model: takes the stacked frame state and outputs a probability for each action
model = Sequential([
Dense(512, activation='elu', input_shape=(2*6400,)),
Dense(len(ACTIONS), activation='softmax'),
])
model.compile(optimizer=RMSprop(1e-4), loss='sparse_categorical_crossentropy')
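# Discount factor used by discount_rewards() to turn per-step rewards into returns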
gamma = 0.99
# preprocess frames
def prepro(I):
""" prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector. http://karpathy.github.io/2016/05/31/rl/ """
if I is None: return np.zeros((6400,))
I = I[35:195] # crop
I = I[::2,::2,0] # downsample by factor of 2
I[I == 144] = 0 # erase background (background type 1)
I[I == 109] = 0 # erase background (background type 2)
I[I != 0] = 1 # everything else (paddles, ball) just set to 1
return I.astype(np.float).ravel()
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward. http://karpathy.github.io/2016/05/31/rl/ """
discounted_r = np.zeros((len(r),))
running_add = 0
for t in reversed(range(0, len(r))):
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
def train():
reward_sums = []
for ep in range(2000):
Xs, ys, rewards = [], [], []
prev_obs, obs = None, env.reset()
for t in range(99000):
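            # Stack the current and previous preprocessed frames so the policy can
            # infer the ball's direction of motion from a single input vector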
x = np.hstack([prepro(obs), prepro(prev_obs)])
prev_obs = obs
action_probs = model.predict(x[None, :])
ya = np.random.choice(len(ACTIONS), p=action_probs[0])
action = ACTIONS[ya]
obs, reward, done, _ = env.step(action)
Xs.append(x)
ys.append(ya)
rewards.append(reward)
#if reward != 0: print(f'Episode {ep} -- step: {t}, ya: {ya}, reward: {reward}')
if done:
Xs = np.array(Xs)
ys = np.array(ys)
discounted_rewards = discount_rewards(rewards)
advantages = (discounted_rewards - discounted_rewards.mean()) / discounted_rewards.std()
print(f'adv: {np.min(advantages):.2f}, {np.max(advantages):.2f}')
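                # REINFORCE update: weighting each sample's cross-entropy loss by its
                # normalized discounted return pushes the policy toward rewarded actions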
model.fit(Xs, ys, sample_weight=advantages, epochs=1, batch_size=1024)
reward_sum = sum(rewards)
reward_sums.append(reward_sum)
avg_reward_sum = sum(reward_sums[-50:]) / len(reward_sums[-50:])
print(f'Episode {ep} -- reward_sum: {reward_sum}, avg_reward_sum: {avg_reward_sum}\n')
if ep % 20 == 0:
model.save_weights('params/model3.h5')
break
def test():
global env
env = gym.wrappers.Monitor(env, './tmp', video_callable=lambda ep_id: True, force=True)
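    # The Monitor wrapper records a video of every evaluation episode under ./tmp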
model.load_weights('params/model3.h5')
reward_sum = 0
prev_obs, obs = None, env.reset()
for t in range(99000):
x = np.hstack([prepro(obs), prepro(prev_obs)])
prev_obs = obs
action_probs = model.predict(x[None, :])
#ya = np.argmax(action_probs[0])
ya = np.random.choice(len(ACTIONS), p=action_probs[0])
action = ACTIONS[ya]
obs, reward, done, _ = env.step(action)
reward_sum += reward
if reward != 0:
print(f't: {t} -- reward: {reward}')
if done:
print(f't: {t} -- reward_sum: {reward_sum}')
break
def main():
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
test()
else:
train()
if __name__ == '__main__':
main()
| [((15, 6, 15, 38), 'gym.make', 'gym.make', ({(15, 15, 15, 37): '"""PongDeterministic-v4"""'}, {}), "('PongDeterministic-v4')", False, 'import gym\n'), ((94, 10, 94, 91), 'gym.wrappers.Monitor', 'gym.wrappers.Monitor', (), '', False, 'import gym\n'), ((23, 4, 23, 55), 'tensorflow.keras.layers.Dense', 'Dense', (), '', False, 'from tensorflow.keras.layers import Input, Lambda, Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout\n'), ((27, 24, 27, 37), 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', ({(27, 32, 27, 36): '(0.0001)'}, {}), '(0.0001)', False, 'from tensorflow.keras.optimizers import RMSprop, Adam\n'), ((34, 25, 34, 42), 'numpy.zeros', 'np.zeros', ({(34, 34, 34, 41): '(6400,)'}, {}), '((6400,))', True, 'import numpy as np\n'), ((74, 21, 74, 33), 'numpy.array', 'np.array', ({(74, 30, 74, 32): 'Xs'}, {}), '(Xs)', True, 'import numpy as np\n'), ((75, 21, 75, 33), 'numpy.array', 'np.array', ({(75, 30, 75, 32): 'ys'}, {}), '(ys)', True, 'import numpy as np\n'), ((78, 23, 78, 41), 'numpy.min', 'np.min', ({(78, 30, 78, 40): 'advantages'}, {}), '(advantages)', True, 'import numpy as np\n'), ((78, 23, 78, 41), 'numpy.max', 'np.max', ({(78, 30, 78, 40): 'advantages'}, {}), '(advantages)', True, 'import numpy as np\n')] |