max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5)
---|---|---|---|---|---|---
lang/py/cookbook/v2/source/cb2_20_9_exm_1.py | ch1huizong/learning | 0 | 5300 | class Skidoo(object):
''' a mapping which claims to contain all keys, each with a value
of 23; item setting and deletion are no-ops; you can also call
an instance with arbitrary positional args, result is 23. '''
__metaclass__ = MetaInterfaceChecker
__implements__ = IMinimalMapping, ICallable
def __getitem__(self, key): return 23
def __setitem__(self, key, value): pass
def __delitem__(self, key): pass
def __contains__(self, key): return True
def __call__(self, *args): return 23
sk = Skidoo()
| 2.640625 | 3 |
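A quick interaction with the `Skidoo` class above, as a usage sketch (it assumes the recipe's `MetaInterfaceChecker`, `IMinimalMapping` and `ICallable` are defined elsewhere in the cookbook source):

```python
sk = Skidoo()
print(sk['anything'])   # 23: every key "exists" and maps to 23
print('spam' in sk)     # True: membership always succeeds
sk['spam'] = 99         # silently ignored: item setting is a no-op
del sk['spam']          # likewise a no-op
print(sk(1, 2, 3))      # 23: calling with arbitrary args returns 23
```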
face2anime/nb_utils.py | davidleonfdez/face2anime | 0 | 5301 | import importlib.util
__all__ = ['mount_gdrive']
def mount_gdrive() -> str:
"""Mount Google Drive storage of the current Google account and return the root path.
    Functionality is only available in the Google Colab environment; otherwise a RuntimeError is raised.
"""
    if importlib.util.find_spec("google.colab") is None:
raise RuntimeError("Cannot mount Google Drive outside of Google Colab.")
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
root_dir = "/content/gdrive/My Drive/"
return root_dir
| 3.125 | 3 |
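A note on the guard above: `importlib.util.find_spec` returns `None` for an absent top-level package without importing it, but for a dotted name such as `"google.colab"` it first imports the parent package, so the check itself can raise `ModuleNotFoundError` where `google` is missing entirely. A minimal sketch of the pattern with a top-level name:

```python
import importlib.util

# True wherever the (hypothetical) package is not installed; nothing is imported.
print(importlib.util.find_spec("definitely_not_installed") is None)
```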
wasch/tests.py | waschag-tvk/pywaschedv | 1 | 5302 | <reponame>waschag-tvk/pywaschedv
import datetime
from django.utils import timezone
from django.test import TestCase
from django.contrib.auth.models import (
User,
)
from wasch.models import (
Appointment,
WashUser,
WashParameters,
# not models:
AppointmentError,
StatusRights,
)
from wasch import tvkutils, payment
class WashUserTestCase(TestCase):
def test_god(self):
god, _ = WashUser.objects.get_or_create_god()
self.assertTrue(god.isActivated)
self.assertTrue(god.user.is_staff)
self.assertTrue(god.user.is_superuser)
        group_names = [group.name for group in god.user.groups.all()]
for expected_group in StatusRights(9).groups:
self.assertIn(expected_group, group_names)
class AppointmentTestCase(TestCase):
exampleUserName = 'waschexample'
examplePoorUserName = 'poor'
exampleTime = Appointment.manager.scheduled_appointment_times()[-1]
exampleTooOldTime = timezone.make_aware(datetime.datetime(1991, 12, 25))
exampleTooOldReference = 4481037
exampleMachine, exampleBrokenMachine, lastMachine = \
tvkutils.get_or_create_machines()[0]
def setUp(self):
tvkutils.setup()
self.exampleMachine.isAvailable = True # though this is default
self.exampleMachine.save()
self.exampleBrokenMachine.isAvailable = False
        self.exampleBrokenMachine.save()
WashUser.objects.create_enduser(self.exampleUserName, isActivated=True)
WashUser.objects.create_enduser(
self.examplePoorUserName, isActivated=False)
def _createExample(self):
user = User.objects.get(username=self.exampleUserName)
return Appointment.objects.create(
time=self.exampleTime, machine=self.exampleMachine, user=user,
wasUsed=False)
def test_create(self):
result = self._createExample()
self.assertEqual(result.time, self.exampleTime)
self.assertEqual(result.machine, self.exampleMachine)
self.assertEqual(result.user.username, self.exampleUserName)
self.assertTrue(Appointment.manager.appointment_exists(
result.time, result.machine))
self.assertFalse(Appointment.manager.bookable(
result.time, result.machine, result.user))
self.assertEqual(
Appointment.manager.why_not_bookable(
result.time, result.machine, result.user),
41, # Appointment taken
)
result.cancel()
self.assertTrue(Appointment.manager.bookable(
result.time, result.machine, result.user))
def test_bookable(self):
user = User.objects.get(username=self.exampleUserName)
poorUser = User.objects.get(username=self.examplePoorUserName)
god, _ = WashUser.objects.get_or_create_god()
self.assertEqual(
Appointment.manager.why_not_bookable(
self.exampleTime, self.exampleMachine, poorUser),
31, # User not active
)
self.assertTrue(Appointment.manager.bookable(
self.exampleTime, self.exampleMachine, user))
self.assertTrue(Appointment.manager.bookable(
self.exampleTime, self.exampleMachine, god.user))
self.assertEqual(
Appointment.manager.why_not_bookable(
self.exampleTooOldTime, self.exampleMachine, user),
11, # Unsupported time
)
unsavedTooOldAppointment = Appointment.from_reference(
self.exampleTooOldReference, user)
self.assertEqual(self.exampleTooOldReference, Appointment(
time=self.exampleTooOldTime, machine=self.exampleMachine,
user=user).reference)
self.assertEqual(unsavedTooOldAppointment.time, self.exampleTooOldTime)
self.assertEqual(unsavedTooOldAppointment.machine, self.exampleMachine)
self.assertEqual(
unsavedTooOldAppointment.user.username, self.exampleUserName)
self.assertEqual(
unsavedTooOldAppointment.reference, self.exampleTooOldReference)
self.assertEqual(
Appointment.manager.why_not_bookable(
self.exampleTime, self.exampleBrokenMachine, user),
21, # Machine out of service
)
def test_make_appointment(self):
user = User.objects.get(username=self.exampleUserName)
god, _ = WashUser.objects.get_or_create_god()
appointment = Appointment.manager.make_appointment(
self.exampleTime, self.exampleMachine, user)
reference = appointment.reference
self.assertEqual(
Appointment.manager.why_not_bookable(
self.exampleTime, self.exampleMachine, god.user),
41, # Appointment taken
)
with self.assertRaises(AppointmentError) as ae:
Appointment.manager.make_appointment(
self.exampleTime, self.exampleMachine, user)
self.assertEqual(ae.exception.reason, 41)
appointment.cancel()
self.assertEqual(
appointment,
Appointment.manager.filter_for_reference(reference).get())
WashParameters.objects.update_value('bonus-method', 'empty')
self.assertTrue(Appointment.manager.bookable(
self.exampleTime, self.exampleMachine, user))
with self.assertRaises(payment.PaymentError):
Appointment.manager.make_appointment(
self.exampleTime, self.exampleMachine, user)
def test_use(self):
user = User.objects.get(username=self.exampleUserName)
appointment = Appointment.manager.make_appointment(
self.exampleTime, self.exampleMachine, user)
appointment.use()
with self.assertRaises(AppointmentError) as ae:
appointment.use()
self.assertEqual(ae.exception.reason, 61) # Appointment already used
with self.assertRaises(AppointmentError) as ae:
appointment.rebook()
self.assertEqual(ae.exception.reason, 41) # Appointment taken
with self.assertRaises(AppointmentError) as ae:
appointment.cancel()
self.assertEqual(ae.exception.reason, 61) # Appointment already used
self.assertTrue(appointment.wasUsed)
| 2.171875 | 2 |
Python/problem1150.py | 1050669722/LeetCode-Answers | 0 | 5303 | from typing import List
from collections import Counter
# class Solution:
# def isMajorityElement(self, nums: List[int], target: int) -> bool:
# d = Counter(nums)
# return d[target] > len(nums)//2
# class Solution:
# def isMajorityElement(self, nums: List[int], target: int) -> bool:
# ans = 0
# for num in nums:
# if num == target:
# ans += 1
# return ans > len(nums)//2
class Solution:
def isMajorityElement(self, nums: List[int], target: int) -> bool:
if not nums:
return False
if len(nums) == 1:
return nums[0] == target
p, q = 0, len(nums)-1
while p < q:
if nums[p] > target:
return False
elif nums[p] < target:
p += 1
if nums[q] < target:
return False
elif nums[q] > target:
q -= 1
            if nums[p] == nums[q] == target:
                return q - p + 1 > len(nums)//2
        return False
| 3.734375 | 4 |
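Checking the two-pointer solution above against the two examples from the LeetCode 1150 problem statement (the input list must be sorted):

```python
s = Solution()
print(s.isMajorityElement([2, 4, 5, 5, 5, 5, 5, 6, 6], 5))  # True: 5 of 9 elements
print(s.isMajorityElement([10, 100, 101, 101], 101))        # False: 2 of 4 elements
```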
day17/module.py | arcadecoffee/advent-2021 | 0 | 5304 | <reponame>arcadecoffee/advent-2021<filename>day17/module.py
"""
Advent of Code 2021 - Day 17
https://adventofcode.com/2021/day/17
"""
import re
from math import ceil, sqrt
from typing import List, Tuple
DAY = 17
FULL_INPUT_FILE = f'../inputs/day{DAY:02d}/input.full.txt'
TEST_INPUT_FILE = f'../inputs/day{DAY:02d}/input.test.txt'
def load_data(infile_path: str) -> Tuple[int, int, int, int]:
regex = r'target area: x=(-?\d*)\.\.(-?\d*), y=(-?\d*)\.\.(-?\d*)'
with open(infile_path, 'r', encoding='ascii') as infile:
x1, x2, y1, y2 = [int(i) for i in re.match(regex, infile.readline()).groups()]
return x1, x2, y1, y2
def maximum_altitude(y: int) -> int:
return int(y * -1 * (y * -1 - 1) / 2)
def shot_good(x_velocity: int, y_velocity: int, x1: int, x2: int, y1: int, y2: int) -> bool:
x_position = y_position = 0
while x_position <= x2 and y_position >= y1:
if x_position >= x1 and y_position <= y2:
return True
x_position += x_velocity
y_position += y_velocity
x_velocity -= 1 if x_velocity > 0 else -1 if x_velocity < 0 else 0
y_velocity -= 1
return False
def count_good_shots(x1: int, x2: int, y1: int, y2: int) -> int:
x_min = ceil(sqrt(x1 * 8 + 1) / 2 - 1 / 2)
x_max = round(x2 / 2) + 1
y_min = y1
y_max = y1 * -1
arcing_good_shots = []
for x in range(x_min, x_max):
for y in range(y_min, y_max):
if shot_good(x, y, x1, x2, y1, y2):
arcing_good_shots.append((x, y))
direct_shot_count = (x2 + 1 - x1) * (y2 + 1 - y1)
return len(arcing_good_shots) + direct_shot_count
def part_1(infile_path: str) -> int:
target_area = load_data(infile_path)
return maximum_altitude(target_area[2])
def part_2(infile_path: str) -> int:
target_area = load_data(infile_path)
return count_good_shots(*target_area)
if __name__ == '__main__':
part1_answer = part_1(FULL_INPUT_FILE)
print(f'Part 1: {part1_answer}')
part2_answer = part_2(FULL_INPUT_FILE)
print(f'Part 2: {part2_answer}')
| 3.390625 | 3 |
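A sanity check on `maximum_altitude` above: the highest safe initial y-velocity is |y1| - 1, giving a peak of |y1|(|y1| - 1)/2. Against the puzzle's published sample target (`y=-10..-5`) this reproduces the part 1 answer:

```python
def maximum_altitude(y: int) -> int:  # copied from the module above
    return int(y * -1 * (y * -1 - 1) / 2)

print(maximum_altitude(-10))  # 45, the sample answer for part 1
```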
src/main/python/depysible/domain/rete.py | stefano-bragaglia/DePYsible | 4 | 5305 | <reponame>stefano-bragaglia/DePYsible
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
Payload = Tuple[List['Literal'], 'Substitutions']
class Root:
def __init__(self):
self.children = set()
def notify(self, ground: 'Literal'):
for child in self.children:
child.notify(ground, {}, self)
class Alfa:
def __init__(self, pattern: 'Literal', parent: Root):
self.parent = parent
self.pattern = pattern
self.name = repr(pattern)
self.memory = []
self.children = set()
parent.children.add(self)
def notify(self, ground: 'Literal', subs: 'Substitutions', parent: Root):
subs = self.pattern.unifies(ground)
if subs is not None:
payload = ([ground], subs)
if payload not in self.memory:
self.memory.append(payload)
for child in self.children:
child.notify([ground], subs, self)
class Beta:
def __init__(self, parent_1: Union[Alfa, 'Beta'], parent_2: Alfa):
self.parent_1 = parent_1
self.parent_2 = parent_2
self.name = '%s, %s' % (parent_1.name, parent_2.name)
self.memory = []
self.children = set()
parent_1.children.add(self)
parent_2.children.add(self)
def notify(self, ground: List['Literal'], subs: 'Substitutions', parent: Union[Alfa, 'Beta']):
if parent is self.parent_1:
for ground_2, subs_2 in self.parent_2.memory:
self._notify(ground, subs, ground_2, subs_2)
elif parent is self.parent_2:
for ground_1, subs_1 in self.parent_1.memory:
self._notify(ground_1, subs_1, ground, subs)
@staticmethod
def _unifies(subs_1: 'Substitutions', subs_2: 'Substitutions') -> Optional['Substitutions']:
for var in set(subs_1).intersection(subs_2):
if subs_1[var] != subs_2[var]:
return None
return {**subs_1, **subs_2}
def _notify(self, ground_1: List['Literal'], subs_1: 'Substitutions', ground_2: List['Literal'],
subs_2: 'Substitutions'):
subs = self._unifies(subs_1, subs_2)
if subs is not None:
ground = [*ground_1, *ground_2]
payload = (ground, subs)
if payload not in self.memory:
self.memory.append(payload)
for child in self.children:
child.notify(ground, subs, self)
class Leaf:
def __init__(self, rule: 'Rule', parent: Union[Alfa, Beta], root: Root, agenda: List):
self.parent = parent
self.rule = rule
self.name = repr(rule)
self.memory = []
self.root = root
self.agenda = agenda
parent.children.add(self)
def notify(self, ground: List['Literal'], subs: 'Substitutions', parent: Union[Alfa, 'Beta']):
from depysible.domain.definitions import Rule
payload = (ground, subs)
if payload not in self.memory:
self.memory.append(payload)
lit = self.rule.head.substitutes(subs)
# if self.rule.type is RuleType.STRICT:
# fact = Rule(lit, self.rule.type, [])
# if fact not in self.agenda:
# self.agenda.append(fact)
rule = Rule(lit, self.rule.type, ground)
if rule not in self.agenda:
self.agenda.append(rule)
self.root.notify(lit)
def fire_rules(program: 'Program') -> List['Rule']:
if program.is_ground():
return program
rules = []
table = {}
root = Root()
for rule in program.rules:
if rule.is_fact():
rules.append(rule)
else:
beta = None
for lit in rule.body:
name = repr(lit)
alfa = table.setdefault(name, Alfa(lit, root))
if beta is None:
beta = alfa
else:
name = '%s, %s' % (beta.name, alfa.name)
beta = table.setdefault(name, Beta(beta, alfa))
Leaf(rule, beta, root, rules)
for fact in program.get_facts():
root.notify(fact.head)
return rules
| 2.71875 | 3 |
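The join step hinges on `Beta._unifies`: two substitution maps merge only when they agree on every shared variable. A standalone sketch of that check, lifted out of the class so it runs on its own:

```python
from typing import Optional

def unifies(subs_1: dict, subs_2: dict) -> Optional[dict]:
    # Merge two substitution maps, failing on any conflicting binding.
    for var in set(subs_1).intersection(subs_2):
        if subs_1[var] != subs_2[var]:
            return None
    return {**subs_1, **subs_2}

print(unifies({'X': 'a'}, {'Y': 'b'}))  # {'X': 'a', 'Y': 'b'}
print(unifies({'X': 'a'}, {'X': 'b'}))  # None: X is bound inconsistently
```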
pythonbot_1.0/GameData.py | jeffreyzli/pokerbot-2017 | 1 | 5306 | import HandRankings as Hand
from deuces.deuces import Card, Evaluator
class GameData:
def __init__(self, name, opponent_name, stack_size, bb):
# match stats
self.name = name
self.opponent_name = opponent_name
self.starting_stack_size = int(stack_size)
self.num_hands = 0
self.num_wins = 0
self.num_flop = 0
self.big_blind = int(bb)
# self pre-flop stats
self.pfr = 0
self.vpip = 0
self.three_bet = 0
self.fold_big_bet = 0
# opponent pre-flop stats
self.opponent_pfr = 0
self.opponent_vpip = 0
self.opponent_three_bet = 0
self.opponent_fold_pfr = 0
self.opponent_fold_three_bet = 0
# self post-flop stats
self.aggression_factor = False
self.showdown = 0
self.c_bet = 0
self.showdown_win = 0
self.double_barrel = 0
self.discarded_card = None
# opponent post-flop stats
self.opponent_c_bet = 0
self.opponent_fold_c_bet = 0
self.opponent_double_barrel = 0
# current hand stats
self.button = True
self.current_pot_size = 0
self.current_hand = []
self.current_hand_strength = 0.0
self.hand_class = ''
self.hand_score = 0
self.current_game_state = ''
self.board_cards = []
self.last_actions = []
self.current_legal_actions = []
self.has_called = False
self.opponent_has_called = False
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.street_dict = {'0': 0, '3': 0, '4': 0, '5': 0}
self.discard = False
self.has_five_bet = False
self.has_bet_aggressively = False
self.time_bank = 0.0
self.opc = 0
def new_hand(self, data_list):
self.num_hands += 1
self.button = data_list[2]
if "true" in self.button:
self.button = True
else:
self.button = False
self.current_hand = [data_list[3], data_list[4]]
self.current_hand_strength = Hand.hand_win_odds(self.current_hand)
self.current_game_state = 'PREFLOP'
self.board_cards = []
self.last_actions = []
self.current_legal_actions = []
self.street_dict = {'0': 0, '3': 0, '4': 0, '5': 0}
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.aggression_factor = False
self.discarded_card = None
def get_action(self, data_list):
self.current_pot_size = int(data_list[1])
self.opc = self.starting_stack_size - self.current_pot_size
self.time_bank = float(data_list[-1])
num_board_cards = int(data_list[2])
self.street_dict[str(num_board_cards)] += 1
if self.current_game_state == 'PREFLOP':
if self.street_dict['3'] > 0 and self.street_dict['4'] == 0:
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.current_game_state = 'FLOPTURN'
self.num_flop += 1
elif self.current_game_state == 'FLOPTURN':
if self.street_dict['4'] > 0 and self.street_dict['5'] == 0:
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.current_game_state = 'TURNRIVER'
elif self.current_game_state == 'TURNRIVER':
if self.street_dict['5'] > 0:
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.current_game_state = 'POSTRIVER'
for i in range(num_board_cards):
board_card = data_list[3 + i]
if board_card not in self.board_cards:
self.board_cards.append(data_list[3 + i])
if num_board_cards > 0:
board_cards = []
for board_card in self.board_cards:
board_cards.append(Card.new(board_card))
hand = []
for card in self.current_hand:
hand.append(Card.new(card))
self.hand_score = Evaluator().evaluate(hand, board_cards)
self.hand_class = Evaluator().class_to_string(Evaluator().get_rank_class(self.hand_score))
index = 3 + num_board_cards
num_last_actions = int(data_list[index])
index += 1
current_last_actions = []
for i in range(num_last_actions):
current_last_actions.append(data_list[index + i])
self.last_actions.append(current_last_actions)
if self.discard:
for action in current_last_actions:
if 'DISCARD' in action and self.name in action:
old_card = action[8:10]
new_card = action[11:13]
self.current_hand[self.current_hand.index(old_card)] = new_card
self.current_hand_strength = Hand.hand_win_odds(self.current_hand)
self.discard = False
break
if self.current_game_state == 'PREFLOP':
if self.current_pot_size == 4:
if self.button:
self.vpip += 1
self.has_called = True
else:
self.opponent_vpip += 1
self.opponent_has_called = True
else:
for action in current_last_actions:
if 'RAISE' in action:
round_num = self.street_dict['0']
if round_num == 1:
self.opponent_pfr += 1
self.opponent_vpip += 1
self.opponent_has_two_bet = True
elif round_num == 2:
if self.button:
if self.name in action:
self.pfr += 1
self.vpip += 1
self.has_two_bet = True
else:
self.opponent_pfr += 1
self.opponent_vpip += 1
self.opponent_has_three_bet = True
else:
if self.name in action:
self.pfr += 1
self.vpip += 1
self.has_three_bet = True
else:
self.opponent_pfr += 1
self.opponent_vpip += 1
self.opponent_has_four_bet = True
elif round_num == 3:
if self.name in action:
self.pfr += 1
self.vpip += 1
elif 'CALL' in action:
if self.name in action:
self.vpip += 1
else:
self.opponent_vpip += 1
elif self.current_game_state == 'FLOPTURN':
round_num = self.street_dict['3']
if round_num == 1:
self.discard = True
elif round_num == 2:
for action in current_last_actions:
if 'BET' in action:
self.opponent_c_bet += 1
break
elif round_num == 3:
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.c_bet += 1
else:
self.opponent_c_bet += 1
elif 'RAISE' in action:
if self.name in action:
self.has_two_bet = True
else:
if self.button:
self.opponent_has_three_bet = True
else:
self.opponent_has_two_bet = True
elif round_num == 4:
for action in current_last_actions:
if 'RAISE' in action:
if self.name in action:
if self.button:
self.has_four_bet = True
else:
self.has_three_bet = True
break
elif self.current_game_state == 'TURNRIVER':
round_num = self.street_dict['4']
if round_num == 1:
self.discard = True
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.c_bet += 1
else:
self.opponent_c_bet += 1
break
elif round_num == 2:
for action in current_last_actions:
if 'BET' in action:
self.opponent_c_bet += 1
break
elif round_num == 3:
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.c_bet += 1
else:
self.opponent_c_bet += 1
elif 'RAISE' in action:
if self.name in action:
self.has_two_bet = True
else:
if self.button:
self.opponent_has_three_bet = True
else:
self.opponent_has_two_bet = True
elif round_num == 4:
for action in current_last_actions:
if 'RAISE' in action:
if self.name in action:
if self.button:
self.has_four_bet = True
else:
self.has_three_bet = True
break
elif self.current_game_state == 'POSTRIVER':
round_num = self.street_dict['5']
if round_num == 1:
for action in current_last_actions:
if 'BET' in action:
if self.name in action:
self.double_barrel += 1
else:
self.opponent_double_barrel += 1
break
index += num_last_actions
num_legal_actions = int(data_list[index])
index += 1
self.current_legal_actions = []
for i in range(num_legal_actions):
self.current_legal_actions.append(data_list[index + i])
def legal_action(self, action):
for legal_action in self.current_legal_actions:
if action in legal_action:
if action == 'BET' or action == 'RAISE':
index = legal_action.index(':') + 1
sub = legal_action[index:]
index = sub.index(':')
return [int(sub[:index]), int(sub[index+1:])]
if action == 'CALL':
for last_action in self.last_actions[-1]:
if 'RAISE' in last_action and self.opponent_name in last_action:
sub = last_action[last_action.index(':')+1:]
return int(sub[:sub.index(':')])
return True
return None
def hand_over(self, data_list):
        num_board_cards = int(data_list[3])
        index = 4 + num_board_cards
        num_last_actions = int(data_list[index])
        index += 1
current_last_actions = []
for i in range(num_last_actions):
current_last_actions.append(data_list[index+i])
if self.current_game_state == 'PREFLOP':
for action in current_last_actions:
if 'FOLD' in action and self.opponent_name in action:
if self.button:
for last_action in self.last_actions[-1]:
if 'RAISE' in last_action and self.name in last_action:
self.opponent_fold_pfr += 1
if self.has_three_bet and not self.has_four_bet:
self.opponent_fold_three_bet += 1
self.num_wins += 1
else:
for last_action in current_last_actions:
if 'RAISE' in last_action and self.name in last_action:
self.opponent_fold_pfr += 1
if self.has_three_bet and not self.has_four_bet:
self.opponent_fold_three_bet += 1
self.num_wins += 1
elif self.current_game_state == 'FLOPTURN':
for action in current_last_actions:
if self.button:
if 'FOLD' in action and self.opponent_name in action:
for last_action in self.last_actions[-1]:
if 'BET' in last_action and self.name in last_action:
self.opponent_fold_c_bet += 1
self.num_wins += 1
else:
if 'FOLD' in action and self.opponent_name in action:
for last_action in current_last_actions:
if 'BET' in last_action and self.name in last_action:
self.opponent_fold_c_bet += 1
self.num_wins += 1
elif self.current_game_state == 'POSTRIVER':
for action in current_last_actions:
if 'WIN' in action:
if self.name in action:
self.num_wins += 1
for last_action in current_last_actions:
if 'SHOW' in last_action:
self.showdown += 1
self.showdown_win += 1
break
break
| 2.84375 | 3 |
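The `legal_action` parser above assumes the engine formats bet/raise options as `NAME:min:max`. A round-trip of that slicing logic on a made-up action string:

```python
legal_action = 'RAISE:4:200'  # hypothetical engine message
index = legal_action.index(':') + 1
sub = legal_action[index:]
index = sub.index(':')
print([int(sub[:index]), int(sub[index + 1:])])  # [4, 200]
```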
todo/models.py | zyayoung/share-todo | 0 | 5307 | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class Todo(models.Model):
time_add = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=64)
detail = models.TextField(blank=True)
deadline = models.DateTimeField(blank=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
done = models.BooleanField(default=False)
def __str__(self):
return self.title
def seconds_left(self):
return (self.deadline - timezone.now()).total_seconds()
def state(self):
if self.done:
return 'Done'
elif self.seconds_left() > 0:
return 'Todo'
else:
return 'Exceeded'
class Meta:
ordering = ['deadline']
| 2.265625 | 2 |
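The three-way `state()` above reduces to a small decision table; a pure-Python restatement (a hypothetical helper, independent of Django) makes the logic easy to unit-test:

```python
def state(done: bool, seconds_left: float) -> str:
    # Mirrors Todo.state(): done wins, then the sign of the time remaining.
    if done:
        return 'Done'
    return 'Todo' if seconds_left > 0 else 'Exceeded'

assert state(True, -10.0) == 'Done'
assert state(False, 60.0) == 'Todo'
assert state(False, -1.0) == 'Exceeded'
```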
examples/Tutorial/Example/app.py | DrewLazzeriKitware/trame | 0 | 5308 | import os
from trame import change, update_state
from trame.layouts import SinglePageWithDrawer
from trame.html import vtk, vuetify, widgets
from vtkmodules.vtkCommonDataModel import vtkDataObject
from vtkmodules.vtkFiltersCore import vtkContourFilter
from vtkmodules.vtkIOXML import vtkXMLUnstructuredGridReader
from vtkmodules.vtkRenderingAnnotation import vtkCubeAxesActor
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkDataSetMapper,
vtkRenderer,
vtkRenderWindow,
vtkRenderWindowInteractor,
)
# Required for interactor factory initialization
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleSwitch # noqa
# Required for remote rendering factory initialization, not necessary for
# local rendering, but doesn't hurt to include it
import vtkmodules.vtkRenderingOpenGL2 # noqa
CURRENT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
class Representation:
Points = 0
Wireframe = 1
Surface = 2
SurfaceWithEdges = 3
class LookupTable:
Rainbow = 0
Inverted_Rainbow = 1
Greyscale = 2
Inverted_Greyscale = 3
# -----------------------------------------------------------------------------
# VTK pipeline
# -----------------------------------------------------------------------------
renderer = vtkRenderer()
renderWindow = vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()
# Read Data
reader = vtkXMLUnstructuredGridReader()
reader.SetFileName(os.path.join(CURRENT_DIRECTORY, "../data/disk_out_ref.vtu"))
reader.Update()
# Extract Array/Field information
dataset_arrays = []
fields = [
(reader.GetOutput().GetPointData(), vtkDataObject.FIELD_ASSOCIATION_POINTS),
(reader.GetOutput().GetCellData(), vtkDataObject.FIELD_ASSOCIATION_CELLS),
]
for field in fields:
field_arrays, association = field
for i in range(field_arrays.GetNumberOfArrays()):
array = field_arrays.GetArray(i)
array_range = array.GetRange()
dataset_arrays.append(
{
"text": array.GetName(),
"value": i,
"range": list(array_range),
"type": association,
}
)
default_array = dataset_arrays[0]
default_min, default_max = default_array.get("range")
# Mesh
mesh_mapper = vtkDataSetMapper()
mesh_mapper.SetInputConnection(reader.GetOutputPort())
mesh_actor = vtkActor()
mesh_actor.SetMapper(mesh_mapper)
renderer.AddActor(mesh_actor)
# Mesh: Setup default representation to surface
mesh_actor.GetProperty().SetRepresentationToSurface()
mesh_actor.GetProperty().SetPointSize(1)
mesh_actor.GetProperty().EdgeVisibilityOff()
# Mesh: Apply rainbow color map
mesh_lut = mesh_mapper.GetLookupTable()
mesh_lut.SetHueRange(0.666, 0.0)
mesh_lut.SetSaturationRange(1.0, 1.0)
mesh_lut.SetValueRange(1.0, 1.0)
mesh_lut.Build()
# Mesh: Color by default array
mesh_mapper.SelectColorArray(default_array.get("text"))
mesh_mapper.GetLookupTable().SetRange(default_min, default_max)
if default_array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
mesh_mapper.SetScalarModeToUsePointFieldData()
else:
mesh_mapper.SetScalarModeToUseCellFieldData()
mesh_mapper.SetScalarVisibility(True)
mesh_mapper.SetUseLookupTableScalarRange(True)
# Contour
contour = vtkContourFilter()
contour.SetInputConnection(reader.GetOutputPort())
contour_mapper = vtkDataSetMapper()
contour_mapper.SetInputConnection(contour.GetOutputPort())
contour_actor = vtkActor()
contour_actor.SetMapper(contour_mapper)
renderer.AddActor(contour_actor)
# Contour: ContourBy default array
contour_value = 0.5 * (default_max + default_min)
contour.SetInputArrayToProcess(
0, 0, 0, default_array.get("type"), default_array.get("text")
)
contour.SetValue(0, contour_value)
# Contour: Setup default representation to surface
contour_actor.GetProperty().SetRepresentationToSurface()
contour_actor.GetProperty().SetPointSize(1)
contour_actor.GetProperty().EdgeVisibilityOff()
# Contour: Apply rainbow color map
contour_lut = contour_mapper.GetLookupTable()
contour_lut.SetHueRange(0.666, 0.0)
contour_lut.SetSaturationRange(1.0, 1.0)
contour_lut.SetValueRange(1.0, 1.0)
contour_lut.Build()
# Contour: Color by default array
contour_mapper.GetLookupTable().SetRange(default_min, default_max)
contour_mapper.SelectColorArray(default_array.get("text"))
if default_array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
contour_mapper.SetScalarModeToUsePointFieldData()
else:
contour_mapper.SetScalarModeToUseCellFieldData()
contour_mapper.SetScalarVisibility(True)
contour_mapper.SetUseLookupTableScalarRange(True)
# Cube Axes
cube_axes = vtkCubeAxesActor()
renderer.AddActor(cube_axes)
# Cube Axes: Boundaries, camera, and styling
cube_axes.SetBounds(mesh_actor.GetBounds())
cube_axes.SetCamera(renderer.GetActiveCamera())
cube_axes.SetXLabelFormat("%6.1f")
cube_axes.SetYLabelFormat("%6.1f")
cube_axes.SetZLabelFormat("%6.1f")
cube_axes.SetFlyModeToOuterEdges()
renderer.ResetCamera()
# -----------------------------------------------------------------------------
# trame Views
# -----------------------------------------------------------------------------
local_view = vtk.VtkLocalView(renderWindow)
remote_view = vtk.VtkRemoteView(renderWindow, interactive_ratio=(1,))
html_view = local_view
# -----------------------------------------------------------------------------
# Callbacks
# -----------------------------------------------------------------------------
def update_view(**kwargs):
html_view.update()
# -----------------------------------------------------------------------------
# Toolbar Callbacks
# -----------------------------------------------------------------------------
@change("cube_axes_visibility")
def update_cube_axes_visibility(cube_axes_visibility, **kwargs):
cube_axes.SetVisibility(cube_axes_visibility)
update_view()
@change("local_vs_remote")
def update_local_vs_remote(local_vs_remote, **kwargs):
# Switch html_view
global html_view
if local_vs_remote:
html_view = local_view
else:
html_view = remote_view
# Update layout
layout.content.children[0].children[0] = html_view
layout.flush_content()
# Update View
update_view()
# -----------------------------------------------------------------------------
# Representation Callbacks
# -----------------------------------------------------------------------------
def update_representation(actor, mode):
property = actor.GetProperty()
if mode == Representation.Points:
property.SetRepresentationToPoints()
property.SetPointSize(5)
property.EdgeVisibilityOff()
elif mode == Representation.Wireframe:
property.SetRepresentationToWireframe()
property.SetPointSize(1)
property.EdgeVisibilityOff()
elif mode == Representation.Surface:
property.SetRepresentationToSurface()
property.SetPointSize(1)
property.EdgeVisibilityOff()
elif mode == Representation.SurfaceWithEdges:
property.SetRepresentationToSurface()
property.SetPointSize(1)
property.EdgeVisibilityOn()
@change("mesh_representation")
def update_mesh_representation(mesh_representation, **kwargs):
update_representation(mesh_actor, mesh_representation)
update_view()
@change("contour_representation")
def update_contour_representation(contour_representation, **kwargs):
update_representation(contour_actor, contour_representation)
update_view()
# -----------------------------------------------------------------------------
# ColorBy Callbacks
# -----------------------------------------------------------------------------
def color_by_array(actor, array):
_min, _max = array.get("range")
mapper = actor.GetMapper()
mapper.SelectColorArray(array.get("text"))
mapper.GetLookupTable().SetRange(_min, _max)
    if array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
        mapper.SetScalarModeToUsePointFieldData()
    else:
        mapper.SetScalarModeToUseCellFieldData()
mapper.SetScalarVisibility(True)
mapper.SetUseLookupTableScalarRange(True)
@change("mesh_color_array_idx")
def update_mesh_color_by_name(mesh_color_array_idx, **kwargs):
array = dataset_arrays[mesh_color_array_idx]
color_by_array(mesh_actor, array)
update_view()
@change("contour_color_array_idx")
def update_contour_color_by_name(contour_color_array_idx, **kwargs):
array = dataset_arrays[contour_color_array_idx]
color_by_array(contour_actor, array)
update_view()
# -----------------------------------------------------------------------------
# ColorMap Callbacks
# -----------------------------------------------------------------------------
def use_preset(actor, preset):
lut = actor.GetMapper().GetLookupTable()
if preset == LookupTable.Rainbow:
lut.SetHueRange(0.666, 0.0)
lut.SetSaturationRange(1.0, 1.0)
lut.SetValueRange(1.0, 1.0)
elif preset == LookupTable.Inverted_Rainbow:
lut.SetHueRange(0.0, 0.666)
lut.SetSaturationRange(1.0, 1.0)
lut.SetValueRange(1.0, 1.0)
elif preset == LookupTable.Greyscale:
lut.SetHueRange(0.0, 0.0)
lut.SetSaturationRange(0.0, 0.0)
lut.SetValueRange(0.0, 1.0)
elif preset == LookupTable.Inverted_Greyscale:
lut.SetHueRange(0.0, 0.666)
lut.SetSaturationRange(0.0, 0.0)
lut.SetValueRange(1.0, 0.0)
lut.Build()
@change("mesh_color_preset")
def update_mesh_color_preset(mesh_color_preset, **kwargs):
use_preset(mesh_actor, mesh_color_preset)
update_view()
@change("contour_color_preset")
def update_contour_color_preset(contour_color_preset, **kwargs):
use_preset(contour_actor, contour_color_preset)
update_view()
# -----------------------------------------------------------------------------
# Opacity Callbacks
# -----------------------------------------------------------------------------
@change("mesh_opacity")
def update_mesh_opacity(mesh_opacity, **kwargs):
mesh_actor.GetProperty().SetOpacity(mesh_opacity)
update_view()
@change("contour_opacity")
def update_contour_opacity(contour_opacity, **kwargs):
contour_actor.GetProperty().SetOpacity(contour_opacity)
update_view()
# -----------------------------------------------------------------------------
# Contour Callbacks
# -----------------------------------------------------------------------------
@change("contour_by_array_idx")
def update_contour_by(contour_by_array_idx, **kwargs):
array = dataset_arrays[contour_by_array_idx]
contour_min, contour_max = array.get("range")
contour_step = 0.01 * (contour_max - contour_min)
contour_value = 0.5 * (contour_max + contour_min)
contour.SetInputArrayToProcess(0, 0, 0, array.get("type"), array.get("text"))
contour.SetValue(0, contour_value)
# Update UI
update_state("contour_min", contour_min)
update_state("contour_max", contour_max)
update_state("contour_value", contour_value)
update_state("contour_step", contour_step)
# Update View
update_view()
@change("contour_value")
def update_contour_value(contour_value, **kwargs):
contour.SetValue(0, float(contour_value))
update_view()
# -----------------------------------------------------------------------------
# Pipeline Widget Callbacks
# -----------------------------------------------------------------------------
# Selection Change
def actives_change(ids):
_id = ids[0]
if _id == "1": # Mesh
update_state("active_ui", "mesh")
elif _id == "2": # Contour
update_state("active_ui", "contour")
else:
update_state("active_ui", "nothing")
# Visibility Change
def visibility_change(event):
_id = event["id"]
_visibility = event["visible"]
if _id == "1": # Mesh
mesh_actor.SetVisibility(_visibility)
elif _id == "2": # Contour
contour_actor.SetVisibility(_visibility)
update_view()
# -----------------------------------------------------------------------------
# GUI Toolbar Buttons
# -----------------------------------------------------------------------------
def standard_buttons():
vuetify.VCheckbox(
v_model=("cube_axes_visibility", True),
on_icon="mdi-cube-outline",
off_icon="mdi-cube-off-outline",
classes="mx-1",
hide_details=True,
dense=True,
)
vuetify.VCheckbox(
v_model="$vuetify.theme.dark",
on_icon="mdi-lightbulb-off-outline",
off_icon="mdi-lightbulb-outline",
classes="mx-1",
hide_details=True,
dense=True,
)
vuetify.VCheckbox(
v_model=("local_vs_remote", True),
on_icon="mdi-lan-disconnect",
off_icon="mdi-lan-connect",
classes="mx-1",
hide_details=True,
dense=True,
)
with vuetify.VBtn(icon=True, click="$refs.view.resetCamera()"):
vuetify.VIcon("mdi-crop-free")
# -----------------------------------------------------------------------------
# GUI Pipelines Widget
# -----------------------------------------------------------------------------
def pipeline_widget():
widgets.GitTree(
sources=(
"pipeline",
[
{"id": "1", "parent": "0", "visible": 1, "name": "Mesh"},
{"id": "2", "parent": "1", "visible": 1, "name": "Contour"},
],
),
actives_change=(actives_change, "[$event]"),
visibility_change=(visibility_change, "[$event]"),
)
# -----------------------------------------------------------------------------
# GUI Cards
# -----------------------------------------------------------------------------
def ui_card(title, ui_name):
with vuetify.VCard(v_show=f"active_ui == '{ui_name}'"):
vuetify.VCardTitle(
title,
classes="grey lighten-1 py-1 grey--text text--darken-3",
style="user-select: none; cursor: pointer",
hide_details=True,
dense=True,
)
content = vuetify.VCardText(classes="py-2")
return content
def mesh_card():
with ui_card(title="Mesh", ui_name="mesh"):
vuetify.VSelect(
v_model=("mesh_representation", Representation.Surface),
items=(
"representations",
[
{"text": "Points", "value": 0},
{"text": "Wireframe", "value": 1},
{"text": "Surface", "value": 2},
{"text": "SurfaceWithEdges", "value": 3},
],
),
label="Representation",
hide_details=True,
dense=True,
outlined=True,
classes="pt-1",
)
with vuetify.VRow(classes="pt-2", dense=True):
with vuetify.VCol(cols="6"):
vuetify.VSelect(
label="Color by",
v_model=("mesh_color_array_idx", 0),
items=("array_list", dataset_arrays),
hide_details=True,
dense=True,
outlined=True,
classes="pt-1",
)
with vuetify.VCol(cols="6"):
vuetify.VSelect(
label="Colormap",
v_model=("mesh_color_preset", LookupTable.Rainbow),
items=(
"colormaps",
[
{"text": "Rainbow", "value": 0},
{"text": "Inv Rainbow", "value": 1},
{"text": "Greyscale", "value": 2},
{"text": "Inv Greyscale", "value": 3},
],
),
hide_details=True,
dense=True,
outlined=True,
classes="pt-1",
)
vuetify.VSlider(
v_model=("mesh_opacity", 1.0),
min=0,
max=1,
step=0.1,
label="Opacity",
classes="mt-1",
hide_details=True,
dense=True,
)
def contour_card():
with ui_card(title="Contour", ui_name="contour"):
vuetify.VSelect(
label="Contour by",
v_model=("contour_by_array_idx", 0),
items=("array_list", dataset_arrays),
hide_details=True,
dense=True,
outlined=True,
classes="pt-1",
)
vuetify.VSlider(
v_model=("contour_value", contour_value),
min=("contour_min", default_min),
max=("contour_max", default_max),
step=("contour_step", 0.01 * (default_max - default_min)),
label="Value",
classes="my-1",
hide_details=True,
dense=True,
)
vuetify.VSelect(
v_model=("contour_representation", Representation.Surface),
items=(
"representations",
[
{"text": "Points", "value": 0},
{"text": "Wireframe", "value": 1},
{"text": "Surface", "value": 2},
{"text": "SurfaceWithEdges", "value": 3},
],
),
label="Representation",
hide_details=True,
dense=True,
outlined=True,
classes="pt-1",
)
with vuetify.VRow(classes="pt-2", dense=True):
with vuetify.VCol(cols="6"):
vuetify.VSelect(
label="Color by",
v_model=("contour_color_array_idx", 0),
items=("array_list", dataset_arrays),
hide_details=True,
dense=True,
outlined=True,
classes="pt-1",
)
with vuetify.VCol(cols="6"):
vuetify.VSelect(
label="Colormap",
v_model=("contour_color_preset", LookupTable.Rainbow),
items=(
"colormaps",
[
{"text": "Rainbow", "value": 0},
{"text": "Inv Rainbow", "value": 1},
{"text": "Greyscale", "value": 2},
{"text": "Inv Greyscale", "value": 3},
],
),
hide_details=True,
dense=True,
outlined=True,
classes="pt-1",
)
vuetify.VSlider(
v_model=("contour_opacity", 1.0),
min=0,
max=1,
step=0.1,
label="Opacity",
classes="mt-1",
hide_details=True,
dense=True,
)
# -----------------------------------------------------------------------------
# GUI
# -----------------------------------------------------------------------------
layout = SinglePageWithDrawer("Viewer", on_ready=update_view)
layout.title.set_text("Viewer")
with layout.toolbar:
# toolbar components
vuetify.VSpacer()
vuetify.VDivider(vertical=True, classes="mx-2")
standard_buttons()
with layout.drawer as drawer:
# drawer components
drawer.width = 325
pipeline_widget()
vuetify.VDivider(classes="mb-2")
mesh_card()
contour_card()
with layout.content:
# content components
vuetify.VContainer(
fluid=True,
classes="pa-0 fill-height",
children=[html_view],
)
# State use to track active ui card
layout.state = {
"active_ui": None,
}
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
layout.start()
| 1.59375 | 2 |
webots_ros2_tutorials/webots_ros2_tutorials/master.py | AleBurzio11/webots_ros2 | 1 | 5309 | # Copyright 1996-2021 Soft_illusion.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float64
from geometry_msgs.msg import Twist
class LineFollower(Node):
def __init__(self):
super().__init__('linefollower_cmdvel')
# Subscribe Infra Red sensors
self.subs_right_ir = self.create_subscription(
Float64, 'right_IR', self.right_infrared_callback, 1)
self.subs_left_ir = self.create_subscription(
Float64, 'left_IR', self.left_infrared_callback, 1)
self.subs_mid_ir = self.create_subscription(
Float64, 'mid_IR', self.mid_infrared_callback, 1)
# Publish cmd vel
self.pubs_cmdvel = self.create_publisher(Twist, 'cmd_vel', 1)
# vehicle parameters
self.speed = 0.2
self.angle_correction = 0.01
# Initialize parameters
self.ground_right, self.ground_mid, self.ground_left = 0, 0, 0
self.delta = 0
self.cmd = Twist()
self.stop = False
self.count = 0
self.count_threshold = 10
def lineFollowingModule(self):
# Constant velocity
self.cmd.linear.x = self.speed
# Correction parameters
self.delta = self.ground_right - self.ground_left
self.cmd.angular.z = self.angle_correction*self.delta
        # Stop if the black line has not been seen for several consecutive readings.
if self.ground_right > 500 and self.ground_left > 500 and self.ground_mid > 500:
self.count += 1
else:
self.count = 0
if self.count > self.count_threshold:
self.stop = True
if self.stop:
self.cmd.linear.x = 0.0
self.cmd.angular.z = 0.0
# Publish cmd vel
self.pubs_cmdvel.publish(self.cmd)
self.stop = False
# Call backs to update sensor reading variables
def right_infrared_callback(self, msg):
self.ground_right = msg.data
self.lineFollowingModule()
def left_infrared_callback(self, msg):
self.ground_left = msg.data
def mid_infrared_callback(self, msg):
self.ground_mid = msg.data
def main(args=None):
rclpy.init(args=args)
ls = LineFollower()
rclpy.spin(ls)
ls.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| 2.046875 | 2 |
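The controller above steers with a simple proportional law: angular velocity is the left/right infrared difference scaled by `angle_correction`. A worked example with made-up sensor readings (in ROS convention, positive `angular.z` is a counter-clockwise turn):

```python
angle_correction = 0.01
ground_right, ground_left = 750.0, 400.0  # hypothetical IR readings
delta = ground_right - ground_left
print(angle_correction * delta)  # 3.5 rad/s commanded, turning counter-clockwise
```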
dev-template/src/mysql_connect_sample.py | arrowkato/pytest-CircleiCI | 0 | 5310 | import mysql.connector
from mysql.connector import errorcode
config = {
'user': 'user',
'password': 'password',
'host': 'mysql_container',
'database': 'sample_db',
'port': '3306',
}
if __name__ == "__main__":
try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor()
cursor.execute('select * from users')
for row in cursor.fetchall():
            print("name: " + str(row[0]) + ", time_zone_id: " + str(row[1]))
conn.close()
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
conn.close()
| 2.96875 | 3 |
Mundo 1/ex011.py | viniciusbonito/CeV-Python-Exercicios | 0 | 5311 | # criar um programa que pergunte as dimensões de uma parede, calcule sua área e informe quantos litros de tinta
# seriam necessários para a pintura, após perguntar o rendimento da tinta informado na lata
print('=' * 40)
print('{:^40}'.format('Assistente de pintura'))
print('=' * 40)
altura = float(input('Informe a altura da parede em metros: '))
largura = float(input('Informe a largura da parede em metros: '))
area = altura * largura
print('\nA área total da parede é de {:.2f}m²'.format(area))
litros = float(input('\nQuantos litros contém a lata de tinta escolhida? '))
rendlata = float(input('Qual o rendimento em metros informado na lata? '))
rendlitro = rendlata / litros
print('\nSe a lata possui {:.2f}L e rende {:.2f}m²'.format(litros, rendlata))
print('então o rendimento por litro é de {:.2f}m²'.format(rendlitro))
print('\nSerão necessário {:.2f}L para pintar toda a parede'.format(area / rendlitro)) | 4.03125 | 4 |
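Worked numbers for the calculation above (all made up): a 2.5 m by 4 m wall is 10 m²; an 18 L can rated for 54 m² covers 3 m² per liter, so the wall needs about 3.33 L:

```python
area = 2.5 * 4           # 10.0 m^2
rendlitro = 54 / 18      # 3.0 m^2 per liter
print(area / rendlitro)  # 3.333... liters of paint needed
```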
Python/Mundo 3/ex088.py | henrique-tavares/Coisas | 1 | 5312 | from random import sample
from time import sleep
jogos = list()
print('-' * 20)
print(f'{"MEGA SENA":^20}')
print('-' * 20)
while True:
    n = int(input("\nHow many games do you want me to draw? "))
    if n > 0:
        break
    print('\n[ERROR] Value out of range')
print()
print('-=' * 3, end=' ')
print(f'DRAWING {n} GAMES', end=' ')
print('-=' * 3)
for i in range(n):
jogos.append(sample(range(1,61), 6))
sleep(0.6)
    print(f'Game {i+1}: {jogos[i]}')
print('-=' * 5, end=' ')
print('< GOOD LUCK >', end=' ')
print('-=' * 3, end='\n\n') | 3.5 | 4 |
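`sample(range(1, 61), 6)` draws without replacement, so a game can never repeat a number. A single draw looks like this (output varies run to run):

```python
from random import sample

print(sorted(sample(range(1, 61), 6)))  # e.g. [3, 17, 24, 40, 52, 58]
```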
tests/test_utils.py | django-roles-access/master | 5 | 5313 | from importlib import import_module
from unittest import TestCase as UnitTestCase
from django.contrib.auth.models import Group
from django.core.management import BaseCommand
from django.conf import settings
from django.test import TestCase
from django.views.generic import TemplateView
try:
from unittest.mock import Mock, patch, MagicMock
except ImportError:
from mock import Mock, patch
from django_roles_access.decorator import access_by_role
from django_roles_access.mixin import RolesMixin
from django_roles_access.models import ViewAccess
from tests import views
from django_roles_access.utils import (walk_site_url, get_views_by_app,
view_access_analyzer,
get_view_analyze_report,
check_django_roles_is_used,
analyze_by_role, APP_NAME_FOR_NONE,
NOT_SECURED_DEFAULT, SECURED_DEFAULT,
PUBLIC_DEFAULT, NONE_TYPE_DEFAULT,
DISABLED_DEFAULT, OutputReport)
class MockRegex:
def __init__(self):
self.pattern = '^fake-regex-pattern/$'
class MockRegexResolver:
def __init__(self):
self.pattern = '^fake-resolver/'
class MockRegexResolverNested:
def __init__(self):
self.pattern = '^fake-nested-resolver/'
class MockPattern:
def __init__(self):
self.regex = MockRegex()
self.callback = 'fake-callback'
self.name = 'fake-view-name'
class MockResolver:
def __init__(self):
self.url_patterns = [MockPattern()]
self.regex = MockRegexResolver()
self.app_name = 'fake-app-name'
self.namespace = 'fake-namespace'
class MockResolverNested:
def __init__(self):
self.url_patterns = [MockResolver()]
self.regex = MockRegexResolverNested()
self.app_name = 'fake-app-name'
self.namespace = 'nested-namespace'
class MockPatternDjango2:
def __init__(self):
self.pattern = '^fake-pattern/'
self.callback = 'fake-callback'
self.name = 'fake-view-name'
class MockPatternDjango2None:
def __init__(self):
self.pattern = '^fake-pattern/'
self.callback = 'fake-callback'
self.name = 'fake-view-none'
class MockResolverDjango2:
def __init__(self):
self.pattern = '^fake-resolver/'
self.url_patterns = [MockPatternDjango2()]
self.app_name = 'fake-app-name'
self.namespace = 'fake-namespace'
class MockResolverDjango2None:
def __init__(self):
self.pattern = '^fake-resolver/'
self.url_patterns = [MockPatternDjango2None()]
self.app_name = None
self.namespace = None
class MockResolverDjango2None2:
def __init__(self):
self.pattern = '^fake-resolver/'
self.url_patterns = [MockResolverDjango2None()]
self.app_name = 'fake-app-name'
self.namespace = 'fake-namespace'
class MockResolverDjangoNested:
def __init__(self):
self.pattern = '^fake-nested-resolver/'
self.url_patterns = [MockResolverDjango2()]
self.app_name = 'fake-app-name'
self.namespace = 'nested-namespace'
class UnitTestWalkSiteURL(UnitTestCase):
def setUp(self):
self.pattern_1 = MockPattern()
self.data = [self.pattern_1]
def test_second_param_is_optional_return_a_list(self):
result = walk_site_url(self.data)
self.assertIsInstance(result, list)
def test_first_param_list_of_pattern_and_view(self):
result = walk_site_url(self.data)
self.assertEqual(result, [('fake-regex-pattern/', 'fake-callback',
'fake-view-name', None)])
def test_first_param_list_of_patterns_and_views(self):
pattern_2 = MockPattern()
pattern_2.regex.pattern = 'fake-regex-pattern-2/'
pattern_2.callback = 'fake-view-2'
result = walk_site_url([self.pattern_1, pattern_2])
self.assertEqual(result, [('fake-regex-pattern/', 'fake-callback',
'fake-view-name', None),
('fake-regex-pattern-2/', 'fake-view-2',
'fake-view-name', None)])
def test_param_list_with_pattern_and_resolver_django_1(self):
expected_result = [
('fake-regex-pattern/', 'fake-callback', 'fake-view-name', None),
('fake-resolver/fake-regex-pattern/',
'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
)]
resolver = MockResolver()
result = walk_site_url([self.pattern_1, resolver])
self.assertEqual(result, expected_result)
def test_param_list_with_pattern_and_nested_resolver_django_1(self):
expected_result = [
('fake-regex-pattern/', 'fake-callback', 'fake-view-name', None),
('fake-nested-resolver/fake-resolver/fake-regex-pattern/',
'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
'fake-app-name'
)
]
resolver = MockResolverNested()
result = walk_site_url([self.pattern_1, resolver])
self.assertEqual(result, expected_result)
def test_param_list_with_pattern_and_resolver_django_2(self):
expected_result = [
('fake-pattern/', 'fake-callback', 'fake-view-name', None),
('fake-resolver/fake-pattern/',
'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
)
]
resolver = MockResolverDjango2()
result = walk_site_url([MockPatternDjango2(), resolver])
self.assertEqual(result, expected_result)
def test_param_list_with_pattern_and_nested_resolver_django_2(self):
expected_result = [
('fake-pattern/', 'fake-callback', 'fake-view-name', None),
('fake-nested-resolver/fake-resolver/fake-pattern/',
'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
'fake-app-name'
)
]
result = walk_site_url([MockPatternDjango2(),
MockResolverDjangoNested()])
self.assertEqual(result, expected_result)
def test_param_list_with_resolver_get_app_name_and_view_name_django_1(self):
expected_result = [
('fake-resolver/fake-regex-pattern/',
'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
),
('fake-nested-resolver/fake-resolver/fake-regex-pattern/',
'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
'fake-app-name'
)
]
result = walk_site_url([MockResolver(), MockResolverNested()])
self.assertEqual(result, expected_result)
def test_param_list_with_resolver_get_app_name_and_view_name_django_2(self):
expected_result = [
('fake-resolver/fake-pattern/',
'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
),
('fake-nested-resolver/fake-resolver/fake-pattern/',
'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
'fake-app-name'
)
]
resolver = MockResolverDjango2()
nested_resolver = MockResolverDjangoNested()
result = walk_site_url([resolver, nested_resolver])
self.assertEqual(result, expected_result)
def test_when_url_namespace_is_None(self):
expected_result = [
('fake-resolver/fake-resolver/fake-pattern/',
'fake-callback', 'fake-view-none', None
)
]
resolver = MockResolverDjango2None2()
result = walk_site_url([resolver])
self.assertEqual(result, expected_result)
# def test_when_view_name_is_None(self):
# expected_result = [
# ('fake-resolver/fake-pattern/',
# 'fake-callback', 'fake-view-name', None
# )
# ]
# resolver = MockResolverDjango2None2()
# result = walk_site_url([resolver])
# print(result)
# self.assertEqual(result, expected_result)
class IntegratedTestWalkSiteURL(TestCase):
def setUp(self):
self.url = import_module(settings.ROOT_URLCONF).urlpatterns
def test_found_direct_access_view(self):
expected_result = ('direct_access_view/',
views.protected_view_by_role,
'direct_access_view', None)
result = walk_site_url(self.url)
self.assertIn(expected_result, result)
def test_found_included_view_without_namespace(self):
expected_result = ('role-included[135]/view_by_role/',
views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
'django_roles_access')
result = walk_site_url(self.url)
self.assertIn(expected_result, result)
def test_found_included_view_with_namespace(self):
expected_result = ('role-included2/view_by_role/',
views.protected_view_by_role,
'app-ns2:view_protected_by_role',
'django_roles_access')
result = walk_site_url(self.url)
self.assertIn(expected_result, result)
def test_found_nested_access_view(self):
expected_result = ('nest1/nest2/view_by_role/',
views.protected_view_by_role,
'nest1_namespace:nest2_namespace:view_'
'protected_by_role',
'roles-app-name')
result = walk_site_url(self.url)
self.assertIn(expected_result, result)
class UnitTestGetViewsByApp(UnitTestCase):
"""
get_views_by_app receive the result of walk_site_url and is required to
return a dictionary with keys been installed applications.
"""
def setUp(self):
self.data = [('a', 'b', 'c', 'fake-app-1')]
@patch('django_roles_access.utils.settings')
def test_returns_a_dictionary(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2']
result = get_views_by_app(self.data)
self.assertIsInstance(result, dict)
@patch('django_roles_access.utils.settings')
def test_returns_a_dictionary_with_all_installed_apps(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2']
result = get_views_by_app(self.data)
assert 'fake-app-1' in result
assert 'fake-app-2' in result
@patch('django_roles_access.utils.settings')
def test_values_of_returned_dictionary_keys_are_lists(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2']
result = get_views_by_app(self.data)
self.assertIsInstance(result['fake-app-1'], list)
self.assertIsInstance(result['fake-app-2'], list)
@patch('django_roles_access.utils.settings')
def test_receive_list_of_tuples_with_4_element(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1']
result = get_views_by_app(self.data)
assert 'fake-app-1' in result
@patch('django_roles_access.utils.settings')
def test_raise_type_error_if_receive_list_of_tuples_with_3_element(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1']
data = [('a', 'b', 'c')]
with self.assertRaises(TypeError):
get_views_by_app(data)
@patch('django_roles_access.utils.settings')
def test_raise_type_error_if_receive_list_of_tuples_with_5_element(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1']
data = [('a', 'b', 'c', 'd', 'e')]
with self.assertRaises(TypeError):
get_views_by_app(data)
@patch('django_roles_access.utils.settings')
def test_received_data_is_ordered_and_returned_by_application(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2', None]
data = [('a', 'b', 'c', 'fake-app-1'), ('1', '2', '3', 'fake-app-2'),
('a1', 'b2', 'c3', None)]
expected_result = [('a', 'b', 'c')]
result = get_views_by_app(data)
self.assertEqual(expected_result, result['fake-app-1'])
@patch('django_roles_access.utils.settings')
def test_can_work_with_no_declared_application_name(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2', None]
data = [('a', 'b', 'c', 'fake-app-1'), ('1', '2', '3', 'fake-app-2'),
('a1', 'b2', 'c3', None)]
expected_result = [('a1', 'b2', 'c3')]
result = get_views_by_app(data)
self.assertEqual(expected_result, result[APP_NAME_FOR_NONE])
@patch('django_roles_access.utils.settings')
def test_if_application_is_not_in_installed_apps_will_not_be_in_dict(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2', None]
result = get_views_by_app(self.data)
assert 'fake-app-3' not in result
class IntegratedTestGetViewsByApp(TestCase):
def setUp(self):
self.url = import_module(settings.ROOT_URLCONF).urlpatterns
def test_not_declared_app_are_recognized_as_undefined_app(self):
expected_result = ('direct_access_view/',
views.protected_view_by_role,
'direct_access_view')
result = get_views_by_app(walk_site_url(self.url))
self.assertIn(expected_result, result[APP_NAME_FOR_NONE])
def test_views_without_namespace_are_added_with_app_name_in_view_name(self):
expected_result = ('role-included[135]/view_by_role/',
views.protected_view_by_role,
'django_roles_access:view_protected_by_role')
result = get_views_by_app(walk_site_url(self.url))
self.assertIn(expected_result, result['django_roles_access'])
def test_view_with_namespace_are_added_with_correct_app_name(self):
expected_result = ('role-included2/view_by_role/',
views.protected_view_by_role,
'app-ns2:view_protected_by_role')
result = get_views_by_app(walk_site_url(self.url))
self.assertIn(expected_result, result['django_roles_access'])
def test_nested_namespace_are_added_with_correct_app_name(self):
expected_result = ('nest1/nest2/view_by_role/',
views.protected_view_by_role,
'nest1_namespace:nest2_namespace:view_'
'protected_by_role')
result = get_views_by_app(walk_site_url(self.url))
self.assertIn(expected_result, result['roles-app-name'])
class TestGetViewAnalyzeReport(UnitTestCase):
def test_report_for_no_application_type(self):
expected = u'\t' + NONE_TYPE_DEFAULT
result = get_view_analyze_report(None)
self.assertEqual(result, expected)
def test_report_for_application_type_NOT_SECURED(self):
expected = u'\t' + NOT_SECURED_DEFAULT
result = get_view_analyze_report('NOT_SECURED')
self.assertEqual(result, expected)
def test_report_for_application_type_DISABLED(self):
expected = u'\t' + DISABLED_DEFAULT
result = get_view_analyze_report('DISABLED')
self.assertEqual(result, expected)
def test_report_for_application_type_SECURED(self):
expected = u'\t' + SECURED_DEFAULT
result = get_view_analyze_report('SECURED')
self.assertEqual(result, expected)
def test_report_for_application_type_PUBLIC(self):
expected = u'\t' + PUBLIC_DEFAULT
result = get_view_analyze_report('PUBLIC')
self.assertEqual(result, expected)
class TestCheckDjangoRolesIsUsed(UnitTestCase):
def test_detect_view_is_decorated(self):
@access_by_role
def function():
pass
self.assertTrue(check_django_roles_is_used(function))
def test_detect_view_is_not_decorated(self):
def function():
pass
        self.assertFalse(check_django_roles_is_used(function))
def test_detect_view_use_mixin(self):
class Aview(RolesMixin, TemplateView):
template_name = 'dummyTemplate.html'
self.assertTrue(check_django_roles_is_used(Aview))
def test_detect_view_not_use_mixin(self):
class Aview(TemplateView):
template_name = 'dummyTemplate.html'
self.assertFalse(check_django_roles_is_used(Aview))
@patch('django_roles_access.utils.ViewAccess')
class UnitTestAnalyzeByRoleAccess(UnitTestCase):
def test_detect_access_is_by_role(
self, mock_view_access
):
expected = u'ERROR: No roles configured to access de view.'
mock_view_access.type = 'br'
mock_view_access.roles.count.return_value = 0
result = analyze_by_role(mock_view_access)
self.assertEqual(result, expected)
def test_detect_access_is_not_by_role(
self, mock_view_access
):
expected = u''
mock_view_access.type = 'pu'
result = analyze_by_role(mock_view_access)
self.assertEqual(result, expected)
def test_detect_access_is_not_by_role_with_roles(
self, mock_view_access
):
expected = u'Roles with access: role-1, role-2'
mock_view_access.type = 'br'
role_1 = Mock()
role_1.name = u'role-1'
role_2 = Mock()
role_2.name = u'role-2'
mock_view_access.roles.all.return_value = [role_1, role_2]
result = analyze_by_role(mock_view_access)
self.assertEqual(result, expected)
def test_detect_access_is_not_by_role_without_roles(
self, mock_view_access
):
expected = u'ERROR: No roles configured to access de view.'
mock_view_access.type = 'br'
mock_view_access.roles.count.return_value = 0
result = analyze_by_role(mock_view_access)
self.assertEqual(result, expected)
class IntegratedTestAnalyzeByRoleAccess(TestCase):
def test_detect_access_is_by_role(self):
expected = u'ERROR: No roles configured to access de view.'
view_access = ViewAccess.objects.create(view='any-name', type='br')
result = analyze_by_role(view_access)
self.assertEqual(result, expected)
def test_detect_access_is_not_by_role(self):
expected = u''
view_access = ViewAccess.objects.create(view='any-name', type='pu')
result = analyze_by_role(view_access)
self.assertEqual(result, expected)
def test_detect_access_is_by_role_with_roles(self):
expected = u'Roles with access: role-1, role-2'
view_access = ViewAccess.objects.create(view='any-name', type='br')
role_1, created = Group.objects.get_or_create(name='role-1')
role_2, created = Group.objects.get_or_create(name='role-2')
view_access.roles.add(role_1)
view_access.roles.add(role_2)
view_access.save()
result = analyze_by_role(view_access)
self.assertEqual(result, expected)
def test_detect_access_is_not_by_role_without_roles(self):
expected = u'ERROR: No roles configured to access de view.'
view_access = ViewAccess.objects.create(view='any-name', type='br')
result = analyze_by_role(view_access)
self.assertEqual(result, expected)
@patch('django_roles_access.utils.ViewAccess.objects')
class UnitTestViewAnalyzer(UnitTestCase):
def test_view_analyzer_return_a_report(
self, mock_objects
):
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
result = view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', 'fake-site-active')
        try:
            self.assertIsInstance(result, unicode)
        except NameError:
            self.assertIsInstance(result, str)
def test_view_analyzer_search_view_access_for_the_view(
self, mock_objects
):
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', 'fake-site-active')
assert mock_objects.first.called
def test_view_analyzer_search_view_access_for_the_view_once(
self, mock_objects
):
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', 'fake-site-active')
self.assertEqual(mock_objects.filter.call_count, 1)
def test_view_analyzer_search_view_access_with_view_name(
self, mock_objects
):
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', 'fake-site-active')
mock_objects.filter.assert_called_once_with(view='fake-view-name')
def test_view_access_type_when_site_active_and_exists_view_access(
self, mock_objects
):
expected = u'View access is of type Public.'
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
result = view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
@patch('django_roles_access.utils.analyze_by_role')
def test_view_access_type_by_role_call_analyze_by_role(
self, mock_analyze_by_role, mock_objects
):
view_access = Mock()
view_access.type = 'br'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', True)
assert mock_analyze_by_role.called
@patch('django_roles_access.utils.analyze_by_role')
def test_view_access_type_by_role_call_analyze_by_role_once(
self, mock_analyze_by_role, mock_objects
):
view_access = Mock()
view_access.type = 'br'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', True)
self.assertEqual(mock_analyze_by_role.call_count, 1)
@patch('django_roles_access.utils.analyze_by_role')
def test_view_access_type_by_role_call_analyze_by_role_with_view_access(
self, mock_analyze_by_role, mock_objects
):
view_access = Mock()
view_access.type = 'br'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', True)
mock_analyze_by_role.assert_called_once_with(view_access)
def test_no_view_access_object_for_the_view_and_site_active_no_app_type(
self, mock_objects
):
expected = u'\t' + NONE_TYPE_DEFAULT
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer(None, 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
def test_no_view_access_object_and_site_active_app_type_NOT_SECURED(
self, mock_objects
):
expected = u'\t' + NOT_SECURED_DEFAULT
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('NOT_SECURED', 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
def test_no_view_access_object_and_site_active_app_type_DISABLED(
self, mock_objects
):
expected = u'\t' + DISABLED_DEFAULT
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('DISABLED', 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
def test_no_view_access_object_and_site_active_app_type_SECURED(
self, mock_objects
):
expected = u'\t' + SECURED_DEFAULT
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('SECURED', 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
def test_no_view_access_object_and_site_active_app_type_PUBLIC(
self, mock_objects
):
expected = u'\t' + PUBLIC_DEFAULT
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('PUBLIC', 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
def test_middleware_not_used_view_access_object_exist_and_dr_tools_used(
self, mock_objects
):
expected = u'View access is of type Public.'
@access_by_role
def function():
pass
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
result = view_access_analyzer('fake-app-type', function,
'fake-view-name', False)
self.assertEqual(result, expected)
def test_middleware_not_used_view_access_object_exist_and_dr_tools_not_used(
self, mock_objects
):
expected = u'ERROR: View access object exist for the view, but no '
expected += u'Django role access tool is used: neither decorator, '
expected += u'mixin, or middleware.'
def function():
pass
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
result = view_access_analyzer('fake-app-type', function,
'fake-view-name', False)
self.assertEqual(result, expected)
def test_middleware_not_used_dr_tools_are_used_no_view_access_object(
self, mock_objects
):
expected = u'\t' + PUBLIC_DEFAULT
@access_by_role
def function():
pass
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('PUBLIC', function,
'fake-view-name', False)
self.assertEqual(result, expected)
def test_no_django_roles_tools_used_no_application_type(
self, mock_objects
):
expected = u'No Django roles access tool used. Access to view depends '
expected += u'on its implementation.'
def function():
pass
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer(None, function,
'fake-view-name', False)
self.assertEqual(result, expected)
def test_no_django_roles_tools_used_application_type(
self, mock_objects
):
expected = u'No Django roles access tool used. Access to view depends '
expected += u'on its implementation.'
def function():
pass
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('Authorized', function,
'fake-view-name', False)
self.assertEqual(result, expected)
class IntegratedTestViewAnalyzer(TestCase):
def test_with_middleware_SECURED_without_view_access_object(self):
expected = u'\t' + SECURED_DEFAULT
result = view_access_analyzer(
'SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(expected, result)
def test_with_middleware_NOT_SECURED_with_view_access_object(self):
ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='br')
result = view_access_analyzer(
'NOT_SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, u'\t' + NOT_SECURED_DEFAULT)
def test_with_middleware_DISABLED_with_view_access_object(self):
ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='pu')
result = view_access_analyzer(
'DISABLED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, u'\t' + DISABLED_DEFAULT)
def test_with_middleware_with_view_access_object(self):
expected = u'View access is of type By role.'
expected += u'ERROR: No roles configured to access de view.'
ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='br')
result = view_access_analyzer(
'SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, expected)
def test_with_middleware_with_view_access_object_with_roles(self):
expected = u'View access is of type By role.'
expected += u'Roles with access: test1, test2'
g1, created = Group.objects.get_or_create(name='test1')
g2, created = Group.objects.get_or_create(name='test2')
view_access = ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='br')
view_access.roles.add(g1)
view_access.roles.add(g2)
view_access.save()
result = view_access_analyzer(
'SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, expected)
def test_with_middleware_with_view_access_object_authorized(self):
expected = u'View access is of type Authorized.'
ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='au')
result = view_access_analyzer(
'SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, expected)
def test_with_middleware_with_view_access_object_public(self):
expected = u'View access is of type Public.'
ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='pu')
result = view_access_analyzer(
'SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, expected)
def test_without_middleware_with_view_access_object(self):
expected = u'View access is of type By role.'
expected += u'ERROR: No roles configured to access de view.'
ViewAccess.objects.create(
view='django_roles_access:view_protected_by_role',
type='br')
result = view_access_analyzer(
'SECURED', views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_with_view_access_object_with_roles(self):
expected = u'View access is of type By role.'
expected += u'Roles with access: test1, test2'
g1, created = Group.objects.get_or_create(name='test1')
g2, created = Group.objects.get_or_create(name='test2')
view_access = ViewAccess.objects.create(
view='django_roles_access:view_protected_by_role',
type='br')
view_access.roles.add(g1)
view_access.roles.add(g2)
view_access.save()
result = view_access_analyzer(
'SECURED', views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_with_view_access_object_authorized(self):
expected = u'View access is of type Authorized.'
ViewAccess.objects.create(
view='django_roles_access:view_protected_by_role',
type='au')
result = view_access_analyzer(
'SECURED', views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_with_view_access_object_public(self):
expected = u'View access is of type Public.'
ViewAccess.objects.create(
view='django_roles_access:view_protected_by_role',
type='pu')
result = view_access_analyzer(
'SECURED', views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_without_view_access_object_and_view_protected(
self
):
expected = u'\t' + SECURED_DEFAULT
result = view_access_analyzer(
'SECURED', views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_no_view_access_object_and_view_protected_without_app(
self
):
expected = u'\t' + NONE_TYPE_DEFAULT
result = view_access_analyzer(
None, views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_with_view_access_object_and_view_not_protected(
self
):
expected = u'ERROR: View access object exist for the view, '
expected += 'but no Django role access tool is used: neither '
expected += 'decorator, mixin, or middleware.'
ViewAccess.objects.create(
view='django_roles_access:middleware_view_func',
type='pu')
result = view_access_analyzer(
None, views.middleware_view,
'django_roles_access:middleware_view_func',
False)
self.assertEqual(result, expected)
class UnitTestOutputReport(UnitTestCase):
def setUp(self):
        self.patch_mock_stdout = patch.object(BaseCommand(), 'stdout')
        self.patch_mock_style = patch.object(BaseCommand(), 'style')
self.mock_stdout = self.patch_mock_stdout.start()
self.mock_style = self.patch_mock_style.start()
self._output = OutputReport(self.mock_stdout, self.mock_style)
def tearDown(self):
self.patch_mock_stdout.stop()
self.patch_mock_style.stop()
def test_initial_with_parameter(self):
assert self._output.stdout == self.mock_stdout
assert self._output.style == self.mock_style
def test_internal_attributes_are_initialize(self):
assert hasattr(self._output, '_row') and self._output._row == u''
assert hasattr(self._output, '_format') and self._output._format == \
'console'
def test_initial_without_parameter(self):
        with self.assertRaises(TypeError):
OutputReport()
def test_default_output_format_is_correct_type(self):
assert self._output._format == 'console'
def test_set_format(self):
self._output.set_format('csv')
assert self._output._format == 'csv'
def test_add_to_row(self):
self._output.add_to_row('text')
self._output.add_to_row('other')
self.assertIn('text', self._output._row)
self.assertIn('other', self._output._row)
def test_write_method_write_to_stdout(self):
self._output.write(u'some text')
assert self.mock_stdout.write.called
def test_write_method_use_stdout_write_once(self):
self._output.write(u'some text')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_write_method_use_SUCCESS_style_for_styling_output(self):
self._output.write(u'some text')
self.mock_stdout.write.assert_called_once_with(
self.mock_style.SUCCESS())
def test_write_method_use_SUCCESS_style_for_output(self):
self._output.write(u'some text')
assert self.mock_style.SUCCESS.called
def test_write_method_use_style_with_received_argument(self):
self._output.write(u'some text')
self.mock_style.SUCCESS.assert_called_once_with(u'some text')
def test_console_format_write_correct_header_to_stdout_with_SUCCESS_style(
self
):
expected = u'Start checking views access.\n'
expected += u'Start gathering information.'
self._output.write_header()
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
@patch('django_roles_access.utils.timezone')
    def test_csv_format_write_correct_header(
self, mock_timezone
):
mock_timezone.now.return_value = 'fake-date'
self._output.set_format('csv')
self._output.write_header()
self.mock_style.SUCCESS.assert_called_once_with(u'Reported: fake-date')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_write_correct_middleware_status_and_end_of_header(
self
):
expected = u'Django roles access middleware is active: False.\n'
self._output.write_middleware_status(False)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_write_correct_end_of_header(
self
):
expected = u'Finish gathering information.'
self._output.write_end_of_head()
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
    def test_csv_format_write_correct_middleware_status(
self
):
expected = u'Django roles access middleware is active: False.\n'
self._output.set_format('csv')
self._output.write_middleware_status(False)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
    def test_csv_format_write_correct_csv_columns(
self
):
expected = u'App Name,Type,View Name,Url,Status,Status description'
self._output.set_format('csv')
self._output.write_end_of_head()
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_process_app_data_to_stdout_with_SUCCESS_style(self):
app_name = u'fake-app-name'
app_type = u'fake-app-type'
view_list = ['fake-view']
expected = u'\tAnalyzing: {}\n'.format(app_name)
expected += u'\t\t{} is {} type.'.format(app_name, app_type)
self._output.process_application_data(app_name, app_type, view_list)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_process_app_data_without_type(self):
app_name = u'fake-app-name'
app_type = None
view_list = ['fake-view']
expected = u'\tAnalyzing: {}\n'.format(app_name)
expected += u'\t\t{} has no type.'.format(app_name)
self._output.process_application_data(app_name, app_type, view_list)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_process_app_data_without_views(self):
app_name = u'fake-app-name'
app_type = u'fake-app-type'
view_list = []
expected = u'\tAnalyzing: {}\n'.format(app_name)
expected += u'\t\t{} is {} type.'.format(app_name, app_type)
expected += u'\t\t{} does not have configured views.'.format(app_name)
self._output.process_application_data(app_name, app_type, view_list)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
    def test_csv_format_process_application_data_to_string(self):
app_name = u'fake-app-name'
app_type = u'fake-app-type'
view_list = ['fake-view-list']
        expected = u'{},{},'.format(app_name, app_type)
self._output.set_format('csv')
self._output.process_application_data(app_name, app_type, view_list)
self.assertEqual(expected, self._output._row)
    def test_csv_format_process_application_data_without_type_to_string(self):
app_name = u'fake-app-name'
app_type = None
view_list = ['fake-view-list']
        expected = u'fake-app-name,no type,'
self._output.set_format('csv')
self._output.process_application_data(app_name, app_type, view_list)
self.assertEqual(expected, self._output._row)
    def test_csv_format_process_application_data_without_views(self):
app_name = u'fake-app-name'
app_type = u'fake-app-type'
view_list = []
        expected = u'fake-app-name,fake-app-type,,,,,'
self._output.set_format('csv')
self._output.process_application_data(app_name, app_type, view_list)
self.assertEqual(expected, self._output._row)
def test_console_format_process_view_data_to_stdout_with_SUCCESS_style(
self
):
view_name = u'fake-view-name'
url = '/fake-url/'
expected = u'\n\t\tAnalysis for view: {}'.format(view_name)
expected += u'\n\t\tView url: {}'.format(url)
self._output.process_view_data(view_name, url)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
    def test_csv_format_process_view_data(self):
view_name = u'fake-view-name'
url = '/fake-url/'
expected = u'{},{}'.format(view_name, url)
self._output.set_format('csv')
self._output.process_view_data(view_name, url)
self.assertIn(expected, self._output._row)
# View_access_analyzer output.
def test_console_format_write_vaa_to_stdout(self):
self._output.write_view_access_analyzer(u'some text')
assert self.mock_stdout.write.called
def test_console_format_use_stdout_write_once_with_vaa(self):
self._output.write_view_access_analyzer(u'some text')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_use_SUCCESS_style_for_styling_output_of_vaa(self):
self._output.write_view_access_analyzer(u'some text')
self.mock_stdout.write.assert_called_once_with(
self.mock_style.SUCCESS())
def test_console_format_use_SUCCESS_style_for_output_of_vaa(self):
self._output.write_view_access_analyzer(u'some text')
assert self.mock_style.SUCCESS.called
def test_console_format_use_style_with_vaa_result(self):
self._output.write_view_access_analyzer(u'some text')
self.mock_style.SUCCESS.assert_called_once_with(u'\t\tsome text')
def test_console_format_use_ERROR_style_for_output_if_error_in_vaa(self):
self._output.write_view_access_analyzer('ERROR: fake report')
assert self.mock_style.ERROR.called
def test_console_format_use_ERROR_style_with_the_error_in_vaa(self):
self._output.write_view_access_analyzer('ERROR: fake report')
self.mock_style.ERROR.assert_called_once_with('\t\t' +
'ERROR: fake report')
def test_console_format_use_WARNING_style_for_output_if_warning_in_vaa(self):
self._output.write_view_access_analyzer('WARNING: fake report')
assert self.mock_style.WARNING.called
def test_console_format_use_WARNING_style_with_the_warning_in_vaa(self):
self._output.write_view_access_analyzer('WARNING: fake report')
self.mock_style.WARNING.assert_called_once_with(
'\t\t' + 'WARNING: fake report')
def test_csv_format_write_view_access_analyzer_with_Normal_to_stdout(self):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
self._output._format = 'csv'
self._output.write_view_access_analyzer(u'fake-report')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_write_view_access_analyzer_with_Normal_to_style(self):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
expected = u'fake-app,fake-type,fake-view,fake-url,Normal,fake-report\n'
self._output._format = 'csv'
self._output.write_view_access_analyzer(u'fake-report')
self.mock_style.SUCCESS.assert_called_once_with(expected)
def test_csv_format_write_view_access_analyzer_with_WARNING_to_stdout(self):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
self._output._format = 'csv'
self._output.write_view_access_analyzer('WARNING: fake-report')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_write_view_access_analyzer_with_WARNING_with_style(
self
):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
expected = u'fake-app,fake-type,fake-view,fake-url,Warning,' \
u'fake-report\n'
self._output._format = 'csv'
self._output.write_view_access_analyzer('WARNING: fake-report')
self.mock_style.WARNING.assert_called_once_with(expected)
def test_csv_format_write_view_access_analyzer_with_ERROR_to_stdout(self):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
self._output._format = 'csv'
self._output.write_view_access_analyzer('ERROR: fake-report')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_write_view_access_analyzer_with_ERROR_with_style(self):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
expected = u'fake-app,fake-type,fake-view,fake-url,Error,fake-report\n'
self._output._format = 'csv'
self._output.write_view_access_analyzer('ERROR: fake-report')
self.mock_style.ERROR.assert_called_once_with(expected)
def test_csv_format_write_view_access_analyzer_reset_OutputFormater_row(
self
):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
self._output._format = 'csv'
self._output.write_view_access_analyzer('fake-report')
self.assertEqual(self._output._row, u'fake-app,fake-type,')
def test_console_format_close_application_data_to_stdout_with_SUCCESS_style(
self
):
expected = u'\tFinish analyzing fake-app-name.'
self._output.close_application_data('fake-app-name')
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
    def test_csv_format_close_application_data_to_string(self):
expected = u''
self._output.set_format('csv')
self._output.close_application_data('fake-app-name')
self.assertEqual(self._output._row, expected)
def test_console_format_write_footer_to_stdout_with_SUCCESS_style(self):
expected = u'End checking view access.'
self._output.write_footer()
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
    def test_csv_format_write_footer_to_string(self):
expected = u'\n'
self._output.set_format('csv')
self._output.write_footer()
self.assertEqual(self._output._row, expected)
| 2.21875 | 2 |
favorite_files.py | jasondavis/FavoriteFiles | 1 | 5314 | <gh_stars>1-10
'''
Favorite Files
Licensed under MIT
Copyright (c) 2012 <NAME> <<EMAIL>>
'''
import sublime
import sublime_plugin
from os.path import join, exists, normpath
from favorites import Favorites
Favs = Favorites(join(sublime.packages_path(), 'User', 'favorite_files_list.json'))
class Refresh:
dummy_file = normpath(join(sublime.packages_path(), 'FavoriteFiles', 'refresh.txt'))
on = False
class CleanOrphanedFavoritesCommand(sublime_plugin.WindowCommand):
def run(self):
# Clean out all dead links
if not Favs.load(clean=True, win_id=self.window.id()):
Favs.load(force=True, clean=True, win_id=self.window.id())
class SelectFavoriteFileCommand(sublime_plugin.WindowCommand):
def open_file(self, value, group=False):
if value >= 0:
active_group = self.window.active_group()
if value < self.num_files or (group and value < self.num_files + 1):
                # Open global file, file in group, or all files in group
names = []
if group:
if value == 0:
# Open all files in group
names = [self.files[x][1] for x in range(0, self.num_files)]
else:
# Open file in group
names.append(self.files[value - 1][1])
else:
# Open global file
names.append(self.files[value][1])
                # Iterate through the file list to ensure files load in proper view index order
count = 0
for n in names:
if exists(n):
view = self.window.open_file(n)
if view != None:
if active_group >= 0:
self.window.set_view_index(view, active_group, count)
count += 1
else:
sublime.error_message("The following file does not exist:\n%s" % n)
else:
                # Descend into group
value -= self.num_files
self.files = Favs.all_files(group_name=self.groups[value][0].replace("Group: ", "", 1))
self.num_files = len(self.files)
self.groups = []
self.num_groups = 0
# Show files in group
if self.num_files:
self.window.show_quick_panel(
["Open Group"] + self.files,
lambda x: self.open_file(x, group=True)
)
else:
sublime.error_message("No favorites found! Try adding some.")
def run(self):
if not Favs.load(win_id=self.window.id()):
self.files = Favs.all_files()
self.num_files = len(self.files)
self.groups = Favs.all_groups()
self.num_groups = len(self.groups)
if self.num_files + self.num_groups > 0:
self.window.show_quick_panel(
self.files + self.groups,
self.open_file
)
else:
sublime.error_message("No favorites found! Try adding some.")
class AddFavoriteFileCommand(sublime_plugin.WindowCommand):
def add(self, names, group_name=None):
disk_omit_count = 0
added = 0
# Iterate names and add them to group/global if not already added
for n in names:
if not Favs.exists(n, group_name=group_name):
if exists(n):
Favs.set(n, group_name=group_name)
added += 1
else:
# File does not exist on disk; cannot add
disk_omit_count += 1
if added:
# Save if files were added
Favs.save(True)
if disk_omit_count:
            # Alert that some files could not be added
message = "1 file does not exist on disk!" if disk_omit_count == 1 else "%d file(s) do not exist on disk!" % disk_omit_count
sublime.error_message(message)
def create_group(self, value):
repeat = False
if value == "":
# Require an actual name
sublime.error_message("Please provide a valid group name.")
repeat = True
elif Favs.exists(value, group=True):
# Do not allow duplicates
sublime.error_message("Group \"%s\" already exists.")
repeat = True
else:
# Add group
Favs.add_group(value)
self.add(self.name, value)
if repeat:
# Ask again if name was not sufficient
v = self.window.show_input_panel(
"Create Group: ",
"New Group",
self.create_group,
None,
None
)
v.run_command("select_all")
def select_group(self, value, replace=False):
if value >= 0:
group_name = self.groups[value][0].replace("Group: ", "", 1)
if replace:
# Start with empty group for "Replace Group" selection
Favs.add_group(group_name)
# Add favorites
self.add(self.name, group_name)
def show_groups(self, replace=False):
        # Show available groups
self.groups = Favs.all_groups()
self.window.show_quick_panel(
self.groups,
lambda x: self.select_group(x, replace=replace)
)
def group_answer(self, value):
if value >= 0:
if value == 0:
# No group; add file to favorites
self.add(self.name)
elif value == 1:
# Request new group name
v = self.window.show_input_panel(
"Create Group: ",
"New Group",
self.create_group,
None,
None
)
v.run_command("select_all")
elif value == 2:
# "Add to Group"
self.show_groups()
elif value == 3:
# "Replace Group"
self.show_groups(replace=True)
def group_prompt(self):
# Default options
self.group = ["No Group", "Create Group"]
if Favs.group_count() > 0:
            # Options if groups already exist
self.group += ["Add to Group", "Replace Group"]
# Present group options
self.window.show_quick_panel(
self.group,
self.group_answer
)
def file_answer(self, value):
if value >= 0:
view = self.window.active_view()
if view != None:
if value == 0:
# Single file
name = view.file_name()
if name != None:
self.name.append(name)
self.group_prompt()
if value == 1:
# All files in window
views = self.window.views()
if len(views) > 0:
for v in views:
name = v.file_name()
if name != None:
self.name.append(name)
if len(self.name) > 0:
self.group_prompt()
if value == 2:
# All files in layout group
group, idx = self.window.get_view_index(view)
views = self.window.views_in_group(group)
if len(views) > 0:
for v in views:
name = v.file_name()
if name != None:
self.name.append(name)
if len(self.name) > 0:
self.group_prompt()
def file_prompt(self, view_code):
# Add current active file
options = ["Add Current File to Favorites"]
if view_code > 0:
# Add all files in window
options.append("Add All Files to Favorites")
if view_code > 1:
# Add all files in layout group
options.append("Add All Files to in Active Group to Favorites")
        # Present file options
self.window.show_quick_panel(
options,
self.file_answer
)
def run(self):
view = self.window.active_view()
self.name = []
if view != None:
view_code = 0
views = self.window.views()
# If there is more than one view open allow saving all views
            # TODO: Widget views probably show up here too, maybe look into excluding them
if len(views) > 1:
view_code = 1
# See if there is more than one group; if so allow saving of a specific group
if self.window.num_groups() > 1:
group, idx = self.window.get_view_index(view)
group_views = self.window.views_in_group(group)
if len(group_views) > 1:
view_code = 2
self.file_prompt(view_code)
else:
# Only single file open, proceed without file options
name = view.file_name()
if name != None:
self.name.append(name)
self.group_prompt()
class RemoveFavoriteFileCommand(sublime_plugin.WindowCommand):
def remove(self, value, group=False, group_name=None):
if value >= 0:
# Remove file from global, file from group list, or entire group
if value < self.num_files or (group and value < self.num_files + 1):
name = None
if group:
if group_name == None:
return
if value == 0:
# Remove group
Favs.remove_group(group_name)
Favs.save(True)
return
else:
# Remove group file
name = self.files[value - 1][1]
else:
# Remove global file
name = self.files[value][1]
# Remove file and save
Favs.remove(name, group_name=group_name)
Favs.save(True)
else:
                # Descend into group
value -= self.num_files
group_name = self.groups[value][0].replace("Group: ", "", 1)
self.files = Favs.all_files(group_name=group_name)
self.num_files = len(self.files)
self.groups = []
self.num_groups = 0
# Show group files
if self.num_files:
self.window.show_quick_panel(
["Remove Group"] + self.files,
lambda x: self.remove(x, group=True, group_name=group_name)
)
else:
sublime.error_message("No favorites found! Try adding some.")
def run(self):
if not Favs.load(win_id=self.window.id()):
# Present both files and groups for removal
self.files = Favs.all_files()
self.num_files = len(self.files)
self.groups = Favs.all_groups()
self.num_groups = len(self.groups)
# Show panel
if self.num_files + self.num_groups > 0:
self.window.show_quick_panel(
self.files + self.groups,
self.remove
)
else:
sublime.error_message("No favorites to remove!")
class FavoritesForceRefreshListenerCommand(sublime_plugin.EventListener):
def on_post_save(self, view):
if Refresh.on:
path = view.file_name()
if path != None:
if normpath(view.file_name()) == Refresh.dummy_file:
# Close refresh file if more than one view is open
if len(view.window().views()) > 1:
sublime.set_timeout(lambda: sublime.active_window().run_command("close_file"), 100)
# Attempt toggle again
sublime.set_timeout(lambda: sublime.active_window().run_command("toggle_per_project_favorites"), 1000)
class TogglePerProjectFavoritesCommand(sublime_plugin.WindowCommand):
def save(self, view):
if Refresh.on:
path = view.file_name()
if path != None:
if normpath(view.file_name()) == Refresh.dummy_file:
view.run_command('save')
def run(self):
refresh = True
win_id = self.window.id()
if Refresh.on:
Refresh.on = False
refresh = False
# Try and toggle back to global first
if not Favs.toggle_global(win_id):
return
# Try and toggle per project
if refresh:
view = self.window.open_file(Refresh.dummy_file)
if view != None:
Refresh.on = True
self.window.focus_view(view)
sublime.set_timeout(lambda: self.save(view), 100)
else:
sublime.error_message('Could not find a project file!')
else:
if Favs.toggle_per_projects(win_id):
sublime.error_message('Could not find a project file!')
else:
Favs.open(win_id=self.window.id())
def is_enabled(self):
return sublime.load_settings("favorite_files.sublime-settings").get("enable_per_projects", False)
| 2.46875 | 2 |
tests/list/list03.py | ktok07b6/polyphony | 83 | 5315 | <filename>tests/list/list03.py
from polyphony import testbench
def list03(x, y, z):
a = [1, 2, 3]
r0 = x
r1 = y
a[r0] = a[r1] + z
return a[r0]
@testbench
def test():
    assert 4 == list03(0, 1, 2)
    assert 5 == list03(2, 1, 3)
test()
| 2.796875 | 3 |
sc2clanman/views.py | paskausks/sc2cm | 0 | 5316 | <reponame>paskausks/sc2cm
#!/bin/env python3
from collections import Counter
from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from django.db import models as dm
from django.shortcuts import get_object_or_404, render
from django.views.generic.list import BaseListView
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from . import models, apps, sc2, mixins
class BaseView(TemplateView):
"""
A TemplateView subclass which adds the Opts object to context.
"""
current_model = 'clanmember'
def get_context_data(self, **kwargs):
ctx = super(BaseView, self).get_context_data(**kwargs)
# Get links so we can display links to admin.
class Opts(object):
app_label = 'sc2clanman'
model_name = self.current_model
ctx['opts'] = Opts()
ctx['is_authorized'] = self.request.user.is_superuser or self.request.user.is_staff
return ctx
class AuthenticatedView(BaseView):
"""
BaseView subclass with the login required decorator applied.
"""
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(AuthenticatedView, self).dispatch(*args, **kwargs)
class ListView(BaseListView, BaseView):
"""
Combines BaseView with capability to show a paginated object list
"""
pass
class MemberView(ListView):
""" Show the clanmembers in a list ordered by ladder score"""
template_name = 'sc2clanman/members.html'
# No ordering since it's done by the front-end
queryset = models.ClanMember.clanmembers.all()
def get_context_data(self, **kwargs):
ctx = super(MemberView, self).get_context_data(**kwargs)
ctx['last_member_update'] = models.SyncLog.objects.filter(
action=models.SyncLog.CLAN_MEMBER_SYNC,
success=True,
).order_by('-time')[0].time
ctx['last_detail_update'] = models.SyncLog.objects.filter(
action=models.SyncLog.CLAN_MEMBER_DETAIL_SYNC,
success=True
).order_by('-time')[0].time
# Calculate quick stats
# Game stats - aggregate and sum wins and losses
gp = self.queryset.aggregate(dm.Sum('wins'), dm.Sum('losses'))
ctx['total_games_played'] = gp['wins__sum'] + gp['losses__sum']
# Annotate games played and winrate for each member
games_played = self.queryset.annotate(
games_played=dm.F('wins') + dm.F('losses')
).order_by('games_played')
ctx['least_games_played'] = games_played.filter(games_played__gt=0).first()
ctx['most_games_played'] = games_played.order_by('-games_played').first()
# Last game date
ctx['least_passionate'] = self.queryset.order_by('last_game').first()
# Most prominent league, country and race
league_breakdown = Counter(
self.queryset.exclude(score=models.ClanMember.SCORE_UNRANKED).values_list('league', flat=True)
).most_common()
ctx['league_breakdown'] = (
(sc2.League(l[0]), l[1]) for l in league_breakdown
)
ctx['country_breakdown'] = Counter(
self.queryset.exclude(country='').values_list('country', flat=True)
).most_common()
race_breakdown = Counter(
self.queryset.exclude(score=models.ClanMember.SCORE_UNRANKED).values_list('race', flat=True)
).most_common(4)
ctx['race_breakdown'] = (
(sc2.Race(r[0]), r[1]) for r in race_breakdown
)
ctx['version'] = apps.ClanManConfig.version_id
return ctx
class ClanWarView(BaseView):
template_name = 'sc2clanman/cw.html'
current_model = 'clanwar'
def get_context_data(self, **kwargs):
ctx = super(ClanWarView, self).get_context_data(**kwargs)
ctx['clanwars'] = models.ClanWar.objects.all()
return ctx
class ClanWarDetailView(BaseView):
template_name = 'sc2clanman/cwdetail.html'
current_model = 'clanwar'
def get_context_data(self, **kwargs):
ctx = super(ClanWarDetailView, self).get_context_data(**kwargs)
ctx['cw'] = get_object_or_404(models.ClanWar, id=kwargs.get('cw_id'))
ctx['clan_tag'] = settings.SC2_CLANMANAGER_CLAN_TAG
return ctx
| 2.28125 | 2 |
manimlib/mobject/functions.py | parmentelat/manim | 1 | 5317 | <reponame>parmentelat/manim<gh_stars>1-10
from manimlib.constants import *
from manimlib.mobject.types.vectorized_mobject import VMobject
from manimlib.utils.config_ops import digest_config
from manimlib.utils.space_ops import get_norm
class ParametricCurve(VMobject):
CONFIG = {
"t_range": [0, 1, 0.1],
"min_samples": 10,
"epsilon": 1e-8,
# TODO, automatically figure out discontinuities
"discontinuities": [],
"smoothing": True,
}
def __init__(self, t_func, t_range=None, **kwargs):
digest_config(self, kwargs)
if t_range is not None:
self.t_range[:len(t_range)] = t_range
# To be backward compatible with all the scenes specifying t_min, t_max, step_size
self.t_range = [
kwargs.get("t_min", self.t_range[0]),
kwargs.get("t_max", self.t_range[1]),
kwargs.get("step_size", self.t_range[2]),
]
self.t_func = t_func
VMobject.__init__(self, **kwargs)
def get_point_from_function(self, t):
return self.t_func(t)
def init_points(self):
t_min, t_max, step = self.t_range
jumps = np.array(self.discontinuities)
jumps = jumps[(jumps > t_min) & (jumps < t_max)]
boundary_times = [t_min, t_max, *(jumps - self.epsilon), *(jumps + self.epsilon)]
boundary_times.sort()
for t1, t2 in zip(boundary_times[0::2], boundary_times[1::2]):
t_range = [*np.arange(t1, t2, step), t2]
points = np.array([self.t_func(t) for t in t_range])
self.start_new_path(points[0])
self.add_points_as_corners(points[1:])
if self.smoothing:
self.make_smooth()
return self
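# Usage sketch (assumes np and TAU are available via manimlib's wildcard
# constants import): a unit circle traced over one full turn.
#   circle = ParametricCurve(lambda t: [np.cos(t), np.sin(t), 0], t_range=[0, TAU])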
class FunctionGraph(ParametricCurve):
CONFIG = {
"color": YELLOW,
"x_range": [-8, 8, 0.25],
}
def __init__(self, function, x_range=None, **kwargs):
digest_config(self, kwargs)
self.function = function
if x_range is not None:
self.x_range[:len(x_range)] = x_range
def parametric_function(t):
return [t, function(t), 0]
super().__init__(parametric_function, self.x_range, **kwargs)
def get_function(self):
return self.function
def get_point_from_function(self, x):
return self.t_func(x)
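# Usage sketch: FunctionGraph(lambda x: x ** 2, x_range=[-3, 3]) plots a
# parabola sampled every 0.25 units per the CONFIG default step.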
| 2.15625 | 2 |
lib/ecsmate/ecs.py | doudoudzj/ecsmate | 0 | 5318 | <gh_stars>0
#-*- coding: utf-8 -*-
#
# Copyright (c) 2012, ECSMate development team
# All rights reserved.
#
# ECSMate is distributed under the terms of the (new) BSD License.
# The full license can be found in 'LICENSE.txt'.
"""ECS SDK
"""
import time
import hmac
import base64
import hashlib
import urllib
import json
import inspect
from random import random
class ECS(object):
def __init__(self, AccessKeyID, AccessKeySecret, gateway='https://ecs.aliyuncs.com'):
self.AccessKeyID = AccessKeyID
self.AccessKeySecret = AccessKeySecret
self.gateway = gateway
@classmethod
def _urlencode(self, string):
return urllib.quote(string, '~')
def _sign(self, params):
paramstrings = []
for k, v in sorted(params.items()):
paramstrings.append('%s=%s' % (ECS._urlencode(k), ECS._urlencode(v)))
datastrings = [
ECS._urlencode('GET'),
ECS._urlencode('/'),
ECS._urlencode('&'.join(paramstrings)),
]
datastring = '&'.join(datastrings)
signature = hmac.new(self.AccessKeySecret+'&', datastring, hashlib.sha1).digest()
return base64.b64encode(signature)
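    # Signing sketch: for the sorted, url-encoded params the string-to-sign is
    #   GET&%2F&<url-encoded "k1=v1&k2=v2...">
    # and the Signature is base64(HMAC-SHA1(AccessKeySecret + '&', string-to-sign)),
    # which is Aliyun's classic request-signing scheme as implemented above.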
def _http_get(self, params):
url = self.gateway + '/?'
sysparams = {
'Format': 'JSON',
'Version': '2012-09-13',
'AccessKeyID': self.AccessKeyID,
'SignatureMethod': 'HMAC-SHA1',
'Timestamp': time.strftime('%Y-%m-%dT%XZ'),
'SignatureVersion': '1.0',
'SignatureNonce': str(random()).replace('0.', ''),
}
params.update(sysparams)
params['Signature'] = self._sign(params)
params = urllib.urlencode(params)
url += params
f = urllib.urlopen(url)
data = f.read()
f.close()
return json.loads(data)
def _parse_response(self, apiname, response):
if response.has_key('Error'):
respdata = response['Error']
reqid = respdata['RequestID']
del respdata['RequestID']
return [False, respdata, reqid]
else:
respdata = response[apiname+'Response']
return [True, respdata[apiname+'Result'], respdata['ResponseMetadata']['RequestID']]
def _make_params(self, params):
params = dict((k, str(v)) for k, v in params.items() if k != 'self' and v != None)
params['Action'] = inspect.stack()[1][3]
return params
def _execute(self, params):
response = self._http_get(params)
return self._parse_response(params['Action'], response)
def CreateInstance(self, RegionCode, DiskSize, InstanceType, GroupCode, ImageCode,
MaxBandwidthIn=None, MaxBandwidthOut=None, InstanceName=None, HostName=None,
Password=<PASSWORD>, ZoneCode=None):
params = self._make_params(locals())
return self._execute(params)
def StartInstance(self, InstanceName):
params = self._make_params(locals())
return self._execute(params)
def StopInstance(self, InstanceName, ForceStop=None):
params = self._make_params(locals())
return self._execute(params)
def RebootInstance(self, InstanceName, ForceStop=None):
params = self._make_params(locals())
return self._execute(params)
def ResetInstance(self, InstanceName, ImageCode=None, DiskType=None):
params = self._make_params(locals())
return self._execute(params)
def ResetPassword(self, InstanceName, NewPassword=None):
params = self._make_params(locals())
return self._execute(params)
def DeleteInstance(self, InstanceName):
params = self._make_params(locals())
return self._execute(params)
def DescribeInstanceStatus(self, RegionCode=None, ZoneCode=None, PageNumber=None, PageSize=None):
params = self._make_params(locals())
return self._execute(params)
def DescribeInstanceAttribute(self, InstanceName):
params = self._make_params(locals())
return self._execute(params)
def ModifyInstanceAttribute(self, InstanceName, InstanceType):
params = self._make_params(locals())
return self._execute(params)
def ModifyBandwidth(self, InstanceName, MaxBandwidthOut, MaxBandwidthIn):
params = self._make_params(locals())
return self._execute(params)
def ModifyHostName(self, InstanceName, HostName):
params = self._make_params(locals())
return self._execute(params)
def CreateDisk(self, InstanceName, Size, SnapshotCode=None):
params = self._make_params(locals())
return self._execute(params)
def DeleteDisk(self, InstanceName, DiskCode):
params = self._make_params(locals())
return self._execute(params)
def DescribeDisks(self, InstanceName):
params = self._make_params(locals())
return self._execute(params)
def DescribeImages(self, RegionCode=None, PageNumber=None, PageSize=None):
params = self._make_params(locals())
return self._execute(params)
def AllocateAddress(self, InstanceName):
params = self._make_params(locals())
return self._execute(params)
def ReleaseAddress(self, PublicIpAddress):
params = self._make_params(locals())
return self._execute(params)
def CreateSecurityGroup(self, GroupCode, RegionCode, Description):
params = self._make_params(locals())
return self._execute(params)
def AuthorizeSecurityGroup(self, GroupCode, RegionCode, IpProtocol, PortRange,
SourceGroupCode=None, SourceCidrIp=None, Policy=None, NicType=None, Priority=None):
params = self._make_params(locals())
return self._execute(params)
def DescribeSecurityGroupAttribute(self, GroupCode, RegionCode, NicType=None):
params = self._make_params(locals())
return self._execute(params)
def DescribeSecurityGroups(self, RegionCode, PageNumber=None, PageSize=None):
params = self._make_params(locals())
return self._execute(params)
def ModifySecurityGroupAttribute(self, RegionCode, GroupCode, Adjust):
params = self._make_params(locals())
return self._execute(params)
def RevokeSecurityGroup(self, GroupCode, RegionCode, IpProtocol, PortRange,
SourceGroupCode=None, SourceCidrIp=None, Policy=None, NicType=None):
params = self._make_params(locals())
return self._execute(params)
def DeleteSecurityGroup(self, GroupCode, RegionCode):
params = self._make_params(locals())
return self._execute(params)
def CreateSnapshot(self, InstanceName, DiskCode):
params = self._make_params(locals())
return self._execute(params)
def DeleteSnapshot(self, DiskCode, InstanceName, SnapshotCode):
params = self._make_params(locals())
return self._execute(params)
def CancelSnapshotRequest(self, InstanceName, SnapshotCode):
params = self._make_params(locals())
return self._execute(params)
def DescribeSnapshots(self, InstanceName, DiskCode):
params = self._make_params(locals())
return self._execute(params)
def DescribeSnapshotAttribute(self, RegionCode, SnapshotCode):
params = self._make_params(locals())
return self._execute(params)
def RollbackSnapshot(self, InstanceName, DiskCode, SnapshotCode):
params = self._make_params(locals())
return self._execute(params)
def DescribeRegions(self):
params = self._make_params(locals())
return self._execute(params)
def DescribeZones(self, RegionCode):
params = self._make_params(locals())
return self._execute(params)
if __name__ == '__main__':
import pprint
pp = pprint.PrettyPrinter(indent=4)
AccessKeyID = ''
AccessKeySecret = ''
ecs = ECS(AccessKeyID, AccessKeySecret)
if 0:
print '## Regions\n'
regions = ecs.DescribeRegions()[1]
pp.pprint(regions)
print
for region in regions['Regions']:
print '## Zones in %s\n' % region['RegionCode']
zones = ecs.DescribeZones(region['RegionCode'])
if not zones[0]:
pp.pprint(zones)
continue
zones = zones[1]
pp.pprint(zones)
print
for zone in zones['Zones']:
print '## Instances in %s\n' % zone['ZoneCode']
instances = ecs.DescribeInstanceStatus(region['RegionCode'], zone['ZoneCode'])[1]
pp.pprint(instances)
print
print
#pp.pprint(ecs.DescribeInstanceStatus(PageSize=10, PageNumber=1))
#pp.pprint(ecs.DescribeInstanceStatus('cn-hangzhou-dg-a01', 'cn-hangzhou-dg101-a'))
#pp.pprint(ecs.StartInstance('AY1209220917063704221'))
#pp.pprint(ecs.StopInstance('AY1209220917063704221'))
#pp.pprint(ecs.RebootInstance('AY1209220917063704221'))
#pp.pprint(ecs.DescribeInstanceAttribute('AY1209220917063704221'))
#pp.pprint(ecs.DescribeImages(PageSize=10, PageNumber=9))
#pp.pprint(ecs.DescribeDisks('AY1209220917063704221'))
#pp.pprint(ecs.DescribeSnapshots('AY1209220917063704221', '1006-60002839'))
| 2.09375 | 2 |
tacker/api/v1/resource.py | mail2nsrajesh/tacker | 0 | 5319 | <filename>tacker/api/v1/resource.py
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers redux
"""
from oslo_log import log as logging
import webob.dec
from tacker.api import api_common
from tacker import wsgi
LOG = logging.getLogger(__name__)
class Request(wsgi.Request):
pass
def Resource(controller, faults=None, deserializers=None, serializers=None):
"""API entity resource.
Represents an API entity resource and the associated serialization and
deserialization logic
"""
default_deserializers = {'application/json': wsgi.JSONDeserializer()}
default_serializers = {'application/json': wsgi.JSONDictSerializer()}
format_types = {'json': 'application/json'}
action_status = dict(create=201, delete=204)
default_deserializers.update(deserializers or {})
default_serializers.update(serializers or {})
deserializers = default_deserializers
serializers = default_serializers
faults = faults or {}
@webob.dec.wsgify(RequestClass=Request)
def resource(request):
route_args = request.environ.get('wsgiorg.routing_args')
if route_args:
args = route_args[1].copy()
else:
args = {}
# NOTE(jkoelker) by now the controller is already found, remove
# it from the args if it is in the matchdict
args.pop('controller', None)
fmt = args.pop('format', None)
action = args.pop('action', None)
content_type = format_types.get(fmt,
request.best_match_content_type())
language = request.best_match_language()
deserializer = deserializers.get(content_type)
serializer = serializers.get(content_type)
try:
if request.body:
args['body'] = deserializer.deserialize(request.body)['body']
method = getattr(controller, action)
result = method(request=request, **args)
except Exception as e:
mapped_exc = api_common.convert_exception_to_http_exc(e, faults,
language)
if hasattr(mapped_exc, 'code') and 400 <= mapped_exc.code < 500:
LOG.info(_('%(action)s failed (client error): %(exc)s'),
{'action': action, 'exc': mapped_exc})
else:
LOG.exception(
_('%(action)s failed: %(details)s'),
{
'action': action,
'details': extract_exc_details(e),
}
)
raise mapped_exc
status = action_status.get(action, 200)
body = serializer.serialize(result)
# NOTE(jkoelker) Comply with RFC2616 section 9.7
if status == 204:
content_type = ''
body = None
return webob.Response(request=request, status=status,
content_type=content_type,
body=body)
return resource
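# Wiring sketch (controller/fault names hypothetical): the returned WSGI app
# deserializes a JSON body if present, dispatches to the routed controller
# action, maps exceptions via the faults dict, and serializes the result, e.g.
#   app = Resource(WidgetController(),
#                  faults={WidgetNotFound: webob.exc.HTTPNotFound})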
_NO_ARGS_MARKER = object()
def extract_exc_details(e):
for attr in ('_error_context_msg', '_error_context_args'):
if not hasattr(e, attr):
return _('No details.')
details = e._error_context_msg
args = e._error_context_args
if args is _NO_ARGS_MARKER:
return details
return details % args
| 1.984375 | 2 |
akinator/utils.py | GitHubEmploy/akinator.py | 0 | 5320 | <reponame>GitHubEmploy/akinator.py
"""
MIT License
Copyright (c) 2019 NinjaSnail1080
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .exceptions import InvalidAnswerError, InvalidLanguageError, AkiConnectionFailure, AkiTimedOut, AkiNoQuestions, AkiServerDown, AkiTechnicalError
import re
import json
def ans_to_id(ans):
    """Convert an input answer string into an Answer ID for Akinator"""
    ans = str(ans).lower()
    if ans in {"yes", "y", "0"}:
        return "0"
    elif ans in {"no", "n", "1"}:
        return "1"
    elif ans in {"i", "idk", "i dont know", "i don't know", "2"}:
        return "2"
    elif ans in {"probably", "p", "3"}:
        return "3"
    elif ans in {"probably not", "pn", "4"}:
        return "4"
    else:
        raise InvalidAnswerError("""
        You put "{}", which is an invalid answer.
        The answer must be one of these:
        - "yes" OR "y" OR "0" for YES
        - "no" OR "n" OR "1" for NO
        - "i" OR "idk" OR "i dont know" OR "i don't know" OR "2" for I DON'T KNOW
        - "probably" OR "p" OR "3" for PROBABLY
        - "probably not" OR "pn" OR "4" for PROBABLY NOT
        """.format(ans))
_LANG_THEME_MAP = {
    None: ("en", "c"), "en": ("en", "c"), "english": ("en", "c"),
    "en_animals": ("en", "a"), "english_animals": ("en", "a"),
    "en_objects": ("en", "o"), "english_objects": ("en", "o"),
    "ar": ("ar", "c"), "arabic": ("ar", "c"),
    "cn": ("cn", "c"), "chinese": ("cn", "c"),
    "de": ("de", "c"), "german": ("de", "c"),
    "de_animals": ("de", "a"), "german_animals": ("de", "a"),
    "es": ("es", "c"), "spanish": ("es", "c"),
    "es_animals": ("es", "a"), "spanish_animals": ("es", "a"),
    "fr": ("fr", "c"), "french": ("fr", "c"),
    "fr_animals": ("fr", "a"), "french_animals": ("fr", "a"),
    "fr_objects": ("fr", "o"), "french_objects": ("fr", "o"),
    "il": ("il", "c"), "hebrew": ("il", "c"),
    "it": ("it", "c"), "italian": ("it", "c"),
    "it_animals": ("it", "a"), "italian_animals": ("it", "a"),
    "jp": ("jp", "c"), "japanese": ("jp", "c"),
    "jp_animals": ("jp", "a"), "japanese_animals": ("jp", "a"),
    "kr": ("kr", "c"), "korean": ("kr", "c"),
    "nl": ("nl", "c"), "dutch": ("nl", "c"),
    "pl": ("pl", "c"), "polish": ("pl", "c"),
    "pt": ("pt", "c"), "portuguese": ("pt", "c"),
    "ru": ("ru", "c"), "russian": ("ru", "c"),
    "tr": ("tr", "c"), "turkish": ("tr", "c"),
}
def get_lang_and_theme(lang=None):
    """Returns the language code and theme based on what is input"""
    try:
        code, theme = _LANG_THEME_MAP[lang]
    except KeyError:
        raise InvalidLanguageError("You put \"{}\", which is an invalid language.".format(lang))
    return {"lang": code, "theme": theme}
def raise_connection_error(response):
"""Raise the proper error if the API failed to connect"""
if response == "KO - SERVER DOWN":
raise AkiServerDown("Akinator's servers are down in this region. Try again later or use a different language")
elif response == "KO - TECHNICAL ERROR":
raise AkiTechnicalError("Akinator's servers have had a technical error. Try again later or use a different language")
elif response == "KO - TIMEOUT":
raise AkiTimedOut("Your Akinator session has timed out")
elif response == "KO - ELEM LIST IS EMPTY" or response == "WARN - NO QUESTION":
raise AkiNoQuestions("\"Akinator.step\" reached 80. No more questions")
else:
        raise AkiConnectionFailure("An unknown error has occurred. Server response: {}".format(response))
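# Usage sketch (within the akinator package, where the relative import at the
# top of this module resolves):
#   ans_to_id("probably")             # -> "3"
#   ans_to_id("maybe")                # raises InvalidAnswerError
#   get_lang_and_theme("de_animals")  # -> {"lang": "de", "theme": "a"}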
| 2.109375 | 2 |
ucs-python/create_ucs_sp_template.py | movinalot/ucs | 0 | 5321 | <reponame>movinalot/ucs
"""
create_ucs_sp_template.py
Purpose:
UCS Manager Create a UCS Service Profile Template
Author:
<NAME> (<EMAIL>) github: (@movinalot)
Cisco Systems, Inc.
"""
from ucsmsdk.ucshandle import UcsHandle
from ucsmsdk.mometa.ls.LsServer import LsServer
from ucsmsdk.mometa.org.OrgOrg import OrgOrg
HANDLE = UcsHandle(
"sandbox-ucsm1.cisco.com",
"admin",
"password"
)
HANDLE.login()
ORG_ORG = OrgOrg(
parent_mo_or_dn='org-root',
name="devnet",
)
HANDLE.add_mo(ORG_ORG, modify_present=True)
HANDLE.commit()
SP_TEMPLATE = LsServer(
parent_mo_or_dn='org-root/org-devnet',
name="devcore_template",
type="updating-template"
)
HANDLE.add_mo(SP_TEMPLATE, modify_present=True)
HANDLE.commit()
HANDLE.logout()
| 2.015625 | 2 |
epab/core/config.py | 132nd-etcher/epab | 2 | 5322 | <reponame>132nd-etcher/epab
# coding=utf-8
"""
Handles EPAB's config file
"""
import logging
import pathlib
import elib_config
CHANGELOG_DISABLE = elib_config.ConfigValueBool(
'changelog', 'disable', description='Disable changelog building', default=False
)
CHANGELOG_FILE_PATH = elib_config.ConfigValuePath(
'changelog', 'file_path', description='Path to changelog file', default='CHANGELOG.md'
)
CHANGELOG_FILE_PATH.must_be_file()
TEST_RUNNER_OPTIONS = elib_config.ConfigValueString(
'test', 'runner_options', description='Additional options for test run', default=''
)
TEST_DURATION_COUNT = elib_config.ConfigValueInteger(
'test', 'duration_count', description='Amount of "slow" tests to show', default=10
)
TEST_DURATION_COUNT.set_limits(min_=0, max_=50)
TEST_TARGET = elib_config.ConfigValueString(
'test', 'target', description='Target of pytest', default='test'
)
TEST_COVERAGE_FAIL_UNDER = elib_config.ConfigValueInteger(
'test', 'coverage_fail_under', description='Minimal coverage to pass tests', default=20
)
TEST_COVERAGE_FAIL_UNDER.set_limits(min_=0, max_=100)
TEST_PYTEST_TIMEOUT = elib_config.ConfigValueInteger(
'test', 'timeout', description='Timeout in seconds for pytest runner', default=300
)
TEST_PYTEST_TIMEOUT.set_limits(min_=0, max_=3600)
LINT_LINE_LENGTH = elib_config.ConfigValueInteger(
'lint', 'line_length', description='Linter max line width', default=120
)
LINT_LINE_LENGTH.set_limits(min_=0, max_=500)
PACKAGE_NAME = elib_config.ConfigValueString(
'package_name', description='Package name'
)
FREEZE_ENTRY_POINT = elib_config.ConfigValueString(
'freeze', 'entry_point', description='Main entry point for pyinstaller', default=''
)
FREEZE_DATA_FILES = elib_config.ConfigValueList(
'freeze', 'data_files', description='PyInstaller data-files list', element_type=str, default=[]
)
DOC_REPO = elib_config.ConfigValueString(
'doc', 'repo', description='Documentation repository on Github', default=''
)
DOC_FOLDER = elib_config.ConfigValuePath(
'doc', 'folder', description='Local documentation directory', default='./doc'
)
DOC_FOLDER.must_be_dir()
QUIET = elib_config.ConfigValueBool(
'quiet', description='Less console output', default=False
)
VERBOSE = elib_config.ConfigValueBool(
'verbose', description='More console output', default=False
)
TEST_AV_RUNNER_OPTIONS = elib_config.ConfigValueString(
'appveyor', 'test_runner_options', description='Additional command line options for tests run on AV',
default='--long'
)
ARTIFACTS = elib_config.ConfigValueList(
'appveyor', 'artifacts', description='List of artifacts for Appveyor', element_type=str, default=[]
)
FLAKE8_EXCLUDE = elib_config.ConfigValueString(
'lint', 'flake8_exclude', description='List of comma separated files for flake8 to exclude', default=''
)
MYPY_ARGS = elib_config.ConfigValueString(
'lint', 'mypy_args', description='Additional MyPy arguments', default=''
)
QT_RES_SRC = elib_config.ConfigValueString(
'qt', 'res_src', description='Qt resource file (.qrc) location', default=''
)
QT_RES_TGT = elib_config.ConfigValueString(
'qt', 'res_tgt', description='Compiled Qt resource file (.py) target location', default=''
)
UPLOAD_TO_TWINE = elib_config.ConfigValueBool(
'twine', 'upload', description='Upload package to Twine after build',
default=True,
)
MAKE_GRAPH = elib_config.ConfigValueBool(
'graph', 'make',
description='Generate graphs using PyReverse',
default=True,
)
def setup_config(epab_version: str):
"""
Set up elib_config package
    :param epab_version: installed version of EPAB as a string
"""
logger = logging.getLogger('EPAB')
logger.debug('setting up config')
elib_config.ELIBConfig.setup(
app_name='EPAB',
app_version=epab_version,
config_file_path='pyproject.toml',
config_sep_str='__',
root_path=['tool', 'epab']
)
elib_config.write_example_config('pyproject.toml.example')
if not pathlib.Path('pyproject.toml').exists():
raise FileNotFoundError('pyproject.toml')
elib_config.validate_config()
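# Usage sketch: call once at program start, before any of the config values
# defined above are read (the version string below is illustrative):
#   setup_config('2021.1.0')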
| 1.921875 | 2 |
create_flask_app.py | Creativity-Hub/create_flask_app | 2 | 5323 | <gh_stars>1-10
import os
import argparse
def check_for_pkg(pkg):
	try:
		# Import by name; avoids exec() and only falls back on a real import failure.
		__import__(pkg)
	except ImportError:
		os.system("pip3 install --user " + pkg)
def create_flask_app(app='flask_app', threading=False, wsgiserver=False, unwanted_warnings=False, logging=False, further_logging=False, site_endpoints=None, endpoints=None, request_endpoints=None):
check_for_pkg('flask')
lines = ["from flask import Flask, send_from_directory","import codecs", "import os"]
params = {
'app': app,
'threading': threading,
'wsgiserver': wsgiserver,
'unwanted_warnings': unwanted_warnings,
'logging': logging,
'further_logging': further_logging,
'site_endpoints': site_endpoints,
'endpoints': endpoints,
'request_endpoints': request_endpoints
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
for param in params.keys():
if 'endpoints' in param:
parser.add_argument('-'+param[0].lower(), '--'+param.lower(), nargs='+', help='', required=False)
else:
parser.add_argument('-'+param[0].lower(), '--'+param.lower(), help='', required=False)
args = vars(parser.parse_args())
		for param in args.keys():
			if 'request' in param and args[param] and len(args[param]) % 3 != 0:
				print('Request method endpoint format invalid, enter "Method" "Endpoint" "Parameter"')
			# Only override a default when the flag was actually supplied,
			# so absent flags do not clobber the defaults with None.
			if args[param] is not None:
				params[param] = args[param]
index = "<!DOCTYPE html>\n<html>\n<head>\n\t<title>endpoint</title>\n\t<link href='static/style.css' rel='stylesheet'>\n</head>\n<body>\n\n<script src='static/script.js'></script>\n</body>\n</html>"
project = params['app']
if not os.path.exists(project):
os.mkdir(project)
if not os.path.exists(project+'/web'):
os.mkdir(project+'/web')
if not os.path.exists(project+'/static'):
os.mkdir(project+'/static')
os.system('touch '+project+'/static/style.css')
os.system('touch '+project+'/static/script.js')
indexFile = open(project+"/web/index.html","w+")
indexFile.write(index.replace('endpoint', project))
indexFile.close()
f = open(project+'/'+project+".py","w+")
headers = {
'threading': ["", "#Threading", "from threading import Thread"],
'wsgiserver': ["", "#WSGIServer", "from gevent.pywsgi import WSGIServer"],
'unwanted_warnings': ["", "#Disable Warnings", "import warnings", "warnings.filterwarnings('ignore')"],
'logging': ["", "#Logging", "import logging", "", "#Logging configuration set to debug on debug.log file", "logging.basicConfig(filename='debug.log',level=logging.DEBUG)", "logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')"],
'further_logging': ["", "#Disable unneeded dependencies logging", "werkzeugLog = logging.getLogger('werkzeug')", "werkzeugLog.disabled = True", "requestsLog = logging.getLogger('urllib3.connectionpool')", "requestsLog.disabled = True"],
}
for param in headers.keys():
if params[param]:
for line in headers[param]:
lines.append(line)
lines.append("\ndef run():")
if params['wsgiserver']:
check_for_pkg('gevent')
lines.append("\t#WSGIServer")
lines.append("\tWSGIServer(('', 8081), app).serve_forever()")
else:
lines.append("\tapp.run(host='0.0.0.0',port=8081)")
if params['threading']:
for line in ["", "#Thread", "def keep_alive():", "\tt = Thread(target=run)", "\tt.start()"]:
lines.append(line)
for line in ["", "app = Flask(__name__)", "", "@app.route('/')", "def main():", "\t#index.html", "\treturn codecs.open('web/index.html', 'r', 'utf-8').read()", "", "@app.route('/favicon.ico')", "def favicon():", "\treturn send_from_directory(os.path.join(app.root_path, 'static'),'favicon.ico', mimetype='image/vnd.microsoft.icon')"]:
lines.append(line)
site_endpoints = params['site_endpoints']
if site_endpoints is not None:
for ep in site_endpoints:
print('Endpoint: ' + ep)
tp = ["\n@<EMAIL>.route('/endpoint')", "def endpoint():", "\t#endpoint.html", "\treturn codecs.open('web/endpoint.html', 'r', 'utf-8').read()"]
for line in tp:
lines.append(line.replace('endpoint', ep))
epFile = open(project+"/web/endpoint.html".replace('endpoint', ep),"w+")
epFile.write(index.replace('endpoint', ep).replace('style.css', ep+'.css').replace('script.js', ep+'.js'))
epFile.close()
os.system('touch '+project+'/static/'+ep+'.css')
os.system('touch '+project+'/static/'+ep+'.js')
endpoints = params['endpoints']
if endpoints is not None:
for ep in endpoints:
print('Endpoint: ' + ep)
tp = ["\[email protected]('/endpoint')", "def endpoint():", "\t#endpoint.html", "\treturn endpoint_route"]
for line in tp:
lines.append(line.replace('endpoint', ep))
	request_endpoints = params['request_endpoints']
	if request_endpoints is not None:
		# Group the flat [method, endpoint, parameter, ...] list into triples.
		request_endpoints = [request_endpoints[i * 3:(i + 1) * 3] for i in range((len(request_endpoints) + 3 - 1) // 3)]
		for request_method, ep, request_param in request_endpoints:
			print('Endpoint: ' + ep, '\nMethod: ' + request_method, '\nParameter: ' + request_param)
			tp = ["\n@app.route('/" + ep + "/<" + request_param + ">', methods=['" + request_method + "'])", "def " + ep + "(" + request_param + "):", "\t#" + request_method + " method endpoint", "\treturn do_something(" + request_param + ")"]
			for line in tp:
				lines.append(line)
lines.append("\nif __name__ == '__main__':")
if params['wsgiserver']:
lines.append("\t#Run server forever")
lines.append("\tkeep_alive()")
else:
lines.append("\t#Run server")
lines.append("\trun()")
for line in lines:
f.write(line+'\n')
f.close()
	print('Created ' + project + ' app successfully.')
for param in params.keys():
if params[param] and param != 'app':
print(param, params[param])
os.system('open '+ project)
if __name__ == '__main__':
create_flask_app()
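# CLI usage sketch (flag names map to the params dict above; values shown
# are illustrative):
#   python create_flask_app.py --app myapp --wsgiserver 1 \
#       --site_endpoints about contact \
#       --request_endpoints GET user name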
| 2.484375 | 2 |
examples/flaskr/flaskr/__init__.py | Flared/flask-sqlalchemy | 2 | 5324 | <filename>examples/flaskr/flaskr/__init__.py
import os
import click
from flask import Flask
from flask.cli import with_appcontext
from flask_sqlalchemy import SQLAlchemy
__version__ = (1, 0, 0, "dev")
db = SQLAlchemy()
def create_app(test_config=None):
"""Create and configure an instance of the Flask application."""
app = Flask(__name__, instance_relative_config=True)
# some deploy systems set the database url in the environ
db_url = os.environ.get("DATABASE_URL")
if db_url is None:
# default to a sqlite database in the instance folder
db_url = "sqlite:///" + os.path.join(app.instance_path, "flaskr.sqlite")
# ensure the instance folder exists
os.makedirs(app.instance_path, exist_ok=True)
app.config.from_mapping(
# default secret that should be overridden in environ or config
SECRET_KEY=os.environ.get("SECRET_KEY", "dev"),
SQLALCHEMY_DATABASE_URI=db_url,
SQLALCHEMY_TRACK_MODIFICATIONS=False,
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile("config.py", silent=True)
else:
# load the test config if passed in
app.config.update(test_config)
# initialize Flask-SQLAlchemy and the init-db command
db.init_app(app)
app.cli.add_command(init_db_command)
# apply the blueprints to the app
from flaskr import auth, blog
app.register_blueprint(auth.bp)
app.register_blueprint(blog.bp)
# make "index" point at "/", which is handled by "blog.index"
app.add_url_rule("/", endpoint="index")
return app
def init_db():
db.drop_all()
db.create_all()
@click.command("init-db")
@with_appcontext
def init_db_command():
"""Clear existing data and create new tables."""
init_db()
click.echo("Initialized the database.")
| 2.890625 | 3 |
simulator/cc.py | mcfx/trivm | 6 | 5325 | <gh_stars>1-10
import os, sys
fn = sys.argv[1]
if os.system('python compile.py %s __tmp.S' % fn) == 0:
os.system('python asm.py __tmp.S %s' % fn[:-2])
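# A more defensive variant (sketch) would use subprocess and propagate errors:
#   import subprocess, sys
#   subprocess.run([sys.executable, 'compile.py', fn, '__tmp.S'], check=True)
#   subprocess.run([sys.executable, 'asm.py', '__tmp.S', fn[:-2]], check=True)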
| 2.03125 | 2 |
ad2/Actor.py | ariadnepinheiro/Disease_Simulator | 4 | 5326 | <reponame>ariadnepinheiro/Disease_Simulator
#!/usr/bin/env python
# coding: UTF-8
#
# @package Actor
# @author <NAME>
# @date 26/08/2020
#
# Actor class, which is the base class for Disease objects.
#
##
class Actor:
# Holds the value of the next "free" id.
__ID = 0
##
# Construct a new Actor object.
# - Sets the initial values of its member variables.
    # - Sets the unique ID for the object and initializes to null the reference
    #   to the World object this Actor object belongs to.
# - The ID of the first Actor object is 0.
# - The ID gets incremented by one each time a new Actor object is created.
# - Sets the iteration counter to zero and initialize the location of the
# object to cell (0,0).
#
def __init__(self):
# X coordinate of this actor.
self.__locX = 0
# Y coordinate of this actor.
self.__locY = 0
# World this actor belongs to.
self.__world = None
# Unique identifier for this actor.
self.__actorID = Actor.__ID
Actor.__ID += 1
# Iteration counter.
self.__itCounter = 0
##
# Used for testing
# @return ActorID
#
def getID(self):
return self.__actorID
##
# Used for testing
# @return number of iterations
#
def Iteration(self):
return self.__itCounter
##
# Prints on screen in the format "Iteration <ID>: Actor <Actor ID>".
#
# The @f$<ID>@f$ is replaced by the current iteration number. @f$<Actor ID>@f$ is
# replaced by the unique ID of the Actor object that performs the act(self)
# method.
#
# For instance, the actor with ID 1 shows the following result on
# the output screen after its act(self) method has been called twice.
# <PRE>
# Iteration 0: Actor 1
# Iteration 1: Actor 1
# </PRE>
#
def act(self):
print("Iteration {}: Actor {}".format(self.__itCounter, self.__actorID))
self.__itCounter += 1
##
# Sets the cell coordinates of this object.
#
# @param x the column.
# @param y the row.
#
# @throws ValueError when x < 0 or x >= world width,
# @throws ValueError when y < 0 or y >= world height,
# @throws RuntimeError when the world is null.
#
def setLocation(self, x, y):
if self.__world is None:
raise RuntimeError
if (0 <= x < self.__world.getWidth()) and (0 <= y < self.__world.getHeight()):
self.__locX = x
self.__locY = y
else:
raise ValueError
##
# Sets the world this actor is into.
#
# @param world Reference to the World object this Actor object is added.
# @throws RuntimeError when world is null.
#
def addedToWorld(self, world):
if world is None:
raise RuntimeError
self.__world = world
##
# Gets the world this object in into.
#
# @return the world this object belongs to
#
def getWorld(self):
return self.__world
##
# Gets the X coordinate of the cell this actor object is into.
#
# @return the x coordinate of this Actor object.
#
def getX(self):
return self.__locX
##
# Gets the Y coordinate of the cell this actor object is into.
#
# @return the y coordinate of this Actor object.
#
def getY(self):
return self.__locY
##
# Return a string with this actor ID and position.
#
    def __str__(self):
        # Python 3 strings are Unicode, so the arrow (U+2192) needs no
        # bytes/str fallback.
        return 'ID = %d \u2192 position = (%d, %d)\n' % (
            self.getID(), self.getX(), self.getY())
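# Usage sketch (World is defined elsewhere in this package; any object with
# getWidth()/getHeight() works for setLocation):
#   a = Actor()
#   a.act()   # prints "Iteration 0: Actor 0"
#   a.act()   # prints "Iteration 1: Actor 0"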
| 3.265625 | 3 |
conversions/decimal_to_binary.py | smukk9/Python | 6 | 5327 | <reponame>smukk9/Python
"""Convert a Decimal Number to a Binary Number."""
def decimal_to_binary(num: int) -> str:
"""
Convert a Integer Decimal Number to a Binary Number as str.
>>> decimal_to_binary(0)
'0b0'
>>> decimal_to_binary(2)
'0b10'
>>> decimal_to_binary(7)
'0b111'
>>> decimal_to_binary(35)
'0b100011'
>>> # negatives work too
>>> decimal_to_binary(-2)
'-0b10'
>>> # other floats will error
>>> decimal_to_binary(16.16) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: 'float' object cannot be interpreted as an integer
>>> # strings will error as well
>>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: 'str' object cannot be interpreted as an integer
"""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
if num == 0:
return "0b0"
negative = False
if num < 0:
negative = True
num = -num
binary = []
while num > 0:
binary.insert(0, num % 2)
num >>= 1
if negative:
return "-0b" + "".join(str(e) for e in binary)
return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
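# Note: the built-in bin() produces the same strings, which makes a handy
# cross-check:
#   assert decimal_to_binary(35) == bin(35)   # both '0b100011'
#   assert decimal_to_binary(-2) == bin(-2)   # both '-0b10'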
| 4.1875 | 4 |
src/HandNetwork.py | xausky/hand-network | 2 | 5328 | #!/usr/bin/python3
#-*- coding: utf-8 -*-
import urllib.parse
import json
import base64
import requests
import logging
class Network():
LOGIN_URL = 'http://192.168.211.101/portal/pws?t=li'
BEAT_URL = 'http://192.168.211.101/portal/page/doHeartBeat.jsp'
COMMON_HERADERS = {
'Accept-Language': 'en-US',
'Accept': 'text/html'
}
def __init__(self, username, password):
b64Password = base64.b64encode(bytes(password,'utf8'))
        self.data = {'userName': username, 'userPwd': b64Password}
def login(self):
logging.info('login:%s'%(self.data))
response = requests.post(Network.LOGIN_URL, data=self.data,
headers=Network.COMMON_HERADERS, timeout=3)
responseText = base64.b64decode(response.text + '==')
responseJson = urllib.parse.unquote(responseText.decode('utf8'))
jsonDict = json.loads(responseJson)
heartBeatCyc = jsonDict.get('heartBeatCyc')
if heartBeatCyc == None:
raise BaseException(responseJson)
logging.info('login seccuss: %s'%(responseJson))
self.heartBeatCyc = int(heartBeatCyc)
self.serialNo = jsonDict.get('serialNo')
return self.heartBeatCyc
def beat(self):
response = requests.post(Network.BEAT_URL, data={'serialNo': self.serialNo},
headers=Network.COMMON_HERADERS, timeout=3)
        if response.text.find('v_failedTimes') == -1:
raise BaseException(response.text)
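# Usage sketch (credentials and the heartbeat unit are assumptions; the
# server-provided heartBeatCyc is treated here as milliseconds):
#   import time
#   net = Network('student001', 'secret')
#   cyc = net.login()
#   while True:
#       time.sleep(cyc / 1000.0)
#       net.beat()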
| 2.578125 | 3 |
algorithms_keeper/parser/rules/use_fstring.py | Fongeme/algorithms-keeper | 50 | 5329 | import libcst as cst
import libcst.matchers as m
from fixit import CstLintRule
from fixit import InvalidTestCase as Invalid
from fixit import ValidTestCase as Valid
class UseFstringRule(CstLintRule):
MESSAGE: str = (
"As mentioned in the [Contributing Guidelines]"
+ "(https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md), "
+ "please do not use printf style formatting or `str.format()`. "
+ "Use [f-string](https://realpython.com/python-f-strings/) instead to be "
+ "more readable and efficient."
)
VALID = [
Valid("assigned='string'; f'testing {assigned}'"),
Valid("'simple string'"),
Valid("'concatenated' + 'string'"),
Valid("b'bytes %s' % 'string'.encode('utf-8')"),
]
INVALID = [
Invalid("'hello, {name}'.format(name='you')"),
Invalid("'hello, %s' % 'you'"),
Invalid("r'raw string value=%s' % val"),
]
def visit_Call(self, node: cst.Call) -> None:
if m.matches(
node,
m.Call(
func=m.Attribute(value=m.SimpleString(), attr=m.Name(value="format"))
),
):
self.report(node)
def visit_BinaryOperation(self, node: cst.BinaryOperation) -> None:
if (
m.matches(
node, m.BinaryOperation(left=m.SimpleString(), operator=m.Modulo())
)
# SimpleString can be bytes and fstring don't support bytes.
# https://www.python.org/dev/peps/pep-0498/#no-binary-f-strings
and isinstance(
cst.ensure_type(node.left, cst.SimpleString).evaluated_value, str
)
):
self.report(node)
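# Example of the rewrite this rule encourages:
#   name = "you"
#   "hello, {name}".format(name=name)   # reported by visit_Call
#   "hello, %s" % name                  # reported by visit_BinaryOperation
#   f"hello, {name}"                    # preferred form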
| 2.609375 | 3 |
bert_multitask_learning/model_fn.py | akashnd/bert-multitask-learning | 0 | 5330 | # AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/13_model_fn.ipynb (unless otherwise specified).
__all__ = ['variable_summaries', 'filter_loss', 'BertMultiTaskBody', 'BertMultiTaskTop', 'BertMultiTask']
# Cell
from typing import Dict, Tuple
from inspect import signature
import tensorflow as tf
import transformers
from .modeling import MultiModalBertModel
from .params import BaseParams
from .top import (Classification, MultiLabelClassification, PreTrain,
Seq2Seq, SequenceLabel, MaskLM)
from .utils import get_embedding_table_from_model, get_transformer_main_model
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.compat.v1.name_scope(name):
mean = tf.reduce_mean(input_tensor=var)
tf.compat.v1.summary.scalar('mean', mean)
with tf.compat.v1.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(
input_tensor=tf.square(var - mean)))
tf.compat.v1.summary.scalar('stddev', stddev)
tf.compat.v1.summary.scalar('max', tf.reduce_max(input_tensor=var))
tf.compat.v1.summary.scalar('min', tf.reduce_min(input_tensor=var))
tf.compat.v1.summary.histogram('histogram', var)
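# Example: variable_summaries(tf.Variable(tf.zeros([10])), 'weights') adds
# mean/stddev/min/max scalar summaries and a histogram under the 'weights'
# name scope.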
@tf.function
def filter_loss(loss, features, problem):
if tf.reduce_mean(input_tensor=features['%s_loss_multiplier' % problem]) == 0:
return_loss = 0.0
else:
return_loss = loss
return return_loss
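# Example: with features = {'a_loss_multiplier': tf.constant([0., 0.])},
# filter_loss(loss, features, 'a') returns 0.0 because no row in the batch
# belongs to problem 'a'; otherwise the loss passes through unchanged.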
class BertMultiTaskBody(tf.keras.Model):
"""Model to extract bert features and dispatch corresponding rows to each problem_chunk.
for each problem chunk, we extract corresponding features
and hidden features for that problem. The reason behind this
is to save computation for downstream processing.
For example, we have a batch of two instances and they're from
problem a and b respectively:
Input:
[{'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0},
{'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}]
Output:
{
'a': {'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0}
'b': {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}
}
"""
def __init__(self, params: BaseParams, name='BertMultiTaskBody'):
super(BertMultiTaskBody, self).__init__(name=name)
self.params = params
self.bert = MultiModalBertModel(params=self.params)
if self.params.custom_pooled_hidden_size:
self.custom_pooled_layer = tf.keras.layers.Dense(
self.params.custom_pooled_hidden_size, activation=tf.keras.activations.selu)
else:
self.custom_pooled_layer = None
@tf.function
def get_features_for_problem(self, features, hidden_feature, problem, mode):
# get features with ind == 1
if mode == tf.estimator.ModeKeys.PREDICT:
feature_this_round = features
hidden_feature_this_round = hidden_feature
else:
multiplier_name = '%s_loss_multiplier' % problem
record_ind = tf.where(tf.cast(
tf.squeeze(features[multiplier_name]), tf.bool))
hidden_feature_this_round = {}
for hidden_feature_name in hidden_feature:
if hidden_feature_name != 'embed_table':
hidden_feature_this_round[hidden_feature_name] = tf.squeeze(tf.gather(
hidden_feature[hidden_feature_name], record_ind, axis=0
), axis=1)
hidden_feature_this_round[hidden_feature_name].set_shape(
hidden_feature[hidden_feature_name].shape.as_list())
else:
hidden_feature_this_round[hidden_feature_name] = hidden_feature[hidden_feature_name]
feature_this_round = {}
for features_name in features:
feature_this_round[features_name] = tf.gather_nd(
features[features_name],
record_ind)
return feature_this_round, hidden_feature_this_round
def call(self, inputs: Dict[str, tf.Tensor],
mode: str) -> Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]]:
_ = self.bert(inputs, mode == tf.estimator.ModeKeys.TRAIN)
# extract bert hidden features
inputs['model_input_mask'] = self.bert.get_input_mask()
inputs['model_token_type_ids'] = self.bert.get_token_type_ids()
hidden_feature = {}
for logit_type in ['seq', 'pooled', 'all', 'embed', 'embed_table']:
if logit_type == 'seq':
# tensor, [batch_size, seq_length, hidden_size]
hidden_feature[logit_type] = self.bert.get_sequence_output()
elif logit_type == 'pooled':
# tensor, [batch_size, hidden_size]
hidden_feature[logit_type] = self.bert.get_pooled_output()
if self.custom_pooled_layer:
hidden_feature[logit_type] = self.custom_pooled_layer(
hidden_feature[logit_type])
elif logit_type == 'all':
# list, num_hidden_layers * [batch_size, seq_length, hidden_size]
hidden_feature[logit_type] = self.bert.get_all_encoder_layers()
elif logit_type == 'embed':
# for res connection
hidden_feature[logit_type] = self.bert.get_embedding_output()
elif logit_type == 'embed_table':
hidden_feature[logit_type] = self.bert.get_embedding_table()
# for each problem chunk, we extract corresponding features
# and hidden features for that problem. The reason behind this
# is to save computation for downstream processing.
# For example, we have a batch of two instances and they're from
# problem a and b respectively:
# Input:
# [{'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0},
# {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}]
# Output:
# {
# 'a': {'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0}
# 'b': {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}
# }
features = inputs
return_feature = {}
return_hidden_feature = {}
for problem_dict in self.params.run_problem_list:
for problem in problem_dict:
if self.params.task_transformer:
# hidden_feature = task_tranformer_hidden_feature[problem]
raise NotImplementedError
if len(self.params.run_problem_list) > 1:
feature_this_round, hidden_feature_this_round = self.get_features_for_problem(
features, hidden_feature, problem, mode)
else:
feature_this_round, hidden_feature_this_round = features, hidden_feature
if self.params.label_transfer and self.params.grid_transformer:
raise ValueError(
'Label Transfer and grid transformer cannot be enabled in the same time.'
)
if self.params.grid_transformer:
raise NotImplementedError
return_hidden_feature[problem] = hidden_feature_this_round
return_feature[problem] = feature_this_round
return return_feature, return_hidden_feature
# Cell
class BertMultiTaskTop(tf.keras.Model):
"""Model to create top layer, aka classification layer, for each problem.
"""
def __init__(self, params: BaseParams, name='BertMultiTaskTop', input_embeddings: tf.Tensor = None):
super(BertMultiTaskTop, self).__init__(name=name)
self.params = params
problem_type_layer = {
'seq_tag': SequenceLabel,
'cls': Classification,
'seq2seq_tag': Seq2Seq,
'seq2seq_text': Seq2Seq,
'multi_cls': MultiLabelClassification,
'pretrain': PreTrain,
'masklm': MaskLM
}
problem_type_layer.update(self.params.top_layer)
self.top_layer_dict = {}
for problem_dict in self.params.run_problem_list:
for problem in problem_dict:
problem_type = self.params.problem_type[problem]
# some layers has different signatures, assign inputs accordingly
layer_signature_name = signature(
problem_type_layer[problem_type].__init__).parameters.keys()
inputs_kwargs = {
'params': self.params,
'problem_name': problem
}
for signature_name in layer_signature_name:
if signature_name == 'input_embeddings':
inputs_kwargs.update(
{signature_name: input_embeddings})
self.top_layer_dict[problem] = problem_type_layer[problem_type](
**inputs_kwargs)
def call(self,
inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],
mode: str) -> Dict[str, tf.Tensor]:
features, hidden_feature = inputs
return_dict = {}
for problem_dict in self.params.run_problem_list:
for problem in problem_dict:
feature_this_round = features[problem]
hidden_feature_this_round = hidden_feature[problem]
problem_type = self.params.problem_type[problem]
# if pretrain, return pretrain logit
if problem_type == 'pretrain':
pretrain = self.top_layer_dict[problem]
return_dict[problem] = pretrain(
(feature_this_round, hidden_feature_this_round), mode)
return return_dict
if self.params.label_transfer and self.params.grid_transformer:
raise ValueError(
'Label Transfer and grid transformer cannot be enabled in the same time.'
)
with tf.name_scope(problem):
layer = self.top_layer_dict[problem]
return_dict[problem] = layer(
(feature_this_round, hidden_feature_this_round), mode)
if self.params.augument_mask_lm and mode == tf.estimator.ModeKeys.TRAIN:
raise NotImplementedError
# try:
# mask_lm_top = MaskLM(self.params)
# return_dict['augument_mask_lm'] = \
# mask_lm_top(features,
# hidden_feature, mode, 'dummy')
# except ValueError:
# pass
return return_dict
# Cell
class BertMultiTask(tf.keras.Model):
def __init__(self, params: BaseParams, name='BertMultiTask') -> None:
super(BertMultiTask, self).__init__(name=name)
self.params = params
# initialize body model, aka transformers
self.body = BertMultiTaskBody(params=self.params)
# mlm might need word embedding from bert
# build sub-model
_ = get_embedding_table_from_model(self.body.bert.bert_model)
main_model = get_transformer_main_model(self.body.bert.bert_model)
# input_embeddings = self.body.bert.bert_model.bert.embeddings
input_embeddings = main_model.embeddings
self.top = BertMultiTaskTop(
params=self.params, input_embeddings=input_embeddings)
def call(self, inputs, mode=tf.estimator.ModeKeys.TRAIN):
feature_per_problem, hidden_feature_per_problem = self.body(
inputs, mode)
pred_per_problem = self.top(
(feature_per_problem, hidden_feature_per_problem), mode)
return pred_per_problem
def compile(self):
super(BertMultiTask, self).compile()
logger = tf.get_logger()
logger.info('Initial lr: {}'.format(self.params.lr))
logger.info('Train steps: {}'.format(self.params.train_steps))
logger.info('Warmup steps: {}'.format(self.params.num_warmup_steps))
self.optimizer, self.lr_scheduler = transformers.optimization_tf.create_optimizer(
init_lr=self.params.lr,
num_train_steps=self.params.train_steps,
num_warmup_steps=self.params.num_warmup_steps,
weight_decay_rate=0.01
)
self.mean_acc = tf.keras.metrics.Mean(name='mean_acc')
def train_step(self, data):
with tf.GradientTape() as tape:
# Forward pass
_ = self(data, mode=tf.estimator.ModeKeys.TRAIN)
# gather losses from all problems
loss_dict = {'{}_loss'.format(problem_name): tf.reduce_sum(top_layer.losses) for problem_name,
top_layer in self.top.top_layer_dict.items()}
# metric_dict = {'{}_metric'.format(problem_name): tf.reduce_mean(top_layer.metrics) for problem_name,
# top_layer in self.top.top_layer_dict.items()}
metric_dict = {m.name: m.result() for m in self.metrics}
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(self.losses, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
self.mean_acc.update_state(
[v for n, v in metric_dict.items() if n != 'mean_acc'])
return_dict = metric_dict
return_dict.update(loss_dict)
return_dict[self.mean_acc.name] = self.mean_acc.result()
# Return a dict mapping metric names to current value.
# Note that it will include the loss (tracked in self.metrics).
return return_dict
def test_step(self, data):
"""The logic for one evaluation step.
This method can be overridden to support custom evaluation logic.
This method is called by `Model.make_test_function`.
        This function should contain the mathematical logic for one step of
evaluation.
This typically includes the forward pass, loss calculation, and metrics
updates.
Configuration details for *how* this logic is run (e.g. `tf.function` and
`tf.distribute.Strategy` settings), should be left to
`Model.make_test_function`, which can also be overridden.
Arguments:
data: A nested structure of `Tensor`s.
Returns:
A `dict` containing values that will be passed to
`tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the
values of the `Model`'s metrics are returned.
"""
y_pred = self(data, mode=tf.estimator.ModeKeys.EVAL)
# Updates stateful loss metrics.
self.compiled_loss(
None, y_pred, None, regularization_losses=self.losses)
self.compiled_metrics.update_state(None, y_pred, None)
# get metrics to calculate mean
m_list = []
for metric in self.metrics:
if 'mean_acc' in metric.name:
continue
if 'acc' in metric.name:
m_list.append(metric.result())
if 'f1' in metric.name:
m_list.append(metric.result())
self.mean_acc.update_state(
m_list)
return {m.name: m.result() for m in self.metrics}
def predict_step(self, data):
return self(data, mode=tf.estimator.ModeKeys.PREDICT)
| 2.328125 | 2 |
SiMon/visualization.py | Jennyx18/SiMon | 9 | 5331 | <gh_stars>1-10
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import math
from datetime import datetime
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib.collections import LineCollection
from matplotlib import cm
from SiMon.simulation import Simulation
from SiMon.callback import Callback
from matplotlib.ticker import MaxNLocator
import time
class VisualizationCallback(Callback):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
def run(self):
self.plot_progress()
def plot_progress(self):
"""
Creates a graph showing the progress of the simulations
:param num_sim: number of simulations
:return:
"""
if 'container' in self.kwargs:
sim_inst_dict = self.kwargs['container'].sim_inst_dict
else:
return
num_sim = len(sim_inst_dict)
status = np.array([])
progresses = np.array([])
sim_idx = np.array([])
for i, sim_name in enumerate(sim_inst_dict):
sim = sim_inst_dict[sim_name]
sim_id = sim.id
if sim_id == 0:
continue # skip the root simulation instance, which is only a place holder
# only plot level=1 simulations
if sim.level > 1:
continue
s = sim.sim_get_status()
if sim.t_max > 0:
p = sim.t / sim.t_max
else:
p = 0.0
status = np.append(s, status)
progresses = np.append(p, progresses)
sim_idx = np.append(sim_id, sim_idx)
# Checks if num_sim has a square
if int(math.sqrt(num_sim) + 0.5) ** 2 == num_sim:
number = int(math.sqrt(num_sim))
y_num = num_sim // number
# If not square, find divisible number to get rectangle
else:
number = int(math.sqrt(num_sim))
while num_sim % number != 0:
number = number - 1
y_num = num_sim // number # Y-axis limit
# If prime number
if number == 1:
number = int(math.sqrt(num_sim)) + 1 # Make sure graph fits all num_sim
y_num = number
# 'Removes' extra white line if graph is too big
if (y_num * number) > num_sim and ((y_num - 1) * number) >= num_sim:
y_num = y_num - 1
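        # Worked example: num_sim = 7 is prime, so number = int(sqrt(7)) + 1 = 3
        # and y_num = 3, giving a 3x3 grid with two unused cells (the trim above
        # does not fire because 2 * 3 < 7).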
x_sim = sim_idx % number
y_sim = sim_idx // number
plt.figure(1, figsize=(12, 12))
ax = plt.gca() # get the axis
ax.set_ylim(ax.get_ylim()[::-1]) # invert the axis
ax.xaxis.tick_top() # and move the X-Axis
ax.yaxis.set_ticks(np.arange(-0.5, y_num)) # set y-ticks
ax.yaxis.set_major_locator(MaxNLocator(integer=True)) # set to integers
ax.yaxis.tick_left() # remove right y-Ticks
symbols = ['o', 's', '>', '^', '*', 'x']
labels = ['NEW', 'STOP', 'RUN', 'STALL', 'DONE', 'ERROR']
for i, symbol in enumerate(symbols):
if (status == i).sum() == 0:
continue
else:
plt.scatter(
x_sim[status == i],
y_sim[status == i],
marker=symbol,
s=500,
c=progresses[status == i],
cmap=cm.RdYlBu,
vmin = 0., vmax = 1.,
label=labels[i])
for i in range(sim_idx.shape[0]):
plt.annotate(
text=str(sim_inst_dict[i].id),
xy=(x_sim[i], y_sim[i]),
color='black',
weight='bold',
size=15
)
plt.legend(
bbox_to_anchor=(0., -.15, 1., .102),
loc='lower center',
ncol=4,
mode="expand",
borderaxespad=0.,
borderpad=2,
labelspacing=3
)
plt.colorbar()
# # Save file with a new name
# if os.path.exists('progress.pdf'):
# plt.savefig('progress_{}.pdf'.format(int(time.time())))
# else:
# print('saving figure')
if 'plot_dir' in self.kwargs:
plot_dir = self.kwargs['plot_dir']
else:
plot_dir = os.getcwd()
if not os.path.isdir(plot_dir):
os.mkdir(plot_dir)
fn = datetime.now().strftime("%d_%m_%Y-%H_%M_%S")
if 'format' in self.kwargs:
fmt = self.kwargs['format']
else:
fmt = 'png'
fullpath = os.path.join(plot_dir, '%s.%s' % (fn, fmt))
print('Progress plot saved on %s' % fullpath)
plt.savefig(fullpath)
        plt.close(1)
| 2.578125 | 3 |
bin/psm/oil_jet.py | ChrisBarker-NOAA/tamoc | 18 | 5332 | <reponame>ChrisBarker-NOAA/tamoc
"""
Particle Size Models: Pure Oil Jet
===================================
Use the ``TAMOC`` `particle_size_models` module to simulate a laboratory
scale pure oil jet into water. This script demonstrates the typical steps
involved in using the `particle_size_models.PureJet` object, which requires
specification of all of the fluid properties of the jet.
"""
# <NAME>, March 2020, Texas A&M University <<EMAIL>>.
from __future__ import (absolute_import, division, print_function)
from tamoc import seawater, particle_size_models
import numpy as np
import warnings
warnings.filterwarnings("ignore")
if __name__ == '__main__':
print('\n---------------------------------------------------------------')
print('Demonstration using the PureJet class in the')
print('particle_size_models module of TAMOC for the ')
print('experiments in the paper by Brandvik et al. (2013).')
print('\nComparisons are for the data reported in Table 3')
print('of the paper')
print('---------------------------------------------------------------')
# Simulate an experiment from Brandvik et al. (2013). Their data uses
# Oseberg oil, with the following reported properties
rho_oil = 839.3
mu_oil = 5.e-3
sigma = 15.5e-3
# We will simulate data from Table 3 in the Brandvik et al. (2013) paper.
# These experiments have a nozzle diameter of 1.5 mm
d0 = 0.0015
# They also used seawater (assumed salinity of 34.5 psu) and released the
# oil from a depth of about 6 m at a temperature of 13 deg C
T = 273.15 + 13.
S = 34.5
rho = seawater.density(T, S, 101325.)
P = 101325. + rho * 9.81 * 6.
rho = seawater.density(T, S, P)
mu = seawater.mu(T, S, P)
# With this information, we can initialize a
# `particle_size_models.PureJet` object
jet = particle_size_models.PureJet(rho_oil, mu_oil, sigma, rho, mu,
fp_type = 1)
# Brandvik et al. (2013) report the exit velocity at the nozzle. We
# need to convert this to a mass flow rate. The mass flow rate should
# always be reported within a numpy array, which allows for different
# mass fluxes for different pseudocomponents of the oil.
u_oil = 11.3
A_oil = np.pi * (d0 / 2.)**2
q_oil = u_oil * A_oil
md_oil = np.array([rho_oil * q_oil])
# To simulate the no-dispersant case, all of the oil properties in the
# jet object are currently correct. Hence, we may use:
jet.simulate(d0, md_oil)
# We compare the result to the measured data as follows:
print('\nThe median droplet size for the no-disperant experiment is:')
print(' Measured: %3.3d um' % 237)
print(' Modeled : %3.3d um\n' % (jet.get_d50() * 1.e6))
# When dispersant is added in sufficient quantities, the interfacial
# tension reduces and the droplet size gets smaller. At a dispersant
# to oil ratio of 50, sigma is:
sigma = 0.05e-3
# We can run this case by updating the properties of the jet object and
    # re-running the simulation
jet.update_properties(rho_oil, mu_oil, sigma, rho, mu, fp_type = 1)
jet.simulate(d0, md_oil)
# We compare the result to the measured data as follows:
print('\nThe median droplet size for an experiments with a')
print('dispersant to oil ratio of 50 is:')
print(' Measured: %3.3d um' % 170)
print(' Modeled : %3.3d um\n' % (jet.get_d50() * 1.e6))
# We can also plot the size distribution
print('\nThe corresponding size distribution is plotted in Figure 1')
jet.get_distributions(15)
jet.plot_psd(1)
| 1.75 | 2 |
tron/Nubs/hal.py | sdss/tron | 0 | 5333 | import os.path
import tron.Misc
from tron import g, hub
from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder
from tron.Hub.Nub.SocketActorNub import SocketActorNub
from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder
name = 'hal'
def start(poller):
cfg = tron.Misc.cfg.get(g.location, 'actors', doFlush=True)[name]
stop()
initCmds = ('ping', 'status', 'version')
safeCmdsList = ['ping', 'version', 'status']
safeCmds = r'^\s*({0})\s*$'.format('|'.join(safeCmdsList))
d = ASCIIReplyDecoder(cidFirst=True, debug=1)
e = ASCIICmdEncoder(sendCommander=True, useCID=False, debug=1)
nub = SocketActorNub(
poller,
cfg['host'],
cfg['port'],
name=name,
encoder=e,
decoder=d,
grabCID=True, # the actor spontaneously generates a line we can eat.
initCmds=initCmds,
safeCmds=safeCmds,
needsAuth=True,
logDir=os.path.join(g.logDir, name),
debug=3)
hub.addActor(nub)
def stop():
n = hub.findActor(name)
if n:
hub.dropActor(n)
del n
| 2.0625 | 2 |
tests/fixtures/defxmlschema/chapter15.py | gramm/xsdata | 0 | 5334 | <reponame>gramm/xsdata
from dataclasses import dataclass, field
from decimal import Decimal
from typing import Optional
from xsdata.models.datatype import XmlDate
@dataclass
class SizeType:
value: Optional[int] = field(
default=None,
metadata={
"required": True,
}
)
system: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
}
)
@dataclass
class ShirtType:
description: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
comment: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
number: Optional[int] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
size: Optional[SizeType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
}
)
id: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
}
)
version: Optional[Decimal] = field(
default=None,
metadata={
"type": "Attribute",
}
)
eff_date: Optional[XmlDate] = field(
default=None,
metadata={
"name": "effDate",
"type": "Attribute",
}
)
@dataclass
class Shirt(ShirtType):
class Meta:
name = "shirt"
| 2.546875 | 3 |
extensions/domain.py | anubhavsinha98/oppia | 1 | 5335 | <gh_stars>1-10
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects used within multiple extensions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
import python_utils
class CustomizationArgSpec(python_utils.OBJECT):
"""Value object for a customization arg specification."""
def __init__(self, name, description, schema, default_value):
self.name = name
self.description = description
self.schema = schema
self.default_value = default_value
| 1.765625 | 2 |
plugins/modules/oci_database_management_object_privilege_facts.py | LaudateCorpus1/oci-ansible-collection | 0 | 5336 | <reponame>LaudateCorpus1/oci-ansible-collection
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_management_object_privilege_facts
short_description: Fetches details about one or multiple ObjectPrivilege resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple ObjectPrivilege resources in Oracle Cloud Infrastructure
- Gets the list of Object Privileges granted for the specified user.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
managed_database_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Managed Database.
type: str
required: true
user_name:
description:
- The name of the user whose details are to be viewed.
type: str
required: true
name:
description:
- A filter to return only resources that match the entire name.
type: str
sort_by:
description:
- The field to sort information by. Only one sortOrder can be used. The default sort order
for 'NAME' is ascending. The 'NAME' sort order is case-sensitive.
type: str
choices:
- "NAME"
sort_order:
description:
- The option to sort information in ascending ('ASC') or descending ('DESC') order. Ascending order is the default order.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List object_privileges
oci_database_management_object_privilege_facts:
# required
managed_database_id: "ocid1.manageddatabase.oc1..xxxxxxEXAMPLExxxxxx"
user_name: user_name_example
# optional
name: name_example
sort_by: NAME
sort_order: ASC
"""
RETURN = """
object_privileges:
description:
- List of ObjectPrivilege resources
returned: on success
type: complex
contains:
name:
description:
- The name of the privilege on the object.
returned: on success
type: str
sample: name_example
schema_type:
description:
- The type of the object.
returned: on success
type: str
sample: schema_type_example
owner:
description:
- The owner of the object.
returned: on success
type: str
sample: owner_example
grantor:
description:
- The name of the user who performed the grant
returned: on success
type: str
sample: grantor_example
hierarchy:
description:
- Indicates whether the privilege was granted with the HIERARCHY OPTION (YES) or not (NO)
returned: on success
type: str
sample: YES
object:
description:
- The name of the object. The object can be any object, including tables, packages, indexes, sequences, and so on.
returned: on success
type: str
sample: object_example
grant_option:
description:
- Indicates whether the privilege was granted with the GRANT OPTION (YES) or not (NO)
returned: on success
type: str
sample: YES
common:
description:
- "Indicates how the grant was made. Possible values:
YES if the role was granted commonly (CONTAINER=ALL was used)
NO if the role was granted locally (CONTAINER=ALL was not used)"
returned: on success
type: str
sample: YES
inherited:
description:
- Indicates whether the role grant was inherited from another container (YES) or not (NO)
returned: on success
type: str
sample: YES
sample: [{
"name": "name_example",
"schema_type": "schema_type_example",
"owner": "owner_example",
"grantor": "grantor_example",
"hierarchy": "YES",
"object": "object_example",
"grant_option": "YES",
"common": "YES",
"inherited": "YES"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.database_management import DbManagementClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ObjectPrivilegeFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return [
"managed_database_id",
"user_name",
]
def list_resources(self):
optional_list_method_params = [
"name",
"sort_by",
"sort_order",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_object_privileges,
managed_database_id=self.module.params.get("managed_database_id"),
user_name=self.module.params.get("user_name"),
**optional_kwargs
)
ObjectPrivilegeFactsHelperCustom = get_custom_class("ObjectPrivilegeFactsHelperCustom")
class ResourceFactsHelper(
ObjectPrivilegeFactsHelperCustom, ObjectPrivilegeFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
managed_database_id=dict(type="str", required=True),
user_name=dict(type="str", required=True),
name=dict(type="str"),
sort_by=dict(type="str", choices=["NAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="object_privilege",
service_client_class=DbManagementClient,
namespace="database_management",
)
result = []
if resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(object_privileges=result)
if __name__ == "__main__":
main()
| 1.617188 | 2 |
rbc/externals/stdio.py | guilhermeleobas/rbc | 0 | 5337 | <reponame>guilhermeleobas/rbc
"""https://en.cppreference.com/w/c/io
"""
from rbc import irutils
from llvmlite import ir
from rbc.targetinfo import TargetInfo
from numba.core import cgutils, extending
from numba.core import types as nb_types
from rbc.errors import NumbaTypeError # some errors are available for Numba >= 0.55
int32_t = ir.IntType(32)
def cg_fflush(builder):
int8_t = ir.IntType(8)
fflush_fnty = ir.FunctionType(int32_t, [int8_t.as_pointer()])
fflush_fn = irutils.get_or_insert_function(builder.module, fflush_fnty, name="fflush")
builder.call(fflush_fn, [int8_t.as_pointer()(None)])
@extending.intrinsic
def fflush(typingctx):
"""``fflush`` that can be called from Numba jit-decorated functions.
.. note::
``fflush`` is available only for CPU target.
"""
sig = nb_types.void(nb_types.void)
def codegen(context, builder, signature, args):
target_info = TargetInfo()
if target_info.is_cpu:
cg_fflush(builder)
return sig, codegen
@extending.intrinsic
def printf(typingctx, format_type, *args):
"""``printf`` that can be called from Numba jit-decorated functions.
.. note::
``printf`` is available only for CPU target.
"""
if isinstance(format_type, nb_types.StringLiteral):
sig = nb_types.void(format_type, nb_types.BaseTuple.from_types(args))
def codegen(context, builder, signature, args):
target_info = TargetInfo()
if target_info.is_cpu:
cgutils.printf(builder, format_type.literal_value, *args[1:])
cg_fflush(builder)
return sig, codegen
else:
raise NumbaTypeError(f"expected StringLiteral but got {type(format_type).__name__}")
| 2.046875 | 2 |
setup.py | clach04/discoverhue | 10 | 5338 | <gh_stars>1-10
from setuptools import setup
try:
import pypandoc
long_description = pypandoc.convert_file('README.md', 'rst', extra_args=())
except ImportError:
import codecs
long_description = codecs.open('README.md', encoding='utf-8').read()
long_description = '\n'.join(long_description.splitlines())
setup(
name='discoverhue',
description='Auto discovery of Hue bridges',
long_description=long_description,
version='1.0.2',
url='https://github.com/Overboard/discoverhue',
author='Overboard',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='philips hue',
packages=['discoverhue'],
install_requires=['httpfind'],
)
| 1.632813 | 2 |
utils/logmmse.py | dbonattoj/Real-Time-Voice-Cloning | 3 | 5339 | <reponame>dbonattoj/Real-Time-Voice-Cloning
# The MIT License (MIT)
#
# Copyright (c) 2015 braindead
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# This code was extracted from the logmmse package (https://pypi.org/project/logmmse/) and I
# simply modified the interface to meet my needs.
import numpy as np
import math
from scipy.special import expn
from collections import namedtuple
NoiseProfile = namedtuple("NoiseProfile", "sampling_rate window_size len1 len2 win n_fft noise_mu2")
def profile_noise(noise, sampling_rate, window_size=0):
"""
Creates a profile of the noise in a given waveform.
:param noise: a waveform containing noise ONLY, as a numpy array of floats or ints.
:param sampling_rate: the sampling rate of the audio
:param window_size: the size of the window the logmmse algorithm operates on. A default value
will be picked if left as 0.
:return: a NoiseProfile object
"""
noise, dtype = to_float(noise)
noise += np.finfo(np.float64).eps
if window_size == 0:
window_size = int(math.floor(0.02 * sampling_rate))
if window_size % 2 == 1:
window_size = window_size + 1
perc = 50
len1 = int(math.floor(window_size * perc / 100))
len2 = int(window_size - len1)
win = np.hanning(window_size)
win = win * len2 / np.sum(win)
n_fft = 2 * window_size
noise_mean = np.zeros(n_fft)
n_frames = len(noise) // window_size
for j in range(0, window_size * n_frames, window_size):
noise_mean += np.absolute(np.fft.fft(win * noise[j:j + window_size], n_fft, axis=0))
noise_mu2 = (noise_mean / n_frames) ** 2
return NoiseProfile(sampling_rate, window_size, len1, len2, win, n_fft, noise_mu2)
def denoise(wav, noise_profile: NoiseProfile, eta=0.15):
"""
Cleans the noise from a speech waveform given a noise profile. The waveform must have the
same sampling rate as the one used to create the noise profile.
:param wav: a speech waveform as a numpy array of floats or ints.
:param noise_profile: a NoiseProfile object that was created from a similar (or a segment of
the same) waveform.
:param eta: voice threshold for noise update. While the voice activation detection value is
below this threshold, the noise profile will be continuously updated throughout the audio.
Set to 0 to disable updating the noise profile.
:return: the clean wav as a numpy array of floats or ints of the same length.
"""
wav, dtype = to_float(wav)
wav += np.finfo(np.float64).eps
p = noise_profile
nframes = int(math.floor(len(wav) / p.len2) - math.floor(p.window_size / p.len2))
x_final = np.zeros(nframes * p.len2)
aa = 0.98
mu = 0.98
ksi_min = 10 ** (-25 / 10)
x_old = np.zeros(p.len1)
xk_prev = np.zeros(p.len1)
noise_mu2 = p.noise_mu2
for k in range(0, nframes * p.len2, p.len2):
insign = p.win * wav[k:k + p.window_size]
spec = np.fft.fft(insign, p.n_fft, axis=0)
sig = np.absolute(spec)
sig2 = sig ** 2
gammak = np.minimum(sig2 / noise_mu2, 40)
        if xk_prev.all() == 0:  # first frame: xk_prev still holds its all-zeros initialization
ksi = aa + (1 - aa) * np.maximum(gammak - 1, 0)
else:
ksi = aa * xk_prev / noise_mu2 + (1 - aa) * np.maximum(gammak - 1, 0)
ksi = np.maximum(ksi_min, ksi)
log_sigma_k = gammak * ksi/(1 + ksi) - np.log(1 + ksi)
vad_decision = np.sum(log_sigma_k) / p.window_size
if vad_decision < eta:
noise_mu2 = mu * noise_mu2 + (1 - mu) * sig2
a = ksi / (1 + ksi)
vk = a * gammak
ei_vk = 0.5 * expn(1, np.maximum(vk, 1e-8))
hw = a * np.exp(ei_vk)
sig = sig * hw
xk_prev = sig ** 2
xi_w = np.fft.ifft(hw * spec, p.n_fft, axis=0)
xi_w = np.real(xi_w)
x_final[k:k + p.len2] = x_old + xi_w[0:p.len1]
x_old = xi_w[p.len1:p.window_size]
output = from_float(x_final, dtype)
output = np.pad(output, (0, len(wav) - len(output)), mode="constant")
return output
def to_float(_input):
if _input.dtype == np.float64:
return _input, _input.dtype
elif _input.dtype == np.float32:
return _input.astype(np.float64), _input.dtype
elif _input.dtype == np.uint8:
return (_input - 128) / 128., _input.dtype
elif _input.dtype == np.int16:
return _input / 32768., _input.dtype
elif _input.dtype == np.int32:
return _input / 2147483648., _input.dtype
raise ValueError('Unsupported wave file format')
def from_float(_input, dtype):
if dtype == np.float64:
        return _input
elif dtype == np.float32:
return _input.astype(np.float32)
elif dtype == np.uint8:
return ((_input * 128) + 128).astype(np.uint8)
elif dtype == np.int16:
return (_input * 32768).astype(np.int16)
    elif dtype == np.int32:
        return (_input * 2147483648).astype(np.int32)
raise ValueError('Unsupported wave file format')
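# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming a mono int16 recording whose first second is background noise
# only; the file name and the scipy loader are assumptions here:
#
#   from scipy.io import wavfile
#   sr, wav = wavfile.read("speech.wav")
#   profile = profile_noise(wav[:sr], sr)   # profile 1 s of pure noise
#   clean = denoise(wav, profile)           # same dtype and length as wav
#   wavfile.write("speech_clean.wav", sr, clean)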
| 1.875 | 2 |
lib/core/session.py | 6un9-h0-Dan/CIRTKit | 97 | 5340 | # This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import time
import datetime
from lib.common.out import *
from lib.common.objects import File
from lib.core.database import Database
from lib.core.investigation import __project__
class Session(object):
def __init__(self):
self.id = None
# This will be assigned with the File object of the file currently
# being analyzed.
self.file = None
# Timestamp of the creation of the session.
self.created_at = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
# MISP event associated to the object
self.misp_event = None
class Sessions(object):
def __init__(self):
self.current = None
self.sessions = []
# Store the results of the last "find" command.
self.find = None
def close(self):
self.current = None
def is_set(self):
# Check if the session has been opened or not.
if self.current:
return True
else:
return False
def switch(self, session):
self.current = session
print_info("Switched to session #{0} on {1}".format(self.current.id, self.current.file.path))
def new(self, path=None, misp_event=None):
if path is None and misp_event is None:
print_error("You have to open a session on a path or on a misp event.")
return
        if not __project__.name:
            print_error("You must open an investigation to store files")
            return
session = Session()
total = len(self.sessions)
session.id = total + 1
if path is not None:
if self.is_set() and self.current.misp_event:
session.misp_event = self.current.misp_event
# Open a section on the given file.
session.file = File(path)
# Try to lookup the file in the database. If it is already present
            # we get its file name and tags.
row = Database().find(key='sha256', value=session.file.sha256)
if row:
session.file.name = row[0].name
session.file.tags = ', '.join(tag.to_dict()['tag'] for tag in row[0].tag)
print_info("Session opened on {0}".format(path))
if misp_event is not None:
if self.is_set() and self.current.file:
session.file = self.current.file
refresh = False
if self.current is not None and self.current.misp_event is not None \
and self.current.misp_event.event_id == misp_event.event_id:
refresh = True
session.misp_event = misp_event
if refresh:
print_info("Session on MISP event {0} refreshed.".format(misp_event.event_id))
else:
print_info("Session opened on MISP event {0}.".format(misp_event.event_id))
if session.file is not None:
# Loop through all existing sessions and check whether there's another
# session open on the same file and delete it. This is to avoid
# duplicates in sessions.
# NOTE: in the future we might want to remove this if sessions have
# unique attributes (for example, an history just for each of them).
for entry in self.sessions:
if entry.file is not None and entry.file.sha256 == session.file.sha256:
self.sessions.remove(entry)
# Add new session to the list.
self.sessions.append(session)
# Mark the new session as the current one.
self.current = session
__sessions__ = Sessions()
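# Usage sketch (illustrative; assumes an investigation is already opened via
# __project__ and that "/tmp/sample.bin" is a hypothetical file path):
#
#   __sessions__.new(path="/tmp/sample.bin")    # opens session #1
#   if __sessions__.is_set():
#       print(__sessions__.current.file.sha256)
#   __sessions__.close()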
| 2.640625 | 3 |
src/simple_report/xls/document.py | glibin/simple-report | 0 | 5341 | <filename>src/simple_report/xls/document.py
#coding: utf-8
import xlrd
from simple_report.core.document_wrap import BaseDocument, SpreadsheetDocument
from simple_report.xls.workbook import Workbook
from simple_report.xls.output_options import XSL_OUTPUT_SETTINGS
class DocumentXLS(BaseDocument, SpreadsheetDocument):
"""
    Wrapper for reports in XLS format
"""
def __init__(self, ffile, tags=None, **kwargs):
self.file = ffile
self._workbook = Workbook(ffile, **kwargs)
@property
def workbook(self):
"""
        Get the workbook
        :result: the workbook
"""
return self._workbook
def build(self, dst):
"""
        Build the report
        :param dst: path to the output file
        :result:
"""
self._workbook.build(dst)
def __setattr__(self, key, value):
if key in XSL_OUTPUT_SETTINGS:
setattr(self._workbook, key, value)
else:
super(DocumentXLS, self).__setattr__(key, value)
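# Usage sketch (illustrative; the template and output file names are
# assumptions, and tag substitution follows simple_report conventions):
#
#   report = DocumentXLS("template.xls")
#   report.workbook              # access the underlying Workbook wrapper
#   report.build("result.xls")   # render the report to disk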
| 2.421875 | 2 |
tests/ut/datavisual/common/test_error_handler.py | zengchen1024/mindinsight | 0 | 5342 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
Test error handler.
Usage:
pytest tests/ut/datavisual
"""
from unittest.mock import patch
from werkzeug.exceptions import MethodNotAllowed, NotFound
from ...backend.datavisual.conftest import TRAIN_ROUTES
from ..mock import MockLogger
from ....utils.tools import get_url
from mindinsight.datavisual.processors import scalars_processor
from mindinsight.datavisual.processors.scalars_processor import ScalarsProcessor
class TestErrorHandler:
"""Test train visual api."""
@patch.object(ScalarsProcessor, 'get_metadata_list')
def test_handle_http_exception_error_not_found(self, mock_scalar_processor, client):
"""Test handle http exception error not found."""
scalars_processor.logger = MockLogger
text = 'Test Message'
# NotFound
def get_metadata_list(train_ids, tag):
raise NotFound("%s" % text)
mock_scalar_processor.side_effect = get_metadata_list
test_train_ids = "aa"
test_tag = "bb"
params = dict(train_ids=test_train_ids, tag=test_tag)
url = get_url(TRAIN_ROUTES['scalar_metadata'], params)
response = client.get(url)
assert response.status_code == 404
response = response.get_json()
assert response['error_code'] == '50545001'
assert response['error_msg'] == '404 Not Found.'
@patch.object(ScalarsProcessor, 'get_metadata_list')
def test_handle_http_exception_error_method_not_allowed(self, mock_scalar_processor, client):
"""Test handling http exception error method not allowed."""
scalars_processor.logger = MockLogger
text = 'Test Message'
# MethodNotAllowed
def get_metadata_list(train_ids, tag):
raise MethodNotAllowed("%s" % text)
mock_scalar_processor.side_effect = get_metadata_list
test_train_ids = "aa"
test_tag = "bb"
params = dict(train_ids=test_train_ids, tag=test_tag)
url = get_url(TRAIN_ROUTES['scalar_metadata'], params)
response = client.get(url)
assert response.status_code == 405
response = response.get_json()
assert response['error_code'] == '50545002'
assert response['error_msg'] == '405 Method Not Allowed.'
@patch.object(ScalarsProcessor, 'get_metadata_list')
def test_handle_http_exception_error_method_other_errors(self, mock_scalar_processor, client):
"""Test handling http exception error method other errors."""
scalars_processor.logger = MockLogger
text = 'Test Message'
# Other errors
def get_metadata_list(train_ids, tag):
raise KeyError("%s" % text)
mock_scalar_processor.side_effect = get_metadata_list
test_train_ids = "aa"
test_tag = "bb"
params = dict(train_ids=test_train_ids, tag=test_tag)
url = get_url(TRAIN_ROUTES['scalar_metadata'], params)
response = client.get(url)
assert response.status_code == 500
response = response.get_json()
assert response['error_code'] == '50540000'
assert response['error_msg'] == 'System error.'
| 1.851563 | 2 |
pipeline_sdk/api/build/cancel_build_pb2.py | easyopsapis/easyops-api-python | 5 | 5343 | <reponame>easyopsapis/easyops-api-python<filename>pipeline_sdk/api/build/cancel_build_pb2.py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cancel_build.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='cancel_build.proto',
package='build',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x12\x63\x61ncel_build.proto\x12\x05\x62uild\x1a\x1bgoogle/protobuf/empty.proto\"!\n\rCancelRequest\x12\x10\n\x08\x62uild_id\x18\x01 \x01(\t\"o\n\x15\x43\x61ncelResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12$\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x16.google.protobuf.Emptyb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_CANCELREQUEST = _descriptor.Descriptor(
name='CancelRequest',
full_name='build.CancelRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='build_id', full_name='build.CancelRequest.build_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=91,
)
_CANCELRESPONSEWRAPPER = _descriptor.Descriptor(
name='CancelResponseWrapper',
full_name='build.CancelResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='build.CancelResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='build.CancelResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='build.CancelResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='build.CancelResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=204,
)
_CANCELRESPONSEWRAPPER.fields_by_name['data'].message_type = google_dot_protobuf_dot_empty__pb2._EMPTY
DESCRIPTOR.message_types_by_name['CancelRequest'] = _CANCELREQUEST
DESCRIPTOR.message_types_by_name['CancelResponseWrapper'] = _CANCELRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CancelRequest = _reflection.GeneratedProtocolMessageType('CancelRequest', (_message.Message,), {
'DESCRIPTOR' : _CANCELREQUEST,
'__module__' : 'cancel_build_pb2'
# @@protoc_insertion_point(class_scope:build.CancelRequest)
})
_sym_db.RegisterMessage(CancelRequest)
CancelResponseWrapper = _reflection.GeneratedProtocolMessageType('CancelResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _CANCELRESPONSEWRAPPER,
'__module__' : 'cancel_build_pb2'
# @@protoc_insertion_point(class_scope:build.CancelResponseWrapper)
})
_sym_db.RegisterMessage(CancelResponseWrapper)
# @@protoc_insertion_point(module_scope)
| 1.5 | 2 |
src/.ipynb_checkpoints/headpose_model-checkpoint.py | geochri/Intel_Edge_AI-Computer_Pointer_controller | 0 | 5344 | <filename>src/.ipynb_checkpoints/headpose_model-checkpoint.py<gh_stars>0
'''
This is a sample class for a model. You may choose to use it as-is or make any changes to it.
This has been provided just to give you an idea of how to structure your model class.
'''
from openvino.inference_engine import IENetwork, IECore
import numpy as np
import os
import cv2
import sys
class Model_HeadPose:
'''
Class for the Head Pose Estimation Model.
'''
def __init__(self, model_name, device='CPU', extensions=None):
self.model_weights = model_name+'.bin'
self.model_structure = model_name+'.xml'
self.device = device
self.extensions = extensions
# self.check_model()
# try:
# self.input_name = next(iter(self.model.inputs))
# self.input_shape = self.model.inputs[self.input_name].shape
# self.output_name = next(iter(self.model.outputs))
# self.output_shape = self.model.outputs[self.output_name].shape
# print('Initialise.. completed.')
# except Exception as e:
# raise ValueError('Something is wrong with input and output values..')
def load_model(self):
'''
This method is for loading the model to the device specified by the user.
If your model requires any Plugins, this is where you can load them.
'''
try:
print('Model is loading...')
self.core = IECore()
self.net = self.core.read_network(model=self.model_structure,weights=self.model_weights)
supported = self.core.query_network(self.net, self.device)
not_supported = [layer for layer in self.net.layers.keys() if layer not in supported]
if len(not_supported) != 0 and self.device == 'CPU':
                print('Unsupported', not_supported)
if not self.extensions == None:
print('***Quick fix.\n ~CPU Extension added')
                    self.core.add_extension(self.extensions, self.device)
supported = self.core.query_network(self.net, self.device)
not_supported = [layer for layer in self.net.layers.keys() if layer not in supported]
                    if len(not_supported) == 0:
                        print('***Quick fix succeeded.')
                    else:
                        print('***Quick fix failed. Check the extension path.')
self.net_exec = self.core.load_network(network=self.net, device_name=self.device)
except Exception as e:
            raise RuntimeError('Something is wrong.. ~debug load model~') from e
try:
self.input_name = next(iter(self.net.inputs))
self.input_shape = self.net.inputs[self.input_name].shape
self.output_name = next(iter(self.net.outputs))
self.output_shape = self.net.outputs[self.output_name].shape
print('Initialise.. completed.')
except Exception as e:
raise ValueError('Something is wrong with input and output values..')
def predict(self, image):
'''
This method is meant for running predictions on the input image.
'''
self.image = image
print('HeadPose predict..')
pre_image = self.preprocess_input(self.image)
input_name = self.input_name
input_dict = {input_name: pre_image}
# infer = self.net_exec.start_async(request_id=0, inputs=input_dict)
# status = infer.wait()
results = self.net_exec.infer(input_dict)
outputs = self.preprocess_output(results)
# if status == 0:
# results = infer.outputs[self.output_name]
# print(results)
# print(self.input_name)
# outputs = self.preprocess_output(results)
return outputs
def check_model(self):
'''
Check - initialise the model
'''
try:
self.model = IENetwork(self.model_structure, self.model_weights)
except Exception as e:
raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
def preprocess_input(self, image):
'''
An input image in [1xCxHxW] format.
B - batch size
C - number of channels
H - image height
W - image width
'''
image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
image = image.transpose((2, 0, 1))
image = image.reshape(1, *image.shape)
return image
def preprocess_output(self, outputs):
'''
Output layer names in Inference Engine format:
name: "angle_y_fc", shape: [1, 1] - Estimated yaw (in degrees).
name: "angle_p_fc", shape: [1, 1] - Estimated pitch (in degrees).
name: "angle_r_fc", shape: [1, 1] - Estimated roll (in degrees).
'''
object_list = []
print('PreOutput-headpose..')
# print(outputs)
object_list.append(outputs['angle_y_fc'].tolist()[0][0])
object_list.append(outputs['angle_p_fc'].tolist()[0][0])
object_list.append(outputs['angle_r_fc'].tolist()[0][0])
return object_list | 2.5625 | 3 |
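# Usage sketch for the Model_HeadPose class above (illustrative; the model
# path and the BGR frame source are assumptions):
#
#   hp = Model_HeadPose("models/head-pose-estimation-adas-0001")
#   hp.load_model()
#   yaw, pitch, roll = hp.predict(frame)   # frame: np.ndarray from cv2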
src/minisaml/internal/constants.py | HENNGE/minisaml | 2 | 5345 | <gh_stars>1-10
NAMES_SAML2_PROTOCOL = "urn:oasis:names:tc:SAML:2.0:protocol"
NAMES_SAML2_ASSERTION = "urn:oasis:names:tc:SAML:2.0:assertion"
NAMEID_FORMAT_UNSPECIFIED = "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified"
BINDINGS_HTTP_POST = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
DATE_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
DATE_TIME_FORMAT_FRACTIONAL = "%Y-%m-%dT%H:%M:%S.%fZ"
| 1.140625 | 1 |
Contests/Snackdown19_Qualifier/CHEFPRMS.py | PK-100/Competitive_Programming | 70 | 5346 | <reponame>PK-100/Competitive_Programming
import math
def square(n):
    # returns True when n is NOT a perfect square (used to reject p*p splits)
    tmp = round(math.sqrt(n))
    if tmp * tmp == n:
        return False
    else:
        return True
def semprime(n):
    # True when n is a product of two distinct primes (a square-free semiprime)
ch = 0
if square(n)==False:
return False
for i in range(2, int(math.sqrt(n)) + 1):
while n%i==0:
n//=i
ch+=1
if ch >= 2:
break
if(n > 1):
ch += 1
return ch == 2
def check(n):
if semprime(n) == True:
return True
else:
return False
for _ in range(int(input())):
n=int(input())
flag=0
for i in range(2,n//2+1):
if check(i)==True and check(n-i)==True:
#print(i,n-i,square(i),square(n-i),"Yes")
print("YES")
flag=1
break
if flag==0:
#print(i,n-i,square(i),square(n-i),"No")
print("NO")
| 3.28125 | 3 |
setup.py | arokem/afq-deep-learning | 0 | 5347 | <reponame>arokem/afq-deep-learning<filename>setup.py
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='This repository hosts some work-in-progress experiments applying deep learning to predict age using tractometry data.',
author='<NAME>',
license='BSD-3',
)
| 1.070313 | 1 |
make_base_container.py | thiagodasilva/runway | 0 | 5348 | <filename>make_base_container.py
#!/usr/bin/env python3
import argparse
import os
import random
import requests
import sys
import tempfile
import uuid
from libs import colorprint
from libs.cli import run_command
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# assume well-known lvm volume group on host
# ...later we'll figure out how to make this dynamic
VG_NAME = "swift-runway-vg01"
SWIFTSTACK_IMAGES_PREFIX = "ss-"
SWIFTSTACK_IMAGES_BASE_URL = \
"https://tellus.swiftstack.com/v1/AUTH_runway/lxd-images"
IMAGE_MANIFEST_OBJECT_NAME = "manifest.json"
UNIFIED_TARBALL_TYPE = "unified"
SPLIT_TARBALL_TYPE = "split"
TARBALL_TYPES = [UNIFIED_TARBALL_TYPE, SPLIT_TARBALL_TYPE]
def exit_with_error(error_text):
colorprint.error(error_text)
sys.exit(1)
def get_default_image(distro):
if distro.lower() == "rhel":
return "images:centos/7/amd64"
else:
return "ubuntu:16.04"
def is_swiftstack_hosted_image(base_image):
return base_image.lower().startswith(SWIFTSTACK_IMAGES_PREFIX)
def get_image_manifest(swift_container_name):
manifest_obj_url = "{}/{}/{}".format(SWIFTSTACK_IMAGES_BASE_URL,
swift_container_name,
IMAGE_MANIFEST_OBJECT_NAME)
try:
r = requests.get(manifest_obj_url)
r.raise_for_status()
return r.json()
except Exception as e:
raise Exception("Could not download container image manifest from '{}'."
"\n{}".format(manifest_obj_url, e))
def is_image_already_imported(fingerprint):
try:
run_command("lxc image info {} >/dev/null 2>&1".format(fingerprint),
shell=True)
except Exception:
return False
return True
def delete_image_with_alias(alias):
try:
run_command("lxc image delete {}".format(alias))
except Exception:
pass
def download_unified_image_file(manifest):
tarball_url = "{}/{}".format(SWIFTSTACK_IMAGES_BASE_URL,
manifest["tarball-object"])
try:
r = requests.get(tarball_url, stream=True)
r.raise_for_status()
with tempfile.NamedTemporaryFile(delete=False) as f:
file_path = f.name
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
    except Exception as e:
        raise Exception(
            "Could not download file from '{}': {}".format(tarball_url, e))
    return file_path
def import_unified_image(manifest, alias):
tarball_path = download_unified_image_file(manifest)
# There might be an older image with the same alias
delete_image_with_alias(alias)
run_command("lxc image import {} --alias {}".format(tarball_path, alias))
os.unlink(tarball_path)
def download_split_image_files(manifest):
metadata_tarball_url = "{}/{}".format(SWIFTSTACK_IMAGES_BASE_URL,
manifest["metadata-object"])
rootfs_tarball_url = "{}/{}".format(SWIFTSTACK_IMAGES_BASE_URL,
manifest["rootfs-object"])
file_paths = []
for url in [metadata_tarball_url, rootfs_tarball_url]:
try:
r = requests.get(url, stream=True)
r.raise_for_status()
with tempfile.NamedTemporaryFile(delete=False) as f:
file_paths.append(f.name)
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
        except Exception as e:
            raise Exception(
                "Could not download file from '{}': {}".format(url, e))
    return tuple(file_paths)
def import_split_image(manifest, alias):
metadata_tarball_path, rootfs_tarball_path = \
download_split_image_files(manifest)
# There might be an older image with the same alias
delete_image_with_alias(alias)
run_command("lxc image import {} {} --alias {}".format(
metadata_tarball_path, rootfs_tarball_path, alias))
os.unlink(metadata_tarball_path)
os.unlink(rootfs_tarball_path)
def import_image(manifest, alias):
'''
There are 2 possible image formats: unified and split. We support both.
For unified format, the manifest will look like this:
{
"tarball_type": "unified",
"fingerprint": "629d2c18b7bb0b52b80dfe62ae309937123d05b563ef057233e7802c9e18c018",
"tarball-object": "centos7.5/629d2c18b7bb0b52b80dfe62ae309937123d05b563ef057233e7802c9e18c018.tar.gz"
}
For split format, the manifest will look like this:
{
"tarball_type": "split",
"fingerprint": "22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de",
"metadata-object": "centos7.5/meta-22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de.tar.xz",
"rootfs-object": "centos7.5/22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de.squashfs"
}
'''
if manifest["tarball_type"] not in TARBALL_TYPES:
raise Exception("Invalid tarball type: {}".format(
manifest["tarball_type"]))
elif manifest["tarball_type"] == UNIFIED_TARBALL_TYPE:
import_unified_image(manifest, alias)
elif manifest["tarball_type"] == SPLIT_TARBALL_TYPE:
import_split_image(manifest, alias)
else:
raise Exception("Tarball type '{}' is valid, but a method to import "
"it has not been implemented yet.")
def import_image_if_needed(base_image):
if not is_swiftstack_hosted_image(base_image):
raise Exception("{} is not an image hosted by "
"SwiftStack".format(base_image))
swift_container_name = base_image[len(SWIFTSTACK_IMAGES_PREFIX):]
manifest = get_image_manifest(swift_container_name)
if not is_image_already_imported(manifest["fingerprint"]):
print("Importing image '{}'...".format(base_image))
import_image(manifest, base_image)
else:
print("Image '{}' is already imported".format(base_image))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('distro', type=str, help='Container distro')
parser.add_argument('cname', metavar='containername', help='Container '
'name')
parser.add_argument('volsize', help='Volume size')
parser.add_argument('volcount', type=int, help='Volume count')
parser.add_argument('baseimage', nargs='?',
help='Base image. Defaults: \'images:centos/7/amd64\' '
'for RHEL distro, \'ubuntu:16.04\' otherwise')
args = parser.parse_args()
distro = args.distro
container_name = args.cname
base_image = args.baseimage
volume_size = args.volsize
volume_count = args.volcount
if is_swiftstack_hosted_image(distro):
import_image_if_needed(distro)
default_image = distro
else:
default_image = get_default_image(distro)
if base_image is None:
base_image = default_image
try:
# make a container profile that maps 8 block devices to the guest
rand_file_name = str(uuid.UUID(int=random.getrandbits(128)))
run_command("./make_lxc_profile.py {} {} {} {} > "
"/tmp/{}".format(container_name, VG_NAME, volume_size,
volume_count, rand_file_name),
cwd=SCRIPT_DIR, shell=True)
run_command("lxc profile create {}-profile".format(container_name))
run_command("cat /tmp/{} | lxc profile edit {}-profile".format(
rand_file_name, container_name), cwd=SCRIPT_DIR, shell=True)
# launch the new container
print("Trying to launch container from base image "
"{}".format(base_image))
run_command("lxc launch {} {} -p {}-profile || "
"lxc launch {} {} -p {}-profile".format(base_image,
container_name,
container_name,
default_image,
container_name,
container_name),
shell=True)
except Exception as e:
exit_with_error(str(e))
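# Invocation sketch (illustrative values for every argument):
#
#   ./make_base_container.py rhel web01 10GB 8
#
# would create container "web01" from the default CentOS image with eight
# 10GB volumes mapped in through the generated "web01-profile".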
| 2.34375 | 2 |
exercicios_antigos/ex_01.py | jfklima/prog_pratica | 0 | 5349 | """Criar uma função que retorne min e max de uma sequência numérica
aleatória.
Só pode usar if, comparações, recursão e funções que sejam de sua
autoria.
Se quiser usar laços também pode.
Deve informar via docstring qual é a complexidade de tempo e espaço da
sua solução
"""
from math import inf
def minimo_e_maximo(sequencia_numerica):
    ''' Returns the minimum and maximum of a random numeric sequence.
    Complexity:
        time: O(n)
        space: O(3)
    '''
maximo = -inf # 1
minimo = +inf # 1
for elem in sequencia_numerica: # 1
if elem > maximo: # 2
maximo = elem # 1
if elem < minimo: # 2
minimo = elem # 2
return minimo, maximo # 1
def recursivo_minmax(sequencia_numerica):
def r_minimo(sequencia):
primeiro = sequencia[0]
if len(sequencia) == 1:
return primeiro
else:
menor = r_minimo(sequencia[1:])
return menor if menor < primeiro else primeiro
def r_maximo(sequencia):
primeiro = sequencia[0]
if len(sequencia) == 1:
return primeiro
else:
maior = r_maximo(sequencia[1:])
return maior if maior > primeiro else primeiro
return r_minimo(sequencia_numerica), r_maximo(sequencia_numerica)
def recursivo_minmax_1x(sequencia_numerica):
primeiro = sequencia_numerica[0]
if len(sequencia_numerica) == 1:
return primeiro, primeiro
    else:
        # recurse over the tail once, then fold in the first element
        menor, maior = recursivo_minmax_1x(sequencia_numerica[1:])
        return (primeiro if primeiro < menor else menor,
                primeiro if primeiro > maior else maior)
# print(minimo_e_maximo([1, 2, 3, 4]))
# print(minimo_e_maximo([1, 3, 10, 12, 44, 2, 24, 25]))
# print(minimo_e_maximo([88, 66, 10, 2, 8]))
print(recursivo_minmax([1, 2, 3, 4]))
| 3.96875 | 4 |
docs/demos/theme_explorer/util.py | harisbal/dash-bootstrap-components | 1 | 5350 | import dash_bootstrap_components as dbc
import dash_html_components as html
DBC_DOCS = (
"https://dash-bootstrap-components.opensource.faculty.ai/docs/components/"
)
def make_subheading(label, link):
slug = label.replace(" ", "")
heading = html.H2(
html.Span(
[
label,
html.A(
html.I(className="fas fa-book fa-xs ml-2"),
href=f"{DBC_DOCS}{link}",
target="_blank",
id=f"tooltip_target_{slug}",
),
],
),
)
return html.Div(
[
heading,
dbc.Tooltip(
f"See {label} documentation", target=f"tooltip_target_{slug}"
),
],
className="mt-3",
)
| 2.46875 | 2 |
pytorch_toolbox/visualization/visdom_logger.py | MathGaron/pytorch_toolbox | 10 | 5351 | <reponame>MathGaron/pytorch_toolbox
'''
The visualization class provides an easy access to some of the visdom functionalities
Accept as input a number that will be ploted over time or an image of type np.ndarray
'''
from visdom import Visdom
import numpy as np
import numbers
class VisdomLogger:
items_iterator = {}
items_to_visualize = {}
windows = {}
vis = Visdom()
def check_availability(vis):
# check if the Visdom server is running. only once.
is_done = vis.text('visdom check')
if is_done is False:
raise RuntimeError('Visdom server is not running. Run the server first: python -m visdom.server')
else:
print('Visdom available at: %s:%s' % (vis.server, vis.port))
vis.close() # close visdom check
check_availability(vis)
@classmethod
def visualize(cls, item, name, **args):
"""
Visualize an item in a new window (if the parameter "name" is not on the list of previously given names) or
updates an existing window identified by "name"
:param item: Item to be visualized (a number or a numpy image).
:param name: String to identify the item.
:param args: dict containing options for visdom
"""
if name not in cls.items_to_visualize:
cls.new_item(item, name, **args)
else:
cls.update_item(item, name, **args)
cls.items_to_visualize[name] = item
@classmethod
def new_item(cls, item, name, **args):
if isinstance(item, numbers.Number):
cls.items_iterator[name] = 0
win = cls.vis.line(
X=np.array([cls.items_iterator[name]]),
Y=np.array([item]),
opts=dict(title=name)
)
cls.windows[name] = win
elif isinstance(item, np.ndarray):
win = cls.vis.image(
item,
opts=args,
)
cls.windows[name] = win
else:
print("type {} not supported for visualization".format(type(item)))
@classmethod
def update_item(cls, item, name, **args):
if isinstance(item, numbers.Number):
cls.vis.line(
                # to plot the number we need to give its position on the x axis,
                # hence we keep track of how many times we updated this item
                # (stored in items_iterator)
X=np.array([cls.items_iterator[name]]),
Y=np.array([item]),
win=cls.windows[name],
update='append'
)
cls.items_iterator[name] += 1
elif isinstance(item, np.ndarray):
cls.vis.image(
item,
opts=args,
win=cls.windows[name]
)
else:
print("type {} not supported for visualization".format(type(item)))
| 3.28125 | 3 |
analytical/conditionnumber.py | gyyang/olfaction_evolution | 9 | 5352 | <filename>analytical/conditionnumber.py
"""Analyze condition number of the network."""
import numpy as np
import matplotlib.pyplot as plt
# import model
def _get_sparse_mask(nx, ny, non, complex=False, nOR=50):
"""Generate a binary mask.
The mask will be of size (nx, ny)
For all the nx connections to each 1 of the ny units, only non connections are 1.
Args:
nx: int
ny: int
non: int, must not be larger than nx
Return:
mask: numpy array (nx, ny)
"""
mask = np.zeros((nx, ny))
if not complex:
mask[:non] = 1
for i in range(ny):
np.random.shuffle(mask[:, i]) # shuffling in-place
return mask.astype(np.float32)
def _get_cond(q, n_orn, n_pn, n_kc, n_kc_claw):
M = np.random.rand(n_orn, n_pn)
M_new = M * (1-q) + np.eye(n_orn) * q
# J = np.random.rand(N_PN, N_KC) / np.sqrt(N_PN + N_KC)
# J = np.random.randn(N_PN, N_KC) / np.sqrt(N_PN + N_KC)
J = np.random.rand(n_pn, n_kc)
mask = _get_sparse_mask(n_pn, n_kc, n_kc_claw) / n_kc_claw
J = J * mask
K = np.dot(M_new, J)
# cond = np.linalg.cond(K)
cond = np.linalg.norm(np.linalg.pinv(K)) * np.linalg.norm(K)
return cond
def get_logcond(q=1, n_orn=50, n_pn=50, n_kc=2500, n_kc_claw=7, n_rep=10):
conds = [_get_cond(q, n_orn, n_pn, n_kc, n_kc_claw) for i in range(n_rep)]
return np.mean(np.log10(conds))
def plot_cond_by_q(n_kc=2500):
qs = np.linspace(0, 1, 100)
conds = [get_logcond(q=q, n_kc=n_kc) for q in qs]
plt.figure()
plt.plot(qs, conds, 'o-')
plt.title('N_KC: ' + str(n_kc))
plt.xlabel('fraction diagonal')
plt.ylabel('log condition number')
# plt.savefig('figures/condvsfracdiag_nkc'+str(n_kc)+'.pdf', transparent=True)
def plot_cond_by_n_kc():
n_kcs = np.logspace(1, 4, 10).astype(int)
conds_q1 = np.array([get_logcond(n_kc=n_kc, q=1) for n_kc in n_kcs])
plt.figure()
plt.plot(np.log10(n_kcs), conds_q1, 'o-')
plt.xticks(np.log10(n_kcs), n_kcs)
plt.xlabel('N_KC')
n_kcs = np.logspace(1, 4, 10).astype(int)
conds_q0 = np.array([get_logcond(n_kc=n_kc, q=0) for n_kc in n_kcs])
plt.figure()
plt.plot(np.log10(n_kcs), conds_q0, 'o-')
plt.xticks(np.log10(n_kcs), n_kcs)
plt.xlabel('N_KC')
plt.figure()
plt.plot(np.log10(n_kcs), conds_q1 - conds_q0, 'o-')
plt.xticks(np.log10(n_kcs), n_kcs)
plt.ylabel('Log decrease in condition number')
plt.xlabel('N_KC')
n_kc_claws = np.arange(1, 50)
conds = np.array([get_logcond(n_kc_claw=n) for n in n_kc_claws])
plt.figure()
plt.plot(n_kc_claws, conds, 'o-')
plt.xticks(n_kc_claws)
plt.xlabel('N_KC_claw')
plt.show()
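# Quick check (illustrative): a purely diagonal PN mixing (q=1) should
# typically give a lower log condition number than a random one (q=0):
#
#   print(get_logcond(q=1), get_logcond(q=0))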
| 2.609375 | 3 |
FusionIIIT/applications/placement_cell/api/serializers.py | 29rj/Fusion | 29 | 5353 | <reponame>29rj/Fusion
from rest_framework.authtoken.models import Token
from rest_framework import serializers
from applications.placement_cell.models import (Achievement, Course, Education,
Experience, Has, Patent,
Project, Publication, Skill,
PlacementStatus, NotifyStudent)
class SkillSerializer(serializers.ModelSerializer):
class Meta:
model = Skill
fields = ('__all__')
class HasSerializer(serializers.ModelSerializer):
skill_id = SkillSerializer()
class Meta:
model = Has
fields = ('skill_id','skill_rating')
def create(self, validated_data):
skill = validated_data.pop('skill_id')
skill_id, created = Skill.objects.get_or_create(**skill)
try:
has_obj = Has.objects.create(skill_id=skill_id,**validated_data)
except:
raise serializers.ValidationError({'skill': 'This skill is already present'})
return has_obj
class EducationSerializer(serializers.ModelSerializer):
class Meta:
model = Education
fields = ('__all__')
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = ('__all__')
class ExperienceSerializer(serializers.ModelSerializer):
class Meta:
model = Experience
fields = ('__all__')
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ('__all__')
class AchievementSerializer(serializers.ModelSerializer):
class Meta:
model = Achievement
fields = ('__all__')
class PublicationSerializer(serializers.ModelSerializer):
class Meta:
model = Publication
fields = ('__all__')
class PatentSerializer(serializers.ModelSerializer):
class Meta:
model = Patent
fields = ('__all__')
class NotifyStudentSerializer(serializers.ModelSerializer):
class Meta:
model = NotifyStudent
fields = ('__all__')
class PlacementStatusSerializer(serializers.ModelSerializer):
notify_id = NotifyStudentSerializer()
class Meta:
model = PlacementStatus
fields = ('notify_id', 'invitation', 'placed', 'timestamp', 'no_of_days')
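# Usage sketch (illustrative; the Skill field name "skill" and the rating
# value are assumptions about the underlying models):
#
#   serializer = HasSerializer(data={"skill_id": {"skill": "Python"},
#                                    "skill_rating": 4})
#   if serializer.is_valid():
#       serializer.save()   # get_or_creates the Skill, then creates Has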
| 2.171875 | 2 |
concat_col_app/factories.py | thinkAmi-sandbox/django-datatables-view-sample | 0 | 5354 | <filename>concat_col_app/factories.py
import factory
from concat_col_app.models import Color, Apple
class ColorFactory(factory.django.DjangoModelFactory):
class Meta:
model = Color
class AppleFactory(factory.django.DjangoModelFactory):
class Meta:
model = Apple
| 1.867188 | 2 |
defects4cpp/errors/argparser.py | HansolChoe/defects4cpp | 10 | 5355 | from pathlib import Path
from typing import Dict
from errors.common.exception import DppError
class DppArgparseError(DppError):
pass
class DppArgparseTaxonomyNotFoundError(DppArgparseError):
def __init__(self, taxonomy_name: str):
super().__init__(f"taxonomy '{taxonomy_name}' does not exist")
self.taxonomy_name: str = taxonomy_name
class DppArgparseNotProjectDirectory(DppArgparseError):
def __init__(self, path: Path):
super().__init__(f"directory '{str(path)}' is not a defect taxonomy project")
self.path: Path = path
class DppArgparseDefectIndexError(DppArgparseError):
def __init__(self, index: int):
super().__init__(f"invalid index '{index}' of defects")
self.index: int = index
class DppArgparseFileNotFoundError(DppArgparseError, FileNotFoundError):
def __init__(self, path: str):
super().__init__()
self.path: str = path
class DppArgparseInvalidEnvironment(DppArgparseError):
def __init__(self, value: str):
super().__init__(
f"invalid environment variable format '{value}' (should be KEY=VALUE)"
)
self.value: str = value
class DppArgparseInvalidConfigError(DppArgparseError):
def __init__(self):
super().__init__()
class DppArgparseConfigCorruptedError(DppArgparseError):
def __init__(self, data: Dict):
super().__init__(f"config is corrupted: {data}")
self.data = data
class DppArgparseInvalidCaseExpressionError(DppArgparseError):
def __init__(self, index: int, name: str, cases: int, expr: str):
super().__init__(
f"Defect#{index} of {name} has {cases} test cases, but expression was: {expr}"
)
self.index: int = index
self.name: str = name
self.cases: int = cases
self.expr: str = expr
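# Usage sketch (illustrative): all errors derive from DppArgparseError, so
# callers can catch the whole family at once:
#
#   try:
#       raise DppArgparseTaxonomyNotFoundError("yara-cpp")
#   except DppArgparseError as e:
#       print(e)   # taxonomy 'yara-cpp' does not exist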
| 2.546875 | 3 |
utils/__init__.py | wang97zh/EVS-Net-1 | 0 | 5356 |
from .utility import *
from .tricks import *
from .tensorlog import *
from .self_op import *
from .resume import *
from .optims import *
from .metric import *
| 0.972656 | 1 |
model-optimizer/extensions/front/mxnet/arange_ext.py | calvinfeng/openvino | 0 | 5357 | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from extensions.ops.range import Range
from mo.front.extractor import FrontExtractorOp
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.graph.graph import Node
class ArangeExt(FrontExtractorOp):
op = '_arange'
enabled = True
@classmethod
def extract(cls, node: Node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
Range.update_node_stat(node, {
'start': attrs.int('start', 0),
'stop': attrs.int('stop', 0),
'repeat': attrs.int('repeat', 1),
'step': attrs.float('step', 1),
            'dtype': np.dtype(attrs.str('dtype', 'float32'))
})
return cls.enabled
| 1.765625 | 2 |
fold_cur_trans.py | lucasforever24/arcface_noonan | 0 | 5358 | import cv2
from PIL import Image
import argparse
from pathlib import Path
from multiprocessing import Process, Pipe,Value,Array
import torch
from config import get_config
from mtcnn import MTCNN
from Learner_trans_tf import face_learner
from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from sklearn.model_selection import KFold
import os
import glob
import shutil
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import datetime
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='for face verification')
parser.add_argument("-ds", "--dataset_dir", help="where to get data", default="noonan", type=str)
parser.add_argument('-sd','--stored_result_dir',help='where to store data as np arrays',
default="results/trans/", type=str)
parser.add_argument("-k", "--kfold", help="returns the number of splitting iterations in the cross-validator.",
default=10, type=int)
parser.add_argument("-e", "--epochs", help="training epochs", default=20, type=int)
parser.add_argument("-n", "--names_considered", help="names for different types considered, separated by commas",
default="normal,noonan,others", type=str)
parser.add_argument("-g", "--gpu_id", help="gpu id to use", default="", type=str)
parser.add_argument("-s", "--use_shuffled_kfold", help="whether to use shuffled kfold.", action="store_true")
parser.add_argument("-rs", "--random_seed", help="random seed used for k-fold split.", default=6, type=int)
parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true")
parser.add_argument("-a", "--additional_data_dir", help="where to get the additional data",
default="", type=str)
parser.add_argument("-ta", "--additional_test_or_train", help="use additional data in only train, or test, or both",
default="", type=str)
parser.add_argument("-as", "--stylegan_data_dir", help="where to get the additional data",
default="", type=str)
parser.add_argument("-ts", "--stylegan_test_or_train", help="use stylegan data in only train, or test, or both",
default="", type=str)
parser.add_argument("-tf", "--transfer", help="how many layer(s) used for transfer learning, "
"but 0 means retraining the whole network.", default=0, type=int)
parser.add_argument("-ac", "--arch", help="types of model used for encoder", default="mobile", type=str)
args = parser.parse_args()
for arg in vars(args):
print(arg+':', getattr(args, arg))
emore_dir = 'faces_emore'
conf = get_config(True, args)
conf.emore_folder = conf.data_path/emore_dir
mtcnn = MTCNN()
print('mtcnn loaded')
names_considered = args.names_considered.strip().split(',')
exp_name = args.dataset_dir[:4]
if args.additional_data_dir:
if 'LAG' in args.additional_data_dir:
exp_name += '_lag'
elif 'literature' in args.additional_data_dir:
exp_name += '_ltr'
if args.kfold != 10:
exp_name += ('_k' + str(args.kfold))
if args.epochs != 20:
exp_name += ('_e' + str(args.epochs))
if args.transfer != 0 and args.transfer != 1:
exp_name += ('_td' + str(args.transfer))
if args.use_shuffled_kfold:
exp_name += ('_s' + str(args.random_seed))
print(exp_name)
# prepare folders
raw_dir = 'raw_112'
verify_type = 'trans'
if args.use_shuffled_kfold:
verify_type += '_shuffled'
# train_dir = conf.facebank_path/args.dataset_dir/verify_type/'train'
train_dir = conf.emore_folder/'imgs'
test_dir = conf.emore_folder/'test'
conf.facebank_path = train_dir
if os.path.exists(train_dir):
shutil.rmtree(train_dir)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(train_dir)
os.mkdir(test_dir)
for name in names_considered:
os.makedirs(str(train_dir) + '/' + name, exist_ok=True)
os.makedirs(str(test_dir) + '/' + name, exist_ok=True)
if args.stylegan_data_dir:
#e.g. smile_refine_mtcnn_112_divi
full_stylegan_dir = str(conf.data_path/'facebank'/'stylegan'/args.stylegan_data_dir)
stylegan_folders = os.listdir(full_stylegan_dir)
if args.additional_data_dir:
full_additional_dir = str(conf.data_path/'facebank'/args.additional_data_dir)
# init kfold
if args.use_shuffled_kfold:
kf = KFold(n_splits=args.kfold, shuffle=True, random_state=args.random_seed)
else:
kf = KFold(n_splits=args.kfold, shuffle=False, random_state=None)
# collect and split raw data
data_dict = {}
idx_gen = {}
for name in names_considered:
tmp_list = glob.glob(str(conf.data_path/'facebank'/args.dataset_dir/raw_dir) +
'/' + name + '*')
if 'innm' in args.stylegan_data_dir:
tmp_list = tmp_list + glob.glob(str(full_stylegan_dir) + '/' + name + '*')
stylegan_folders = []
print(str(conf.data_path/'facebank'/args.dataset_dir/raw_dir))
data_dict[name] = np.array(tmp_list)
idx_gen[name] = kf.split(data_dict[name])
if 'literature' in args.additional_data_dir:
data_dict['ltr'] = np.array(glob.glob(str(full_additional_dir) + '/*'))
idx_gen['ltr'] = kf.split(data_dict['ltr'])
score_names = []
scores = []
wrong_names = []
args.stored_result_path = args.stored_result_dir + os.sep + str(datetime.datetime.now())[:19]
if not os.path.exists(args.stored_result_path):
os.mkdir(args.stored_result_path)
# for fold_idx, (train_index, test_index) in enumerate(kf.split(data_dict[names_considered[0]])):
for fold_idx in range(args.kfold):
train_set = {}
test_set = {}
for name in names_considered:
(train_index, test_index) = next(idx_gen[name])
train_set[name], test_set[name] = data_dict[name][train_index], data_dict[name][test_index]
if 'ltr' in data_dict.keys():
(train_index, test_index) = next(idx_gen['ltr'])
train_set['ltr'], test_set['ltr'] = data_dict['ltr'][train_index], data_dict['ltr'][test_index]
if 'train' in args.additional_test_or_train:
train_set['noonan'] = np.concatenate((train_set['noonan'], train_set['ltr']))
if 'test' in args.additional_test_or_train:
test_set['noonan'] = np.concatenate((test_set['noonan'], test_set['ltr']))
# remove previous data
prev = glob.glob(str(train_dir) + '/*/*')
for p in prev:
os.remove(p)
prev = glob.glob(str(test_dir) + '/*/*')
for p in prev:
os.remove(p)
# save trains to conf.facebank_path/args.dataset_dir/'train' and
# tests to conf.data_path/'facebank'/args.dataset_dir/'test'
# count unbalanced data
train_count = {}
test_count = {}
for name in names_considered:
train_count[name] = 0
for i in range(len(train_set[name])):
img_folder = str(train_set[name][i])
for img in os.listdir(img_folder):
shutil.copy(img_folder + os.sep + str(img),
os.path.join(str(train_dir), name, str(img)))
train_count[name] += 1
# addition data from stylegan
if 'interp' not in data_dict.keys():
folder = os.path.basename(train_set[name][i])
if args.stylegan_data_dir and ('train' in args.stylegan_test_or_train) and (folder in stylegan_folders):
for img in os.listdir(full_stylegan_dir + os.sep + folder):
shutil.copy(os.path.join(full_stylegan_dir, folder, str(img)),
os.path.join(str(train_dir), name, str(img)))
# ('/'.join(train_set[name][i].strip().split('/')[:-2]) +
# '/' + verify_type + '/train/' + name + os.sep + img))
train_count[name] += 1
# test
            test_count[name] = 0
            for i in range(len(test_set[name])):
img_folder = str(test_set[name][i])
for img in os.listdir(img_folder):
shutil.copy(img_folder + os.sep + str(img),
os.path.join(str(test_dir), name, str(img)))
test_count[name] += 1
# addition data from stylegan
if 'interp' not in data_dict.keys():
folder = os.path.basename(test_set[name][i])
if args.stylegan_data_dir and ('test' in args.stylegan_test_or_train) and (folder in stylegan_folders):
# and
# (folder not in ['noonan7','noonan19','noonan23','normal9','normal20','normal23'])):
for img in os.listdir(full_stylegan_dir + os.sep + folder):
shutil.copy(os.path.join(full_stylegan_dir, folder, str(img)),
os.path.join(str(test_dir), name, str(img)))
test_count[name] += 1
print(train_count, test_count)
# deal with unbalanced data
"""
if train_count['normal'] // train_count['noonan'] > 1:
aug_num = train_count['normal'] // train_count['noonan'] - 1
for img in os.listdir(os.path.join(str(train_dir), 'noonan')):
for aug_idx in range(aug_num):
aug_img = img[:img.rfind('.')] + '_' + str(aug_idx) + img[img.rfind('.'):]
shutil.copy(os.path.join(str(train_dir), 'noonan', img),
os.path.join(str(train_dir), 'noonan', aug_img))
"""
if 'fake' in args.additional_data_dir:
fake_dict = {'noonan':'normal', 'normal':'noonan'}
full_additional_dir = conf.data_path/'facebank'/'noonan+normal'/args.additional_data_dir
add_data = glob.glob(str(full_additional_dir) + os.sep + '*.png')
print('additional:', args.additional_data_dir, len(add_data))
for name in names_considered:
for img_f in add_data:
if name in img_f.strip().split(os.sep)[-1]:
# print('source:', img_f)
# print('copy to:', img_f.replace(str(full_additional_dir),
# str(train_dir) + os.sep + fake_dict[name]))
# print('copy to:', img_f.replace(args.additional_data_dir,
# verify_type + '/train/' + name))
shutil.copy(img_f, os.path.join(str(train_dir), fake_dict[name], os.path.basename(img_f)))
print(fold_idx)
print('datasets ready')
conf_train = get_config(True, args)
conf_train.emore_folder = conf.data_path/emore_dir
conf_train.stored_result_dir = args.stored_result_path
learner = face_learner(conf=conf_train, transfer=args.transfer, ext=exp_name+'_'+str(fold_idx))
# conf, inference=False, transfer=0
if args.transfer != 0:
learner.load_state(conf.save_path, False, True)
print('learner loaded')
learner.train(conf_train, args.epochs)
print('learner retrained.')
learner.save_state()
print('Model is saved')
# prepare_facebank
targets, names, names_idx = prepare_facebank(conf, learner.model, mtcnn, tta = args.tta)
print('names_classes:', names)
noonan_idx = names_idx['noonan']
print('facebank updated')
for path in test_dir.iterdir():
if path.is_file():
continue
# print(path)
for fil in path.iterdir():
# print(fil)
orig_name = ''.join([i for i in fil.name.strip().split('.')[0].split('_')[0] if not i.isdigit()])
for name in names_idx.keys():
if name in orig_name:
score_names.append(names_idx[name])
"""
if orig_name not in names_considered:
print("Un-considered name:", fil.name)
continue
"""
frame = cv2.imread(str(fil))
image = Image.fromarray(frame)
faces = [image,]
distance = learner.binfer(conf, faces, targets, args.tta)
label = score_names[-1]
score = np.exp(distance.dot(-1))
pred = np.argmax(score, 1)
if pred != label:
wrong_names.append(orig_name)
scores.append(score)
score_names = np.array(score_names)
wrong_names = np.array(wrong_names)
score_np = np.squeeze(np.array(scores))
n_classes = score_np.shape[1]
score_names = label_binarize(score_names, classes=range(n_classes))
score_sum = np.zeros([score_np.shape[0], 1])
for i in range(n_classes):
score_sum += score_np[:, i, None] # keep the dimension
relative_scores = (score_np / score_sum)
total_scores = relative_scores.ravel()
total_names = score_names.ravel()
name_path = os.path.join(args.stored_result_path, 'wrong_names.npy')
save_label_score(name_path, wrong_names)
label_path = os.path.join(args.stored_result_path, 'labels_trans.npy')
save_label_score(label_path, score_names)
score_path = os.path.join(args.stored_result_path, 'scores_trans.npy')
save_label_score(score_path, relative_scores)
print('saved!')
# Compute ROC curve and ROC area for noonan
fpr, tpr, _ = roc_curve(total_names, total_scores) #scores_np[:, noonan_idx]
roc_auc = auc(fpr, tpr)
# For PR curve
precision, recall, _ = precision_recall_curve(total_names, total_scores)
average_precision = average_precision_score(total_names, total_scores)
# plots
plt.figure()
colors = list(mcolors.TABLEAU_COLORS)
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.4f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC_{}'.format(exp_name))
plt.legend(loc="lower right")
plt.savefig(args.stored_result_path + os.sep + '/fp_tp_{}.png'.format(exp_name))
plt.close()
# plt.show()
plt.figure()
plt.step(recall, precision, where='post')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Average precision score ({}): AP={:0.4f}'.format(exp_name, average_precision))
plt.savefig(args.stored_result_path + os.sep + '/pr_{}.png'.format(exp_name))
plt.close()
| 2.078125 | 2 |
examples/tryclass.py | manahter/dirio | 0 | 5359 | <gh_stars>0
import time
class TryClass:
value = 1
valu = 2
val = 3
va = 4
v = 5
def __init__(self, value=4):
print("Created TryClass :", self)
self.value = value
def metod1(self, value, val2=""):
self.value += value
print(f"\t>>> metod 1, add: {value}, now value : {self.value}, val2: {val2}")
time.sleep(2)
return self.value
@classmethod
def metod2(cls, value, val2=""):
cls.value = 2
print(f"\t>>> metod 2, add: {value}, now value : {cls.value}, val2: {val2}")
return cls.value
@staticmethod
def metod3(value, val2=""):
TryClass.value += value
print(f"\t>>> metod 3, add: {value}, now value : {TryClass.value}, val2: {val2}")
return TryClass.value
def event_call(other_arg, kwarg="-", result=None):
"""Call this metod, on returned result"""
print(f"Bind Result, {result}\n"*10)
print("other_arg", other_arg)
print("kwarg", kwarg)
if __name__ == "__main__":
try:
from dirio import Dirio
except:
from ..dirio import Dirio
dr_cls = Dirio(target=TryClass, args=(888,), kwargs={}, worker=False)
print("Starting values :", dr_cls.value, dr_cls)
print("\n"*2)
print("Wait 1 sec for your reply. metod 1 :", dr_cls.metod1(5, val2="1", dr_wait=1))
print("Wait until the reply comes. metod 1 :", dr_cls.metod1(5, val2="1", dr_wait=-1))
code0 = dr_cls.metod1(5, val2="1", dr_code=True)
print("Metod 1, call, via bind to func", dr_cls.dr_bind(code0, event_call, args=("OtHeR aRg", ), kwargs={"kwarg": "KwArG"}))
while True:
#
dr_cls.dr_binds_check()
print("Run the method and give us the response reading code : dr_code=True")
code1 = dr_cls.metod1(5, val2="1", dr_code=True)
print("Is there data in the reading code? : dr_code=43534")
while not dr_cls.metod1(dr_code=code1):
print("We are waiting for the data with this code :", code1)
time.sleep(.5)
print("Returned metod 1 data :", dr_cls.metod1(dr_code=code1))
print("Methods called this way give the last return value : nothing or dr_code=False")
code2 = dr_cls.metod2(10, val2="2", dr_code=True)
print("Search by code only :", dr_cls.dr_code(code2, wait=1))
print("Trying metod 2, called and returned :", dr_cls.metod2(10, val2="2", dr_code=False))
print("Trying metod 3, called and returned :", dr_cls.metod3(15, val2="3"))
print("\n"*2)
time.sleep(3)
dr_cls.dr_terminate()
| 3.125 | 3 |
qiskit/providers/basebackend.py | ismaila-at-za-ibm/qiskit-terra | 2 | 5360 | <reponame>ismaila-at-za-ibm/qiskit-terra<filename>qiskit/providers/basebackend.py
# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""This module implements the abstract base class for backend modules.
To create add-on backend modules subclass the Backend class in this module.
Doing so requires that the required backend interface is implemented.
"""
from abc import ABC, abstractmethod
from qiskit.version import __version__
from .models import BackendStatus
class BaseBackend(ABC):
"""Base class for backends."""
@abstractmethod
def __init__(self, configuration, provider=None):
"""Base class for backends.
This method should initialize the module and its configuration, and
raise an exception if a component of the module is
not available.
Args:
configuration (BackendConfiguration): backend configuration
provider (BaseProvider): provider responsible for this backend
Raises:
            FileNotFoundError: if backend executable is not available.
QiskitError: if there is no name in the configuration
"""
self._configuration = configuration
self._provider = provider
@abstractmethod
def run(self, qobj):
"""Run a Qobj on the the backend."""
pass
def configuration(self):
"""Return the backend configuration.
Returns:
BackendConfiguration: the configuration for the backend.
"""
return self._configuration
def properties(self):
"""Return backend properties.
Returns:
BackendProperties: the configuration for the backend. If the backend
does not support properties, it returns ``None``.
"""
return None
def provider(self):
"""Return the backend Provider.
Returns:
BaseProvider: the Provider responsible for the backend.
"""
return self._provider
def status(self):
"""Return backend status.
Returns:
BackendStatus: the status of the backend.
"""
return BackendStatus(backend_name=self.name(),
backend_version=__version__,
operational=True,
pending_jobs=0,
status_msg='')
def name(self):
"""Return backend name.
Returns:
str: the name of the backend.
"""
return self._configuration.backend_name
def __str__(self):
return self.name()
def __repr__(self):
"""Official string representation of a Backend.
Note that, by Qiskit convention, it is consciously *not* a fully valid
Python expression. Subclasses should provide 'a string of the form
<...some useful description...>'. [0]
[0] https://docs.python.org/3/reference/datamodel.html#object.__repr__
"""
return "<{}('{}') from {}()>".format(self.__class__.__name__,
self.name(),
self._provider)
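# Illustrative sketch (hedged, not from qiskit-terra): a minimal concrete
# subclass showing the interface the module docstring asks implementors to
# provide. It simply echoes the qobj back instead of executing it.
class _EchoBackend(BaseBackend):
    """Toy backend used only to illustrate the required subclassing contract."""
    def __init__(self, configuration, provider=None):
        super().__init__(configuration, provider=provider)
    def run(self, qobj):
        # A real backend would submit `qobj` for execution and return a job
        # handle; this sketch returns the payload unchanged.
        return qobj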
| 2.25 | 2 |
arrays/jump2/Solution.py | shahbagdadi/py-algo-n-ds | 0 | 5361 | from typing import List
import sys
class Solution:
def jump(self, nums: List[int]) -> int:
        if len(nums) <= 1:
            return 0
        l, r, jumps = 0, nums[0], 1
        while r < len(nums) - 1:
            jumps += 1
            # you can land anywhere between l and r+1 in a jump and then use
            # nums[i] to jump onward from there
            nxt = max(i + nums[i] for i in range(l, r + 1))
            l, r = r, nxt
return jumps
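# Worked example: for nums = [2, 3, 1, 1, 4] the window (l, r) starts at (0, 2)
# with jumps = 1; one expansion gives nxt = max(0+2, 1+3, 2+1) = 4, so
# (l, r) becomes (2, 4) with jumps = 2, which reaches the last index.
assert Solution().jump([2, 3, 1, 1, 4]) == 2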
s = Solution()
ans = s.jump([3,2,1,0,4])
print(ans) | 3.59375 | 4 |
share/tests.py | shared-tw/shared-tw | 2 | 5362 | import unittest
from . import states
class DonationStateTestCase(unittest.TestCase):
def test_approve_pending_state(self):
approve_pending_statue = states.PendingApprovalState()
approved_event = states.DonationApprovedEvent()
self.assertIsInstance(
approve_pending_statue.apply(approved_event),
states.PendingDispatchState,
)
cancelled_event = states.DonationCancelledEvent()
self.assertIsInstance(
approve_pending_statue.apply(cancelled_event), states.CancelledState
)
dispatch_event = states.DonationDispatchedEvent()
self.assertIsInstance(
approve_pending_statue.apply(dispatch_event), states.InvalidState
)
def test_dispatch_pending_state(self):
dispatch_pending_state = states.PendingDispatchState()
donation_dispatched_event = states.DonationDispatchedEvent()
self.assertIsInstance(
dispatch_pending_state.apply(donation_dispatched_event),
states.DoneState,
)
cancelled_event = states.DonationCancelledEvent()
self.assertIsInstance(
dispatch_pending_state.apply(cancelled_event), states.CancelledState
)
approved_event = states.DonationApprovedEvent()
self.assertIsInstance(
dispatch_pending_state.apply(approved_event), states.InvalidState
)
def test_collect_pending_state(self):
collect_pending_state = states.PendingDeliveryState()
collected_event = states.DonationDeliveredEvent()
self.assertIsInstance(
collect_pending_state.apply(collected_event), states.DoneState
)
cancelled_event = states.DonationCancelledEvent()
self.assertIsInstance(
collect_pending_state.apply(cancelled_event), states.InvalidState
)
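# The `states` module itself is not shown here; a minimal sketch consistent
# with the transitions exercised above could look like this (assumption --
# each state maps known events to successor states and anything else to
# InvalidState):
#
#   class PendingApprovalState:
#       def apply(self, event):
#           if isinstance(event, DonationApprovedEvent):
#               return PendingDispatchState()
#           if isinstance(event, DonationCancelledEvent):
#               return CancelledState()
#           return InvalidState()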
| 2.65625 | 3 |
app/extensions.py | grow/airpress | 1 | 5363 | from jinja2 import nodes
from jinja2.ext import Extension
class FragmentCacheExtension(Extension):
# a set of names that trigger the extension.
tags = set(['cache'])
def __init__(self, environment):
super(FragmentCacheExtension, self).__init__(environment)
# add the defaults to the environment
environment.extend(
fragment_cache_prefix='fragment',
fragment_cache=None
)
def parse(self, parser):
# the first token is the token that started the tag. In our case
# we only listen to ``'cache'`` so this will be a name token with
# `cache` as value. We get the line number so that we can give
# that line number to the nodes we create by hand.
lineno = next(parser.stream).lineno
# now we parse a single expression that is used as cache key.
args = [parser.parse_expression()]
# if there is a comma, the user provided a timeout. If not use
# None as second parameter.
if parser.stream.skip_if('comma'):
args.append(parser.parse_expression())
else:
args.append(nodes.Const(None))
# now we parse the body of the cache block up to `endcache` and
# drop the needle (which would always be `endcache` in that case)
body = parser.parse_statements(['name:endcache'], drop_needle=True)
# now return a `CallBlock` node that calls our _cache_support
# helper method on this extension.
return nodes.CallBlock(self.call_method('_cache_support', args),
[], [], body).set_lineno(lineno)
def _cache_support(self, name, timeout, caller):
"""Helper callback."""
key = self.environment.fragment_cache_prefix + name
# try to load the block from the cache
# if there is no fragment in the cache, render it and store
# it in the cache.
rv = self.environment.fragment_cache.get(key)
if rv is not None:
return rv
rv = caller()
self.environment.fragment_cache.add(key, rv, timeout)
return rv
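# Illustrative usage sketch (hedged): the extension expects
# `environment.fragment_cache` to expose `get(key)` and `add(key, value,
# timeout)`. A tiny in-memory cache is enough to try it out.
if __name__ == "__main__":
    from jinja2 import Environment
    class _DictCache:
        """Minimal cache exposing the get/add interface assumed above."""
        def __init__(self):
            self._data = {}
        def get(self, key):
            return self._data.get(key)
        def add(self, key, value, timeout=None):
            self._data.setdefault(key, value)
    env = Environment(extensions=[FragmentCacheExtension])
    env.fragment_cache = _DictCache()
    template = env.from_string("{% cache 'sidebar' %}expensive body{% endcache %}")
    print(template.render())  # a second render would be served from the cache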
| 2.703125 | 3 |
modtox/Helpers/helpers.py | danielSoler93/modtox | 4 | 5364 | <gh_stars>1-10
import os
def retrieve_molecule_number(pdb, resname):
"""
IDENTIFICATION OF MOLECULE NUMBER BASED
ON THE TER'S
"""
count = 0
with open(pdb, 'r') as x:
lines = x.readlines()
for i in lines:
if i.split()[0] == 'TER': count += 1
if i.split()[3] == resname:
molecule_number = count + 1
break
return molecule_number
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
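if __name__ == "__main__":
    # Quick demonstration (sketch): the previous working directory is
    # restored when the block exits, even if an exception is raised.
    with cd("/tmp"):
        print("inside:", os.getcwd())
    print("back to:", os.getcwd())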
| 2.65625 | 3 |
bbio/platform/beaglebone/api.py | efargas/PyBBIO | 0 | 5365 | # api.py
# Part of PyBBIO
# github.com/alexanderhiam/PyBBIO
# MIT License
#
# Beaglebone platform API file.
from bbio.platform.platform import detect_platform
PLATFORM = detect_platform()
if "3.8" in PLATFORM:
from bone_3_8.adc import analog_init, analog_cleanup
from bone_3_8.pwm import pwm_init, pwm_cleanup
from serial_port import serial_cleanup
elif "3.2" in PLATFORM:
from bone_3_2.adc import analog_init, analog_cleanup
from bone_3_2.pwm import pwm_init, pwm_cleanup
from serial_port import serial_cleanup
def platform_init():
analog_init()
pwm_init()
def platform_cleanup():
analog_cleanup()
pwm_cleanup()
serial_cleanup()
| 2.21875 | 2 |
tryhackme/http.py | GnarLito/tryhackme.py | 0 | 5366 | import re
import sys
from urllib.parse import quote as _uriquote
import requests
from . import __version__, errors, utils
from .converters import _county_types, _leaderboard_types, _vpn_types, _not_none
from . import checks
from .cog import request_cog
GET='get'
POST='post'
class HTTPClient:
__CSRF_token_regex = re.compile("const csrfToken[ ]{0,1}=[ ]{0,1}[\"|'](.{36})[\"|']")
__Username_regex = re.compile("const username[ ]{0,1}=[ ]{0,1}[\"|'](.{1,16})[\"|']")
def __init__(self, session=None):
self._state = None
self.authenticated = False
self.__session = requests.Session()
self.static_session = requests.Session()
self.connect_sid = None
self._CSRF_token = None
self.username = None
self.user_agent = f'Tryhackme: (https://github.com/GnarLito/thm-api-py {__version__}) Python/{sys.version_info[0]}.{sys.version_info[1]} requests/{requests.__version__}'
if session is not None:
self.static_login(session)
def close(self):
if self.__session:
self.__session.close()
def static_login(self, session):
self.connect_sid = session
cookie = requests.cookies.create_cookie('connect.sid', session, domain='tryhackme.com')
self.__session.cookies.set_cookie(cookie)
try:
self.request(RouteList.get_unseen_notifications())
self.authenticated = True
self._CSRF_token = self.retrieve_CSRF_token()
self.username = self.retrieve_username()
except Exception as e:
print("session Issue:", e)
def retrieve_CSRF_token(self):
if not self.authenticated:
return None
try:
page = self.request(RouteList.get_profile_page())
return self._HTTPClient__CSRF_token_regex.search(page).group(1)
except AttributeError:
self.authenticated = False
return None
def retrieve_username(self):
if not self.authenticated:
return None
try:
page = self.request(RouteList.get_profile_page())
return self._HTTPClient__Username_regex.search(page).group(1)
except AttributeError:
self.authenticated = False
return None
def request(self, route, **kwargs):
session = self.__session
endpoint = route.url
method = route.method
settings = kwargs.pop('settings', {})
headers = {
'User-Agent': self.user_agent
}
if 'json' in kwargs:
headers['Content-Type'] = 'application/json'
kwargs['data'] = utils.to_json(kwargs.pop('json'))
if "static" in settings:
session = self.static_session
if "CSRF" in settings:
headers['CSRF-Token'] = self._CSRF_token
kwargs["data"]["_CSRF"] = self._CSRF_token
# TODO: retries, Pagenator
try:
with session.request(method, endpoint, **kwargs) as r:
data = utils.response_to_json_or_text(r)
# * valid return
if 300 > r.status_code >= 200:
# $ if return url is login then no auth
if r.url.split('/')[-1] == "login":
raise errors.Unauthorized(request=r, route=route, data=data)
return data
# $ no auth
if r.status_code in {401, 403}:
raise errors.Unauthorized(request=r, route=route, data=data)
# $ endpoint not found
if 404 == r.status_code:
raise errors.NotFound(request=r, route=route, data=data)
# $ server side issue's
if r.status_code in {500, 502}:
raise errors.ServerError(request=r, route=route, data=data)
except Exception as e:
raise e
class Route:
# TODO: add post payload capabilities
BASE = "https://www.tryhackme.com"
def __init__(self, method=GET, path='', **parameters):
self.method = method
self._path = path
self.path = path
url = self.BASE + self.path
options = parameters.pop("options", None)
if parameters:
try:
self.path = self.path.format(**{k: _uriquote(v) if isinstance(v, str) else v for k, v in parameters.items()})
self.url = self.BASE + self.path
except Exception as e:
raise errors.NotValidUrlParameters(e)
else:
self.url = url
        if options:
            # Append any non-None options as a query string.
            query = "&".join(f"{i}={options[i]}" for i in options.keys() if options[i] is not None)
            if query:
                self.url += ("&" if "?" in self.url else "?") + query
self.bucket = f"{method} {path}"
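# Illustrative sketch: path placeholders are filled from keyword arguments and
# URI-quoted, e.g. (username "0day" is just an example value):
#
#   Route(path="/api/user/rank/{username}", username="0day").url
#   -> "https://www.tryhackme.com/api/user/rank/0day"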
class RouteList:
def get_profile_page(**parameters): return Route(path="/profile", **parameters)
# * normal site calls
def get_server_time( **parameters): return Route(path="/api/server-time", **parameters)
def get_site_stats( **parameters): return Route(path="/api/site-stats", **parameters)
def get_practise_rooms( **parameters): return Route(path="/api/practice-rooms", **parameters)
def get_series( **parameters): return Route(path="/api/series?show={show}", **parameters)
def get_glossary_terms( **parameters): return Route(path="/glossary/all-terms", **parameters)
# * Leaderboards
def get_leaderboards( **parameters): return Route(path="/api/leaderboards", **parameters)
def get_koth_leaderboards(**parameters): return Route(path="/api/leaderboards/koth", **parameters)
# * networks
def get_networks( **parameters): return Route(path="/api/networks", **parameters)
def get_network( **parameters): return Route(path="/api/room/network?code={network_code}", **parameters)
def get_network_cost( **parameters): return Route(path="/api/room/cost?code={network_code}", **parameters)
# * account
def get_subscription_cost(**parameters): return Route(path="/account/subscription/cost", **parameters)
# * paths
def get_path( **parameters): return Route(path="/paths/single/{path_code}", **parameters)
def get_public_paths( **parameters): return Route(path="/paths/public", **parameters)
def get_path_summary( **parameters): return Route(path="/paths/summary", **parameters)
# * modules
def get_modules_summary(**parameters): return Route(path="/modules/summary", **parameters)
def get_module( **parameters): return Route(path="/modules/data/{module_code}",**parameters)
# * games
def get_machine_pool( **parameters): return Route(path="/games/koth/get/machine-pool", **parameters)
def get_game_detail( **parameters): return Route(path="/games/koth/data/{game_code}", **parameters)
def get_recent_games( **parameters): return Route(path="/games/koth/recent/games", **parameters)
def get_user_games( **parameters): return Route(path="/games/koth/user/games", **parameters)
def get_game_tickets_won(**parameters): return Route(path="/games/tickets/won?username={username}", **parameters)
def post_join_koth( **parameters): return Route(method=POST, path="/games/koth/new", **parameters)
def post_new_koth( **parameters): return Route(method=POST, path="/games/koth/join-public", **parameters) # ? might be different for premium users
# * VPN
def get_available_vpns(**parameters): return Route(path="/vpn/get-available-vpns", **parameters)
def get_vpn_info( **parameters): return Route(path="/vpn/my-data", **parameters)
# * VM
def get_machine_running( **parameters): return Route(path="/api/vm/running", **parameters)
def post_renew_machine( **parameters): return Route(method=POST, path="/api/vm/renew", **parameters)
def post_terminate_machine( **parameters): return Route(method=POST, path="/api/vm/terminate", **parameters)
# * user -badge
def get_own_badges( **parameters): return Route(path="/api/badges/mine", **parameters)
def get_user_badges(**parameters): return Route(path="/api/badges/get/{username}", **parameters)
def get_all_badges( **parameters): return Route(path="/api/badges/get", **parameters)
# * user -team
def get_team_info(**parameters): return Route(path="/api/team/is-member", **parameters)
# * user -notifications
def get_unseen_notifications(**parameters): return Route(path="/notifications/has-unseen", **parameters)
def get_all_notifications( **parameters): return Route(path="/notifications/get", **parameters)
# * user -messages
def get_unseen_messages( **parameters): return Route(path="/message/has-unseen", **parameters)
def get_all_group_messages(**parameters): return Route(path="/message/group/get-all", **parameters)
def get_group_messages( **parameters): return Route(path="/message/group/get/{group_id}", **parameters)
# * user -room
def get_user_completed_rooms_count( **parameters): return Route(path="/api/no-completed-rooms-public/{username}", **parameters)
def get_user_completed_rooms( **parameters): return Route(path="/api/all-completed-rooms?username={username}", **parameters)
def get_user_created_rooms( **parameters): return Route(path="/api/created-rooms/{username}", **parameters)
# * user
def get_user_rank( **parameters): return Route(path="/api/user/rank/{username}", **parameters)
def get_user_activty(**parameters): return Route(path="/api/user/activity-events?username={username}", **parameters)
def get_all_friends( **parameters): return Route(path="/api/friend/all", **parameters)
def get_discord_user(**parameters): return Route(path="/api/discord/user/{username}", **parameters) # ? rename to user profile
def get_user_exist( **parameters): return Route(path="/api/user/exist/{username}", **parameters)
def search_user( **parameters): return Route(path="/api/similar-users/{username}", **parameters)
# * room
def get_new_rooms( **parameters): return Route(path="/api/new-rooms", **parameters)
def get_recommended_rooms( **parameters): return Route(path="/recommend/last-room?type=json", **parameters)
def get_questions_answered( **parameters): return Route(path="/api/questions-answered", **parameters)
def get_joined_rooms( **parameters): return Route(path="/api/my-rooms", **parameters)
    def get_room_percentages(   **parameters): return Route(method=POST, path="/api/room-percentages", **parameters)     # ? is a POST but it retrieves data
def get_room_scoreboard( **parameters): return Route(path="/api/room/scoreboard?code={room_code}", **parameters)
def get_room_votes( **parameters): return Route(path="/api/room/votes?code={room_code}", **parameters)
def get_room_details( **parameters): return Route(path="/api/room/details?codes={room_code}", **parameters) # ? list posibility
def get_room_tasks( **parameters): return Route(path="/api/tasks/{room_code}", **parameters)
def post_room_answer( **parameters): return Route(method=POST, path="/api/{room_code}/answer", **parameters)
def post_deploy_machine( **parameters): return Route(method=POST, path="/material/deploy", **parameters)
def post_reset_room_progress(**parameters): return Route(method=POST, path="/api/reset-progress", **parameters)
def post_leave_room( **parameters): return Route(method=POST, path="/api/room/leave", **parameters)
class HTTP(request_cog, HTTPClient):
# * normal site calls
def get_server_time(self, **attrs):
return self.request(RouteList.get_server_time(), **attrs)
def get_site_stats(self, **attrs):
return self.request(RouteList.get_site_stats(), **attrs)
def get_practise_rooms(self, **attrs):
return self.request(RouteList.get_practise_rooms(), **attrs)
def get_serie(self, show, serie_code, **attrs):
return self.request(RouteList.get_series(show=show, options={"name": serie_code}), **attrs)
def get_series(self, show, **attrs):
return self.request(RouteList.get_series(show=show), **attrs)
def get_glossary_terms(self, **attrs):
return self.request(RouteList.get_glossary_terms(), **attrs)
# * Leaderboards
def get_leaderboards(self, country: _county_types, type:_leaderboard_types, **attrs):
return self.request(RouteList.get_leaderboards(country=country.to_lower_case(), type=type), **attrs)
def get_koth_leaderboards(self, country: _county_types, type:_leaderboard_types, **attrs):
return self.request(RouteList.get_koth_leaderboards(country=country.to_lower_case(), type=type), **attrs)
# * networks
def get_network(self, network_code, **attrs):
return self.request(RouteList.get_network(network_code=network_code), **attrs)
def get_networks(self, **attrs):
return self.request(RouteList.get_networks(),**attrs)
def get_network_cost(self, network_code, **attrs):
        return self.request(RouteList.get_network_cost(network_code=network_code), **attrs)
# * account
@checks.is_authenticated()
def get_subscription_cost(self, **attrs):
return self.request(RouteList.get_subscription_cost(), **attrs)
# * paths
def get_path(self, path_code, **attrs):
return self.request(RouteList.get_path(path_code=path_code), **attrs)
def get_public_paths(self, **attrs):
return self.request(RouteList.get_public_paths(), **attrs)
def get_path_summary(self, **attrs):
return self.request(RouteList.get_path_summary(), **attrs)
# * modules
def get_modules_summary(self, **attrs):
return self.request(RouteList.get_modules_summary(), **attrs)
def get_module(self, module_code, **attrs):
return self.request(RouteList.get_module(module_code), **attrs)
# * games
def get_machine_pool(self, **attrs):
return self.request(RouteList.get_machine_pool(), **attrs)
def get_game_detail(self, game_code, **attrs):
return self.request(RouteList.get_game_detail(game_code=game_code), **attrs)
def get_recent_games(self, **attrs):
return self.request(RouteList.get_recent_games(), **attrs)
def get_user_games(self, **attrs):
return self.request(RouteList.get_user_games(), **attrs)
def get_game_tickets_won(self, username, **attrs):
return self.request(RouteList.get_game_tickets_won(username=username), **attrs)
@checks.set_header_CSRF()
def post_join_koth(self, **attrs):
return self.request(RouteList.post_join_koth(), **attrs)
@checks.set_header_CSRF()
def post_new_koth(self, **attrs):
return self.request(RouteList.post_new_koth(), **attrs)
# * VPN
@checks.is_authenticated()
def get_available_vpns(self, type : _vpn_types, **attrs):
return self.request(RouteList.get_available_vpns(options={"type": type}), **attrs)
@checks.is_authenticated()
def get_vpn_info(self, **attrs):
return self.request(RouteList.get_vpn_info(), **attrs)
# * VM
def get_machine_running(self, **attrs):
return self.request(RouteList.get_machine_running(), **attrs)
@checks.set_header_CSRF()
def post_renew_machine(self, room_code, **attrs):
return self.request(RouteList.post_renew_machine(), json={"code": room_code}, **attrs)
@checks.set_header_CSRF()
def post_terminate_machine(self, room_code, **attrs):
return self.request(RouteList.post_terminate_machine(), json={"code": room_code}, **attrs)
# * user -badge
@checks.is_authenticated()
def get_own_badges(self, **attrs):
return self.request(RouteList.get_own_badges(), **attrs)
def get_user_badges(self, username, **attrs):
return self.request(RouteList.get_user_badges(username=username), **attrs)
def get_all_badges(self, **attrs):
return self.request(RouteList.get_all_badges(), **attrs)
# * user -team
@checks.is_authenticated()
def get_team_info(self, **attrs):
return self.request(RouteList.get_team_info(), **attrs)
# * user -notifications
@checks.is_authenticated()
def get_unseen_notifications(self, **attrs):
return self.request(RouteList.get_unseen_notifications(), **attrs)
@checks.is_authenticated()
def get_all_notifications(self, **attrs):
return self.request(RouteList.get_all_notifications(), **attrs)
# * user -messages
@checks.is_authenticated()
def get_unseen_messages(self, **attrs):
return self.request(RouteList.get_unseen_messages(), **attrs)
@checks.is_authenticated()
def get_all_group_messages(self, **attrs):
return self.request(RouteList.get_all_group_messages(), **attrs)
@checks.is_authenticated()
def get_group_messages(self, group_id, **attrs):
return self.request(RouteList.get_group_messages(group_id), **attrs)
# * user -room
def get_user_completed_rooms_count(self, username, **attrs):
return self.request(RouteList.get_user_completed_rooms_count(username=username), **attrs)
def get_user_completed_rooms(self, username, limit:int=10, page:int=1, **attrs):
return self.request(RouteList.get_user_completed_rooms(username=username, options={"limit": limit, "page": page}), **attrs)
def get_user_created_rooms(self, username, limit:int=10, page:int=1, **attrs):
return self.request(RouteList.get_user_created_rooms(username=username, options={"limit": limit, "page": page}), **attrs)
# * user
def get_user_rank(self, username : _not_none, **attrs):
return self.request(RouteList.get_user_rank(username=username), **attrs)
def get_user_activty(self, username : _not_none, **attrs):
return self.request(RouteList.get_user_activty(username=username), **attrs)
@checks.is_authenticated()
def get_all_friends(self, **attrs):
return self.request(RouteList.get_all_friends(), **attrs)
def get_discord_user(self, username : _not_none, **attrs):
return self.request(RouteList.get_discord_user(username=username), **attrs)
def get_user_exist(self, username : _not_none, **attrs):
return self.request(RouteList.get_user_exist(username=username), **attrs)
def search_user(self, username : _not_none, **attrs):
return self.request(RouteList.search_user(username=username), **attrs)
# * room
def get_new_rooms(self, **attrs):
return self.request(RouteList.get_new_rooms(), **attrs)
@checks.is_authenticated()
def get_recommended_rooms(self, **attrs):
return self.request(RouteList.get_recommended_rooms(), **attrs)
def get_questions_answered(self, **attrs):
return self.request(RouteList.get_questions_answered(), **attrs)
@checks.is_authenticated()
def get_joined_rooms(self, **attrs):
return self.request(RouteList.get_joined_rooms(), **attrs)
@checks.is_authenticated()
def get_room_percentages(self, room_codes, **attrs):
        return self.request(RouteList.get_room_percentages(), json={"rooms": room_codes}, **attrs)
@checks.is_authenticated()
def get_room_scoreboard(self, room_code, **attrs):
return self.request(RouteList.get_room_scoreboard(room_code=room_code), **attrs)
def get_room_votes(self, room_code, **attrs):
return self.request(RouteList.get_room_votes(room_code=room_code), **attrs)
def get_room_details(self, room_code, loadWriteUps: bool=True, loadCreators: bool=True, loadUser: bool=True, **attrs):
return self.request(RouteList.get_room_details(room_code=room_code, options={"loadWriteUps": loadWriteUps, "loadCreators": loadCreators, "loadUser": loadUser}), **attrs).get(room_code, {})
def get_room_tasks(self, room_code, **attrs):
return self.request(RouteList.get_room_tasks(room_code=room_code), **attrs)
@checks.set_header_CSRF()
@checks.is_authenticated()
def post_room_answer(self, room_code, taskNo: int, questionNo: int, answer: str, **attrs):
return self.request(RouteList.post_room_answer(room_code=room_code), json={"taskNo": taskNo, "questionNo": questionNo, "answer": answer}, **attrs)
@checks.set_header_CSRF()
@checks.is_authenticated()
def post_deploy_machine(self, room_code, uploadId, **attrs):
return self.request(RouteList.post_deploy_machine(), json={"roomCode": room_code, "id": uploadId}, **attrs)
@checks.set_header_CSRF()
@checks.is_authenticated()
def post_reset_room_progress(self, room_code, **attrs):
return self.request(RouteList.post_reset_room_progress(), json={"code": room_code}, **attrs)
@checks.set_header_CSRF()
@checks.is_authenticated()
def post_leave_room(self, room_code, **attrs):
return self.request(RouteList.post_leave_room(), json={"code": room_code}, **attrs)
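# Illustrative usage sketch (hedged): authentication is cookie based, so the
# client is constructed from an existing `connect.sid` cookie value.
#
#   client = HTTP(session="<your connect.sid cookie value>")
#   print(client.get_server_time())
#   print(client.get_user_rank(username="some_user"))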
| 2.296875 | 2 |
flatlander/runner/experiment_runner.py | wullli/flatlander | 3 | 5367 | <reponame>wullli/flatlander<gh_stars>1-10
import os
from argparse import ArgumentParser
from pathlib import Path
import gym
import ray
import ray.tune.result as ray_results
import yaml
from gym.spaces import Tuple
from ray.cluster_utils import Cluster
from ray.rllib.utils import try_import_tf, try_import_torch
from ray.tune import run_experiments, register_env
from ray.tune.logger import TBXLogger
from ray.tune.resources import resources_to_json
from ray.tune.tune import _make_scheduler
from ray.tune.utils import merge_dicts
from flatlander.envs import get_eval_config
from flatlander.envs.flatland_sparse import FlatlandSparse
from flatlander.envs.observations import make_obs
from flatlander.envs.utils.global_gym_env import GlobalFlatlandGymEnv
from flatlander.envs.utils.gym_env_fill_missing import FillingFlatlandGymEnv
from flatlander.logging.custom_metrics import on_episode_end
from flatlander.logging.wandb_logger import WandbLogger
from flatlander.utils.loader import load_envs, load_models
ray_results.DEFAULT_RESULTS_DIR = os.path.join(os.getcwd(), "..", "..", "..", "flatland-challenge-data/results")
class ExperimentRunner:
group_algorithms = ["QMIX", "QMIXApex"]
def __init__(self):
self.tf = try_import_tf()
self.torch, _ = try_import_torch()
load_envs(os.path.dirname(__file__))
load_models(os.path.dirname(__file__))
@staticmethod
def get_experiments(run_args, arg_parser: ArgumentParser = None):
if run_args.config_file:
with open(run_args.config_file) as f:
experiments = yaml.safe_load(f)
else:
experiments = {
run_args.experiment_name: { # i.e. log to ~/ray_results/default
"run": run_args.run,
"checkpoint_freq": run_args.checkpoint_freq,
"keep_checkpoints_num": run_args.keep_checkpoints_num,
"checkpoint_score_attr": run_args.checkpoint_score_attr,
"local_dir": run_args.local_dir,
"resources_per_trial": (
run_args.resources_per_trial and
resources_to_json(run_args.resources_per_trial)),
"stop": run_args.stop,
"config": dict(run_args.config, env=run_args.env),
"restore": run_args.restore,
"num_samples": run_args.num_samples,
"upload_dir": run_args.upload_dir,
}
}
if arg_parser is not None:
for exp in experiments.values():
if not exp.get("run"):
arg_parser.error("the following arguments are required: --run")
if not exp.get("envs") and not exp.get("config", {}).get("envs"):
arg_parser.error("the following arguments are required: --envs")
return experiments
@staticmethod
def setup_grouping(config: dict):
grouping = {
"group_1": list(range(config["env_config"]["max_n_agents"])),
}
obs_space = Tuple([make_obs(config["env_config"]["observation"],
config["env_config"]["observation_config"]).observation_space()
for _ in range(config["env_config"]["max_n_agents"])])
act_space = Tuple([GlobalFlatlandGymEnv.action_space for _ in range(config["env_config"]["max_n_agents"])])
register_env(
"flatland_sparse_grouped",
lambda config: FlatlandSparse(config).with_agent_groups(
grouping, obs_space=obs_space, act_space=act_space))
def setup_policy_map(self, config: dict):
obs_space = make_obs(config["env_config"]["observation"],
config["env_config"]["observation_config"]).observation_space()
config["multiagent"] = {
"policies": {"pol_" + str(i): (None, obs_space, FillingFlatlandGymEnv.action_space, {"agent_id": i})
for i in range(config["env_config"]["observation_config"]["max_n_agents"])},
"policy_mapping_fn": lambda agent_id: "pol_" + str(agent_id)}
def setup_hierarchical_policies(self, config: dict):
obs_space: gym.spaces.Tuple = make_obs(config["env_config"]["observation"],
config["env_config"]["observation_config"]).observation_space()
config["multiagent"] = {
"policies": {"meta": (None, obs_space.spaces[0], gym.spaces.Box(high=1, low=0, shape=(1,)), {}),
"agent": (None, obs_space.spaces[1], FillingFlatlandGymEnv.action_space, {})
},
"policy_mapping_fn": lambda agent_id: "meta" if 'meta' in str(agent_id) else "agent"
}
def apply_args(self, run_args, experiments: dict):
verbose = 1
webui_host = '127.0.0.1'
for exp in experiments.values():
if run_args.eager:
exp["config"]["eager"] = True
if run_args.torch:
exp["config"]["use_pytorch"] = True
if run_args.v:
exp["config"]["log_level"] = "INFO"
verbose = 2
if run_args.vv:
exp["config"]["log_level"] = "DEBUG"
verbose = 3
if run_args.trace:
if not exp["config"].get("eager"):
raise ValueError("Must enable --eager to enable tracing.")
exp["config"]["eager_tracing"] = True
if run_args.bind_all:
webui_host = "0.0.0.0"
if run_args.log_flatland_stats:
exp['config']['callbacks'] = {
'on_episode_end': on_episode_end,
}
return experiments, verbose
@staticmethod
def evaluate(exp):
eval_configs = get_eval_config(exp['config'].get('env_config',
{}).get('eval_generator', "default"))
eval_seed = eval_configs.get('evaluation_config', {}).get('env_config', {}).get('seed')
# add evaluation config to the current config
exp['config'] = merge_dicts(exp['config'], eval_configs)
if exp['config'].get('evaluation_config'):
exp['config']['evaluation_config']['env_config'] = exp['config'].get('env_config')
eval_env_config = exp['config']['evaluation_config'].get('env_config')
if eval_seed and eval_env_config:
# We override the envs seed from the evaluation config
eval_env_config['seed'] = eval_seed
# Remove any wandb related configs
if eval_env_config:
if eval_env_config.get('wandb'):
del eval_env_config['wandb']
# Remove any wandb related configs
if exp['config']['evaluation_config'].get('wandb'):
del exp['config']['evaluation_config']['wandb']
def run(self, experiments: dict, args=None):
verbose = 1
webui_host = "localhost"
for exp in experiments.values():
if exp.get("config", {}).get("input"):
if not isinstance(exp.get("config", {}).get("input"), dict):
if not os.path.exists(exp["config"]["input"]):
rllib_dir = Path(__file__).parent
input_file = rllib_dir.absolute().joinpath(exp["config"]["input"])
exp["config"]["input"] = str(input_file)
if exp["run"] in self.group_algorithms:
self.setup_grouping(exp.get("config"))
if exp["run"] == "contrib/MADDPG" or exp["config"].get("individual_policies", False):
self.setup_policy_map(exp.get("config"))
if exp["config"].get("individual_policies", False):
del exp["config"]["individual_policies"]
if exp["run"] == "contrib/MADDPG":
exp.get("config")["env_config"]["learning_starts"] = 100
exp.get("config")["env_config"]["actions_are_logits"] = True
if exp["env"] == "flatland_sparse_hierarchical":
self.setup_hierarchical_policies(exp.get("config"))
if args is not None:
experiments, verbose = self.apply_args(run_args=args, experiments=experiments)
if args.eval:
self.evaluate(exp)
if args.config_file:
# TODO should be in exp['config'] directly
exp['config']['env_config']['yaml_config'] = args.config_file
exp['loggers'] = [WandbLogger, TBXLogger]
if args.ray_num_nodes:
cluster = Cluster()
for _ in range(args.ray_num_nodes):
cluster.add_node(
num_cpus=args.ray_num_cpus or 1,
num_gpus=args.ray_num_gpus or 1,
object_store_memory=args.ray_object_store_memory,
memory=args.ray_memory,
redis_max_memory=args.ray_redis_max_memory)
ray.init(address=cluster.address)
else:
import multiprocessing
n_cpu = multiprocessing.cpu_count()
import tensorflow as tf
n_gpu = len(tf.config.experimental.list_physical_devices('GPU'))
print("NUM_CPUS AVAILABLE: ", n_cpu)
print("NUM_GPUS AVAILABLE: ", n_gpu)
print("NUM_CPUS ARGS: ", args.ray_num_cpus)
print("NUM_GPUS ARGS: ", args.ray_num_gpus)
ray.init(
local_mode=True if args.local else False,
address=args.ray_address,
object_store_memory=args.ray_object_store_memory,
num_cpus=args.ray_num_cpus if args.ray_num_cpus is not None else n_cpu,
num_gpus=args.ray_num_gpus if args.ray_num_gpus is not None else n_gpu)
run_experiments(
experiments,
scheduler=_make_scheduler(args),
queue_trials=args.queue_trials,
resume=args.resume,
verbose=verbose,
concurrent=True)
ray.shutdown()
| 1.945313 | 2 |
syslib/utils_keywords.py | rahulmah/sample-cloud-native-toolchain-tutorial-20170720084529291 | 1 | 5368 | <filename>syslib/utils_keywords.py<gh_stars>1-10
#!/usr/bin/env python
r"""
This module contains keyword functions to supplement robot's built in
functions and use in test where generic robot keywords don't support.
"""
import time
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries import DateTime
import re
###############################################################################
def run_until_keyword_fails(retry, retry_interval, name, *args):
r"""
Execute a robot keyword repeatedly until it either fails or the timeout
value is exceeded.
Note: Opposite of robot keyword "Wait Until Keyword Succeeds".
Description of argument(s):
retry Max timeout time in hour(s).
retry_interval Time interval in minute(s) for looping.
name Robot keyword to execute.
args Robot keyword arguments.
"""
    # Convert the retry time to seconds
retry_seconds = DateTime.convert_time(retry)
timeout = time.time() + int(retry_seconds)
    # Convert the interval time to seconds
interval_seconds = DateTime.convert_time(retry_interval)
interval = int(interval_seconds)
BuiltIn().log(timeout)
BuiltIn().log(interval)
while True:
status = BuiltIn().run_keyword_and_return_status(name, *args)
# Return if keywords returns as failure.
if status is False:
BuiltIn().log("Failed as expected")
return False
# Return if retry timeout as success.
elif time.time() > timeout > 0:
BuiltIn().log("Max retry timeout")
return True
time.sleep(interval)
BuiltIn().log(time.time())
return True
###############################################################################
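# Example usage from a Robot Framework test (sketch; keyword and argument
# names are placeholders):
#
#   ${status}=    Run Until Keyword Fails    1 hour    1 minute
#   ...           My Flaky Keyword    ${arg}
#
# The call returns False as soon as the keyword fails and True if the retry
# timeout is reached first.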
###############################################################################
def htx_error_log_to_list(htx_error_log_output):
r"""
Parse htx error log output string and return list of strings in the form
"<field name>:<field value>".
The output of this function may be passed to the build_error_dict function.
Description of argument(s):
htx_error_log_output Error entry string containing the stdout
generated by "htxcmdline -geterrlog".
Example of htx_error_log_output contents:
######################## Result Starts Here ###############################
Currently running ECG/MDT : /usr/lpp/htx/mdt/mdt.whit
===========================
---------------------------------------------------------------------
Device id:/dev/nvidia0
Timestamp:Mar 29 19:41:54 2017
err=00000027
sev=1
Exerciser Name:hxenvidia
Serial No:Not Available
Part No:Not Available
Location:Not Available
FRU Number:Not Available
Device:Not Available
Error Text:cudaEventSynchronize for stopEvent returned err = 0039 from file
, line 430.
---------------------------------------------------------------------
---------------------------------------------------------------------
Device id:/dev/nvidia0
Timestamp:Mar 29 19:41:54 2017
err=00000027
sev=1
Exerciser Name:hxenvidia
Serial No:Not Available
Part No:Not Available
Location:Not Available
FRU Number:Not Available
Device:Not Available
Error Text:Hardware Exerciser stopped on error
---------------------------------------------------------------------
######################### Result Ends Here ################################
Example output:
Returns the lists of error string per entry
['Device id:/dev/nvidia0',
'Timestamp:Mar 29 19:41:54 2017',
'err=00000027',
'sev=1',
'Exerciser Name:hxenvidia',
'Serial No:Not Available',
'Part No:Not Available',
'Location:Not Available',
'FRU Number:Not Available',
'Device:Not Available',
'Error Text:cudaEventSynchronize for stopEvent returned err = 0039
from file , line 430.']
"""
# List which will hold all the list of entries.
error_list = []
temp_error_list = []
parse_walk = False
for line in htx_error_log_output.splitlines():
# Skip lines starting with "#"
if line.startswith("#"):
continue
# Mark line starting with "-" and set parse flag.
if line.startswith("-") and parse_walk is False:
parse_walk = True
continue
# Mark line starting with "-" and reset parse flag.
# Set temp error list to EMPTY.
elif line.startswith("-"):
error_list.append(temp_error_list)
parse_walk = False
temp_error_list = []
        # Add the entry to the list if the line is not empty
elif parse_walk:
temp_error_list.append(str(line))
return error_list
###############################################################################
###############################################################################
def build_error_dict(htx_error_log_output):
r"""
Builds error list into a list of dictionary entries.
Description of argument(s):
    htx_error_log_output            Error entry string containing the stdout
                                    generated by "htxcmdline -geterrlog".
Example output dictionary:
{
0:
{
'sev': '1',
'err': '00000027',
'Timestamp': 'Mar 29 19:41:54 2017',
'Part No': 'Not Available',
'Serial No': 'Not Available',
'Device': 'Not Available',
'FRU Number': 'Not Available',
'Location': 'Not Available',
'Device id': '/dev/nvidia0',
'Error Text': 'cudaEventSynchronize for stopEvent returned err = 0039
from file , line 430.',
'Exerciser Name': 'hxenvidia'
},
1:
{
'sev': '1',
'err': '00000027',
'Timestamp': 'Mar 29 19:41:54 2017',
'Part No': 'Not Available',
'Serial No': 'Not Available',
'Device': 'Not Available',
'FRU Number': 'Not Available',
'Location': 'Not Available',
'Device id': '/dev/nvidia0',
'Error Text': 'Hardware Exerciser stopped on error',
'Exerciser Name': 'hxenvidia'
}
},
"""
# List which will hold all the list of entries.
error_list = []
error_list = htx_error_log_to_list(htx_error_log_output)
# dictionary which holds the error dictionry entry.
error_dict = {}
temp_error_dict = {}
error_index = 0
# Loop through the error list.
for entry_list in error_list:
# Loop through the first error list entry.
for entry in entry_list:
            # Split the string into a key/value pair on the first ":" or "="
            # only, so values that themselves contain delimiters stay intact.
            # Example: 'Device id:/dev/nvidia0'
            # Example: 'err=00000027'
            parm_split = re.split("[:=]", entry, maxsplit=1)
# Populate temp dictionary with key value pair data.
temp_error_dict[str(parm_split[0])] = parm_split[1]
# Update the master dictionary per entry index.
error_dict[error_index] = temp_error_dict
# Reset temp dict to EMPTY and increment index count.
temp_error_dict = {}
error_index += 1
return error_dict
###############################################################################
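# Usage sketch (hedged; "Get HTX Error Log" is a hypothetical keyword that
# returns the raw "htxcmdline -geterrlog" stdout):
#
#   htx_output = BuiltIn().run_keyword("Get HTX Error Log")
#   error_dict = build_error_dict(htx_output)
#   for index, entry in error_dict.items():
#       BuiltIn().log("Error %d on %s: %s"
#                     % (index, entry.get("Device id"), entry.get("Error Text")))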
| 2.828125 | 3 |
tools/webcam/webcam_apis/nodes/__init__.py | ivmtorres/mmpose | 1 | 5369 | # Copyright (c) OpenMMLab. All rights reserved.
from .builder import NODES
from .faceswap_nodes import FaceSwapNode
from .frame_effect_nodes import (BackgroundNode, BugEyeNode, MoustacheNode,
NoticeBoardNode, PoseVisualizerNode,
SaiyanNode, SunglassesNode)
from .helper_nodes import ModelResultBindingNode, MonitorNode, RecorderNode
from .mmdet_nodes import DetectorNode
from .mmpose_nodes import TopDownPoseEstimatorNode
from .xdwendwen_nodes import XDwenDwenNode
__all__ = [
'NODES', 'PoseVisualizerNode', 'DetectorNode', 'TopDownPoseEstimatorNode',
'MonitorNode', 'BugEyeNode', 'SunglassesNode', 'ModelResultBindingNode',
'NoticeBoardNode', 'RecorderNode', 'FaceSwapNode', 'MoustacheNode',
'SaiyanNode', 'BackgroundNode', 'XDwenDwenNode'
]
| 1.265625 | 1 |
DBParser/DBMove.py | lelle1234/Db2Utils | 4 | 5370 | <gh_stars>1-10
#!/usr/bin/python3
import ibm_db
import getopt
import sys
import os
from toposort import toposort_flatten
db = None
host = "localhost"
port = "50000"
user = None
pwd = None
outfile = None
targetdb = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:d:P:u:p:o:t:")
except getopt.GetoptError:
sys.exit(-1)
for o, a in opts:
if o == "-d":
db = a
if o == "-h":
host = a
if o == "-P":
port = a
if o == "-u":
user = a
if o == "-p":
pwd = a
if o == "-t":
targetdb = a
if db is None or user is None or pwd is None or targetdb is None:
print("Usage: DBMove.py [-h <host> -P <port>] -d <db> -u <user> -p <pwd> -t <target>")
sys.exit(1)
db = db.upper()
targetdb = targetdb.upper()
cfg = (db, host, port, user, pwd)
conn = ibm_db.connect("DATABASE=%s; HOSTNAME=%s; PORT=%s; PROTOCOL=TCPIP; UID=%s; PWD=%s" % cfg, "", "")
get_db_type = "values nya.get_db_type()"
find_edges = """
SELECT rtrim(t.tabschema) || '.' || rtrim(t.tabname)
, coalesce(rtrim(r.reftabschema) || '.' || rtrim(r.reftabname), 'dummy')
FROM syscat.tables t
LEFT JOIN syscat.references r
ON (t.tabschema, t.tabname) = (r.tabschema, r.tabname)
WHERE t.tabschema not like 'SYS%'
AND t.type = 'T'
AND rtrim(t.tabschema) not like 'NYA_%'
AND t.tabschema <> 'TMP'
ORDER BY 1
"""
identity_skip = """
select rtrim(tabschema) || '.' || rtrim(tabname) from syscat.columns
where identity = 'Y' and generated = 'D'
"""
stmt = ibm_db.prepare(conn, get_db_type)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
db_type = tpl[0]
edges = dict()
stmt = ibm_db.prepare(conn, find_edges)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
n1, n2 = tpl
try:
edges[n1].add(n2)
except KeyError:
edges[n1] = set()
edges[n1].add(n2)
tpl = ibm_db.fetch_tuple(stmt)
sorted_nodes = list(toposort_flatten(edges))
# print(sorted_nodes)
identity_skip_arr = []
edges = dict()
stmt = ibm_db.prepare(conn, identity_skip)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
identity_skip_arr.append(tpl[0])
tpl = ibm_db.fetch_tuple(stmt)
# print(identity_skip)
os.makedirs(db, exist_ok=True)
export_file = open("%s/export.sql" % db, "w")
load_file = open("%s/load.sql" % db, "w")
export_file.write("connect to %s;\n" % db)
load_file.write("connect to %s;\n" % targetdb)
if db_type == "N":
load_file.write("""set integrity for nya.person off;\n""")
load_file.write("""alter table nya.person
alter column EMAIL_UC drop generated
alter column NORMALIZED_FIRSTNAME drop generated
alter column NORMALIZED_LASTNAME drop generated;\n""")
load_file.write("""set integrity for nya.person immediate checked;\n""")
for t in sorted_nodes:
if t == "dummy":
continue
export_file.write("export to %s.ixf of ixf lobs to . modified by codepage=819 messages export_%s.msg select * from %s;\n" % (t,t,t))
identityskip = "identityoverride"
if t in identity_skip_arr:
identityskip = " "
load_file.write("load from %s.ixf of ixf lobs from . modified by generatedoverride %s messages load_%s.msg replace into %s;\n" % (t, identityskip, t, t))
if db_type == "N":
load_file.write("""set integrity for nya.person off;\n""")
load_file.write("""alter table nya.person
alter column EMAIL_UC set generated always as ( upper(email))
alter column NORMALIZED_FIRSTNAME set generated always as ( NYA.REMOVE_DIACRITICS( FIRSTNAME ) )
alter column NORMALIZED_LASTNAME set generated always as ( NYA.REMOVE_DIACRITICS( LASTNAME ) );\n""")
load_file.write("""set integrity for nya.person immediate checked force generated;\n""")
load_file.write("""echo set integrity for all tables;\n""")
export_file.write("connect reset;\n")
load_file.write("connect reset;\n")
export_file.close()
load_file.close()
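# Example invocation (sketch; host, credentials and database names are
# placeholders). The generated scripts are then run with the DB2 CLP:
#
#   python3 DBMove.py -h dbhost -P 50000 -d MYDB -u db2inst1 -p secret -t NEWDB
#   db2 -tvf MYDB/export.sql
#   db2 -tvf MYDB/load.sql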
| 2.25 | 2 |
utils/glove.py | MirunaPislar/Word2vec | 13 | 5371 | import numpy as np
DEFAULT_FILE_PATH = "utils/datasets/glove.6B.50d.txt"
def loadWordVectors(tokens, filepath=DEFAULT_FILE_PATH, dimensions=50):
"""Read pretrained GloVe vectors"""
wordVectors = np.zeros((len(tokens), dimensions))
with open(filepath) as ifs:
for line in ifs:
line = line.strip()
if not line:
continue
row = line.split()
token = row[0]
if token not in tokens:
continue
data = [float(x) for x in row[1:]]
if len(data) != dimensions:
raise RuntimeError("wrong number of dimensions")
wordVectors[tokens[token]] = np.asarray(data)
return wordVectors
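if __name__ == "__main__":
    # Usage sketch: `tokens` maps each word to its row index in the returned
    # matrix. Assumes the 50-d GloVe file exists at DEFAULT_FILE_PATH.
    tokens = {"the": 0, "cat": 1, "sat": 2}
    vectors = loadWordVectors(tokens)
    print(vectors.shape)  # (3, 50)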
| 2.921875 | 3 |
composer/profiler/__init__.py | stanford-crfm/composer | 0 | 5372 | <gh_stars>0
# Copyright 2021 MosaicML. All Rights Reserved.
"""Performance profiling tools.
The profiler gathers performance metrics during a training run that can be used to diagnose bottlenecks and
facilitate model development.
The metrics gathered include:
* Duration of each :class:`.Event` during training
* Time taken by the data loader to return a batch
* Host metrics such as CPU, system memory, disk and network utilization over time
* Execution order, latency and attributes of PyTorch operators and GPU kernels (see :doc:`profiler`)
The following example demonstrates how to setup and perform profiling on a simple training run.
.. literalinclude:: ../../../examples/profiler_demo.py
:language: python
:linenos:
:emphasize-lines: 6, 27-49
It is required to specify an output ``profiler_trace_file`` during :class:`.Trainer` initialization to enable profiling.
The ``profiler_trace_file`` will contain the profiling trace data once the profiling run completes. By default, the :class:`.Profiler`,
:class:`.DataloaderProfiler` and :class:`.SystemProfiler` will be active. The :class:`.TorchProfiler` is **disabled** by default.
To activate the :class:`.TorchProfiler`, the ``torch_profiler_trace_dir`` must be specified *in addition* to the ``profiler_trace_file`` argument.
The ``torch_profiler_trace_dir`` will contain the Torch Profiler traces once the profiling run completes. The :class:`.Profiler` will
automatically merge the Torch traces in the ``torch_profiler_trace_dir`` into the ``profiler_trace_file``, allowing users to view a unified trace.
The complete traces can be viewed by in a Google Chrome browser navigating to ``chrome://tracing`` and loading the ``profiler_trace_file``.
Here is an example trace file:
.. image:: https://storage.googleapis.com/docs.mosaicml.com/images/profiler/profiler_trace_example.png
:alt: Example Profiler Trace File
:align: center
    Additional details can be found in the Profiler Guide.
"""
from composer.profiler._event_handler import ProfilerEventHandler
from composer.profiler._profiler import Marker, Profiler
from composer.profiler._profiler_action import ProfilerAction
# All needs to be defined properly for sphinx autosummary
__all__ = [
"Marker",
"Profiler",
"ProfilerAction",
"ProfilerEventHandler",
]
Marker.__module__ = __name__
Profiler.__module__ = __name__
ProfilerAction.__module__ = __name__
ProfilerEventHandler.__module__ = __name__
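# Illustrative usage sketch (hedged): enabling the profiler as described in the
# module docstring above. `model` and `train_dataloader` are placeholders.
#
#   from composer.trainer import Trainer
#
#   trainer = Trainer(
#       model=model,
#       train_dataloader=train_dataloader,
#       max_duration="2ep",
#       profiler_trace_file="profiler_trace.json",          # enables profiling
#       torch_profiler_trace_dir="torch_profiler_traces",   # enables TorchProfiler
#   )
#   trainer.fit()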
| 2.4375 | 2 |
gremlin-python/src/main/jython/setup.py | EvKissle/tinkerpop | 0 | 5373 | <reponame>EvKissle/tinkerpop<gh_stars>0
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import codecs
import os
import sys
import time
from setuptools import setup
# Folder containing the setup.py
root = os.path.dirname(os.path.abspath(__file__))
# Path to __version__ module
version_file = os.path.join(root, 'gremlin_python', '__version__.py')
# Check if this is a source distribution.
# If not create the __version__ module containing the version
if not os.path.exists(os.path.join(root, 'PKG-INFO')):
timestamp = int(os.getenv('TIMESTAMP', time.time() * 1000)) / 1000
fd = codecs.open(version_file, 'w', 'utf-8')
fd.write("'''")
fd.write(__doc__)
fd.write("'''\n")
fd.write('version = %r\n' % os.getenv('VERSION', '?').replace('-SNAPSHOT', '.dev-%d' % timestamp))
fd.write('timestamp = %d\n' % timestamp)
fd.close()
# Load version
from gremlin_python import __version__
version = __version__.version
install_requires = [
'aenum==1.4.5',
'tornado==4.4.1',
'six==1.10.0'
]
if sys.version_info < (3,2):
install_requires += ['futures==3.0.5']
setup(
name='gremlinpython',
version=version,
packages=['gremlin_python', 'gremlin_python.driver',
'gremlin_python.driver.tornado', 'gremlin_python.process',
'gremlin_python.structure', 'gremlin_python.structure.io'],
license='Apache 2',
url='http://tinkerpop.apache.org',
description='Gremlin-Python for Apache TinkerPop',
long_description=codecs.open("README", "r", "UTF-8").read(),
test_suite="tests",
data_files=[("", ["LICENSE", "NOTICE"])],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
'mock'
],
install_requires=install_requires,
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
]
)
| 1.46875 | 1 |
src/_bar.py | yoshihikosuzuki/plotly_light | 0 | 5374 | <reponame>yoshihikosuzuki/plotly_light<gh_stars>0
from typing import Optional, Sequence
import plotly.graph_objects as go
def bar(x: Sequence,
y: Sequence,
text: Optional[Sequence] = None,
width: Optional[int] = None,
col: Optional[str] = None,
opacity: float = 1,
name: Optional[str] = None,
show_legend: bool = False,
show_init: bool = True) -> go.Bar:
"""Create a simple Trace object of a histogram.
positional arguments:
@ x : Coordinates of data on x-axis.
@ y : Coordinates of data on y-axis.
optional arguments:
@ col : Color of bars.
@ opacity : Opacity of bars.
@ name : Display name of the trace in legend.
@ show_legend : Show this trace in legend.
@ show_init : Show this trace initially.
"""
return go.Bar(x=x,
y=y,
text=text,
width=width,
marker_color=col,
opacity=opacity,
name=name,
showlegend=show_legend,
visible=None if show_init else "legendonly")
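if __name__ == "__main__":
    # Usage sketch: wrap the returned trace in a Figure to display it.
    fig = go.Figure(bar(x=["a", "b", "c"], y=[3, 1, 2], col="steelblue"))
    fig.show()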
| 3.21875 | 3 |
pennylane/templates/subroutines/arbitrary_unitary.py | doomhammerhell/pennylane | 3 | 5375 | <reponame>doomhammerhell/pennylane
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Contains the ArbitraryUnitary template.
"""
import pennylane as qml
from pennylane.operation import Operation, AnyWires
from pennylane.ops import PauliRot
_PAULIS = ["I", "X", "Y", "Z"]
def _tuple_to_word(index_tuple):
"""Convert an integer tuple to the corresponding Pauli word.
The Pauli operators are converted as ``0 -> I``, ``1 -> X``,
``2 -> Y``, ``3 -> Z``.
Args:
index_tuple (Tuple[int]): An integer tuple describing the Pauli word
Returns:
str: The corresponding Pauli word
"""
return "".join([_PAULIS[i] for i in index_tuple])
def _n_k_gray_code(n, k, start=0):
"""Iterates over a full n-ary Gray code with k digits.
Args:
n (int): Base of the Gray code. Needs to be greater than one.
k (int): Number of digits of the Gray code. Needs to be greater than zero.
start (int, optional): Optional start of the Gray code. The generated code
will be shorter as the code does not wrap. Defaults to 0.
"""
for i in range(start, n ** k):
codeword = [0] * k
        base_representation = []
        val = i
        for j in range(k):
            base_representation.append(val % n)
            val //= n
        shift = 0
        for j in reversed(range(k)):
            codeword[j] = (base_representation[j] + shift) % n
shift += n - codeword[j]
yield codeword
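# For example, the full 2-ary Gray code with 2 digits yields
# [[0, 0], [1, 0], [1, 1], [0, 1]] -- exactly one digit changes per step:
#
#   assert list(_n_k_gray_code(2, 2)) == [[0, 0], [1, 0], [1, 1], [0, 1]]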
def _all_pauli_words_but_identity(num_wires):
# Start at 1 to ignore identity
yield from (_tuple_to_word(idx_tuple) for idx_tuple in _n_k_gray_code(4, num_wires, start=1))
class ArbitraryUnitary(Operation):
"""Implements an arbitrary unitary on the specified wires.
An arbitrary unitary on :math:`n` wires is parametrized by :math:`4^n - 1`
    independent real parameters. This template uses Pauli word rotations to
parametrize the unitary.
**Example**
ArbitraryUnitary can be used as a building block, e.g. to parametrize arbitrary
two-qubit operations in a circuit:
.. code-block:: python
@qml.template
def arbitrary_nearest_neighbour_interaction(weights, wires):
qml.broadcast(unitary=ArbitraryUnitary, pattern="double", wires=wires, params=weights)
Args:
weights (tensor_like): The angles of the Pauli word rotations, needs to have length :math:`4^n - 1`
where :math:`n` is the number of wires the template acts upon.
wires (Iterable): wires that the template acts on
"""
num_params = 1
num_wires = AnyWires
par_domain = "A"
def __init__(self, weights, wires, do_queue=True, id=None):
shape = qml.math.shape(weights)
if shape != (4 ** len(wires) - 1,):
raise ValueError(
f"Weights tensor must be of shape {(4 ** len(wires) - 1,)}; got {shape}."
)
super().__init__(weights, wires=wires, do_queue=do_queue, id=id)
def expand(self):
weights = self.parameters[0]
with qml.tape.QuantumTape() as tape:
for i, pauli_word in enumerate(_all_pauli_words_but_identity(len(self.wires))):
PauliRot(weights[i], pauli_word, wires=self.wires)
return tape
@staticmethod
def shape(n_wires):
"""Compute the expected shape of the weights tensor.
Args:
n_wires (int): number of wires that template acts on
"""
return (4 ** n_wires - 1,)
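# Illustrative usage sketch (not from the original file): `shape` gives the
# required weight count, e.g. 4**2 - 1 = 15 for two wires.
#
#   import numpy as np
#   weights = np.random.random(ArbitraryUnitary.shape(n_wires=2))  # shape (15,)
#   ArbitraryUnitary(weights, wires=[0, 1])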
| 2.90625 | 3 |
vae_celeba.py | aidiary/generative-models-pytorch | 0 | 5376 | <reponame>aidiary/generative-models-pytorch
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import CelebA
class Encoder(nn.Module):
def __init__(self):
super().__init__()
self.conv_layers = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
)
self.mu_layer = nn.Linear(4096, 200)
self.logvar_layer = nn.Linear(4096, 200)
def forward(self, imgs):
out = self.conv_layers(imgs)
out = nn.Flatten()(out)
mu = self.mu_layer(out)
logvar = self.logvar_layer(out)
return mu, logvar
class Decoder(nn.Module):
def __init__(self):
super().__init__()
self.decoder_input = nn.Linear(200, 4096)
self.deconv_layers = nn.Sequential(
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(),
nn.ConvTranspose2d(32, 3, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(3),
nn.Sigmoid(),
)
def forward(self, z):
out = self.decoder_input(z)
out = out.view(-1, 64, 8, 8)
recon_img = self.deconv_layers(out)
return recon_img
class VanillaVAE(pl.LightningModule):
def __init__(self):
super().__init__()
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self, img):
mu, logvar = self.encoder(img)
return mu
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=0.005)
return optimizer
def training_step(self, train_batch, batch_idx):
img, labels = train_batch
mu, logvar = self.encoder(img)
z = self.reparameterize(mu, logvar)
recon_img = self.decoder(z)
recon_loss_factor = 10000
recon_loss = F.mse_loss(recon_img, img)
kld_loss = torch.mean(-0.5 * torch.sum(1 + logvar - mu**2 - logvar.exp(), dim=1))
loss = recon_loss_factor * recon_loss + kld_loss
self.log('train/loss', loss)
self.log('train/recon_loss', recon_loss)
self.log('train/kl_loss', kld_loss)
return loss
def validation_step(self, val_batch, batch_idx):
img, labels = val_batch
mu, logvar = self.encoder(img)
z = self.reparameterize(mu, logvar)
recon_img = self.decoder(z)
recon_loss_factor = 10000
recon_loss = F.mse_loss(recon_img, img)
kld_loss = torch.mean(-0.5 * torch.sum(1 + logvar - mu**2 - logvar.exp(), dim=1))
loss = recon_loss_factor * recon_loss + kld_loss
self.log('val/loss', loss)
self.log('val/recon_loss', recon_loss)
self.log('val/kl_loss', kld_loss)
return loss
def reconstruct(self, img):
mu, _ = self.encoder(img)
recon_img = self.decoder(mu)
return recon_img
def sample(self, num_samples=64):
z = torch.randn(num_samples, 200)
samples = self.decoder(z)
return samples
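# Minimal usage sketch (illustrative; the checkpoint path is hypothetical):
#   model = VanillaVAE.load_from_checkpoint('checkpoints/vae_celeba.ckpt')
#   imgs = model.sample(num_samples=16)  # tensor of shape (16, 3, 128, 128) in [0, 1]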
if __name__ == '__main__':
# data
transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.CenterCrop(148),
transforms.Resize(128),
transforms.ToTensor()
])
train_dataset = CelebA(root='data', split='train', transform=transform, download=False)
val_dataset = CelebA(root='data', split='test', transform=transform, download=False)
train_loader = DataLoader(train_dataset,
batch_size=32,
num_workers=8,
shuffle=True,
drop_last=True)
val_loader = DataLoader(val_dataset,
batch_size=32,
num_workers=8,
shuffle=False,
drop_last=True)
# model
model = VanillaVAE()
# training
tb_logger = TensorBoardLogger('lightning_logs', name='vanilla_vae_celeba', default_hp_metric=False)
trainer = pl.Trainer(gpus=[0], max_epochs=200, logger=tb_logger)
trainer.fit(model, train_loader, val_loader)
| 2.421875 | 2 |
data/process_data.py | julat/DisasterResponse | 0 | 5377 |
# Import libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Load the data from the disaster response csvs
Parameters:
messages_filepath (str): Path to messages csv
categories_filepath (str): Path to categories csv
Returns:
Dataframe: Merged data
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(messages,categories,on='id')
return df
def clean_data(df):
"""
Cleans the categories
Parameters:
df (DataFrame): Messy DataFrame
Returns:
Dataframe: Cleaned dataframe
"""
categories = df['categories'].str.split( pat=';', expand=True)
row = categories.iloc[[1]]
category_colnames = row.apply(lambda x : x.values[0].split("-")[0])
categories.columns = category_colnames
for column in categories:
categories[column] = categories[column].astype(str).str[-1:]
categories[column] = categories[column].astype(int)
categories[column] = categories[column].map(lambda x: 1 if x > 1 else x)
df.drop(['categories'], axis=1, inplace=True)
    df = pd.concat([df, categories], axis=1)
df.drop_duplicates(inplace=True)
return df
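# Illustrative example of the raw "categories" string this function expects:
#   "related-1;request-0;offer-0;..."  ->  integer columns related=1, request=0, offer=0, ...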
def save_data(df, database_filename):
"""
Saves the DataFrame
Parameters:
df (DataFrame): Cleaned DataFrame
database_filename (DataFrame): Path to the SQLite Database
"""
engine = create_engine('sqlite:///' + database_filename + '.db')
df.to_sql(database_filename, engine, index=False, if_exists='replace')
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main() | 3.09375 | 3 |
contrail-controller/files/plugins/check_contrail_status_controller.py | atsgen/tf-charms | 0 | 5378 | #!/usr/bin/env python3
import subprocess
import sys
import json
SERVICES = {
'control': [
'control',
'nodemgr',
'named',
'dns',
],
'config-database': [
'nodemgr',
'zookeeper',
'rabbitmq',
'cassandra',
],
'webui': [
'web',
'job',
],
'config': [
'svc-monitor',
'nodemgr',
'device-manager',
'api',
'schema',
],
}
WARNING = 1
CRITICAL = 2
def get_contrail_status_txt(services):
try:
output = subprocess.check_output("export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status", shell=True).decode('UTF-8')
except subprocess.CalledProcessError as err:
message = ('CRITICAL: Could not get contrail-status.'
' return code: {} cmd: {} output: {}'.
format(err.returncode, err.cmd, err.output))
print(message)
sys.exit(CRITICAL)
statuses = dict()
group = None
for line in output.splitlines()[1:]:
words = line.split()
if len(words) == 4 and words[0] == '==' and words[3] == '==':
group = words[2]
continue
if len(words) == 0:
group = None
continue
if group and len(words) >= 2 and group in services:
srv = words[0].split(':')[0]
statuses.setdefault(group, list()).append(
{srv: ' '.join(words[1:])})
return statuses
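# Illustrative sketch of the plain-text layout the parser above expects:
#   == Contrail control ==
#   control: active
#   nodemgr: active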
def get_contrail_status_json(services):
try:
output = json.loads(subprocess.check_output("export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status --format json", shell=True).decode('UTF-8'))
except subprocess.CalledProcessError as err:
message = ('CRITICAL: Could not get contrail-status.'
' return code: {} cmd: {} output: {}'.
format(err.returncode, err.cmd, err.output))
print(message)
sys.exit(CRITICAL)
statuses = output["pods"]
return statuses
def check_contrail_status(services, version=None):
if version > 1912:
statuses = get_contrail_status_json(services)
else:
statuses = get_contrail_status_txt(services)
for group in services:
if group not in statuses:
message = ('WARNING: POD {} is absent in the contrail-status'
.format(group))
print(message)
sys.exit(WARNING)
for srv in services[group]:
if not any(srv in key for key in statuses[group]):
message = ('WARNING: {} is absent in the contrail-status'
.format(srv))
print(message)
sys.exit(WARNING)
status = next(stat[srv] for stat in statuses[group] if srv in stat)
if status not in ['active', 'backup']:
message = ('CRITICAL: {} is not ready. Reason: {}'
.format(srv, status))
print(message)
sys.exit(CRITICAL)
print('Contrail status OK')
sys.exit()
if __name__ == '__main__':
cver = sys.argv[1]
if '.' in str(cver):
if cver == '5.0':
version = 500
elif cver == '5.1':
version = 510
else:
print("CRITICAL: invalid version: {}".format(cver))
sys.exit(CRITICAL)
elif not cver.isdigit():
print("CRITICAL: invalid version: {}".format(cver))
sys.exit(CRITICAL)
else:
version = int(cver)
check_contrail_status(SERVICES, version=version)
| 2.53125 | 3 |
leaderboard-server/leaderboard-server.py | harnitsignalfx/skogaming | 1 | 5379 |
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
import simplejson as json
from leaderboard.leaderboard import Leaderboard
import uwsgidecorators
import signalfx
app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
cors = CORS(app)
highscore_lb_starship = Leaderboard('highscores-starship',host='redis-instance')
sfx = signalfx.SignalFx(ingest_endpoint='http://otelcol:9943').ingest('token-at-collector')
def parseData(row):
metricDump1 = {}
counterArray = []
metricDump1["dimensions"] = {}
metricDump1["dimensions"]["ip"] = row["ip"] # dimension
metricDump1["metric"] = "starship.shots"
metricDump1["value"] = row["shots"]
counterArray.append(metricDump1)
print('Sending data:',counterArray)
sfx.send(counters=counterArray)
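# Illustrative payload produced for {'ip': '10.0.0.1', 'shots': 3} (values hypothetical):
#   [{'metric': 'starship.shots', 'value': 3, 'dimensions': {'ip': '10.0.0.1'}}]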
@app.route('/health')
def health():
return '{"status":"OK"}', 200
@app.route('/leaders/<game>')
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
def returnLeaders(game):
if game == "starship":
return json.dumps(highscore_lb_starship.all_leaders()), 200
return '{}', 200
@app.route('/submitScores', methods=['POST'])
@cross_origin(origin='localhost',headers=['Content-Type','application/json'])
def submitScores():
content = request.get_json(force=True)
print('Content:',content)
if "game" in content:
if content["game"]=="starship":
highscore_lb_starship.rank_member(content["aduser"], content["score"])
return '{"status":"OK"}', 200
@app.route("/get_my_ip", methods=["GET"])
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
def get_my_ip():
if 'X-Real-Ip' in request.headers:
return jsonify({'ip':request.headers['X-Real-Ip']}), 200
else:
return jsonify({'ip':'-'}), 200
#return json.dumps({k:v for k, v in request.headers.items()}), 200
@app.route('/submitShots', methods=['POST'])
@cross_origin(origin='localhost',headers=['Content-Type','application/json'])
def submitShots():
content = request.get_json(force=True)
print('Content:',content)
shotSubmission = {}
totalShots = 0
if "game" in content:
if content["game"]=="starship":
if "shots" in content:
totalShots = content["shots"]
shotSubmission["shots"] = totalShots
if 'X-Real-Ip' in request.headers:
shotSubmission["ip"] = request.headers['X-Real-Ip']
else:
shotSubmission["ip"] = "-"
parseData(shotSubmission)
return '{"status":"OK"}', 200
if __name__ == '__main__':
app.run(host='0.0.0.0', port=6001)
| 2.6875 | 3 |
meshio/_cli/_info.py | jorgensd/meshio | 1 | 5380 | import argparse
import numpy as np
from .._helpers import read, reader_map
from ._helpers import _get_version_text
def info(argv=None):
# Parse command line arguments.
parser = _get_info_parser()
args = parser.parse_args(argv)
# read mesh data
mesh = read(args.infile, file_format=args.input_format)
print(mesh)
# check if the cell arrays are consistent with the points
is_consistent = True
for cells in mesh.cells:
        if np.any(cells.data >= mesh.points.shape[0]):
print("\nATTENTION: Inconsistent mesh. Cells refer to nonexistent points.")
is_consistent = False
break
# check if there are redundant points
if is_consistent:
point_is_used = np.zeros(mesh.points.shape[0], dtype=bool)
for cells in mesh.cells:
point_is_used[cells.data] = True
if np.any(~point_is_used):
print("ATTENTION: Some points are not part of any cell.")
def _get_info_parser():
parser = argparse.ArgumentParser(
description=("Print mesh info."), formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument("infile", type=str, help="mesh file to be read from")
parser.add_argument(
"--input-format",
"-i",
type=str,
choices=sorted(list(reader_map.keys())),
help="input file format",
default=None,
)
parser.add_argument(
"--version",
"-v",
action="version",
version=_get_version_text(),
help="display version information",
)
return parser
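# Illustrative invocation of this subcommand (entry-point name may vary by install):
#   meshio info mesh.vtu -i vtk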
| 2.84375 | 3 |
ccslink/Zip.py | Data-Linkage/ccslink | 0 | 5381 |
import os, shutil
from CCSLink import Spark_Session as SS
def add_zipped_dependency(zip_from, zip_target):
"""
This method creates a zip of the code to be sent to the executors.
It essentially zips the Python packages installed by PIP and
submits them via addPyFile in the current PySpark context
E.g. if we want to submit "metaphone" package so that we
can do use `import metaphone` and use its methods inside UDF,
we run this method with:
- zip_from = /home/cdsw/.local/lib/python3.6/site-packages/
- zip_target = metaphone
"""
# change this to a path in your project
zipped_fpath = f'/home/cdsw/zipped_packages/{zip_target}'
if os.path.exists(zipped_fpath + '.zip'):
os.remove(zipped_fpath + '.zip')
shutil.make_archive(
# path to the resulting zipped file (without the suffix)
base_name=zipped_fpath, # resulting filename
# specifies the format --> implies .zip suffix
format='zip',
# the root dir from where we want to zip
root_dir=zip_from,
# the dir (relative to root dir) which we want to zip
# (all files in the final zip will have this prefix)
base_dir=zip_target,
)
# add the files to the executors
SS.SPARK().sparkContext.addPyFile(f'{zipped_fpath}.zip')
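# Illustrative call matching the docstring example above (paths are environment-specific):
#   add_zipped_dependency('/home/cdsw/.local/lib/python3.6/site-packages/', 'metaphone')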
| 2.640625 | 3 |
moltemplate/nbody_Angles.py | Mopolino8/moltemplate | 0 | 5382 |
try:
from .nbody_graph_search import Ugraph
except (SystemError, ValueError):
# not installed as a package
from nbody_graph_search import Ugraph
# This file defines how 3-body angle interactions are generated by moltemplate
# by default. It can be overridden by supplying your own custom file.
# To find 3-body "angle" interactions, we would use this subgraph:
#
#
# *---*---* => 1st bond connects atoms 0 and 1
# 0 1 2 2nd bond connects atoms 1 and 2
#
bond_pattern = Ugraph([(0, 1), (1, 2)])
# (Ugraph atom indices begin at 0, not 1)
# The next function eliminates the redundancy between 0-1-2 and 2-1-0:
def canonical_order(match):
"""
Before defining a new interaction, we must check to see if an
interaction between these same 3 atoms has already been created
(perhaps listed in a different, but equivalent order).
    If we don't check for this, we will create many unnecessary redundant
    interactions (which can slow down the simulation).
To avoid this, I define a "canonical_order" function which sorts the atoms
and bonds in a way which is consistent with the symmetry of the interaction
being generated... Later the re-ordered list of atom and bond ids will be
tested against the list of atom/bond ids in the matches-found-so-far,
before it is added to the list of interactions found so far. Note that
the energy of an angle interaction is a function of the angle between.
three consecutively bonded atoms (referred to here as: 0,1,2).
This angle does not change when swapping the atoms at either end (0 and 2).
So it does not make sense to define a separate 3-body angle
interaction between atoms 0,1,2 AS WELL AS an interaction between 2,1,0.
So we sort the atoms and bonds so that the first atom has a always has
a lower atomID than the third atom. (Later we will check to see if we
have already defined an interaction between these 3 atoms. If not then
we create a new one.)
"""
# match[0][0:2] contains the ID numbers for the 3 atoms in the match
atom0 = match[0][0]
atom1 = match[0][1]
atom2 = match[0][2]
# match[1][0:1] contains the ID numbers for the 2 bonds
bond0 = match[1][0]
bond1 = match[1][1]
if atom0 < atom2:
# return ((atom0, atom1, atom2), (bond0, bond1)) same thing as:
return match
else:
return ((atom2, atom1, atom0), (bond1, bond0))
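# Illustrative example: a match ((5, 2, 1), (10, 11)) is reordered to
# ((1, 2, 5), (11, 10)), so 1-2-5 and 5-2-1 map to the same interaction.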
| 2.5 | 2 |
extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py | DougRogers-DigitalFish/USD | 3,680 | 5383 | #!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
'''
Creates a top-level, referenceable asset USD file from one or more
'variant' files, each of which can contain arbitrary scene description.
When supplying multiple files, one must also provide the name for a
variantSet that will be constructed to switch between the files.
The asset file will place the variant files behind a "payload", which will
enable consumers to defer loading and processing of the data when composed
onto a UsdStage.
The names of the created variations will be taken directly from the basename
of their corresponding input file.
'''
from __future__ import print_function
from pxr import Tf, Kind, Sdf, Usd
# ToDo:
# - handle multiple variantSets
# - layer multiple kinds of files (e.g. shading.usd over geom.usd)
# - allow output filename to be independently specifiable? (Breaks with Pixar
# convention)
# - allow variant names to be specified independently of variant file names
# - Compute and present (per-variant) UsdGeomModelAPI.extentsHint
# - Compute and author UsdModelAPI::SetPayloadAssetDependencies()
def CreateModelStage(assetName,
assetIdentifier=None,
kind=Kind.Tokens.component,
filesToReference=None,
variantSetName=None,
defaultVariantSelection=None):
# Preconditions....
if not Tf.IsValidIdentifier(assetName):
print("assetName '%s' must be a valid identifier. Aborting." %
assetName)
return None
if variantSetName and not Tf.IsValidIdentifier(variantSetName):
print("variantSetName '%s' must be a valid identifier. Aborting." %
variantSetName)
return None
if filesToReference and len(filesToReference) > 1 and not variantSetName:
# For now, we only allow multiple files to reference if we're switching
# them with a variantSet. We can relax this restriction when we can
# make internal payload arcs (bug #119960)
print("Cannot create multiple-file-reference without a variantSet. Aborting")
return None
if not Kind.Registry.IsA(kind, Kind.Tokens.model):
print("kind '%s' is not a valid model kind, which must be one of:" %
kind)
print(Kind.Registry.GetAllKinds())
return None
# Create the root file for the stage, and make it ASCII text.
# We need some nicer sugar for this.
fileName = assetName + ".usd"
rootLayer = Sdf.Layer.CreateNew(fileName, args = {'format':'usda'})
stage = Usd.Stage.Open(rootLayer)
# Name the root prim after the asset. Don't give it a type, since we
# want that to come from referenced files. Make it be the "default prim"
    # so that we can reference the resulting file without specifying a
# prim path
rootPath = Sdf.Path.absoluteRootPath
modelRootPrim = stage.DefinePrim(rootPath.AppendChild(assetName))
stage.SetDefaultPrim(modelRootPrim)
modelAPI = Usd.ModelAPI(modelRootPrim)
modelAPI.SetKind(kind)
# See http://openusd.org/docs/api/class_usd_model_a_p_i.html#details
# for more on assetInfo
modelAPI.SetAssetName(assetName)
modelAPI.SetAssetIdentifier(assetIdentifier or fileName)
# Add a class named after the asset, and make the asset inherit from it.
# This is not necessary for a valid asset, and the class-naming is a Pixar
# convention. But always having a class associated with each asset is
# extremely useful for non-destructively editing many referenced or
# instanced assets of the same type.
classPrim = stage.CreateClassPrim(rootPath.AppendChild("_class_"+assetName))
modelRootPrim.GetInherits().AddInherit(classPrim.GetPath())
if not filesToReference:
# weird edge case... we're done
return stage
elif len(filesToReference) == 1 and not variantSetName:
# The other, more plausible edge case: we're just wrapping
# some other file (e.g. alembic) in order to give it a payload
# and other proper USD trappings - no variants
modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(filesToReference[0]))
return stage
# OK, we're making a variantSet, and we are going to vary the payload
# in each variant
varSet = modelRootPrim.GetVariantSet(variantSetName)
for variantFile in filesToReference:
import os
variantName = os.path.splitext(os.path.basename(variantFile))[0]
# If we didn't specify a default selection, choose the first one
if not defaultVariantSelection:
defaultVariantSelection = variantName
varSet.AddVariant(variantName)
varSet.SetVariantSelection(variantName)
# The context object makes all edits "go inside" the variant we
# just created.
with varSet.GetVariantEditContext():
modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(variantFile))
# Now put the variantSet into the state we want it to be in by default
varSet.SetVariantSelection(defaultVariantSelection)
return stage
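# Illustrative call (asset and file names are hypothetical):
#   stage = CreateModelStage('Chair', filesToReference=['chairA.usd', 'chairB.usd'],
#                            variantSetName='modelingVariant')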
if __name__ == "__main__":
import argparse, os, sys
descr = __doc__.strip()
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
description=descr)
parser.add_argument('assetName')
parser.add_argument('variantFiles', nargs='+')
parser.add_argument(
'-k', '--kind', default='component', action='store', metavar='kind',
help="Model kind, one of: component, group, or assembly")
parser.add_argument(
'-v', '--variantSet', default='', action='store', metavar='variantSet',
help="Variantset to create to modulate variantFiles. Can be elided "
"if only one file is supplied")
parser.add_argument(
'-i', '--identifier', default='', action='store', metavar='identifier',
help="The identifier you would expect your Ar asset-resolver plugin "
"to resolve to the (installed) assetName.usd file this script creates. "
" If unspecified, defaults to assetName.usd")
parser.add_argument(
'-d', '--defaultVariantSelection', default='', action='store',
metavar='defaultVariantSelection',
help="This variant will be selected by default when the asset is "
"added to a composition. If unspecified, will be the variant for "
"'variantFile1'")
args = parser.parse_args()
if not args.assetName or args.assetName == '':
parser.error("No assetName specified")
stage = CreateModelStage(args.assetName,
assetIdentifier=args.identifier,
kind=args.kind,
filesToReference=args.variantFiles,
variantSetName=args.variantSet,
defaultVariantSelection=args.defaultVariantSelection)
if stage:
stage.GetRootLayer().Save()
exit(0)
else:
exit(1)
| 1.664063 | 2 |
src/main.py | fbdp1202/pyukf_kinect_body_tracking | 7 | 5384 |
import sys
import os
sys.path.append('./code/')
from skeleton import Skeleton
from read_data import *
from calibration import Calibration
from ukf_filter import ukf_Filter_Controler
from canvas import Canvas
from regression import *
import time
from functools import wraps
import os
def check_time(function):
@wraps(function)
def measure(*args, **kwargs):
start_time = time.time()
result = function(*args, **kwargs)
end_time = time.time()
print(f"@check_time: {function.__name__} took {end_time - start_time}")
return result
return measure
def get_dir_name(dir):
dir_list = []
for name in os.listdir(dir):
path = dir + '/' + name
if not os.path.isfile(path):
dir_list.append(name)
return dir_list
def scan_dir(dir):
dir_list = []
for name in os.listdir(dir):
path = dir + '/' + name
if os.path.isfile(path):
dir_list.append(path)
return dir_list
@check_time
def merge_skeleton_data(folder_name):
save_file_name = folder_name + '.txt'
dir_list = scan_dir(folder_name)
wf = open(save_file_name, 'w')
for filename in dir_list:
f = open(filename, 'r')
line = f.readline()
wf.write(line)
wf.close()
return save_file_name
@check_time
def init_simul(filename, test_num, cbr_num=50, div_step=1):
data = read_data_skeleton(filename)
# test_num, data = interval_compasation(data, test_num, div_step)
test_num = min(test_num, len(data))
skeletons = []
for i in range(test_num):
skeletons.append(Skeleton(data[i]))
cbr_num = min(test_num, cbr_num)
cal_skeletons = []
for i in range(cbr_num):
cal_skeletons.append(skeletons[i*div_step])
calibration = Calibration(cal_skeletons)
lower_init_mean, upper_init_mean = calibration.get_init_mean(0, filename)
return skeletons, lower_init_mean, upper_init_mean, test_num
@check_time
def make_filter(lower_init_mean, lower_init_cov, upper_init_mean, upper_init_cov, model):
flt = None
if model == 'ukf':
flt = ukf_Filter_Controler(lower_init_mean, lower_init_cov, upper_init_mean, upper_init_cov)
else:
print(model, "is not exist model name")
return flt
@check_time
def run_ukf(ukf, skeletons, test_num):
original_data = []
estimate_data = []
estimate_state = []
test_num = min(len(skeletons), test_num)
print("total test is {}".format(test_num))
print("test_num:", end=' ')
for i in range(test_num):
curr_input = skeletons[i].get_measurement()
original_data.append(curr_input)
state, data = ukf.update(curr_input)
estimate_data.append(data)
estimate_state.append(state)
if i % 10 == 0:
print(i, end=' ')
print('')
return original_data, estimate_data, estimate_state
def make_folder(folder_name):
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
return folder_name
def get_save_skeleton_data_folder_name(person_name, pos_mode, model):
folder_name = make_folder('result')
folder_name = make_folder(folder_name + '/' + person_name)
folder_name = make_folder(folder_name + '/' + pos_mode)
folder_name = make_folder(folder_name + '/' + model)
return folder_name + '/'
def save_sk_data_to_csv(folder_name, filename, data):
    filename = folder_name + filename
    # context manager ensures the file is flushed and closed
    with open(filename, "w", encoding="UTF-8") as f:
        for i in range(len(data)):
            for j in range(len(data[i])):
                for k in range(3):
                    f.write(str(data[i][j][k]))
                    if j == (len(data[i])-1) and k == 2:
                        f.write('\n')
                    else:
                        f.write(',')

def save_sk_state_to_csv(folder_name, filename, data):
    filename = folder_name + filename
    with open(filename, 'w', encoding="UTF-8") as f:
        for i in range(len(data)):
            for j in range(len(data[i])):
                f.write(str(data[i][j]))
                if j == (len(data[i])-1):
                    f.write('\n')
                else:
                    f.write(',')
@check_time
def save_skeleton_data_to_csv(person_name, pos_mode, original_data, estimate_data, estimate_state, model):
csv_folder_name = get_save_skeleton_data_folder_name(person_name, pos_mode, model)
save_sk_data_to_csv(csv_folder_name, 'original_data.csv', original_data)
save_sk_data_to_csv(csv_folder_name, 'estimate_data.csv', estimate_data)
save_sk_state_to_csv(csv_folder_name, 'estimate_state.csv', estimate_state)
def read_csv(filename):
data = []
with open(filename, 'r') as reader:
for line in reader:
fields = line.split(',')
fields[len(fields)-1] = fields[len(fields)-1].replace('\n', '')
for i in range(len(fields)):
data.append(float(fields[i]))
data = np.array(data).reshape((int)(len(data)/32/3), 32, 3)
skeletons = []
for d in data:
skeletons.append(Skeleton(d))
return skeletons
@check_time
def read_skeleton_data_from_csv(person_name, pos_mode, model):
csv_folder_name = get_save_skeleton_data_folder_name(person_name, pos_mode, model)
original_data = read_csv(csv_folder_name + 'original_data.csv')
estimate_data = read_csv(csv_folder_name + 'estimate_data.csv')
return original_data, estimate_data
def get_save_image_file_name(person_name, pos_mode, model, plot_mode):
folder_name = make_folder('result')
folder_name = make_folder(folder_name + '/' + person_name)
folder_name = make_folder(folder_name + '/' + pos_mode)
folder_name = make_folder(folder_name + '/' + model)
folder_name = make_folder(folder_name + '/' + plot_mode)
return folder_name + '/'
@check_time
def skeleton_draw(person_name, pos_mode, model, original_data, estimate_data, sleep_t=100):
canvas = Canvas()
img_name_point = get_save_image_file_name(person_name, pos_mode, model, 'point')
img_name_length = get_save_image_file_name(person_name, pos_mode, model, 'length')
img_name_3D = get_save_image_file_name(person_name, pos_mode, model, 'plot_3D')
# canvas.skeleton_3D_plot(original_data, estimate_data)
canvas.skeleton_3D_animation_save(original_data, estimate_data, sleep_t, img_name_3D)
canvas.skeleton_point_plot(original_data, estimate_data, img_name_point)
canvas.skeleton_length_plot(original_data, estimate_data, img_name_length)
def set_lower_init_cov(value_cov=1e-6, velo_cov_0=1e-4, velo_cov_1=1e-2, len_cov=1e-10, obs_cov_factor=1e-4, trans_factor=100):
return [value_cov, velo_cov_0,value_cov, velo_cov_0,value_cov, velo_cov_1,value_cov, velo_cov_1,value_cov, velo_cov_0, len_cov,obs_cov_factor, trans_factor]
def set_upper_init_cov(value_cov=1e-6, velo_cov=1e-4, len_cov=1e-10, obs_cov_factor=1e-4, trans_factor=100):
return [value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,len_cov,obs_cov_factor,trans_factor]
@check_time
def simulation_ukf(filename, test_num, cbr_num, model):
skeletons, lower_init_mean, upper_init_mean, test_num = init_simul(filename, test_num, cbr_num)
lower_init_cov = set_lower_init_cov()
upper_init_cov = set_upper_init_cov()
flt = make_filter(lower_init_mean, lower_init_cov, upper_init_mean, upper_init_cov, model)
original_data, estimate_data, estimate_state = run_ukf(flt, skeletons, test_num)
return original_data, estimate_data, estimate_state
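# Illustrative end-to-end run (path and names are hypothetical):
#   orig, est, state = simulation_ukf('data/person1_walk.txt', test_num=300, cbr_num=50, model='ukf')
#   save_skeleton_data_to_csv('person1', 'walk', orig, est, state, 'ukf')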
| 2.15625 | 2 |
cfgov/scripts/initial_data.py | Mario-Kart-Felix/cfgov-refresh | 1 | 5385 | from __future__ import print_function
import json
import os
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from wagtail.wagtailcore.models import Page, Site
from v1.models import HomePage, BrowseFilterablePage
def run():
print('Running script \'scripts.initial_data\' ...')
admin_user = None
site_root = None
events = None
admin_user = User.objects.filter(username='admin')
if not admin_user:
admin_user = User(username='admin',
password=make_password(os.environ.get('WAGTAIL_ADMIN_PW')),
is_superuser=True, is_active=True, is_staff=True)
admin_user.save()
else:
admin_user = admin_user[0]
# Creates a new site root `CFGov`
site_root = HomePage.objects.filter(title='CFGOV')
if not site_root:
root = Page.objects.first()
site_root = HomePage(title='CFGOV', slug='home-page', depth=2, owner=admin_user)
site_root.live = True
root.add_child(instance=site_root)
latest = site_root.save_revision(user=admin_user, submitted_for_moderation=False)
latest.save()
else:
site_root = site_root[0]
# Setting new site root
if not Site.objects.filter(hostname='content.localhost').exists():
site = Site.objects.first()
site.port = 8000
site.root_page_id = site_root.id
site.save()
content_site = Site(hostname='content.localhost', port=8000, root_page_id=site_root.id)
content_site.save()
# Clean Up
old_site_root = Page.objects.filter(id=2)[0]
if old_site_root:
old_site_root.delete()
# Events Browse Page required for event `import-data` command
if not BrowseFilterablePage.objects.filter(title='Events').exists():
events = BrowseFilterablePage(title='Events', slug='events', owner=admin_user)
site_root.add_child(instance=events)
revision = events.save_revision(
user=admin_user,
submitted_for_moderation=False,
)
revision.publish()
# Archived Events Browse Filterable Page
if not BrowseFilterablePage.objects.filter(title='Archive').exists():
archived_events = BrowseFilterablePage(title='Archive', slug='archive', owner=admin_user)
if not events:
events = BrowseFilterablePage.objects.get(title='Events')
events.add_child(instance=archived_events)
revision = archived_events.save_revision(
user=admin_user,
submitted_for_moderation=False,
)
revision.publish()
| 1.9375 | 2 |
Scripts/compareOutputs.py | harmim/vut-avs-project1 | 0 | 5386 |
# Simple python3 script to compare output with a reference output.
# Usage: python3 compareOutputs.py testOutput.h5 testRefOutput.h5
import sys
import h5py
import numpy as np
if len(sys.argv) != 3:
print("Expected two arguments. Output and reference output file.")
sys.exit(1)
filename = sys.argv[1]
ref_filename = sys.argv[2]
f = h5py.File(filename, 'r')
ref_f = h5py.File(ref_filename, 'r')
out = np.array(f['output_data'])
out_ref = np.array(ref_f['output_data'])
if out.shape != out_ref.shape:
print("The files do not contain the same number of outputs.")
print("The output size: {0}.".format(out.shape[0]))
print("The reference size: {0}.".format(out_ref.shape[0]))
sys.exit(1)
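# Element-wise relative error; zeros in the reference are first replaced by 1.0
# so the division below cannot divide by zero.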
ref_value = np.copy(out_ref)
ref_value[ref_value == 0.0] = 1.0
error = (out_ref - out) / ref_value
maximal_error = np.amax(error)
print("Maximal error between the output and the reference is {0}.".format(maximal_error))
if maximal_error < 10**(-6):
print("OK:Output seems to match the reference.")
sys.exit(0)
print("Failure:Output does not match the reference.")
maximal_error = np.amax(error, axis=1)
print(maximal_error.shape)
for i in range(0, 5):
print("Image", i)
print("Expected:", end="")
for j in range(0, 10):
print(out_ref[i, j], end = " ")
print("\nGot:", end="")
for j in range(0, 10):
print(out[i, j], end=" ")
print("\nMaximal error:", maximal_error[i], "\n")
sys.exit(1)
| 3.140625 | 3 |
sanctuary/tag/serializers.py | 20CM/Sanctuary | 1 | 5387 |
# -*- coding: utf-8 -*-
from rest_framework import serializers
from .models import Tag
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
read_only_fields = ('topics_count',)
| 1.546875 | 2 |
examples/management_api/aliveness_test.py | cloudamqp/amqpstorm | 0 | 5388 |
from amqpstorm.management import ApiConnectionError
from amqpstorm.management import ApiError
from amqpstorm.management import ManagementApi
if __name__ == '__main__':
API = ManagementApi('http://127.0.0.1:15672', 'guest', 'guest')
try:
result = API.aliveness_test('/')
if result['status'] == 'ok':
print("RabbitMQ is alive!")
else:
print("RabbitMQ is not alive! :(")
except ApiConnectionError as why:
print('Connection Error: %s' % why)
except ApiError as why:
print('ApiError: %s' % why)
| 2.671875 | 3 |
src/zvt/recorders/em/meta/em_stockhk_meta_recorder.py | vishalbelsare/zvt | 2,032 | 5389 | # -*- coding: utf-8 -*-
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder
from zvt.domain.meta.stockhk_meta import Stockhk
from zvt.recorders.em import em_api
class EMStockhkRecorder(Recorder):
provider = "em"
data_schema = Stockhk
def run(self):
df_south = em_api.get_tradable_list(entity_type="stockhk", hk_south=True)
df_south = df_south.set_index("code", drop=False)
df_south["south"] = True
df = em_api.get_tradable_list(entity_type="stockhk")
df = df.set_index("code", drop=False)
df_other = df.loc[~df.index.isin(df_south.index)].copy()
df_other["south"] = False
df_to_db(df=df_south, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
df_to_db(df=df_other, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
if __name__ == "__main__":
recorder = EMStockhkRecorder()
recorder.run()
# the __all__ is generated
__all__ = ["EMStockhkRecorder"]
| 1.9375 | 2 |
src/main.py | yanwunhao/auto-mshts | 0 | 5390 |
from util.io import read_setting_json, read_0h_data, read_24h_data, draw_single_curve
from util.convert import split_array_into_samples, calculate_avg_of_sample, convert_to_percentage
from util.calculus import calculate_summary_of_sample, fit_sigmoid_curve
import matplotlib.pyplot as plt
import numpy as np
import csv
setting = read_setting_json()
setting = setting["rule"]
# load experiment parameter
# experiment parameter is stored in file of ./data/setting.json
initial_filename = setting["0h_datafile"]
final_filename = setting["24h_datafile"]
# sample width and height are the size of each sample area
sample_width = setting["sample_width"]
sample_height = setting["sample_height"]
dilution_protocol = setting["dilution_protocol"]
# width of each dilution
basic_width = setting["basic_width"]
# number of each control group
control_number_list = setting["control_number"]
# output directory
output_directory = setting["output_directory"]
# import initial concentration and calculate x_data
initial_concentration = setting["initial_concentration"]
repeat_times = int(sample_width / basic_width)
x_data = []
current_concentration = initial_concentration
for i in range(repeat_times):
x_data.append(current_concentration)
current_concentration /= dilution_protocol
# load raw data
initial_sd_data = read_0h_data()
final_sd_data = read_24h_data()
# reshape data into the size of board
rebuild_0h_data = initial_sd_data.reshape((32, -1))
rebuild_24h_data = final_sd_data.reshape((32, -1))
# reshape data into a 2-dimensional array contains each group data
sample_divided_list_0h = split_array_into_samples(rebuild_0h_data, sample_width, sample_height)
sample_divided_list_24h = split_array_into_samples(rebuild_24h_data, sample_width, sample_height)
# handle data of control groups
control_0h_summary = 0
for number in control_number_list:
number = number - 1
sample = sample_divided_list_0h[number]
control_0h_summary = control_0h_summary + calculate_summary_of_sample(sample)
control_0h_average = control_0h_summary / (sample_width * sample_height * len(control_number_list))
control_24h_summary = 0
for number in control_number_list:
number = number - 1
sample = sample_divided_list_24h[number]
control_24h_summary = control_24h_summary + calculate_summary_of_sample(sample)
control_24h_average = control_24h_summary / (sample_width * sample_height * len(control_number_list))
# calculate standard deviation of each grid
sd_matrix = []
for line in rebuild_24h_data:
new_line = []
for element in line:
sd_data = (float(element) - control_0h_average.item()) \
/ (control_24h_average.item() - control_0h_average.item())
new_line.append(sd_data)
sd_matrix.append(new_line)
sd_matrix = np.array(sd_matrix)
# split array into different samples
sd_groups = split_array_into_samples(sd_matrix, sample_width, sample_height)
sd_groups = np.array(sd_groups, dtype=float)
RESULT_LIST = []
for sample in sd_groups:
result = calculate_avg_of_sample(sample, sample_width, basic_width)
RESULT_LIST.append(result)
RESULT_LIST = np.array(RESULT_LIST)
FULL_RESULT_LIST = []
for group in sd_groups:
x_index = 0
y_index = 0
sample_buffer = []
data_buffer = []
while y_index < sample_height:
while x_index < basic_width:
x = x_index
while x < sample_width:
data_buffer.append(group[y_index][x])
x += basic_width
sample_buffer.append(data_buffer)
data_buffer = []
x_index += 1
y_index += 1
x_index = 0
FULL_RESULT_LIST.append(sample_buffer)
FULL_RESULT_LIST = np.array(FULL_RESULT_LIST, dtype=float)
optional_color = ['red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple']
EC50_LIST = []
EC50_AVG_LIST = []
sample_num = 0
for SAMPLE in FULL_RESULT_LIST:
sample_num += 1
fig, ax = plt.subplots()
index = 0
ax.set_title('Sample '+str(sample_num))
x_buffer = []
x_sampling_buffer = []
y_sampling_buffer = []
for repeat in SAMPLE:
x, y, x_sampling, y_sampling = fit_sigmoid_curve(x_data, repeat)
x_buffer.append(x)
x_sampling_buffer.append(x_sampling)
y_sampling_buffer.append(y_sampling)
draw_single_curve(ax, x, y, x_sampling, y_sampling, optional_color[index])
index += 1
EC50_LIST.append(x_buffer)
# draw the average result
avg = np.mean(x_buffer)
EC50_AVG_LIST.append(avg)
# draw the average curve
x_sampling_buffer = np.array(x_sampling_buffer).T
y_sampling_buffer = np.array(y_sampling_buffer).T
x_sampling_avg = []
y_sampling_avg = []
for line in x_sampling_buffer:
x_sampling_avg.append(np.mean(line))
for line in y_sampling_buffer:
y_sampling_avg.append(np.mean(line))
ax.plot(avg, 0.5, 'o', color='black')
ax.plot(x_sampling_avg, y_sampling_avg, color='black')
plt.savefig("./output/" + output_directory + "/figs" + "/Sample " + str(sample_num))
plt.cla()
plt.close(fig)
# output grouped result
output_f_grouped = open("./output/" + output_directory + "/result_grouped.csv", "w")
csv_writer_grouped = csv.writer(output_f_grouped)
csv_writer_grouped.writerow(["initial concentration: " + str(initial_concentration), "dilution protocol: " + str(dilution_protocol)])
csv_writer_grouped.writerow("")
sample_num = 0
for SAMPLE in FULL_RESULT_LIST:
SAMPLE = SAMPLE.T
sample_num += 1
csv_writer_grouped.writerow(["Sample " + str(sample_num)])
for repeat in SAMPLE:
csv_writer_grouped.writerow(repeat)
csv_writer_grouped.writerow("")
ec50_result_list = []
for ec50_index in EC50_LIST[sample_num-1]:
ec50_result_list.append(10**ec50_index)
csv_writer_grouped.writerow(ec50_result_list)
average_ec50 = np.power(10, EC50_AVG_LIST[sample_num-1])
csv_writer_grouped.writerow([])
csv_writer_grouped.writerow(["Average EC50", "Std"])
csv_writer_grouped.writerow([average_ec50, np.std(ec50_result_list)])
csv_writer_grouped.writerow("")
output_f_grouped.close()
output_f_full = open("./output/" + output_directory + "/result_full.csv", "w")
csv_writer_full = csv.writer(output_f_full)
for line in sd_matrix:
csv_writer_full.writerow(line)
output_f_full.close()
print("Finished")
| 2.6875 | 3 |
twisted/names/root.py | twonds/twisted | 1 | 5391 | # -*- test-case-name: twisted.names.test.test_rootresolve -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Resolver implementation for querying successive authoritative servers to
lookup a record, starting from the root nameservers.
@author: <NAME>
todo::
robustify it
break discoverAuthority into several smaller functions
documentation
"""
from twisted.internet import defer
from twisted.names import dns
from twisted.names import common
def retry(t, p, *args):
assert t, "Timeout is required"
t = list(t)
def errback(failure):
failure.trap(defer.TimeoutError)
if not t:
return failure
return p.query(timeout=t.pop(0), *args
).addErrback(errback
)
return p.query(timeout=t.pop(0), *args
).addErrback(errback
)
class _DummyController:
def messageReceived(self, *args):
pass
class Resolver(common.ResolverBase):
def __init__(self, hints):
common.ResolverBase.__init__(self)
self.hints = hints
def _lookup(self, name, cls, type, timeout):
d = discoverAuthority(name, self.hints
).addCallback(self.discoveredAuthority, name, cls, type, timeout
)
return d
def discoveredAuthority(self, auth, name, cls, type, timeout):
from twisted.names import client
q = dns.Query(name, type, cls)
r = client.Resolver(servers=[(auth, dns.PORT)])
d = r.queryUDP([q], timeout)
d.addCallback(r.filterAnswers)
return d
def lookupNameservers(host, atServer, p=None):
# print 'Nameserver lookup for', host, 'at', atServer, 'with', p
if p is None:
p = dns.DNSDatagramProtocol(_DummyController())
p.noisy = False
return retry(
(1, 3, 11, 45), # Timeouts
p, # Protocol instance
(atServer, dns.PORT), # Server to query
[dns.Query(host, dns.NS, dns.IN)] # Question to ask
)
def lookupAddress(host, atServer, p=None):
# print 'Address lookup for', host, 'at', atServer, 'with', p
if p is None:
p = dns.DNSDatagramProtocol(_DummyController())
p.noisy = False
return retry(
(1, 3, 11, 45), # Timeouts
p, # Protocol instance
(atServer, dns.PORT), # Server to query
[dns.Query(host, dns.A, dns.IN)] # Question to ask
)
def extractAuthority(msg, cache):
records = msg.answers + msg.authority + msg.additional
nameservers = [r for r in records if r.type == dns.NS]
# print 'Records for', soFar, ':', records
# print 'NS for', soFar, ':', nameservers
if not nameservers:
return None, nameservers
if not records:
raise IOError("No records")
for r in records:
if r.type == dns.A:
cache[str(r.name)] = r.payload.dottedQuad()
for r in records:
if r.type == dns.NS:
if str(r.payload.name) in cache:
return cache[str(r.payload.name)], nameservers
for addr in records:
if addr.type == dns.A and addr.name == r.name:
return addr.payload.dottedQuad(), nameservers
return None, nameservers
def discoverAuthority(host, roots, cache=None, p=None):
if cache is None:
cache = {}
rootAuths = list(roots)
parts = host.rstrip('.').split('.')
parts.reverse()
authority = rootAuths.pop()
soFar = ''
for part in parts:
soFar = part + '.' + soFar
# print '///////', soFar, authority, p
msg = defer.waitForDeferred(lookupNameservers(soFar, authority, p))
yield msg
msg = msg.getResult()
newAuth, nameservers = extractAuthority(msg, cache)
if newAuth is not None:
# print "newAuth is not None"
authority = newAuth
else:
if nameservers:
r = str(nameservers[0].payload.name)
# print 'Recursively discovering authority for', r
authority = defer.waitForDeferred(discoverAuthority(r, roots, cache, p))
yield authority
authority = authority.getResult()
# print 'Discovered to be', authority, 'for', r
## else:
## # print 'Doing address lookup for', soFar, 'at', authority
## msg = defer.waitForDeferred(lookupAddress(soFar, authority, p))
## yield msg
## msg = msg.getResult()
## records = msg.answers + msg.authority + msg.additional
## addresses = [r for r in records if r.type == dns.A]
## if addresses:
## authority = addresses[0].payload.dottedQuad()
## else:
## raise IOError("Resolution error")
# print "Yielding authority", authority
yield authority
discoverAuthority = defer.deferredGenerator(discoverAuthority)
def makePlaceholder(deferred, name):
def placeholder(*args, **kw):
deferred.addCallback(lambda r: getattr(r, name)(*args, **kw))
return deferred
return placeholder
class DeferredResolver:
def __init__(self, resolverDeferred):
self.waiting = []
resolverDeferred.addCallback(self.gotRealResolver)
def gotRealResolver(self, resolver):
w = self.waiting
self.__dict__ = resolver.__dict__
self.__class__ = resolver.__class__
for d in w:
d.callback(resolver)
def __getattr__(self, name):
if name.startswith('lookup') or name in ('getHostByName', 'query'):
self.waiting.append(defer.Deferred())
return makePlaceholder(self.waiting[-1], name)
raise AttributeError(name)
def bootstrap(resolver):
"""Lookup the root nameserver addresses using the given resolver
Return a Resolver which will eventually become a C{root.Resolver}
instance that has references to all the root servers that we were able
to look up.
"""
domains = [chr(ord('a') + i) for i in range(13)]
# f = lambda r: (log.msg('Root server address: ' + str(r)), r)[1]
f = lambda r: r
L = [resolver.getHostByName('%s.root-servers.net' % d).addCallback(f) for d in domains]
d = defer.DeferredList(L)
d.addCallback(lambda r: Resolver([e[1] for e in r if e[0]]))
return DeferredResolver(d)
| 2.59375 | 3 |
tools/apply_colormap_dir.py | edwardyehuang/iDS | 0 | 5392 | # ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import os, sys
rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.insert(1, rootpath)
import tensorflow as tf
import numpy as np
from PIL import Image
from absl import app
from absl import flags
from common_flags import FLAGS
from ids.voc2012 import get_colormap as get_voc2012_colormap
from ids.cityscapes_fine import get_colormap as get_cityscapes_colormap
flags.DEFINE_string("input_dir", None, "input dir path")
flags.DEFINE_string("output_dir", None, "output dir path")
flags.DEFINE_string("colormap", "voc2012", "colormap name")
flags.DEFINE_integer("ignore_label", 255, "ignore label")
def apply_colormap_to_dir(input_dir, output_dir=None, colormap=None):
colormap = colormap.astype(np.uint8)
counter = 0
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for filename in tf.io.gfile.listdir(input_dir):
input_path = os.path.join(input_dir, filename)
output_path = os.path.join(output_dir, filename)
img = Image.open(input_path)
if img.mode != "L" and img.mode != "P":
continue
img = img.convert("P")
img.putpalette(colormap)
img.save(output_path, format="PNG")
counter += 1
tf.print("Processed {}".format(counter))
def main(argv):
colormap_name = FLAGS.colormap
colormap_name = colormap_name.lower()
if colormap_name == "voc2012":
colormap = get_voc2012_colormap()
elif colormap_name == "cityscapes":
colormap = get_cityscapes_colormap()
else:
raise ValueError(f"Not support colormap = {colormap_name}")
if FLAGS.ignore_label == 0:
colormap = colormap[1:]
apply_colormap_to_dir(FLAGS.input_dir, FLAGS.output_dir, colormap=colormap)
if __name__ == "__main__":
app.run(main)
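# Illustrative invocation (flag values are hypothetical):
#   python apply_colormap_dir.py --input_dir preds --output_dir preds_color --colormap voc2012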
| 2.125 | 2 |
kairon/shared/sso/base.py | rit1200/kairon | 9 | 5393 |
class BaseSSO:
async def get_redirect_url(self):
"""Returns redirect url for facebook."""
raise NotImplementedError("Provider not implemented")
async def verify(self, request):
"""
Fetches user details using code received in the request.
:param request: starlette request object
"""
raise NotImplementedError("Provider not implemented")
| 2.265625 | 2 |
EDScoutCore/JournalInterface.py | bal6765/ed-scout | 0 | 5394 |
from inspect import signature
import json
import time
import os
import glob
import logging
from pathlib import Path
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from watchdog.events import PatternMatchingEventHandler
from EDScoutCore.FileSystemUpdatePrompter import FileSystemUpdatePrompter
default_journal_path = os.path.join(str(Path.home()), "Saved Games\\Frontier Developments\\Elite Dangerous")
journal_file_pattern = "journal.*.log"
logger = logging.getLogger('JournalInterface')
class JournalChangeIdentifier:
def __init__(self, journal_path=default_journal_path):
self.journals = {}
self.journal_path = journal_path
logger.debug(f"watching for journal changes in {self.journal_path}")
self._init_journal_lists()
self._new_journal_entry_callback = None
self.latest_journal = self.identify_latest_journal()
# Prompter is required to force the file system to do updates on some systems so we get regular updates from the
# journal watcher.
self.prompter = FileSystemUpdatePrompter(self.latest_journal)
def identify_latest_journal(self):
if len(self.journals.keys()) == 0:
return None
keys = sorted(self.journals.keys())
return keys[-1]
def process_journal_change(self, changed_file):
if changed_file != self.latest_journal:
self.latest_journal = changed_file
self.prompter.set_watch_file(self.latest_journal)
new_size = os.stat(changed_file).st_size
new_data = None
# If the game was loaded after the scout it will start a new journal which we need to treat as unscanned.
if changed_file not in self.journals:
self.journals[changed_file] = 0
logger.debug(f'{changed_file} - Size change: {self.journals[changed_file]} to {new_size}')
if new_size > 0: # Don't try and read it if this is the first notification (we seem to get two; one from the file being cleared).
# Check how much it has grown and read the excess
size_diff = new_size - self.journals[changed_file]
if size_diff > 0:
with open(changed_file, 'rb') as f:
f.seek(-size_diff, os.SEEK_END) # Note minus sign
new_data = f.read()
entries = []
if new_data:
new_journal_lines = JournalChangeIdentifier.binary_file_data_to_lines(new_data)
try:
for line in new_journal_lines:
logger.debug(f'New journal entry detected: {line}')
entry = json.loads(line)
entry['type'] = "JournalEntry" # Add an identifier that's common to everything we shove down the outgoing pipe so the receiver can distiguish.
entries.append(entry)
logger.debug(f'Found {len(entries)} new entries')
for entry in entries:
yield entry
self.journals[changed_file] = new_size
except json.decoder.JSONDecodeError as e:
logger.exception(e)
@staticmethod
def binary_file_data_to_lines(binary_data):
as_ascii = binary_data.decode('UTF-8')
all_lines = as_ascii.split("\r\n")
all_lines.pop() # Drop the last empty line
return all_lines
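    # Illustrative: b'{"event":"FSDJump"}\r\n' decodes to ['{"event":"FSDJump"}'].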
def _init_journal_lists(self):
journal_files = glob.glob(os.path.join(self.journal_path, journal_file_pattern))
for journal_file in journal_files:
self.journals[journal_file] = os.stat(journal_file).st_size
class JournalWatcher:
def __init__(self, path=default_journal_path, force_polling=False):
self.path = path
self.force_polling = force_polling
self._configure_watchers()
def set_callback(self, on_journal_change):
self.event_handler.set_callback(on_journal_change)
def stop(self):
self.observer.stop()
self.observer.join()
class _EntriesChangeHandler(PatternMatchingEventHandler):
def __init__(self):
super(JournalWatcher._EntriesChangeHandler, self).__init__(
patterns=['*Journal*.log'],
ignore_patterns=[],
ignore_directories=True)
self.on_journal_change = None
def set_callback(self, on_new_journal_entry):
self.on_journal_change = on_new_journal_entry
def on_modified(self, event):
changed_file = str(event.src_path)
logger.debug("Journal change: " + changed_file)
self.on_journal_change(changed_file)
def on_created(self, event):
file = str(event.src_path)
logger.debug("Journal created: " + file)
def on_deleted(self, event):
file = str(event.src_path)
logger.debug("Journal deleted: " + file)
def on_moved(self, event):
file = str(event.src_path)
logger.debug("Journal moved: " + file)
def _configure_watchers(self):
self.event_handler = JournalWatcher._EntriesChangeHandler()
if self.force_polling:
self.observer = PollingObserver(0.25) # Poll every quarter of a second
else:
self.observer = Observer()
self.observer.schedule(self.event_handler, self.path, recursive=False)
self.observer.start()
if __name__ == '__main__':
    def ReportJournalChange(journal_change):
        print('New route detected: ' + str(journal_change))
journalWatcher = JournalWatcher()
journalWatcher.set_callback(ReportJournalChange)
print('running')
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print('done')
journalWatcher.stop()
| 2.234375 | 2 |
labs-python/lab9/add_files.py | xR86/ml-stuff | 3 | 5395 | import sqlite3
conn = sqlite3.connect('example.db')
c = conn.cursor()
import os
import hashlib
import time
def get_file_md5(filePath):
h = hashlib.md5()
h.update(open(filePath,"rb").read())
return h.hexdigest()
def get_file_sha256(filePath):
h = hashlib.sha256()
h.update(open(filePath,"rb").read())
return h.hexdigest()
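# Note: both helpers read the whole file into memory, which is fine for small lab
# files; for large files a chunked loop (e.g. h.update(chunk) over 64 KiB reads)
# would be more robust.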
def get_dir_data(dir_path):
    dir_path = os.path.realpath(dir_path)
    #print next(os.walk(dir_path))[2]
    #print os.path.basename(dir_path)
    id_location = 0
    id_file = 0
    for dir_file in next(os.walk(dir_path))[2]:
        # build the absolute path once so the helpers work for any dir_path,
        # not just the current working directory
        file_path = os.path.join(dir_path, dir_file)
        file_name = dir_file
        file_md5 = get_file_md5(file_path)
        file_sha256 = get_file_sha256(file_path)  # computed but not stored below
        file_size = os.path.getsize(file_path)
        file_time = time.gmtime(os.path.getctime(file_path))
        file_formatted_time = time.strftime("%Y-%m-%d %I:%M:%S %p", file_time)

        location_values = (id_location, file_path)
        c.execute("INSERT INTO location VALUES (?, ?)", location_values)

        files_values = (id_location, id_file)
        c.execute("INSERT INTO files VALUES (?, ?)", files_values)

        file_info_values = (id_file, file_name, file_size, file_formatted_time, file_md5)
        c.execute("INSERT INTO file_info VALUES (?, ?, ?, ?, ?)", file_info_values)

        id_location += 1
        id_file += 1
get_dir_data('./')
# Save (commit) the changes
conn.commit()
conn.close() | 2.84375 | 3 |
lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py | blankenberg/galaxy-data-resource | 0 | 5396 | """
Migration script to add 'ldda_parent_id' column to the implicitly_converted_dataset_association table.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
try:
Implicitly_converted_table = Table( "implicitly_converted_dataset_association", metadata, autoload=True )
if migrate_engine.name != 'sqlite':
c = Column( "ldda_parent_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True, nullable=True )
else:
#Can't use the ForeignKey in sqlite.
c = Column( "ldda_parent_id", Integer, index=True, nullable=True )
c.create( Implicitly_converted_table, index_name="ix_implicitly_converted_dataset_assoc_ldda_parent_id")
assert c is Implicitly_converted_table.c.ldda_parent_id
except Exception, e:
print "Adding ldda_parent_id column to implicitly_converted_dataset_association table failed: %s" % str( e )
log.debug( "Adding ldda_parent_id column to implicitly_converted_dataset_association table failed: %s" % str( e ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
try:
Implicitly_converted_table = Table( "implicitly_converted_dataset_association", metadata, autoload=True )
Implicitly_converted_table.c.ldda_parent_id.drop()
except Exception, e:
print "Dropping ldda_parent_id column from implicitly_converted_dataset_association table failed: %s" % str( e )
log.debug( "Dropping ldda_parent_id column from implicitly_converted_dataset_association table failed: %s" % str( e ) )
| 2.25 | 2 |
Replication Python and R Codes/Figure_6/cMCA_ESS2018_LABCON_org.py | tzuliu/Contrastive-Multiple-Correspondence-Analysis-cMCA | 3 | 5397 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import prince
from sklearn import utils
from sklearn.cluster import DBSCAN
import itertools
from cmca import CMCA
from ccmca import CCMCA
from matplotlib import rc
plt.style.use('ggplot')
df = pd.read_csv("./uk2018.csv")
df["prtclcgb"].replace({5: 8, 9: 8, 10:8, 11:8, 12:8, 13:8, 15:8, 19:8}, inplace=True)
df["prtclcgb"].replace({6: 5}, inplace=True)
df["prtclcgb"].replace({7: 6}, inplace=True)
df["prtclcgb"].replace({8: 7}, inplace=True)
alpha = r'$ \alpha $'
tableau10 = {
'teal': '#78B7B2',
'blue': '#507AA6',
'orange': '#F08E39',
'red': '#DF585C',
'green': '#5BA053',
'purple': '#AF7BA1',
'yellow': '#ECC854',
'brown': '#9A7460',
'pink': '#FD9EA9',
'gray': '#BAB0AC',
7: '#9A7460',
1: '#507AA6',
2: '#F08E39',
3: '#DF585C',
4: '#5BA053',
0: '#78B7B2',
6: '#ECC854',
5: '#AF7BA1',
8: '#FD9EA9',
9: '#BAB0AC',
-1: '#BAB0AC',
99: '#BAB0AC',
'LDP': '#507AA6',
'DPJ': '#F08E39'
}
def fillna_based_on_dtype(df):
for key in dict(df.dtypes).keys():
        if df.dtypes[key] == object:
df[key] = df[key].fillna('na')
else:
df[key] = df[key].fillna(99)
def df_to_mat(df):
X = df.iloc[:,np.r_[1:(df.shape[1])]]
    # .copy() avoids pandas SettingWithCopyWarning when fillna assigns below
    X_con = X[X["prtclcgb"] == 1].copy()
    X_lab = X[X["prtclcgb"] == 2].copy()
    X_ldp = X[X["prtclcgb"] == 3].copy()
    X_snp = X[X["prtclcgb"] == 4].copy()
    X_gre = X[X["prtclcgb"] == 5].copy()
    X_uip = X[X["prtclcgb"] == 6].copy()
    X_oth = X[X["prtclcgb"] == 7].copy()
print("missing value ratio (CON)", X_con.isna().sum().sum() / (X_con.shape[0] * X_con.shape[1]))
print("missing value ratio (LAB)", X_lab.isna().sum().sum() / (X_lab.shape[0] * X_lab.shape[1]))
print("missing value ratio (LDP)", X_ldp.isna().sum().sum() / (X_ldp.shape[0] * X_ldp.shape[1]))
print("missing value ratio (SNP)", X_snp.isna().sum().sum() / (X_snp.shape[0] * X_snp.shape[1]))
print("missing value ratio (GRE)", X_gre.isna().sum().sum() / (X_gre.shape[0] * X_gre.shape[1]))
print("missing value ratio (UIP)", X_uip.isna().sum().sum() / (X_uip.shape[0] * X_uip.shape[1]))
print("missing value ratio (OTH)", X_oth.isna().sum().sum() / (X_oth.shape[0] * X_oth.shape[1]))
fillna_based_on_dtype(X_con)
fillna_based_on_dtype(X_lab)
fillna_based_on_dtype(X_ldp)
fillna_based_on_dtype(X_snp)
fillna_based_on_dtype(X_gre)
fillna_based_on_dtype(X_uip)
fillna_based_on_dtype(X_oth)
return(X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth)
X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth = df_to_mat(df)
X = pd.concat([X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth])
print(X_con.shape, X_lab.shape, X_ldp.shape, X_snp.shape, X_gre.shape, X_uip.shape, X_oth.shape, X.shape)
## Dictionary for Level and Party
party = {1:"Con", 2:"Lab", 3:"LD", 4:"SNP", 5:"Green", 6:"UKIP", 7:"Other"}
## Fitting cMCA and exporting plots
cmca = CMCA(n_components=2, copy=True, check_input=True)
cmca = cmca.fit(fg=X_lab.iloc[:,0:(X_lab.shape[1]-3)], bg=X_con.iloc[:,0:(X_con.shape[1]-3)], alpha=1.5)
Y_fg = np.array(cmca.transform(X_lab.iloc[:, 0:(X_lab.shape[1] - 3)]))
Y_bg = np.array(cmca.transform(X_con.iloc[:, 0:(X_con.shape[1] - 3)]))
Y_fg_col = np.array(cmca.transform(X_lab.iloc[:, 0:(X_lab.shape[1] - 3)], axis='col'))
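# Y_fg / Y_bg: foreground (Labour) and background (Conservative) respondents
# projected onto the contrastive principal components; Y_fg_col places the
# response categories (columns) in the same space.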
prefix_to_info = cmca.gen_prefix_to_info()
f_6 = plt.figure()
plt.xlim([-2.5, 2.5])
plt.ylim([-2.5, 2.5])
plt.scatter(Y_fg[:, 0], Y_fg[:, 1], c=tableau10[X_lab["prtclcgb"].iloc[0]], label=party[X_lab["prtclcgb"].iloc[0]], alpha=0.3, linewidths=0)
plt.scatter(Y_bg[:, 0], Y_bg[:, 1], c=tableau10[X_con["prtclcgb"].iloc[0]], label=party[X_con["prtclcgb"].iloc[0]], alpha=0.3, linewidths=0)
handles, labels = plt.gca().get_legend_handles_labels()
handles = [handles[1],handles[0]]
labels = ["Con","Lab"]
plt.legend(handles, labels, loc="lower right", shadow=False, scatterpoints=1, fontsize=8)
plt.xlabel('cPC1')
plt.ylabel('cPC2')
plt.title("cMCA (tg: LAB, bg: CON, " + str(alpha) + ": 1.5)")
plt.show()
f_6.savefig("cMCA_ESS2018_labcon_org.pdf", bbox_inches='tight')
| 1.929688 | 2 |
Solutions/077.py | ruppysuppy/Daily-Coding-Problem-Solutions | 70 | 5398 | <gh_stars>10-100
"""
Problem:
Given a list of possibly overlapping intervals, return a new list of intervals where
all overlapping intervals have been merged.
The input list is not necessarily ordered in any way.
For example, given [(1, 3), (5, 8), (4, 10), (20, 25)], you should return
[(1, 3), (4, 10), (20, 25)].
"""
from typing import List, Tuple
def merge_intervals(intervals: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
intervals.sort(key=lambda x: x[0])
merged_intervals = []
start = intervals[0][0]
end = intervals[0][1]
# generating the merged intervals
for interval in intervals[1:]:
curr_start, curr_end = interval
if end < curr_start:
merged_intervals.append((start, end))
start = curr_start
end = curr_end
        elif curr_end > end:
            # overlapping or touching interval: extend the current merge
            end = curr_end
# adding the last interval
merged_intervals.append((start, end))
return merged_intervals
if __name__ == "__main__":
print(merge_intervals([(1, 3), (5, 8), (4, 10), (20, 25)]))
print(merge_intervals([(1, 3), (5, 8), (4, 10), (20, 25), (6, 12)]))
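    # Expected output:
    #   [(1, 3), (4, 10), (20, 25)]
    #   [(1, 3), (4, 12), (20, 25)]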
"""
SPECS:
TIME COMPLEXITY: O(n)
SPACE COMPLEXITY: O(n)
"""
| 3.875 | 4 |
slackbot_wems/chris/slacklib.py | wray/wems | 4 | 5399 | <reponame>wray/wems<filename>slackbot_wems/chris/slacklib.py<gh_stars>1-10
import time
import emoji
# Put your commands here
COMMAND1 = "testing testing"
COMMAND2 = "roger roger"
BLUEON = "blue on"
BLUEOFF = "blue off"
REDON = "red on"
REDOFF = "red off"
GREENON = "green on"
GREENOFF = "green off"
YELLOWON = "yellow on"
YELLOWOFF = "yellow off"
CLOCK = "update clock"
SCRAMBLE = "scramble the 7"
HACKER = "hack the 7"
SINGLEREADING = "light"
# Hardware modules are imported lazily inside setup_gpio() so this module
# can be imported on machines without GPIO hardware.
GPIO = None
lite = None
segment = None
_gpio_ready = False

def setup_gpio():
    global GPIO, lite, segment, _gpio_ready
    import RPi.GPIO as GPIO
    import slackbot_wems.chris.light as lite
    import slackbot_wems.chris.segment7 as segment

    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)

    # Pin Setup
    GPIO.setup(17, GPIO.OUT)  # BLUE LED
    GPIO.setup(27, GPIO.OUT)  # RED LED
    GPIO.setup(5, GPIO.OUT)   # GREEN LED
    GPIO.setup(22, GPIO.OUT)  # YELLOW LED
    GPIO.setup(12, GPIO.OUT)  # LDR

    _gpio_ready = True
# Your handling code goes in this function
def handle_command(command):
"""
Determine if the command is valid. If so, take action and return
a response, if necessary.
"""
    if not _gpio_ready:
        setup_gpio()  # setup_gpio() flips _gpio_ready itself
response = ""
    if command.find(COMMAND1) >= 0:
        response = "Surprise!"
    elif command.find(COMMAND2) >= 0:
        response = emoji.emojize('Python\n is\n :thumbs_up: :thumbs_up: :thumbs_up:')
    # Blue LED Commands
    elif command.find(BLUEON) >= 0:
        GPIO.output(17, True)
        response = emoji.emojize("Turning :radio_button: ON...")
    elif command.find(BLUEOFF) >= 0:
        GPIO.output(17, False)
        response = emoji.emojize("Turning :radio_button: OFF...")
    # Red LED Commands
    elif command.find(REDON) >= 0:
        GPIO.output(27, True)
        response = emoji.emojize("Turning :red_circle: ON...")
    elif command.find(REDOFF) >= 0:
        GPIO.output(27, False)
        response = emoji.emojize("Turning :red_circle: OFF...")
    # Green LED Commands
    elif command.find(GREENON) >= 0:
        GPIO.output(5, True)
        response = emoji.emojize("Turning :green_apple: ON...")
    elif command.find(GREENOFF) >= 0:
        GPIO.output(5, False)
        response = emoji.emojize("Turning :green_apple: OFF...")
    # Yellow LED Commands
    elif command.find(YELLOWON) >= 0:
        GPIO.output(22, True)
        response = emoji.emojize("Turning :sunny: ON...")
    elif command.find(YELLOWOFF) >= 0:
        GPIO.output(22, False)
        response = emoji.emojize("Turning :sunny: OFF...")
# 7 Segment Commands
elif command.find(CLOCK) >= 0:
print('Updating the clock!')
response = segment.updateClock()
elif command.find(SCRAMBLE) >= 0:
print(emoji.emojize(":egg: There is nothing better than scrambled eggs! :egg:"))
response = segment.scramble()
elif command.find(HACKER) >= 0:
print('Message')
response = segment.hacker()
    elif command.find(SINGLEREADING) >= 0:
        a = int(lite.printReading())
        time.sleep(1)
        print(a)
        response = 'Here is what the LDR Sensor said to me: ' + str(a)
return response
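# Usage sketch (hypothetical): the surrounding bot is expected to feed each
# incoming Slack message's text to handle_command() and post the returned
# string back to the channel.
#
#   reply = handle_command("blue on")  # drives GPIO pin 17 high
#   # reply is the emoji-rendered "Turning :radio_button: ON..." message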
| 3.015625 | 3 |