column                dtype          min    max
max_stars_repo_path   stringlengths  3      269
max_stars_repo_name   stringlengths  4      119
max_stars_count       int64          0      191k
id                    stringlengths  1      7
content               stringlengths  6      1.05M
score                 float64        0.23   5.13
int_score             int64          0      5
fts/fluxrss.py
AetherBlack/Veille-Informatique
0
1400
<gh_stars>0
#!/usr/bin/python3

from urllib.parse import urlparse

import feedparser
import requests
import asyncio
import discord
import hashlib
import os

from const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \
    SQLITE_FOLDER_NAME, SQLITE_FILE_NAME
from fts.database import Database
from fts.cleandatabase import CleanDatabase


class FluxRSS:
    """
    Class of FluxRSS.
    Get news of the feedrss url parse in args.
    """

    def __init__(self, bot, cwd):
        """
        Initialize class
        @param => DiscordBot: `bot`: Discord Bot Instance.
        @param => str: `cwd`: Current Working Directory of main.py file.
        """
        # Discord
        self.bot = bot
        self.bot_username = self.bot.user.name
        self.rss_channel = self.bot.get_channel(CHANNEL_RSS)
        # Path
        self.cwd = cwd
        # Database
        self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME)
        self.database = Database(self.db_path, SQLITE_FILE_NAME)

    def get_news(self, url):
        """
        Get the news of the rss feed.
        @param => str: `url`: url of the rss feed.
        Return dict with an int index key and title, description and link
        in a list for the value.
        """
        dict_news = dict()
        # Get the content of the requests
        content = requests.get(url).text
        # Parse the content
        parser = feedparser.parse(content)
        # Set the root
        parser = parser["entries"]
        # Get the number of news
        news_number = len(parser)
        # Construct the dict
        for index in range(news_number):
            # Get the title
            title = parser[index]["title"]
            # Get the description
            description = parser[index]["description"]
            # Get the link
            link = parser[index]["links"][0]["href"]
            # Set list
            args = [
                title,
                description,
                link
            ]
            # Add the list to the dict
            dict_news[str(index)] = args
        # Return the dict
        return dict_news

    def is_new(self, root, name, title, description, link):
        """
        Return True if the news in the feed is new.
        @param => str: `title`: Title of the news.
        @param => str: `description`: Description of the news.
        @param => str: `link`: Link of the rss feed.
        """
        # Hash description
        hash_description = hashlib.sha256(bytes(description, "utf-8", errors="ignore")).hexdigest()
        # Return the check of the query
        return not self.database.isNewsExists(root, name, title, hash_description, link)

    def embeded_msg(self, root, name, title, content, link, color):
        """
        Create the embeded message and send it to discord.
        @param => str: `root`: Name of the Website.
        @param => str: `name`: Name set in const. Categorie of the news
        @param => str: `title`: Title of the news.
        @param => str: `content`: Content description of the news.
        @param => str: `link`: Link of the news.
        @param => discord.Color: `color`: Color for the left panel.
        """
        # Set the Name, description and color on the left
        news = discord.Embed(title="{0} - {1}".format(root, name),
                             description="News :",
                             color=(color or 0x00ff00))
        # Set bot name and profil picture
        news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url)
        # Set the description and the link for the main message
        content = content + "\n" + link
        news.add_field(name=title, value=content[:1024], inline=False)
        # Show the bot username in footer
        news.set_footer(text="Generate by @{0}".format(self.bot_username))
        # Return the final Discord embeded message
        return news

    async def feedrss(self, json_rss):
        """
        Get the news and send it to the channel.
        @param => dict: `json_rss`: JSON data of the RSS Flux.
        """
        # Show const for the format
        self.json_rss = json_rss
        # While the connection is not closed
        while not self.bot.is_closed():
            # For each key
            for key, sections in self.json_rss.items():
                # Get the root name set in const
                root = key
                # For each sections
                for index_section, section in enumerate(sections):
                    # Check customization of the section
                    if "custom" in section.keys():
                        # Check color
                        if "color" in section["custom"].keys():
                            color = getattr(discord.Color, section["custom"]["color"])()
                        else:
                            color = False
                    else:
                        color = False
                    # Get the name of the section
                    name = section["name"]
                    # Get the time until the cleaning of the database for the root and name given
                    wait_time = section["clean"]
                    # Check if the cleaning database is already launched
                    if isinstance(wait_time, str):
                        # Launch the function to clean the database
                        Thread = CleanDatabase(root, name, wait_time, self.db_path, SQLITE_FILE_NAME)
                        Thread.start()
                        # Change the variable type of the clean line in json_rss to relaunch the requests
                        self.json_rss[root][index_section]["clean"] = True
                    # For each link in the section
                    for link in section["link"]:
                        # Get title, description and link in a dict
                        dict_news = self.get_news(link)
                        # Verify if the news already exists
                        for value in dict_news.values():
                            # Get title
                            title = value[0]
                            # Get description
                            description = value[1]
                            # Get link
                            link = value[2]
                            # Check if the news is new
                            if self.is_new(root, name, title, description, link):
                                # Hash the description
                                hash_description = hashlib.sha256(bytes(description, "utf-8", errors="ignore")).hexdigest()
                                # Write the news into the database
                                self.database.AddNews(root, name, title, hash_description, link)
                                # Create the discord message
                                message = self.embeded_msg(root, name, title, description, link, color)
                                # Send to discord
                                await self.rss_channel.send(embed=message)
            # Wait until the next verification
            await asyncio.sleep(WAIT_UNTIL_NEW_CHECK)
2.75
3
src/poke_env/player/player_network_interface.py
kiyohiro8/poke-env
0
1401
<filename>src/poke_env/player/player_network_interface.py<gh_stars>0 # -*- coding: utf-8 -*- """This module defines a base class for communicating with showdown servers. """ import json import logging import requests import websockets # pyre-ignore from abc import ABC from abc import abstractmethod from asyncio import CancelledError from asyncio import ensure_future from asyncio import Event from asyncio import Lock from asyncio import sleep from time import perf_counter from typing import List from typing import Optional from aiologger import Logger # pyre-ignore from poke_env.exceptions import ShowdownException from poke_env.player_configuration import PlayerConfiguration from poke_env.server_configuration import ServerConfiguration class PlayerNetwork(ABC): """ Network interface of a player. Responsible for communicating with showdown servers. Also implements some higher level methods for basic tasks, such as changing avatar and low-level message handling. """ def __init__( self, player_configuration: PlayerConfiguration, *, avatar: Optional[int] = None, log_level: Optional[int] = None, server_configuration: ServerConfiguration, start_listening: bool = True, ) -> None: """ :param player_configuration: Player configuration. :type player_configuration: PlayerConfiguration :param avatar: Player avatar id. Optional. :type avatar: int, optional :param log_level: The player's logger level. :type log_level: int. Defaults to logging's default level. :param server_configuration: Server configuration. :type server_configuration: ServerConfiguration :param start_listening: Wheter to start listening to the server. Defaults to True. :type start_listening: bool """ self._authentication_url = server_configuration.authentication_url self._avatar = avatar self._password = player_configuration.password self._username = player_configuration.username self._server_url = server_configuration.server_url self._logged_in: Event = Event() self._sending_lock = Lock() self._websocket: websockets.client.WebSocketClientProtocol # pyre-ignore self._logger: Logger = self._create_player_logger(log_level) # pyre-ignore if start_listening: self._listening_coroutine = ensure_future(self.listen()) async def _accept_challenge(self, username: str) -> None: assert self.logged_in.is_set() await self._set_team() await self._send_message("/accept %s" % username) async def _challenge(self, username: str, format_: str): assert self.logged_in.is_set() await self._set_team() await self._send_message(f"/challenge {username}, {format_}") async def _change_avatar(self, avatar_id: Optional[int]) -> None: """Changes the player's avatar. :param avatar_id: The new avatar id. If None, nothing happens. :type avatar_id: int """ await self._wait_for_login() if avatar_id is not None: await self._send_message(f"/avatar {avatar_id}") def _create_player_logger(self, log_level: Optional[int]) -> Logger: # pyre-ignore """Creates a logger for the player. Returns a Logger displaying asctime and the player's username before messages. :param log_level: The logger's level. :type log_level: int :return: The logger. :rtype: Logger """ logger = logging.getLogger(self._username) stream_handler = logging.StreamHandler() if log_level is not None: logger.setLevel(log_level) formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(message)s" ) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) return logger async def _handle_message(self, message: str) -> None: """Handle received messages. 
:param message: The message to parse. :type message: str """ try: self.logger.debug("Received message to handle: %s", message) # Showdown websocket messages are pipe-separated sequences split_message = message.split("|") assert len(split_message) > 1 # The type of message is determined by the first entry in the message # For battles, this is the zero-th entry # Otherwise it is the one-th entry if split_message[1] == "challstr": # Confirms connection to the server: we can login await self._log_in(split_message) elif split_message[1] == "updateuser": if split_message[2] == " " + self._username: # Confirms successful login self.logged_in.set() elif not split_message[2].startswith(" Guest "): self.logger.warning( """Trying to login as %s, showdown returned %s """ """- this might prevent future actions from this agent. """ """Changing the agent's username might solve this problem.""", self.username, split_message[2], ) elif "updatechallenges" in split_message[1]: # Contain information about current challenge await self._update_challenges(split_message) elif split_message[0].startswith(">battle"): # Battle update await self._handle_battle_message(message) elif split_message[1] == "updatesearch": self.logger.debug("Ignored message: %s", message) pass elif split_message[1] == "popup": self.logger.warning("Popup message received: %s", message) elif split_message[1] in ["nametaken"]: self.logger.critical("Error message received: %s", message) raise ShowdownException("Error message received: %s", message) elif split_message[1] == "pm": self.logger.info("Received pm: %s", split_message) else: self.logger.critical("Unhandled message: %s", message) raise NotImplementedError("Unhandled message: %s" % message) except CancelledError as e: self.logger.critical("CancelledError intercepted. %s", e) except Exception as exception: self.logger.exception( "Unhandled exception raised while handling message:\n%s", message ) raise exception async def _log_in(self, split_message: List[str]) -> None: """Log the player with specified username and password. Split message contains information sent by the server. This information is necessary to log in. :param split_message: Message received from the server that triggers logging in. :type split_message: List[str] """ if self._password: log_in_request = requests.post( self._authentication_url, data={ "act": "login", "name": self._username, "pass": self._password, "challstr": split_message[2] + "%7C" + split_message[3], }, ) self.logger.info("Sending authentication request") assertion = json.loads(log_in_request.text[1:])["assertion"] else: self.logger.info("Bypassing authentication request") assertion = "" await self._send_message(f"/trn {self._username},0,{assertion}") await self._change_avatar(self._avatar) async def _search_ladder_game(self, format_): await self._set_team() await self._send_message(f"/search {format_}") async def _send_message( self, message: str, room: str = "", message_2: Optional[str] = None ) -> None: """Sends a message to the specified room. `message_2` can be used to send a sequence of length 2. :param message: The message to send. :type message: str :param room: The room to which the message should be sent. :type room: str :param message_2: Second element of the sequence to be sent. Optional. 
:type message_2: str, optional """ if message_2: to_send = "|".join([room, message, message_2]) else: to_send = "|".join([room, message]) await self._websocket.send(to_send) self.logger.info(">>> %s", to_send) async def _set_team(self): if self._team is not None: await self._send_message("/utm %s" % self._team.yield_team()) async def _wait_for_login( self, checking_interval: float = 0.001, wait_for: int = 5 ) -> None: start = perf_counter() while perf_counter() - start < wait_for: await sleep(checking_interval) if self.logged_in: return assert self.logged_in async def listen(self) -> None: """Listen to a showdown websocket and dispatch messages to be handled.""" self.logger.info("Starting listening to showdown websocket") coroutines = [] try: async with websockets.connect( self.websocket_url, max_queue=None ) as websocket: self._websocket = websocket async for message in websocket: self.logger.info("<<< %s", message) coroutines.append(ensure_future(self._handle_message(message))) except websockets.exceptions.ConnectionClosedOK: self.logger.warning( "Websocket connection with %s closed", self.websocket_url ) except (CancelledError, RuntimeError) as e: self.logger.critical("Listen interrupted by %s", e) except Exception as e: self.logger.exception(e) finally: for coroutine in coroutines: coroutine.cancel() async def stop_listening(self) -> None: if self._listening_coroutine is not None: self._listening_coroutine.cancel() await self._websocket.close() @abstractmethod async def _handle_battle_message(self, message: str) -> None: """Abstract method. Implementation should redirect messages to corresponding battles. """ @abstractmethod async def _update_challenges(self, split_message: List[str]) -> None: """Abstract method. Implementation should keep track of current challenges. """ @property def logged_in(self) -> Event: """Event object associated with user login. :return: The logged-in event :rtype: Event """ return self._logged_in @property def logger(self) -> Logger: # pyre-ignore """Logger associated with the player. :return: The logger. :rtype: Logger """ return self._logger @property def username(self) -> str: """The player's username. :return: The player's username. :rtype: str """ return self._username @property def websocket_url(self) -> str: """The websocket url. It is derived from the server url. :return: The websocket url. :rtype: str """ return f"ws://{self._server_url}/showdown/websocket"
2.265625
2
data/external/repositories/42139/KDDCup13Track2-master/blocking.py
Keesiu/meta-kaggle
0
1402
#!/usr/bin/env python from common import * import csv import argparse from unidecode import unidecode from nameparser import constants as npc from collections import defaultdict import cPickle as pickle import re stopwords_custom = set(['document', 'preparation', 'system', 'consortium', 'committee', 'international', 'artificial', 'network', 'distributed', 'based', 'research', 'language', 'technology', 'project', 'design', 'computer', 'control', 'object', 'internet', 'propulsion', 'corp', 'workshop', 'xml', 'world', 'work', 'thesis', 'test', 'tool', 'structure', 'statistical', 'laboratory', 'ltd', 'objects', 'process', 'scheduling', 'september', 'special', 'student', 'programs', 'capacitated', 'balancing', 'assembly', 'aspect', 'model', 'inc', 'psychological', 'psychology', 'mohammed', 'computing', 'software', 'programming', 'new', 'applications', 'jet', 'propulsion', 'classification', 'recommendation']) stopwords = stopwords_custom | npc.TITLES | npc.PREFIXES | npc.SUFFIXES | npc.CONJUNCTIONS def bin_exactsamename(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_samename(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_fFfL(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fFfL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_fF3L(authors, max_bin_size=20): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fFiL'] and len(a['name_last']) >= 3 and len(a['fFiL']) > 2: bins[a['fFiL'] + a['name_last'][1:3]].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_fFiL(authors, max_bin_size=20): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if len(a['fFiL']) > 2: bins[a['fFiL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_iFfL(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['iFfL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_fullparsedname(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_iFoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname'] and a['name_first'] and a['name_last']: bins[a['name_first'][0] + a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0] + a['name_last'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_2FoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname'] and len(a['name_first']) >= 2 and a['name_last']: bins[a['name_first'][0:2] + a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_metaphone(authors): bins = defaultdict(set) for i, (id, a) in 
enumerate(authors.iteritems()): if a['metaphone_fullname']: bins[a['metaphone_fullname']].add(id) if (i+1) % 10000 == 0: print_err(i+1) # bk = bins.keys() # for b in bk: # if len(bins[b]) > max_bin_size: # del bins[b] return bins def bin_offbylastone(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname_joined']: bins[a['fullname_joined']].add(id) if len(a['fullname_joined']) > 1: bins[a['fullname_joined'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_token(authors, nw=2, max_bin_size=100): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['name']: tokens = re.sub("[^\w]", " ", a['name']).split() tokens = [v for v in tokens if len(v) > 2 and v not in stopwords] ngrams = zip(*[tokens[j:] for j in range(nw)]) for p in ngrams: pg = ' '.join(p) if len(pg) > len(p)*2-1: bins[pg].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_ngrams(authors, n=15, max_bin_size=30): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname']: lname = a['fullname'] ngrams = zip(*[lname[j:] for j in range(n)]) for p in ngrams: if not any(((s in p) for s in stopwords_custom)): bins[''.join(p)].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def main(): parser = argparse.ArgumentParser() parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle') parser.add_argument('type', nargs='?', default='iFfL') args = parser.parse_args() print_err("Loading pickled author pre-features") authors = pickle.load(open(args.authorprefeat, 'rb')) bins = globals()["bin_"+args.type](authors) bins = sorted([(len(bv), blabel, bv) for blabel, bv in bins.iteritems()], reverse=True) for _, binlabel, binv in bins: print binlabel + ';' + ','.join(map(str, sorted(binv))) if __name__ == "__main__": main()
2.40625
2
resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtCore/QAbstractFileEngineIterator.py
basepipe/developer_onboarding
1
1403
<reponame>basepipe/developer_onboarding<gh_stars>1-10
# encoding: utf-8
# module PySide.QtCore
# from C:\Python27\lib\site-packages\PySide\QtCore.pyd
# by generator 1.147
# no doc

# imports
import Shiboken as __Shiboken


class QAbstractFileEngineIterator(__Shiboken.Object):
    # no doc
    def currentFileInfo(self, *args, **kwargs): # real signature unknown
        pass

    def currentFileName(self, *args, **kwargs): # real signature unknown
        pass

    def currentFilePath(self, *args, **kwargs): # real signature unknown
        pass

    def filters(self, *args, **kwargs): # real signature unknown
        pass

    def hasNext(self, *args, **kwargs): # real signature unknown
        pass

    def nameFilters(self, *args, **kwargs): # real signature unknown
        pass

    def next(self, *args, **kwargs): # real signature unknown
        pass

    def path(self, *args, **kwargs): # real signature unknown
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
1.617188
2
tests/conftest.py
priyatharsan/beyond
0
1404
<gh_stars>0
import numpy as np
from pytest import fixture, mark, skip
from unittest.mock import patch
from pathlib import Path

from beyond.config import config
from beyond.dates.eop import Eop
from beyond.frames.stations import create_station
from beyond.io.tle import Tle
from beyond.propagators.keplernum import KeplerNum
from beyond.dates import Date, timedelta
from beyond.env.solarsystem import get_body


np.set_printoptions(linewidth=200)


@fixture(autouse=True, scope="session")
def config_override():
    """Create a dummy config dict containing basic data
    """
    config.update({
        "eop": {
            "missing_policy": "pass",
        }
    })


@fixture
def common_env():
    with patch('beyond.dates.date.EopDb.get') as m:
        m.return_value = Eop(
            x=-0.00951054166666622, y=0.31093590624999734,
            dpsi=-94.19544791666682, deps=-10.295645833333051,
            dy=-0.10067361111115315, dx=-0.06829513888889051,
            lod=1.6242802083331438, ut1_utc=0.01756018472222477,
            tai_utc=36.0
        )
        yield


@fixture
def station(common_env):
    return create_station('Toulouse', (43.604482, 1.443962, 172.))


@fixture
def iss_tle(common_env):
    return Tle("""ISS (ZARYA)
1 25544U 98067A 18124.55610684 .00001524 00000-0 30197-4 0 9997
2 25544 51.6421 236.2139 0003381 47.8509 47.6767 15.54198229111731""")


@fixture
def molniya_tle(common_env):
    return Tle("""MOLNIYA 1-90
1 24960U 97054A 18123.22759647 .00000163 00000-0 24467-3 0 9999
2 24960 62.6812 182.7824 6470982 294.8616 12.8538 3.18684355160009""")


@fixture(params=["tle", "ephem"])
def orbit(request, iss_tle):
    orb = iss_tle.orbit()
    if request.param == "tle":
        return orb
    elif request.param == "ephem":
        start = Date(2018, 4, 5, 16, 50)
        stop = timedelta(hours=6)
        step = timedelta(seconds=15)
        return orb.ephem(start=start, stop=stop, step=step)
    elif request.param == "kepler":
        orb.propagator = KeplerNum(
            timedelta(seconds=60),
            get_body('Earth')
        )
        return orb


@fixture(params=["tle", "ephem"])
def molniya(request, molniya_tle):
    orb = molniya_tle.orbit()
    if request.param == "tle":
        return orb
    elif request.param == "ephem":
        start = Date(2018, 4, 5, 16, 50)
        stop = timedelta(hours=15)
        step = timedelta(minutes=1)
        return orb.ephem(start=start, stop=stop, step=step)


@fixture
def jplfiles():
    config['env'] = {
        'jpl': [
            str(Path(__file__).parent / "data" / "jpl" / "de403_2000-2020.bsp"),
            str(Path(__file__).parent / "data" / "jpl" / "pck00010.tpc"),
            str(Path(__file__).parent / "data" / "jpl" / "gm_de431.tpc"),
        ]
    }


def _skip_if_no_mpl():
    """Specific for dynamically skipping the test if matplotlib is not present
    as it is not a dependency of the library, but merely a convenience
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        return True
    else:
        return False


def pytest_configure(config):
    """Declare the skip_if_no_mpl marker in pytest's '--markers' helper option
    This has no actual effect on the tests
    """
    config.addinivalue_line(
        "markers", "skip_if_no_mpl: skip if matplotlib is not installed"
    )


def pytest_runtest_setup(item):
    """This function is called for each test case.
    It looks if the test case has the skip_if_no_mpl decorator. If so,
    skip the test case
    """
    if _skip_if_no_mpl() and list(item.iter_markers(name="skip_if_no_mpl")):
        skip("matplotlib not installed")
1.875
2
diofant/tests/integrals/test_heurisch.py
Electric-tric/diofant
1
1405
<filename>diofant/tests/integrals/test_heurisch.py import pytest from diofant import (Add, Derivative, Ei, Eq, Function, I, Integral, LambertW, Piecewise, Rational, Sum, Symbol, acos, asin, asinh, besselj, cos, cosh, diff, erf, exp, li, log, pi, ratsimp, root, simplify, sin, sinh, sqrt, symbols, tan) from diofant.integrals.heurisch import components, heurisch, heurisch_wrapper __all__ = () x, y, z, nu = symbols('x,y,z,nu') f = Function('f') def test_components(): assert components(x*y, x) == {x} assert components(1/(x + y), x) == {x} assert components(sin(x), x) == {sin(x), x} assert components(sin(x)*sqrt(log(x)), x) == \ {log(x), sin(x), sqrt(log(x)), x} assert components(x*sin(exp(x)*y), x) == \ {sin(y*exp(x)), x, exp(x)} assert components(x**Rational(17, 54)/sqrt(sin(x)), x) == \ {sin(x), root(x, 54), sqrt(sin(x)), x} assert components(f(x), x) == \ {x, f(x)} assert components(Derivative(f(x), x), x) == \ {x, f(x), Derivative(f(x), x)} assert components(f(x)*diff(f(x), x), x) == \ {x, f(x), Derivative(f(x), x), Derivative(f(x), x)} def test_heurisch_polynomials(): assert heurisch(1, x) == x assert heurisch(x, x) == x**2/2 assert heurisch(x**17, x) == x**18/18 def test_heurisch_fractions(): assert heurisch(1/x, x) == log(x) assert heurisch(1/(2 + x), x) == log(x + 2) assert heurisch(1/(x + sin(y)), x) == log(x + sin(y)) # Up to a constant, where C = 5*pi*I/12, Mathematica gives identical # result in the first case. The difference is because diofant changes # signs of expressions without any care. # XXX ^ ^ ^ is this still correct? assert heurisch(5*x**5/( 2*x**6 - 5), x) in [5*log(2*x**6 - 5) / 12, 5*log(-2*x**6 + 5) / 12] assert heurisch(5*x**5/(2*x**6 + 5), x) == 5*log(2*x**6 + 5) / 12 assert heurisch(1/x**2, x) == -1/x assert heurisch(-1/x**5, x) == 1/(4*x**4) def test_heurisch_log(): assert heurisch(log(x), x) == x*log(x) - x assert heurisch(log(3*x), x) == -x + x*log(3) + x*log(x) assert heurisch(log(x**2), x) in [x*log(x**2) - 2*x, 2*x*log(x) - 2*x] def test_heurisch_exp(): assert heurisch(exp(x), x) == exp(x) assert heurisch(exp(-x), x) == -exp(-x) assert heurisch(exp(17*x), x) == exp(17*x) / 17 assert heurisch(x*exp(x), x) == x*exp(x) - exp(x) assert heurisch(x*exp(x**2), x) == exp(x**2) / 2 assert heurisch(exp(-x**2), x) is None assert heurisch(2**x, x) == 2**x/log(2) assert heurisch(x*2**x, x) == x*2**x/log(2) - 2**x*log(2)**(-2) assert heurisch(Integral(x**z*y, (y, 1, 2), (z, 2, 3)).function, x) == (x*x**z*y)/(z+1) assert heurisch(Sum(x**z, (z, 1, 2)).function, z) == x**z/log(x) def test_heurisch_trigonometric(): assert heurisch(sin(x), x) == -cos(x) assert heurisch(pi*sin(x) + 1, x) == x - pi*cos(x) assert heurisch(cos(x), x) == sin(x) assert heurisch(tan(x), x) in [ log(1 + tan(x)**2)/2, log(tan(x) + I) + I*x, log(tan(x) - I) - I*x, ] assert heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y) assert heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x) # gives sin(x) in answer when run via setup.py and cos(x) when run via py.test assert heurisch(sin(x)*cos(x), x) in [sin(x)**2 / 2, -cos(x)**2 / 2] assert heurisch(cos(x)/sin(x), x) == log(sin(x)) assert heurisch(x*sin(7*x), x) == sin(7*x) / 49 - x*cos(7*x) / 7 assert heurisch(1/pi/4 * x**2*cos(x), x) == 1/pi/4*(x**2*sin(x) - 2*sin(x) + 2*x*cos(x)) assert heurisch(acos(x/4) * asin(x/4), x) == 2*x - (sqrt(16 - x**2))*asin(x/4) \ + (sqrt(16 - x**2))*acos(x/4) + x*asin(x/4)*acos(x/4) def test_heurisch_hyperbolic(): assert heurisch(sinh(x), x) == cosh(x) assert heurisch(cosh(x), x) == sinh(x) assert heurisch(x*sinh(x), x) == x*cosh(x) - sinh(x) 
assert heurisch(x*cosh(x), x) == x*sinh(x) - cosh(x) assert heurisch( x*asinh(x/2), x) == x**2*asinh(x/2)/2 + asinh(x/2) - x*sqrt(4 + x**2)/4 def test_heurisch_mixed(): assert heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2 - exp(x)*cos(x)/2 def test_heurisch_radicals(): assert heurisch(1/sqrt(x), x) == 2*sqrt(x) assert heurisch(1/sqrt(x)**3, x) == -2/sqrt(x) assert heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5 assert heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3 y = Symbol('y') assert heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \ 2*sqrt(x)*cos(y*sqrt(x))/y assert heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise( (0, Eq(y, 0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True)) y = Symbol('y', positive=True) assert heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \ 2*sqrt(x)*cos(y*sqrt(x))/y def test_heurisch_special(): assert heurisch(erf(x), x) == x*erf(x) + exp(-x**2)/sqrt(pi) assert heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2 / 4 def test_heurisch_symbolic_coeffs(): assert heurisch(1/(x + y), x) == log(x + y) assert heurisch(1/(x + sqrt(2)), x) == log(x + sqrt(2)) assert simplify(diff(heurisch(log(x + y + z), y), y)) == log(x + y + z) def test_heurisch_symbolic_coeffs_1130(): y = Symbol('y') assert heurisch_wrapper(1/(x**2 + y), x) == Piecewise( (-1/x, Eq(y, 0)), (-I*log(x - I*sqrt(y))/(2*sqrt(y)) + I*log(x + I*sqrt(y))/(2*sqrt(y)), True)) y = Symbol('y', positive=True) assert heurisch_wrapper(1/(x**2 + y), x) in [I/sqrt(y)*log(x + sqrt(-y))/2 - I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x + I*sqrt(y)) / (2*sqrt(y)) - I*log(x - I*sqrt(y))/(2*sqrt(y))] def test_heurisch_hacking(): assert (heurisch(sqrt(1 + 7*x**2), x, hints=[]) == x*sqrt(1 + 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14) assert (heurisch(sqrt(1 - 7*x**2), x, hints=[]) == x*sqrt(1 - 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14) assert (heurisch(1/sqrt(1 + 7*x**2), x, hints=[]) == sqrt(7)*asinh(sqrt(7)*x)/7) assert (heurisch(1/sqrt(1 - 7*x**2), x, hints=[]) == sqrt(7)*asin(sqrt(7)*x)/7) assert (heurisch(exp(-7*x**2), x, hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14) assert heurisch(1/sqrt(9 - 4*x**2), x, hints=[]) == asin(2*x/3)/2 assert heurisch(1/sqrt(9 + 4*x**2), x, hints=[]) == asinh(2*x/3)/2 assert heurisch(li(x), x, hints=[]) == x*li(x) - Ei(2*log(x)) def test_heurisch_function(): assert heurisch(f(x), x) is None def test_heurisch_wrapper(): f = 1/(y + x) assert heurisch_wrapper(f, x) == log(x + y) f = 1/(y - x) assert heurisch_wrapper(f, x) == -log(x - y) f = 1/((y - x)*(y + x)) assert heurisch_wrapper(f, x) == \ Piecewise((1/x, Eq(y, 0)), (log(x + y)/2/y - log(x - y)/2/y, True)) # issue sympy/sympy#6926 f = sqrt(x**2/((y - x)*(y + x))) assert heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \ - y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x def test_sympyissue_3609(): assert heurisch(1/(x * (1 + log(x)**2)), x) == I*log(log(x) + I)/2 - \ I*log(log(x) - I)/2 # These are examples from the Poor Man's Integrator # http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def test_pmint_rat(): # TODO: heurisch() is off by a constant: -3/4. Possibly different permutation # would give the optimal result? 
def drop_const(expr, x): if expr.is_Add: return Add(*[ arg for arg in expr.args if arg.has(x) ]) else: return expr f = (x**7 - 24*x**4 - 4*x**2 + 8*x - 8)/(x**8 + 6*x**6 + 12*x**4 + 8*x**2) g = (4 + 8*x**2 + 6*x + 3*x**3)/(x**5 + 4*x**3 + 4*x) + log(x) assert drop_const(ratsimp(heurisch(f, x)), x) == g def test_pmint_trig(): f = (x - tan(x)) / tan(x)**2 + tan(x) g = -x**2/2 - x/tan(x) + log(tan(x)**2 + 1)/2 assert heurisch(f, x) == g @pytest.mark.slow # 8 seconds on 3.4 GHz def test_pmint_logexp(): f = (1 + x + x*exp(x))*(x + log(x) + exp(x) - 1)/(x + log(x) + exp(x))**2/x g = log(x**2 + 2*x*exp(x) + 2*x*log(x) + exp(2*x) + 2*exp(x)*log(x) + log(x)**2)/2 + 1/(x + exp(x) + log(x)) # TODO: Optimal solution is g = 1/(x + log(x) + exp(x)) + log(x + log(x) + exp(x)), # but Diofant requires a lot of guidance to properly simplify heurisch() output. assert ratsimp(heurisch(f, x)) == g @pytest.mark.slow # 8 seconds on 3.4 GHz def test_pmint_erf(): f = exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 - erf(x) + 1) g = sqrt(pi)*log(erf(x) - 1)/8 - sqrt(pi)*log(erf(x) + 1)/8 - sqrt(pi)/(4*erf(x) - 4) assert ratsimp(heurisch(f, x)) == g def test_pmint_LambertW(): f = LambertW(x) g = x*LambertW(x) - x + x/LambertW(x) assert heurisch(f, x) == g @pytest.mark.xfail def test_pmint_besselj(): # TODO: in both cases heurisch() gives None. Wrong besselj() derivative? f = besselj(nu + 1, x)/besselj(nu, x) g = nu*log(x) - log(besselj(nu, x)) assert simplify(heurisch(f, x) - g) == 0 f = (nu*besselj(nu, x) - x*besselj(nu + 1, x))/x g = besselj(nu, x) assert simplify(heurisch(f, x) - g) == 0 @pytest.mark.slow def test_pmint_WrightOmega(): def omega(x): return LambertW(exp(x)) f = (1 + omega(x) * (2 + cos(omega(x)) * (x + omega(x))))/(1 + omega(x))/(x + omega(x)) g = log(x + LambertW(exp(x))) + sin(LambertW(exp(x))) assert heurisch(f, x) == g def test_RR(): # Make sure the algorithm does the right thing if the ring is RR. See # issue sympy/sympy#8685. assert heurisch(sqrt(1 + 0.25*x**2), x, hints=[]) == \ 0.5*x*sqrt(0.25*x**2 + 1) + 1.0*asinh(0.5*x) # TODO: convert the rest of PMINT tests: # Airy functions # f = (x - AiryAi(x)*AiryAi(1, x)) / (x**2 - AiryAi(x)**2) # g = Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x)) # f = x**2 * AiryAi(x) # g = -AiryAi(x) + AiryAi(1, x)*x # Whittaker functions # f = WhittakerW(mu + 1, nu, x) / (WhittakerW(mu, nu, x) * x) # g = x/2 - mu*ln(x) - ln(WhittakerW(mu, nu, x))
2.28125
2
kornia/color/adjust.py
carlosb1/kornia
0
1406
from typing import Union import torch import torch.nn as nn from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb from kornia.constants import pi def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust color saturation of an image. Expecting input to be in hsv format already. See :class:`~kornia.color.AdjustSaturation` for details. """ if not torch.is_tensor(input): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not isinstance(saturation_factor, (float, torch.Tensor,)): raise TypeError(f"The saturation_factor should be a float number or torch.Tensor." f"Got {type(saturation_factor)}") if isinstance(saturation_factor, float): saturation_factor = torch.tensor([saturation_factor]) saturation_factor = saturation_factor.to(input.device).to(input.dtype) if (saturation_factor < 0).any(): raise ValueError(f"Saturation factor must be non-negative. Got {saturation_factor}") for _ in input.shape[1:]: saturation_factor = torch.unsqueeze(saturation_factor, dim=-1) # unpack the hsv values h, s, v = torch.chunk(input, chunks=3, dim=-3) # transform the hue value and appl module s_out: torch.Tensor = torch.clamp(s * saturation_factor, min=0, max=1) # pack back back the corrected hue out: torch.Tensor = torch.cat([h, s_out, v], dim=-3) return out def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust color saturation of an image. See :class:`~kornia.color.AdjustSaturation` for details. """ # convert the rgb image to hsv x_hsv: torch.Tensor = rgb_to_hsv(input) # perform the conversion x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor) # convert back to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust hue of an image. Expecting input to be in hsv format already. See :class:`~kornia.color.AdjustHue` for details. """ if not torch.is_tensor(input): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not isinstance(hue_factor, (float, torch.Tensor)): raise TypeError(f"The hue_factor should be a float number or torch.Tensor in the range between" f" [-PI, PI]. Got {type(hue_factor)}") if isinstance(hue_factor, float): hue_factor = torch.tensor([hue_factor]) hue_factor = hue_factor.to(input.device).to(input.dtype) if ((hue_factor < -pi) | (hue_factor > pi)).any(): raise ValueError(f"Hue-factor must be in the range [-PI, PI]. Got {hue_factor}") for _ in input.shape[1:]: hue_factor = torch.unsqueeze(hue_factor, dim=-1) # unpack the hsv values h, s, v = torch.chunk(input, chunks=3, dim=-3) # transform the hue value and appl module divisor: float = 2 * pi.item() h_out: torch.Tensor = torch.fmod(h + hue_factor, divisor) # pack back back the corrected hue out: torch.Tensor = torch.cat([h_out, s, v], dim=-3) return out def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust hue of an image. See :class:`~kornia.color.AdjustHue` for details. """ # convert the rgb image to hsv x_hsv: torch.Tensor = rgb_to_hsv(input) # perform the conversion x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor) # convert back to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> torch.Tensor: r"""Perform gamma correction on an image. 
See :class:`~kornia.color.AdjustGamma` for details. """ if not torch.is_tensor(input): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not isinstance(gamma, (float, torch.Tensor)): raise TypeError(f"The gamma should be a positive float or torch.Tensor. Got {type(gamma)}") if not isinstance(gain, (float, torch.Tensor)): raise TypeError(f"The gain should be a positive float or torch.Tensor. Got {type(gain)}") if isinstance(gamma, float): gamma = torch.tensor([gamma]) if isinstance(gain, float): gain = torch.tensor([gain]) gamma = gamma.to(input.device).to(input.dtype) gain = gain.to(input.device).to(input.dtype) if (gamma < 0.0).any(): raise ValueError(f"Gamma must be non-negative. Got {gamma}") if (gain < 0.0).any(): raise ValueError(f"Gain must be non-negative. Got {gain}") for _ in input.shape[1:]: gamma = torch.unsqueeze(gamma, dim=-1) gain = torch.unsqueeze(gain, dim=-1) # Apply the gamma correction x_adjust: torch.Tensor = gain * torch.pow(input, gamma) # Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_contrast(input: torch.Tensor, contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust Contrast of an image. See :class:`~kornia.color.AdjustContrast` for details. """ if not torch.is_tensor(input): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not isinstance(contrast_factor, (float, torch.Tensor,)): raise TypeError(f"The factor should be either a float or torch.Tensor. " f"Got {type(contrast_factor)}") if isinstance(contrast_factor, float): contrast_factor = torch.tensor([contrast_factor]) contrast_factor = contrast_factor.to(input.device).to(input.dtype) if (contrast_factor < 0).any(): raise ValueError(f"Contrast factor must be non-negative. Got {contrast_factor}") for _ in input.shape[1:]: contrast_factor = torch.unsqueeze(contrast_factor, dim=-1) # Apply contrast factor to each channel x_adjust: torch.Tensor = input * contrast_factor # Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_brightness(input: torch.Tensor, brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust Brightness of an image. See :class:`~kornia.color.AdjustBrightness` for details. """ if not torch.is_tensor(input): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not isinstance(brightness_factor, (float, torch.Tensor,)): raise TypeError(f"The factor should be either a float or torch.Tensor. " f"Got {type(brightness_factor)}") if isinstance(brightness_factor, float): brightness_factor = torch.tensor([brightness_factor]) brightness_factor = brightness_factor.to(input.device).to(input.dtype) for _ in input.shape[1:]: brightness_factor = torch.unsqueeze(brightness_factor, dim=-1) # Apply brightness factor to each channel x_adjust: torch.Tensor = input + brightness_factor # Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out class AdjustSaturation(nn.Module): r"""Adjust color saturation of an image. The input image is expected to be an RGB image in the range of [0, 1]. Args: input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N). saturation_factor (float): How much to adjust the saturation. 0 will give a black and white image, 1 will give the original image while 2 will enhance the saturation by a factor of 2. Returns: torch.Tensor: Adjusted image. 
""" def __init__(self, saturation_factor: Union[float, torch.Tensor]) -> None: super(AdjustSaturation, self).__init__() self.saturation_factor: Union[float, torch.Tensor] = saturation_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_saturation(input, self.saturation_factor) class AdjustHue(nn.Module): r"""Adjust hue of an image. The input image is expected to be an RGB image in the range of [0, 1]. Args: input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N). hue_factor (float): How much to shift the hue channel. Should be in [-PI, PI]. PI and -PI give complete reversal of hue channel in HSV space in positive and negative direction respectively. 0 means no shift. Therefore, both -PI and PI will give an image with complementary colors while 0 gives the original image. Returns: torch.Tensor: Adjusted image. """ def __init__(self, hue_factor: Union[float, torch.Tensor]) -> None: super(AdjustHue, self).__init__() self.hue_factor: Union[float, torch.Tensor] = hue_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_hue(input, self.hue_factor) class AdjustGamma(nn.Module): r"""Perform gamma correction on an image. The input image is expected to be in the range of [0, 1]. Args: input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N). gamma (float): Non negative real number, same as γ\gammaγ in the equation. gamma larger than 1 make the shadows darker, while gamma smaller than 1 make dark regions lighter. gain (float, optional): The constant multiplier. Default 1. Returns: torch.Tensor: Adjusted image. """ def __init__(self, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> None: super(AdjustGamma, self).__init__() self.gamma: Union[float, torch.Tensor] = gamma self.gain: Union[float, torch.Tensor] = gain def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_gamma(input, self.gamma, self.gain) class AdjustContrast(nn.Module): r"""Adjust Contrast of an image. This implementation aligns OpenCV, not PIL. Hence, the output differs from TorchVision. The input image is expected to be in the range of [0, 1]. Args: input (torch.Tensor): Image to be adjusted in the shape of (\*, N). contrast_factor (Union[float, torch.Tensor]): Contrast adjust factor per element in the batch. 0 generates a compleatly black image, 1 does not modify the input image while any other non-negative number modify the brightness by this factor. Returns: torch.Tensor: Adjusted image. """ def __init__(self, contrast_factor: Union[float, torch.Tensor]) -> None: super(AdjustContrast, self).__init__() self.contrast_factor: Union[float, torch.Tensor] = contrast_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_contrast(input, self.contrast_factor) class AdjustBrightness(nn.Module): r"""Adjust Brightness of an image. This implementation aligns OpenCV, not PIL. Hence, the output differs from TorchVision. The input image is expected to be in the range of [0, 1]. Args: input (torch.Tensor): Image/Input to be adjusted in the shape of (\*, N). brightness_factor (Union[float, torch.Tensor]): Brightness adjust factor per element in the batch. 0 does not modify the input image while any other number modify the brightness. Returns: torch.Tensor: Adjusted image. 
""" def __init__(self, brightness_factor: Union[float, torch.Tensor]) -> None: super(AdjustBrightness, self).__init__() self.brightness_factor: Union[float, torch.Tensor] = brightness_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_brightness(input, self.brightness_factor)
2.640625
3
pommerman/__init__.py
rmccann01/playground
725
1407
<filename>pommerman/__init__.py
'''Entry point into the pommerman module'''
import gym
import inspect

from . import agents
from . import configs
from . import constants
from . import forward_model
from . import helpers
from . import utility
from . import network

gym.logger.set_level(40)
REGISTRY = None


def _register():
    global REGISTRY
    REGISTRY = []
    for name, f in inspect.getmembers(configs, inspect.isfunction):
        if not name.endswith('_env'):
            continue
        config = f()
        gym.envs.registration.register(
            id=config['env_id'],
            entry_point=config['env_entry_point'],
            kwargs=config['env_kwargs']
        )
        REGISTRY.append(config['env_id'])


# Register environments with gym
_register()


def make(config_id, agent_list, game_state_file=None, render_mode='human'):
    '''Makes the pommerman env and registers it with gym'''
    assert config_id in REGISTRY, "Unknown configuration '{}'. " \
        "Possible values: {}".format(config_id, REGISTRY)
    env = gym.make(config_id)

    for id_, agent in enumerate(agent_list):
        assert isinstance(agent, agents.BaseAgent)
        # NOTE: This is IMPORTANT so that the agent character is initialized
        agent.init_agent(id_, env.spec._kwargs['game_type'])

    env.set_agents(agent_list)
    env.set_init_game_state(game_state_file)
    env.set_render_mode(render_mode)
    return env


from . import cli
2.46875
2
demo/demo/accounts/urls.py
caravancoop/rest-auth-toolkit
1
1408
from django.urls import path

from .views import ProfileView


urlpatterns = [
    path('', ProfileView.as_view(), name='user-profile'),
]
1.585938
2
test/test_pipeline/components/classification/test_passive_aggressive.py
vardaan-raj/auto-sklearn
1
1409
import sklearn.linear_model

from autosklearn.pipeline.components.classification.passive_aggressive import \
    PassiveAggressive

from .test_base import BaseClassificationComponentTest


class PassiveAggressiveComponentTest(BaseClassificationComponentTest):

    __test__ = True

    res = dict()
    res["default_iris"] = 0.92
    res["iris_n_calls"] = 5
    res["default_iris_iterative"] = 0.92
    res["iris_iterative_n_iter"] = 32
    res["default_iris_proba"] = 0.29271032477461295
    res["default_iris_sparse"] = 0.4
    res["default_digits"] = 0.9156041287188829
    res["digits_n_calls"] = 6
    res["default_digits_iterative"] = 0.9156041287188829
    res["digits_iterative_n_iter"] = 64
    res["default_digits_binary"] = 0.9927140255009107
    res["default_digits_multilabel"] = 0.90997912489192
    res["default_digits_multilabel_proba"] = 1.0
    res['ignore_hps'] = ['max_iter']

    sk_mod = sklearn.linear_model.PassiveAggressiveClassifier
    module = PassiveAggressive
    step_hyperparameter = {
        'name': 'max_iter',
        'value': module.get_max_iter(),
    }
2.40625
2
tensorflow_datasets/structured/dart/dart_test.py
harsh020/datasets
1
1410
<gh_stars>1-10
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Dart dataset tests."""

import json

import mock
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.structured.dart import dart


class DartTest(tfds.testing.DatasetBuilderTestCase):
  DATASET_CLASS = dart.Dart
  SPLITS = {
      'train': 2,
      'validation': 1,
      'test': 2,
  }

  def test_split_generators(self):
    json_str = """
    [
      {
        "tripleset": [
          ["Mars Hill College", "JOINED", "1973"],
          ["Mars Hill College", "LOCATION", "Mars Hill, North Carolina"]
        ],
        "subtree_was_extended": true,
        "annotations": [
          {
            "source": "WikiSQL_decl_sents",
            "text": "A school from Mars Hill, North Carolina, joined in 1973."
          }
        ]
      }
    ]
    """
    expected_examples = [{
        'input_text': {
            'table': [
                {
                    'column_header': 'subject',
                    'row_number': 0,
                    'content': 'Mars Hill College',
                },
                {
                    'column_header': 'predicate',
                    'row_number': 0,
                    'content': 'JOINED',
                },
                {
                    'column_header': 'object',
                    'row_number': 0,
                    'content': '1973',
                },
                {
                    'column_header': 'subject',
                    'row_number': 1,
                    'content': 'Mars Hill College',
                },
                {
                    'column_header': 'predicate',
                    'row_number': 1,
                    'content': 'LOCATION',
                },
                {
                    'column_header': 'object',
                    'row_number': 1,
                    'content': 'Mars Hill, North Carolina',
                },
            ]
        },
        'target_text': 'A school from Mars Hill, North Carolina, joined in 1973.'
    }]

    dart_dataset = dart.Dart()
    with mock.patch.object(
        json, 'load', return_value=json.loads(json_str)), mock.patch.object(
            tf, 'io'):
      for i, (_, example) in enumerate(dart_dataset._generate_examples('')):
        self.assertCountEqual(example, expected_examples[i])


if __name__ == '__main__':
  tfds.testing.test_main()
2.265625
2
exp/exp_informer_dad.py
AdamLohSg/GTA
8
1411
from data.data_loader_dad import ( NASA_Anomaly, WADI ) from exp.exp_basic import Exp_Basic from models.model import Informer from utils.tools import EarlyStopping, adjust_learning_rate from utils.metrics import metric from sklearn.metrics import classification_report import numpy as np import torch import torch.nn as nn from torch import optim from torch.utils.data import DataLoader import os import time import warnings warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic): def __init__(self, args): super(Exp_Informer_DAD, self).__init__(args) def _build_model(self): model_dict = { 'informer':Informer, } if self.args.model=='informer': model = model_dict[self.args.model]( self.args.enc_in, self.args.dec_in, self.args.c_out, self.args.seq_len, self.args.label_len, self.args.pred_len, self.args.factor, self.args.d_model, self.args.n_heads, self.args.e_layers, self.args.d_layers, self.args.d_ff, self.args.dropout, self.args.attn, self.args.embed, self.args.data[:-1], self.args.activation, self.device ) return model.double() def _get_data(self, flag): args = self.args data_dict = { 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI, } Data = data_dict[self.args.data] if flag == 'test': shuffle_flag = False; drop_last = True; batch_size = args.batch_size else: shuffle_flag = True; drop_last = True; batch_size = args.batch_size data_set = Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target ) print(flag, len(data_set)) data_loader = DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader def _select_optimizer(self): model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return model_optim def _select_criterion(self): criterion = nn.MSELoss() return criterion def vali(self, vali_data, vali_loader, criterion): self.model.eval() total_loss = [] for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu() true = batch_y.detach().cpu() loss = criterion(pred, true) total_loss.append(loss) total_loss = np.average(total_loss) self.model.train() return total_loss def train(self, setting): train_data, train_loader = self._get_data(flag = 'train') vali_data, vali_loader = self._get_data(flag = 'val') test_data, test_loader = self._get_data(flag = 'test') path = './checkpoints/'+setting if not os.path.exists(path): os.makedirs(path) time_now = time.time() train_steps = len(train_loader) early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) model_optim = self._select_optimizer() criterion = self._select_criterion() for epoch in range(self.args.train_epochs): iter_count = 0 train_loss = [] self.model.train() for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader): iter_count += 1 model_optim.zero_grad() batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() 
batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) loss = criterion(outputs, batch_y) train_loss.append(loss.item()) if (i+1) % 100==0: print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item())) speed = (time.time()-time_now)/iter_count left_time = speed*((self.args.train_epochs - epoch)*train_steps - i) print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) iter_count = 0 time_now = time.time() loss.backward() model_optim.step() train_loss = np.average(train_loss) vali_loss = self.vali(vali_data, vali_loader, criterion) test_loss = self.vali(test_data, test_loader, criterion) print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format( epoch + 1, train_steps, train_loss, vali_loss, test_loss)) early_stopping(vali_loss, self.model, path) if early_stopping.early_stop: print("Early stopping") break adjust_learning_rate(model_optim, epoch+1, self.args) best_model_path = path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return self.model def test(self, setting): test_data, test_loader = self._get_data(flag='test') self.model.eval() preds = [] trues = [] labels = [] with torch.no_grad(): for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu().numpy()#.squeeze() true = batch_y.detach().cpu().numpy()#.squeeze() batch_label = batch_label.long().detach().numpy() preds.append(pred) trues.append(true) labels.append(batch_label) preds = np.array(preds) trues = np.array(trues) labels = np.array(labels) print('test shape:', preds.shape, trues.shape) preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1]) trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1]) labels = labels.reshape(-1, labels.shape[-1]) print('test shape:', preds.shape, trues.shape) # result save folder_path = './results/' + setting +'/' if not os.path.exists(folder_path): os.makedirs(folder_path) mae, mse, rmse, mape, mspe = metric(preds, trues) print('mse:{}, mae:{}'.format(mse, mae)) np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe])) np.save(folder_path+'pred.npy', preds) np.save(folder_path+'true.npy', trues) np.save(folder_path+'label.npy', labels) return
2.078125
2
tests/components/mysensors/conftest.py
liangleslie/core
30,023
1412
<filename>tests/components/mysensors/conftest.py
"""Provide common mysensors fixtures."""
from __future__ import annotations

from collections.abc import AsyncGenerator, Callable, Generator
import json
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch

from mysensors import BaseSyncGateway
from mysensors.persistence import MySensorsJSONDecoder
from mysensors.sensor import Sensor
import pytest

from homeassistant.components.device_tracker.legacy import Device
from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN
from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE
from homeassistant.components.mysensors.const import (
    CONF_BAUD_RATE,
    CONF_DEVICE,
    CONF_GATEWAY_TYPE,
    CONF_GATEWAY_TYPE_SERIAL,
    CONF_VERSION,
    DOMAIN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component

from tests.common import MockConfigEntry, load_fixture


@pytest.fixture(autouse=True)
def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]:
    """Mock out device tracker known devices storage."""
    devices = mock_device_tracker_conf
    return devices


@pytest.fixture(name="mqtt")
def mock_mqtt_fixture(hass: HomeAssistant) -> None:
    """Mock the MQTT integration."""
    hass.config.components.add(MQTT_DOMAIN)


@pytest.fixture(name="is_serial_port")
def is_serial_port_fixture() -> Generator[MagicMock, None, None]:
    """Patch the serial port check."""
    with patch("homeassistant.components.mysensors.gateway.cv.isdevice") as is_device:
        is_device.side_effect = lambda device: device
        yield is_device


@pytest.fixture(name="gateway_nodes")
def gateway_nodes_fixture() -> dict[int, Sensor]:
    """Return the gateway nodes dict."""
    return {}


@pytest.fixture(name="serial_transport")
async def serial_transport_fixture(
    gateway_nodes: dict[int, Sensor],
    is_serial_port: MagicMock,
) -> AsyncGenerator[dict[int, Sensor], None]:
    """Mock a serial transport."""
    with patch(
        "mysensors.gateway_serial.AsyncTransport", autospec=True
    ) as transport_class, patch("mysensors.task.OTAFirmware", autospec=True), patch(
        "mysensors.task.load_fw", autospec=True
    ), patch(
        "mysensors.task.Persistence", autospec=True
    ) as persistence_class:
        persistence = persistence_class.return_value
        mock_gateway_features(persistence, transport_class, gateway_nodes)
        yield transport_class


def mock_gateway_features(
    persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor]
) -> None:
    """Mock the gateway features."""

    async def mock_schedule_save_sensors() -> None:
        """Load nodes from via persistence."""
        gateway = transport_class.call_args[0][0]
        gateway.sensors.update(nodes)

    persistence.schedule_save_sensors = AsyncMock(
        side_effect=mock_schedule_save_sensors
    )
    # For some reason autospeccing does not recognize these methods.
    persistence.safe_load_sensors = MagicMock()
    persistence.save_sensors = MagicMock()

    async def mock_connect() -> None:
        """Mock the start method."""
        transport.connect_task = MagicMock()
        gateway = transport_class.call_args[0][0]
        gateway.on_conn_made(gateway)

    transport = transport_class.return_value
    transport.connect_task = None
    transport.connect.side_effect = mock_connect


@pytest.fixture(name="transport")
def transport_fixture(serial_transport: MagicMock) -> MagicMock:
    """Return the default mocked transport."""
    return serial_transport


@pytest.fixture
def transport_write(transport: MagicMock) -> MagicMock:
    """Return the transport mock that accepts string messages."""
    return transport.return_value.send


@pytest.fixture(name="serial_entry")
async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry:
    """Create a config entry for a serial gateway."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL,
            CONF_VERSION: "2.3",
            CONF_DEVICE: "/test/device",
            CONF_BAUD_RATE: DEFAULT_BAUD_RATE,
        },
    )
    return entry


@pytest.fixture(name="config_entry")
def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry:
    """Provide the config entry used for integration set up."""
    return serial_entry


@pytest.fixture(name="integration")
async def integration_fixture(
    hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry
) -> AsyncGenerator[MockConfigEntry, None]:
    """Set up the mysensors integration with a config entry."""
    config: dict[str, Any] = {}
    config_entry.add_to_hass(hass)
    with patch("homeassistant.components.mysensors.device.UPDATE_DELAY", new=0):
        await async_setup_component(hass, DOMAIN, config)
        await hass.async_block_till_done()
        yield config_entry


@pytest.fixture
def receive_message(
    transport: MagicMock, integration: MockConfigEntry
) -> Callable[[str], None]:
    """Receive a message for the gateway."""

    def receive_message_callback(message_string: str) -> None:
        """Receive a message with the transport.

        The message_string parameter is a string in the MySensors message format.
        """
        gateway = transport.call_args[0][0]
        # node_id;child_id;command;ack;type;payload\n
        gateway.logic(message_string)

    return receive_message_callback


@pytest.fixture(name="gateway")
def gateway_fixture(
    transport: MagicMock, integration: MockConfigEntry
) -> BaseSyncGateway:
    """Return a setup gateway."""
    return transport.call_args[0][0]


def load_nodes_state(fixture_path: str) -> dict:
    """Load mysensors nodes fixture."""
    return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder)


def update_gateway_nodes(
    gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor]
) -> dict:
    """Update the gateway nodes."""
    gateway_nodes.update(nodes)
    return nodes


@pytest.fixture(name="gps_sensor_state", scope="session")
def gps_sensor_state_fixture() -> dict:
    """Load the gps sensor state."""
    return load_nodes_state("mysensors/gps_sensor_state.json")


@pytest.fixture
def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor:
    """Load the gps sensor."""
    nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state)
    node = nodes[1]
    return node


@pytest.fixture(name="power_sensor_state", scope="session")
def power_sensor_state_fixture() -> dict:
    """Load the power sensor state."""
    return load_nodes_state("mysensors/power_sensor_state.json")


@pytest.fixture
def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor:
    """Load the power sensor."""
    nodes = update_gateway_nodes(gateway_nodes, power_sensor_state)
    node = nodes[1]
    return node


@pytest.fixture(name="energy_sensor_state", scope="session")
def energy_sensor_state_fixture() -> dict:
    """Load the energy sensor state."""
    return load_nodes_state("mysensors/energy_sensor_state.json")


@pytest.fixture
def energy_sensor(
    gateway_nodes: dict[int, Sensor], energy_sensor_state: dict
) -> Sensor:
    """Load the energy sensor."""
    nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state)
    node = nodes[1]
    return node


@pytest.fixture(name="sound_sensor_state", scope="session")
def sound_sensor_state_fixture() -> dict:
    """Load the sound sensor state."""
    return load_nodes_state("mysensors/sound_sensor_state.json")


@pytest.fixture
def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor:
    """Load the sound sensor."""
    nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state)
    node = nodes[1]
    return node


@pytest.fixture(name="distance_sensor_state", scope="session")
def distance_sensor_state_fixture() -> dict:
    """Load the distance sensor state."""
    return load_nodes_state("mysensors/distance_sensor_state.json")


@pytest.fixture
def distance_sensor(
    gateway_nodes: dict[int, Sensor], distance_sensor_state: dict
) -> Sensor:
    """Load the distance sensor."""
    nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state)
    node = nodes[1]
    return node


@pytest.fixture(name="temperature_sensor_state", scope="session")
def temperature_sensor_state_fixture() -> dict:
    """Load the temperature sensor state."""
    return load_nodes_state("mysensors/temperature_sensor_state.json")


@pytest.fixture
def temperature_sensor(
    gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict
) -> Sensor:
    """Load the temperature sensor."""
    nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state)
    node = nodes[1]
    return node


@pytest.fixture(name="text_node_state", scope="session")
def text_node_state_fixture() -> dict:
    """Load the text node state."""
    return load_nodes_state("mysensors/text_node_state.json")


@pytest.fixture
def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) -> Sensor:
"""Load the text child node.""" nodes = update_gateway_nodes(gateway_nodes, text_node_state) node = nodes[1] return node
2.171875
2
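The receive_message fixture in the conftest above feeds raw MySensors serial strings to gateway.logic() and only names the wire format in a comment (node_id;child_id;command;ack;type;payload). A framework-free sketch of that format for orientation; the example message and payload are made up.

def parse_mysensors(message: str) -> dict:
    # Split on ';' into the six fields named in the fixture's comment.
    node_id, child_id, command, ack, msg_type, payload = message.strip().split(";", 5)
    return {
        "node_id": int(node_id),
        "child_id": int(child_id),
        "command": int(command),
        "ack": int(ack),
        "type": int(msg_type),
        "payload": payload,
    }

print(parse_mysensors("1;1;1;0;23;21\n"))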
Detect.py
SymenYang/Vanish-Point-Detect
2
1413
import cv2 as cv import numpy as np import copy import math import Edges import INTPoint eps = 1e-7 votes = {} Groups = [] VPoints = [] Centers = [] Cluster = [] voters = {} def getEdges(image): #moved to Edges.py return Edges.getEdges(image) def getLines(edges): #moved to Edges.py return Edges.getLines(edges) def checkRound(pos,edges): #moved to Edges.py return Edges.checkRound(pos,edges) def outOfSize(pos,edges): #moved to Edges.py return Edges.outOfSize(pos,edges) def extenLine(line,edges): #moved to Edges.py return Edges.extenLine(line,edges) def extenLines(lines,edges): #moved to Edges.py return Edges.extenLines(lines,edges) def shouldMerge(line1,line2): #moved to Edges.py return Edges.shouldMerge(line1,line2) def mergeLines(lines): #moved to Edges.py return Edges.mergeLines(lines) def getLineABC(line): #moved to Edges.py return Edges.getLineABC(line) def getCirAnch(a,b): #moved to Edges.py return Edges.getCirAnch(a,b) def getCrossPoint(linea,lineb): #moved to INTPoint.py return INTPoint.getIntersectPoint(linea,lineb) def sortLines(lines): #moved to Edges.py return Edges.sortLines(lines) def getVPoints2(lines,arange = 0.2617): #moved to INTPoint.py global VPoints VPoints = INTPoint.getVPoints2(lines,arange) return VPoints def getVPoints(num = 16): #this function is fallen into disuse because of the low speed for i in range(0,num + 1,1): lens = len(Groups[i]) for j in range(0,lens,1): for k in range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def removeSame(list): #moved to INTPoint.py return INTPoint.removeSame(list) def getLinesLength(line): #moved to INTPoint.py return INTPoint.getLinesLength(line) def getMidPoint(line): #moved to INTPoint.py return INTPoint.getMidPoint(line) def getArch(line,point): #moved to INTPoint.py return INTPoint.getArch(line,point) def voteForPoint(lines): #moved to INTPoint.py global votes global voters votes,voters = INTPoint.voteForPoint(lines,VPoints) return def getGraPoint(points): count = 1.0 sumx = 0.0 sumy = 0.0 for point in points: w = votes[point] count += w sumx += w * point[0] sumy += w * point[1] return (sumx/count,sumy/count) def devideIntoPoints(Points): global Cluster lens = len(Cluster) for i in range(0,lens,1): Cluster[i] = [] for point in Points: if point[0] == 'p' or point[0] == 'h' or point[0] == 'v': continue if votes[point] == 0: continue minlens = 1e15 minpos = 0 now = -1 for cen in Centers: now += 1 lens = getLinesLength((point[0],point[1],cen[0],cen[1])) if lens < minlens: minlens = lens minpos = now Cluster[minpos].append(point) def KMean(points,K = 3,step = 50): global Cluster global Centers Cluster = [] Centers = [] if K == 1: step = 1 for i in range(0,K,1): Cluster.append([]) Centers.append([0,0]) count = 0 for point in points: if point[0] != 'p' and point[0] != 'v' and point[0] != 'h' and votes[point] != 0: Centers[count][0] = point[0] Centers[count][1] = point[1] count += 1 if count == K: break for i in range(0,step,1): devideIntoPoints(points) for i in range(0,K,1): Centers[i] = getGraPoint(Cluster[i]) def getFinal(points): count = 0.0 num = 0 p1 = 0.0 ret1 = [] p2 = 0.0 ret2 = [] for item in votes: if item[0] == 'p' or item[0] == 'h' or item[0] == 'v': if votes[item] > p1: p2 = p1 ret2 = ret1 p1 = votes[item] ret1 = item else: if votes[item] > p2: p2 = votes[item] ret2 = item else: count += votes[item] num += 1 K = 3 ret = [] count = count / num * 0.1 if p1 > count: K -= 1 ret.append(ret1) if p2 > count: K -= 1 ret.append(ret2) KMean(points,K) for i in range(0,K,1): ret.append(Centers[i]) return 
ret def deal(inputname,outputname): global votes global Groups global VPoints global Centers global Cluster global voters votes = {} Groups = [] VPoints = [] Centers = [] Cluster = [] voters = {} image = cv.imread(inputname) edges = getEdges(image) cv.imwrite(outputname + 'edges.jpg',edges) lines = getLines(edges) lines2 = copy.deepcopy(lines) lines2 = extenLines(lines2,edges) lines2 = mergeLines(lines2) #devideIntoGroups(lines2,3) lines2 = sortLines(lines2) getVPoints2(lines2) VPoints = removeSame(VPoints) voteForPoint(lines2) votes2 = sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True) lenofvotes = min(len(votes2),max(5,int(len(votes2) * 0.2))) votesFinal = {} VPoints = [] for i in range(0,lenofvotes,1): votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) for i in range(lenofvotes,len(votes2),1): if votes2[i][0][0] == 'h' or votes2[i][0][0] == 'v' or votes2[i][0][0] == 'p': votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) votes = votesFinal ans = getFinal(VPoints) print ans edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2 = copy.deepcopy(edges) for item in lines: if item[0] == 'N': continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for item in lines2: cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color = [255,0,0,0] for clu in Cluster: for i in range(0,4,1): if color[i] == 255: color[i+1] = 255 color[i] = 0 break for point in clu: if point[0] > 0 and point[1] > 0: if point[0] < edges.shape[1] and point[1] < edges.shape[0]: if votes[point] == 0: continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for point in ans: if point[0] > 0 and point[1] > 0: if point[0] < edges.shape[1] and point[1] < edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname + 'linedetect.jpg',edges) cv.imwrite(outputname + 'answer.jpg',edges2) fd = open(outputname + 'answer.txt','w') fd.write('(' + str(ans[0][0]) + ',' + str(ans[0][1]) + ')(' + str(ans[1][0]) + ',' + str(ans[1][1]) + ')(' + str(ans[2][0]) + ',' + str(ans[2][1]) + ')') fd.close deal("data/1.jpg",'1')
2.453125
2
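Detect.py above delegates the intersection of two lines to INTPoint.getIntersectPoint, which is not included in this record. A hedged stand-in for the underlying geometry (not the project's actual implementation): intersect two lines given in ax + by + c = 0 form via the homogeneous cross product.

import numpy as np

def intersect(l1, l2):
    # Lines as (a, b, c); the cross product gives the homogeneous intersection point.
    p = np.cross(l1, l2)
    if abs(p[2]) < 1e-7:        # parallel lines meet at infinity
        return None
    return (p[0] / p[2], p[1] / p[2])

# y = x intersected with y = 2 gives (2.0, 2.0)
print(intersect((1.0, -1.0, 0.0), (0.0, 1.0, -2.0)))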
test/test_files.py
wanasit/labelling-notebook
0
1414
def test_list_example_directory(client):
    response = client.get("/api/files")
    assert response.status_code == 200

    file_list = response.get_json()
    assert len(file_list) == 5
    assert file_list[0]['key'] == 'image_annotated.jpg'
    assert file_list[1]['key'] == 'image.jpg'
    assert file_list[2]['key'] == 'more_images/'
    assert file_list[3]['key'] == 'more_images/01.jpg'
    assert file_list[4]['key'] == 'more_images/02.png'


def test_list_example_directory_nested(client):
    response = client.get("/api/files?path=more_images")
    assert response.status_code == 200

    file_list = response.get_json()
    assert len(file_list) == 2
    assert file_list[0]['key'] == '01.jpg'
    assert file_list[1]['key'] == '02.png'


def test_get_example_image(client):
    response = client.get("/api/files/image/x.jpg")
    assert response.status_code == 404

    response = client.get("/api/files/image/image.jpg")
    assert response.status_code == 200

    response = client.get("/api/files/image/more_images/01.jpg")
    assert response.status_code == 200


def test_get_example_image_data(client):
    response = client.get("/api/files/image_data/image.jpg")
    assert response.status_code == 404

    response = client.get("/api/files/image_data/image_annotated.jpg")
    assert response.status_code == 200

    data = response.get_json()
    assert 'annotations' in data
    assert 'tags' in data


def test_put_example_image_data(client):
    response = client.get("/api/files/image_data/image.jpg")
    assert response.status_code == 404

    response = client.put("/api/files/image_data/image.jpg", json={
        'annotations': [{'width': 10, 'height': 10, 'x': 0, 'y': 0}],
        'tags': ['a', 'b']
    })
    assert response.status_code == 200

    response = client.get("/api/files/image_data/image.jpg")
    assert response.status_code == 200

    data = response.get_json()
    assert 'annotations' in data
    assert 'tags' in data
2.515625
3
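The tests above depend on a pytest fixture named client that is not part of this record. A minimal sketch of the kind of conftest.py they assume; the app.create_app import path is a guess for illustration, not taken from the repository.

import pytest

from app import create_app   # assumed application factory; adjust to the real package layout

@pytest.fixture
def client():
    app = create_app()
    app.config["TESTING"] = True
    with app.test_client() as client:
        yield client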
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/course_groups/migrations/0001_initial.py
osoco/better-ways-of-thinking-about-software
3
1415
from django.db import migrations, models from django.conf import settings from opaque_keys.edx.django.models import CourseKeyField class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='CohortMembership', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('course_id', CourseKeyField(max_length=255)), ], ), migrations.CreateModel( name='CourseCohort', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random', 'Random'), ('manual', 'Manual')])), ], ), migrations.CreateModel( name='CourseCohortsSettings', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('is_cohorted', models.BooleanField(default=False)), ('course_id', CourseKeyField(help_text='Which course are these settings associated with?', unique=True, max_length=255, db_index=True)), ('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions', blank=True)), ('always_cohort_inline_discussions', models.BooleanField(default=True)), ], ), migrations.CreateModel( name='CourseUserGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='What is the name of this group? Must be unique within a course.', max_length=255)), ('course_id', CourseKeyField(help_text='Which course is this group associated with?', max_length=255, db_index=True)), ('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])), ('users', models.ManyToManyField(help_text='Who is in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)), ], ), migrations.CreateModel( name='CourseUserGroupPartitionGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('partition_id', models.IntegerField(help_text='contains the id of a cohorted partition in this course')), ('group_id', models.IntegerField(help_text='contains the id of a specific group within the cohorted partition')), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)), ], ), migrations.AddField( model_name='coursecohort', name='course_user_group', field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='course_user_group', field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE), ), migrations.AlterUniqueTogether( name='courseusergroup', unique_together={('name', 'course_id')}, ), migrations.AlterUniqueTogether( name='cohortmembership', unique_together={('user', 'course_id')}, ), ]
1.953125
2
kafka-rockset-integration/generate_customers_data.py
farkaskid/recipes
21
1416
<filename>kafka-rockset-integration/generate_customers_data.py
"""Generate Customer Data"""
import csv
import random

from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID

ACQUISITION_SOURCES = [
    'OrganicSearch',
    'PaidSearch',
    'Email',
    'SocialMedia',
    'Display',
    'Affiliate',  # a missing comma here silently merged 'Affiliate' and 'Referral' into one entry
    'Referral'
]


def main():
    with open('customers.csv', 'w') as fout:
        writer = csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource'])
        writer.writeheader()
        for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1):
            record = {
                'CustomerID': int(customer_id),
                'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop()
            }
            writer.writerow(record)


if __name__ == '__main__':
    main()
3.296875
3
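A quick, self-contained way to sanity-check the sampling used above: draw from the same source list with random.choice and confirm the distribution comes out roughly uniform. The counts printed are illustrative, not output from the script itself.

import random
from collections import Counter

sources = ['OrganicSearch', 'PaidSearch', 'Email', 'SocialMedia', 'Display', 'Affiliate', 'Referral']
draws = Counter(random.choice(sources) for _ in range(10_000))
print(draws.most_common(3))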
parsl/tests/test_error_handling/test_resource_spec.py
MatthewBM/parsl
0
1417
<gh_stars>0
import parsl
from parsl.app.app import python_app
from parsl.tests.configs.local_threads import config
from parsl.executors.errors import UnsupportedFeatureError
from parsl.executors import WorkQueueExecutor


@python_app
def double(x, parsl_resource_specification={}):
    return x * 2


def test_resource(n=2):
    spec = {'cores': 2, 'memory': '1GiB'}
    fut = double(n, parsl_resource_specification=spec)
    try:
        fut.result()
    except Exception as e:
        assert isinstance(e, UnsupportedFeatureError)
    else:
        executors = parsl.dfk().executors
        executor = None
        for label in executors:
            if label != 'data_manager':
                executor = executors[label]
                break
        assert isinstance(executor, WorkQueueExecutor)


if __name__ == '__main__':
    local_config = config
    parsl.load(local_config)
    x = test_resource(2)
2.359375
2
cincan/file_tool.py
cincanproject/cincan-command
1
1418
<filename>cincan/file_tool.py import pathlib import re from typing import List, Optional, Dict, Set, Tuple, Iterable import shlex class FileMatcher: """Match files based on a pattern""" def __init__(self, match_string: str, include: bool): self.match_string = match_string self.exact = '*' not in match_string self.absolute_path = match_string.startswith('/') self.include = include @classmethod def parse(cls, match_strings: List[str]) -> List['FileMatcher']: """Parse pattens from a list""" res = [] for m in match_strings: if m.startswith('^'): res.append(FileMatcher(m[1:], include=False)) else: res.append(FileMatcher(m, include=True)) return res def filter_upload_files(self, files: List[pathlib.Path]) -> List[pathlib.Path]: """Filter uploaded files by this pattern""" return list(filter(lambda f: self.__match(f.as_posix()) == self.include, files)) def filter_download_files(self, files: List[str], work_dir: str) -> List[str]: """Filter downloaded files by this pattern""" if self.absolute_path: # matching absolute files res = [] for file in files: if self.__match(file) == self.include: res.append(file) return res else: # matching files relative to working directory res = [] for file in files: try: rel_file = pathlib.Path(file).relative_to(work_dir).as_posix() except ValueError: if not self.include: res.append(file) continue if self.__match(rel_file) == self.include: res.append(file) return res def __match(self, value: str) -> bool: """Match value with this pattern""" if self.exact: return self.match_string == value split = self.match_string.split("*") i = 0 off = 0 len_v = len(value) s = split[0] len_s = len(s) if len_s > 0: if len_v < i + len_s or value[i:i + len_s] != s: return False off += len_s i += 1 while i < len(split): s = split[i] len_s = len(s) if len_s > 0: off = value.find(s, off) if off < 0: return False i += 1 off += len_s if split[-1] != '' and off != len_v: return False return True class FileResolver: """Resolve files from command line arguments""" def __init__(self, args: List[str], directory: pathlib.Path, output_dirs: List[str] = None, do_resolve: bool = True, input_filters: List[FileMatcher] = None): self.original_args = args self.directory = directory self.host_files: List[pathlib.Path] = [] self.command_args = args.copy() # Additional punctuation chars, whereas we might split command (On top of shlex basic) self.additional_punc_chars = "=," # these are output directories, upload them without contents for dir in output_dirs or []: self.host_files.append(pathlib.Path(dir)) self.output_dirs = set([pathlib.Path(d) for d in (output_dirs or [])]) if do_resolve: # autodetect input files self.__analyze() # exclude files by filters, perhaps? for filth in input_filters or []: self.host_files = filth.filter_upload_files(self.host_files) def __file_exists(self, path: str, already_listed: Set[pathlib.Path], parent_check: bool = True) -> Optional[str]: """ Method for evaluating the possible existence of input files and potential output directories. If there is local match for file/directory, it is marked as uploadable file into container, and path is changed to be relative of working directory of container, when command is passed into container. Special case: when possible argument is coming from first layer (not quoted) of arguments, is valid path and has no whitespace in arguments, we are processing this part later, because we can support special markups such as % and & in here. """ o_file = pathlib.Path(path) # does file/dir exists? No attempt to copy '/', leave it as it is... 
file_exists = o_file.exists() and not all([c == '/' for c in path]) # When filename contains potentially spaces, were are only interested about absolute path # Not checking parents if not file_exists and not parent_check and not " " in path: return None if not file_exists and not o_file.is_absolute() and '..' not in o_file.as_posix(): # the file does not exist, but it is relative path to a file/directory... o_parent = o_file.parent while not file_exists and o_parent and o_parent.as_posix() != '.': if o_parent.is_dir() and o_parent not in self.host_files: file_exists = True # ...and there is existing parent directory, perhaps for output o_parent = o_parent.parent if file_exists: h_file, a_name = self.__archive_name_for(o_file) if h_file not in already_listed: self.host_files.append(h_file) already_listed.add(h_file) # '/' in the end gets eaten away... fix for p in range(len(path) - 1, 0, -1): if path[p] != '/': break a_name += '/' if file_exists and o_file.is_dir() and o_file not in self.output_dirs: # include files in sub directories self.__include_sub_dirs(o_file.iterdir(), already_listed) if file_exists: return a_name else: return None def __analyze(self): """Analyze the command line""" self.command_args = [] already_listed: Set[pathlib.Path] = self.output_dirs.copy() for o_arg in self.original_args: a_name = self.__file_exists(o_arg, already_listed, parent_check=False) # Potential path as argument, not dividing it pieces yet for further analysis if a_name: self.command_args.append(a_name) continue # NOTE: Shlex not Windows compatible! lex = shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars) split = list(lex) modified_paths = [] for part in split: a_name = self.__file_exists(part, already_listed) if a_name: modified_paths.append((part, a_name)) for m_part, m_name in modified_paths: o_arg = o_arg.replace(m_part, m_name) self.command_args.append(o_arg) def __include_sub_dirs(self, files: Iterable[pathlib.Path], file_set: Set[pathlib.Path]): """Include files from sub directories""" for f in files: if f not in file_set: self.host_files.append(f) file_set.add(f) if f.is_dir(): self.__include_sub_dirs(f.iterdir(), file_set) def resolve_upload_files(self, upload_files: Dict[pathlib.Path, str]): """Resolve the files to upload""" for up_file in self.detect_upload_files(): host_file, arc_name = self.__archive_name_for(up_file) upload_files[host_file] = arc_name cmd_args = self.command_args return cmd_args def detect_upload_files(self, files: Optional[Iterable[pathlib.Path]] = None) -> List[pathlib.Path]: """Detect files to upload""" it_files = sorted(self.host_files) if files is None else files res = [] # filter out files which do not exist nor should exists for file in it_files: if file.exists() or file in self.output_dirs: res.append(file) if files is None: # make sure also paths leading to output files are uploaded all_dirs = set() for file in res: all_dirs.add(file) for p in file.parents: all_dirs.add(p) for file in filter(lambda f: not f.exists(), it_files): # file not exists, but marked for upload - must mean some sub directory for output p = file.parent while not p.exists(): p = p.parent if p not in all_dirs: res.append(p) return res @classmethod def __archive_name_for(cls, file: pathlib.Path) -> Tuple[pathlib.Path, str]: """Resolve host file and archive name for uploaded file""" if cls.__use_absolute_path(file): h_file = file.resolve() a_file = file.resolve().as_posix() a_file = a_file[1:] if a_file.startswith('/') else a_file else: h_file = file a_file = 
file.as_posix() return h_file, a_file @classmethod def __use_absolute_path(cls, file: pathlib.Path) -> bool: """Should use absolute path to refer a file path?""" # - use absolute paths, if /../ used (ok, quite weak) return file.is_absolute() or (".." in file.as_posix())
3.171875
3
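FileMatcher.__match above hand-rolls '*'-wildcard matching over otherwise exact strings. For simple patterns like the ones below, the stdlib fnmatch gives the same answers, which makes the intended semantics easy to see; note fnmatch also handles '?' and '[...]', which the class does not, so this is only an illustration.

from fnmatch import fnmatchcase

for pattern, value in [("*.txt", "notes.txt"), ("data/*", "data/a.bin"), ("exact.log", "exact.log")]:
    print(pattern, value, fnmatchcase(value, pattern))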
aws_interface/cloud/auth/set_me.py
hubaimaster/aws-interface
53
1419
from cloud.permission import Permission, NeedPermission
from cloud.message import error

# Define the input/output format of the function.
# This information is used when creating the *SDK*.
info = {
    'input_format': {
        'session_id': 'str',
        'field': 'str',
        'value?': 'str',
    },
    'output_format': {
        'user_id?': 'str',
    },
    'description': 'Set my information'
}


@NeedPermission(Permission.Run.Auth.set_me)
def do(data, resource):
    body = {}
    params = data['params']
    user = data['user']
    user_id = user['id']

    field = params.get('field')
    value = params.get('value', None)
    user = resource.db_get_item(user_id)

    # For security
    if field in ['id', 'email', 'password_hash', 'salt', 'groups', 'login_method']:
        body['error'] = error.FORBIDDEN_MODIFICATION
        return body
    else:
        user[field] = value

    resource.db_update_item(user_id, user)
    body['user_id'] = user_id
    return body
2.59375
3
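The handler above refuses to modify a fixed set of account fields. The same guard written as a standalone check; the field names are copied from the handler, everything else is illustrative.

PROTECTED_FIELDS = {'id', 'email', 'password_hash', 'salt', 'groups', 'login_method'}

def can_set(field: str) -> bool:
    # Reject writes to security-sensitive fields; everything else may be updated.
    return field not in PROTECTED_FIELDS

print(can_set('nickname'), can_set('email'))   # True False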
doc/gallery-src/analysis/run_blockMcnpMaterialCard.py
celikten/armi
1
1420
<filename>doc/gallery-src/analysis/run_blockMcnpMaterialCard.py
"""
Write MCNP Material Cards
=========================

Here we load a test reactor and write each component of one fuel block out as
MCNP material cards.

Normally, code-specific utility code would belong in a code-specific ARMI
plugin. But in this case, the need for MCNP materials cards is so pervasive
that it made it into the framework.
"""
from armi.reactor.tests import test_reactors
from armi.reactor.flags import Flags
from armi.utils.densityTools import formatMaterialCard
from armi.nucDirectory import nuclideBases as nb
from armi import configure

configure(permissive=True)

_o, r = test_reactors.loadTestReactor()
bFuel = r.core.getBlocks(Flags.FUEL)[0]

for ci, component in enumerate(bFuel, start=1):
    ndens = component.getNumberDensities()
    # convert nucName (str) keys to nuclideBase keys
    ndensByBase = {nb.byName[nucName]: dens for nucName, dens in ndens.items()}
    print("".join(formatMaterialCard(ndensByBase, matNum=ci)))
2.46875
2
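formatMaterialCard in the script above does the real formatting and needs an ARMI test reactor to run. Purely for orientation, a generic sketch of turning a {zaid: number density} mapping into an MCNP-style material card; the layout and the densities below are illustrative and not ARMI's exact output.

def material_card(mat_num, densities):
    lines = [f"m{mat_num}"]
    for zaid, ndens in densities.items():
        lines.append(f"     {zaid}  {ndens:.6e}")
    return "\n".join(lines)

# Made-up uranium number densities (atoms/b-cm)
print(material_card(1, {"92235.80c": 2.5e-4, "92238.80c": 7.1e-3}))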
life_line_chart/_autogenerate_data.py
mustaqimM/life_line_chart
0
1421
import names import os import datetime from random import random def generate_gedcom_file(): """generate some gedcom file""" db = {} db['n_individuals'] = 0 db['max_individuals'] = 8000 db['n_families'] = 0 db['yougest'] = None gedcom_content = """ 0 HEAD 1 SOUR Gramps 2 VERS 3.3.0 2 NAME Gramps 1 DATE {} 2 TIME 15:35:24 1 SUBM @SUBM@ 1 COPR Copyright (c) 2020 <NAME>,,,. 1 GEDC 2 VERS 5.5 1 CHAR UTF-8 1 LANG German """.format(datetime.date.today()) def generate_individual(db, birth_year, sex=None, last_name=None): if not sex: sex = 'F' if random() < 0.5 else 'M' first_name = names.get_first_name( gender='male' if sex == 'M' else 'female') if random() < 0.3: first_name += ' ' + \ names.get_first_name(gender='male' if sex == 'M' else 'female') if not last_name: last_name = names.get_last_name() birth_place = 'Paris' if random() < 0.5 else 'Rome' death_place = 'Zorge' if random() < 0.5 else 'Bruegge' db['n_individuals'] += 1 individual_id = '@I{}@'.format(db["n_individuals"]) death_year = birth_year + 40 + int(random()*20) db[individual_id] = { 'birth': birth_year, 'death': death_year, 'sex': sex, 'last_name': last_name } birth_date = '1 JUN {}'.format(birth_year) death_date = '1 JUN {}'.format(birth_year) if not db['yougest']: db['yougest'] = individual_id elif db[db['yougest']]['birth'] < birth_year: db['yougest'] = individual_id db[individual_id]['string'] = """0 {individual_id} INDI 1 NAME {first_name} /{last_name}/ 1 SEX {sex} 1 BIRT 2 DATE {birth_date} 2 PLAC {birth_place} 1 DEAT 2 DATE {death_date} 2 PLAC {death_place} """.format(**locals()) return individual_id def generate_family(db, husband_id, wife_id, children_ids, marriage_year, marriage_place=None): if not marriage_place: marriage_place = 'London' if random() < 0.5 else 'Tokio' db['n_families'] += 1 marriage_date = '1 MAY {}'.format(marriage_year) family_id = "@F{}@".format(db['n_families']) db[family_id] = {'string': """0 {family_id} FAM 1 HUSB {husband_id} 1 WIFE {wife_id} 1 MARR 2 DATE {marriage_date} 2 PLAC {marriage_place} """.format( **locals() )} for child_id in children_ids: db[family_id]['string'] += "1 CHIL {}\n".format(child_id) return family_id def find_by_birth_date(db, from_year, to_year, sex, exclude=[]): ids = [] for individual_id, data in db.items(): if not individual_id.startswith('@I'): continue if 'famc' in data: if data['birth'] > from_year and data['birth'] < to_year: if sex == data['sex']: if individual_id not in exclude: ids.append(individual_id) if ids: return ids[int(random()*len(ids))] return None def generate_recursive_family(db, start_year=1000, generations=2, husband_id=None, wife_id=None, siblings=[], max_children=5): if not husband_id: if random() < 0.2: exclude = siblings.copy() if wife_id: exclude += [wife_id] husband_id = find_by_birth_date( db, start_year, start_year + 10, sex='M', exclude=exclude) if not husband_id: husband_id = generate_individual( db, start_year + int(random()*5), sex='M') else: print('reused {}'.format(husband_id)) if not wife_id: if random() < 10.9: exclude = siblings.copy() + [husband_id] wife_id = find_by_birth_date( db, start_year, start_year + 10, sex='F', exclude=exclude) if not wife_id: wife_id = generate_individual( db, start_year + int(random()*5), sex='F') else: print('reused {}'.format(wife_id)) n_children = int((1+random()*(max_children-1)) * (1 - db['n_individuals'] / db['max_individuals'])) marriage_year = start_year + 20 + int(random()*5) children_ids = [] for i in range(n_children): children_ids.append(generate_individual( db, birth_year=marriage_year + 1 + 
int(random()*10), last_name=db[husband_id]['last_name'])) family_id = generate_family( db, husband_id, wife_id, children_ids, marriage_year) for i in range(n_children): db[children_ids[i]]['string'] += "1 FAMC "+family_id + '\n' db[children_ids[i]]['famc'] = family_id if generations > 0: generate_recursive_family( db, db[children_ids[i]]['birth'], generations - 1, children_ids[i] if db[children_ids[i] ]['sex'] == 'M' else None, children_ids[i] if db[children_ids[i] ]['sex'] == 'F' else None, children_ids) db[husband_id]['string'] += "1 FAMS "+family_id + '\n' db[wife_id]['string'] += "1 FAMS "+family_id + '\n' generate_recursive_family(db, generations=8, max_children=4) for k, v in db.items(): if k.startswith('@I'): gedcom_content += v['string'] for k, v in db.items(): if k.startswith('@F'): gedcom_content += v['string'] gedcom_content += '0 TRLR\n' open(os.path.join(os.path.dirname(__file__), '..', 'tests', 'autogenerated.ged'), 'w').write(gedcom_content) # generate_gedcom_file() def generate_individual_images(): from PIL import Image, ImageDraw, ImageFont def generate_one_image(filename, text, font_size=22, pos=(15, 40), size=(100, 100), color=(160, 160, 160)): img = Image.new('RGB', size, color=color) d = ImageDraw.Draw(img) font = ImageFont.truetype(r'arial.ttf', font_size) d.text(pos, text, fill=(0, 0, 0), font=font) img.save(filename) for i in range(20): generate_one_image( 'tests/images/individual_I6_image_age_{}.png'.format( 1+i*4 ), 'Age {}'.format( 1+i*4, )) generate_individual_images()
3.375
3
arcade/examples/sprite_bullets_enemy_aims.py
LiorAvrahami/arcade
1
1422
<filename>arcade/examples/sprite_bullets_enemy_aims.py """ Show how to have enemies shoot bullets aimed at the player. If Python and Arcade are installed, this example can be run from the command line with: python -m arcade.examples.sprite_bullets_enemy_aims """ import arcade import math import os SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE = "Sprites and Bullets Enemy Aims Example" BULLET_SPEED = 4 class MyGame(arcade.Window): """ Main application class """ def __init__(self, width, height, title): super().__init__(width, height, title) # Set the working directory (where we expect to find files) to the same # directory this .py file is in. You can leave this out of your own # code, but it is needed to easily run the examples using "python -m" # as mentioned at the top of this program. file_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK) self.frame_count = 0 self.enemy_list = None self.bullet_list = None self.player_list = None self.player = None def setup(self): self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_list = arcade.SpriteList() # Add player ship self.player = arcade.Sprite(":resources:images/space_shooter/playerShip1_orange.png", 0.5) self.player_list.append(self.player) # Add top-left enemy ship enemy = arcade.Sprite(":resources:images/space_shooter/playerShip1_green.png", 0.5) enemy.center_x = 120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy) # Add top-right enemy ship enemy = arcade.Sprite(":resources:images/space_shooter/playerShip1_green.png", 0.5) enemy.center_x = SCREEN_WIDTH - 120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy) def on_draw(self): """Render the screen. """ arcade.start_render() self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() def on_update(self, delta_time): """All the logic to move, and the game logic goes here. """ self.frame_count += 1 # Loop through each enemy that we have for enemy in self.enemy_list: # First, calculate the angle to the player. We could do this # only when the bullet fires, but in this case we will rotate # the enemy to face the player each frame, so we'll do this # each frame. # Position the start at the enemy's current location start_x = enemy.center_x start_y = enemy.center_y # Get the destination location for the bullet dest_x = self.player.center_x dest_y = self.player.center_y # Do math to calculate how to get the bullet to the destination. # Calculation the angle in radians between the start points # and end points. This is the angle the bullet will travel. x_diff = dest_x - start_x y_diff = dest_y - start_y angle = math.atan2(y_diff, x_diff) # Set the enemy to face the player. enemy.angle = math.degrees(angle)-90 # Shoot every 60 frames change of shooting each frame if self.frame_count % 60 == 0: bullet = arcade.Sprite(":resources:images/space_shooter/laserBlue01.png") bullet.center_x = start_x bullet.center_y = start_y # Angle the bullet sprite bullet.angle = math.degrees(angle) # Taking into account the angle, calculate our change_x # and change_y. Velocity is how fast the bullet travels. 
bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED self.bullet_list.append(bullet) # Get rid of the bullet when it flies off-screen for bullet in self.bullet_list: if bullet.top < 0: bullet.remove_from_sprite_lists() self.bullet_list.update() def on_mouse_motion(self, x, y, delta_x, delta_y): """Called whenever the mouse moves. """ self.player.center_x = x self.player.center_y = y def main(): """ Main method """ window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) window.setup() arcade.run() if __name__ == "__main__": main()
3.828125
4
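The aiming logic in on_update above is plain trigonometry. The same math stripped of the arcade sprites, with made-up enemy and player coordinates:

import math

BULLET_SPEED = 4.0
start_x, start_y = 120.0, 550.0     # enemy position (made up)
dest_x, dest_y = 400.0, 300.0       # player position (made up)

angle = math.atan2(dest_y - start_y, dest_x - start_x)   # angle from enemy to player
change_x = math.cos(angle) * BULLET_SPEED                # per-frame bullet velocity
change_y = math.sin(angle) * BULLET_SPEED
print(math.degrees(angle), change_x, change_y)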
app1.py
FreakX23/EBook_Training
0
1423
<reponame>FreakX23/EBook_Training<filename>app1.py
# This Part will gather Infos and demonstrate the use of Variables.
usrName = input("What is your Name?")
usrAge = int(input("What is your Age?"))
usrGPA = float(input("What is your GPA?"))

print()  # cheap way to get a new line
print("Hello, %s" % (usrName))
print("Did you know that in two years you will be %d years old?" % (usrAge + 2))
print("Also you need to improve your GPA by %f points to have a perfect score." % (4.0 - usrGPA))
print()
3.84375
4
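The script above uses %-style formatting; the same output with f-strings, should a newer style be preferred (the sample values here are assumed):

usrName, usrAge, usrGPA = "Ada", 30, 3.5
print(f"Hello, {usrName}")
print(f"Did you know that in two years you will be {usrAge + 2} years old?")
print(f"Also you need to improve your GPA by {4.0 - usrGPA} points to have a perfect score.")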
borze.py
AmitHasanShuvo/Programming
8
1424
<reponame>AmitHasanShuvo/Programming
a = input()
a = a.replace('--', '2')
a = a.replace('-.', '1')
a = a.replace('.', '0')
print(a)
3.734375
4
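The one-liner above decodes Borze ternary code, where '.' is 0, '-.' is 1 and '--' is 2; replacing '--' first keeps the parse unambiguous. The same idea as a reusable function with one worked example:

def decode_borze(s: str) -> str:
    return s.replace('--', '2').replace('-.', '1').replace('.', '0')

print(decode_borze('.-.--'))   # 012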
distalg/message.py
charlesemurray/DistributedProgramming
0
1425
<reponame>charlesemurray/DistributedProgramming
class Message:
    def __init__(self, from_channel=None, **kwargs):
        self._channel = from_channel
        if kwargs is not None:
            for key, value in kwargs.items():
                setattr(self, key, value)

    @property
    def carrier(self):
        return self._channel

    def sender(self):
        return self._channel.sender

    def receiver(self):
        return self._channel.receiver


class CallbackMessage(Message):
    def __init__(self, function):
        super(CallbackMessage, self).__init__(function=function)


if __name__ == "__main__":
    msg = Message(sender="A", receiver="B")
    # Compare values with ==; identity checks against string literals rely on interning.
    assert msg.sender == "A"
    assert msg.receiver == "B"
2.96875
3
myenv/lib/python3.5/site-packages/tests/handlers/logging/logging_tests.py
rupeshparab/techscan
1
1426
<reponame>rupeshparab/techscan import logging from opbeat.handlers.logging import OpbeatHandler from opbeat.utils.stacks import iter_stack_frames from tests.helpers import get_tempstoreclient from tests.utils.compat import TestCase class LoggingIntegrationTest(TestCase): def setUp(self): self.client = get_tempstoreclient(include_paths=['tests', 'opbeat']) self.handler = OpbeatHandler(self.client) self.logger = logging.getLogger(__name__) self.logger.handlers = [] self.logger.addHandler(self.handler) def test_logger_basic(self): self.logger.error('This is a test error') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], "error") self.assertEquals(event['message'], 'This is a test error') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test error') self.assertEquals(msg['params'], ()) def test_logger_warning(self): self.logger.warning('This is a test warning') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], "warning") self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test warning') self.assertEquals(msg['params'], ()) def test_logger_extra_data(self): self.logger.info('This is a test info with a url', extra=dict( data=dict( url='http://example.com', ), )) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['extra']['url'], 'http://example.com') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test info with a url') self.assertEquals(msg['params'], ()) def test_logger_exc_info(self): try: raise ValueError('This is a test ValueError') except ValueError: self.logger.info('This is a test info with an exception', exc_info=True) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test info with an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in event) exc = event['exception'] self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'], 'This is a test ValueError') self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test info with an exception') self.assertEquals(msg['params'], ()) def test_message_params(self): self.logger.info('This is a test of %s', 'args') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test of args') # print event.keys() self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of %s') self.assertEquals(msg['params'], ('args',)) def test_record_stack(self): self.logger.info('This is a test of stacks', extra={'stack': True}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('stacktrace' in event) frames = 
event['stacktrace']['frames'] self.assertNotEquals(len(frames), 1) frame = frames[0] self.assertEquals(frame['module'], __name__) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of stacks') self.assertEquals(msg['params'], ()) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'], 'This is a test of stacks') def test_no_record_stack(self): self.logger.info('This is a test of no stacks', extra={'stack': False}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event.get('culprit'), None) self.assertEquals(event['message'], 'This is a test of no stacks') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of no stacks') self.assertEquals(msg['params'], ()) def test_explicit_stack(self): self.logger.info('This is a test of stacks', extra={'stack': iter_stack_frames()}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('culprit' in event, event) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message' in event, event) self.assertEquals(event['message'], 'This is a test of stacks') self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of stacks') self.assertEquals(msg['params'], ()) self.assertTrue('stacktrace' in event) def test_extra_culprit(self): self.logger.info('This is a test of stacks', extra={'culprit': 'foo.bar'}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['culprit'], 'foo.bar') def test_logger_exception(self): try: raise ValueError('This is a test ValueError') except ValueError: self.logger.exception('This is a test with an exception') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['message'], 'This is a test with an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in event) exc = event['exception'] self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'], 'This is a test ValueError') self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test with an exception') self.assertEquals(msg['params'], ()) class LoggingHandlerTest(TestCase): def test_client_arg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client) self.assertEquals(handler.client, client) def test_client_kwarg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client=client) self.assertEquals(handler.client, client) def test_invalid_first_arg_type(self): self.assertRaises(ValueError, OpbeatHandler, object)
2.421875
2
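The tests above exercise a client-backed logging handler. The general stdlib pattern they build on, shown with a stub handler that merely collects records (this is not the OpbeatHandler itself):

import logging

class CollectingHandler(logging.Handler):
    def __init__(self):
        super().__init__()
        self.records = []

    def emit(self, record):
        # A real handler would forward the record to a client here.
        self.records.append(record)

handler = CollectingHandler()
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.error("This is a test of %s", "params")
print(handler.records[0].getMessage())   # params are interpolated by getMessage()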
amnesia/modules/mime/model.py
silenius/amnesia
4
1427
# -*- coding: utf-8 -*-
# pylint: disable=E1101

from sqlalchemy import sql
from sqlalchemy import orm
from sqlalchemy.orm.exc import NoResultFound

from .. import Base

# http://www.iana.org/assignments/media-types/media-types.xhtml


class MimeMajor(Base):
    """Mime major"""

    def __init__(self, name):
        super().__init__()
        self.name = name


class Mime(Base):

    def __init__(self, name, template, major):
        super().__init__()
        self.name = name
        self.template = template
        self.major = major

    @property
    def full(self):
        return '{0}/{1}'.format(self.major.name, self.name)

    @staticmethod
    def q_major_minor(dbsession, major, minor):
        cond = sql.and_(
            MimeMajor.name == major,
            Mime.name == minor
        )

        result = dbsession.execute(
            sql.select(Mime).join(Mime.major).options(
                orm.contains_eager(Mime.major)
            ).filter(cond)
        ).scalar_one_or_none()

        return result

    ###########
    # Filters #
    ###########

    @classmethod
    def filter_mime(cls, value):
        (major, minor) = value.split('/')
        cond = sql.and_()
        cond.append(MimeMajor.name == major)

        if minor and minor != '*':
            cond.append(Mime.name == minor)

        return cond
2.140625
2
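Mime.filter_mime above turns a 'major/minor' string into SQL conditions, treating '*' (or an empty minor part) as a wildcard. The same matching rule sketched in plain Python, outside SQLAlchemy:

def matches(pattern: str, mime: str) -> bool:
    p_major, p_minor = pattern.split('/')
    major, minor = mime.split('/')
    return major == p_major and (p_minor in ('*', '') or minor == p_minor)

print(matches('text/*', 'text/html'), matches('image/png', 'image/jpeg'))   # True False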
apps/goods/views_base.py
sunwei19910119/DjangoShop
3
1428
# encoding: utf-8
from goods.models import Goods
from django.views.generic.base import View


class GoodsListView(View):
    def get(self, request):
        """Implement the goods list page with a plain Django class-based view."""
        json_list = []
        goods = Goods.objects.all()[:10]
        # for good in goods:
        #     json_dict = {}
        #     json_dict["name"] = good.name
        #     json_dict["category"] = good.category.name
        #     json_dict["market_price"] = good.market_price
        #     json_dict["add_time"] = good.add_time
        #     json_list.append(json_dict)
        # from django.http import HttpResponse
        # import json
        # return HttpResponse(json.dumps(json_list), content_type="application/json")

        from django.forms.models import model_to_dict
        for good in goods:
            json_dict = model_to_dict(good)
            json_list.append(json_dict)

        import json
        from django.core import serializers
        json_data = serializers.serialize('json', goods)
        json_data = json.loads(json_data)

        from django.http import HttpResponse, JsonResponse
        # JsonResponse simply adds the json.dumps call and the content_type for us.
        # return HttpResponse(json.dumps(json_data), content_type="application/json")
        # If the loads() call above is commented out, the line below also works:
        # return HttpResponse(json_data, content_type="application/json")
        return JsonResponse(json_data, safe=False)
2.25
2
launcher/src/main/scripts/bin/launcher.py
iyersathya/airlift
0
1429
<filename>launcher/src/main/scripts/bin/launcher.py #!/usr/bin/env python import errno import os import platform import sys import traceback from fcntl import flock, LOCK_EX, LOCK_NB from optparse import OptionParser from os import O_RDWR, O_CREAT, O_WRONLY, O_APPEND from os.path import basename, dirname, exists, realpath from os.path import join as pathjoin from signal import SIGTERM, SIGKILL from stat import S_ISLNK from time import sleep COMMANDS = ['run', 'start', 'stop', 'restart', 'kill', 'status'] LSB_NOT_RUNNING = 3 LSB_STATUS_UNKNOWN = 4 def find_install_path(f): """Find canonical parent of bin/launcher.py""" if basename(f) != 'launcher.py': raise Exception("Expected file '%s' to be 'launcher.py' not '%s'" % (f, basename(f))) p = realpath(dirname(f)) if basename(p) != 'bin': raise Exception("Expected file '%s' directory to be 'bin' not '%s" % (f, basename(p))) return dirname(p) def makedirs(p): """Create directory and all intermediate ones""" try: os.makedirs(p) except OSError as e: if e.errno != errno.EEXIST: raise def load_properties(f): """Load key/value pairs from a file""" properties = {} for line in load_lines(f): k, v = line.split('=', 1) properties[k.strip()] = v.strip() return properties def load_lines(f): """Load lines from a file, ignoring blank or comment lines""" lines = [] for line in open(f, 'r').readlines(): line = line.strip() if len(line) > 0 and not line.startswith('#'): lines.append(line) return lines def try_lock(f): """Try to open an exclusive lock (inheritable) on a file""" try: flock(f, LOCK_EX | LOCK_NB) return True except (IOError, OSError): # IOError in Python 2, OSError in Python 3. return False def open_read_write(f, mode): """Open file in read/write mode (without truncating it)""" return os.fdopen(os.open(f, O_RDWR | O_CREAT, mode), 'r+') class Process: def __init__(self, path): makedirs(dirname(path)) self.path = path self.pid_file = open_read_write(path, 0o600) self.refresh() def refresh(self): self.locked = try_lock(self.pid_file) def clear_pid(self): assert self.locked, 'pid file not locked by us' self.pid_file.seek(0) self.pid_file.truncate() def write_pid(self, pid): self.clear_pid() self.pid_file.write(str(pid) + '\n') self.pid_file.flush() def alive(self): self.refresh() if self.locked: return False pid = self.read_pid() try: os.kill(pid, 0) return True except OSError as e: raise Exception('Signaling pid %s failed: %s' % (pid, e)) def read_pid(self): assert not self.locked, 'pid file is locked by us' self.pid_file.seek(0) line = self.pid_file.readline().strip() if len(line) == 0: raise Exception("Pid file '%s' is empty" % self.path) try: pid = int(line) except ValueError: raise Exception("Pid file '%s' contains garbage: %s" % (self.path, line)) if pid <= 0: raise Exception("Pid file '%s' contains an invalid pid: %s" % (self.path, pid)) return pid def redirect_stdin_to_devnull(): """Redirect stdin to /dev/null""" fd = os.open(os.devnull, O_RDWR) os.dup2(fd, sys.stdin.fileno()) os.close(fd) def open_append(f): """Open a raw file descriptor in append mode""" # noinspection PyTypeChecker return os.open(f, O_WRONLY | O_APPEND | O_CREAT, 0o644) def redirect_output(fd): """Redirect stdout and stderr to a file descriptor""" os.dup2(fd, sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno()) def symlink_exists(p): """Check if symlink exists and raise if another type of file exists""" try: st = os.lstat(p) if not S_ISLNK(st.st_mode): raise Exception('Path exists and is not a symlink: %s' % p) return True except OSError as e: if e.errno != errno.ENOENT: 
raise return False def create_symlink(source, target): """Create a symlink, removing the target first if it is a symlink""" if symlink_exists(target): os.remove(target) if exists(source): os.symlink(source, target) def create_app_symlinks(options): """ Symlink the 'etc' and 'plugin' directory into the data directory. This is needed to support programs that reference 'etc/xyz' from within their config files: log.levels-file=etc/log.properties """ if options.etc_dir != pathjoin(options.data_dir, 'etc'): create_symlink( options.etc_dir, pathjoin(options.data_dir, 'etc')) if options.install_path != options.data_dir: create_symlink( pathjoin(options.install_path, 'plugin'), pathjoin(options.data_dir, 'plugin')) def build_java_execution(options, daemon): if not exists(options.config_path): raise Exception('Config file is missing: %s' % options.config_path) if not exists(options.jvm_config): raise Exception('JVM config file is missing: %s' % options.jvm_config) if not exists(options.launcher_config): raise Exception('Launcher config file is missing: %s' % options.launcher_config) if options.log_levels_set and not exists(options.log_levels): raise Exception('Log levels file is missing: %s' % options.log_levels) properties = options.properties.copy() if exists(options.log_levels): properties['log.levels-file'] = options.log_levels if daemon: properties['log.output-file'] = options.server_log properties['log.enable-console'] = 'false' jvm_properties = load_lines(options.jvm_config) launcher_properties = load_properties(options.launcher_config) try: main_class = launcher_properties['main-class'] except KeyError: raise Exception("Launcher config is missing 'main-class' property") properties['config'] = options.config_path system_properties = ['-D%s=%s' % i for i in properties.items()] classpath = pathjoin(options.install_path, 'lib', '*') command = ['java', '-cp', classpath] command += jvm_properties + system_properties command += [main_class] command += options.arguments if options.verbose: print(command) print("") env = os.environ.copy() # set process name: https://github.com/electrum/procname process_name = launcher_properties.get('process-name', '') if len(process_name) > 0: system = platform.system() + '-' + platform.machine() shim = pathjoin(options.install_path, 'bin', 'procname', system, 'libprocname.so') if exists(shim): env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '') + ':' + shim).strip() env['PROCNAME'] = process_name return command, env def run(process, options): if process.alive(): print('Already running as %s' % process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options, False) makedirs(options.data_dir) os.chdir(options.data_dir) process.write_pid(os.getpid()) redirect_stdin_to_devnull() os.execvpe(args[0], args, env) def start(process, options): if process.alive(): print('Already running as %s' % process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options, True) makedirs(dirname(options.launcher_log)) log = open_append(options.launcher_log) makedirs(options.data_dir) os.chdir(options.data_dir) pid = os.fork() if pid > 0: process.write_pid(pid) print('Started as %s' % pid) return if hasattr(os, "set_inheritable"): # See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors # Since Python 3.4 os.set_inheritable(process.pid_file.fileno(), True) os.setsid() redirect_stdin_to_devnull() redirect_output(log) os.close(log) os.execvpe(args[0], args, env) def terminate(process, signal, message): if not 
process.alive(): print('Not running') return pid = process.read_pid() while True: try: os.kill(pid, signal) except OSError as e: if e.errno != errno.ESRCH: raise Exception('Signaling pid %s failed: %s' % (pid, e)) if not process.alive(): process.clear_pid() break sleep(0.1) print('%s %s' % (message, pid)) def stop(process): terminate(process, SIGTERM, 'Stopped') def kill(process): terminate(process, SIGKILL, 'Killed') def status(process): if not process.alive(): print('Not running') sys.exit(LSB_NOT_RUNNING) print('Running as %s' % process.read_pid()) def handle_command(command, options): process = Process(options.pid_file) if command == 'run': run(process, options) elif command == 'start': start(process, options) elif command == 'stop': stop(process) elif command == 'restart': stop(process) start(process, options) elif command == 'kill': kill(process) elif command == 'status': status(process) else: raise AssertionError('Unhandled command: ' + command) def create_parser(): commands = 'Commands: ' + ', '.join(COMMANDS) parser = OptionParser(prog='launcher', usage='usage: %prog [options] command', description=commands) parser.add_option('-v', '--verbose', action='store_true', default=False, help='Run verbosely') parser.add_option('--etc-dir', metavar='DIR', help='Defaults to INSTALL_PATH/etc') parser.add_option('--launcher-config', metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config', metavar='FILE', help='Defaults to ETC_DIR/node.properties') parser.add_option('--jvm-config', metavar='FILE', help='Defaults to ETC_DIR/jvm.config') parser.add_option('--config', metavar='FILE', help='Defaults to ETC_DIR/config.properties') parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to ETC_DIR/log.properties') parser.add_option('--data-dir', metavar='DIR', help='Defaults to INSTALL_PATH') parser.add_option('--pid-file', metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid') parser.add_option('--arg', action='append', metavar='ARG', dest='arguments', help='Add a program argument of the Java application') parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log (only in daemon mode)') parser.add_option('--server-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log (only in daemon mode)') parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties', help='Set a Java system property') return parser def parse_properties(parser, args): properties = {} for arg in args: if '=' not in arg: parser.error('property is malformed: %s' % arg) key, value = [i.strip() for i in arg.split('=', 1)] if key == 'config': parser.error('cannot specify config using -D option (use --config)') if key == 'log.output-file': parser.error('cannot specify server log using -D option (use --server-log-file)') if key == 'log.levels-file': parser.error('cannot specify log levels using -D option (use --log-levels-file)') properties[key] = value return properties def print_options(options): if options.verbose: for i in sorted(vars(options)): print("%-15s = %s" % (i, getattr(options, i))) print("") class Options: pass def main(): parser = create_parser() (options, args) = parser.parse_args() if len(args) != 1: if len(args) == 0: parser.error('command name not specified') else: parser.error('too many arguments') command = args[0] if command not in COMMANDS: parser.error('unsupported command: %s' % command) try: install_path = find_install_path(sys.argv[0]) except Exception 
as e: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) o = Options() o.verbose = options.verbose o.install_path = install_path o.launcher_config = realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties')) o.etc_dir = realpath(options.etc_dir or pathjoin(o.install_path, 'etc')) o.node_config = realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties')) o.jvm_config = realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config')) o.config_path = realpath(options.config or pathjoin(o.etc_dir, 'config.properties')) o.log_levels = realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties')) o.log_levels_set = bool(options.log_levels_file) if options.node_config and not exists(o.node_config): parser.error('Node config file is missing: %s' % o.node_config) node_properties = {} if exists(o.node_config): node_properties = load_properties(o.node_config) data_dir = node_properties.get('node.data-dir') o.data_dir = realpath(options.data_dir or data_dir or o.install_path) o.pid_file = realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid')) o.launcher_log = realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log')) o.server_log = realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log')) o.properties = parse_properties(parser, options.properties or {}) for k, v in node_properties.items(): if k not in o.properties: o.properties[k] = v o.arguments = options.arguments or [] if o.verbose: print_options(o) try: handle_command(command, o) except SystemExit: raise except Exception as e: if o.verbose: traceback.print_exc() else: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) if __name__ == '__main__': main()
2.375
2
code/sim/test.py
vectorcrumb/Ballbot_IEE2913
0
1430
<filename>code/sim/test.py
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from direct.actor.Actor import Actor
import numpy as np


class MyApp(ShowBase):

    def __init__(self):
        ShowBase.__init__(self)
        # Load environment model
        self.scene = self.loader.loadModel("models/environment")
        # Reparent model to render
        self.scene.reparentTo(self.render)
        # Scale and position model
        self.scene.setScale(0.25, 0.25, 0.25)
        self.scene.setPos(-8, 42, 0)
        # Add spinCameraTask to task manager to execute
        self.taskMgr.add(self.spinCameraTask, "SpinCameraTask")
        # Load and transform panda actor
        self.pandaActor = Actor("models/panda-model",
                                {"walk": "models/panda-walk4"})
        self.pandaActor.setScale(0.005, 0.005, 0.005)
        self.pandaActor.reparentTo(self.render)
        # Loop animation
        self.pandaActor.loop("walk")

    def spinCameraTask(self, task):
        angleDegs = task.time * 6.0
        angleRads = angleDegs * (np.pi / 180.0)
        self.camera.setPos(20 * np.sin(angleRads), -20.0 * np.cos(angleRads), 3)
        self.camera.setHpr(angleDegs, 0, 0)
        return Task.cont


app = MyApp()
app.run()
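# Note on the camera task above: with task.time in seconds, the camera sweeps
# 6 degrees per second, so it orbits the origin once every 60 seconds along a
# circle of radius 20 at height 3, starting from (0, -20, 3).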
2.375
2
run_locally.py
nationalarchives/tdr-service-unavailable
0
1431
<filename>run_locally.py
from app import app

app.run()
1.289063
1
src/pandas_profiling/model/describe.py
briangrahamww/pandas-profiling
0
1432
"""Organize the calculation of statistics for each series in this DataFrame.""" import warnings from datetime import datetime from typing import Optional import pandas as pd from tqdm.auto import tqdm from visions import VisionsTypeset from pandas_profiling.config import Settings from pandas_profiling.model.correlations import calculate_correlation from pandas_profiling.model.duplicates import get_duplicates from pandas_profiling.model.sample import Sample, get_sample from pandas_profiling.model.summarizer import BaseSummarizer from pandas_profiling.model.summary import ( get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats, ) from pandas_profiling.version import __version__ def describe( config: Settings, df: pd.DataFrame, summarizer: BaseSummarizer, typeset: VisionsTypeset, sample: Optional[dict] = None, ) -> dict: """Calculate the statistics for each series in this DataFrame. Args: config: report Settings object df: DataFrame. sample: optional, dict with custom sample Returns: This function returns a dictionary containing: - table: overall statistics. - variables: descriptions per series. - correlations: correlation matrices. - missing: missing value diagrams. - messages: direct special attention to these patterns in your data. - package: package details. """ if df is None: raise ValueError("Can not describe a `lazy` ProfileReport without a DataFrame.") if not isinstance(df, pd.DataFrame): warnings.warn("df is not of type pandas.DataFrame") disable_progress_bar = not config.progress_bar date_start = datetime.utcnow() correlation_names = [ correlation_name for correlation_name in [ "pearson", "spearman", "kendall", "phi_k", "cramers", ] if config.correlations[correlation_name].calculate ] number_of_tasks = 8 + len(df.columns) + len(correlation_names) with tqdm( total=number_of_tasks, desc="Summarize dataset", disable=disable_progress_bar ) as pbar: series_description = get_series_descriptions( config, df, summarizer, typeset, pbar ) pbar.set_postfix_str("Get variable types") variables = { column: description["type"] for column, description in series_description.items() } supported_columns = [ column for column, type_name in variables.items() if type_name != "Unsupported" ] interval_columns = [ column for column, type_name in variables.items() if type_name == "Numeric" ] pbar.update() # Get correlations correlations = {} for correlation_name in correlation_names: pbar.set_postfix_str(f"Calculate {correlation_name} correlation") correlations[correlation_name] = calculate_correlation( config, df, correlation_name, series_description ) pbar.update() # make sure correlations is not None correlations = { key: value for key, value in correlations.items() if value is not None } # Scatter matrix pbar.set_postfix_str("Get scatter matrix") scatter_matrix = get_scatter_matrix(config, df, interval_columns) pbar.update() # Table statistics pbar.set_postfix_str("Get table statistics") table_stats = get_table_stats(config, df, series_description) pbar.update() # missing diagrams pbar.set_postfix_str("Get missing diagrams") missing = get_missing_diagrams(config, df, table_stats) pbar.update() # Sample pbar.set_postfix_str("Take sample") if sample is None: samples = get_sample(config, df) else: if "name" not in sample: sample["name"] = None if "caption" not in sample: sample["caption"] = None samples = [ Sample( id="custom", data=sample["data"], name=sample["name"], caption=sample["caption"], ) ] pbar.update() # Duplicates pbar.set_postfix_str("Locating 
duplicates") metrics, duplicates = get_duplicates(config, df, supported_columns) table_stats.update(metrics) pbar.update() # Messages pbar.set_postfix_str("Get messages/warnings") messages = get_messages(config, table_stats, series_description, correlations) pbar.update() pbar.set_postfix_str("Get reproduction details") package = { "pandas_profiling_version": __version__, "pandas_profiling_config": config.json(), } pbar.update() pbar.set_postfix_str("Completed") date_end = datetime.utcnow() analysis = { "title": config.title, "date_start": date_start, "date_end": date_end, "duration": date_end - date_start, } return { # Analysis metadata "analysis": analysis, # Overall dataset description "table": table_stats, # Per variable descriptions "variables": series_description, # Bivariate relations "scatter": scatter_matrix, # Correlation matrices "correlations": correlations, # Missing values "missing": missing, # Warnings "messages": messages, # Package "package": package, # Sample "sample": samples, # Duplicates "duplicates": duplicates, }
2.453125
2
maxOfferNum.py
Ruanxingzhi/King-of-Pigeon
0
1433
<gh_stars>0
import operator


class Std(object):
    def __init__(self):
        self.name = ''
        self.offerNum = 0
        self.offers = []


stds = []
stdsDict = {}
index = 0


def readStd(name, camper):
    global stds
    global stdsDict
    global index
    if name not in stdsDict:
        newStd = Std()
        newStd.name = name
        stds.append(newStd)
        stdsDict[name] = index
        index += 1
    if camper not in stds[stdsDict[name]].offers:
        stds[stdsDict[name]].offers.append(camper)
        stds[stdsDict[name]].offerNum += 1


if __name__ == "__main__":
    campers = ['PKUxk', 'THUsz_ai', 'THUsz_cs', 'THUsz_data', 'USTC_cs']
    for camper in campers:
        filename = camper + '.txt'
        with open('data/%s' % (filename), "r") as f:
            data = f.readlines()
            for std in data:
                readStd(std, camper)
    cmpfun = operator.attrgetter('offerNum', 'name')
    stds.sort(key=cmpfun, reverse=True)
    for std in stds:
        if std.name[-1] == '\n':
            std.name = std.name[:-1]
        print(f'{std.name} got {std.offerNum} offers: {std.offers}')
3.125
3
tabnine-vim/third_party/ycmd/third_party/python-future/setup.py
MrMonk3y/vimrc
2
1434
#!/usr/bin/env python from __future__ import absolute_import, print_function import os import os.path import sys try: from setuptools import setup except ImportError: from distutils.core import setup if sys.argv[-1] == 'publish': os.system('python setup.py sdist upload') sys.exit() NAME = "future" PACKAGES = ["future", "future.builtins", "future.types", "future.standard_library", "future.backports", "future.backports.email", "future.backports.email.mime", "future.backports.html", "future.backports.http", "future.backports.test", "future.backports.urllib", "future.backports.xmlrpc", "future.moves", "future.moves.dbm", "future.moves.html", "future.moves.http", "future.moves.test", "future.moves.tkinter", "future.moves.urllib", "future.moves.xmlrpc", "future.tests", # for future.tests.base # "future.tests.test_email", "future.utils", "past", "past.builtins", "past.types", "past.utils", # "past.tests", "past.translation", "libfuturize", "libfuturize.fixes", "libpasteurize", "libpasteurize.fixes", ] # PEP 3108 stdlib moves: if sys.version_info[:2] < (3, 0): PACKAGES += [ "builtins", "configparser", "copyreg", "html", "http", "queue", "reprlib", "socketserver", "tkinter", "winreg", "xmlrpc", "_dummy_thread", "_markupbase", "_thread", ] PACKAGE_DATA = {'': [ 'README.rst', 'LICENSE.txt', 'futurize.py', 'pasteurize.py', 'discover_tests.py', 'check_rst.sh', 'TESTING.txt', ], 'tests': ['*.py'], } REQUIRES = [] TEST_REQUIRES = [] if sys.version_info[:2] == (2, 6): REQUIRES += ['importlib', 'argparse'] TEST_REQUIRES += ['unittest2'] import src.future VERSION = src.future.__version__ DESCRIPTION = "Clean single-source support for Python 3 and 2" LONG_DESC = src.future.__doc__ AUTHOR = "<NAME>" AUTHOR_EMAIL = "<EMAIL>" URL="https://python-future.org" LICENSE = "MIT" KEYWORDS = "future past python3 migration futurize backport six 2to3 modernize pasteurize 3to2" CLASSIFIERS = [ "Programming Language :: Python", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "License :: OSI Approved", "License :: OSI Approved :: MIT License", "Development Status :: 4 - Beta", "Intended Audience :: Developers", ] setup_kwds = {} # * Important * # We forcibly remove the build folder to avoid breaking the # user's Py3 installation if they run "python2 setup.py # build" and then "python3 setup.py install". try: # If the user happens to run: # python2 setup.py build # python3 setup.py install # then folders like "configparser" will be in build/lib. # If so, we CANNOT let the user install this, because # this may break his/her Python 3 install, depending on the folder order in # sys.path. (Running "import configparser" etc. may pick up our Py2 # substitute packages, instead of the intended system stdlib modules.) SYSTEM_MODULES = set([ '_dummy_thread', '_markupbase', '_thread', 'builtins', 'configparser', 'copyreg', 'html', 'http', 'queue', 'reprlib', 'socketserver', 'tkinter', 'winreg', 'xmlrpc' ]) if sys.version_info[0] >= 3: # Do any of the above folders exist in build/lib? files = os.listdir(os.path.join('build', 'lib')) if len(set(files) & set(SYSTEM_MODULES)) > 0: print('ERROR: Your build folder is in an inconsistent state for ' 'a Python 3.x install. 
Please remove it manually and run ' 'setup.py again.', file=sys.stderr) sys.exit(1) except OSError: pass setup(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, description=DESCRIPTION, long_description=LONG_DESC, license=LICENSE, keywords=KEYWORDS, entry_points={ 'console_scripts': [ 'futurize = libfuturize.main:main', 'pasteurize = libpasteurize.main:main' ] }, package_dir={'': 'src'}, packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True, install_requires=REQUIRES, classifiers=CLASSIFIERS, test_suite = "discover_tests", tests_require=TEST_REQUIRES, **setup_kwds )
1.695313
2
url_shortener/src/__init__.py
Andrelpoj/hire.me
0
1435
<gh_stars>0
from flask import Flask

from .extensions import db
from .routes import short
from . import config


def create_app():
    """Creates the Flask app, connects to the database and registers the routes Blueprint."""
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

    app.app_context().push()
    db.init_app(app)
    db.create_all()

    app.register_blueprint(short)

    return app
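# Illustrative usage (not part of the original package): a separate entry-point
# script could build and serve the app roughly like this, assuming the package
# is importable as `src` and the configured database is reachable:
#
#     from src import create_app
#
#     app = create_app()
#     app.run(port=5000)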
2.46875
2
python-百度翻译调用/Baidu_translate/com/translate/baidu/stackoverflow_question_handler.py
wangchuanli001/Project-experience
12
1436
import requests from bs4 import BeautifulSoup import urllib.request import os import random import time def html(url): user_agents = [ 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11', 'Opera/9.25 (Windows NT 5.1; U; en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9', "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7", "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "] user_agent = random.choice(user_agents) headers = { 'User-Agent': user_agent, 'Accept-Encoding': 'gzip'} req = requests.get(url=url, headers=headers) html_doc = req.text soup = BeautifulSoup(html_doc, "html.parser") times = soup.select("time") views = soup.select("p.label-key > b") active_str = str(views[2]) active = active_str[active_str.find("title=\"") + 7:active_str.find("Z")] answers = soup.select("#answers-header > div > h2 >span") question_content = soup.select("div.post-text") tags = soup.select("#question > div.post-layout > div.postcell.post-layout--right > " "div.post-taglist.grid.gs4.gsy.fd-column > div >a") title = soup.select("h1 >a") tags_str = "" item = [] for tag in tags: tags_str += tag.get_text() + "," answer_contetnts = [] for i in range(1, len(question_content)): answer_contetnts.append(question_content[i]) for i in range(len(times)): if len(times[i].get_text()) > 1: asked_time = times[i].get("datetime").replace("T", " ") item.append(title[ 0].get_text()) # title views answersnum asked_time tag_str active_time quest_content_ text answer_content_list item.append(views[1].get_text()) item.append(answers[0].get_text()) item.append(asked_time) item.append(tags_str) item.append(active) item.append(question_content[0]) item.append(answer_contetnts) print(item) # updatetosql(item) def updatetosql(item): ansers_text = "[split]".join(item[7]) updatesql = "UPDATE `t_stackoverflow_question` " \ "SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' " \ "WHERE (`question_id`='%s') " \ % (item[4], item[1], item[2], item[3], item[5], item[6], ansers_text, item[0],) pass if __name__ == '__main__': html("https://stackoverflow.com/questions/50119673/nginx-fast-cgi-cache-on-error-page-404")
2.84375
3
Research/data_loader.py
ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge
0
1437
import os import numpy as np import pandas as pd from keras.utils import to_categorical from sklearn.model_selection import KFold, train_test_split def load_data(path): train = pd.read_json(os.path.join(path, "./train.json")) test = pd.read_json(os.path.join(path, "./test.json")) return (train, test) def preprocess(df, means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df["band_1"]]) X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df["band_2"]]) angl = df['inc_angle'].map(lambda x: np.cos(x * np.pi / 180) if x != 'na' else means[3]) angl = np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32) for angel in angl]) X_band_1 = (X_band_1 - means[0]) / stds[0] X_band_2 = (X_band_2 - means[1]) / stds[1] angl = (angl - means[2]) / stds[2] images = np.concatenate([X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis], angl[:, :, :, np.newaxis]], axis=-1) return images def prepare_data_cv(path): train, test = load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) kfold_data = [] kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE) for train_indices, val_indices in kf.split(y_train): X_train_cv = X_train[train_indices] y_train_cv = y_train[train_indices] X_val = X_train[val_indices] y_val = y_train[val_indices] kfold_data.append((X_train_cv, y_train_cv, X_val, y_val)) X_test = preprocess(test) return (kfold_data, X_test) def prepare_data(path): train, test = load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(X_train, y_train, random_state=0xCAFFE, train_size=0.8) X_test = preprocess(test) return ([(X_train_cv, y_train_cv, X_valid, y_valid)], X_test)
2.65625
3
polyaxon_cli/cli/experiment.py
tiagopms/polyaxon-cli
0
1438
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import sys import click import rhea from polyaxon_cli.cli.getters.experiment import ( get_experiment_job_or_local, get_project_experiment_or_local ) from polyaxon_cli.cli.upload import upload from polyaxon_cli.client import PolyaxonClient from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError from polyaxon_cli.logger import clean_outputs from polyaxon_cli.managers.experiment import ExperimentManager from polyaxon_cli.managers.experiment_job import ExperimentJobManager from polyaxon_cli.utils import cache from polyaxon_cli.utils.formatting import ( Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate ) from polyaxon_cli.utils.log_handler import get_logs_handler from polyaxon_cli.utils.validation import validate_tags from polyaxon_client.exceptions import PolyaxonClientException def get_experiment_details(experiment): # pylint:disable=redefined-outer-name if experiment.description: Printer.print_header("Experiment description:") click.echo('{}\n'.format(experiment.description)) if experiment.resources: get_resources(experiment.resources.to_dict(), header="Experiment resources:") if experiment.declarations: Printer.print_header("Experiment declarations:") dict_tabulate(experiment.declarations) if experiment.last_metric: Printer.print_header("Experiment last metrics:") dict_tabulate(experiment.last_metric) response = experiment.to_light_dict( humanize_values=True, exclude_attrs=[ 'uuid', 'config', 'project', 'experiments', 'description', 'declarations', 'last_metric', 'resources', 'jobs', 'run_env' ]) Printer.print_header("Experiment info:") dict_tabulate(Printer.add_status_color(response)) @click.group() @click.option('--project', '-p', type=str, help="The project name, e.g. 'mnist' or 'adam/mnist'.") @click.option('--experiment', '-xp', type=int, help="The experiment id number.") @click.pass_context @clean_outputs def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name """Commands for experiments.""" ctx.obj = ctx.obj or {} ctx.obj['project'] = project ctx.obj['experiment'] = experiment @experiment.command() @click.option('--job', '-j', type=int, help="The job id.") @click.pass_context @clean_outputs def get(ctx, job): """Get experiment or experiment job. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting an experiment: \b ```bash $ polyaxon experiment get # if experiment is cached ``` \b ```bash $ polyaxon experiment --experiment=1 get ``` \b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get ``` \b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get ``` Examples for getting an experiment job: \b ```bash $ polyaxon experiment get -j 1 # if experiment is cached ``` \b ```bash $ polyaxon experiment --experiment=1 get --job=10 ``` \b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2 ``` \b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j 2 ``` """ def get_experiment(): try: response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not load experiment `{}` info.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) get_experiment_details(response) def get_experiment_job(): try: response = PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.resources: get_resources(response.resources.to_dict(), header="Job resources:") response = Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources'] )) Printer.print_header("Job info:") dict_tabulate(response) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job() else: get_experiment() @experiment.command() @click.pass_context @clean_outputs def delete(ctx): """Delete experiment. Uses [Caching](/references/polyaxon-cli/#caching) Example: \b ```bash $ polyaxon experiment delete ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not click.confirm("Are sure you want to delete experiment `{}`".format(_experiment)): click.echo('Existing without deleting experiment.') sys.exit(1) try: response = PolyaxonClient().experiment.delete_experiment( user, project_name, _experiment) # Purge caching ExperimentManager.purge() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not delete experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.status_code == 204: Printer.print_success("Experiment `{}` was delete successfully".format(_experiment)) @experiment.command() @click.option('--name', type=str, help='Name of the experiment, must be unique within the project, could be none.') @click.option('--description', type=str, help='Description of the experiment.') @click.option('--tags', type=str, help='Tags of the experiment, comma separated values.') @click.pass_context @clean_outputs def update(ctx, name, description, tags): """Update experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment -xp 2 update --description="new description for my experiments" ``` \b ```bash $ polyaxon experiment -xp 2 update --tags="foo, bar" --name="unique-name" ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) update_dict = {} if name: update_dict['name'] = name if description: update_dict['description'] = description tags = validate_tags(tags) if tags: update_dict['tags'] = tags if not update_dict: Printer.print_warning('No argument was provided to update the experiment.') sys.exit(0) try: response = PolyaxonClient().experiment.update_experiment( user, project_name, _experiment, update_dict) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not update experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success("Experiment updated.") get_experiment_details(response) @experiment.command() @click.option('--yes', '-y', is_flag=True, default=False, help="Automatic yes to prompts. " "Assume \"yes\" as answer to all prompts and run non-interactively.") @click.pass_context @clean_outputs def stop(ctx, yes): """Stop experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment stop ``` \b ```bash $ polyaxon experiment -xp 2 stop ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not yes and not click.confirm("Are sure you want to stop " "experiment `{}`".format(_experiment)): click.echo('Existing without stopping experiment.') sys.exit(0) try: PolyaxonClient().experiment.stop(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not stop experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success("Experiment is being stopped.") @experiment.command() @click.option('--copy', '-c', is_flag=True, default=False, help="To copy the experiment before restarting.") @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help="The polyaxon files to update with.") @click.option('-u', is_flag=True, default=False, help="To upload the repo before restarting.") @click.pass_context @clean_outputs def restart(ctx, copy, file, u): # pylint:disable=redefined-builtin """Restart experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment --experiment=1 restart ``` """ config = None update_code = None if file: config = rhea.read(file) # Check if we need to upload if u: ctx.invoke(upload, sync=False) update_code = True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: if copy: response = PolyaxonClient().experiment.copy( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was copied with id {}'.format(response.id)) else: response = PolyaxonClient().experiment.restart( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was restarted with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not restart experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help="The polyaxon files to update with.") @click.option('-u', is_flag=True, default=False, help="To upload the repo before resuming.") @click.pass_context @clean_outputs def resume(ctx, file, u): # pylint:disable=redefined-builtin """Resume experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment --experiment=1 resume ``` """ config = None update_code = None if file: config = rhea.read(file) # Check if we need to upload if u: ctx.invoke(upload, sync=False) update_code = True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: response = PolyaxonClient().experiment.resume( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was resumed with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not resume experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--page', type=int, help="To paginate through the list of jobs.") @click.pass_context @clean_outputs def jobs(ctx, page): """List jobs for experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment --experiment=1 jobs ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) page = page or 1 try: response = PolyaxonClient().experiment.list_jobs( user, project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Jobs for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment)) objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True)) for o in response['results']] objects = list_dicts_to_tabulate(objects) if objects: Printer.print_header("Jobs:") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) @experiment.command() @click.option('--job', '-j', type=int, help="The job id.") @click.option('--page', type=int, help="To paginate through the list of statuses.") @click.pass_context @clean_outputs def statuses(ctx, job, page): """Get experiment or experiment job statuses. Uses [Caching](/references/polyaxon-cli/#caching) Examples getting experiment statuses: \b ```bash $ polyaxon experiment statuses ``` \b ```bash $ polyaxon experiment -xp 1 statuses ``` Examples getting experiment job statuses: \b ```bash $ polyaxon experiment statuses -j 3 ``` \b ```bash $ polyaxon experiment -xp 1 statuses --job 1 ``` """ def get_experiment_statuses(): try: response = PolyaxonClient().experiment.get_statuses( user, project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could get status for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Statuses for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']]) if objects: Printer.print_header("Statuses:") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) def get_experiment_job_statuses(): try: response = PolyaxonClient().experiment_job.get_statuses(user, project_name, _experiment, _job, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get status for job `{}`.'.format(job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Statuses for Job `{}`.'.format(_job)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for job `{}`.'.format(_job)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']]) if objects: Printer.print_header("Statuses:") objects.pop('job', None) dict_tabulate(objects, is_list_dict=True) page = page or 1 user, project_name, _experiment = 
get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_statuses() else: get_experiment_statuses() @experiment.command() @click.option('--job', '-j', type=int, help="The job id.") @click.option('--gpu', '-g', is_flag=True, help="List experiment GPU resources.") @click.pass_context @clean_outputs def resources(ctx, job, gpu): """Get experiment or experiment job resources. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment resources: \b ```bash $ polyaxon experiment -xp 19 resources ``` For GPU resources \b ```bash $ polyaxon experiment -xp 19 resources --gpu ``` Examples for getting experiment job resources: \b ```bash $ polyaxon experiment -xp 19 resources -j 1 ``` For GPU resources \b ```bash $ polyaxon experiment -xp 19 resources -j 1 --gpu ``` """ def get_experiment_resources(): try: message_handler = Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment.resources( user, project_name, _experiment, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_resources(): try: message_handler = Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment_job.resources(user, project_name, _experiment, _job, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get resources for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_resources() else: get_experiment_resources() @experiment.command() @click.option('--job', '-j', type=int, help="The job id.") @click.option('--past', '-p', is_flag=True, help="Show the past logs.") @click.option('--follow', '-f', is_flag=True, default=False, help="Stream logs after showing past logs.") @click.option('--hide_time', is_flag=True, default=False, help="Whether or not to hide timestamps from the log stream.") @click.pass_context @clean_outputs def logs(ctx, job, past, follow, hide_time): """Get experiment or experiment job logs. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment logs: \b ```bash $ polyaxon experiment logs ``` \b ```bash $ polyaxon experiment -xp 10 -p mnist logs ``` Examples for getting experiment job logs: \b ```bash $ polyaxon experiment -xp 1 -j 1 logs ``` """ def get_experiment_logs(): if past: try: response = PolyaxonClient().experiment.logs( user, project_name, _experiment, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\n')) print() if not follow: return except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: if not follow: Printer.print_error( 'Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment.logs( user, project_name, _experiment, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_logs(): if past: try: response = PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\n')) print() if not follow: return except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: if not follow: Printer.print_error( 'Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get logs for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_logs() else: get_experiment_logs() @experiment.command() @click.pass_context @clean_outputs def outputs(ctx): """Download outputs for experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment -xp 1 outputs ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.download_outputs(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not download outputs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success('Files downloaded.') @experiment.command() @click.pass_context @clean_outputs def bookmark(ctx): """Bookmark experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment bookmark ``` \b ```bash $ polyaxon experiment -xp 2 bookmark ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.bookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not bookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success("Experiment is bookmarked.") @experiment.command() @click.pass_context @clean_outputs def unbookmark(ctx): """Unbookmark experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment unbookmark ``` \b ```bash $ polyaxon experiment -xp 2 unbookmark ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.unbookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not unbookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success("Experiment is unbookmarked.")
1.945313
2
Problem_09.py
Habbo3/Project-Euler
0
1439
""" A Pythagorean triplet is a set of three natural numbers, a < b < c, for which, a2 + b2 = c2 For example, 32 + 42 = 9 + 16 = 25 = 52. There exists exactly one Pythagorean triplet for which a + b + c = 1000. Find the product abc. """ solved = False for a in range(1, 1000): for b in range(1, 1000): for c in range(1, 1000): if a < b < c: if a + b + c == 1000: if a**2 + b**2 == c**2: solved = True break if solved: break if solved: break product = a*b*c print("The product of only triplet who exists is : ", product)
4.21875
4
fanscribed/apps/transcripts/tests/test_transcripts.py
fanscribed/fanscribed
8
1440
<filename>fanscribed/apps/transcripts/tests/test_transcripts.py from decimal import Decimal import os from django.test import TestCase from unipath import Path from ....utils import refresh from ...media import tests from ..models import Transcript, TranscriptMedia MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class TranscriptsTestCase(TestCase): def test_transcript_starts_out_with_unknown_length(self): transcript = Transcript.objects.create(title='test') self.assertEqual(transcript.length, None) def test_setting_transcript_length_creates_fragments_and_stitches(self): t = Transcript.objects.create(title='test') t.set_length('3.33') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('3.33')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('7.77') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('7.77')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('17.77') f0, f1, f2 = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('5.00')) self.assertEqual(f1.start, Decimal('5.00')) self.assertEqual(f1.end, Decimal('10.00')) self.assertEqual(f2.start, Decimal('10.00')) self.assertEqual(f2.end, Decimal('17.77')) s0, s1 = t.stitches.all() self.assertEqual(s0.left, f0) self.assertEqual(s0.right, f1) self.assertEqual(s0.state, 'notready') self.assertEqual(s1.left, f1) self.assertEqual(s1.right, f2) self.assertEqual(s1.state, 'notready') if os.environ.get('FAST_TEST') != '1': from django.core.files import File class SlowTranscriptsTestCase(TestCase): def test_transcript_with_processed_media_has_length(self): transcript = Transcript.objects.create( title='test transcript', ) raw_media = TranscriptMedia( transcript=transcript, is_processed=False, is_full_length=True, ) with open(RAW_MEDIA_PATH, 'rb') as f: raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f)) raw_media.save() # Process raw media. raw_media.create_processed_task() transcript = refresh(transcript) # Check length. expected_length = 5 * 60 # 5 minutes. self.assertAlmostEqual( transcript.length, expected_length, delta=0.2)
2.328125
2
buildAncestryFeats.py
BurcinSayin/pf2
0
1441
from bs4 import BeautifulSoup
import requests
import json
import datetime
import codecs
import re

featHolder = {}
featHolder['name'] = 'Pathfinder 2.0 Ancestry feat list'
featHolder['date'] = datetime.date.today().strftime("%B %d, %Y")


def get_details(link):
    res = requests.get(link)
    res.raise_for_status()
    soup = BeautifulSoup(res.text, 'lxml')
    feat = soup.find_all("div", {'class': 'main'})
    detailraw = soup.find("meta", {'name': 'description'})['content']  # First we grab the content from the meta tag
    detailsplit = re.split('<(.*?)>', detailraw)  # Now we split it into groups of strings separated by < >, to pull out any links
    detail = ''.join(detailsplit[::2])  # Finally, we join every other group together (passing over the link groups) into one string
    # print(detail)
    return detail


def get_feats(link):
    feats = []
    res = requests.get(link)
    res.raise_for_status()
    soup = BeautifulSoup(res.text, 'lxml')
    table = soup.find(lambda tag: tag.name == 'table' and tag.has_attr('id') and tag['id'] == "ctl00_MainContent_TableElement")
    rows = table.findAll(lambda tag: tag.name == 'tr')
    t = 0
    for row in rows:
        t += 1
        # print(row)
        # print("-----------------------------------")
        feat = {}
        entries = row.find_all(lambda tag: tag.name == 'td')
        if entries is not None:
            if len(entries) > 0:
                name = entries[0].find("a").next_sibling.text  # We do next_sibling here because the source puts PFS links first, which we want to skip over.
                link = entries[0].find("a").next_sibling.a['href']
                # for entry in entries:
                #     print(entry)
                #     print("row---------------")
                level = entries[1].text
                traits = entries[2].text
                prereq = entries[3].text
                source = entries[4].text
                feat['name'] = name
                feat['level'] = level
                feat['traits'] = traits.split(", ")
                feat['link'] = "https://2e.aonprd.com/" + link
                feat['prereq'] = prereq
                feat['benefits'] = source
                details = get_details(feat['link'])
                feat['text'] = details
                feats.append(feat)
        # if t > 5:
        #     break
    return feats


listOfPages = codecs.open("ancestryFeats.csv", encoding='utf-8')

for line in listOfPages:
    featMD = line.split(",")
    print("Getting feats for :", featMD[0], "This url:", featMD[2])
    featHolder[featMD[1]] = get_feats(featMD[2].strip('\n'))

json_data = json.dumps(featHolder, indent=4)
# print(json_data)

filename = "ancestry-feats-pf2.json"
f = open(filename, "w")
f.write(json_data)
f.close()
3.140625
3
Random_item_selector_module.py
Jahronimo/public_question_book_framework
0
1442
<reponame>Jahronimo/public_question_book_framework
import secrets


def Randomise(questions_lists, num_question_to_display):
    secure_random = secrets.SystemRandom()  # creates a secure random object.
    group_of_items = questions_lists
    num_qustion_t_select = num_question_to_display
    list_of_random_items = secure_random.sample(group_of_items, num_qustion_t_select)  # randomly selecting from strings within each question list
    # print a numbered question followed by a blank answer line for each pick
    for each_question in range(0, num_qustion_t_select):
        print(("Q."), (each_question + 1), (list_of_random_items[each_question]))
        print(("A."), (each_question + 1), ("_______________________"))
        print("\n")
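# Example usage (illustrative only; the questions below are invented sample
# data, and it assumes the Randomise signature above taking the question list
# plus the number of questions to show):
if __name__ == "__main__":
    sample_questions = [
        "What is the capital of France?",
        "How many planets are in the Solar System?",
        "Who wrote 'Hamlet'?",
    ]
    Randomise(sample_questions, num_question_to_display=2)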
4.125
4
python_scrape/test_functions.py
jose-marquez89/tech-job-landscape
0
1443
import unittest

import scrape


class TestScrapeFunctions(unittest.TestCase):

    def test_build_url(self):
        url = scrape.build_url("indeed",
                               "/jobs?q=Data+Scientist&l=Texas&start=10",
                               join_next=True)
        expected = ("https://www.indeed.com/"
                    "jobs?q=Data+Scientist&l=Texas&start=10")
        url2 = scrape.build_url("indeed", job="Data Scientist", state="Texas")
        expected2 = ("https://www.indeed.com/"
                     "jobs?q=Data%20Scientist&l=Texas&start=0")
        self.assertEqual(url, expected)
        self.assertEqual(url2, expected2)

    def test_fetch_page(self):
        fpl = scrape.fetch_page_listings
        job_data = fpl("indeed", job="Data Scientist", state="Texas")
        self.assertNotEqual(len(job_data), 0)
        self.assertIsInstance(job_data, tuple)
        self.assertIsInstance(job_data[0][0], dict)
        self.assertIsInstance(job_data[1], str)
        job_data = fpl("indeed",
                       next_page="/jobs?q=Data+Scientist"
                                 "&l=Texas&start=10")


if __name__ == '__main__':
    unittest.main()
3.125
3
Level1_Input_Output/10172.py
jaeheeLee17/BOJ_Algorithms
0
1444
def main():
    print("|\_/|")
    print("|q p|   /}")
    print("( 0 )\"\"\"\\")
    print("|\"^\"`    |")
    print("||_/=\\\\__|")


if __name__ == "__main__":
    main()
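# When run, the escape sequences above resolve to this picture:
#
#   |\_/|
#   |q p|   /}
#   ( 0 )"""\
#   |"^"`    |
#   ||_/=\\__|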
2.578125
3
Whats Cooking/KaggleCookingComparison.py
rupakc/Kaggle-Compendium
17
1445
<reponame>rupakc/Kaggle-Compendium # -*- coding: utf-8 -*- """ Created on Sat Dec 26 13:20:45 2015 Code for Kaggle What's Cooking Competition It uses the following classifiers with tf-idf,hashvectors and bag_of_words approach 1. Adaboost 2. Extratrees 3. Bagging 4. Random Forests @author: <NAME> """ import numpy as np import time import json import ClassificationUtils from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import BaggingClassifier from sklearn import metrics # Create the feature extractors bag_of_words = CountVectorizer(stop_words='english') tfidf = TfidfVectorizer(stop_words='english') hashvec = HashingVectorizer(stop_words='english') # Create the Classifier objects adaboost = AdaBoostClassifier() randomforest = RandomForestClassifier() extratrees = ExtraTreesClassifier() bagging = BaggingClassifier() filepath = "train.json" f = open(filepath,"r") content = f.read() jsonData = json.loads(content) cuisine_set = set([]) ingredient_set = set([]) cuisine_map = {} cuisine_numerical_map = {} ingredient_numerical_map = {} ingredient_map = {} ingredient_list = list([]) c = 0 print "Size of the data set : ", len(jsonData) print "Starting Loading of Data Set...." start = time.time() for recipe in jsonData: if "cuisine" in recipe: s = "" if recipe["cuisine"] in cuisine_set: cuisine_map[recipe["cuisine"]] = cuisine_map[recipe["cuisine"]] + 1 else: cuisine_map[recipe["cuisine"]] = 1 cuisine_set.add(recipe["cuisine"]) for ingredient in recipe["ingredients"]: if ingredient in ingredient_set: ingredient_map[ingredient] = ingredient_map[ingredient] + 1 else: ingredient_map[ingredient] = 1 ingredient_set.add(ingredient) s = s + " " + ingredient ingredient_list.append(s) end = time.time() print "Time Taken to Load the Dataset : ",end-start for cuisine in cuisine_set: cuisine_numerical_map[cuisine] = c c = c+1 c = 0 for ingredient in ingredient_set: ingredient_numerical_map[ingredient] = c c = c+1 print "Starting Feature Extracting ......" start = time.time() train_labels = np.zeros(len(ingredient_list)) train_data_tfidf = tfidf.fit_transform(ingredient_list) train_data_hash = hashvec.fit_transform(ingredient_list) train_data_bag = bag_of_words.fit_transform(ingredient_list) c = 0 for recipe in jsonData: if "cuisine" in recipe: train_labels[c] = cuisine_numerical_map[recipe["cuisine"]] c = c+1 end = time.time() print "Time Taken to Train Extract Different Features : ", end-start test_labels = train_labels[1:30000] test_data_tfidf = tfidf.transform(ingredient_list[1:30000]) test_data_hash = hashvec.transform(ingredient_list[1:30000]) test_data_bag = bag_of_words.transform(ingredient_list[1:30000]) print "Starting Training of Models for Hash Vectorizer Feature....." start = time.time() adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels) end=time.time() print "Time Taken to train all Ensemble Models : ", end-start print "Starting Prediction of Test Labels ...." 
start = time.time() ada_predict = adaboost.predict(test_data_bag) rf_predict = randomforest.predict(test_data_bag) extree_predict = extratrees.predict(test_data_bag) bagging_predict = bagging.predict(test_data_bag) end = time.time() print "Time Taken to Test the models : ", end-start print "Accuracy of AdaBoost Algorithm : ", metrics.accuracy_score(test_labels,ada_predict) print "Accuracy of Random Forests : ", metrics.accuracy_score(test_labels,rf_predict) print "Accuracy of Extra Trees : ", metrics.accuracy_score(test_labels,extree_predict) print "Accuracy of Bagging : ", metrics.accuracy_score(test_labels,bagging_predict) # Saving the tf-idf model and classifiers ClassificationUtils.save_classifier("ada_bag_cook.pickle",adaboost) ClassificationUtils.save_classifier("rf_bag_cook.pickle",randomforest) ClassificationUtils.save_classifier("extree_bag_cook.pickle",extratrees) ClassificationUtils.save_classifier("bagging_bag_cook.pickle",bagging) ClassificationUtils.save_classifier("bag_of_words.pickle",tfidf) def printIngredientDistribution(): print "----------- Distribution of the Recipe Ingredients ------------------" for key in ingredient_map.keys(): print key, " : " ,ingredient_map[key] def printCuisineDistribution(): print "----------- Distribution of the Cuisines ------------------" for key in cuisine_map.keys(): print key, " : " ,cuisine_map[key]
2.59375
3
pybook/ch10/DeckOfCards.py
YanhaoXu/python-learning
2
1446
import random

# Create a deck of cards
deck = [x for x in range(52)]

# Create suits and ranks lists
suits = ["Spades", "Hearts", "Diamonds", "Clubs"]
ranks = ["Ace", "2", "3", "4", "5", "6", "7", "8", "9", "10",
         "Jack", "Queen", "King"]

# Shuffle the cards
random.shuffle(deck)

# Display the first four cards
for i in range(4):
    suit = suits[deck[i] // 13]
    rank = ranks[deck[i] % 13]
    print("Card number", deck[i], "is the", rank, "of", suit)
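# Small illustrative helper (not in the original listing): the same
# index // 13 and index % 13 convention, wrapped as a reusable function.
def card_name(index):
    return ranks[index % 13] + " of " + suits[index // 13]

# Example: name the fifth card of the shuffled deck as well.
print("Card number", deck[4], "is the", card_name(deck[4]))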
3.734375
4
nlpgnn/gnn/RGCNConv.py
ojipadeson/NLPGNN
263
1447
<gh_stars>100-1000 #! usr/bin/env python3 # -*- coding:utf-8 -*- """ @Author:<NAME> Usage: node_embeddings = tf.random.normal(shape=(5, 3)) adjacency_lists = [ tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32), tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32) ] layer = RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings, adjacency_lists)) """ import tensorflow as tf from nlpgnn.gnn.messagepassing import MessagePassing class RGraphConvolution(MessagePassing): def __init__(self, out_features, epsion=1e-7, aggr="sum", normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True, **kwargs): super(RGraphConvolution, self).__init__(aggr, **kwargs) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.use_bias = use_bias self.normalize = normalize self.out_features = out_features self.epsion = epsion def build(self, input_shapes): node_embedding_shapes = input_shapes.node_embeddings adjacency_list_shapes = input_shapes.adjacency_lists num_edge_type = len(adjacency_list_shapes) in_features = node_embedding_shapes[-1] self._edge_type_weights = [] self._edge_type_bias = [] for i in range(num_edge_type): weight = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i), ) self._edge_type_weights.append(weight) if self.use_bias: self.bias = self.add_weight( shape=(self.out_features), initializer=self.bias_initializer, name='b', ) else: self.bias = None self.weight_o = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wo', ) self.built = True def message_function(self, edge_source_states, edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx): """ :param edge_source_states: [M,H] :param edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M] :param edge_type_idx: :param training: :return: """ weight_r = self._edge_type_weights[edge_type_idx] messages = tf.linalg.matmul(edge_source_states, weight_r) if self.normalize: messages = ( tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion), axis=-1) * messages ) return messages def call(self, inputs): aggr_out = self.propagate(inputs) # message_passing + update aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if self.bias is not None: aggr_out += self.bias return aggr_out
2.25
2
automl/google/cloud/automl_v1beta1/gapic/auto_ml_client.py
erikwebb/google-cloud-python
1
1448
# -*- coding: utf-8 -*- # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Accesses the google.cloud.automl.v1beta1 AutoMl API.""" import functools import pkg_resources import warnings from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.grpc_helpers import google.api_core.operation import google.api_core.operations_v1 import google.api_core.page_iterator import google.api_core.path_template import grpc from google.cloud.automl_v1beta1.gapic import auto_ml_client_config from google.cloud.automl_v1beta1.gapic import enums from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport from google.cloud.automl_v1beta1.proto import data_items_pb2 from google.cloud.automl_v1beta1.proto import dataset_pb2 from google.cloud.automl_v1beta1.proto import io_pb2 from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 from google.cloud.automl_v1beta1.proto import model_pb2 from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc from google.cloud.automl_v1beta1.proto import service_pb2 from google.cloud.automl_v1beta1.proto import service_pb2_grpc from google.longrunning import operations_pb2 as longrunning_operations_pb2 from google.protobuf import empty_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version class AutoMlClient(object): """ AutoML Server API. The resource names are assigned by the server. The server never reuses names that it has created after the resources with those names are deleted. An ID of a resource is the last element of the item's resource name. For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then the id for the item is ``{dataset_id}``. """ SERVICE_ADDRESS = "automl.googleapis.com:443" """The default address of the service.""" # The name of the interface for this client. This is the key used to # find the method configuration in the client_config dictionary. _INTERFACE_NAME = "google.cloud.automl.v1beta1.AutoMl" @classmethod def from_service_account_file(cls, filename, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: AutoMlClient: The constructed client. 
""" credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @classmethod def location_path(cls, project, location): """Return a fully-qualified location string.""" return google.api_core.path_template.expand( "projects/{project}/locations/{location}", project=project, location=location, ) @classmethod def dataset_path(cls, project, location, dataset): """Return a fully-qualified dataset string.""" return google.api_core.path_template.expand( "projects/{project}/locations/{location}/datasets/{dataset}", project=project, location=location, dataset=dataset, ) @classmethod def model_path(cls, project, location, model): """Return a fully-qualified model string.""" return google.api_core.path_template.expand( "projects/{project}/locations/{location}/models/{model}", project=project, location=location, model=model, ) @classmethod def model_evaluation_path(cls, project, location, model, model_evaluation): """Return a fully-qualified model_evaluation string.""" return google.api_core.path_template.expand( "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}", project=project, location=location, model=model, model_evaluation=model_evaluation, ) def __init__( self, transport=None, channel=None, credentials=None, client_config=None, client_info=None, ): """Constructor. Args: transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport instance, responsible for actually making the API calls. The default transport uses the gRPC protocol. This argument may also be a callable which returns a transport instance. Callables will be sent the credentials as the first argument and the default transport class as the second argument. channel (grpc.Channel): DEPRECATED. A ``Channel`` instance through which to make calls. This argument is mutually exclusive with ``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. This argument is mutually exclusive with providing a transport instance to ``transport``; doing so will raise an exception. client_config (dict): DEPRECATED. A dictionary of call options for each method. If not specified, the default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. """ # Raise deprecation warnings for things we want to go away. if client_config is not None: warnings.warn( "The `client_config` argument is deprecated.", PendingDeprecationWarning, stacklevel=2, ) else: client_config = auto_ml_client_config.config if channel: warnings.warn( "The `channel` argument is deprecated; use " "`transport` instead.", PendingDeprecationWarning, stacklevel=2, ) # Instantiate the transport. # The transport is responsible for handling serialization and # deserialization and actually sending data to the service. 
if transport: if callable(transport): self.transport = transport( credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, ) else: if credentials: raise ValueError( "Received both a transport instance and " "credentials; these are mutually exclusive." ) self.transport = transport else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC # from the client configuration. # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config["interfaces"][self._INTERFACE_NAME] ) # Save a dictionary of cached API call functions. # These are the actual callables which invoke the proper # transport methods, wrapped with `wrap_method` to add retry, # timeout, and the like. self._inner_api_calls = {} # Service calls def create_dataset( self, parent, dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `dataset`: >>> dataset = {} >>> >>> response = client.create_dataset(parent, dataset) Args: parent (str): The resource name of the project to create the dataset for. dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "create_dataset" not in self._inner_api_calls: self._inner_api_calls[ "create_dataset" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset, default_retry=self._method_configs["CreateDataset"].retry, default_timeout=self._method_configs["CreateDataset"].timeout, client_info=self._client_info, ) request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) return self._inner_api_calls["create_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets a dataset. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.get_dataset(name) Args: name (str): The resource name of the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "get_dataset" not in self._inner_api_calls: self._inner_api_calls[ "get_dataset" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset, default_retry=self._method_configs["GetDataset"].retry, default_timeout=self._method_configs["GetDataset"].timeout, client_info=self._client_info, ) request = service_pb2.GetDatasetRequest(name=name) return self._inner_api_calls["get_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_datasets( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists datasets in a project. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over all results >>> for element in client.list_datasets(parent): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_datasets(parent).pages: ... for element in page: ... # process element ... pass Args: parent (str): The resource name of the project from which to list datasets. filter_ (str): An expression for filtering the results of the request. - ``dataset_metadata`` - for existence of the case. An example of using the filter is: - ``translation_dataset_metadata:*`` --> The dataset has translation\_dataset\_metadata. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "list_datasets" not in self._inner_api_calls: self._inner_api_calls[ "list_datasets" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets, default_retry=self._method_configs["ListDatasets"].retry, default_timeout=self._method_configs["ListDatasets"].timeout, client_info=self._client_info, ) request = service_pb2.ListDatasetsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_datasets"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="datasets", request_token_field="page_token", response_token_field="next_page_token", ) return iterator def delete_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a dataset and all of its contents. Returns empty response in the ``response`` field when it completes, and ``delete_details`` in the ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.delete_dataset(name) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): The resource name of the dataset to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_dataset" not in self._inner_api_calls: self._inner_api_calls[ "delete_dataset" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs["DeleteDataset"].retry, default_timeout=self._method_configs["DeleteDataset"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteDatasetRequest(name=name) operation = self._inner_api_calls["delete_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def import_data( self, name, input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Imports data into a dataset. 
Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize `input_config`: >>> input_config = {} >>> >>> response = client.import_data(name, input_config) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): Required. Dataset name. Dataset must already exist. All imported annotations and examples will be added. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "import_data" not in self._inner_api_calls: self._inner_api_calls[ "import_data" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.import_data, default_retry=self._method_configs["ImportData"].retry, default_timeout=self._method_configs["ImportData"].timeout, client_info=self._client_info, ) request = service_pb2.ImportDataRequest(name=name, input_config=input_config) operation = self._inner_api_calls["import_data"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def export_data( self, name, output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Exports dataset's data to a Google Cloud Storage bucket. Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize `output_config`: >>> output_config = {} >>> >>> response = client.export_data(name, output_config) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): Required. The resource name of the dataset. output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "export_data" not in self._inner_api_calls: self._inner_api_calls[ "export_data" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.export_data, default_retry=self._method_configs["ExportData"].retry, default_timeout=self._method_configs["ExportData"].timeout, client_info=self._client_info, ) request = service_pb2.ExportDataRequest(name=name, output_config=output_config) operation = self._inner_api_calls["export_data"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def create_model( self, parent, model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates a model. Returns a Model in the ``response`` field when it completes. When you create a model, several model evaluations are created for it: a global evaluation, and one evaluation for each annotation spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `model`: >>> model = {} >>> >>> response = client.create_model(parent, model) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: parent (str): Resource name of the parent project where the model is being created. model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. 
""" # Wrap the transport method to add retry and timeout logic. if "create_model" not in self._inner_api_calls: self._inner_api_calls[ "create_model" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_model, default_retry=self._method_configs["CreateModel"].retry, default_timeout=self._method_configs["CreateModel"].timeout, client_info=self._client_info, ) request = service_pb2.CreateModelRequest(parent=parent, model=model) operation = self._inner_api_calls["create_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, ) def get_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets a model. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.get_model(name) Args: name (str): Resource name of the model. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Model` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "get_model" not in self._inner_api_calls: self._inner_api_calls[ "get_model" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model, default_retry=self._method_configs["GetModel"].retry, default_timeout=self._method_configs["GetModel"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelRequest(name=name) return self._inner_api_calls["get_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_models( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists models. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over all results >>> for element in client.list_models(parent): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_models(parent).pages: ... for element in page: ... # process element ... pass Args: parent (str): Resource name of the project, from which to list the models. filter_ (str): An expression for filtering the results of the request. - ``model_metadata`` - for existence of the case. - ``dataset_id`` - for = or !=. Some examples of using the filter are: - ``image_classification_model_metadata:*`` --> The model has image\_classification\_model\_metadata. - ``dataset_id=5`` --> The model was created from a sibling dataset with ID 5. 
page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "list_models" not in self._inner_api_calls: self._inner_api_calls[ "list_models" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_models, default_retry=self._method_configs["ListModels"].retry, default_timeout=self._method_configs["ListModels"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_models"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="model", request_token_field="page_token", response_token_field="next_page_token", ) return iterator def delete_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a model. If a model is already deployed, this only deletes the model in AutoML BE, and does not change the status of the deployed model in the production environment. Returns ``google.protobuf.Empty`` in the ``response`` field when it completes, and ``delete_details`` in the ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.delete_model(name) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): Resource name of the model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. 
google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_model" not in self._inner_api_calls: self._inner_api_calls[ "delete_model" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model, default_retry=self._method_configs["DeleteModel"].retry, default_timeout=self._method_configs["DeleteModel"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteModelRequest(name=name) operation = self._inner_api_calls["delete_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def deploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deploys model. Returns a ``DeployModelResponse`` in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.deploy_model(name) Args: name (str): Resource name of the model to deploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "deploy_model" not in self._inner_api_calls: self._inner_api_calls[ "deploy_model" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model, default_retry=self._method_configs["DeployModel"].retry, default_timeout=self._method_configs["DeployModel"].timeout, client_info=self._client_info, ) request = service_pb2.DeployModelRequest(name=name) return self._inner_api_calls["deploy_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) def undeploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Undeploys model. Returns an ``UndeployModelResponse`` in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.undeploy_model(name) Args: name (str): Resource name of the model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. 
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "undeploy_model" not in self._inner_api_calls: self._inner_api_calls[ "undeploy_model" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model, default_retry=self._method_configs["UndeployModel"].retry, default_timeout=self._method_configs["UndeployModel"].timeout, client_info=self._client_info, ) request = service_pb2.UndeployModelRequest(name=name) return self._inner_api_calls["undeploy_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_model_evaluation( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets a model evaluation. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>> >>> response = client.get_model_evaluation(name) Args: name (str): Resource name for the model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "get_model_evaluation" not in self._inner_api_calls: self._inner_api_calls[ "get_model_evaluation" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation, default_retry=self._method_configs["GetModelEvaluation"].retry, default_timeout=self._method_configs["GetModelEvaluation"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelEvaluationRequest(name=name) return self._inner_api_calls["get_model_evaluation"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_model_evaluations( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists model evaluations. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> # Iterate over all results >>> for element in client.list_model_evaluations(parent): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_model_evaluations(parent).pages: ... for element in page: ... # process element ... 
pass Args: parent (str): Resource name of the model to list the model evaluations for. If modelId is set as "-", this will list model evaluations from across all models of the parent location. filter_ (str): An expression for filtering the results of the request. - ``annotation_spec_id`` - for =, != or existence. See example below for the last. Some examples of using the filter are: - ``annotation_spec_id!=4`` --> The model evaluation was done for annotation spec with ID different than 4. - ``NOT annotation_spec_id:*`` --> The model evaluation was done for aggregate of all annotation specs. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "list_model_evaluations" not in self._inner_api_calls: self._inner_api_calls[ "list_model_evaluations" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations, default_retry=self._method_configs["ListModelEvaluations"].retry, default_timeout=self._method_configs["ListModelEvaluations"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelEvaluationsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_model_evaluations"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="model_evaluation", request_token_field="page_token", response_token_field="next_page_token", ) return iterator
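A short, hedged sketch that strings together the create_dataset and import_data calls documented in the docstrings above. The project ID, location, translation metadata, and GCS URI are illustrative placeholders only; the payload field names follow the AutoML v1beta1 Dataset and InputConfig protos but are not taken from this file.

# Illustrative-only sketch combining the documented create_dataset / import_data calls.
from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()
parent = client.location_path('my-project', 'us-central1')  # placeholder project/location

# Dataset payload: dict form of the Dataset proto (fields here are assumed placeholders).
dataset = {
    'display_name': 'example_dataset',
    'translation_dataset_metadata': {
        'source_language_code': 'en',
        'target_language_code': 'es',
    },
}
response = client.create_dataset(parent, dataset)

# InputConfig payload: dict form of the InputConfig proto (placeholder GCS URI).
input_config = {'gcs_source': {'input_uris': ['gs://example-bucket/data.csv']}}
operation = client.import_data(response.name, input_config)
operation.result()  # block until the long-running import operation completes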
1.09375
1
addons/project/models/project.py
SHIVJITH/Odoo_Machine_Test
0
1449
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import ast from datetime import timedelta, datetime from random import randint from odoo import api, fields, models, tools, SUPERUSER_ID, _ from odoo.exceptions import UserError, AccessError, ValidationError, RedirectWarning from odoo.tools.misc import format_date, get_lang from odoo.osv.expression import OR from .project_task_recurrence import DAYS, WEEKS class ProjectTaskType(models.Model): _name = 'project.task.type' _description = 'Task Stage' _order = 'sequence, id' def _get_default_project_ids(self): default_project_id = self.env.context.get('default_project_id') return [default_project_id] if default_project_id else None active = fields.Boolean('Active', default=True) name = fields.Char(string='Stage Name', required=True, translate=True) description = fields.Text(translate=True) sequence = fields.Integer(default=1) project_ids = fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', string='Projects', default=_get_default_project_ids) legend_blocked = fields.Char( 'Red Kanban Label', default=lambda s: _('Blocked'), translate=True, required=True, help='Override the default value displayed for the blocked state for kanban selection, when the task or issue is in that stage.') legend_done = fields.Char( 'Green Kanban Label', default=lambda s: _('Ready'), translate=True, required=True, help='Override the default value displayed for the done state for kanban selection, when the task or issue is in that stage.') legend_normal = fields.Char( 'Grey Kanban Label', default=lambda s: _('In Progress'), translate=True, required=True, help='Override the default value displayed for the normal state for kanban selection, when the task or issue is in that stage.') mail_template_id = fields.Many2one( 'mail.template', string='Email Template', domain=[('model', '=', 'project.task')], help="If set an email will be sent to the customer when the task or issue reaches this step.") fold = fields.Boolean(string='Folded in Kanban', help='This stage is folded in the kanban view when there are no records in that stage to display.') rating_template_id = fields.Many2one( 'mail.template', string='Rating Email Template', domain=[('model', '=', 'project.task')], help="If set and if the project's rating configuration is 'Rating when changing stage', then an email will be sent to the customer when the task reaches this step.") auto_validation_kanban_state = fields.Boolean('Automatic kanban status', default=False, help="Automatically modify the kanban state when the customer replies to the feedback for this stage.\n" " * A good feedback from the customer will update the kanban state to 'ready for the new stage' (green bullet).\n" " * A medium or a bad feedback will set the kanban state to 'blocked' (red bullet).\n") is_closed = fields.Boolean('Closing Stage', help="Tasks in this stage are considered as closed.") disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning') def unlink_wizard(self, stage_view=False): self = self.with_context(active_test=False) # retrieves all the projects with a least 1 task in that stage # a task can be in a stage even if the project is not assigned to the stage readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'], ['project_id']) project_ids = list(set([project['project_id'][0] for project in readgroup] + self.project_ids.ids)) wizard = 
self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids': project_ids, 'stage_ids': self.ids }) context = dict(self.env.context) context['stage_view'] = stage_view return { 'name': _('Delete Stage'), 'view_mode': 'form', 'res_model': 'project.task.type.delete.wizard', 'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': context, } def write(self, vals): if 'active' in vals and not vals['active']: self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False}) return super(ProjectTaskType, self).write(vals) @api.depends('project_ids', 'project_ids.rating_active') def _compute_disabled_rating_warning(self): for stage in self: disabled_projects = stage.project_ids.filtered(lambda p: not p.rating_active) if disabled_projects: stage.disabled_rating_warning = '\n'.join('- %s' % p.name for p in disabled_projects) else: stage.disabled_rating_warning = False class Project(models.Model): _name = "project.project" _description = "Project" _inherit = ['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin'] _order = "sequence, name, id" _rating_satisfaction_days = False # takes all existing ratings _check_company_auto = True def _compute_attached_docs_count(self): Attachment = self.env['ir.attachment'] for project in self: project.doc_count = Attachment.search_count([ '|', '&', ('res_model', '=', 'project.project'), ('res_id', '=', project.id), '&', ('res_model', '=', 'project.task'), ('res_id', 'in', project.task_ids.ids) ]) def _compute_task_count(self): task_data = self.env['project.task'].read_group([('project_id', 'in', self.ids), '|', '&', ('stage_id.is_closed', '=', False), ('stage_id.fold', '=', False), ('stage_id', '=', False)], ['project_id'], ['project_id']) result = dict((data['project_id'][0], data['project_id_count']) for data in task_data) for project in self: project.task_count = result.get(project.id, 0) def attachment_tree_view(self): action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain'] = str([ '|', '&', ('res_model', '=', 'project.project'), ('res_id', 'in', self.ids), '&', ('res_model', '=', 'project.task'), ('res_id', 'in', self.task_ids.ids) ]) action['context'] = "{'default_res_model': '%s','default_res_id': %d}" % (self._name, self.id) return action def _compute_is_favorite(self): for project in self: project.is_favorite = self.env.user in project.favorite_user_ids def _inverse_is_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project in self: if self.env.user in project.favorite_user_ids: favorite_projects |= project else: not_fav_projects |= project # Project User has no write access for project. 
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def _get_default_favorite_user_ids(self): return [(6, 0, [self.env.uid])] name = fields.Char("Name", index=True, required=True, tracking=True) description = fields.Html() active = fields.Boolean(default=True, help="If the active field is set to False, it will allow you to hide the project without removing it.") sequence = fields.Integer(default=10, help="Gives the sequence order when displaying a list of Projects.") partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True, domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]") partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False) partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string="Phone", readonly=False, store=True, copy=False) company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.company) currency_id = fields.Many2one('res.currency', related="company_id.currency_id", string="Currency", readonly=True) analytic_account_id = fields.Many2one('account.analytic.account', string="Analytic Account", copy=False, ondelete='set null', domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]", check_company=True, help="Analytic account to which this project is linked for financial management. " "Use an analytic account to record cost and revenue on your project.") favorite_user_ids = fields.Many2many( 'res.users', 'project_favorite_user_rel', 'project_id', 'user_id', default=_get_default_favorite_user_ids, string='Members') is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project on dashboard', help="Whether this project should be displayed on your dashboard.") label_tasks = fields.Char(string='Use Tasks as', default='Tasks', help="Label used for the tasks of the project.", translate=True) tasks = fields.One2many('project.task', 'project_id', string="Task Activities") resource_calendar_id = fields.Many2one( 'resource.calendar', string='Working Time', related='company_id.resource_calendar_id') type_ids = fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', string='Tasks Stages') task_count = fields.Integer(compute='_compute_task_count', string="Task Count") task_ids = fields.One2many('project.task', 'project_id', string='Tasks', domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)]) color = fields.Integer(string='Color Index') user_id = fields.Many2one('res.users', string='Project Manager', default=lambda self: self.env.user, tracking=True) alias_enabled = fields.Boolean(string='Use email alias', compute='_compute_alias_enabled', readonly=False) alias_id = fields.Many2one('mail.alias', string='Alias', ondelete="restrict", required=True, help="Internal email associated with this project. 
Incoming emails are automatically synchronized " "with Tasks (or optionally Issues if the Issue Tracker module is installed).") privacy_visibility = fields.Selection([ ('followers', 'Invited internal users'), ('employees', 'All internal users'), ('portal', 'Invited portal users and all internal users'), ], string='Visibility', required=True, default='portal', help="Defines the visibility of the tasks of the project:\n" "- Invited internal users: employees may only see the followed project and tasks.\n" "- All internal users: employees may see all project and tasks.\n" "- Invited portal and all internal users: employees may see everything." " Portal users may see project and tasks followed by\n" " them or by someone of their company.") allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user') allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel', string="Allowed Internal Users", default=lambda self: self.env.user, domain=[('share', '=', False)]) allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel', string="Allowed Portal Users", domain=[('share', '=', True)]) doc_count = fields.Integer(compute='_compute_attached_docs_count', string="Number of documents attached") date_start = fields.Date(string='Start Date') date = fields.Date(string='Expiration Date', index=True, tracking=True) subtask_project_id = fields.Many2one('project.project', string='Sub-task Project', ondelete="restrict", help="Project in which sub-tasks of the current project will be created. It can be the current project itself.") allow_subtasks = fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks = fields.Boolean('Recurring Tasks', default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks')) # rating fields rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True) rating_active = fields.Boolean('Customer Ratings', default=lambda self: self.env.user.has_group('project.group_project_rating')) rating_status = fields.Selection( [('stage', 'Rating when changing stage'), ('periodic', 'Periodical Rating') ], 'Customer Ratings Status', default="stage", required=True, help="How to get customer feedback?\n" "- Rating when changing stage: an email will be sent when a task is pulled in another stage.\n" "- Periodical Rating: email will be sent periodically.\n\n" "Don't forget to set up the mail templates on the stages for which you want to get the customer's feedbacks.") rating_status_period = fields.Selection([ ('daily', 'Daily'), ('weekly', 'Weekly'), ('bimonthly', 'Twice a Month'), ('monthly', 'Once a Month'), ('quarterly', 'Quarterly'), ('yearly', 'Yearly')], 'Rating Frequency', required=True, default='monthly') _sql_constraints = [ ('project_date_greater', 'check(date >= date_start)', 'Error! 
project start-date must be lower than project end-date.') ] @api.depends('partner_id.email') def _compute_partner_email(self): for project in self: if project.partner_id and project.partner_id.email != project.partner_email: project.partner_email = project.partner_id.email def _inverse_partner_email(self): for project in self: if project.partner_id and project.partner_email != project.partner_id.email: project.partner_id.email = project.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for project in self: if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_phone = project.partner_id.phone def _inverse_partner_phone(self): for project in self: if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_id.phone = project.partner_phone @api.onchange('alias_enabled') def _onchange_alias_name(self): if not self.alias_enabled: self.alias_name = False def _compute_alias_enabled(self): for project in self: project.alias_enabled = project.alias_domain and project.alias_id.alias_name @api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids') def _compute_allowed_users(self): for project in self: users = project.allowed_internal_user_ids | project.allowed_portal_user_ids project.allowed_user_ids = users def _inverse_allowed_user(self): for project in self: allowed_users = project.allowed_user_ids project.allowed_portal_user_ids = allowed_users.filtered('share') project.allowed_internal_user_ids = allowed_users - project.allowed_portal_user_ids def _compute_access_url(self): super(Project, self)._compute_access_url() for project in self: project.access_url = '/my/project/%s' % project.id def _compute_access_warning(self): super(Project, self)._compute_access_warning() for project in self.filtered(lambda x: x.privacy_visibility != 'portal'): project.access_warning = _( "The project cannot be shared with the recipient(s) because the privacy of the project is too restricted. 
Set the privacy to 'Visible by following customers' in order to make it accessible by the recipient(s).") @api.depends('rating_status', 'rating_status_period') def _compute_rating_request_deadline(self): periods = {'daily': 1, 'weekly': 7, 'bimonthly': 15, 'monthly': 30, 'quarterly': 90, 'yearly': 365} for project in self: project.rating_request_deadline = fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0)) @api.model def _map_tasks_default_valeus(self, task, project): """ get the default value for the copied task on project duplication """ return { 'stage_id': task.stage_id.id, 'name': task.name, 'company_id': project.company_id.id, } def map_tasks(self, new_project_id): """ copy and map tasks from old to new project """ project = self.browse(new_project_id) tasks = self.env['project.task'] # We want to copy archived task, but do not propagate an active_test context key task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids old_to_new_tasks = {} for task in self.env['project.task'].browse(task_ids): # preserve task name and stage, normally altered during copy defaults = self._map_tasks_default_valeus(task, project) if task.parent_id: # set the parent to the duplicated task defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False) new_task = task.copy(defaults) old_to_new_tasks[task.id] = new_task.id tasks += new_task return project.write({'tasks': [(6, 0, tasks.ids)]}) @api.returns('self', lambda value: value.id) def copy(self, default=None): if default is None: default = {} if not default.get('name'): default['name'] = _("%s (copy)") % (self.name) project = super(Project, self).copy(default) if self.subtask_project_id == self: project.subtask_project_id = project for follower in self.message_follower_ids: project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids) if 'tasks' not in default: self.map_tasks(project.id) return project @api.model def create(self, vals): # Prevent double project creation self = self.with_context(mail_create_nosubscribe=True) project = super(Project, self).create(vals) if not vals.get('subtask_project_id'): project.subtask_project_id = project.id if project.privacy_visibility == 'portal' and project.partner_id.user_ids: project.allowed_user_ids |= project.partner_id.user_ids return project def write(self, vals): allowed_users_changed = 'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids' in vals if allowed_users_changed: allowed_users = {project: project.allowed_user_ids for project in self} # directly compute is_favorite to dodge allow write access right if 'is_favorite' in vals: vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self) res = super(Project, self).write(vals) if vals else True if allowed_users_changed: for project in self: permission_removed = allowed_users.get(project) - project.allowed_user_ids allowed_portal_users_removed = permission_removed.filtered('share') project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for task in project.task_ids: task.allowed_user_ids -= permission_removed if 'allow_recurring_tasks' in vals and not vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False}) if 'active' in vals: # archiving/unarchiving a project does it on its tasks, too 
self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']}) if vals.get('partner_id') or vals.get('privacy_visibility'): for project in self.filtered(lambda project: project.privacy_visibility == 'portal'): project.allowed_user_ids |= project.partner_id.user_ids return res def action_unlink(self): wizard = self.env['project.delete.wizard'].create({ 'project_ids': self.ids }) return { 'name': _('Confirmation'), 'view_mode': 'form', 'res_model': 'project.delete.wizard', 'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': self.env.context, } def unlink(self): # Check project is empty for project in self.with_context(active_test=False): if project.tasks: raise UserError(_('You cannot delete a project containing tasks. You can either archive it or first delete all of its tasks.')) # Delete the empty related analytic account analytic_accounts_to_delete = self.env['account.analytic.account'] for project in self: if project.analytic_account_id and not project.analytic_account_id.line_ids: analytic_accounts_to_delete |= project.analytic_account_id result = super(Project, self).unlink() analytic_accounts_to_delete.unlink() return result def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): """ Subscribe to all existing active tasks when subscribing to a project And add the portal user subscribed to allowed portal users """ res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids if project_subtypes else None if not subtype_ids or task_subtypes: self.mapped('tasks').message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes) if partner_ids: all_users = self.env['res.partner'].browse(partner_ids).user_ids portal_users = all_users.filtered('share') internal_users = all_users - portal_users self.allowed_portal_user_ids |= portal_users self.allowed_internal_user_ids |= internal_users return res def message_unsubscribe(self, partner_ids=None, channel_ids=None): """ Unsubscribe from all tasks when unsubscribing from a project """ self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) def _alias_get_creation_values(self): values = super(Project, self)._alias_get_creation_values() values['alias_model_id'] = self.env['ir.model']._get('project.task').id if self.id: values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or "{}") defaults['project_id'] = self.id return values # --------------------------------------------------- # Actions # --------------------------------------------------- def toggle_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project in self: if self.env.user in project.favorite_user_ids: favorite_projects |= project else: not_fav_projects |= project # Project User has no write access for project. 
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def action_view_tasks(self): action = self.with_context(active_id=self.id, active_ids=self.ids) \ .env.ref('project.act_project_project_2_project_task_all') \ .sudo().read()[0] action['display_name'] = self.name return action def action_view_account_analytic_line(self): """ return the action to see all the analytic lines of the project's analytic account """ action = self.env["ir.actions.actions"]._for_xml_id("analytic.account_analytic_line_action") action['context'] = {'default_account_id': self.analytic_account_id.id} action['domain'] = [('account_id', '=', self.analytic_account_id.id)] return action def action_view_all_rating(self): """ return the action to see all the rating of the project and activate default filters""" action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name'] = _('Ratings of %s') % (self.name,) action_context = ast.literal_eval(action['context']) if action['context'] else {} action_context.update(self._context) action_context['search_default_parent_res_name'] = self.name action_context.pop('group_by', None) return dict(action, context=action_context) # --------------------------------------------------- # Business Methods # --------------------------------------------------- @api.model def _create_analytic_account_from_values(self, values): analytic_account = self.env['account.analytic.account'].create({ 'name': values.get('name', _('Unknown Analytic Account')), 'company_id': values.get('company_id') or self.env.company.id, 'partner_id': values.get('partner_id'), 'active': True, }) return analytic_account def _create_analytic_account(self): for project in self: analytic_account = self.env['account.analytic.account'].create({ 'name': project.name, 'company_id': project.company_id.id, 'partner_id': project.partner_id.id, 'active': True, }) project.write({'analytic_account_id': analytic_account.id}) # --------------------------------------------------- # Rating business # --------------------------------------------------- # This method should be called once a day by the scheduler @api.model def _send_rating_all(self): projects = self.search([ ('rating_active', '=', True), ('rating_status', '=', 'periodic'), ('rating_request_deadline', '<=', fields.Datetime.now()) ]) for project in projects: project.task_ids._send_task_rating_mail() project._compute_rating_request_deadline() self.env.cr.commit() class Task(models.Model): _name = "project.task" _description = "Task" _date_name = "date_assign" _inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin'] _mail_post_access = 'read' _order = "priority desc, sequence, id desc" _check_company_auto = True def _get_default_stage_id(self): """ Gives default stage_id """ project_id = self.env.context.get('default_project_id') if not project_id: return False return self.stage_find(project_id, [('fold', '=', False), ('is_closed', '=', False)]) @api.model def _default_company_id(self): if self._context.get('default_project_id'): return self.env['project.project'].browse(self._context['default_project_id']).company_id return self.env.company @api.model def _read_group_stage_ids(self, stages, domain, order): search_domain = [('id', 'in', stages.ids)] if 'default_project_id' in self.env.context: search_domain = ['|', ('project_ids', '=', self.env.context['default_project_id'])] + search_domain stage_ids = 
stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID) return stages.browse(stage_ids) active = fields.Boolean(default=True) name = fields.Char(string='Title', tracking=True, required=True, index=True) description = fields.Html(string='Description') priority = fields.Selection([ ('0', 'Normal'), ('1', 'Important'), ], default='0', index=True, string="Priority") sequence = fields.Integer(string='Sequence', index=True, default=10, help="Gives the sequence order when displaying a list of tasks.") stage_id = fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id', store=True, readonly=False, ondelete='restrict', tracking=True, index=True, default=_get_default_stage_id, group_expand='_read_group_stage_ids', domain="[('project_ids', '=', project_id)]", copy=False) tag_ids = fields.Many2many('project.tags', string='Tags') kanban_state = fields.Selection([ ('normal', 'In Progress'), ('done', 'Ready'), ('blocked', 'Blocked')], string='Kanban State', copy=False, default='normal', required=True) kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label', tracking=True) create_date = fields.Datetime("Created On", readonly=True, index=True) write_date = fields.Datetime("Last Updated On", readonly=True, index=True) date_end = fields.Datetime(string='Ending Date', index=True, copy=False) date_assign = fields.Datetime(string='Assigning Date', index=True, copy=False, readonly=True) date_deadline = fields.Date(string='Deadline', index=True, copy=False, tracking=True) date_last_stage_update = fields.Datetime(string='Last Stage Update', index=True, copy=False, readonly=True) project_id = fields.Many2one('project.project', string='Project', compute='_compute_project_id', store=True, readonly=False, index=True, tracking=True, check_company=True, change_default=True) planned_hours = fields.Float("Initially Planned Hours", help='Time planned to achieve this task (including its sub-tasks).', tracking=True) subtask_planned_hours = fields.Float("Sub-tasks Planned Hours", compute='_compute_subtask_planned_hours', help="Sum of the time planned of all the sub-tasks linked to this task. 
Usually less or equal to the initially time planned of this task.") user_id = fields.Many2one('res.users', string='Assigned to', default=lambda self: self.env.uid, index=True, tracking=True) partner_id = fields.Many2one('res.partner', string='Customer', compute='_compute_partner_id', store=True, readonly=False, domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]") partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True) commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id') partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False) partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string="Phone", readonly=False, store=True, copy=False) ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message') partner_city = fields.Char(related='partner_id.city', readonly=False) manager_id = fields.Many2one('res.users', string='Project Manager', related='project_id.user_id', readonly=True) company_id = fields.Many2one( 'res.company', string='Company', compute='_compute_company_id', store=True, readonly=False, required=True, copy=True, default=_default_company_id) color = fields.Integer(string='Color Index') user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False) attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids', string="Main Attachments", help="Attachment that don't come from message.") # In the domain of displayed_image_id, we couln't use attachment_ids because a one2many is represented as a list of commands so we used res_model & res_id displayed_image_id = fields.Many2one('ir.attachment', domain="[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]", string='Cover Image') legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True, related_sudo=False) legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True, related_sudo=False) legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True, related_sudo=False) is_closed = fields.Boolean(related="stage_id.is_closed", string="Closing Stage", readonly=True, related_sudo=False) parent_id = fields.Many2one('project.task', string='Parent Task', index=True) child_ids = fields.One2many('project.task', 'parent_id', string="Sub-tasks", context={'active_test': False}) subtask_project_id = fields.Many2one('project.project', related="project_id.subtask_project_id", string='Sub-task Project', readonly=True) allow_subtasks = fields.Boolean(string="Allow Sub-tasks", related="project_id.allow_subtasks", readonly=True) subtask_count = fields.Integer("Sub-task count", compute='_compute_subtask_count') email_from = fields.Char(string='Email From', help="These people will receive email.", index=True, compute='_compute_email_from', store="True", readonly=False) allowed_user_ids = fields.Many2many('res.users', string="Visible to", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False) project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string="Project Visibility") # Computed field about working time elapsed between record creation and assignation/closing. 
working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours to assign', store=True, group_operator="avg") working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours to close', store=True, group_operator="avg") working_days_open = fields.Float(compute='_compute_elapsed', string='Working days to assign', store=True, group_operator="avg") working_days_close = fields.Float(compute='_compute_elapsed', string='Working days to close', store=True, group_operator="avg") # customer portal: include comment and incoming emails in communication history website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', 'in', ['email', 'comment'])]) # recurrence fields allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task = fields.Boolean(string="Recurrent") recurring_count = fields.Integer(string="Tasks in Recurrence", compute='_compute_recurring_count') recurrence_id = fields.Many2one('project.task.recurrence', copy=False) recurrence_update = fields.Selection([ ('this', 'This task'), ('subsequent', 'This and following tasks'), ('all', 'All tasks'), ], default='this', store=False) recurrence_message = fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message') repeat_interval = fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat', readonly=False) repeat_unit = fields.Selection([ ('day', 'Days'), ('week', 'Weeks'), ('month', 'Months'), ('year', 'Years'), ], default='week', compute='_compute_repeat', readonly=False) repeat_type = fields.Selection([ ('forever', 'Forever'), ('until', 'End Date'), ('after', 'Number of Repetitions'), ], default="forever", string="Until", compute='_compute_repeat', readonly=False) repeat_until = fields.Date(string="End Date", compute='_compute_repeat', readonly=False) repeat_number = fields.Integer(string="Repetitions", default=1, compute='_compute_repeat', readonly=False) repeat_on_month = fields.Selection([ ('date', 'Date of the Month'), ('day', 'Day of the Month'), ], default='date', compute='_compute_repeat', readonly=False) repeat_on_year = fields.Selection([ ('date', 'Date of the Year'), ('day', 'Day of the Year'), ], default='date', compute='_compute_repeat', readonly=False) mon = fields.Boolean(string="Mon", compute='_compute_repeat', readonly=False) tue = fields.Boolean(string="Tue", compute='_compute_repeat', readonly=False) wed = fields.Boolean(string="Wed", compute='_compute_repeat', readonly=False) thu = fields.Boolean(string="Thu", compute='_compute_repeat', readonly=False) fri = fields.Boolean(string="Fri", compute='_compute_repeat', readonly=False) sat = fields.Boolean(string="Sat", compute='_compute_repeat', readonly=False) sun = fields.Boolean(string="Sun", compute='_compute_repeat', readonly=False) repeat_day = fields.Selection([ (str(i), str(i)) for i in range(1, 32) ], compute='_compute_repeat', readonly=False) repeat_week = fields.Selection([ ('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('last', 'Last'), ], default='first', compute='_compute_repeat', readonly=False) repeat_weekday = fields.Selection([ ('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday'), ], string='Day Of The Week', compute='_compute_repeat', readonly=False) repeat_month = fields.Selection([ ('january', 'January'), ('february', 'February'), ('march', 'March'), ('april', 'April'), ('may', 'May'), ('june', 'June'), 
('july', 'July'), ('august', 'August'), ('september', 'September'), ('october', 'October'), ('november', 'November'), ('december', 'December'), ], compute='_compute_repeat', readonly=False) repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility') @api.model def _get_recurrence_fields(self): return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday'] @api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year') def _compute_repeat_visibility(self): for task in self: task.repeat_show_day = task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month == 'date') or (task.repeat_unit == 'year' and task.repeat_on_year == 'date') task.repeat_show_week = task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month == 'day') or (task.repeat_unit == 'year' and task.repeat_on_year == 'day') task.repeat_show_dow = task.recurring_task and task.repeat_unit == 'week' task.repeat_show_month = task.recurring_task and task.repeat_unit == 'year' @api.depends('recurring_task') def _compute_repeat(self): rec_fields = self._get_recurrence_fields() defaults = self.default_get(rec_fields) for task in self: for f in rec_fields: if task.recurrence_id: task[f] = task.recurrence_id[f] else: if task.recurring_task: task[f] = defaults.get(f) else: task[f] = False def _get_weekdays(self, n=1): self.ensure_one() if self.repeat_unit == 'week': return [fn(n) for day, fn in DAYS.items() if self[day]] return [DAYS.get(self.repeat_weekday)(n)] @api.depends( 'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday') def _compute_recurrence_message(self): self.recurrence_message = False for task in self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()): date = fields.Date.today() number_occurrences = min(5, task.repeat_number if task.repeat_type == 'after' else 5) delta = task.repeat_interval if task.repeat_unit == 'day' else 1 recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates( date + timedelta(days=delta), task.repeat_interval, task.repeat_unit, task.repeat_type, task.repeat_until, task.repeat_on_month, task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day, task.repeat_week, task.repeat_month, count=number_occurrences) date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format task.recurrence_message = '<ul>' for date in recurring_dates[:5]: task.recurrence_message += '<li>%s</li>' % date.strftime(date_format) if task.repeat_type == 'after' and task.repeat_number > 5 or task.repeat_type == 'forever' or len(recurring_dates) > 5: task.recurrence_message += '<li>...</li>' task.recurrence_message += '</ul>' if task.repeat_type == 'until': task.recurrence_message += _('<p><em>Number of tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)} def _is_recurrence_valid(self): self.ensure_one() return self.repeat_interval > 0 and\ (not self.repeat_show_dow or self._get_weekdays()) 
and\ (self.repeat_type != 'after' or self.repeat_number) and\ (self.repeat_type != 'until' or self.repeat_until and self.repeat_until > fields.Date.today()) @api.depends('recurrence_id') def _compute_recurring_count(self): self.recurring_count = 0 recurring_tasks = self.filtered(lambda l: l.recurrence_id) count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id') tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c in count} for task in recurring_tasks: task.recurring_count = tasks_count.get(task.recurrence_id.id, 0) @api.depends('partner_id.email') def _compute_partner_email(self): for task in self: if task.partner_id and task.partner_id.email != task.partner_email: task.partner_email = task.partner_id.email def _inverse_partner_email(self): for task in self: if task.partner_id and task.partner_email != task.partner_id.email: task.partner_id.email = task.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for task in self: if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_phone = task.partner_id.phone def _inverse_partner_phone(self): for task in self: if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_id.phone = task.partner_phone @api.depends('partner_email', 'partner_phone', 'partner_id') def _compute_ribbon_message(self): for task in self: will_write_email = task.partner_id and task.partner_email != task.partner_id.email will_write_phone = task.partner_id and task.partner_phone != task.partner_id.phone if will_write_email and will_write_phone: task.ribbon_message = _('By saving this change, the customer email and phone number will also be updated.') elif will_write_email: task.ribbon_message = _('By saving this change, the customer email will also be updated.') elif will_write_phone: task.ribbon_message = _('By saving this change, the customer phone number will also be updated.') else: task.ribbon_message = False @api.constrains('parent_id') def _check_parent_id(self): if not self._check_recursion(): raise ValidationError(_('Error! You cannot create recursive hierarchy of tasks.')) @api.constrains('allowed_user_ids') def _check_no_portal_allowed(self): for task in self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'): portal_users = task.allowed_user_ids.filtered('share') if portal_users: user_names = ', '.join(portal_users[:10].mapped('name')) raise ValidationError(_("The project visibility setting doesn't allow portal users to see the project's tasks. 
(%s)", user_names)) def _compute_attachment_ids(self): for task in self: attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from mail_thread task.attachment_ids = [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility') def _compute_allowed_user_ids(self): for task in self: portal_users = task.allowed_user_ids.filtered('share') internal_users = task.allowed_user_ids - portal_users if task.project_id.privacy_visibility == 'followers': task.allowed_user_ids |= task.project_id.allowed_internal_user_ids task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility == 'portal': task.allowed_user_ids |= task.project_id.allowed_portal_user_ids if task.project_id.privacy_visibility != 'portal': task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility != 'followers': task.allowed_user_ids -= internal_users @api.depends('create_date', 'date_end', 'date_assign') def _compute_elapsed(self): task_linked_to_calendar = self.filtered( lambda task: task.project_id.resource_calendar_id and task.create_date ) for task in task_linked_to_calendar: dt_create_date = fields.Datetime.from_string(task.create_date) if task.date_assign: dt_date_assign = fields.Datetime.from_string(task.date_assign) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True) task.working_hours_open = duration_data['hours'] task.working_days_open = duration_data['days'] else: task.working_hours_open = 0.0 task.working_days_open = 0.0 if task.date_end: dt_date_end = fields.Datetime.from_string(task.date_end) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True) task.working_hours_close = duration_data['hours'] task.working_days_close = duration_data['days'] else: task.working_hours_close = 0.0 task.working_days_close = 0.0 (self - task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0)) @api.depends('stage_id', 'kanban_state') def _compute_kanban_state_label(self): for task in self: if task.kanban_state == 'normal': task.kanban_state_label = task.legend_normal elif task.kanban_state == 'blocked': task.kanban_state_label = task.legend_blocked else: task.kanban_state_label = task.legend_done def _compute_access_url(self): super(Task, self)._compute_access_url() for task in self: task.access_url = '/my/task/%s' % task.id def _compute_access_warning(self): super(Task, self)._compute_access_warning() for task in self.filtered(lambda x: x.project_id.privacy_visibility != 'portal'): task.access_warning = _( "The task cannot be shared with the recipient(s) because the privacy of the project is too restricted. 
Set the privacy of the project to 'Visible by following customers' in order to make it accessible by the recipient(s).") @api.depends('child_ids.planned_hours') def _compute_subtask_planned_hours(self): for task in self: task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours for child_task in task.child_ids) @api.depends('child_ids') def _compute_subtask_count(self): for task in self: task.subtask_count = len(task._get_all_subtasks()) @api.onchange('company_id') def _onchange_task_company(self): if self.project_id.company_id != self.company_id: self.project_id = False @api.depends('project_id.company_id') def _compute_company_id(self): for task in self.filtered(lambda task: task.project_id): task.company_id = task.project_id.company_id @api.depends('project_id') def _compute_stage_id(self): for task in self: if task.project_id: if task.project_id not in task.stage_id.project_ids: task.stage_id = task.stage_find(task.project_id.id, [ ('fold', '=', False), ('is_closed', '=', False)]) else: task.stage_id = False @api.returns('self', lambda value: value.id) def copy(self, default=None): if default is None: default = {} if not default.get('name'): default['name'] = _("%s (copy)", self.name) if self.recurrence_id: default['recurrence_id'] = self.recurrence_id.copy().id return super(Task, self).copy(default) @api.constrains('parent_id') def _check_parent_id(self): for task in self: if not task._check_recursion(): raise ValidationError(_('Error! You cannot create recursive hierarchy of task(s).')) @api.model def get_empty_list_help(self, help): tname = _("task") project_id = self.env.context.get('default_project_id', False) if project_id: name = self.env['project.project'].browse(project_id).label_tasks if name: tname = name.lower() self = self.with_context( empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project', empty_list_help_document_name=tname, ) return super(Task, self).get_empty_list_help(help) def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): """ Add the users subscribed to allowed portal users """ res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) if partner_ids: new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks = self.filtered(lambda task: task.project_id.privacy_visibility == 'portal') tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user in new_allowed_users]}) return res # ---------------------------------------- # Case management # ---------------------------------------- def stage_find(self, section_id, domain=[], order='sequence'): """ Override of the base.stage method Parameter of the stage search taken from the lead: - section_id: if set, stages must belong to this section or be a default stage; if not set, stages must be default stages """ # collect all section_ids section_ids = [] if section_id: section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids) search_domain = [] if section_ids: search_domain = [('|')] * (len(section_ids) - 1) for section_id in section_ids: search_domain.append(('project_ids', '=', section_id)) search_domain += list(domain) # perform search, return the first found return self.env['project.task.type'].search(search_domain, order=order, limit=1).id # ------------------------------------------------ # CRUD overrides # ------------------------------------------------ @api.model def 
default_get(self, default_fields): vals = super(Task, self).default_get(default_fields) days = list(DAYS.keys()) week_start = fields.Datetime.today().weekday() if all(d in default_fields for d in days): vals[days[week_start]] = True if 'repeat_day' in default_fields: vals['repeat_day'] = str(fields.Datetime.today().day) if 'repeat_month' in default_fields: vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0] if 'repeat_until' in default_fields: vals['repeat_until'] = fields.Date.today() + timedelta(days=7) if 'repeat_weekday' in default_fields: vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0] return vals @api.model_create_multi def create(self, vals_list): default_stage = dict() for vals in vals_list: project_id = vals.get('project_id') or self.env.context.get('default_project_id') if project_id and not "company_id" in vals: vals["company_id"] = self.env["project.project"].browse( project_id ).company_id.id or self.env.company.id if project_id and "stage_id" not in vals: # 1) Allows keeping the batch creation of tasks # 2) Ensure the defaults are correct (and computed once by project), # by using default get (instead of _get_default_stage_id or _stage_find), if project_id not in default_stage: default_stage[project_id] = self.with_context( default_project_id=project_id ).default_get(['stage_id']).get('stage_id') vals["stage_id"] = default_stage[project_id] # user_id change: update date_assign if vals.get('user_id'): vals['date_assign'] = fields.Datetime.now() # Stage change: Update date_end if folded stage and date_last_stage_update if vals.get('stage_id'): vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = fields.Datetime.now() # recurrence rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields and vals.get('recurring_task') is True: rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields} rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id'] = recurrence.id tasks = super().create(vals_list) for task in tasks: if task.project_id.privacy_visibility == 'portal': task._portal_ensure_token() return tasks def write(self, vals): now = fields.Datetime.now() if 'parent_id' in vals and vals['parent_id'] in self.ids: raise UserError(_("Sorry. You can't set a task as its parent task.")) if 'active' in vals and not vals.get('active') and any(self.mapped('recurrence_id')): # TODO: show a dialog to stop the recurrence raise UserError(_('You cannot archive recurring tasks. 
Please, disable the recurrence first.')) # stage change: update date_last_stage_update if 'stage_id' in vals: vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = now # reset kanban state when changing stage if 'kanban_state' not in vals: vals['kanban_state'] = 'normal' # user_id change: update date_assign if vals.get('user_id') and 'date_assign' not in vals: vals['date_assign'] = now # recurrence fields rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields: rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields} for task in self: if task.recurrence_id: task.recurrence_id.write(rec_values) elif vals.get('recurring_task'): rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) task.recurrence_id = recurrence.id if 'recurring_task' in vals and not vals.get('recurring_task'): self.recurrence_id.unlink() tasks = self recurrence_update = vals.pop('recurrence_update', 'this') if recurrence_update != 'this': recurrence_domain = [] if recurrence_update == 'subsequent': for task in self: recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]]) else: recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)] tasks |= self.env['project.task'].search(recurrence_domain) result = super(Task, tasks).write(vals) # rating on stage if 'stage_id' in vals and vals.get('stage_id'): self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True) return result def update_date_end(self, stage_id): project_task_type = self.env['project.task.type'].browse(stage_id) if project_task_type.fold or project_task_type.is_closed: return {'date_end': fields.Datetime.now()} return {'date_end': False} def unlink(self): if any(self.mapped('recurrence_id')): # TODO: show a dialog to stop the recurrence raise UserError(_('You cannot delete recurring tasks. Please, disable the recurrence first.')) return super().unlink() # --------------------------------------------------- # Subtasks # --------------------------------------------------- @api.depends('parent_id.partner_id', 'project_id.partner_id') def _compute_partner_id(self): """ If a task has no partner_id, use the project partner_id if any, or else the parent task partner_id. Once the task partner_id has been set: 1) if the project partner_id changes, the task partner_id is automatically changed also. 2) if the parent task partner_id changes, the task partner_id remains the same. 
""" for task in self: if task.partner_id: if task.project_id.partner_id: task.partner_id = task.project_id.partner_id else: task.partner_id = task.project_id.partner_id or task.parent_id.partner_id @api.depends('partner_id.email', 'parent_id.email_from') def _compute_email_from(self): for task in self: task.email_from = task.partner_id.email or ((task.partner_id or task.parent_id) and task.email_from) or task.parent_id.email_from @api.depends('parent_id.project_id.subtask_project_id') def _compute_project_id(self): for task in self: if not task.project_id: task.project_id = task.parent_id.project_id.subtask_project_id # --------------------------------------------------- # Mail gateway # --------------------------------------------------- def _track_template(self, changes): res = super(Task, self)._track_template(changes) test_task = self[0] if 'stage_id' in changes and test_task.stage_id.mail_template_id: res['stage_id'] = (test_task.stage_id.mail_template_id, { 'auto_delete_message': True, 'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid': 'mail.mail_notification_light' }) return res def _creation_subtype(self): return self.env.ref('project.mt_task_new') def _track_subtype(self, init_values): self.ensure_one() if 'kanban_state_label' in init_values and self.kanban_state == 'blocked': return self.env.ref('project.mt_task_blocked') elif 'kanban_state_label' in init_values and self.kanban_state == 'done': return self.env.ref('project.mt_task_ready') elif 'stage_id' in init_values: return self.env.ref('project.mt_task_stage') return super(Task, self)._track_subtype(init_values) def _notify_get_groups(self, msg_vals=None): """ Handle project users and managers recipients that can assign tasks and create new one directly from notification emails. Also give access button to portal users and portal customers. If they are notified they should probably have access to the document. 
""" groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals = dict(msg_vals or {}) self.ensure_one() project_user_group_id = self.env.ref('project.group_project_user').id group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] if self.project_id.privacy_visibility == 'followers': allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] and pdata['id'] in allowed_user_ids new_group = ('group_project_user', group_func, {}) if not self.user_id and not self.stage_id.fold: take_action = self._notify_get_action_link('assign', **local_msg_vals) project_actions = [{'url': take_action, 'title': _('I take it')}] new_group[2]['actions'] = project_actions groups = [new_group] + groups if self.project_id.privacy_visibility == 'portal': allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids groups.insert(0, ( 'allowed_portal_users', lambda pdata: pdata['type'] == 'portal' and pdata['id'] in allowed_user_ids, {} )) portal_privacy = self.project_id.privacy_visibility == 'portal' for group_name, group_method, group_data in groups: if group_name in ('customer', 'user') or group_name == 'portal_customer' and not portal_privacy: group_data['has_button_access'] = False elif group_name == 'portal_customer' and portal_privacy: group_data['has_button_access'] = True return groups def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None): """ Override to set alias of tasks to their project if any. """ aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None) res = {task.id: aliases.get(task.project_id.id) for task in self} leftover = self.filtered(lambda rec: not rec.project_id) if leftover: res.update(super(Task, leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names)) return res def email_split(self, msg): email_list = tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or '')) # check left-part is not already an alias aliases = self.mapped('project_id.alias_name') return [x for x in email_list if x.split('@')[0] not in aliases] @api.model def message_new(self, msg, custom_values=None): """ Overrides mail_thread message_new that is called by the mailgateway through message_process. This override updates the document according to the email. """ # remove default author when going through the mail gateway. Indeed we # do not want to explicitly set user_id to False; however we do not # want the gateway user to be responsible if no other responsible is # found. create_context = dict(self.env.context or {}) create_context['default_user_id'] = False if custom_values is None: custom_values = {} defaults = { 'name': msg.get('subject') or _("No Subject"), 'email_from': msg.get('from'), 'planned_hours': 0.0, 'partner_id': msg.get('author_id') } defaults.update(custom_values) task = super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults) email_list = task.email_split(msg) partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if p] task.message_subscribe(partner_ids) return task def message_update(self, msg, update_vals=None): """ Override to update the task according to the email. 
""" email_list = self.email_split(msg) partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p] self.message_subscribe(partner_ids) return super(Task, self).message_update(msg, update_vals=update_vals) def _message_get_suggested_recipients(self): recipients = super(Task, self)._message_get_suggested_recipients() for task in self: if task.partner_id: reason = _('Customer Email') if task.partner_id.email else _('Customer') task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason) elif task.email_from: task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email')) return recipients def _notify_email_header_dict(self): headers = super(Task, self)._notify_email_header_dict() if self.project_id: current_objects = [h for h in headers.get('X-Odoo-Objects', '').split(',') if h] current_objects.insert(0, 'project.project-%s, ' % self.project_id.id) headers['X-Odoo-Objects'] = ','.join(current_objects) if self.tag_ids: headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name')) return headers def _message_post_after_hook(self, message, msg_vals): if message.attachment_ids and not self.displayed_image_id: image_attachments = message.attachment_ids.filtered(lambda a: a.mimetype == 'image') if image_attachments: self.displayed_image_id = image_attachments[0] if self.email_from and not self.partner_id: # we consider that posting a message with a specified recipient (not a follower, a specific one) # on a document without customer means that it was created through the chatter using # suggested recipients. This heuristic allows to avoid ugly hacks in JS. new_partner = message.partner_ids.filtered(lambda partner: partner.email == self.email_from) if new_partner: self.search([ ('partner_id', '=', False), ('email_from', '=', new_partner.email), ('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id}) return super(Task, self)._message_post_after_hook(message, msg_vals) def action_assign_to_me(self): self.write({'user_id': self.env.user.id}) # If depth == 1, return only direct children # If depth == 3, return children to third generation # If depth <= 0, return all children without depth limit def _get_all_subtasks(self, depth=0): children = self.mapped('child_ids').filtered(lambda children: children.active) if not children: return self.env['project.task'] if depth == 1: return children return children + children._get_all_subtasks(depth - 1) def action_open_parent_task(self): return { 'name': _('Parent Task'), 'view_mode': 'form', 'res_model': 'project.task', 'res_id': self.parent_id.id, 'type': 'ir.actions.act_window', 'context': dict(self._context, create=False) } def action_subtask(self): action = self.env["ir.actions.actions"]._for_xml_id("project.project_task_action_sub_task") # display all subtasks of current task action['domain'] = [('id', 'child_of', self.id), ('id', '!=', self.id)] # update context, with all default values as 'quick_create' does not contains all field in its view if self._context.get('default_project_id'): default_project = self.env['project.project'].browse(self.env.context['default_project_id']) else: default_project = self.project_id.subtask_project_id or self.project_id ctx = dict(self.env.context) ctx = {k: v for k, v in ctx.items() if not k.startswith('search_default_')} ctx.update({ 'default_name': self.env.context.get('name', self.name) + ':', 'default_parent_id': self.id, # will give default subtask field in 
`default_get` 'default_company_id': default_project.company_id.id if default_project else self.env.company.id, }) action['context'] = ctx return action def action_recurring_tasks(self): return { 'name': 'Tasks in Recurrence', 'type': 'ir.actions.act_window', 'res_model': 'project.task', 'view_mode': 'tree,form', 'domain': [('recurrence_id', 'in', self.recurrence_id.ids)], } # --------------------------------------------------- # Rating business # --------------------------------------------------- def _send_task_rating_mail(self, force_send=False): for task in self: rating_template = task.stage_id.rating_template_id if rating_template: task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send) def rating_get_partner_id(self): res = super(Task, self).rating_get_partner_id() if not res and self.project_id.partner_id: return self.project_id.partner_id return res def rating_apply(self, rate, token=None, feedback=None, subtype_xmlid=None): return super(Task, self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid="project.mt_task_rating") def _rating_get_parent_field_name(self): return 'project_id' class ProjectTags(models.Model): """ Tags of project's tasks """ _name = "project.tags" _description = "Project Tags" def _get_default_color(self): return randint(1, 11) name = fields.Char('Name', required=True) color = fields.Integer(string='Color', default=_get_default_color) _sql_constraints = [ ('name_uniq', 'unique (name)', "Tag name already exists!"), ]
1.960938
2
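The `stage_find` method in the project/task model code above assembles an Odoo search domain in prefix notation, prepending one `'|'` operator per extra term before the leaf tuples. The short sketch below illustrates just that pattern with plain Python lists, so it runs without Odoo installed; the `build_or_domain` helper name and the sample field/values are mine, not part of the module.

def build_or_domain(field, values):
    """Build a prefix-notation OR domain the way stage_find does:
    n leaf terms need n - 1 leading '|' operators."""
    terms = [(field, '=', value) for value in values]
    if not terms:
        return []
    return ['|'] * (len(terms) - 1) + terms


if __name__ == '__main__':
    # Three project ids -> two '|' operators followed by three leaf terms.
    print(build_or_domain('project_ids', [7, 8, 9]))
    # ['|', '|', ('project_ids', '=', 7), ('project_ids', '=', 8), ('project_ids', '=', 9)]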
app/config.py
Maethorin/pivocram
5
1450
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Config File for environment variables
"""

import os
from importlib import import_module


class Config(object):
    """
    Base class for all config variables
    """
    DEBUG = False
    TESTING = False
    DEVELOPMENT = False
    CSRF_ENABLED = True
    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
    SECRET_KEY = os.environ['SECRET_KEY']


class ProductionConfig(Config):
    """
    Production Config... this is the real thing
    """
    DEBUG = False


class StagingConfig(Config):
    """
    Staging Config is for... staging things
    """
    DEBUG = True


class DevelopmentConfig(Config):
    """
    Development Config... this is your home developer!
    """
    DEVELOPMENT = True
    DEBUG = True


class TestingConfig(Config):
    """
    Test Config... You should be testing right now instead of reading docs!!!
    """
    TESTING = True
    KEY_ON_TEST = 'KEY ON TEST'


class ConfigClassNotFound(Exception):
    """
    Raised when the APP_SETTINGS environment variable has a value
    which does not point to an importable config class.
    """
    pass


def get_config():
    """
    Get the Config Class instance defined in APP_SETTINGS environment variable
    :return The config class instance
    :rtype: Config
    """
    config_imports = os.environ['APP_SETTINGS'].split('.')
    config_class_name = config_imports[-1]
    config_module = import_module('.'.join(config_imports[:-1]))
    config_class = getattr(config_module, config_class_name, None)
    if not config_class:
        raise ConfigClassNotFound('Unable to find a config class in {}'.format(os.environ['APP_SETTINGS']))
    return config_class()
2.71875
3
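The `get_config` helper in the record above resolves a dotted class path with `import_module` plus `getattr`. The sketch below reproduces that lookup pattern in isolation, taking the dotted path as an argument instead of reading APP_SETTINGS and resolving a standard-library class so it can be run on its own; the `resolve_class` name and the example path are illustrative only.

from importlib import import_module


def resolve_class(dotted_path):
    """Split 'package.module.ClassName', import the module part,
    then fetch the class attribute -- the same shape as get_config()."""
    parts = dotted_path.split('.')
    module = import_module('.'.join(parts[:-1]))
    cls = getattr(module, parts[-1], None)
    if cls is None:
        raise LookupError('No class {!r} in {!r}'.format(parts[-1], module.__name__))
    return cls


if __name__ == '__main__':
    ordered_dict_cls = resolve_class('collections.OrderedDict')
    print(ordered_dict_cls())  # OrderedDict()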
initial_load.py
hongyuanChrisLi/RealEstateDBConvert
0
1451
<reponame>hongyuanChrisLi/RealEstateDBConvert
from mysql_dao.select_dao import SelectDao as MysqlSelectDao
from postgres_dao.ddl_dao import DdlDao
from postgres_dao.dml_dao import DmlDao as PsqlDmlDao

psql_ddl_dao = DdlDao()
mysql_select_dao = MysqlSelectDao()
psql_dml_dao = PsqlDmlDao()

psql_ddl_dao.create_tables()

county_data = mysql_select_dao.select_all_counties()
psql_dml_dao.insert_county(county_data)

city_data = mysql_select_dao.select_all_cities()
psql_dml_dao.insert_city(city_data)

zipcode_data = mysql_select_dao.select_all_zipcodes()
psql_dml_dao.insert_zipcode(zipcode_data)

data = mysql_select_dao.select_full_addr_month_rpt()
psql_dml_dao.trunc_addr_month_rpt()
psql_dml_dao.insert_addr_month_rpt(data)

data = mysql_select_dao.select_full_mls_daily_rpt()
psql_dml_dao.trunc_mls_rpt()
psql_dml_dao.insert_mls_rpt(data)

mysql_select_dao.close()
psql_dml_dao.close()
1.859375
2
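The migration script above follows a simple truncate-and-reload pattern for each report table: read every row from the source DAO, empty the target table, then bulk-insert. A self-contained illustration of that flow using two in-memory sqlite3 databases is shown below; the table and column names are invented for the example and do not come from the original DAOs.

import sqlite3

# Source and target stand in for the MySQL and Postgres connections.
source = sqlite3.connect(':memory:')
target = sqlite3.connect(':memory:')

source.execute('CREATE TABLE addr_month_rpt (addr TEXT, month TEXT, price REAL)')
source.executemany('INSERT INTO addr_month_rpt VALUES (?, ?, ?)',
                   [('12 Oak St', '2020-01', 450000.0),
                    ('34 Elm St', '2020-01', 512000.0)])
target.execute('CREATE TABLE addr_month_rpt (addr TEXT, month TEXT, price REAL)')

# Truncate-and-reload: the same select / trunc / insert sequence as the script.
rows = source.execute('SELECT addr, month, price FROM addr_month_rpt').fetchall()
target.execute('DELETE FROM addr_month_rpt')          # the "trunc" step
target.executemany('INSERT INTO addr_month_rpt VALUES (?, ?, ?)', rows)
target.commit()

print(target.execute('SELECT COUNT(*) FROM addr_month_rpt').fetchone()[0])  # 2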
pytests/docs/docs.py
ramalingam-cb/testrunner
0
1452
import time
import logger
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import DocumentGenerator
from membase.api.rest_client import RestConnection
from couchbase_helper.documentgenerator import BlobGenerator


class DocsTests(BaseTestCase):

    def setUp(self):
        super(DocsTests, self).setUp()

    def tearDown(self):
        super(DocsTests, self).tearDown()

    def test_docs_int_big_values(self):
        degree = self.input.param("degree", 53)
        error = self.input.param("error", False)
        number = 2**degree
        first = ['james', 'sharon']
        template = '{{ "number": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs', template, [number,], first,
                                     start=0, end=self.num_items)
        self.log.info("create %s documents..." % (self.num_items))
        try:
            self._load_all_buckets(self.master, gen_load, "create", 0)
            self._verify_stats_all_buckets([self.master])
        except Exception as e:
            if error:
                self.log.info("Unable to create documents as expected: %s" % str(e))
            else:
                raise e
        else:
            if error:
                self.fail("Able to create documents with value: %s" % str(number))

    #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75
    """
    1) Configure a cluster with 4 Couchbase Buckets and 1 Memcached Buckets.
    2) Total memory quota allocated for Couchbase should be approx. 75% (12G) of total RAM.
    3) Load initial data on all buckets upto 60% of each memory quota
    4) Pick one bucket and do the following (5) to (8)
    5) Insert new items upto high_wat_mark (75% of memory quota)
    6) Expire/Delete/update random items (ratio of expiration vs delete ~= 8:2)
    7) Repeat (6) until "ep_total_del_items" is ~= (3 X # of items being loaded in (3))
    8) Expire 90% of remaining items
    9) Insert new items or update existing items across buckets
    10) See if we can run into "Hard out of Memory" error (UI)
    """
    def test_load_memory(self):
        num_items = self.quota * 1024 * 0.6 / self.value_size
        num_items = num_items / len(self.buckets)
        self.log.info("Load initial data on all buckets upto 60% of each memory quota")
        gen_load = BlobGenerator('mike', 'mike-', self.value_size,
                                 start=0, end=num_items)
        self._load_all_buckets(self.master, gen_load, "create", 0)
        self.log.info("Insert new items upto high_wat_mark (75% of memory quota)")
        for bucket in self.buckets:
            if bucket.type != 'memcached':
                bucket_to_load = bucket
                break
        new_num_items = self.quota * 1024 * 0.15 / self.value_size
        gen_load = BlobGenerator('mike', 'mike-', self.value_size,
                                 start=num_items, end=new_num_items + num_items)
        load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load,
                                                bucket_to_load.kvs[1], 'create',
                                                compression=self.sdk_compression)
        load.result()
        end_time = time.time() + 60*60*3
        while time.time() < end_time:
            self.log.info("check memUsed")
            rest = RestConnection(self.master)
            for bucket in rest.get_buckets():
                self.log.info("*****************************\
bucket %s: memUsed %s\
****************************" % (bucket.name, bucket.stats.memUsed))
            self.log.info("Expire/Delete/update random items (ratio \
of expiration vs delete ~= 8:2)")
            current_num = 0
            wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load,
                                                          'all', 'ep_total_del_items', '==', num_items * 3)
            while wait_task.state != "FINISHED":
                gen_update = BlobGenerator('mike', 'mike-', self.value_size,
                                           start=current_num, end=current_num + 5000)
                gen_expire = BlobGenerator('mike', 'mike-', self.value_size,
                                           start=current_num + 5000, end=current_num + 6600)
                gen_delete = BlobGenerator('mike', 'mike-', self.value_size,
                                           start=current_num + 6600, end=current_num + 7000)
                tasks = []
                tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_update,
                                                              bucket_to_load.kvs[1], 'update',
                                                              compression=self.sdk_compression))
                tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire,
                                                              bucket_to_load.kvs[1], 'update', exp=1,
                                                              compression=self.sdk_compression))
                tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_delete,
                                                              bucket_to_load.kvs[1], 'delete',
                                                              compression=self.sdk_compression))
                for task in tasks:
                    task.result()
                current_num += 7000
        self.log.info("Expire 90% of remaining items")
        remain_keys, _ = bucket_to_load.kvs[1].key_set()
        # list indices must be integers, so cast the 90% cut-off point
        last_key_to_expire = remain_keys[int(0.9 * len(remain_keys))][4:]
        gen_expire = BlobGenerator('mike', 'mike-', self.value_size,
                                   start=0, end=last_key_to_expire)
        load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire,
                                                bucket_to_load.kvs[1], 'update', exp=1,
                                                compression=self.sdk_compression)
        load.result()
        self.log.info("Insert new items or update existing items across buckets")
        gen_load = BlobGenerator('mike', 'mike-', self.value_size,
                                 start=new_num_items + num_items, end=new_num_items * 2 + num_items)
        self._load_all_buckets(self.master, gen_load, "create", 0)
2.25
2
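The `test_load_memory` scenario above sizes its data set from the memory quota: 60% of the quota for the initial load spread over the buckets, then another 15% pushed into one bucket to approach the 75% high watermark. The few lines below simply work through that arithmetic with illustrative numbers (a 12 GB quota, 4 buckets, 1 KB values); they are example figures, not values taken from an actual run.

quota_mb = 12 * 1024          # total Couchbase quota, example value
value_size = 1024             # bytes per generated document, example value
buckets = 4

# Initial load: 60% of the quota, spread over the buckets (same formula as the test).
initial_items = quota_mb * 1024 * 0.6 / value_size / buckets
# Extra items loaded into one bucket to reach the 75% high watermark.
extra_items = quota_mb * 1024 * 0.15 / value_size

print(int(initial_items), int(extra_items))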
lichthi.py
truongaxin123/lichthidtu
0
1453
from bs4 import BeautifulSoup
import requests
from urllib.request import urlretrieve

ROOT = 'http://pdaotao.duytan.edu.vn'


def get_url_sub(sub, id_, page):
    all_td_tag = []
    for i in range(1, page + 1):
        print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i))
        r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i))
        soup = BeautifulSoup(r.text, 'lxml')
        list_td_tag = soup.find_all('td', attrs={'style': 'padding-top:10px'})
        all_td_tag = all_td_tag + list_td_tag
    for td_tag in all_td_tag:
        if (((sub + id_) in str(td_tag.a.contents[0]))
                or ((sub + ' ' + id_) in str(td_tag.a.contents[0]))
                or ((sub + '_' + id_) in str(td_tag.a.contents[0]))):
            print('\nComplete!!!')
            print(' '.join(str(td_tag.a.string).split()))
            print(str(td_tag.a['href']).replace('..', ROOT))
            return str(td_tag.a['href']).replace('..', ROOT)


def get_excel_url(url):
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'lxml')
    list_span_tags = soup.find_all('span', class_='txt_l4')
    excel_url = list_span_tags[1].a['href'].replace('..', ROOT)
    return excel_url

# a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN')


def main():
    sub = input('Nhap ten mon: ')   # prompt for the subject name
    id_ = input('Nhap id mon: ')    # prompt for the subject id
    url = get_url_sub(sub, id_, 4)
    if url is None:
        # "No subject matching ({} {}) was found"
        print('Khong tim thay mon nao nhu nay ({} {}) ca :('.format(sub, id_))
        return
    else:
        print('get excel URL!!!')
        excel_url = get_excel_url(url)
        excel_url = excel_url.replace(' ', '%20')
        print('Download excel file!!!')
        save_at = 'C:/Users/truon/Desktop/'
        filename = save_at + excel_url.split('/')[-1].replace('%20', ' ')
        urlretrieve(excel_url, filename)
        print('Done!')


main()
3.171875
3
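The download step in the script above escapes spaces in the Excel URL by hand with `replace(' ', '%20')`. A more general way to do the same thing, sketched below with the standard library's `urllib.parse.quote`, also covers other characters that need percent-encoding; the sample file name in the URL is made up for the example.

from urllib.parse import quote

raw_url = 'http://pdaotao.duytan.edu.vn/files/LICH THI CUOI KY.xls'  # hypothetical file name

# Encode the path portion only; keep ':' and '/' so the URL structure survives.
safe_url = quote(raw_url, safe=':/')
print(safe_url)
# http://pdaotao.duytan.edu.vn/files/LICH%20THI%20CUOI%20KY.xls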
appengine/uploader/main.py
isabella232/feedloader
5
1454
# coding=utf-8 # Copyright 2021 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Uploader module that handles batch jobs sent from Task Queue. This module receives batch jobs from TaskQueue. For each job, the module loads data from BigQuery and sends it to Merchant Center. """ import http import json import logging import socket from typing import List, Tuple import flask from google.cloud import bigquery from google.cloud import logging as cloud_logging from googleapiclient import errors import batch_creator import bigquery_client import constants import content_api_client import result_recorder import shoptimizer_client from models import failure from models import process_result from models import upload_task app = flask.Flask(__name__) _logging_client = cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json' OPERATION_TO_METHOD = { constants.Operation.UPSERT: constants.Method.INSERT, constants.Operation.DELETE: constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT } # Used to check if this is the last retry for alerting purposes. # Should match task_retry_limit in appengine/initiator/queue.yaml. TASK_RETRY_LIMIT = 5 @app.route('/insert_items', methods=['POST']) def run_insert_process() -> Tuple[str, http.HTTPStatus]: """Handles uploading tasks pushed from Task Queue.""" return _run_process(constants.Operation.UPSERT) @app.route('/delete_items', methods=['POST']) def run_delete_process() -> Tuple[str, http.HTTPStatus]: """Handles deleting tasks pushed from Task Queue.""" return _run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items', methods=['POST']) def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]: """Handles prevent expiring tasks pushed from Task Queue.""" return _run_process(constants.Operation.PREVENT_EXPIRING) def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]: """Handles tasks pushed from Task Queue. When tasks are enqueued to Task Queue by initiator, this method will be called. It extracts necessary information from a Task Queue message. The following processes are executed in this function: - Loading items to process from BigQuery. - Converts items into a batch that can be sent to Content API for Shopping. - Sending items to Content API for Shopping (Merchant Center). - Records the results of the Content API for Shopping call. Args: operation: Type of operation to perform on the items. Returns: The result of HTTP request. """ request_body = json.loads(flask.request.data.decode('utf-8')) task = upload_task.UploadTask.from_json(request_body) if task.batch_size == 0: return 'OK', http.HTTPStatus.OK batch_number = int(task.start_index / task.batch_size) + 1 logging.info( '%s started. 
Batch #%d info: start_index: %d, batch_size: %d,' 'initiation timestamp: %s', operation.value, batch_number, task.start_index, task.batch_size, task.timestamp) try: items = _load_items_from_bigquery(operation, task) except errors.HttpError: return 'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR result = process_result.ProcessResult([], [], []) try: if not items: logging.error( 'Batch #%d, operation %s: 0 items loaded from BigQuery so batch not sent to Content API. Start_index: %d, batch_size: %d,' 'initiation timestamp: %s', batch_number, operation.value, task.start_index, task.batch_size, task.timestamp) return 'No items to process', http.HTTPStatus.OK method = OPERATION_TO_METHOD.get(operation) # Creates batch from items loaded from BigQuery original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch( batch_number, items, method) # Optimizes batch via Shoptimizer for upsert/prevent_expiring operations if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api = _create_optimized_batch( original_batch, batch_number, operation) else: batch_to_send_to_content_api = original_batch # Sends batch of items to Content API for Shopping api_client = content_api_client.ContentApiClient() successful_item_ids, item_failures = api_client.process_items( batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method) result = process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures, skipped_item_ids=skipped_item_ids) except errors.HttpError as http_error: error_status_code = http_error.resp.status error_reason = http_error.resp.reason result = _handle_content_api_error(error_status_code, error_reason, batch_number, http_error, items, operation, task) return error_reason, error_status_code except socket.timeout as timeout_error: error_status_code = http.HTTPStatus.REQUEST_TIMEOUT error_reason = 'Socket timeout' result = _handle_content_api_error(error_status_code, error_reason, batch_number, timeout_error, items, operation, task) return error_reason, error_status_code else: logging.info( 'Batch #%d with operation %s and initiation timestamp %s successfully processed %s items, failed to process %s items and skipped %s items.', batch_number, operation.value, task.timestamp, result.get_success_count(), result.get_failure_count(), result.get_skipped_count()) finally: recorder = result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value, result, task.timestamp, batch_number) return 'OK', http.HTTPStatus.OK def _load_items_from_bigquery( operation: constants.Operation, task: upload_task.UploadTask) -> List[bigquery.Row]: """Loads items from BigQuery. Args: operation: The operation to be performed on this batch of items. task: The Cloud Task object that initiated this request. Returns: The list of items loaded from BigQuery. """ table_id = f'process_items_to_{operation.value}_{task.timestamp}' bq_client = bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id) try: items_iterator = bq_client.load_items(task.start_index, task.batch_size) except errors.HttpError as http_error: logging.exception( 'Error loading items from %s.%s. HTTP status: %s. 
Error: %s', constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status, http_error.resp.reason) raise return list(items_iterator) def _create_optimized_batch(batch: constants.Batch, batch_number: int, operation: constants.Operation) -> constants.Batch: """Creates an optimized batch by calling the Shoptimizer API. Args: batch: The batch of product data to be optimized. batch_number: The number that identifies this batch. operation: The operation to be performed on this batch (upsert, delete, prevent_expiring). Returns: The batch returned from the Shoptimizer API Client. """ try: optimization_client = shoptimizer_client.ShoptimizerClient( batch_number, operation) except (OSError, ValueError): return batch return optimization_client.shoptimize(batch) def _handle_content_api_error( error_status_code: int, error_reason: str, batch_num: int, error: Exception, item_rows: List[bigquery.Row], operation: constants.Operation, task: upload_task.UploadTask) -> process_result.ProcessResult: """Logs network related errors returned from Content API and returns a list of item failures. Args: error_status_code: HTTP status code from Content API. error_reason: The reason for the error. batch_num: The batch number. error: The error thrown by Content API. item_rows: The items being processed in this batch. operation: The operation to be performed on this batch of items. task: The Cloud Task object that initiated this request. Returns: The list of items that failed due to the error, wrapped in a process_result. """ logging.warning( 'Batch #%d with operation %s and initiation timestamp %s failed. HTTP status: %s. Error: %s', batch_num, operation.value, task.timestamp, error_status_code, error_reason) # If the batch API call received an HttpError, mark every id as failed. item_failures = [ failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason) for item_row in item_rows ] api_result = process_result.ProcessResult([], item_failures, []) if content_api_client.suggest_retry( error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT: logging.warning( 'Batch #%d with operation %s and initiation timestamp %s will be requeued for retry', batch_num, operation.value, task.timestamp) else: logging.error( 'Batch #%d with operation %s and initiation timestamp %s failed and will not be retried. Error: %s', batch_num, operation.value, task.timestamp, error) return api_result def _get_execution_attempt() -> int: """Returns the number of times this task has previously been executed. If the execution count header does not exist, it means the request did not come from Cloud Tasks. In this case, there will be no retry, so set execution attempt to the retry limit. Returns: int, the number of times this task has previously been executed. """ execution_attempt = flask.request.headers.get( 'X-AppEngine-TaskExecutionCount', '') if execution_attempt: return int(execution_attempt) else: return TASK_RETRY_LIMIT if __name__ == '__main__': # This is used when running locally. Gunicorn is used to run the # application on Google App Engine. See entrypoint in app.yaml. app.run(host='127.0.0.1', port=8080, debug=True)
1.757813
2
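A note on the uploader record above: retries are driven by Cloud Tasks, and the module decides whether a failed batch is worth re-queuing by reading the X-AppEngine-TaskExecutionCount header and comparing it against TASK_RETRY_LIMIT. Below is a minimal, self-contained sketch (not part of the original module) of exercising that header logic locally with Flask's test client; the tiny app and helper only mirror the behaviour of _get_execution_attempt so the snippet runs on its own.

# Hypothetical, self-contained sketch of the retry-count header check used above.
# The header name and the limit of 5 come from the module; the tiny app here is
# only scaffolding so the snippet runs on its own.
import flask

TASK_RETRY_LIMIT = 5
app = flask.Flask(__name__)

def get_execution_attempt() -> int:
    """Mirror of _get_execution_attempt: fall back to the limit when the
    request did not come from Cloud Tasks (header absent)."""
    value = flask.request.headers.get('X-AppEngine-TaskExecutionCount', '')
    return int(value) if value else TASK_RETRY_LIMIT

# Simulate a push from Cloud Tasks that has already been retried twice.
with app.test_request_context(
        '/insert_items', method='POST',
        headers={'X-AppEngine-TaskExecutionCount': '2'}):
    assert get_execution_attempt() == 2          # still below the retry limit

# A request without the header is treated as the last attempt.
with app.test_request_context('/insert_items', method='POST'):
    assert get_execution_attempt() == TASK_RETRY_LIMIT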
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py
BadDevCode/lumberyard
1738
1455
<filename>dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py """ Implement transformation on Numba IR """ from __future__ import absolute_import, print_function from collections import namedtuple, defaultdict import logging from numba.analysis import compute_cfg_from_blocks, find_top_level_loops from numba import ir, errors, ir_utils from numba.analysis import compute_use_defs _logger = logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg, blocks): """ Returns a list of loops that are candidate for loop lifting """ # check well-formed-ness of the loop def same_exit_point(loop): "all exits must point to the same location" outedges = set() for k in loop.exits: succs = set(x for x, _ in cfg.successors(k)) if not succs: # If the exit point has no successor, it contains an return # statement, which is not handled by the looplifting code. # Thus, this loop is not a candidate. _logger.debug("return-statement in loop.") return False outedges |= succs ok = len(outedges) == 1 _logger.debug("same_exit_point=%s (%s)", ok, outedges) return ok def one_entry(loop): "there is one entry" ok = len(loop.entries) == 1 _logger.debug("one_entry=%s", ok) return ok def cannot_yield(loop): "cannot have yield inside the loop" insiders = set(loop.body) | set(loop.entries) | set(loop.exits) for blk in map(blocks.__getitem__, insiders): for inst in blk.body: if isinstance(inst, ir.Assign): if isinstance(inst.value, ir.Yield): _logger.debug("has yield") return False _logger.debug("no yield") return True _logger.info('finding looplift candidates') # the check for cfg.entry_point in the loop.entries is to prevent a bad # rewrite where a prelude for a lifted loop would get written into block -1 # if a loop entry were in block 0 candidates = [] for loop in find_top_level_loops(cfg): _logger.debug("top-level loop: %s", loop) if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and cfg.entry_point() not in loop.entries): candidates.append(loop) _logger.debug("add candidate: %s", loop) return candidates def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids): """Find input and output variables to a block region. """ inputs = livemap[callfrom] outputs = livemap[returnto] # ensure live variables are actually used in the blocks, else remove, # saves having to create something valid to run through postproc # to achieve similar loopblocks = {} for k in body_block_ids: loopblocks[k] = blocks[k] used_vars = set() def_vars = set() defs = compute_use_defs(loopblocks) for vs in defs.usemap.values(): used_vars |= vs for vs in defs.defmap.values(): def_vars |= vs used_or_defined = used_vars | def_vars # note: sorted for stable ordering inputs = sorted(set(inputs) & used_or_defined) outputs = sorted(set(outputs) & used_or_defined & def_vars) return inputs, outputs _loop_lift_info = namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg, blocks, livemap): """ Returns information on looplifting candidates. 
""" loops = _extract_loop_lifting_candidates(cfg, blocks) loopinfos = [] for loop in loops: [callfrom] = loop.entries # requirement checked earlier an_exit = next(iter(loop.exits)) # anyone of the exit block if len(loop.exits) > 1: # Pre-Py3.8 may have multiple exits [(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier else: # Post-Py3.8 DO NOT have multiple exits returnto = an_exit local_block_ids = set(loop.body) | set(loop.entries) inputs, outputs = find_region_inout_vars( blocks=blocks, livemap=livemap, callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids, ) lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs, callfrom=callfrom, returnto=returnto) loopinfos.append(lli) return loopinfos def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto): """ Transform calling block from top-level function to call the lifted loop. """ scope = block.scope loc = block.loc blk = ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop, label_next=returnto, inputs=inputs, outputs=outputs, ) return blk def _loop_lift_prepare_loop_func(loopinfo, blocks): """ Inplace transform loop blocks for use as lifted loop. """ entry_block = blocks[loopinfo.callfrom] scope = entry_block.scope loc = entry_block.loc # Lowering assumes the first block to be the one with the smallest offset firstblk = min(blocks) - 1 blocks[firstblk] = ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc), inputs=loopinfo.inputs, label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc), outputs=loopinfo.outputs, ) def _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals): """ Modify the block inplace to call to the lifted-loop. Returns a dictionary of blocks of the lifted-loop. """ from numba.dispatcher import LiftedLoop # Copy loop blocks loop = loopinfo.loop loopblockkeys = set(loop.body) | set(loop.entries) if len(loop.exits) > 1: # Pre-Py3.8 may have multiple exits loopblockkeys |= loop.exits loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys) # Modify the loop blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks) # Create a new IR for the lifted loop lifted_ir = func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop = LiftedLoop(lifted_ir, typingctx, targetctx, flags, locals) # modify for calling into liftedloop callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs, loopinfo.returnto) # remove blocks for k in loopblockkeys: del blocks[k] # update main interpreter callsite into the liftedloop blocks[loopinfo.callfrom] = callblock return liftedloop def loop_lifting(func_ir, typingctx, targetctx, flags, locals): """ Loop lifting transformation. 
Given a interpreter `func_ir` returns a 2 tuple of `(toplevel_interp, [loop0_interp, loop1_interp, ....])` """ blocks = func_ir.blocks.copy() cfg = compute_cfg_from_blocks(blocks) loopinfos = _loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap) loops = [] if loopinfos: _logger.debug('loop lifting this IR with %d candidates:\n%s', len(loopinfos), func_ir.dump_to_string()) for loopinfo in loopinfos: lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals) loops.append(lifted) # Make main IR main = func_ir.derive(blocks=blocks) return main, loops def canonicalize_cfg_single_backedge(blocks): """ Rewrite loops that have multiple backedges. """ cfg = compute_cfg_from_blocks(blocks) newblocks = blocks.copy() def new_block_id(): return max(newblocks.keys()) + 1 def has_multiple_backedges(loop): count = 0 for k in loop.body: blk = blocks[k] edges = blk.terminator.get_targets() # is a backedge? if loop.header in edges: count += 1 if count > 1: # early exit return True return False def yield_loops_with_multiple_backedges(): for lp in cfg.loops().values(): if has_multiple_backedges(lp): yield lp def replace_target(term, src, dst): def replace(target): return (dst if target == src else target) if isinstance(term, ir.Branch): return ir.Branch(cond=term.cond, truebr=replace(term.truebr), falsebr=replace(term.falsebr), loc=term.loc) elif isinstance(term, ir.Jump): return ir.Jump(target=replace(term.target), loc=term.loc) else: assert not term.get_targets() return term def rewrite_single_backedge(loop): """ Add new tail block that gathers all the backedges """ header = loop.header tailkey = new_block_id() for blkkey in loop.body: blk = newblocks[blkkey] if header in blk.terminator.get_targets(): newblk = blk.copy() # rewrite backedge into jumps to new tail block newblk.body[-1] = replace_target(blk.terminator, header, tailkey) newblocks[blkkey] = newblk # create new tail block entryblk = newblocks[header] tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc) # add backedge tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey] = tailblk for loop in yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return newblocks def canonicalize_cfg(blocks): """ Rewrite the given blocks to canonicalize the CFG. Returns a new dictionary of blocks. """ return canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir, typingctx, targetctx, flags, locals): """With-lifting transformation Rewrite the IR to extract all withs. Only the top-level withs are extracted. 
Returns the (the_new_ir, the_lifted_with_ir) """ from numba import postproc def dispatcher_factory(func_ir, objectmode=False, **kwargs): from numba.dispatcher import LiftedWith, ObjModeLiftedWith myflags = flags.copy() if objectmode: # Lifted with-block cannot looplift myflags.enable_looplift = False # Lifted with-block uses object mode myflags.enable_pyobject = True myflags.force_pyobject = True myflags.no_cpython_wrapper = False cls = ObjModeLiftedWith else: cls = LiftedWith return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs) postproc.PostProcessor(func_ir).run() # ensure we have variable lifetime assert func_ir.variable_lifetime vlt = func_ir.variable_lifetime blocks = func_ir.blocks.copy() # find where with-contexts regions are withs = find_setupwiths(blocks) cfg = vlt.cfg _legalize_withs_cfg(withs, cfg, blocks) # For each with-regions, mutate them according to # the kind of contextmanager sub_irs = [] for (blk_start, blk_end) in withs: body_blocks = [] for node in _cfg_nodes_in_region(cfg, blk_start, blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start]) # Find the contextmanager cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start) # Mutate the body and get new IR sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end, body_blocks, dispatcher_factory, extra) sub_irs.append(sub) if not sub_irs: # Unchanged new_ir = func_ir else: new_ir = func_ir.derive(blocks) return new_ir, sub_irs def _get_with_contextmanager(func_ir, blocks, blk_start): """Get the global object used for the context manager """ _illegal_cm_msg = "Illegal use of context-manager." def get_var_dfn(var): """Get the definition given a variable""" return func_ir.get_definition(var) def get_ctxmgr_obj(var_ref): """Return the context-manager object and extra info. The extra contains the arguments if the context-manager is used as a call. """ # If the contextmanager used as a Call dfn = func_ir.get_definition(var_ref) if isinstance(dfn, ir.Expr) and dfn.op == 'call': args = [get_var_dfn(x) for x in dfn.args] kws = {k: get_var_dfn(v) for k, v in dfn.kws} extra = {'args': args, 'kwargs': kws} var_ref = dfn.func else: extra = None ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref) # check the contextmanager object if ctxobj is ir.UNDEFINED: raise errors.CompilerError( "Undefined variable used as context manager", loc=blocks[blk_start].loc, ) if ctxobj is None: raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return ctxobj, extra # Scan the start of the with-region for the contextmanager for stmt in blocks[blk_start].body: if isinstance(stmt, ir.EnterWith): var_ref = stmt.contextmanager ctxobj, extra = get_ctxmgr_obj(var_ref) if not hasattr(ctxobj, 'mutate_with_body'): raise errors.CompilerError( "Unsupported context manager in use", loc=blocks[blk_start].loc, ) return ctxobj, extra # No contextmanager found? raise errors.CompilerError( "malformed with-context usage", loc=blocks[blk_start].loc, ) def _legalize_with_head(blk): """Given *blk*, the head block of the with-context, check that it doesn't do anything else. 
""" counters = defaultdict(int) for stmt in blk.body: counters[type(stmt)] += 1 if counters.pop(ir.EnterWith) != 1: raise errors.CompilerError( "with's head-block must have exactly 1 ENTER_WITH", loc=blk.loc, ) if counters.pop(ir.Jump) != 1: raise errors.CompilerError( "with's head-block must have exactly 1 JUMP", loc=blk.loc, ) # Can have any number of del counters.pop(ir.Del, None) # There MUST NOT be any other statements if counters: raise errors.CompilerError( "illegal statements in with's head-block", loc=blk.loc, ) def _cfg_nodes_in_region(cfg, region_begin, region_end): """Find the set of CFG nodes that are in the given region """ region_nodes = set() stack = [region_begin] while stack: tos = stack.pop() succs, _ = zip(*cfg.successors(tos)) nodes = set([node for node in succs if node not in region_nodes and node != region_end]) stack.extend(nodes) region_nodes |= nodes return region_nodes def _legalize_withs_cfg(withs, cfg, blocks): """Verify the CFG of the with-context(s). """ doms = cfg.dominators() postdoms = cfg.post_dominators() # Verify that the with-context has no side-exits for s, e in withs: loc = blocks[s].loc if s not in doms[e]: # Not sure what condition can trigger this error. msg = "Entry of with-context not dominating the exit." raise errors.CompilerError(msg, loc=loc) if e not in postdoms[s]: msg = ( "Does not support with-context that contain branches " "(i.e. break/return/raise) that can leave the with-context. " "Details: exit of with-context not post-dominating the entry. " ) raise errors.CompilerError(msg, loc=loc) def find_setupwiths(blocks): """Find all top-level with. Returns a list of ranges for the with-regions. """ def find_ranges(blocks): for blk in blocks.values(): for ew in blk.find_insts(ir.EnterWith): yield ew.begin, ew.end def previously_occurred(start, known_ranges): for a, b in known_ranges: if s >= a and s < b: return True return False known_ranges = [] for s, e in sorted(find_ranges(blocks)): if not previously_occurred(s, known_ranges): if e not in blocks: # this's possible if there's an exit path in the with-block raise errors.CompilerError( 'unsupported controlflow due to return/raise ' 'statements inside with block' ) assert s in blocks, 'starting offset is not a label' known_ranges.append((s, e)) return known_ranges
1.914063
2
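The transforms module above implements loop lifting: loops that pass the candidate checks (single entry, single exit point, no yield) are extracted into separately compiled functions so the enclosing function can stay in object mode. A user-level sketch of when that machinery kicks in follows; forceobj and looplift are documented numba.jit options, but whether this particular loop is actually lifted depends on the Numba version bundled alongside this file, so treat it as an illustration rather than a guarantee.

# User-level illustration of loop lifting (a sketch; behaviour depends on the
# Numba version). The function needs object mode because of the dict of lists,
# but the numeric loop is the kind of candidate _extract_loop_lifting_candidates
# looks for: one entry, one exit, no yield.
import numpy as np
from numba import jit

@jit(forceobj=True, looplift=True)   # object mode, but allow loop lifting
def summarize(values):
    meta = {"name": "demo", "items": []}      # object-mode-only data structure
    total = 0.0
    for v in values:                          # liftable numeric loop
        total += v * v
    meta["items"].append(total)
    return meta

print(summarize(np.arange(10.0)))             # {'name': 'demo', 'items': [285.0]}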
tests/test_masked_inference_wsi_dataset.py
HabibMrad/MONAI
1
1456
<filename>tests/test_masked_inference_wsi_dataset.py import os import unittest from unittest import skipUnless import numpy as np from numpy.testing import assert_array_equal from parameterized import parameterized from monai.apps.pathology.datasets import MaskedInferenceWSIDataset from monai.apps.utils import download_url from monai.utils import optional_import from tests.utils import skip_if_quick _, has_cim = optional_import("cucim") _, has_osl = optional_import("openslide") FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) MASK1 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask1.npy") MASK2 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask2.npy") MASK4 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask4.npy") HEIGHT = 32914 WIDTH = 46000 def prepare_data(): mask = np.zeros((WIDTH // 2, HEIGHT // 2)) mask[100, 100] = 1 np.save(MASK1, mask) mask[100, 100:102] = 1 np.save(MASK2, mask) mask[100:102, 100:102] = 1 np.save(MASK4, mask) TEST_CASE_0 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, ], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, ], ] TEST_CASE_1 = [ { "data": [{"image": FILE_PATH, "mask": MASK2}], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, ], ] TEST_CASE_2 = [ { "data": [{"image": FILE_PATH, "mask": MASK4}], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 101], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 101], }, ], ] TEST_CASE_3 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, ], "patch_size": 2, "image_reader_name": "cuCIM", }, [ { "image": np.array( [ [[243, 243], [243, 243]], [[243, 243], [243, 243]], [[243, 243], [243, 243]], ], dtype=np.uint8, ), "name": "CMU-1", "mask_location": [100, 100], }, ], ] TEST_CASE_4 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, {"image": FILE_PATH, "mask": MASK2}, ], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, ], ] TEST_CASE_OPENSLIDE_0 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, ], "patch_size": 1, "image_reader_name": "OpenSlide", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, ], ] TEST_CASE_OPENSLIDE_1 = [ { "data": [{"image": FILE_PATH, "mask": MASK2}], "patch_size": 1, "image_reader_name": "OpenSlide", }, [ { "image": np.array([[[243]], [[243]], 
[[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, ], ] class TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self): prepare_data() download_url(FILE_URL, FILE_PATH, "5a3cfd4fd725c50578ddb80b517b759f") @parameterized.expand( [ TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, ] ) @skipUnless(has_cim, "Requires CuCIM") @skip_if_quick def test_read_patches_cucim(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) @parameterized.expand( [ TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ] ) @skipUnless(has_osl, "Requires OpenSlide") @skip_if_quick def test_read_patches_openslide(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) def compare_samples_expected(self, dataset, expected): for i in range(len(dataset)): self.assertTupleEqual(dataset[i][0]["image"].shape, expected[i]["image"].shape) self.assertIsNone(assert_array_equal(dataset[i][0]["image"], expected[i]["image"])) self.assertEqual(dataset[i][0]["name"], expected[i]["name"]) self.assertListEqual(dataset[i][0]["mask_location"], expected[i]["mask_location"]) if __name__ == "__main__": unittest.main()
2.015625
2
manga_py/providers/doujins_com.py
paulolimac/manga-py
1
1457
<gh_stars>1-10 from manga_py.provider import Provider from .helpers.std import Std class DoujinsCom(Provider, Std): img_selector = '#image-container img.doujin' def get_archive_name(self) -> str: return 'archive' def get_chapter_index(self) -> str: return '0' def get_main_content(self): return self._get_content('{}/gallery/{}') def get_manga_name(self) -> str: return self._get_name('/gallery/([^/]+)') def get_chapters(self): return [b''] def get_files(self): items = self.document_fromstring(self.content, self.img_selector) return [i.get('data-file').replace('&amp;', '&') for i in items] def get_cover(self) -> str: return self._cover_from_content(self.img_selector) def book_meta(self) -> dict: # todo meta pass def chapter_for_json(self): return self.get_url() main = DoujinsCom
2.359375
2
src/urh/ui/delegates/CheckBoxDelegate.py
awesome-archive/urh
1
1458
<reponame>awesome-archive/urh<filename>src/urh/ui/delegates/CheckBoxDelegate.py from PyQt5.QtCore import QModelIndex, QAbstractItemModel, Qt, pyqtSlot from PyQt5.QtWidgets import QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox class CheckBoxDelegate(QItemDelegate): def __init__(self, parent=None): super().__init__(parent) self.enabled = True def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex): editor = QCheckBox(parent) editor.stateChanged.connect(self.stateChanged) return editor def setEditorData(self, editor: QCheckBox, index: QModelIndex): editor.blockSignals(True) editor.setChecked(index.model().data(index)) self.enabled = editor.isChecked() editor.blockSignals(False) def setModelData(self, editor: QCheckBox, model: QAbstractItemModel, index: QModelIndex): model.setData(index, editor.isChecked(), Qt.EditRole) @pyqtSlot() def stateChanged(self): self.commitData.emit(self.sender())
1.96875
2
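A short usage sketch for the delegate above: it is meant to be attached to one column of a view whose model stores booleans, so that editing that column pops a checkbox. setItemDelegateForColumn and QStandardItemModel are standard Qt APIs; the import path for CheckBoxDelegate assumes urh's package layout and the rest is made-up scaffolding.

# Minimal usage sketch (assumes PyQt5 and that the urh package is importable).
import sys
from PyQt5.QtWidgets import QApplication, QTableView
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt
from urh.ui.delegates.CheckBoxDelegate import CheckBoxDelegate  # path from this record

app = QApplication(sys.argv)

model = QStandardItemModel(3, 2)
for row, (name, enabled) in enumerate([("alpha", True), ("beta", False), ("gamma", True)]):
    model.setItem(row, 0, QStandardItem(name))
    flag = QStandardItem()
    flag.setData(enabled, Qt.DisplayRole)      # delegate reads data() and expects a bool
    model.setItem(row, 1, flag)

view = QTableView()
view.setModel(model)
view.setItemDelegateForColumn(1, CheckBoxDelegate(view))  # checkbox editor for column 1
view.setWindowTitle("CheckBoxDelegate demo")
view.show()
sys.exit(app.exec_())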
neural_network/backup_casestudy/denbigh/tf_RNN.py
acceleratedmaterials/AMDworkshop_demo
5
1459
# -*- coding: utf-8 -*- ''' Framework: Tensorflow Training samples: 1600 Validation samples: 400 RNN with 128 units Optimizer: Adam Epoch: 100 Loss: Cross Entropy Activation function: Relu for network and Soft-max for regression Regularization: Drop-out, keep_prob = 0.8 Accuracy of Validation set: 95% ''' from __future__ import division, print_function, absolute_import import tflearn from tflearn.data_utils import to_categorical, pad_sequences from data_denbigh import * X, Y = getDenbighData() #Hyperparams neurons_num = 128 # Number of neurons in the RNN layer keep_prob = 0.5 # Keep probability for the drop-out regularization learning_rate = 0.001 # Learning rate for mini-batch SGD batch_size = 32 # Batch size n_epoch = 100 # Number of epoch #Data preprocessing/ Converting data to vector for the X = pad_sequences(X, maxlen=5, value=0.) Y = to_categorical(Y, 2) #Build the network net = tflearn.input_data([None, 5]) net = tflearn.embedding(net, input_dim=10000, output_dim=128) net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob) net = tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy') model = tflearn.DNN(net, tensorboard_verbose=0) model.fit(X, Y, validation_set=0.2, show_metric=True, batch_size=batch_size, n_epoch=n_epoch) model.save('./model.tfl')
2.84375
3
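The script above pads every input sequence to length 5 and one-hot encodes the binary labels before feeding the RNN. The plain-NumPy illustration below shows what those two preprocessing steps produce; the side on which truncation and padding happen is an assumption here, since the script relies on tflearn's defaults rather than spelling them out.

# Library-agnostic illustration of pad-to-maxlen and one-hot encoding.
import numpy as np

def pad_to(seq, maxlen=5, value=0):
    seq = list(seq)[:maxlen]                    # truncate if too long (side is an assumption)
    return seq + [value] * (maxlen - len(seq))  # pad with zeros if too short

def one_hot(labels, n_classes=2):
    out = np.zeros((len(labels), n_classes))
    out[np.arange(len(labels)), labels] = 1.0
    return out

X = [[3, 7, 1], [4, 4, 4, 4, 4, 4, 4], [9]]
Y = [0, 1, 1]

print(np.array([pad_to(s) for s in X]))
# [[3 7 1 0 0]
#  [4 4 4 4 4]
#  [9 0 0 0 0]]
print(one_hot(Y))
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]]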
code/tests/test_tile_tf.py
Nocty-chan/cs224n-squad
2
1460
import numpy as np import tensorflow as tf H = 2 N = 2 M = 3 BS = 10 def my_softmax(arr): max_elements = np.reshape(np.max(arr, axis = 2), (BS, N, 1)) arr = arr - max_elements exp_array = np.exp(arr) print (exp_array) sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1)) return exp_array /sum_array def masked_softmax(logits, mask, dim): """ Takes masked softmax over given dimension of logits. Inputs: logits: Numpy array. We want to take softmax over dimension dim. mask: Numpy array of same shape as logits. Has 1s where there's real data in logits, 0 where there's padding dim: int. dimension over which to take softmax Returns: masked_logits: Numpy array same shape as logits. This is the same as logits, but with 1e30 subtracted (i.e. very large negative number) in the padding locations. prob_dist: Numpy array same shape as logits. The result of taking softmax over masked_logits in given dimension. Should be 0 in padding locations. Should sum to 1 over given dimension. """ exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30) # -large where there's padding, 0 elsewhere print (exp_mask) masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large prob_dist = tf.nn.softmax(masked_logits, dim) return masked_logits, prob_dist def test_build_similarity(contexts, questions): w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1) # 2 * H w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2) # 2 * self.hidden_size w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3) # 2 * self.hidden_size q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) # N x BS x M x 2H q_tile = tf.transpose(q_tile, (1, 0, 3, 2)) # BS x N x 2H x M contexts = tf.expand_dims(contexts, -1) # BS x N x 2H x 1 result = (contexts * q_tile) # BS x N x 2H x M tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M]) result = tf.transpose(result, (0, 1, 3, 2)) # BS x N x M x 2H result = tf.reshape(result, (-1, N * M, 2 * H)) # BS x (NxM) x 2H tf.assert_equal(tf.shape(result), [BS, N*M, 2*H]) # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1]) # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1]) # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1]) term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N term1 = tf.reshape(term1, (-1, N)) term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M term2 = tf.reshape(term2, (-1, M)) term3 = tf.matmul(tf.reshape(result, (BS * N * M, 2* H)), tf.expand_dims(w_sim_3, -1)) term3 = tf.reshape(term3, (-1, N, M)) # BS x N x M S = tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2, (-1, 1, M)) return S def test_build_sim_mask(): context_mask = np.array([True, True]) # BS x N question_mask = np.array([True, True, False]) # BS x M context_mask = np.tile(context_mask, [BS, 1]) question_mask = np.tile(question_mask, [BS, 1]) context_mask = tf.get_variable('context_mask', initializer=context_mask) question_mask = tf.get_variable('question_mask', initializer=question_mask) context_mask = tf.expand_dims(context_mask, -1) # BS x N x 1 question_mask = tf.expand_dims(question_mask, -1) # BS x M x 1 question_mask = tf.transpose(question_mask, (0, 2, 1)) # BS x 1 x M sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) # BS x N x M return sim_mask def test_build_c2q(S, S_mask, questions): _, alpha = masked_softmax(S, mask, 2) # BS x N x M return tf.matmul(alpha, questions) def test_build_q2c(S, S_mask, contexts): # S = BS x N x M # contexts = BS 
x N x 2H m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x 1 beta = tf.transpose(beta, (0, 2, 1)) q2c = tf.matmul(beta, contexts) return m, beta, q2c def test_concatenation(c2q, q2c): q2c = tf.tile(q2c, (1, N, 1)) output = tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output), [BS, N, 4*H]) return output if __name__== "__main__": w_1 = np.array([1., 2., 3., 4.]) w_2 = np.array([5., 6., 7., 8.]) w_3 = np.array([13., 12., 11., 10.]) c = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]) # BS x N x 2H q = np.array([[[1., 2., 3., 0.], [5., 6., 7., 4.], [8., 9. , 10., 11.]]]) # BS x M x 2H c = np.tile(c, [BS, 1, 1]) q = np.tile(q, [BS, 1, 1]) questions = tf.get_variable('questions', initializer=q) contexts = tf.get_variable('contexts', initializer=c) S = test_build_similarity(contexts, questions) mask = test_build_sim_mask() c2q = test_build_c2q(S, mask, questions) m, beta, q2c = test_build_q2c(S, mask, contexts) output = test_concatenation(c2q, q2c) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) S_result, mask_result, c2q_r = sess.run([S, mask, c2q]) actual_result = np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]), [BS, 1, 1]) assert np.array_equal(actual_result, S_result), 'Arrays are not equal' print ("Building similarity matrix is successful!") print ("Context 2 Question attention") m_r, beta_r, q2c_r = sess.run([m, beta, q2c]) output_r = sess.run(output)
3.453125
3
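The masked_softmax helper in the test above adds a very large negative number to the logits at padded positions so they end up with essentially zero probability. A small NumPy reference version of the same idea is shown below; it is useful as a ground truth to compare the TensorFlow graph against.

# Reference implementation of the masked softmax idea used above, in plain
# NumPy: padded positions receive a huge negative logit, so their probability
# collapses to ~0 while the remaining entries still sum to 1.
import numpy as np

def masked_softmax_np(logits, mask, axis=-1):
    exp_mask = (1.0 - mask.astype(np.float64)) * (-1e30)   # 0 where real, -1e30 where padding
    masked_logits = logits + exp_mask
    shifted = masked_logits - masked_logits.max(axis=axis, keepdims=True)
    exp = np.exp(shifted)
    return masked_logits, exp / exp.sum(axis=axis, keepdims=True)

logits = np.array([[1.0, 2.0, 3.0]])
mask = np.array([[1, 1, 0]])             # last position is padding
_, probs = masked_softmax_np(logits, mask)
print(probs)                              # approx [[0.269, 0.731, 0.0]]
print(probs.sum(axis=-1))                 # [1.]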
specutils/tests/test_smoothing.py
hamogu/specutils
0
1461
import numpy as np import pytest from astropy import convolution from scipy.signal import medfilt import astropy.units as u from ..spectra.spectrum1d import Spectrum1D from ..tests.spectral_examples import simulated_spectra from ..manipulation.smoothing import (convolution_smooth, box_smooth, gaussian_smooth, trapezoid_smooth, median_smooth) def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01): """ There are two things to compare for each set of smoothing: 1. Compare the smoothed flux from the astropy machinery vs the smoothed flux from specutils. This is done by comparing flux_smooth1 and flux_smooth2. 2. Next we want to compare the smoothed flux to the original flux. This is a little more difficult as smoothing will make a difference for median filter, but less so for convolution based smoothing if the kernel is normalized (area under the kernel = 1). In this second case the rtol (relative tolerance) is used judiciously. """ # Compare, element by element, the two smoothed fluxes. assert np.allclose(flux_smooth1, flux_smooth2) # Compare the total spectral flux of the smoothed to the original. assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol) def test_smooth_custom_kernel(simulated_spectra): """ Test CustomKernel smoothing with correct parmaeters. """ # Create the original spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create a custom kernel (some weird asymmetric-ness) numpy_kernel = np.array([0.5, 1, 2, 0.5, 0.2]) numpy_kernel = numpy_kernel / np.sum(numpy_kernel) custom_kernel = convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel) # Calculate the custom smoothed spec1_smoothed = convolution_smooth(spec1, custom_kernel) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) @pytest.mark.parametrize("width", [1, 2.3]) def test_smooth_box_good(simulated_spectra, width): """ Test Box1DKernel smoothing with correct parmaeters. Width values need to be a number greater than 0. """ # Create the original spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Calculate the smoothed flux using Astropy box_kernel = convolution.Box1DKernel(width) flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel) # Calculate the box smoothed spec1_smoothed = box_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize("width", [-1, 0, 'a']) def test_smooth_box_bad(simulated_spectra, width): """ Test Box1DKernel smoothing with incorrect parmaeters. Width values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input parameters with pytest.raises(ValueError): box_smooth(spec1, width) @pytest.mark.parametrize("stddev", [1, 2.3]) def test_smooth_gaussian_good(simulated_spectra, stddev): """ Test Gaussian1DKernel smoothing with correct parmaeters. Standard deviation values need to be a number greater than 0. 
""" # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Calculate the smoothed flux using Astropy gaussian_kernel = convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel) # Test gaussian smoothing spec1_smoothed = gaussian_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02) # Check the input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize("stddev", [-1, 0, 'a']) def test_smooth_gaussian_bad(simulated_spectra, stddev): """ Test MexicanHat1DKernel smoothing with incorrect parmaeters. Standard deviation values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input paramters with pytest.raises(ValueError): gaussian_smooth(spec1, stddev) @pytest.mark.parametrize("stddev", [1, 2.3]) def test_smooth_trapezoid_good(simulated_spectra, stddev): """ Test Trapezoid1DKernel smoothing with correct parmaeters. Standard deviation values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create the flux_smoothed which is what we want to compare to trapezoid_kernel = convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel) # Test trapezoid smoothing spec1_smoothed = trapezoid_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize("stddev", [-1, 0, 'a']) def test_smooth_trapezoid_bad(simulated_spectra, stddev): """ Test Trapezoid1DKernel smoothing with incorrect parmaeters. Standard deviation values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad parameters with pytest.raises(ValueError): trapezoid_smooth(spec1, stddev) @pytest.mark.parametrize("width", [1, 3, 9]) def test_smooth_median_good(simulated_spectra, width): """ Test Median smoothing with correct parmaeters. Width values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create the flux_smoothed which is what we want to compare to flux_smoothed_astropy = medfilt(flux_original, width) # Test median smoothing spec1_smoothed = median_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15) # Check the input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize("width", [-1, 0, 'a']) def test_smooth_median_bad(simulated_spectra, width): """ Test Median smoothing with incorrect parmaeters. Width values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad parameters with pytest.raises(ValueError): median_smooth(spec1, width)
2.296875
2
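The tests above all follow the same pattern: smooth a spectrum with a specutils convenience function and compare it against the corresponding astropy convolution kernel. A short sketch of that pattern outside the test harness is given below; the spectral values are invented, and the Spectrum1D/box_smooth usage follows the public imports exercised by the test module.

# Sketch of the smoothing pattern exercised by the tests above, on a toy
# spectrum (values are made up). box_smooth(spec, width) should match an
# astropy Box1DKernel convolution of the raw flux.
import numpy as np
import astropy.units as u
from astropy import convolution
from specutils import Spectrum1D
from specutils.manipulation import box_smooth

wave = np.linspace(1.0, 2.0, 50) * u.um
flux = (np.sin(np.linspace(0, 10, 50)) + 2.0) * u.mJy
spec = Spectrum1D(spectral_axis=wave, flux=flux)

smoothed = box_smooth(spec, width=3)
reference = convolution.convolve(flux.value, convolution.Box1DKernel(3))

print(np.allclose(smoothed.flux.value, reference))   # expected: True
print(smoothed.flux.unit)                             # mJy, units are preserved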
modules/interpolator.py
buulikduong/1d_sgl_solver
0
1462
<reponame>buulikduong/1d_sgl_solver<gh_stars>0 """Module interpolating mathematical functions out of support points""" from scipy.interpolate import interp1d, lagrange, CubicSpline def interpolator(x_sup, y_sup, method): """Interpolates a mathematical function from a given set of points using either linear, polynomial or cubic spline for the interpolation. Args: x_sup (list): x-coordinates of the function y_sup (list): y-coordinates of the function method (string): name of the interpolation method to be used Returns: intfunc: interpolated function """ if method == "linear": intfunc = interp1d(x_sup, y_sup, kind="linear") return intfunc elif method == "polynomial": intfunc = lagrange(x_sup, y_sup) return intfunc elif method == "cspline": intfunc = CubicSpline(x_sup, y_sup, bc_type="natural") return intfunc return None
2.890625
3
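A quick usage example for the interpolator helper above, assuming the module is imported so that interpolator() is in scope; the support points are invented (samples of x squared) purely for illustration.

# Example use of interpolator() with made-up support points. Each call returns
# a callable that can be evaluated on a dense grid.
import numpy as np

x_sup = [0.0, 1.0, 2.0, 3.0, 4.0]
y_sup = [0.0, 1.0, 4.0, 9.0, 16.0]          # samples of x**2
x_dense = np.linspace(0.0, 4.0, 9)

for method in ("linear", "polynomial", "cspline"):
    f = interpolator(x_sup, y_sup, method)
    print(method, np.round(f(x_dense), 3))

# The polynomial (Lagrange) fit reproduces x**2 exactly here; the linear and
# natural cubic-spline results differ slightly between the support points.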
frappe/patches/v13_0/remove_web_view.py
chentaoz/frappe
3755
1463
<filename>frappe/patches/v13_0/remove_web_view.py import frappe def execute(): frappe.delete_doc_if_exists("DocType", "Web View") frappe.delete_doc_if_exists("DocType", "Web View Component") frappe.delete_doc_if_exists("DocType", "CSS Class")
1.429688
1
games.py
cpratim/DSA-Research-Paper
0
1464
import json import matplotlib.pyplot as plt from pprint import pprint import numpy as np from scipy.stats import linregress from util.stats import * with open('data/game_stats.json', 'r') as f: df = json.load(f) X, y = [], [] for match, stats in df.items(): home, away = stats['home'], stats['away'] if home['mp'] != away['mp'] != '240': continue try: ft_dif = float(home['fta']) - float(away['fta']) pt_dif = float(home['pts']) - float(away['pts']) if abs(pt_dif) > 10: continue except: continue X.append(ft_dif) y.append(pt_dif) c = 0 for f, p in zip(X, y): if f * p > 0: c += 1 print(c / len(X)) slope, intercept, r, p, std = linregress(X, y) f = lambda x: x*slope + intercept fit_y = [f(min(X)), f(max(X))] plt.xlabel('Free Throw Attempts') plt.ylabel('Point Differential') plt.title('FTA vs Point Differential') print(correlation(X, y)) plt.plot([min(X), max(X)], fit_y, color = 'red') plt.scatter(X, y) plt.show()
2.921875
3
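The script above fits a least-squares line to free-throw-attempt differential versus point differential with scipy's linregress. The snippet below is a tiny self-contained example of that call on synthetic data (unrelated to the real game_stats.json), just to show what the returned slope, intercept and r value mean.

# Minimal linregress example on synthetic data: y is built as 0.5*x plus noise,
# so the fitted slope and correlation coefficient should recover roughly that.
import numpy as np
from scipy.stats import linregress

rng = np.random.default_rng(0)
x = rng.normal(0, 10, size=200)            # e.g. free-throw-attempt differential
y = 0.5 * x + rng.normal(0, 3, size=200)   # point differential with noise

fit = linregress(x, y)
print(round(fit.slope, 2), round(fit.intercept, 2), round(fit.rvalue, 2))

predict = lambda v: fit.slope * v + fit.intercept
print(round(predict(10.0), 2))             # fitted point differential at +10 FTA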
src/generate_data.py
gycggd/leaf-classification
0
1465
import os import numpy as np import pandas as pd import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing.image import img_to_array, load_img from keras.utils.np_utils import to_categorical from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import LabelEncoder, StandardScaler def load_numeric_training(standardize=True): data = pd.read_csv('../train.csv') ID = data.pop('id') y = data.pop('species') y = LabelEncoder().fit(y).transform(y) X = StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values, X, y def load_numeric_test(standardize=True): data = pd.read_csv('../test.csv') ID = data.pop('id') test = StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values, test def resize_img(img, max_dim=96): max_axis = np.argmax(img.size) scale = max_dim / img.size[max_axis] return img.resize((int(img.size[0] * scale), int(img.size[1] * scale))) def load_img_data(ids, max_dim=96, center=True): X = np.empty((len(ids), max_dim, max_dim, 1)) for i, id in enumerate(ids): img = load_img('../images/{}.jpg'.format(id), grayscale=True) img = resize_img(img, max_dim=max_dim) x = img_to_array(img) h, w = x.shape[:2] if center: h1 = (max_dim - h) >> 1 h2 = h1 + h w1 = (max_dim - w) >> 1 w2 = w1 + w else: h1, h2, w1, w2 = 0, h, 0, w X[i][h1:h2, w1:w2][:] = x return np.around(X / 255) def load_train_data(split=0.9, random_state=7): ID, X_num_train, y = load_numeric_training() X_img_train = load_img_data(ID) sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split, random_state=random_state) train_idx, val_idx = next(sss.split(X_num_train, y)) ID_tr, X_num_tr, X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx] ID_val, X_num_val, X_img_val, y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx] return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) def load_test_data(): ID, X_num_test = load_numeric_test() X_img_test = load_img_data(ID) return ID, X_num_test, X_img_test print('Loading train data ...') (ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data() # Prepare ID-to-label and ID-to-numerical dictionary ID_y_dic, ID_num_dic = {}, {} for i in range(len(ID_train)): ID_y_dic[ID_train[i]] = y_tr[i] ID_num_dic[ID_train[i]] = X_num_tr[i, :] print('Loading test data ...') ID_test, X_num_test, X_img_test = load_test_data() # Convert label to categorical/one-hot ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val)) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float32_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def write_val_data(): val_data_path = '../tfrecords/val_data_1.tfrecords' if os.path.exists(val_data_path): print('Warning: old file exists, removed.') os.remove(val_data_path) val_image, val_num, val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool) print(val_image.shape, val_num.shape, val_label.shape) val_writer = tf.python_io.TFRecordWriter(val_data_path) print('Writing data into tfrecord ...') for i in range(len(val_image)): image, num, label = val_image[i], val_num[i], val_label[i] feature = {'image': _bytes_feature(image.tostring()), 'num': 
_bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString()) print('Done!') def write_train_data(): imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='nearest') imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7) print('Generating augmented images') all_images = [] all_ID = [] p = True for i in range(28 * 200): print('Generating augmented images for epoch {}, batch {}'.format(i // 28, i % 28)) X, ID = imgen_train.next() all_images.append(X) all_ID.append(np.argmax(ID, axis=1)) all_images = np.concatenate(all_images).astype(np.bool) all_ID = np.concatenate(all_ID) all_y = np.zeros(all_ID.shape) all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1])) for i in range(len(all_ID)): all_nums[i, :] = ID_num_dic[all_ID[i]] all_y[i] = ID_y_dic[all_ID[i]] all_y = to_categorical(all_y).astype(np.bool) print('Data shapes:') print('Image:', all_images.shape) print('Label:', all_y.shape) print('Numerical:', all_nums.shape) train_data_path = '../tfrecords/train_data_1.tfrecords' if os.path.exists(train_data_path): print('Warning: old file exists, removed.') os.remove(train_data_path) # compression = tf.python_io.TFRecordCompressionType.GZIP # train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression)) train_writer = tf.python_io.TFRecordWriter(train_data_path) print('Writing data into tfrecord ...') for i in range(len(all_images)): if i % 891 == 0: print('Writing {} th epoch data ...'.format(i // 891)) image, num, label = all_images[i], all_nums[i], all_y[i] feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) train_writer.write(example.SerializeToString()) print('Done!') write_val_data()
2.765625
3
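The writer above serializes each example as three raw byte strings (image, num, label). Below is a hedged sketch of reading one record back with the same TF 1.x API family the script itself uses; the dtypes in the decode step must mirror what was written (bool image/label, float64 numerical features), the file path is the one used above, and the reshape hint assumes the max_dim=96 image size from the loader.

# Sketch of reading back one serialized example with TF 1.x APIs.
import numpy as np
import tensorflow as tf

record_path = '../tfrecords/val_data_1.tfrecords'
iterator = tf.python_io.tf_record_iterator(record_path)
serialized = next(iterator)

example = tf.train.Example.FromString(serialized)
feats = example.features.feature

image = np.frombuffer(feats['image'].bytes_list.value[0], dtype=np.bool_)
num = np.frombuffer(feats['num'].bytes_list.value[0], dtype=np.float64)
label = np.frombuffer(feats['label'].bytes_list.value[0], dtype=np.bool_)

print(image.size, num.size, label.size)   # flattened sizes; reshape as needed,
                                          # e.g. image.reshape(96, 96, 1)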
2650-construindo-muralhas.py
ErickSimoes/URI-Online-Judge
0
1466
<reponame>ErickSimoes/URI-Online-Judge<gh_stars>0 # -*- coding: utf-8 -*- n, w = map(int, input().split()) for _ in range(n): entrada = input() last_space = entrada.rfind(' ') if int(entrada[last_space:]) > w: print(entrada[:last_space])
3.40625
3
tests/assemblers/test_ensemble.py
yarix/m2cgen
1
1467
<gh_stars>1-10 from sklearn import ensemble from m2cgen import assemblers, ast from tests import utils def test_single_condition(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1) estimator.fit([[1], [2]], [1, 2]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.NumVal(1.0), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_two_conditions(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, 2, 3]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.NumVal(2.0), ast.NumVal(3.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_multi_class(): estimator = ensemble.RandomForestClassifier( n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, -1, 1]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinVectorNumExpr( ast.BinVectorExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)]), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)])), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)]), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)])), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected)
2.109375
2
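The tests above check the intermediate AST that m2cgen's RandomForestModelAssembler builds for tiny random forests. For context, the user-facing entry points skip the AST and emit source code directly; the sketch below assumes the m2cgen package is installed and uses its public export_to_python function on the same toy model as the first test.

# End-to-end sketch of what the assembler feeds into: m2cgen turns the fitted
# forest into plain Python source. The toy training data mirrors the test above.
from sklearn import ensemble
import m2cgen

estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1)
estimator.fit([[1], [2]], [1, 2])

code = m2cgen.export_to_python(estimator)   # returns a string of Python source
print(code)                                 # a score(input) function built from the tree splits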
setup.py
Parquery/pynumenc
1
1468
<reponame>Parquery/pynumenc """A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject """ import os from setuptools import setup, find_packages, Extension import pynumenc_meta # pylint: disable=redefined-builtin here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() # pylint: disable=invalid-name setup( name=pynumenc_meta.__title__, version=pynumenc_meta.__version__, description=pynumenc_meta.__description__, long_description=long_description, url=pynumenc_meta.__url__, author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: End Users/Desktop', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6' ], license='License :: OSI Approved :: MIT License', keywords='C++ encode decode bytes encoding decoding sorted', packages=find_packages(exclude=['docs', 'tests']), install_requires=[], extras_require={ 'dev': [ # yapf: disable, 'docutils>=0.14,<1', 'mypy==0.641', 'hypothesis==3.82.1', 'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4', 'pylint==2.1.1', 'yapf==0.24.0' # yapf: enable ] }, ext_modules=[ Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp']) ], scripts=['bin/pynumenc'], py_modules=['pynumenc_meta'], package_data={'pynumenc': ['py.typed']}, data_files=[('.', ['LICENSE.txt', 'README.rst'])])
1.65625
2
Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py
nipunjain099/AutoGuard
147
1469
import numpy as np from skimage.transform import resize from skimage import measure from skimage.measure import regionprops class OCROnObjects(): def __init__(self, license_plate): character_objects = self.identify_boundary_objects(license_plate) self.get_regions(character_objects, license_plate) def identify_boundary_objects(self, a_license_plate): labelImage = measure.label(a_license_plate) character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions regionLists = regionprops(labelImage) return regionLists def get_regions(self, character_objects, a_license_plate): """ used to map out regions where the license plate charcters are the principle of connected component analysis and labelling were used Parameters: ----------- a_license_plate: 2D numpy binary image of the license plate Returns: -------- a dictionary containing the index fullscale: 3D array containig 2D array of each character columnsVal: 1D array the starting column of each character coordinates: """ cord = [] counter=0 column_list = [] character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions for regions in character_objects: minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox character_height = maximumRow - minimumRow character_width = maximumCol - minimumCol roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol] if character_height > minHeight and character_height < maxHeight and character_width > minWidth and character_width < maxWidth: if counter == 0: samples = resize(roi, (20,20)) cord.append(regions.bbox) counter += 1 elif counter == 1: roismall = resize(roi, (20,20)) samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) counter+=1 else: roismall = resize(roi, (20,20)) samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) column_list.append(minimumCol) if len(column_list) == 0: self.candidates = {} else: self.candidates = { 'fullscale': samples, 'coordinates': np.array(cord), 'columnsVal': column_list } return self.candidates
3.078125
3
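The class above picks out plausible character regions by labelling connected components and filtering them on bounding-box height and width. The snippet below is a compact, self-contained demo of that labelling-and-filtering step on a synthetic binary image; the size thresholds here are arbitrary stand-ins for the plate-relative ones used in get_regions().

# Self-contained demo of the labelling + bounding-box filtering idea, on a
# synthetic binary "plate" (thresholds are arbitrary).
import numpy as np
from skimage import measure
from skimage.measure import regionprops

plate = np.zeros((40, 120), dtype=np.uint8)
plate[10:30, 10:18] = 1        # a tall, narrow blob (character-like)
plate[10:30, 30:38] = 1        # another character-like blob
plate[35:38, 60:110] = 1       # a short, wide blob (noise)

labels = measure.label(plate)
min_h, max_h, min_w, max_w = 10, 30, 3, 15   # plausible character dimensions

for region in regionprops(labels):
    min_row, min_col, max_row, max_col = region.bbox
    h, w = max_row - min_row, max_col - min_col
    if min_h < h < max_h and min_w < w < max_w:
        print("character candidate at columns", min_col, "-", max_col)
    else:
        print("rejected blob of size", h, "x", w)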
project/server/models.py
mvlima/flask-jwt-auth
0
1470
# project/server/models.py import jwt import datetime from project.server import app, db, bcrypt class User(db.Model): """ User Model for storing user related details """ __tablename__ = "users" id = db.Column(db.Integer, primary_key=True, autoincrement=True) username = db.Column(db.String(255), unique=True, nullable=False) email = db.Column(db.String(255), unique=True, nullable=False) password = db.Column(db.String(255), nullable=False) name = db.Column(db.String(255), nullable=False) age = db.Column(db.Integer, nullable=False) address = db.Column(db.Integer(255), nullable=False) registered_on = db.Column(db.DateTime, nullable=False) admin = db.Column(db.Boolean, nullable=False, default=False) def __init__(self, email, username, password, name, age, address, admin=False): self.email = email self.username = username self.password = <PASSWORD>.generate_password_hash( password, app.config.get('BCRYPT_LOG_ROUNDS') ).decode() self.name = name self.age = age self.address = address self.registered_on = datetime.datetime.now() self.admin = admin def encode_auth_token(self, user_id): """ Generates the Auth Token :return: string """ try: payload = { 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5), 'iat': datetime.datetime.utcnow(), 'sub': user_id } return jwt.encode( payload, app.config.get('SECRET_KEY'), algorithm='HS256' ) except Exception as e: return e @staticmethod def decode_auth_token(auth_token): """ Validates the auth token :param auth_token: :return: integer|string """ try: payload = jwt.decode(auth_token, app.config.get('SECRET_KEY')) is_blacklisted_token = BlacklistToken.check_blacklist(auth_token) if is_blacklisted_token: return 'Token blacklisted. Please log in again.' else: return payload['sub'] except jwt.ExpiredSignatureError: return 'Signature expired. Please log in again.' except jwt.InvalidTokenError: return 'Invalid token. Please log in again.' class BlacklistToken(db.Model): """ Token Model for storing JWT tokens """ __tablename__ = 'blacklist_tokens' id = db.Column(db.Integer, primary_key=True, autoincrement=True) token = db.Column(db.String(500), unique=True, nullable=False) blacklisted_on = db.Column(db.DateTime, nullable=False) def __init__(self, token): self.token = token self.blacklisted_on = datetime.datetime.now() def __repr__(self): return '<id: token: {}'.format(self.token) @staticmethod def check_blacklist(auth_token): # Check whether auth token has been blacklisted res = BlacklistToken.query.filter_by(token=str(auth_token)).first() if res: return True else: return False
2.75
3
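The User model above wraps PyJWT for issuing and validating short-lived tokens. Below is a stand-alone sketch of the same encode/decode round trip without the Flask/SQLAlchemy scaffolding; SECRET is a placeholder, and note that depending on the PyJWT version, encode returns bytes (1.x) or str (2.x) and decode requires an explicit algorithms list in 2.x.

# Stand-alone sketch of the JWT round trip performed by the model above.
import datetime
import jwt

SECRET = 'change-me'   # placeholder; the app reads this from its config

def encode_auth_token(user_id):
    payload = {
        'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=5),
        'iat': datetime.datetime.utcnow(),
        'sub': user_id,
    }
    return jwt.encode(payload, SECRET, algorithm='HS256')

def decode_auth_token(token):
    try:
        payload = jwt.decode(token, SECRET, algorithms=['HS256'])
        return payload['sub']
    except jwt.ExpiredSignatureError:
        return 'Signature expired. Please log in again.'
    except jwt.InvalidTokenError:
        return 'Invalid token. Please log in again.'

token = encode_auth_token('42')
print(decode_auth_token(token))   # '42', as long as it is decoded within 5 seconds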
letsencrypt/setup.py
ccppuu/certbot
1
1471
<reponame>ccppuu/certbot import codecs import os import sys from setuptools import setup from setuptools import find_packages def read_file(filename, encoding='utf8'): """Read unicode from given file.""" with codecs.open(filename, encoding=encoding) as fd: return fd.read() here = os.path.abspath(os.path.dirname(__file__)) readme = read_file(os.path.join(here, 'README.rst')) # This package is a simple shim around certbot install_requires = ['certbot'] version = '0.7.0.dev0' setup( name='letsencrypt', version=version, description="ACME client", long_description=readme, url='https://github.com/letsencrypt/letsencrypt', author="Certbot Project", author_email='<EMAIL>', license='Apache License 2.0', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Environment :: Console :: Curses', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: Apache Software License', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Security', 'Topic :: System :: Installation/Setup', 'Topic :: System :: Networking', 'Topic :: System :: Systems Administration', 'Topic :: Utilities', ], packages=find_packages(), include_package_data=True, install_requires=install_requires, entry_points={ 'console_scripts': [ 'letsencrypt = certbot.main:main', ], }, )
2.078125
2
elastalert/alerts.py
dekhrekh/elastalert
0
1472
<filename>elastalert/alerts.py<gh_stars>0 # -*- coding: utf-8 -*- import copy import datetime import json import logging import subprocess import sys import warnings from email.mime.text import MIMEText from email.utils import formatdate from smtplib import SMTP from smtplib import SMTP_SSL from smtplib import SMTPAuthenticationError from smtplib import SMTPException from socket import error import boto3 import requests import stomp from exotel import Exotel from jira.client import JIRA from jira.exceptions import JIRAError from requests.exceptions import RequestException from staticconf.loader import yaml_loader from texttable import Texttable from twilio.base.exceptions import TwilioRestException from twilio.rest import Client as TwilioClient from util import EAException from util import elastalert_logger from util import lookup_es_key from util import pretty_ts from util import ts_now from util import ts_to_dt class DateTimeEncoder(json.JSONEncoder): def default(self, obj): if hasattr(obj, 'isoformat'): return obj.isoformat() else: return json.JSONEncoder.default(self, obj) class BasicMatchString(object): """ Creates a string containing fields in match for the given rule. """ def __init__(self, rule, match): self.rule = rule self.match = match def _ensure_new_line(self): while self.text[-2:] != '\n\n': self.text += '\n' def _add_custom_alert_text(self): missing = '<MISSING VALUE>' alert_text = unicode(self.rule.get('alert_text', '')) if 'alert_text_args' in self.rule: alert_text_args = self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args] # Support referencing other top-level rule properties # This technically may not work if there is a top-level rule property with the same name # as an es result key, since it would have been matched in the lookup_es_key call above for i in xrange(len(alert_text_values)): if alert_text_values[i] is None: alert_value = self.rule.get(alert_text_args[i]) if alert_value: alert_text_values[i] = alert_value alert_text_values = [missing if val is None else val for val in alert_text_values] alert_text = alert_text.format(*alert_text_values) elif 'alert_text_kw' in self.rule: kw = {} for name, kw_name in self.rule.get('alert_text_kw').items(): val = lookup_es_key(self.match, name) # Support referencing other top-level rule properties # This technically may not work if there is a top-level rule property with the same name # as an es result key, since it would have been matched in the lookup_es_key call above if val is None: val = self.rule.get(name) kw[kw_name] = missing if val is None else val alert_text = alert_text.format(**kw) self.text += alert_text def _add_rule_text(self): self.text += self.rule['type'].get_match_str(self.match) def _add_top_counts(self): for key, counts in self.match.items(): if key.startswith('top_events_'): self.text += '%s:\n' % (key[11:]) top_events = counts.items() if not top_events: self.text += 'No events found.\n' else: top_events.sort(key=lambda x: x[1], reverse=True) for term, count in top_events: self.text += '%s: %s\n' % (term, count) self.text += '\n' def _add_match_items(self): match_items = self.match.items() match_items.sort(key=lambda x: x[0]) for key, value in match_items: if key.startswith('top_events_'): continue value_str = unicode(value) value_str.replace('\\n', '\n') if type(value) in [list, dict]: try: value_str = self._pretty_print_as_json(value) except TypeError: # Non serializable object, fallback to str pass self.text += '%s: %s\n' % (key, value_str) 
def _pretty_print_as_json(self, blob): try: return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False) except UnicodeDecodeError: # This blob contains non-unicode, so lets pretend it's Latin-1 to show something return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False) def __str__(self): self.text = '' if 'alert_text' not in self.rule: self.text += self.rule['name'] + '\n\n' self._add_custom_alert_text() self._ensure_new_line() if self.rule.get('alert_text_type') != 'alert_text_only': self._add_rule_text() self._ensure_new_line() if self.rule.get('top_count_keys'): self._add_top_counts() if self.rule.get('alert_text_type') != 'exclude_fields': self._add_match_items() return self.text class JiraFormattedMatchString(BasicMatchString): def _add_match_items(self): match_items = dict([(x, y) for x, y in self.match.items() if not x.startswith('top_events_')]) json_blob = self._pretty_print_as_json(match_items) preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob) self.text += preformatted_text class Alerter(object): """ Base class for types of alerts. :param rule: The rule configuration. """ required_options = frozenset([]) def __init__(self, rule): self.rule = rule # pipeline object is created by ElastAlerter.send_alert() # and attached to each alerters used by a rule before calling alert() self.pipeline = None self.resolve_rule_references(self.rule) def resolve_rule_references(self, root): # Support referencing other top-level rule properties to avoid redundant copy/paste if type(root) == list: # Make a copy since we may be modifying the contents of the structure we're walking for i, item in enumerate(copy.copy(root)): if type(item) == dict or type(item) == list: self.resolve_rule_references(root[i]) else: root[i] = self.resolve_rule_reference(item) elif type(root) == dict: # Make a copy since we may be modifying the contents of the structure we're walking for key, value in root.copy().iteritems(): if type(value) == dict or type(value) == list: self.resolve_rule_references(root[key]) else: root[key] = self.resolve_rule_reference(value) def resolve_rule_reference(self, value): strValue = unicode(value) if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule: if type(value) == int: return int(self.rule[strValue[1:-1]]) else: return self.rule[strValue[1:-1]] else: return value def alert(self, match): """ Send an alert. Match is a dictionary of information about the alert. :param match: A dictionary of relevant information to the alert. """ raise NotImplementedError() def get_info(self): """ Returns a dictionary of data related to this alert. At minimum, this should contain a field type corresponding to the type of Alerter. """ return {'type': 'Unknown'} def create_title(self, matches): """ Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary. :param matches: A list of dictionaries of relevant information to the alert. 
""" if 'alert_subject' in self.rule: return self.create_custom_title(matches) return self.create_default_title(matches) def create_custom_title(self, matches): alert_subject = unicode(self.rule['alert_subject']) if 'alert_subject_args' in self.rule: alert_subject_args = self.rule['alert_subject_args'] alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args] # Support referencing other top-level rule properties # This technically may not work if there is a top-level rule property with the same name # as an es result key, since it would have been matched in the lookup_es_key call above for i in xrange(len(alert_subject_values)): if alert_subject_values[i] is None: alert_value = self.rule.get(alert_subject_args[i]) if alert_value: alert_subject_values[i] = alert_value alert_subject_values = ['<MISSING VALUE>' if val is None else val for val in alert_subject_values] return alert_subject.format(*alert_subject_values) return alert_subject def create_alert_body(self, matches): body = self.get_aggregation_summary_text(matches) for match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with dashes if len(matches) > 1: body += '\n----------------------------------------\n' return body def get_aggregation_summary_text(self, matches): text = '' if 'aggregation' in self.rule and 'summary_table_fields' in self.rule: summary_table_fields = self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields] # Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered summary_table_fields_with_count = summary_table_fields + ['count'] text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format( summary_table_fields_with_count ) text_table = Texttable() text_table.header(summary_table_fields_with_count) match_aggregation = {} # Maintain an aggregate count for each unique key encountered in the aggregation period for match in matches: key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields]) if key_tuple not in match_aggregation: match_aggregation[key_tuple] = 1 else: match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1 for keys, count in match_aggregation.iteritems(): text_table.add_row([key for key in keys] + [count]) text += text_table.draw() + '\n\n' return unicode(text) def create_default_title(self, matches): return self.rule['name'] def get_account(self, account_file): """ Gets the username and password from an account file. :param account_file: Name of the file which contains user and password information. """ account_conf = yaml_loader(account_file) if 'user' not in account_conf or 'password' not in account_conf: raise EAException('Account file must have user and password fields') self.user = account_conf['user'] self.password = account_conf['password'] class StompAlerter(Alerter): """ The stomp alerter publishes alerts via stomp to a broker. 
""" required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) def alert(self, matches): alerts = [] qk = self.rule.get('query_key', None) fullmessage = {} for match in matches: if qk in match: elastalert_logger.info( 'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '1)Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = match[qk] else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts'] = alerts fullmessage['rule'] = self.rule['name'] fullmessage['matching'] = unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") fullmessage['body'] = self.create_alert_body(matches) self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost') self.stomp_hostport = self.rule.get('stomp_hostport', '61613') self.stomp_login = self.rule.get('stomp_login', 'admin') self.stomp_password = self.rule.get('stomp_password', '<PASSWORD>') self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT') conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect() def get_info(self): return {'type': 'stomp'} class DebugAlerter(Alerter): """ The debug alerter uses a Python logger (by default, alerting to terminal). 
""" def alert(self, matches): qk = self.rule.get('query_key', None) for match in matches: if qk in match: elastalert_logger.info( 'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def get_info(self): return {'type': 'debug'} class EmailAlerter(Alerter): """ Sends an email alert """ required_options = frozenset(['email']) def __init__(self, *args): super(EmailAlerter, self).__init__(*args) self.smtp_host = self.rule.get('smtp_host', 'localhost') self.smtp_ssl = self.rule.get('smtp_ssl', False) self.from_addr = self.rule.get('from_addr', 'ElastAlert') self.smtp_port = self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file = self.rule.get('smtp_key_file') self.smtp_cert_file = self.rule.get('smtp_cert_file') # Convert email to a list if it isn't already if isinstance(self.rule['email'], basestring): self.rule['email'] = [self.rule['email']] # If there is a cc then also convert it a list if it isn't cc = self.rule.get('cc') if cc and isinstance(cc, basestring): self.rule['cc'] = [self.rule['cc']] # If there is a bcc then also convert it to a list if it isn't bcc = self.rule.get('bcc') if bcc and isinstance(bcc, basestring): self.rule['bcc'] = [self.rule['bcc']] add_suffix = self.rule.get('email_add_domain') if add_suffix and not add_suffix.startswith('@'): self.rule['email_add_domain'] = '@' + add_suffix def alert(self, matches): body = self.create_alert_body(matches) # Add JIRA ticket if it exists if self.pipeline is not None and 'jira_ticket' in self.pipeline: url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) body += '\nJIRA ticket: %s' % (url) to_addr = self.rule['email'] if 'email_from_field' in self.rule: recipient = lookup_es_key(matches[0], self.rule['email_from_field']) if isinstance(recipient, basestring): if '@' in recipient: to_addr = [recipient] elif 'email_add_domain' in self.rule: to_addr = [recipient + self.rule['email_add_domain']] elif isinstance(recipient, list): to_addr = recipient if 'email_add_domain' in self.rule: to_addr = [name + self.rule['email_add_domain'] for name in to_addr] email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject'] = self.create_title(matches) email_msg['To'] = ', '.join(to_addr) email_msg['From'] = self.from_addr email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) email_msg['Date'] = formatdate() if self.rule.get('cc'): email_msg['CC'] = ','.join(self.rule['cc']) to_addr = to_addr + self.rule['cc'] if self.rule.get('bcc'): to_addr = to_addr + self.rule['bcc'] try: if self.smtp_ssl: if self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: if self.smtp_port: self.smtp = SMTP(self.smtp_host, self.smtp_port) else: self.smtp = SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if 'smtp_auth_file' in self.rule: self.smtp.login(self.user, self.password) except (SMTPException, error) as e: raise EAException("Error connecting to SMTP host: %s" % (e)) except SMTPAuthenticationError 
as e: raise EAException("SMTP username/password rejected: %s" % (e)) self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) self.smtp.close() elastalert_logger.info("Sent email to %s" % (to_addr)) def create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) # If the rule has a query_key, add that value plus timestamp to subject if 'query_key' in self.rule: qk = matches[0].get(self.rule['query_key']) if qk: subject += ' - %s' % (qk) return subject def get_info(self): return {'type': 'email', 'recipients': self.rule['email']} class JiraAlerter(Alerter): """ Creates a Jira ticket for each alert """ required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) # Maintain a static set of built-in fields that we explicitly know how to set # For anything else, we will do best-effort and try to set a string value known_field_list = [ 'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component', 'jira_components', 'jira_description', 'jira_ignore_in_title', 'jira_issuetype', 'jira_label', 'jira_labels', 'jira_max_age', 'jira_priority', 'jira_project', 'jira_server', 'jira_watchers', ] # Some built-in jira types that can be used as custom fields require special handling # Here is a sample of one of them: # {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true, # "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string", # "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}} # There are likely others that will need to be updated on a case-by-case basis custom_string_types_with_special_handling = [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def __init__(self, rule): super(JiraAlerter, self).__init__(rule) self.server = self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project = self.rule['jira_project'] self.issue_type = self.rule['jira_issuetype'] # We used to support only a single component. This allows us to maintain backwards compatibility # while also giving the user-facing API a more representative name self.components = self.rule.get('jira_components', self.rule.get('jira_component')) # We used to support only a single label. This allows us to maintain backwards compatibility # while also giving the user-facing API a more representative name self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) self.description = self.rule.get('jira_description', '') self.assignee = self.rule.get('jira_assignee') self.max_age = self.rule.get('jira_max_age', 30) self.priority = self.rule.get('jira_priority') self.bump_tickets = self.rule.get('jira_bump_tickets', False) self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers = self.rule.get('jira_watchers') if self.bump_in_statuses and self.bump_not_in_statuses: msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' 
% \ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) if intersection: msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % ( msg, ','.join(intersection)) msg += ' This should be simplified to use only one or the other.' logging.warning(msg) self.jira_args = {'project': {'key': self.project}, 'issuetype': {'name': self.issue_type}} if self.components: # Support single component or list if type(self.components) != list: self.jira_args['components'] = [{'name': self.components}] else: self.jira_args['components'] = [{'name': component} for component in self.components] if self.labels: # Support single label or list if type(self.labels) != list: self.labels = [self.labels] self.jira_args['labels'] = self.labels if self.watchers: # Support single watcher or list if type(self.watchers) != list: self.watchers = [self.watchers] if self.assignee: self.jira_args['assignee'] = {'name': self.assignee} try: self.client = JIRA(self.server, basic_auth=(self.user, self.password)) self.get_priorities() self.get_arbitrary_fields() except JIRAError as e: # JIRAError may contain HTML, pass along only first 1024 chars raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024])) try: if self.priority is not None: self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} except KeyError: logging.error("Priority %s not found. Valid priorities are %s" % (self.priority, self.priority_ids.keys())) def get_arbitrary_fields(self): # This API returns metadata about all the fields defined on the jira server (built-ins and custom ones) fields = self.client.fields() for jira_field, value in self.rule.iteritems(): # If we find a field that is not covered by the set that we are aware of, it means it is either: # 1. A built-in supported field in JIRA that we don't have on our radar # 2. A custom field that a JIRA admin has configured if jira_field.startswith('jira_') and jira_field not in self.known_field_list: # Remove the jira_ part. Convert underscores to spaces normalized_jira_field = jira_field[5:].replace('_', ' ').lower() # All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case for identifier in ['name', 'id']: field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None) if field: break if not field: # Log a warning to ElastAlert saying that we couldn't find that type? # OR raise and fail to load the alert entirely? Probably the latter... raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field)) arg_name = field['id'] # Check the schema information to decide how to set the value correctly # If the schema information is not available, raise an exception since we don't know how to set it # Note this is only the case for two built-in types, id: issuekey and id: thumbnail if not ('schema' in field or 'type' in field['schema']): raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field)) arg_type = field['schema']['type'] # Handle arrays of simple types like strings or numbers if arg_type == 'array': # As a convenience, support the scenario wherein the user only provides # a single value for a multi-value field e.g. 
jira_labels: Only_One_Label if type(value) != list: value = [value] array_items = field['schema']['items'] # Simple string types if array_items in ['string', 'date', 'datetime']: # Special case for multi-select custom types (the JIRA metadata says that these are strings, but # in reality, they are required to be provided as an object. if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = [{'value': v} for v in value] else: self.jira_args[arg_name] = value elif array_items == 'number': self.jira_args[arg_name] = [int(v) for v in value] # Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key' elif array_items == 'option': self.jira_args[arg_name] = [{'value': v} for v in value] else: # Try setting it as an object, using 'name' as the key # This may not work, as the key might actually be 'key', 'id', 'value', or something else # If it works, great! If not, it will manifest itself as an API error that will bubble up self.jira_args[arg_name] = [{'name': v} for v in value] # Handle non-array types else: # Simple string types if arg_type in ['string', 'date', 'datetime']: # Special case for custom types (the JIRA metadata says that these are strings, but # in reality, they are required to be provided as an object. if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = {'value': value} else: self.jira_args[arg_name] = value # Number type elif arg_type == 'number': self.jira_args[arg_name] = int(value) elif arg_type == 'option': self.jira_args[arg_name] = {'value': value} # Complex type else: self.jira_args[arg_name] = {'name': value} def get_priorities(self): """ Creates a mapping of priority index to id. """ priorities = self.client.priorities() self.priority_ids = {} for x in range(len(priorities)): self.priority_ids[x] = priorities[x].id def set_assignee(self, assignee): self.assignee = assignee if assignee: self.jira_args['assignee'] = {'name': assignee} elif 'assignee' in self.jira_args: self.jira_args.pop('assignee') def find_existing_ticket(self, matches): # Default title, get stripped search version if 'alert_subject' not in self.rule: title = self.create_default_title(matches, True) else: title = self.create_title(matches) if 'jira_ignore_in_title' in self.rule: title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') # This is necessary for search to work. 
Other special characters and dashes # directly adjacent to words appear to be ok title = title.replace(' - ', ' ') title = title.replace('\\', '\\\\') date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date) if self.bump_in_statuses: jql = '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses)) if self.bump_not_in_statuses: jql = '%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses)) try: issues = self.client.search_issues(jql) except JIRAError as e: logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e)) return None if len(issues): return issues[0] def comment_on_ticket(self, ticket, match): text = unicode(JiraFormattedMatchString(self.rule, match)) timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment = "This alert was triggered again at %s\n%s" % (timestamp, text) self.client.add_comment(ticket, comment) def alert(self, matches): title = self.create_title(matches) if self.bump_tickets: ticket = self.find_existing_ticket(matches) if ticket: inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated) >= inactivity_datetime: if self.pipeline is not None: self.pipeline['jira_ticket'] = None self.pipeline['jira_server'] = self.server return None elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key)) for match in matches: try: self.comment_on_ticket(ticket, match) except JIRAError as e: logging.exception("Error while commenting on ticket %s: %s" % (ticket, e)) if self.pipeline is not None: self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server'] = self.server return None self.jira_args['summary'] = title self.jira_args['description'] = self.create_alert_body(matches) try: self.issue = self.client.create_issue(**self.jira_args) # You can not add watchers on initial creation. Only as a follow-up action if self.watchers: for watcher in self.watchers: try: self.client.add_watcher(self.issue.key, watcher) except Exception as ex: # Re-raise the exception, preserve the stack-trace, and give some # context as to which watcher failed to be added raise Exception( "Exception encountered when trying to add '{0}' as a watcher. 
Does the user exist?\n{1}" .format( watcher, ex )), None, sys.exc_info()[2] except JIRAError as e: raise EAException("Error creating JIRA ticket using jira_args (%s): %s" % (self.jira_args, e)) elastalert_logger.info("Opened Jira ticket: %s" % (self.issue)) if self.pipeline is not None: self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server'] = self.server def create_alert_body(self, matches): body = self.description + '\n' body += self.get_aggregation_summary_text(matches) for match in matches: body += unicode(JiraFormattedMatchString(self.rule, match)) if len(matches) > 1: body += '\n----------------------------------------\n' return body def get_aggregation_summary_text(self, matches): text = super(JiraAlerter, self).get_aggregation_summary_text(matches) if text: text = u'{{noformat}}{0}{{noformat}}'.format(text) return text def create_default_title(self, matches, for_search=False): # If there is a query_key, use that in the title if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) else: title = 'ElastAlert: %s' % (self.rule['name']) if for_search: return title title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) # Add count for spikes count = matches[0].get('spike_count') if count: title += ' - %s+ events' % (count) return title def get_info(self): return {'type': 'jira'} class CommandAlerter(Alerter): required_options = set(['command']) def __init__(self, *args): super(CommandAlerter, self).__init__(*args) self.last_command = [] self.shell = False if isinstance(self.rule['command'], basestring): self.shell = True if '%' in self.rule['command']: logging.warning('Warning! 
You could be vulnerable to shell injection!') self.rule['command'] = [self.rule['command']] self.new_style_string_format = False if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']: self.new_style_string_format = True def alert(self, matches): # Format the command and arguments try: if self.new_style_string_format: command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']] else: command = [command_arg % matches[0] for command_arg in self.rule['command']] self.last_command = command except KeyError as e: raise EAException("Error formatting command: %s" % (e)) # Run command and pipe data try: subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) if self.rule.get('pipe_match_json'): match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n' stdout, stderr = subp.communicate(input=match_json) if self.rule.get("fail_on_non_zero_exit", False) and subp.wait(): raise EAException("Non-zero exit code while running command %s" % (' '.join(command))) except OSError as e: raise EAException("Error while running command %s: %s" % (' '.join(command), e)) def get_info(self): return {'type': 'command', 'command': ' '.join(self.last_command)} class SnsAlerter(Alerter): """ Send alert using AWS SNS service """ required_options = frozenset(['sns_topic_arn']) def __init__(self, *args): super(SnsAlerter, self).__init__(*args) self.sns_topic_arn = self.rule.get('sns_topic_arn', '') self.aws_access_key_id = self.rule.get('aws_access_key_id') self.aws_secret_access_key = self.rule.get('aws_secret_access_key') self.aws_region = self.rule.get('aws_region', 'us-east-1') self.profile = self.rule.get('boto_profile', None) # Deprecated self.profile = self.rule.get('aws_profile', None) def create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) return subject def alert(self, matches): body = self.create_alert_body(matches) session = boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, profile_name=self.profile ) sns_client = session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches) ) elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn)) class HipChatAlerter(Alerter): """ Creates a HipChat room notification for each alert """ required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id']) def __init__(self, rule): super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token = self.rule['hipchat_auth_token'] self.hipchat_room_id = self.rule['hipchat_room_id'] self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify = self.rule.get('hipchat_notify', True) self.hipchat_from = self.rule.get('hipchat_from', '') self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % ( self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy = self.rule.get('hipchat_proxy', None) def alert(self, matches): body = self.create_alert_body(matches) # HipChat sends 400 bad request on messages longer than 10000 characters if (len(body) > 9999): body = body[:9980] + '..(truncated)' # Use appropriate line ending for text/html if self.hipchat_message_format == 'html': 
body = body.replace('\n', '<br />') # Post to HipChat headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None payload = { 'color': self.hipchat_msg_color, 'message': body, 'message_format': self.hipchat_message_format, 'notify': self.hipchat_notify, 'from': self.hipchat_from } try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, verify=not self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException("Error posting to HipChat: %s" % e) elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id) def get_info(self): return {'type': 'hipchat', 'hipchat_room_id': self.hipchat_room_id} class MsTeamsAlerter(Alerter): """ Creates a Microsoft Teams Conversation Message for each alert """ required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def __init__(self, rule): super(MsTeamsAlerter, self).__init__(rule) self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url = [self.ms_teams_webhook_url] self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message') self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') def format_body(self, body): body = body.encode('UTF-8') if self.ms_teams_alert_fixed_width: body = body.replace('`', "'") body = "```{0}```".format('```\n\n```'.join(x for x in body.split('\n'))).replace('\n``````', '') return body def alert(self, matches): body = self.create_alert_body(matches) body = self.format_body(body) # post to Teams headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None payload = { '@type': 'MessageCard', '@context': 'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary, 'title': self.create_title(matches), 'text': body } if self.ms_teams_theme_color != '': payload['themeColor'] = self.ms_teams_theme_color for url in self.ms_teams_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting to ms teams: %s" % e) elastalert_logger.info("Alert sent to MS Teams") def get_info(self): return {'type': 'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url} class SlackAlerter(Alerter): """ Creates a Slack room message for each alert """ required_options = frozenset(['slack_webhook_url']) def __init__(self, rule): super(SlackAlerter, self).__init__(rule) self.slack_webhook_url = self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url = [self.slack_webhook_url] self.slack_proxy = self.rule.get('slack_proxy', None) self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') self.slack_channel_override = self.rule.get('slack_channel_override', '') self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override = self.rule.get('slack_icon_url_override', 
'') self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') self.slack_parse_override = self.rule.get('slack_parse_override', 'none') self.slack_text_string = self.rule.get('slack_text_string', '') def format_body(self, body): # https://api.slack.com/docs/formatting body = body.encode('UTF-8') body = body.replace('&', '&amp;') body = body.replace('<', '&lt;') body = body.replace('>', '&gt;') return body def alert(self, matches): body = self.create_alert_body(matches) body = self.format_body(body) # post to slack headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.slack_proxy} if self.slack_proxy else None payload = { 'username': self.slack_username_override, 'channel': self.slack_channel_override, 'parse': self.slack_parse_override, 'text': self.slack_text_string, 'attachments': [ { 'color': self.slack_msg_color, 'title': self.create_title(matches), 'text': body, 'mrkdwn_in': ['text', 'pretext'], 'fields': [] } ] } if self.slack_icon_url_override != '': payload['icon_url'] = self.slack_icon_url_override else: payload['icon_emoji'] = self.slack_emoji_override for url in self.slack_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting to slack: %s" % e) elastalert_logger.info("Alert sent to Slack") def get_info(self): return {'type': 'slack', 'slack_username_override': self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url} class PagerDutyAlerter(Alerter): """ Create an incident on PagerDuty for each alert """ required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def __init__(self, rule): super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key = self.rule['pagerduty_service_key'] self.pagerduty_client_name = self.rule['pagerduty_client_name'] self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self, matches): body = self.create_alert_body(matches) # post to pagerduty headers = {'content-type': 'application/json'} payload = { 'service_key': self.pagerduty_service_key, 'description': self.create_title(matches), 'event_type': 'trigger', 'incident_key': self.get_incident_key(matches), 'client': self.pagerduty_client_name, 'details': { "information": body.encode('UTF-8'), }, } # set https proxy, if it was provided proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None try: response = requests.post( self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies ) response.raise_for_status() except RequestException as e: raise EAException("Error posting to pagerduty: %s" % e) elastalert_logger.info("Trigger sent to PagerDuty") def get_incident_key(self, matches): if self.pagerduty_incident_key_args: incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args] # Populate values with rule level properties too for i in range(len(incident_key_values)): if incident_key_values[i] is None: key_value = self.rule.get(self.pagerduty_incident_key_args[i]) if key_value: incident_key_values[i] = key_value incident_key_values = ['<MISSING VALUE>' if val is None 
else val for val in incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values) else: return self.pagerduty_incident_key def get_info(self): return {'type': 'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name} class ExotelAlerter(Alerter): required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) def __init__(self, rule): super(ExotelAlerter, self).__init__(rule) self.exotel_account_sid = self.rule['exotel_account_sid'] self.exotel_auth_token = self.rule['exotel_auth_token'] self.exotel_to_number = self.rule['exotel_to_number'] self.exotel_from_number = self.rule['exotel_from_number'] self.sms_body = self.rule.get('exotel_message_body', '') def alert(self, matches): client = Exotel(self.exotel_account_sid, self.exotel_auth_token) try: message_body = self.rule['name'] + self.sms_body response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) if response != 200: raise EAException("Error posting to Exotel, response code is %s" % response) except: raise EAException("Error posting to Exotel"), None, sys.exc_info()[2] elastalert_logger.info("Trigger sent to Exotel") def get_info(self): return {'type': 'exotel', 'exotel_account': self.exotel_account_sid} class TwilioAlerter(Alerter): required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) def __init__(self, rule): super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid = self.rule['twilio_account_sid'] self.twilio_auth_token = self.rule['twilio_auth_token'] self.twilio_to_number = self.rule['twilio_to_number'] self.twilio_from_number = self.rule['twilio_from_number'] def alert(self, matches): client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number) except TwilioRestException as e: raise EAException("Error posting to twilio: %s" % e) elastalert_logger.info("Trigger sent to Twilio") def get_info(self): return {'type': 'twilio', 'twilio_client_name': self.twilio_from_number} class VictorOpsAlerter(Alerter): """ Creates a VictorOps Incident for each alert """ required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) def __init__(self, rule): super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key = self.rule['victorops_api_key'] self.victorops_routing_key = self.rule['victorops_routing_key'] self.victorops_message_type = self.rule['victorops_message_type'] self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name') self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( self.victorops_api_key, self.victorops_routing_key) self.victorops_proxy = self.rule.get('victorops_proxy', None) def alert(self, matches): body = self.create_alert_body(matches) # post to victorops headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None payload = { "message_type": self.victorops_message_type, "entity_display_name": self.victorops_entity_display_name, "monitoring_tool": "ElastAlert", "state_message": body } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise 
EAException("Error posting to VictorOps: %s" % e) elastalert_logger.info("Trigger sent to VictorOps") def get_info(self): return {'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key} class TelegramAlerter(Alerter): """ Send a Telegram message via bot api for each alert """ required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) def __init__(self, rule): super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token = self.rule['telegram_bot_token'] self.telegram_room_id = self.rule['telegram_room_id'] self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage") self.telegram_proxy = self.rule.get('telegram_proxy', None) def alert(self, matches): body = u'⚠ *%s* ⚠ ```\n' % (self.create_title(matches)) for match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with dashes if len(matches) > 1: body += '\n----------------------------------------\n' body += u' ```' headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None payload = { 'chat_id': self.telegram_room_id, 'text': body, 'parse_mode': 'markdown', 'disable_web_page_preview': True } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException("Error posting to Telegram: %s" % e) elastalert_logger.info( "Alert sent to Telegram room %s" % self.telegram_room_id) def get_info(self): return {'type': 'telegram', 'telegram_room_id': self.telegram_room_id} class GitterAlerter(Alerter): """ Creates a Gitter activity message for each alert """ required_options = frozenset(['gitter_webhook_url']) def __init__(self, rule): super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url = self.rule['gitter_webhook_url'] self.gitter_proxy = self.rule.get('gitter_proxy', None) self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error') def alert(self, matches): body = self.create_alert_body(matches) # post to Gitter headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None payload = { 'message': body, 'level': self.gitter_msg_level } try: response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting to Gitter: %s" % e) elastalert_logger.info("Alert sent to Gitter") def get_info(self): return {'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url} class ServiceNowAlerter(Alerter): """ Creates a ServiceNow alert """ required_options = set([ 'username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id' ]) def __init__(self, rule): super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url = self.rule['servicenow_rest_url'] self.servicenow_proxy = self.rule.get('servicenow_proxy', None) def alert(self, matches): for match in matches: # Parse everything into description. 
description = str(BasicMatchString(self.rule, match)) # Set proper headers headers = { "Content-Type": "application/json", "Accept": "application/json;charset=utf-8" } proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None payload = { "description": description, "short_description": self.rule['short_description'], "comments": self.rule['comments'], "assignment_group": self.rule['assignment_group'], "category": self.rule['category'], "subcategory": self.rule['subcategory'], "cmdb_ci": self.rule['cmdb_ci'], "caller_id": self.rule["caller_id"] } try: response = requests.post( self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies ) response.raise_for_status() except RequestException as e: raise EAException("Error posting to ServiceNow: %s" % e) elastalert_logger.info("Alert sent to ServiceNow") def get_info(self): return {'type': 'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url} class HTTPPostAlerter(Alerter): """ Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. """ def __init__(self, rule): super(HTTPPostAlerter, self).__init__(rule) post_url = self.rule.get('http_post_url') if isinstance(post_url, basestring): post_url = [post_url] self.post_url = post_url self.post_proxy = self.rule.get('http_post_proxy') self.post_payload = self.rule.get('http_post_payload', {}) self.post_static_payload = self.rule.get('http_post_static_payload', {}) self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload) def alert(self, matches): """ Each match will trigger a POST to the specified endpoint(s). """ for match in matches: payload = match if self.post_all_values else {} payload.update(self.post_static_payload) for post_key, es_key in self.post_payload.items(): payload[post_key] = lookup_es_key(match, es_key) headers = { "Content-Type": "application/json", "Accept": "application/json;charset=utf-8" } proxies = {'https': self.post_proxy} if self.post_proxy else None for url in self.post_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting HTTP Post alert: %s" % e) elastalert_logger.info("HTTP Post alert sent.") def get_info(self): return {'type': 'http_post', 'http_post_webhook_url': self.post_url}
2.1875
2
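The elastalert/alerts.py content in the record above centres on the Alerter base class contract: subclasses declare required_options, implement alert(matches) and get_info(), and can reuse helpers such as create_alert_body(). A minimal sketch of that extension pattern follows, written in the same Python 2 idiom as the file; the FileWriterAlerter name and the file_output_path rule option are illustrative assumptions, not part of the original source.

# Minimal sketch of a custom alerter following the Alerter contract above.
# FileWriterAlerter and the 'file_output_path' rule option are hypothetical,
# used only to illustrate the required_options / alert / get_info pattern.
class FileWriterAlerter(Alerter):
    """ Appends each alert body to a local file (illustrative example only). """
    required_options = frozenset(['file_output_path'])

    def __init__(self, rule):
        super(FileWriterAlerter, self).__init__(rule)
        self.output_path = self.rule['file_output_path']

    def alert(self, matches):
        # create_alert_body() renders every match through BasicMatchString
        # and separates aggregated matches with dashes.
        body = self.create_alert_body(matches)
        try:
            with open(self.output_path, 'a') as handle:
                handle.write(body.encode('UTF-8'))
        except (IOError, OSError) as e:
            raise EAException("Error writing alert to %s: %s" % (self.output_path, e))
        elastalert_logger.info("Alert appended to %s" % self.output_path)

    def get_info(self):
        return {'type': 'file_writer', 'file_output_path': self.output_path}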
tests/unit/python/fledge/services/core/scheduler/test_scheduler.py
DDC-NDRS/fledge-iot_fledge
69
1473
<filename>tests/unit/python/fledge/services/core/scheduler/test_scheduler.py<gh_stars>10-100 # -*- coding: utf-8 -*- # FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END import asyncio import datetime import uuid import time import json from unittest.mock import MagicMock, call import sys import copy import pytest from fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger, ConfigurationManager from fledge.services.core.scheduler.entities import * from fledge.services.core.scheduler.exceptions import * from fledge.common.storage_client.storage_client import StorageClientAsync __author__ = "<NAME>" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" async def mock_task(): return "" async def mock_process(): m = MagicMock() m.pid = 9999 m.terminate = lambda: True return m @pytest.allure.feature("unit") @pytest.allure.story("scheduler") class TestScheduler: async def scheduler_fixture(self, mocker): # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv = await mock_process() else: _rv = asyncio.ensure_future(mock_process()) scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_paused', False) mocker.patch.object(scheduler, '_process_scripts', return_value="North Readings to PI") mocker.patch.object(scheduler, '_wait_for_task_completion', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_terminate_child_processes') mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34"), process_name="North Readings to PI", name="OMF to PI north", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) log_exception = mocker.patch.object(scheduler._logger, "exception") log_error = mocker.patch.object(scheduler._logger, "error") log_debug = mocker.patch.object(scheduler._logger, "debug") log_info = mocker.patch.object(scheduler._logger, "info") return scheduler, schedule, log_info, log_exception, log_error, log_debug @pytest.mark.asyncio async def test__resume_check_schedules(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN # Check IF part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is False # WHEN # Check ELSE part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is True @pytest.mark.asyncio async def test__wait_for_task_completion(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = 
mocker.patch.object(scheduler._logger, "info") mock_schedules = dict() mock_schedule = scheduler._ScheduleRow( id=uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34"), process_name="North Readings to PI", name="OMF to PI north", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mock_schedules[mock_schedule.id] = mock_schedule mock_task_process = scheduler._TaskProcess() mock_task_processes = dict() mock_task_process.process = await asyncio.create_subprocess_exec("sleep", ".1") mock_task_process.schedule = mock_schedule mock_task_id = uuid.uuid4() mock_task_process.task_id = mock_task_id mock_task_processes[mock_task_process.task_id] = mock_task_process mock_schedule_executions = dict() mock_schedule_execution = scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id] = mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_schedule_next_task') mocker.patch.multiple(scheduler, _schedules=mock_schedules, _task_processes=mock_task_processes, _schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler, '_process_scripts', return_value="North Readings to PI") # WHEN await scheduler._wait_for_task_completion(mock_task_process) # THEN # After task completion, sleep above, no task processes should be left pending assert 0 == len(scheduler._task_processes) assert 0 == len(scheduler._schedule_executions[mock_schedule.id].task_processes) args, kwargs = log_info.call_args_list[0] assert 'OMF to PI north' in args assert 'North Readings to PI' in args @pytest.mark.asyncio async def test__start_task(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34"), process_name="North Readings to PI", name="OMF to PI north", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that there is no task queued for mock_schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that the task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv = await mock_process() else: _rv = asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_process_scripts', return_value="North Readings to PI") mocker.patch.object(scheduler, '_wait_for_task_completion') # Confirm that task has not started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN await scheduler._start_task(schedule) # THEN # Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) assert 1 == log_info.call_count # assert call("Queued schedule '%s' for execution", 'OMF to PI north') == log_info.call_args_list[0] args, kwargs = log_info.call_args_list[0] assert "Process started: Schedule '%s' process '%s' task %s pid %s, %s running tasks\n%s" in args assert 'OMF to PI north' in args assert 'North Readings to PI' in args @pytest.mark.asyncio async def test_purge_tasks(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _ready=True, _paused=False) mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now()) # WHEN await scheduler.purge_tasks() # THEN assert scheduler._purge_tasks_task is None assert scheduler._last_task_purge_time is not None @pytest.mark.asyncio async def test__check_purge_tasks(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _purge_tasks_task=None, _last_task_purge_time=None) mocker.patch.object(scheduler, 'purge_tasks', return_value=asyncio.ensure_future(mock_task())) # WHEN scheduler._check_purge_tasks() # THEN assert scheduler._purge_tasks_task is not None @pytest.mark.asyncio async def test__check_schedules(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() mocker.patch.object(scheduler, '_start_task', return_value=asyncio.ensure_future(mock_task())) # WHEN earliest_start_time = await scheduler._check_schedules() # THEN assert earliest_start_time is not None assert 3 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] assert 'stats collection' in args0 assert 'COAP listener south' in args1 assert 'OMF to PI north' in args2 @pytest.mark.asyncio @pytest.mark.skip("_scheduler_loop() not suitable for unit testing. 
Will be tested during System tests.") async def test__scheduler_loop(self, mocker): pass @pytest.mark.asyncio async def test__schedule_next_timed_task(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID("2176eb68-7303-11e7-8cf7-a6006ad3dba0") # stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt += datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch, sch_execution, next_dt) time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time_before_call assert 3 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] assert 'stats collection' in args0 assert 'COAP listener south' in args1 assert 'OMF to PI north' in args2 @pytest.mark.asyncio async def test__schedule_next_task(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time-3600) await scheduler._get_schedules() sch_id = uuid.UUID("2176eb68-7303-11e7-8cf7-a6006ad3dba0") # stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN scheduler._schedule_next_task(sch) time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time_before_call assert 4 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] args3, kwargs3 = log_info.call_args_list[3] assert 'stats collection' in args0 assert 'COAP listener south' in args1 assert 'OMF to PI north' in args2 # As part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence # "stat collector" appears twice in this list. 
assert 'stats collection' in args3 @pytest.mark.asyncio async def test__schedule_first_task(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") current_time = time.time() curr_time = datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID("2176eb68-7303-11e7-8cf7-a6006ad3dba0") # stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] # WHEN scheduler._schedule_first_task(sch, current_time) time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time.mktime(curr_time.timetuple()) assert 4 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] args3, kwargs3 = log_info.call_args_list[3] assert 'stats collection' in args0 assert 'COAP listener south' in args1 assert 'OMF to PI north' in args2 # As part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence # "stat collector" appears twice in this list. assert 'stats collection' in args3 @pytest.mark.asyncio async def test__get_process_scripts(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN await scheduler._get_process_scripts() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) @pytest.mark.asyncio async def test__get_process_scripts_exception(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, "debug", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, "exception") # WHEN # THEN with pytest.raises(Exception): await scheduler._get_process_scripts() log_args = 'Query failed: %s', 'scheduled_processes' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio @pytest.mark.parametrize("test_interval, is_exception", [ ('"Blah" 0 days', True), ('12:30:11', False), ('0 day 12:30:11', False), ('1 day 12:40:11', False), ('2 days', True), ('2 days 00:00:59', False), ('00:25:61', True) ]) async def test__get_schedules(self, test_interval, is_exception, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') log_exception = mocker.patch.object(scheduler._logger, "exception") new_schedules = copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval'] = test_interval mocker.patch.object(MockStorageAsync, 'schedules', new_schedules) # WHEN # THEN if is_exception is True: with pytest.raises(Exception): await scheduler._get_schedules() assert 1 == log_exception.call_count else: await 
scheduler._get_schedules() assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio async def test__get_schedules_exception(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, "debug", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, "exception") mocker.patch.object(scheduler, '_schedule_first_task', side_effect=Exception()) # WHEN # THEN with pytest.raises(Exception): await scheduler._get_schedules() log_args = 'Query failed: %s', 'schedules' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio async def test__read_storage(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # WHEN await scheduler._read_storage() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio @pytest.mark.skip("_mark_tasks_interrupted() not implemented in main Scheduler class.") async def test__mark_tasks_interrupted(self, mocker): pass @pytest.mark.asyncio async def test__read_config(self, mocker): async def get_cat(): return { "max_running_tasks": { "description": "The maximum number of tasks that can be running at any given time", "type": "integer", "default": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), "value": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) }, "max_completed_task_age_days": { "description": "The maximum age, in days (based on the start time), for a rows " "in the tasks table that do not have a status of running", "type": "integer", "default": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), "value": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) }, } # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv = await get_cat() else: _rv = asyncio.ensure_future(get_cat()) # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) cr_cat = mocker.patch.object(ConfigurationManager, "create_category", return_value=asyncio.ensure_future(mock_task())) get_cat = mocker.patch.object(ConfigurationManager, "get_category_all_items", return_value=_rv) # WHEN assert scheduler._max_running_tasks is None assert scheduler._max_completed_task_age is None await scheduler._read_config() # THEN assert 1 == cr_cat.call_count assert 1 == get_cat.call_count assert scheduler._max_running_tasks is not None assert scheduler._max_completed_task_age is not None @pytest.mark.asyncio async def test_start(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, "debug") log_info = mocker.patch.object(scheduler._logger, "info") current_time = time.time() mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host="0.0.0.0", current_time=current_time - 3600) # TODO: Remove after implementation of above test test__read_config() mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task())) assert scheduler._ready is False # WHEN await scheduler.start() # THEN assert scheduler._ready is True assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) calls = [call('Starting'), call('Starting Scheduler: Management port received is %d', 9999)] log_info.assert_has_calls(calls, any_order=True) calls = [call('Database command: %s', 'scheduled_processes'), call('Database command: %s', 'schedules')] log_debug.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_stop(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") log_exception = mocker.patch.object(scheduler._logger, "exception") mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time = time.time() mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host="0.0.0.0", _start_time=current_time - 3600, _paused=False, _task_processes={}) # WHEN retval = await scheduler.stop() # THEN assert retval is True assert scheduler._schedule_executions is None assert 
scheduler._task_processes is None assert scheduler._schedules is None assert scheduler._process_scripts is None assert scheduler._ready is False assert scheduler._paused is False assert scheduler._start_time is None calls = [call('Processing stop request'), call('Stopped')] log_info.assert_has_calls(calls, any_order=True) # TODO: Find why these exceptions are being raised despite mocking _purge_tasks_task, _scheduler_loop_task calls = [call('An exception was raised by Scheduler._purge_tasks %s', "object MagicMock can't be used in 'await' expression"), call('An exception was raised by Scheduler._scheduler_loop %s', "object MagicMock can't be used in 'await' expression")] log_exception.assert_has_calls(calls) @pytest.mark.asyncio async def test_get_scheduled_processes(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) await scheduler._get_process_scripts() mocker.patch.object(scheduler, '_ready', True) # WHEN processes = await scheduler.get_scheduled_processes() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(processes) @pytest.mark.asyncio async def test_schedule_row_to_schedule(self, mocker): # GIVEN scheduler = Scheduler() schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=10, repeat_seconds=10, exclusive=False, enabled=True, process_name='TestProcess') # WHEN schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_row[0] assert schedule.name == schedule_row[1] assert schedule.schedule_type == schedule_row[2] assert schedule_row[3] is 0 # 0 for Interval Schedule assert schedule_row[4] is 0 # 0 for Interval Schedule assert schedule.repeat == schedule_row[5] assert schedule.exclusive == schedule_row[7] assert schedule.enabled == schedule_row[8] assert schedule.process_name == schedule_row[9] @pytest.mark.asyncio async def test_get_schedules(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN schedules = await scheduler.get_schedules() # THEN assert len(scheduler._storage_async.schedules) == len(schedules) @pytest.mark.asyncio async def test_get_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.UUID("cea17db8-6ccc-11e7-907b-a6006ad3dba0") # purge schedule # WHEN schedule = await scheduler.get_schedule(schedule_id) # THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_id assert schedule.name == "purge" assert schedule.schedule_type == Schedule.Type.MANUAL assert schedule.repeat == datetime.timedelta(0, 3600) assert schedule.exclusive is True assert schedule.enabled is True assert schedule.process_name == "purge" @pytest.mark.asyncio async def test_get_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() # WHEN # THEN with pytest.raises(ScheduleNotFoundError): schedule = await scheduler.get_schedule(schedule_id) @pytest.mark.asyncio async def test_save_schedule_new(self, mocker): @asyncio.coroutine def mock_coro(): return "" # GIVEN scheduler, schedule, 
log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, "info") enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro()) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules) assert 1 == audit_logger.call_count calls =[call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess', 'type': Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True, 'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert 0 == enable_schedule.call_count assert 0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_new_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return "" # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, "info") enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro()) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules) assert 1 == audit_logger.call_count calls =[call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess', 'type': Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True, 'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert 1 == enable_schedule.call_count assert 0 == disable_schedule.call_count # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update(self, mocker): @asyncio.coroutine def mock_coro(): return "" # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', 
return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, "info") schedule_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, 'exclusive': False, 'day': 1, 'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert 0 == enable_schedule.call_count assert 0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return "" # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, "info") schedule_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, 'exclusive': False, 'day': 1, 'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert 1 == enable_schedule.call_count assert 0 == disable_schedule.call_count # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( 
id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') # WHEN # THEN with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith("name can not be empty") with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = "" await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith("name can not be empty") with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.repeat = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('repeat must be of type datetime.timedelta') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.exclusive = None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('exclusive can not be None') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.time = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('time must be of type datetime.time') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.day = 0 temp_schedule.time = datetime.time() await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('day must be between 1 and 7') @pytest.mark.asyncio @pytest.mark.skip(reason="To be done") async def test_remove_service_from_task_processes(self): pass @pytest.mark.asyncio async def test_disable_schedule(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) log_info = mocker.patch.object(scheduler._logger, "info") sch_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North # WHEN status, message = await scheduler.disable_schedule(sch_id) # THEN assert status is True assert message == "Schedule successfully disabled" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False assert 2 == log_info.call_count calls = [call('No Task running for Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'), call("Disabled Schedule '%s/%s' process '%s'\n", 'OMF to PI north', '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to PI')] log_info.assert_has_calls(calls) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'OMF to PI north', 'repeat': 30.0, 'enabled': False, 'type': Schedule.Type.INTERVAL, 'exclusive': True, 'processName': 'North Readings to PI'}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def 
test_disable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_exception = mocker.patch.object(scheduler._logger, "exception") random_schedule_id = uuid.uuid4() # WHEN await scheduler.disable_schedule(random_schedule_id) # THEN log_params = "No such Schedule %s", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_disable_schedule_already_disabled(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_info = mocker.patch.object(scheduler._logger, "info") sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup # WHEN status, message = await scheduler.disable_schedule(sch_id) # THEN assert status is True assert message == "Schedule {} already disabled".format(str(sch_id)) assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False log_params = "Schedule %s already disabled", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup queue_task = mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await scheduler.enable_schedule(sch_id) # THEN assert status is True assert message == "Schedule successfully enabled" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True assert 1 == queue_task.call_count calls = [call("Enabled Schedule '%s/%s' process '%s'\n", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] log_info.assert_has_calls(calls, any_order=True) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive': True, 'repeat': 3600.0, 'enabled': True}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_enable_schedule_already_enabled(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID("ada12840-68d3-11e7-907b-a6006ad3dba0") #Coap mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await scheduler.enable_schedule(sch_id) # THEN assert status is True assert message == "Schedule is already enabled" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True log_params = "Schedule %s already enabled", 
str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) random_schedule_id = uuid.uuid4() # WHEN await scheduler.enable_schedule(random_schedule_id) # THEN log_params = "No such Schedule %s", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # log_info = mocker.patch.object(scheduler._logger, "info") await scheduler._get_schedules() sch_id = uuid.UUID("cea17db8-6ccc-11e7-907b-a6006ad3dba0") # backup mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that there is no task queued for this schedule at first with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[sch_id] is True # WHEN await scheduler.queue_task(sch_id) # THEN assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution) # log_params = "Queued schedule '%s' for execution", 'purge' # log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task_schedule_not_found(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup await scheduler._get_schedules() # Confirm no. of schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) mocker.patch.object(scheduler, '_ready', True) # WHEN # Now delete schedule await scheduler.delete_schedule(sch_id) # THEN # Now confirm there is one schedule less assert len(scheduler._storage_async.schedules) - 1 == len(scheduler._schedules) @pytest.mark.asyncio async def test_delete_schedule_enabled_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID("ada12840-68d3-11e7-907b-a6006ad3dba0") #Coap await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) # Confirm there are 14 schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) # WHEN # Now delete schedule with pytest.raises(RuntimeWarning): await scheduler.delete_schedule(sch_id) # THEN # Now confirm no schedule is deleted assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == log_exception.call_count log_params = 'Attempt to delete an enabled Schedule %s. 
Not deleted.', str(sch_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_delete_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule_not_found(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_get_running_tasks(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that there is no task queued for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that the task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN tasks = await scheduler.get_running_tasks() # THEN assert 1 == len(tasks) assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is None assert tasks[0].state == Task.State.RUNNING assert tasks[0].cancel_requested is None assert tasks[0].start_time is not None assert tasks[0].end_time is None assert tasks[0].exit_code is None @pytest.mark.asyncio async def test_get_task(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that there is no North task queued for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that the North task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN task = await scheduler.get_task(task_id) # THEN assert schedule.process_name == task.process_name assert task.reason is '' assert task.state is not None assert task.cancel_requested is None assert task.start_time is not None assert task.end_time is not None assert task.exit_code is '0' @pytest.mark.skip("Need a suitable fixture") @pytest.mark.asyncio async def test_get_task_not_found(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotFoundError) as excinfo: tasks = await scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio async def test_get_task_exception(self, 
mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN # THEN task_id = uuid.uuid4() with pytest.raises(Exception) as excinfo: await scheduler.get_task(task_id) # THEN payload = {"return": ["id", "process_name", "schedule_name", "state", {"alias": "start_time", "format": "YYYY-MM-DD HH24:MI:SS.MS", "column": "start_time"}, {"alias": "end_time", "format": "YYYY-MM-DD HH24:MI:SS.MS", "column": "end_time"}, "reason", "exit_code"], "where": {"column": "id", "condition": "=", "value": str(task_id)}} args, kwargs = log_exception.call_args assert 'Query failed: %s' == args[0] p = json.loads(args[1]) assert payload == p @pytest.mark.asyncio async def test_get_tasks(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that there is no North task queued for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that the North task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN tasks = await scheduler.get_tasks() # THEN assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is '' assert tasks[0].state is not None assert tasks[0].cancel_requested is None assert tasks[0].start_time is not None assert tasks[0].end_time is not None assert tasks[0].exit_code is '0' @pytest.mark.asyncio async def test_get_tasks_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN with pytest.raises(Exception) as excinfo: tasks = await scheduler.get_tasks() # THEN payload = {"return": ["id", "process_name", "schedule_name", "state", {"alias": "start_time", "column": "start_time", "format": "YYYY-MM-DD HH24:MI:SS.MS"}, {"alias": "end_time", "column": "end_time", "format": "YYYY-MM-DD HH24:MI:SS.MS"}, "reason", "exit_code"], "limit": 100} args, kwargs = log_exception.call_args assert 'Query failed: %s' == args[0] p = json.loads(args[1]) assert payload == p @pytest.mark.asyncio async def test_cancel_task_all_ok(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that there is no task queued for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that the task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = 
list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # Confirm that cancel request has not been made assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None # WHEN await scheduler.cancel_task(task_id) # THEN assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not None assert 2 == log_info.call_count # args, kwargs = log_info.call_args_list[0] # assert ("Queued schedule '%s' for execution", 'OMF to PI north') == args args, kwargs = log_info.call_args_list[0] assert "Process started: Schedule '%s' process '%s' task %s pid %s, %s running tasks\n%s" in args assert 'OMF to PI north' in args assert 'North Readings to PI' in args args, kwargs = log_info.call_args_list[1] assert "Stopping process: Schedule '%s' process '%s' task %s pid %s\n%s" in args assert 'OMF to PI north' in args assert 'North Readings to PI' in args @pytest.mark.asyncio async def test_cancel_task_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotRunningError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio async def test_not_ready_and_paused(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) mocker.patch.object(scheduler, '_ready', False) mocker.patch.object(scheduler, '_paused', True) # WHEN # THEN with pytest.raises(NotReadyError) as excinfo: await scheduler.start() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_scheduled_processes() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedules() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with pytest.raises(NotReadyError) as excinfo: await scheduler.disable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.enable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.queue_task(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.get_running_tasks() with pytest.raises(NotReadyError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip("_terminate_child_processes() not fit for unit test.") @pytest.mark.asyncio async def test__terminate_child_processes(self, mocker): pass class MockStorage(StorageClientAsync): def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def _get_storage_service(self, host, port): return { "id": uuid.uuid4(), "name": "Fledge Storage", "type": "Storage", "service_port": 9999, "management_port": 9999, "address": "0.0.0.0", "protocol": "http" } class MockStorageAsync(StorageClientAsync): schedules = [ { "id": "cea17db8-6ccc-11e7-907b-a6006ad3dba0", "process_name": "purge", "schedule_name": "purge", "schedule_type": 4, "schedule_interval": "01:00:00", "schedule_time": "", "schedule_day": 0, "exclusive": "t", "enabled": "t" }, { "id": "2176eb68-7303-11e7-8cf7-a6006ad3dba0", "process_name": "stats collector", "schedule_name": "stats collection", "schedule_type": 2, "schedule_interval": "00:00:15", "schedule_time": "00:00:15", "schedule_day": 3, "exclusive": 
"f", "enabled": "t" }, { "id": "d1631422-9ec6-11e7-abc4-cec278b6b50a", "process_name": "backup", "schedule_name": "backup hourly", "schedule_type": 3, "schedule_interval": "01:00:00", "schedule_time": "", "schedule_day": 0, "exclusive": "t", "enabled": "f" }, { "id": "ada12840-68d3-11e7-907b-a6006ad3dba0", "process_name": "COAP", "schedule_name": "COAP listener south", "schedule_type": 1, "schedule_interval": "00:00:00", "schedule_time": "", "schedule_day": 0, "exclusive": "t", "enabled": "t" }, { "id": "2b614d26-760f-11e7-b5a5-be2e44b06b34", "process_name": "North Readings to PI", "schedule_name": "OMF to PI north", "schedule_type": 3, "schedule_interval": "00:00:30", "schedule_time": "", "schedule_day": 0, "exclusive": "t", "enabled": "t" }, { "id": "5d7fed92-fb9a-11e7-8c3f-9a214cf093ae", "process_name": "North Readings to OCS", "schedule_name": "OMF to OCS north", "schedule_type": 3, "schedule_interval": "1 day 00:00:40", "schedule_time": "", "schedule_day": 0, "exclusive": "t", "enabled": "f" }, ] scheduled_processes = [ { "name": "purge", "script": [ "tasks/purge" ] }, { "name": "stats collector", "script": [ "tasks/statistics" ] }, { "name": "backup", "script": [ "tasks/backup_postgres" ] }, { "name": "COAP", "script": [ "services/south" ] }, { "name": "North Readings to PI", "script": [ "tasks/north", "--stream_id", "1", "--debug_level", "1" ] }, { "name": "North Readings to OCS", "script": [ "tasks/north", "--stream_id", "4", "--debug_level", "1" ] }, ] tasks = [ { "id": "259b8570-65c1-4b92-8c62-e9642631a600", "process_name": "North Readings to PI", "state": 1, "start_time": "2018-02-06 13:28:14.477868", "end_time": "2018-02-06 13:28:14.856375", "exit_code": "0", "reason": "" } ] def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def _get_storage_service(self, host, port): return { "id": uuid.uuid4(), "name": "Fledge Storage", "type": "Storage", "service_port": 9999, "management_port": 9999, "address": "0.0.0.0", "protocol": "http" } @classmethod async def insert_into_tbl(cls, table_name, payload): pass @classmethod async def update_tbl(cls, table_name, payload): # Only valid for test_save_schedule_update if table_name == "schedules": return {"count": 1} @classmethod async def delete_from_tbl(cls, table_name, condition=None): pass @classmethod async def query_tbl_with_payload(cls, table_name, query_payload): if table_name == 'tasks': return { "count": len(MockStorageAsync.tasks), "rows": MockStorageAsync.tasks } @classmethod async def query_tbl(cls, table_name, query=None): if table_name == 'schedules': return { "count": len(MockStorageAsync.schedules), "rows": MockStorageAsync.schedules } if table_name == 'scheduled_processes': return { "count": len(MockStorageAsync.scheduled_processes), "rows": MockStorageAsync.scheduled_processes }
1.984375
2
problem_solving/python/algorithms/greedy/marcs_cakewalk.py
kcc3/hackerrank-solutions
0
1474
def marcs_cakewalk(calorie):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/marcs-cakewalk/problem

    Marc loves cupcakes, but he also likes to stay fit. Each cupcake has a calorie count, and Marc
    can walk a distance to expend those calories. If Marc has eaten j cupcakes so far, after eating
    a cupcake with c calories he must walk at least 2**j * c miles to maintain his weight.

    Solve:
    Eat the cupcakes from highest to lowest calorie count, so the largest calorie values are paired
    with the smallest 2**i multipliers.

    Args:
        calorie (list): List of integers denoting the calories for each cupcake

    Returns:
        int: The minimum number of miles Marc must walk to maintain his weight
    """
    calories = 0
    for i, c in enumerate(sorted(calorie, reverse=True)):
        calories += (2 ** i * c)
    return calories


if __name__ == "__main__":
    assert marcs_cakewalk([5, 10, 7]) == 44
    assert marcs_cakewalk([1, 3, 2]) == 11
    assert marcs_cakewalk([7, 4, 9, 6]) == 79
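# [Illustrative note, not part of the original solution]
# A quick worked check of the greedy ordering for the last assertion above:
# calorie = [7, 4, 9, 6] sorted descending gives [9, 7, 6, 4], so the total is
#   (2**0)*9 + (2**1)*7 + (2**2)*6 + (2**3)*4 = 9 + 14 + 24 + 32 = 79,
# which is why pairing the largest calorie counts with the smallest powers of two
# minimises the miles walked.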
4.21875
4
coronaindiatracker/coronatracker/views.py
ankitgoswami23/CoronaIndiaTracker
2
1475
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup


def corona_data(request):
    """Scrape India's COVID-19 statistics from mygov.in and render them state-wise."""
    corona_html = requests.get("https://www.mygov.in/covid-19")
    soup = BeautifulSoup(corona_html.content, 'html.parser')
    state_wise_data = soup.find_all('div', class_='views-row')
    information = soup.find('div', class_='information_row')
    info = {
        'update_data': information.find('div', class_='info_title').find('span').string,
        'active_case': information.find('div', class_='active-case').find('span', class_='icount').string,
        'discharge': information.find('div', class_='discharge').find('span', class_='icount').string,
        'death': information.find('div', class_='death_case').find('span', class_='icount').string
    }
    corona_info = [
        {
            "state_name": state.find_all('span', class_='st_name')[0].string,
            "confirm_case": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string,
            "active_case": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string,
            "discharge": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string,
            "death": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string
        }
        for state in state_wise_data
    ]
    context = {
        'corona_info': info,
        # Sort states by confirmed cases (strip thousands separators before casting).
        'data': sorted(corona_info, key=lambda i: int(i['confirm_case'].replace(',', '')), reverse=True)
    }
    return render(request, 'coronainfo/index.html', context)
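# [Illustrative note, not part of the original file; module and route names are assumptions]
# A minimal URL configuration wiring this view up might look like:
#
#   # coronatracker/urls.py (hypothetical)
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.corona_data, name='corona_data'),
#   ]
#
# Note that the view scrapes mygov.in on every request; caching the parsed results
# (for example with django.core.cache) would avoid refetching the page each time.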
2.390625
2
compare.py
geohackweek/ghw2019_wiggles
3
1476
# Script tests GPD model using UW truth data
# Test outputs:
#   - type of event tested [EQS, EQP, SUS, SUP, THS, THP, SNS, SNP, PXS, PXP]
#   - phase [P, S, N] Note: N - not detected
#   - model time offset (t_truth - t_model_pick)

import numpy
import math
import string
import datetime
import sys
import os
import csv
from datetime import datetime
from datetime import timedelta

# params
padding_time = 10
fudge_factor = timedelta(seconds=27)
time_diff = timedelta(seconds=10)

# file dirs
parsed_arrivals = []
model_in = []
model_out = []
comp_out = []
for etype in ['EQS', 'EQP', 'SUS', 'SUP', 'THS', 'THP', 'SNS', 'SNP', 'PXS', 'PXP']:
    arrival = "parsed_arrivals/" + etype + ".arrivals.txt"
    infile = "input_files/GPD." + etype + ".in"
    outfile = "output_files/GPD." + etype + ".out"
    parsed_arrivals.append(arrival)
    model_in.append(infile)
    model_out.append(outfile)
    comp_out.append("comparison_out/comp." + etype + ".out")
# ------------------


# read in UW arrival times as an array
def read_arrivals_to_arr(filename):
    model_list = []
    with open(filename) as f:
        for ln in f:
            row = ln.split()
            line = []
            line.extend([row[0].strip(), row[1].strip(), row[2].strip()])
            formatted_time = datetime.strptime(row[3], "%Y-%m-%dT%H:%M:%S.%f") - fudge_factor
            line.extend([formatted_time, row[4].strip(), row[5].strip()])
            model_list.append(line)
    return model_list


def arrivals_to_dictionary(arrivals):
    picks = {}
    for arr in arrivals:
        key = datetime.strftime(arr[3], "%Y-%m-%dT%H:%M:%S.%f")
        key = key[0:-7]
        picks[key] = arr
    return picks


def model_in_to_array(file):
    timestamps = []
    with open(file) as f:
        for ln in f:
            entry = ln.split()
            entry = entry[0].strip()
            entry = entry[len(entry)-20:len(entry)-6]
            entry = entry[0:4] + "-" + entry[4:6] + "-" + entry[6:8] + "T" + entry[8:10] + ":" + entry[10:12] + ":" + entry[12:14]

            # ------------- TIME STAMP ISSUES --------------------
            # case 1: run if .mseed files have correct timestamps
            """
            time = datetime.strptime(entry, "%Y-%m-%dT%H:%M:%S") - fudge_factor  # + time_diff (might need to add this)
            time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
            """
            # case 2: run if .mseed files have buggy minutes in the timestamps
            time = datetime.strptime(entry, "%Y-%m-%dT%H:%M:%S")
            if time.second >= 37 and time.second <= 51:
                time = time + timedelta(seconds=23) + time_diff
                time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
            else:
                sec_int = time.second + 23
                if sec_int > 59:
                    sec_int = sec_int - 60
                sec_int = str(sec_int).zfill(2)
                time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
                time = time[:-2] + sec_int
                time = datetime.strptime(time, "%Y-%m-%dT%H:%M:%S") + time_diff
                time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
            # -----------------------------------------------------
            timestamps.append(time)
    return timestamps


def filter_times(arrivals, model_in):
    filtered = []
    for key in model_in:
        if key in arrivals:
            filtered.append(arrivals[key])
    return filtered


# read in Caltech model output and create a dictionary
def read_output_to_dict(filename):
    model_dict = {}
    with open(filename) as f:
        for line in f:
            tmp = line.split()
            key = tmp[0] + "-" + tmp[1] + "-" + tmp[2]
            try:  # fails if date is missing floating point numbers
                formatted_time = datetime.strptime(tmp[3], "%Y-%m-%dT%H:%M:%S.%f")
                if key not in model_dict:
                    model_dict[key] = []
                model_dict[key].append(formatted_time)
            except:
                pass
    return model_dict


# lookup time in the dictionary
def key_lookup(event, phase, model_dict):
    key = event[0] + "-" + event[1] + "-" + phase
    times = []
    if key in model_dict.keys():
        times = model_dict[key]
        times = time_lookup(event[3], times)
    return times


# search for arrivals within the padding time window
def time_lookup(t, time_arr):
    t_lower = t - timedelta(seconds=padding_time)
    t_upper = t + timedelta(seconds=padding_time)
    offsets = []
    for time in time_arr:
        if time > t_lower and time < t_upper:
            offset = t - time  # or format time to absolute value: abs(t - time)
            offset = offset.total_seconds()
            offsets.append('{:.6f}'.format(offset))
    return offsets


def execute_script(arrival, inf, outf, comp_out):
    # write outputs to file
    outp_file = open(comp_out, 'w')

    truth_arr = read_arrivals_to_arr(arrival)        # read in the arrival times to a list
    truth_dict = arrivals_to_dictionary(truth_arr)   # convert arrivals to a dictionary (key=truncated timestamp)
    model_in = model_in_to_array(inf)                # read in model .in file as a list
    truth_arr = filter_times(truth_dict, model_in)   # filter arrivals to picks that were passed to the model (.in file)
    model_dict = read_output_to_dict(outf)           # read output file

    for event in truth_arr:
        phase = event[2]
        times = key_lookup(event, phase, model_dict)
        if len(times) == 0:
            if phase == 'P':
                phase = 'S'
            else:
                phase = 'P'
            times = key_lookup(event, phase, model_dict)
        if len(times) == 0:
            phase = 'N'
            times = ['nan']
        outp_file.write(str(event[5]) + " " + phase)
        for offset in times:
            outp_file.write(" " + str(offset))
        outp_file.write('\n')
    outp_file.close()


for i in range(len(model_out)):
    execute_script(parsed_arrivals[i], model_in[i], model_out[i], comp_out[i])
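# [Illustrative note, not part of the original script]
# Example of the matching window used by time_lookup(): with padding_time = 10, a truth
# pick at 12:00:00 matches any model pick in the open interval (11:59:50, 12:00:10), and
# the reported offset is t_truth - t_pick in seconds:
#
#   from datetime import datetime, timedelta
#   t = datetime(2018, 1, 1, 12, 0, 0)
#   picks = [t - timedelta(seconds=2.5), t + timedelta(seconds=40)]
#   # time_lookup(t, picks) -> ['2.500000']   (the second pick falls outside the window)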
2.5
2
ultitrackerapi/ultitrackerapi/extract_and_upload_video.py
atheheath/ultitracker-api
0
1477
import argparse import boto3 import datetime import json import os import posixpath import re import shutil import tempfile import uuid from concurrent import futures from multiprocessing import Pool from ultitrackerapi import get_backend, get_logger, get_s3Client, video backend_instance = get_backend() logger = get_logger(__name__, level="DEBUG") s3Client = get_s3Client() def update_game_video_length(game_id, video_length): command = """ UPDATE ultitracker.game_metadata SET data = jsonb_set(data, '{{length}}', '"{video_length}"', true) WHERE game_id = '{game_id}' """.format( video_length=video_length, game_id=game_id ) backend_instance.client.execute(command) def get_frame_number(key, chunk_multiplier=60): frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split("_")[1]) chunk_number = int(posixpath.basename(posixpath.dirname(key)).split("_")[1]) return chunk_number * chunk_multiplier + frame_number def insert_images( img_raw_paths, img_types, img_metadatas, game_id, frame_numbers ): command = """ INSERT INTO ultitracker.img_location (img_id, img_raw_path, img_type, img_metadata, game_id, frame_number) VALUES """ for i, (img_raw_path, img_type, img_metadata, frame_number) in enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)): command += """('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma} """.format( img_id=uuid.uuid4(), img_raw_path=img_raw_path, img_type=img_type, img_metadata=json.dumps(img_metadata), game_id=game_id, frame_number=frame_number, include_comma="," if i < (len(img_raw_paths) - 1) else "" ) backend_instance.client.execute(command) def extract_and_upload_video( bucket, video_filename, thumbnail_filename, video_key, thumbnail_key, game_id ): logger.debug("extract_and_upload_video: Getting video length") video_length_seconds = int(video.get_video_duration(video_filename)) video_length = str(datetime.timedelta(seconds=video_length_seconds)) logger.debug("extract_and_upload_video: Finished getting video length") logger.debug("extract_and_upload_video: Getting video height and width") video_height_width = video.get_video_height_width(video_filename) logger.debug("extract_and_upload_video: Finished getting height and width") logger.debug("extract_and_upload_video: Updating length in db") update_game_video_length(game_id, video_length) logger.debug("extract_and_upload_video: Finished updating length in db") logger.debug("extract_and_upload_video: Extracting thumbnail") video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds // 2) logger.debug("extract_and_upload_video: Finished extracting thumbnail") logger.debug("extract_and_upload_video: Uploading thumbnail") s3Client.upload_file( thumbnail_filename, bucket, thumbnail_key ) logger.debug("extract_and_upload_video: Finished uploading thumbnail") logger.debug("extract_and_upload_video: Uploading video to S3") s3Client.upload_file( video_filename, bucket, video_key ) logger.debug("extract_and_upload_video: Finished uploading video to S3") logger.debug("extract_and_upload_video: Chunking video") chunked_video_dir = tempfile.mkdtemp() video.chunk_video(video_filename, chunked_video_dir, chunk_size=60) logger.debug("extract_and_upload_video: Finished chunking video") logger.debug("extract_and_upload_video: Uploading video chunks") with futures.ThreadPoolExecutor(8) as ex: for vid in os.listdir(chunked_video_dir): ex.submit( s3Client.upload_file, os.path.join(chunked_video_dir, vid), bucket, posixpath.join( 
posixpath.dirname(video_key), "chunks", vid ) ) logger.debug("extract_and_upload_video: Finished uploading video chunks") logger.debug("extract_and_upload_video: Submitting lambda frame extraction") aws_lambda_payloads = [ json.dumps({ "s3_bucket_path": bucket, "s3_video_path": posixpath.join(posixpath.dirname(video_key), "chunks", basename), "s3_output_frames_path": posixpath.join(posixpath.dirname(video_key), "frames", posixpath.splitext(basename)[0]), "video_metadata": video_height_width }).encode() for basename in os.listdir(chunked_video_dir) ] client = boto3.client('lambda') aws_lambda_responses = [] with futures.ThreadPoolExecutor(max_workers=16) as ex: result_futures = [] for payload in aws_lambda_payloads: result_futures.append(ex.submit( client.invoke, FunctionName="extractFrames", # InvocationType="Event", Payload=payload )) logger.debug("extract_and_upload_video: Submitted lambda frame extraction") for result_future in futures.as_completed(result_futures): aws_lambda_response = json.loads(result_future.result()["Payload"].read().decode("utf-8")) aws_lambda_responses.append(aws_lambda_response) raw_paths = ["s3://" + posixpath.join(frame["bucket"], frame["key"]) for frame in aws_lambda_response["frames"]] img_types = ["png" for frame in aws_lambda_response["frames"]] metadatas = [ {"bucket": bucket} for frame in aws_lambda_response["frames"] ] frame_numbers = [-1 for frame in aws_lambda_response["frames"]] insert_images( raw_paths, img_types, metadatas, game_id, frame_numbers ) logger.debug("extract_and_upload_video: Received all lambda responses") logger.debug("extract_and_upload_video: Finished inserting image metadata") os.remove(video_filename) os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir) def main(): parser = argparse.ArgumentParser() parser.add_argument("bucket") parser.add_argument("video_filename") parser.add_argument("thumbnail_filename") parser.add_argument("video_key") parser.add_argument("thumbnail_key") parser.add_argument("game_id") args = parser.parse_args() extract_and_upload_video( bucket=args.bucket, video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename, video_key=args.video_key, thumbnail_key=args.thumbnail_key, game_id=args.game_id ) if __name__ == "__main__": main()
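# [Illustrative note, not part of the original module; the payload contents here are made up]
# The fan-out pattern above (ThreadPoolExecutor submitting boto3 client.invoke calls) can
# be exercised in isolation roughly like this:
#
#   import json
#   from concurrent import futures
#   import boto3
#
#   client = boto3.client("lambda")
#   payloads = [json.dumps({"chunk": i}).encode() for i in range(4)]
#   with futures.ThreadPoolExecutor(max_workers=4) as ex:
#       fs = [ex.submit(client.invoke, FunctionName="extractFrames", Payload=p)
#             for p in payloads]
#       for f in futures.as_completed(fs):
#           print(json.loads(f.result()["Payload"].read()))
#
# Because each client.invoke call is synchronous, the thread pool is what provides the
# parallelism; passing InvocationType="Event" instead would fire and forget.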
1.992188
2
Chapter03/scikit_soft_voting_2knn.py
PacktPublishing/Hands-On-Ensemble-Learning-with-Python
31
1478
# --- SECTION 1 ---
# Import the required libraries
from sklearn import datasets, naive_bayes, svm, neighbors
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score

# Load the dataset
breast_cancer = datasets.load_breast_cancer()
x, y = breast_cancer.data, breast_cancer.target

# Split the train and test samples
test_samples = 100
x_train, y_train = x[:-test_samples], y[:-test_samples]
x_test, y_test = x[-test_samples:], y[-test_samples:]

# --- SECTION 2 ---
# Instantiate the learners (classifiers)
learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5)
learner_2 = naive_bayes.GaussianNB()
learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50)

# --- SECTION 3 ---
# Instantiate the voting classifier
voting = VotingClassifier([('5NN', learner_1),
                           ('NB', learner_2),
                           ('50NN', learner_3)],
                          voting='soft')

# --- SECTION 4 ---
# Fit classifier with the training data
voting.fit(x_train, y_train)
learner_1.fit(x_train, y_train)
learner_2.fit(x_train, y_train)
learner_3.fit(x_train, y_train)

# --- SECTION 5 ---
# Predict the most probable class
hard_predictions = voting.predict(x_test)

# --- SECTION 6 ---
# Get the base learner predictions
predictions_1 = learner_1.predict(x_test)
predictions_2 = learner_2.predict(x_test)
predictions_3 = learner_3.predict(x_test)

# --- SECTION 7 ---
# Accuracies of base learners
print('L1:', accuracy_score(y_test, predictions_1))
print('L2:', accuracy_score(y_test, predictions_2))
print('L3:', accuracy_score(y_test, predictions_3))
# Accuracy of the ensemble (voting='soft' above, so this is soft voting)
print('-'*30)
print('Soft Voting:', accuracy_score(y_test, hard_predictions))

# --- SECTION 1 ---
# Import the required libraries
import matplotlib as mpl
import matplotlib.pyplot as plt

mpl.style.use('seaborn-paper')

# --- SECTION 2 ---
# Get the wrongly predicted instances
# and the predicted probabilities for the whole test set
errors = y_test - hard_predictions

probabilities_1 = learner_1.predict_proba(x_test)
probabilities_2 = learner_2.predict_proba(x_test)
probabilities_3 = learner_3.predict_proba(x_test)

# --- SECTION 2 ---
# Store the predicted probability for
# each wrongly predicted instance, for each base learner
# as well as the average predicted probability
#
x = []
y_1 = []
y_2 = []
y_3 = []
y_avg = []

for i in range(len(errors)):
    if not errors[i] == 0:
        x.append(i)
        y_1.append(probabilities_1[i][0])
        y_2.append(probabilities_2[i][0])
        y_3.append(probabilities_3[i][0])
        y_avg.append((probabilities_1[i][0] + probabilities_2[i][0] + probabilities_3[i][0]) / 3)

# --- SECTION 3 ---
# Plot the predicted probability of each base learner as
# a bar and the average probability as an X
plt.bar(x, y_1, 3, label='5NN')
plt.bar(x, y_2, 2, label='NB')
plt.bar(x, y_3, 1, label='50NN')
plt.scatter(x, y_avg, marker='x', c='k', s=150, label='Average Positive', zorder=10)

y = [0.5 for x in range(len(errors))]
plt.plot(y, c='k', linestyle='--')

plt.title('Positive Probability')
plt.xlabel('Test sample')
plt.ylabel('probability')
plt.legend()
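Because voting='soft' and no weights are passed, the ensemble's class probabilities are the unweighted mean of the base learners' predict_proba outputs. A small illustrative check, not part of the original script, relying on the deterministic learners above being fit on the same training data as the clones inside VotingClassifier:

import numpy as np

# averaged positive/negative class probabilities of the three base learners
avg_proba = (probabilities_1 + probabilities_2 + probabilities_3) / 3
print('ensemble proba equals averaged proba:',
      np.allclose(voting.predict_proba(x_test), avg_proba))
print('soft-voted class equals argmax of averaged proba:',
      (voting.predict(x_test) == np.argmax(avg_proba, axis=1)).all())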
3.015625
3
API/migrations/0005_alter_news_date_time_alter_news_headline.py
kgarchie/ReSTful-Django-API
0
1479
<gh_stars>0
# Generated by Django 4.0.3 on 2022-03-23 14:31

import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('API', '0004_alter_news_date_time_alter_news_headline'),
    ]

    operations = [
        migrations.AlterField(
            model_name='news',
            name='date_time',
            field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17, 31, 17, 27766)),
        ),
        migrations.AlterField(
            model_name='news',
            name='headline',
            field=models.CharField(max_length=100),
        ),
    ]
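Note that default=datetime.datetime(2022, 3, 23, 17, 31, 17, 27766) freezes the timestamp captured when makemigrations ran, so every future row gets that same value. A minimal sketch of the more common pattern, assuming the same News model (the dependency name below is hypothetical), passes a callable so the default is evaluated on each insert:

from django.db import migrations, models
from django.utils import timezone


class Migration(migrations.Migration):

    dependencies = [
        # hypothetical: whatever the latest migration of the API app is
        ('API', '0005_alter_news_date_time_alter_news_headline'),
    ]

    operations = [
        migrations.AlterField(
            model_name='news',
            name='date_time',
            # timezone.now (no parentheses) is called per save,
            # unlike the frozen datetime literal in the migration above
            field=models.DateTimeField(default=timezone.now),
        ),
    ]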
1.648438
2
qiskit/ml/datasets/iris.py
stefan-woerner/aqua
504
1480
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

""" iris dataset """

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA

from qiskit.aqua import MissingOptionalLibraryError


def iris(training_size, test_size, n, plot_data=False):
    """ returns iris dataset """
    class_labels = [r'A', r'B', r'C']

    data, target = datasets.load_iris(return_X_y=True)
    sample_train, sample_test, label_train, label_test = \
        train_test_split(data, target, test_size=1, random_state=42)

    # Now we standardize for gaussian around 0 with unit variance
    std_scale = StandardScaler().fit(sample_train)
    sample_train = std_scale.transform(sample_train)
    sample_test = std_scale.transform(sample_test)

    # Now reduce number of features to number of qubits
    pca = PCA(n_components=n).fit(sample_train)
    sample_train = pca.transform(sample_train)
    sample_test = pca.transform(sample_test)

    # Scale to the range (-1,+1)
    samples = np.append(sample_train, sample_test, axis=0)
    minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
    sample_train = minmax_scale.transform(sample_train)
    sample_test = minmax_scale.transform(sample_test)

    # Pick training size number of samples from each distro
    training_input = {key: (sample_train[label_train == k, :])[:training_size]
                      for k, key in enumerate(class_labels)}
    test_input = {key: (sample_test[label_test == k, :])[:test_size]
                  for k, key in enumerate(class_labels)}

    if plot_data:
        try:
            import matplotlib.pyplot as plt
        except ImportError as ex:
            raise MissingOptionalLibraryError(
                libname='Matplotlib',
                name='iris',
                pip_install='pip install matplotlib') from ex
        for k in range(0, 3):
            plt.scatter(sample_train[label_train == k, 0][:training_size],
                        sample_train[label_train == k, 1][:training_size])

        plt.title("Iris dataset")
        plt.show()

    return sample_train, training_input, test_input, class_labels
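An illustrative call of the function above (the argument values are just example assumptions); each returned dict maps a class label to an array of shape (size, n) with features scaled to [-1, 1]:

sample_train, training_input, test_input, class_labels = iris(
    training_size=20,  # samples kept per class for training
    test_size=10,      # samples kept per class for testing
    n=2,               # PCA components, i.e. the number of qubits
    plot_data=False)
# e.g. {'A': (20, 2), 'B': (20, 2), 'C': (20, 2)}
print({label: training_input[label].shape for label in class_labels})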
2.625
3
tests/h/views/api_auth_test.py
discodavey/h
0
1481
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime import json import mock import pytest from oauthlib.oauth2 import InvalidRequestFatalError from oauthlib.common import Request as OAuthRequest from pyramid import httpexceptions from h._compat import urlparse from h.exceptions import OAuthTokenError from h.models.auth_client import ResponseType from h.services.auth_token import auth_token_service_factory from h.services.oauth_provider import OAuthProviderService from h.services.oauth_validator import DEFAULT_SCOPES from h.services.user import user_service_factory from h.util.datetime import utc_iso8601 from h.views import api_auth as views @pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc') class TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_validates_request(self, controller, pyramid_request, view_name): view = getattr(controller, view_name) view() controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url) @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_raises_for_invalid_request(self, controller, view_name): controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view = getattr(controller, view_name) view() assert exc.value.description == 'boom!' @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name): with pytest.raises(httpexceptions.HTTPFound) as exc: view = getattr(controller, view_name) view() parsed_url = urlparse.urlparse(exc.value.location) assert parsed_url.path == '/login' assert urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url], 'for_oauth': ['True']} @pytest.mark.parametrize('response_mode,view_name', [ (None, 'get'), ('web_message', 'get_web_message'), ]) def test_get_returns_expected_context(self, controller, auth_client, authenticated_user, oauth_request, response_mode, view_name): oauth_request.response_mode = response_mode view = getattr(controller, view_name) assert view() == { 'client_id': auth_client.id, 'client_name': auth_client.name, 'response_mode': response_mode, 'response_type': auth_client.response_type.value, 'state': 'foobar', 'username': authenticated_user.username, } @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request, view_name): auth_client.trusted = True view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request): auth_client.trusted = True response = controller.get() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True assert controller.request.override_renderer is None controller.get_web_message() assert controller.request.override_renderer == 
'h:templates/oauth/authorize_web_message.html.jinja2' @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True response = controller.get_web_message() assert response == { 'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider): auth_client.trusted = True headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302) response = controller.get_web_message() assert response['state'] is None @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user, view_name): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' + \ '&response_type=code' + \ '&state=foobar' + \ '&scope=exploit' view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_raises_for_invalid_request(self, controller, view_name): controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view = getattr(controller, view_name) view() assert exc.value.description == 'boom!' def test_post_redirects_to_client(self, controller, auth_client): response = controller.post() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected def test_post_web_message_returns_expected_context(self, controller, auth_client): response = controller.post_web_message() assert response == { 'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider): auth_client.trusted = True headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302) response = controller.post_web_message() assert response['state'] is None @pytest.fixture def controller(self, pyramid_request): pyramid_request.override_renderer = None return views.OAuthAuthorizeController(None, pyramid_request) @pytest.fixture def oauth_request(self): return OAuthRequest('/') @pytest.fixture def oauth_provider(self, pyramid_config, auth_client, oauth_request): svc = mock.create_autospec(OAuthProviderService, instance=True) scopes = ['annotation:read', 'annotation:write'] credentials = {'client_id': auth_client.id, 'state': 'foobar', 'request': oauth_request} svc.validate_authorization_request.return_value = (scopes, credentials) headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body = None status = 302 svc.create_authorization_response.return_value = (headers, body, status) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.fixture def auth_client(self, factories): return factories.AuthClient(name='Test Client', redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code) 
@pytest.fixture def user_svc(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=user_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='user') return svc @pytest.fixture def pyramid_request(self, pyramid_request): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return pyramid_request @pytest.fixture def authenticated_user(self, factories, pyramid_config, user_svc): user = factories.User.build() pyramid_config.testing_securitypolicy(user.userid) def fake_fetch(userid): if userid == user.userid: return user user_svc.fetch.side_effect = fake_fetch return user @pytest.fixture def routes(self, pyramid_config): pyramid_config.add_route('login', '/login') @pytest.mark.usefixtures('oauth_provider') class TestOAuthAccessTokenController(object): def test_it_creates_token_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_correct_response_on_success(self, controller, oauth_provider): body = json.dumps({'access_token': 'the-access-token'}) oauth_provider.create_token_response.return_value = ({}, body, 200) assert controller.post() == {'access_token': 'the-access-token'} def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_token_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post() assert exc.value.body == body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['grant_type'] = 'authorization_code' pyramid_request.POST['code'] = 'the-authz-code' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthAccessTokenController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.mark.usefixtures('oauth_provider') class TestOAuthRevocationController(object): def test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_empty_response_on_success(self, controller): response = controller.post() assert response == {} def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_revocation_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post() assert exc.value.body == body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['token'] = 'the-token' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthRevocationController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc class TestDebugToken(object): def test_it_raises_error_when_token_is_missing(self, pyramid_request): 
pyramid_request.auth_token = None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token is missing' in exc.value.message def test_it_raises_error_when_token_is_empty(self, pyramid_request): pyramid_request.auth_token = '' with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token is missing' in exc.value.message def test_it_validates_token(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-access-token' views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token') def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-token' token_service.validate.return_value = None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token does not exist or is expired' in exc.value.message def test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service, oauth_token): pyramid_request.auth_token = oauth_token.value token_service.fetch.return_value = oauth_token result = views.debug_token(pyramid_request) assert result == {'userid': oauth_token.userid, 'client': {'id': oauth_token.authclient.id, 'name': oauth_token.authclient.name}, 'issued_at': utc_iso8601(oauth_token.created), 'expires_at': utc_iso8601(oauth_token.expires), 'expired': oauth_token.expired} def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token): pyramid_request.auth_token = developer_token.value token_service.fetch.return_value = developer_token result = views.debug_token(pyramid_request) assert result == {'userid': developer_token.userid, 'issued_at': utc_iso8601(developer_token.created), 'expires_at': None, 'expired': False} @pytest.fixture def token_service(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='auth_token') return svc @pytest.fixture def oauth_token(self, factories): authclient = factories.AuthClient(name='Example Client') expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=10) return factories.DeveloperToken(authclient=authclient, expires=expires) @pytest.fixture def developer_token(self, factories): return factories.DeveloperToken() class TestAPITokenError(object): def test_it_sets_the_response_status_code(self, pyramid_request): context = OAuthTokenError('the error message', 'error_type', status_code=403) views.api_token_error(context, pyramid_request) assert pyramid_request.response.status_code == 403 def test_it_returns_the_error(self, pyramid_request): context = OAuthTokenError('', 'error_type') result = views.api_token_error(context, pyramid_request) assert result['error'] == 'error_type' def test_it_returns_error_description(self, pyramid_request): context = OAuthTokenError('error description', 'error_type') result = views.api_token_error(context, pyramid_request) assert result['error_description'] == 'error description' def test_it_skips_description_when_missing(self, pyramid_request): context = OAuthTokenError(None, 'invalid_request') result = views.api_token_error(context, pyramid_request) assert 'error_description' not in result def test_it_skips_description_when_empty(self, pyramid_request): context = OAuthTokenError('', 'invalid_request') result = 
views.api_token_error(context, pyramid_request) assert 'error_description' not in result
2.265625
2
tools/build/v2/test/conditionals.py
juslee/boost-svn
1
1482
<filename>tools/build/v2/test/conditionals.py
#!/usr/bin/python

# Copyright 2003 <NAME>
# Copyright 2002, 2003, 2004 <NAME>
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

# Test conditional properties.

import BoostBuild

t = BoostBuild.Tester()

# Arrange a project which will build only if 'a.cpp' is compiled with "STATIC"
# define.
t.write("a.cpp", """\
#ifdef STATIC
int main() {}
#endif
""")

# Test conditionals in target requirements.
t.write("jamroot.jam", "exe a : a.cpp : <link>static:<define>STATIC ;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.rm("bin")

# Test conditionals in project requirements.
t.write("jamroot.jam", """
project : requirements <link>static:<define>STATIC ;
exe a : a.cpp ;
""")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.rm("bin")

# Regression test for a bug found by <NAME>ani. Conditionals inside
# usage requirement were not being evaluated.
t.write("jamroot.jam", """
lib l : l.cpp : : : <link>static:<define>STATIC ;
exe a : a.cpp l ;
""")
t.write("l.cpp", "int i;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")

t.cleanup()
1.960938
2
examples/setuptools-rust-starter/tests/test_setuptools_rust_starter.py
FriendRat/pyo3
1
1483
<gh_stars>1-10
from setuptools_rust_starter import PythonClass, ExampleClass


def test_python_class() -> None:
    py_class = PythonClass(value=10)
    assert py_class.value == 10


def test_example_class() -> None:
    example = ExampleClass(value=11)
    assert example.value == 11
2.171875
2
spiders/juejin_spider.py
sunhailin-Leo/TeamLeoX_BlogsCrawler
0
1484
import time from typing import Dict, List, Tuple, Optional from utils.logger_utils import LogManager from utils.str_utils import check_is_json from config import LOG_LEVEL, PROCESS_STATUS_FAIL from utils.time_utils import datetime_str_change_fmt from utils.exception_utils import LoginException, ParseDataException from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils from utils.str_utils import check_is_phone_number, check_is_email_address logger = LogManager(__name__).get_logger_and_add_handlers( formatter_template=5, log_level_int=LOG_LEVEL ) class JuejinSpider(BaseSpider): def __init__(self, task_id: str, username: str, password: str): self._main_url = "https://juejin.im/auth/type" self._blogs_url = "https://timeline-merger-ms.juejin.im/v1/get_entry_by_self" self._like_blogs_url = "https://user-like-wrapper-ms.juejin.im/v1/user" self._task_id = task_id self._login_username = username self._login_password = password self._spider_name: str = f"juejin:{self._login_username}" self._login_cookies: Optional[str] = None self._login_token: Optional[str] = None self._login_uid: Optional[str] = None self._login_client_id: Optional[str] = None self._response_data = None self._blogs_data: List = [] self._like_blogs_data: List = [] self._like_blogs_total_page: int = 0 super().__init__() self._login_cookies = self.get_cookies(spider_name=self._spider_name) def _check_username(self) -> Optional[Tuple[str, Dict]]: """ 解析用户名 :return: 结果 """ phone_login = check_is_phone_number(data=self._login_username) email_login = check_is_email_address(data=self._login_username) login_data: Dict = {"password": self._login_password} if phone_login is None and email_login is None: raise ValueError("Your login username is illegal!") if phone_login is not None: login_data.update(phoneNumber=self._login_username) return f"{self._main_url}/phoneNumber", login_data if email_login is not None: login_data.update(email=self._login_username) return f"{self._main_url}/email", login_data return None def parse_data_with_method(self, method: str): if method == BaseSpiderParseMethodType.LoginResult: self._parse_login_data() elif method == BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs() self._parse_personal_like_blogs() elif method == BaseSpiderParseMethodType.Finish: self.send_data() def login(self): if self._login_cookies is None: login_url, login_data = self._check_username() response = self.make_request( url=login_url, headers=self._common_headers, method="POST", json=login_data, ) if response.content.decode() != "": logger.info("登录成功!") self._response_data = response.json() self._login_cookies = CookieUtils( cookie_list=response.cookies.items() ).to_str() logger.debug(self._login_cookies) self.set_cookies( spider_name=self._spider_name, cookies=self._login_cookies ) self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: logger.error("登录失败!") raise LoginException() else: get_result: str = self.get_data(spider_name=f"{self._spider_name}:params") if get_result is None: self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: try: login_params = get_result.split("&")[1:-1] self._login_uid = [d for d in login_params if "uid" in d][ 0 ].replace("uid=", "") self._login_token = [d for d in login_params if "token" in d][ 0 ].replace("token=", "") self._login_client_id = [ d for d in login_params if "device_id" in d ][0].replace("device_id=", "") self.parse_data_with_method( method=BaseSpiderParseMethodType.PersonalBlogs ) except Exception as err: 
logger.error(f"解析 Redis 返回数据失败! 错误原因: {err}") self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) def _parse_login_data(self): # 公共参数 self._login_token = self._response_data["token"] self._login_uid = self._response_data["userId"] self._login_client_id = self._response_data["clientId"] # 重要参数持久化 params: str = f"?src=web&uid={self._login_uid}" f"&token={self._login_token}" f"&device_id={self._login_client_id}" f"&current_uid={self._login_uid}" self.set_data(spider_name=f"{self._spider_name}:params", data=params) # 个人数据 username = self._response_data["user"]["username"] description = self._response_data["user"]["selfDescription"] avatar_img = self._response_data["user"]["avatarLarge"] followee = self._response_data["user"]["followeesCount"] follower = self._response_data["user"]["followersCount"] like_blogs = self._response_data["user"]["collectedEntriesCount"] personal_data: Dict = { "username": username, "description": description, "avatarImg": avatar_img, "followee": followee, "follower": follower, "likeBlogs": like_blogs, } logger.debug(personal_data) self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def _parse_personal_blogs(self, next_params: Optional[str] = None): req_data: dict = { "src": "web", "uid": self._login_uid, "device_id": self._login_client_id, "token": self._login_token, "targetUid": self._login_uid, "type": "post", "limit": "20", "order": "createdAt", } if next_params is not None: req_data.update(before=next_params) url_params: str = "" for index, data in enumerate(req_data.items()): if index == 0: url_params += f"?{data[0]}={data[1]}" else: url_params += f"&{data[0]}={data[1]}" blogs_url: str = f"{self._blogs_url}{url_params}" response = self.make_request(url=blogs_url, headers=self._common_headers) if response.content.decode() != "": self._response_data = response.json() if self._response_data is not None and self._response_data["m"] == "ok": next_page_variable = None entry_list = self._response_data["d"]["entrylist"] if len(entry_list) > 0: for personal_blog in entry_list: blog_create_time = datetime_str_change_fmt( time_str=personal_blog["createdAt"], prev_fmt="%Y-%m-%dT%H:%M:%S.%fZ", ) blog_data: Dict = { "blogId": personal_blog["objectId"], "blogTitle": personal_blog["title"], "blogHref": personal_blog["originalUrl"], "blogViewers": personal_blog["viewsCount"], "blogCreateTime": blog_create_time, } self._blogs_data.append(blog_data) next_page_variable = personal_blog["verifyCreatedAt"] if self._response_data["d"]["total"] > 20: time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable) else: logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info("获取个人博客数据成功!") else: logger.error("查询个人博客失败!") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise LoginException() def _parse_personal_like_blogs(self, page_no: int = 0): like_blogs_url: str = f"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20" self._common_headers.update( { "X-Juejin-Client": str(self._login_client_id), "X-Juejin-Src": "web", "X-Juejin-Token": self._login_token, "X-Juejin-Uid": self._login_uid, } ) response = self.make_request(url=like_blogs_url, headers=self._common_headers) if response.content.decode() != "": self._response_data = response.json() if ( self._response_data is not None and self._response_data["m"] == "success" ): logger.info(f"当前正在获取第{page_no + 1}页的数据!") if 
page_no == 0: total_count = self._response_data["d"]["total"] total_pages = total_count // 20 rest_count = total_count % 20 if rest_count != 0: total_pages += 1 self._like_blogs_total_page = total_pages entry_list = self._response_data["d"]["entryList"] if len(entry_list) > 0: for entry_data in entry_list: if entry_data is None: continue blog_data: Dict = { "blogId": entry_data["objectId"], "blogTitle": entry_data["title"], "blogHref": entry_data["originalUrl"], "blogViewers": entry_data["viewsCount"], "blogCreateTime": datetime_str_change_fmt( time_str=entry_data["createdAt"], prev_fmt="%Y-%m-%dT%H:%M:%S.%fZ", ), } self._like_blogs_data.append(blog_data) page_no += 1 if page_no <= self._like_blogs_total_page: # TODO 后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no) else: # logger.debug(self._like_blogs_data) logger.debug(f"获取到 {len(self._like_blogs_data)} 条个人点赞博客") self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data ) logger.info("获取个人点赞博客成功!") # 任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else: logger.error("查询个人点赞博客失败!") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise ParseDataException() def _test_cookies(self, cookies: Optional[str] = None) -> bool: params = self.get_data(spider_name=f"{self._spider_name}:params") if params is None: return False test_user_url: str = f"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}" test_request_headers: Dict = self.get_default_headers() test_response = self.make_request( url=test_user_url, headers=test_request_headers ) if ( test_response.status_code != 200 or check_is_json(test_response.content.decode()) is not True ): logger.error(f"当前掘金账号登录状态: 已退出!") self._async_task.remove_async_scheduler(job_id=self._spider_name) return False test_json_response = test_response.json() if test_json_response["s"] == 1: logger.info(f"当前掘金账号为: {self._login_username}, 状态: 已登录") return True else: logger.error(f"当前掘金账号登录状态: 已退出!") return False
2
2
NAS/PaddleSlim/train_supernet.py
naviocean/SimpleCVReproduction
923
1485
from paddle.vision.transforms import (
    ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform,
    Compose, HueTransform, BrightnessTransform, ContrastTransform,
    RandomCrop, Normalize, RandomRotation
)
from paddle.vision.datasets import Cifar100
from paddle.io import DataLoader
from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup
import random
from resnet20 import *

import paddle

# supernet training, based on the PaddleSlim model compression toolkit
# https://github.com/PaddlePaddle/PaddleSlim -- stars are welcome
from paddleslim.nas.ofa.convert_super import Convert, supernet
from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig
from paddleslim.nas.ofa.utils import utils

channel_list = []
for i in range(1, 21):
    if 0 < i <= 7:
        # channel_list.append(random.choice([ 4, 8, 12, 16]))
        channel_list.append(16)
    elif 7 < i <= 13:
        # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32]))
        channel_list.append(32)
    elif 13 < i <= 19:
        # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
        channel_list.append(64)
    else:
        # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
        channel_list.append(64)

net = ResNet20(100, channel_list)
net2 = ResNet20(100, channel_list)
net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams'))

channel_optional = []
for i in range(0, 23):
    if i <= 7:
        channel_optional.append([4, 8, 12, 16])
        # channel_optional.append([12, 16])
    elif 7 < i <= 14:
        channel_optional.append([4, 8, 12, 16, 20, 24, 28, 32])
        # channel_optional.append([20, 24, 28, 32])
    elif 14 < i <= 21:
        channel_optional.append(
            [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
        # channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
    else:
        channel_optional.append(
            [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
        # channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])

distill_config = DistillConfig(teacher_model=net2)
sp_net_config = supernet(channel=channel_optional)
sp_model = Convert(sp_net_config).convert(net)
ofa_net = OFA(sp_model, distill_config=distill_config)
ofa_net.set_task('channel')

model = paddle.Model(ofa_net)

MAX_EPOCH = 300
LR = 0.1
WEIGHT_DECAY = 5e-4
MOMENTUM = 0.9
BATCH_SIZE = 128
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.1942, 0.1918, 0.1958]
DATA_FILE = './data/data76994/cifar-100-python.tar.gz'

# NOTE: CrossEntropyLoss, RandomApply, ToArray, LRSchedulerM and callbacks are
# referenced below but not imported in this file; they are presumably pulled in
# by `from resnet20 import *` or defined elsewhere in the repository.
model.prepare(
    paddle.optimizer.Momentum(
        learning_rate=LinearWarmup(
            CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR),
        momentum=MOMENTUM,
        parameters=model.parameters(),
        weight_decay=WEIGHT_DECAY),
    CrossEntropyLoss(),
    paddle.metric.Accuracy(topk=(1, 5)))

transforms = Compose([
    RandomCrop(32, padding=4),
    RandomApply(BrightnessTransform(0.1)),
    RandomApply(ContrastTransform(0.1)),
    RandomHorizontalFlip(),
    RandomRotation(15),
    ToArray(),
    Normalize(CIFAR_MEAN, CIFAR_STD),
])
val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)])

train_set = Cifar100(DATA_FILE, mode='train', transform=transforms)
test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms)
callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')]

model.fit(
    train_set,
    test_set,
    epochs=MAX_EPOCH,
    batch_size=BATCH_SIZE,
    save_dir='checkpoints',
    save_freq=100,
    shuffle=True,
    num_workers=4,
    verbose=1,
    callbacks=callbacks,
)
2.125
2
slashtags/mixins/commands.py
Myst1c-a/phen-cogs
0
1486
<reponame>Myst1c-a/phen-cogs<filename>slashtags/mixins/commands.py<gh_stars>0 """ MIT License Copyright (c) 2020-present phenom4n4n Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import asyncio import logging import re import types from collections import Counter from copy import copy from typing import Dict, List, Union import discord from redbot.core import commands from redbot.core.utils.chat_formatting import box, humanize_list, inline, pagify from redbot.core.utils.menus import DEFAULT_CONTROLS, menu from redbot.core.utils.predicates import MessagePredicate from tabulate import tabulate from ..abc import MixinMeta from ..converters import ( GlobalTagConverter, GuildTagConverter, PastebinConverter, TagConverter, TagName, TagScriptConverter, ) from ..http import ApplicationOptionChoice, SlashOptionType from ..objects import ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag from ..testing.button_menus import menu as button_menu from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check TAG_RE = re.compile(r"(?i)(\[p\])?\b(slash\s?)?tag'?s?\b") CHOICE_RE = re.compile(r".{1,100}:.{1,100}") CHOICE_LIMIT = 25 log = logging.getLogger("red.phenom4n4n.slashtags.commands") def _sub(match: re.Match) -> str: if match.group(1): return "[p]slashtag global" repl = "global " name = match.group(0) repl += name if name.istitle(): repl = repl.title() return repl def copy_doc(original: Union[commands.Command, types.FunctionType]): def decorator(overriden: Union[commands.Command, types.FunctionType]): doc = original.help if isinstance(original, commands.Command) else original.__doc__ doc = TAG_RE.sub(_sub, doc) if isinstance(overriden, commands.Command): overriden._help_override = doc else: overriden.__doc__ = doc return overriden return decorator class Commands(MixinMeta): @commands.guild_only() @commands.group(aliases=["st"]) async def slashtag(self, ctx: commands.Context): """ Slash Tag management with TagScript. These commands use TagScriptEngine. [This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on how to use TagScript blocks. """ @commands.mod_or_permissions(manage_guild=True) @slashtag.command("add", aliases=["create", "+"]) async def slashtag_add( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, tagscript: TagScriptConverter, ): """ Add a slash tag with TagScript. 
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) """ await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False) async def create_slash_tag( self, ctx: commands.Context, tag_name: str, tagscript: str, *, is_global: bool = False, command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT, ): options: List[SlashOption] = [] guild_id = None if is_global else ctx.guild.id if command_type == ApplicationCommandType.CHAT_INPUT: try: description = await self.send_and_query_response( ctx, "What should the tag description to be? (maximum 100 characters)", pred=MessagePredicate.length_less(101, ctx), ) except asyncio.TimeoutError: return await ctx.send("Tag addition timed out.") else: description = "" if command_type == ApplicationCommandType.CHAT_INPUT: pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, "Would you like to add arguments to this tag? (Y/n)", pred ) except asyncio.TimeoutError: await ctx.send("Query timed out, not adding arguments.") else: if pred.result is True: await self.get_options(ctx, options) command = ApplicationCommand( self, name=tag_name, description=description, guild_id=guild_id, options=options, type=command_type, ) try: await command.register() except discord.Forbidden as error: log.error( "Failed to create command {command!r} on guild {ctx.guild!r}", exc_info=error ) text = ( "Looks like I don't have permission to add Slash Commands here. Reinvite me " "with this invite link and try again: <https://discordapp.com/oauth2/authorize" f"?client_id={self.bot.user.id}&scope=bot%20applications.commands>" ) return await ctx.send(text) except Exception: log.error("Failed to create command {command!r} on guild {ctx.guild!r}") # exc info unneeded since error handler should print it, however info on the command options is needed raise tag = SlashTag( self, tagscript, guild_id=guild_id, author_id=ctx.author.id, command=command, ) await ctx.send(await tag.initialize()) async def get_options( self, ctx: commands.Context, options: List[SlashOption] ) -> List[SlashOption]: added_required = False for i in range(1, 11): try: option = await self.get_option(ctx, added_required=added_required) if not option.required: added_required = True except asyncio.TimeoutError: await ctx.send("Adding this argument timed out.", delete_after=15) break options.append(option) if i == 10: break pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, "Would you like to add another argument? (Y/n)", pred ) except asyncio.TimeoutError: await ctx.send("Query timed out, not adding additional arguments.") break if pred.result is False: break return options async def send_and_query_response( self, ctx: commands.Context, query: str, pred: MessagePredicate = None, *, timeout: int = 60, ) -> str: if pred is None: pred = MessagePredicate.same_context(ctx) ask = await ctx.send(query) try: message = await self.bot.wait_for("message", check=pred, timeout=timeout) except asyncio.TimeoutError: await self.delete_quietly(ask) raise await self.delete_quietly(ask) await self.delete_quietly(message) return message.content async def get_choices(self, ctx: commands.Context) -> List[ApplicationOptionChoice]: query = ( "Send the list of choice names and values you would like to add as choices to " "the tag. Choice names and values should be seperated by `:`, and each choice " "should be seperated by `|`. 
Example:\n`dog:Doggo|cat:Catto`" ) response = await self.send_and_query_response(ctx, query) choices = [] for choice_text in response.split("|"): if ":" not in choice_text: await ctx.send( f"Failed to parse `{choice_text}` to a choice as its name and value " "weren't seperated by a `:`.", delete_after=15, ) continue if not CHOICE_RE.match(choice_text): await ctx.send( f"Failed to parse `{choice_text}` to a choice as " "its name or value exceeded the 100 character limit.", delete_after=15, ) continue choice = ApplicationOptionChoice(*choice_text.split(":", 1)) choices.append(choice) if len(choices) >= CHOICE_LIMIT: await ctx.send(f"Reached max choices ({CHOICE_LIMIT}).") break return choices async def get_option( self, ctx: commands.Context, *, added_required: bool = False ) -> SlashOption: name_desc = [ "What should the argument name be and description be?", "The argument name and description should be split by a `:`.", "Example: `member:A member of this server.`\n", "*Slash argument names may not exceed 32 characters and can only contain characters " "that are alphanumeric or '_' or '-'.", "The argument description must be less than or equal to 100 characters.*", ] name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx) await self.send_and_query_response(ctx, "\n".join(name_desc), name_pred) match = name_pred.result name, description = match.group(1), match.group(2) valid_option_types = [ name.lower() for name in SlashOptionType.__members__.keys() if not name.startswith("SUB") ] valid_option_types.append("choices") option_query = [ "What should the argument type be?", f"Valid option types: {humanize_list([inline(n) for n in valid_option_types])}", "(select `string` if you don't understand)", ] option_type = await self.send_and_query_response( ctx, "\n".join(option_query), MessagePredicate.lower_contained_in(valid_option_types, ctx), ) if option_type.lower() == "choices": choices = await self.get_choices(ctx) option_type = "STRING" else: choices = [] option_type = SlashOptionType[option_type.upper()] if not added_required: pred = MessagePredicate.yes_or_no(ctx) await self.send_and_query_response( ctx, "Is this argument required? (Y/n)\n*Keep in mind that if you choose to make this argument optional, all following arguments must also be optional.*", pred, ) required = pred.result else: await ctx.send( "This argument was automatically made optional as the previous one was optional.", delete_after=15, ) required = False return SlashOption( name=name.lower(), description=description, option_type=option_type, required=required, choices=choices, ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command("message") async def slashtag_message( self, ctx: commands.Context, tag_name: TagName(check_global=False, check_regex=False), *, tagscript: TagScriptConverter, ): """ Add a message command tag with TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) """ await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command("user") async def slashtag_user( self, ctx: commands.Context, tag_name: TagName(check_global=False, check_regex=False), *, tagscript: TagScriptConverter, ): """ Add a user command tag with TagScript. 
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) """ await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command("pastebin", aliases=["++"]) async def slashtag_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ): """ Add a slash tag with a Pastebin link. """ await self.create_slash_tag(ctx, tag_name, link, is_global=False) @commands.mod_or_permissions(manage_guild=True) @slashtag.group("edit", aliases=["e"], invoke_without_command=True) async def slashtag_edit( self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter ): """Edit a slash tag.""" await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_edit.command("tagscript") async def slashtag_edit_tagscript( self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter ): """Edit a slash tag's TagScript.""" await self.slashtag_edit(ctx, tag, tagscript=tagscript) @slashtag_edit.command("name") async def slashtag_edit_name( self, ctx: commands.Context, tag: GuildTagConverter, *, name: TagName(check_global=False) ): """Edit a slash tag's name.""" await ctx.send(await tag.edit_name(name)) @slashtag_edit.command("description") async def slashtag_edit_description( self, ctx: commands.Context, tag: GuildTagConverter, *, description: str ): """Edit a slash tag's description.""" await ctx.send(await tag.edit_description(description)) @slashtag_edit.command("arguments", aliases=["options"]) async def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter): """ Edit a slash tag's arguments. See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information on slash tag arguments. """ await tag.edit_options(ctx) @slashtag_edit.command("argument", aliases=["option"]) async def slashtag_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument: str ): """Edit a single slash tag's argument by name.""" await tag.edit_single_option(ctx, argument) @commands.mod_or_permissions(manage_guild=True) @slashtag.command("remove", aliases=["delete", "-"]) async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter): """Delete a slash tag.""" await ctx.send(await tag.delete()) @slashtag.command("info") async def slashtag_info(self, ctx: commands.Context, *, tag: TagConverter): """Get info about a slash tag that is stored on this server.""" await tag.send_info(ctx) @slashtag.command("raw") async def slashtag_raw(self, ctx: commands.Context, *, tag: GuildTagConverter): """Get a slash tag's raw content.""" await tag.send_raw_tagscript(ctx) @classmethod def format_tagscript(cls, tag: SlashTag, limit: int = 60) -> str: title = f"`{tag.type.get_prefix()}{tag.name}` - " limit -= len(title) tagscript = tag.tagscript if len(tagscript) > limit - 3: tagscript = tagscript[:limit] + "..." 
tagscript = tagscript.replace("\n", " ") return f"{title}{discord.utils.escape_markdown(tagscript)}" async def view_slash_tags( self, ctx: commands.Context, tags: Dict[int, SlashTag], *, is_global: bool, ): description = [ self.format_tagscript(tag) for tag in sorted(tags.values(), key=lambda t: t.name) ] description = "\n".join(description) e = discord.Embed(color=await ctx.embed_color()) if is_global: slash_tags = "global slash tags" e.set_author(name="Global Slash Tags", icon_url=ctx.me.avatar_url) else: slash_tags = "slash tags" e.set_author(name="Stored Slash Tags", icon_url=ctx.guild.icon_url) embeds = [] pages = list(pagify(description)) for index, page in enumerate(pages, 1): embed = e.copy() embed.description = page embed.set_footer(text=f"{index}/{len(pages)} | {len(tags)} {slash_tags}") embeds.append(embed) # await menu(ctx, embeds, DEFAULT_CONTROLS) await button_menu(ctx, embeds) @slashtag.command("list") async def slashtag_list(self, ctx: commands.Context): """View stored slash tags.""" tags = self.guild_tag_cache[ctx.guild.id] if not tags: return await ctx.send("There are no slash tags on this server.") await self.view_slash_tags(ctx, tags, is_global=False) async def show_slash_tag_usage(self, ctx: commands.Context, guild: discord.Guild = None): tags = self.guild_tag_cache[guild.id] if guild else self.global_tag_cache if not tags: message = ( "This server has no slash tags." if guild else "There are no global slash tags." ) return await ctx.send(message) counter = Counter({tag.name: tag.uses for tag in tags.copy().values()}) e = discord.Embed(title="Slash Tag Stats", color=await ctx.embed_color()) embeds = [] for usage_data in chunks(counter.most_common(), 10): usage_chart = box(tabulate(usage_data, headers=("Tag", "Uses")), "prolog") embed = e.copy() embed.description = usage_chart embeds.append(embed) await menu(ctx, embeds, DEFAULT_CONTROLS) @slashtag.command("usage", aliases=["stats"]) async def slashtag_usage(self, ctx: commands.Context): """ See this slash tag usage stats. **Example:** `[p]slashtag usage` """ await self.show_slash_tag_usage(ctx, ctx.guild) @commands.is_owner() @slashtag.command("restore", hidden=True) async def slashtag_restore(self, ctx: commands.Context): """Restore all slash tags from the database.""" await self.restore_tags(ctx, ctx.guild) @commands.is_owner() @slashtag.command("clear", hidden=True) async def slashtag_clear(self, ctx: commands.Context): """Clear all slash tags for this server.""" pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, "Are you sure you want to delete all slash tags on this server? 
(Y/n)", pred ) except asyncio.TimeoutError: return await ctx.send("Timed out, not deleting slash tags.") if not pred.result: return await ctx.send("Ok, not deleting slash tags.") guild: discord.Guild = ctx.guild await self.http.put_guild_slash_commands(guild.id, []) for tag in copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache() tag.command.remove_from_cache() del tag self.guild_tag_cache[guild.id].clear() await self.config.guild(guild).tags.clear() await ctx.send("Tags deleted.") @commands.is_owner() @slashtag.group("global") @copy_doc(slashtag) async def slashtag_global(self, ctx: commands.Context): pass @slashtag_global.command("add") @copy_doc(slashtag_add) async def slashtag_global_add( self, ctx: commands.Context, tag_name: TagName(global_priority=True), *, tagscript: TagScriptConverter, ): await self.create_slash_tag(ctx, tag_name, tagscript, is_global=True) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command("message") @copy_doc(slashtag_message) async def slashtag_global_message( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command("user") @copy_doc(slashtag_user) async def slashtag_global_user( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER ) @slashtag_global.command("pastebin", aliases=["++"]) @copy_doc(slashtag_pastebin) async def slashtag_global_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ): await self.create_slash_tag(ctx, tag_name, link, is_global=True) @slashtag_global.group("edit", aliases=["e"], invoke_without_command=True) @copy_doc(slashtag_edit) async def slashtag_global_edit( self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_global_edit.command("tagscript") @copy_doc(slashtag_edit_tagscript) async def slashtag_global_edit_tagscript( self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await self.slashtag_global_edit(ctx, tag, tagscript=tagscript) @slashtag_global_edit.command("name") @copy_doc(slashtag_edit_name) async def slashtag_global_edit_name( self, ctx: commands.Context, tag: GlobalTagConverter, *, name: TagName(global_priority=True), ): await ctx.send(await tag.edit_name(name)) @slashtag_global_edit.command("description") @copy_doc(slashtag_edit_description) async def slashtag_global_edit_description( self, ctx: commands.Context, tag: GlobalTagConverter, *, description: str ): await ctx.send(await tag.edit_description(description)) @slashtag_global_edit.command("arguments", aliases=["options"]) @copy_doc(slashtag_edit_arguments) async def slashtag_global_edit_arguments(self, ctx: commands.Context, tag: GlobalTagConverter): await tag.edit_options(ctx) @slashtag_global_edit.command("argument", aliases=["option"]) @copy_doc(slashtag_edit_argument) async def slashtag_global_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument: str ): await tag.edit_single_option(ctx, argument) @slashtag_global.command("remove", aliases=["delete", "-"]) 
@copy_doc(slashtag_remove) async def slashtag_global_remove(self, ctx: commands.Context, *, tag: GlobalTagConverter): await ctx.send(await tag.delete()) @slashtag_global.command("raw") @copy_doc(slashtag_raw) async def slashtag_global_raw(self, ctx: commands.Context, *, tag: GlobalTagConverter): await tag.send_raw_tagscript(ctx) @slashtag_global.command("list") @copy_doc(slashtag_list) async def slashtag_global_list(self, ctx: commands.Context): tags = self.global_tag_cache if not tags: return await ctx.send("There are no global slash tags.") await self.view_slash_tags(ctx, tags, is_global=True) @slashtag_global.command("usage", aliases=["stats"]) @copy_doc(slashtag_usage) async def slashtag_global_usage(self, ctx: commands.Context): await self.show_slash_tag_usage(ctx) @slashtag_global.command("restore", hidden=True) @copy_doc(slashtag_restore) async def slashtag_global_restore(self, ctx: commands.Context): await self.restore_tags(ctx, None) @commands.is_owner() @commands.group(aliases=["slashset"]) async def slashtagset(self, ctx: commands.Context): """Manage SlashTags settings.""" @slashtagset.command("settings") async def slashtagset_settings(self, ctx: commands.Context): """View SlashTags settings.""" eval_command = f"✅ (**{self.eval_command}**)" if self.eval_command else "❎" testing_enabled = "✅" if self.testing_enabled else "❎" description = [ f"Application ID: **{self.application_id}**", f"Eval command: {eval_command}", f"Test cog loaded: {testing_enabled}", ] embed = discord.Embed( color=0xC9C9C9, title="SlashTags Settings", description="\n".join(description) ) await ctx.send(embed=embed) @slashtagset.command("appid") async def slashtagset_appid(self, ctx: commands.Context, id: int = None): """ Manually set the application ID for [botname] slash commands if it differs from the bot user ID. This only applies to legacy bots. If you don't know what this means, you don't need to worry about it. """ app_id = id or self.bot.user.id await self.config.application_id.set(app_id) self.application_id = app_id await ctx.send(f"Application ID set to `{id}`.") @commands.check(dev_check) @slashtagset.command("addeval") async def slashtagset_addeval(self, ctx: commands.Context): """Add a slash eval command for debugging.""" if self.eval_command: return await ctx.send("An eval command is already registered.") slasheval = ApplicationCommand( self, name="eval", description="SlashTags debugging eval command. Only bot owners can use this.", options=[ SlashOption(name="body", description="Code body to evaluate.", required=True) ], ) await slasheval.register() await self.config.eval_command.set(slasheval.id) self.eval_command = slasheval.id await ctx.send("`/eval` has been registered.") @commands.check(dev_check) @slashtagset.command("rmeval") async def slashtagset_rmeval(self, ctx: commands.Context): """Remove the slash eval command.""" if not self.eval_command: return await ctx.send("The eval command hasn't been registered.") try: await self.http.remove_slash_command(self.eval_command) except discord.HTTPException: pass await self.config.eval_command.clear() self.eval_command = None await ctx.send("`/eval` has been deleted.") @slashtagset.command("testing") async def slashtagset_testing(self, ctx: commands.Context, true_or_false: bool = None): """ Load or unload the SlashTag interaction development test cog. 
""" target_state = ( true_or_false if true_or_false is not None else not await self.config.testing_enabled() ) if target_state is self.testing_enabled: loaded = "loaded" if target_state else "unloaded" return await ctx.send(f"The SlashTag interaction testing cog is already {loaded}.") await self.config.testing_enabled.set(target_state) if target_state: loaded = "Loaded" self.add_test_cog() else: loaded = "Unloaded" self.remove_test_cog() await ctx.send(f"{loaded} the SlashTag interaction testing cog.")
1.140625
1
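The global subcommands above lean on a `copy_doc` helper that is not part of this excerpt. Below is a minimal sketch of what such a decorator might do, assuming it simply reuses the help text of an existing command or function; the cog's real helper may differ.

# Hypothetical stand-in for the copy_doc decorator used above: it copies the
# help text of an existing command (or the docstring of a plain function)
# onto the decorated callback so the global subcommands reuse the guild docs.
def copy_doc(original):
    def decorator(func):
        # commands.Command objects keep their help text in .help;
        # plain coroutine functions keep it in __doc__.
        func.__doc__ = getattr(original, "help", None) or original.__doc__
        return func
    return decorator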
prgm6.py
pooja-bs-3003/Project_21
0
1487
str1 = input("enter a string :")
l1 = ""
for i in str1:
    # prepending each character builds the reverse of the original string
    l1 = i + l1
print(l1)
if str1 == l1:
    print("string is a palindrome")
else:
    print("string is not a palindrome")
4.15625
4
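The check above builds the reversed string character by character; the same comparison can be written in one step with slicing. A minimal, self-contained sketch:

def is_palindrome(text: str) -> bool:
    # text[::-1] yields the characters of text in reverse order
    return text == text[::-1]

print(is_palindrome("madam"))   # True
print(is_palindrome("python"))  # False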
product_spider/spiders/jk_spider.py
Pandaaaa906/product_spider
0
1488
import json import re from string import ascii_uppercase from time import time from urllib.parse import urljoin import scrapy from more_itertools import first from scrapy import Request from product_spider.items import JkProduct, JKPackage from product_spider.utils.functions import strip class JkPrdSpider(scrapy.Spider): name = "jk" allowed_domains = ["jkchemical.com"] base_url = "http://www.jkchemical.com" start_urls = map(lambda x: "http://www.jkchemical.com/CH/products/index/ProductName/{0}.html".format(x), ascii_uppercase) prd_size_url = "http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}" def parse(self, response): for xp_url in response.xpath("//div[@class='yy toa']//a/@href"): tmp_url = self.base_url + xp_url.extract() yield Request(tmp_url.replace("EN", "CH"), callback=self.parse_list) def parse_list(self, response): xp_boxes = response.xpath("//table[@id]//div[@class='PRODUCT_box']") for xp_box in xp_boxes: div = xp_box.xpath(".//div[2][@class='left_right mulu_text']") brand = strip(div.xpath('.//li[@id="ctl00_cph_Content_li_lt_Brand"]/text()').get(), '') rel_url = div.xpath('.//a[@class="name"]/@href').get() img_url = div.xpath('.//img/@src').get() d = { 'brand': brand.replace('-', '') or None, "purity": div.xpath(".//li[1]/text()").get('').split(u":")[-1].strip(), "cas": strip(div.xpath(".//li[2]//a/text()").get()), "cat_no": div.xpath(".//li[4]/text()").get().split(u":")[-1].strip(), "en_name": strip(xp_box.xpath(".//a[@class='name']/text()").get()), "cn_name": strip(xp_box.xpath(".//a[@class='name']//span[1]/text()").get()), 'prd_url': rel_url and urljoin(response.url, rel_url), 'img_url': img_url and urljoin(response.url, img_url), } data_jkid = xp_box.xpath(".//div[@data-jkid]/@data-jkid").get() data_cid = xp_box.xpath(".//div[@data-cid]/@data-cid").get() yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())), body=u"", meta={"prd_data": d}, callback=self.parse_package) next_page = response.xpath('//a[contains(text(), "下一页")]/@href').get() if next_page: yield Request(urljoin(response.url, next_page), callback=self.parse_list) def parse_package(self, response): s = re.findall(r"(?<=\().+(?=\))", response.text)[0] packages = json.loads(s) d = response.meta.get('prd_data', {}) package = first(packages, {}) if package: d['brand'] = d['brand'] or package.get('Product', {}).get('BrandName') yield JkProduct(**d) for package_obj in packages: catalog_price = package_obj.get("CatalogPrice", {}) dd = { 'brand': d.get('brand'), 'cat_no': d.get('cat_no'), 'package': package_obj.get("stringFormat"), 'price': catalog_price and catalog_price.get('Value'), 'currency': catalog_price and strip(catalog_price.get('Currency')), 'attrs': json.dumps(package_obj), } yield JKPackage(**dd)
2.671875
3
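The spider imports a `strip` helper from `product_spider.utils.functions`, which is not included in this file. The sketch below is an assumption based only on how it is called above (strip(value) and strip(value, default)); the real project helper may differ.

def strip(value, default=None):
    # Hypothetical reimplementation: strip whitespace when given a string,
    # otherwise fall back to the supplied default (None or '').
    if isinstance(value, str):
        return value.strip()
    return default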
env/LaneChangeEnv_v2.py
byq-luo/Lane_change_RL
4
1489
import os import sys import random import datetime import gym from gym import spaces import numpy as np from env.IDM import IDM from env.Road import Road from env.Vehicle import Vehicle import math # add sumo/tools into python environment if 'SUMO_HOME' in os.environ: tools = os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools) print('success') else: sys.exit("please declare environment variable 'SUMO_HOME'") import traci ###################################################################### # simulation environments class LaneChangeEnv(gym.Env): def __init__(self, id=None, traffic=1, gui=False, seed=None): # todo check traffic flow density if traffic == 0: # average 9 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg' elif traffic == 2: # average 19 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg' else: # average 14 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg' # arguments must be string, if float/int, must be converted to str(float/int), instead of '3.0' self.sumoBinary = "/usr/local/Cellar/sumo/1.2.0/bin/sumo" self.sumoCmd = ['-c', self.cfg, # '--lanechange.duration', str(3), # using 'Simple Continuous lane-change model' '--lateral-resolution', str(0.8), # using 'Sublane-Model' '--step-length', str(0.1), '--default.action-step-length', str(0.1)] # randomness if seed is None: self.sumoCmd += ['--random'] else: self.sumoCmd += ['--seed', str(seed)] # gui if gui is True: self.sumoBinary += '-gui' self.sumoCmd = [self.sumoBinary] + self.sumoCmd + ['--quit-on-end', str(True), '--start', str(True)] else: self.sumoCmd = [self.sumoBinary] + self.sumoCmd traci.start(self.sumoCmd) self.rd = Road() self.timestep = 0 self.dt = traci.simulation.getDeltaT() self.randomseed = None self.sumoseed = None self.veh_dict = {} self.vehID_tuple_all = () self.egoID = id self.ego = None # self.tgtLane = tgtlane self.is_success = False self.collision_num = 0 self.lateral_action = 2 # self.observation = [[0, 0, 0], # ego lane position and speed # [0, 0, 0], # leader # [0, 0, 0], # target lane leader # [0, 0, 0]] # target lane follower self.observation = np.empty(20) self.reward = None # (float) : amount of reward returned after previous action self.done = True # (bool): whether the episode has ended, in which case further step() calls will return undefined results self.info = { 'resetFlag': 0} # (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning) self.action_space = spaces.Discrete(6) self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,)) def update_veh_dict(self, veh_id_tuple): for veh_id in veh_id_tuple: if veh_id not in self.veh_dict.keys(): self.veh_dict[veh_id] = Vehicle(veh_id, self.rd) for veh_id in list(self.veh_dict.keys()): if veh_id not in veh_id_tuple: self.veh_dict.pop(veh_id) for veh_id in list(self.veh_dict.keys()): self.veh_dict[veh_id].update_info(self.rd, self.veh_dict) def _updateObservationSingle(self, name, veh): """ :param name: 0:ego; 1:leader; 2:target leader; 3:target follower :param id: vehicle id corresponding to name :return: """ if veh is not None: self.observation[name * 4 + 0] = veh.lanePos self.observation[name * 4 + 1] = veh.speed self.observation[name * 4 + 2] = veh.pos_lat self.observation[name * 4 + 3] = veh.acce else: self.observation[name * 4 + 0] = self.observation[0] + 300. 
self.observation[name * 4 + 1] = self.observation[1] self.observation[name * 4 + 2] = 4.8 self.observation[name * 4 + 3] = 0 # todo check if rational def updateObservation(self): self.observation[0] = self.ego.lanePos self.observation[1] = self.ego.speed self.observation[2] = self.ego.pos_lat self.observation[3] = self.ego.acce self._updateObservationSingle(1, self.ego.orig_leader) self._updateObservationSingle(2, self.ego.orig_follower) self._updateObservationSingle(3, self.ego.trgt_leader) self._updateObservationSingle(4, self.ego.trgt_follower) # self.observation = np.array(self.observation).flatten() # print(self.observation.shape) def updateReward(self): return -self.ego.dis2tgtLane def updateReward2(self): wc1 = 1 wc2 = 1 wt = 1 ws = 1 we = 1 # reward related to comfort r_comf = wc1 * self.ego.acce ** 2 + wc2 * self.ego.delta_acce ** 2 # reward related to efficiency r_time = - wt * self.timestep r_speed = ws * (self.ego.speed - self.ego_speedLimit) r_effi = we * self.ego.dis2tgtLane / self.ego.dis2entrance r_effi_all = r_time + r_speed + r_effi # reward related to safety w_lateral = 1 w_longi = 1 if self.ego.leaderID is not None: # compute longitudinal time gap delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce if delta_A == 0: TTC = - abs(self.ego.leaderDis)/delta_V else: TTC = -delta_V - math.sqrt(delta_V**2 + 2*delta_A * self.ego.leaderDis) TTC = TTC/delta_A if self.lateral_action != 1 and 0 < TTC < 2: r_long_c = - math.exp(-2*TTC+5) else: r_long_c = 0 if self.lateral_action == 0: #abort lane change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 assert 0 <= alpha <= 1.1 r_lat_c = -math.exp(-4*alpha+5) else: r_lat_c = 0 if self.ego.targetLeaderID is not None: # compute longitudinal time gap delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos if delta_A2 == 0: TTC2 = - abs(delta_D2) / delta_V2 else: TTC2 = -delta_V2 - math.sqrt(delta_V2 ** 2 + 2 * delta_A2 * delta_D2) TTC2 = TTC2 / delta_A2 if self.lateral_action == 1 and 0 < TTC2 < 2: r_long_t = - math.exp(-2 * TTC2 + 5) else: r_long_t = 0 if self.lateral_action == 1: # lane change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 assert 0 <= alpha <= 1.1 r_lat_t = -math.exp(-4*alpha+5) else: r_lat_t = 0 r_safe = w_lateral * (r_lat_c + r_lat_t) + w_longi * (r_long_c+ r_long_t) # # if self.ego.leaderID is not None: # # ('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat)) # alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 # assert 0 <= alpha <= 1.1 # r_safe_leader = w_lateral * alpha + w_longi * (1 - alpha) * abs(self.ego.leaderDis) # else: # r_safe_leader = 0 # if self.ego.targetLeaderID is not None: # # print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat)) # alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 # # print('alpha', alpha) # assert 0 <= alpha <= 1.1 # # r_safe_tgtleader = w_lateral * alpha + w_longi * (1 - alpha) * abs( # self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos) # else: # r_safe_tgtleader = 0 # # # r_safe = r_safe_leader + r_safe_tgtleader # total reward r_total = r_comf + r_effi_all + r_safe return r_total def is_done(self): # lane change 
successfully executed, episode ends, reset env # todo modify if self.is_success: self.done = True # print('reset on: successfully lane change, dis2targetlane:', # self.ego.dis2tgtLane) # too close to ramp entrance if self.ego.dis2entrance < 10.0: self.done = True # print('reset on: too close to ramp entrance, dis2targetlane:', # self.ego.dis2tgtLane) # ego vehicle out of env if self.egoID not in self.vehID_tuple_all: self.done = True # print('reset on: self.ego not in env:', self.egoID not in self.vehID_tuple_all) # collision occurs self.collision_num = traci.simulation.getCollidingVehiclesNumber() if self.collision_num > 0: self.done = True # print('reset on: self.collision_num:', self.collision_num) def preStep(self): traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) def step(self, action=2): """Run one timestep of the environment's dynamics. When end of episode is reached, call `reset()` outside env!! to reset this environment's state. Accepts an action and returns a tuple (observation, reward, done, info). Args: action (object): longitudinal0: action[0] = 1: accelerate action[0] = -1: decelerate action[0] = 0: use SUMO default action[0] = others: acce = 0.0 longitudinal1: action[0] = 0: follow original lane leader action[0] = 1: follow closer leader longitudinal2: action[0] = 0: follow original lane leader action[0] = 1: follow target lane leader **important**: orginal/target lane leader will not change despite the lateral position of the ego may change lateral: action[1] = 1: lane change action[1] = 0: abort lane change, change back to original lane action[1] = 2: keep in current lateral position Returns: described in __init__ """ action_longi = action // 3 action_lateral = action % 3 self.lateral_action = action_lateral # action_longi = action[0] # action_lateral = action[1] assert self.done is False, 'self.done is not False' assert action is not None, 'action is None' assert self.egoID in self.vehID_tuple_all, 'vehicle not in env' self.timestep += 1 # lateral control------------------------- # episode in progress; 0:change back to original line; 1:lane change to target lane; 2:keep current # lane change to target lane if not self.is_success: if action_lateral == 1: # and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd) # print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth) # print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth)) # abort lane change, change back to ego's original lane if action_lateral == 0: # and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd) # print('left', 1.5 * self.rd.laneWidth - self.ego.pos_lat) # keep current lateral position if action_lateral == 2: self.is_success = self.ego.changeLane(True, -1, self.rd) # longitudinal control2--------------------- acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi) # print(acceNext) vNext = self.ego.speed + acceNext * 0.1 traci.vehicle.setSpeed(self.egoID, vNext) # update info------------------------------ traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) # check if episode ends self.is_done() if self.done is True: self.info['resetFlag'] = True return self.observation, 
0.0, self.done, self.info else: self.updateObservation() self.reward = self.updateReward() return self.observation, self.reward, self.done, self.info def seed(self, seed=None): if seed is None: self.randomseed = datetime.datetime.now().microsecond else: self.randomseed = seed random.seed(self.randomseed) def reset(self, egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None): """ reset env :param id: ego vehicle id :param tfc: int. 0:light; 1:medium; 2:dense :return: initial observation """ self.seed(randomseed) if sumoseed is None: self.sumoseed = self.randomseed traci.close() self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed) # continue step until ego appears in env if self.egoID is not None: while self.egoID not in self.veh_dict.keys(): # must ensure safety in preStpe self.preStep() if self.timestep > 5000: raise Exception('cannot find ego after 5000 timesteps') assert self.egoID in self.vehID_tuple_all, "cannot start training while ego is not in env" self.done = False self.ego = self.veh_dict[self.egoID] self.ego.trgt_laneIndex = tlane self.ego.is_ego = 1 # set ego vehicle speed mode traci.vehicle.setSpeedMode(self.ego.veh_id, 0) self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid) self.ego_speedLimit = self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID)) self.ego.idm_obj = IDM() self.ego.idm_obj.__init__(self.ego_speedLimit) self.ego.update_info(self.rd, self.veh_dict) self.updateObservation() return self.observation return def close(self): traci.close()
2.328125
2
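A minimal driver loop for the environment above might look as follows. It assumes SUMO, traci and the project's env package are importable; the ego id, traffic level and random policy are placeholders rather than values from the original repository.

# Hypothetical usage sketch for LaneChangeEnv; the ego id and the random
# policy are placeholders, not part of the original project.
import random
from env.LaneChangeEnv_v2 import LaneChangeEnv

env = LaneChangeEnv(gui=False)
obs = env.reset(egoid="ego.0", tlane=0, tfc=1, is_gui=False)

done = False
total_reward = 0.0
while not done:
    action = random.randrange(6)      # Discrete(6): 2 longitudinal x 3 lateral choices
    obs, reward, done, info = env.step(action)
    total_reward += reward

env.close()
print("episode return:", total_reward)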
cidr/o365/o365.py
jblukach/distillery
1
1490
import boto3 import ipaddress import json import logging import os import requests import uuid logger = logging.getLogger() logger.setLevel(logging.INFO) dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(os.environ['DYNAMODB_TABLE']) client = boto3.client('ssm') def downloader(instance, latest, parameter, link): r = requests.get(link) cidrs = r.json() if r.status_code == 200: for cidr in cidrs: try: if len(cidr['ips']) != 0: for ip in cidr['ips']: sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip hostmask = ip.split('/') iptype = ipaddress.ip_address(hostmask[0]) nametype = 'IPv'+str(iptype.version)+'#' if nametype == 'IPv4#': netrange = ipaddress.IPv4Network(ip) first, last = netrange[0], netrange[-1] firstip = int(ipaddress.IPv4Address(first)) lastip = int(ipaddress.IPv4Address(last)) elif nametype == 'IPv6#': netrange = ipaddress.IPv6Network(ip) first, last = netrange[0], netrange[-1] firstip = int(ipaddress.IPv6Address(first)) lastip = int(ipaddress.IPv6Address(last)) table.put_item( Item= { 'pk': nametype, 'sk': sortkey, 'service': cidr['serviceArea'], 'cidr': ip, 'created': latest, 'endpoint': instance, 'firstip': firstip, 'lastip': lastip } ) except: pass logger.info('o365 '+instance+' IP Ranges Updated') response = client.put_parameter( Name = parameter, Value = str(latest), Type = 'String', Overwrite = True ) def handler(event, context): r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4())) logger.info('Link Status Code: '+str(r.status_code)) if r.status_code == 200: versions = r.json() logger.info(versions) for version in versions: if version['instance'] == 'Worldwide': response = client.get_parameter(Name=os.environ['WORLD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 Worldwide IP Ranges') link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'], link) elif version['instance'] == 'USGovDoD': response = client.get_parameter(Name=os.environ['DOD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 USGovDoD IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'], link) elif version['instance'] == 'USGovGCCHigh': response = client.get_parameter(Name=os.environ['HIGH_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 USGovGCCHigh IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'], link) elif version['instance'] == 'China': response = client.get_parameter(Name=os.environ['CHINA_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 China IP Ranges') link = 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'], link) elif version['instance'] == 'Germany': response = client.get_parameter(Name=os.environ['GERMANY_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 Germany IP Ranges') link = 
'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'], link) else: logger.info('No o365 IP Range Updates') return { 'statusCode': 200, 'body': json.dumps('Download o365 IP Ranges') }
2.03125
2
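Because each item stores integer firstip/lastip bounds, checking whether an address falls inside a published o365 range is a simple range query. The sketch below is illustrative only: the partition key and attribute names mirror the put_item call above, while the function and table names are assumptions.

# Illustrative lookup: list o365 service areas whose published CIDRs contain
# the given address. Attribute names mirror the put_item call above.
import ipaddress
import boto3
from boto3.dynamodb.conditions import Key, Attr

def find_services(table_name, address):
    ip = ipaddress.ip_address(address)
    ip_int = int(ip)
    table = boto3.resource('dynamodb').Table(table_name)
    response = table.query(
        KeyConditionExpression=Key('pk').eq('IPv{}#'.format(ip.version)),
        FilterExpression=Attr('firstip').lte(ip_int) & Attr('lastip').gte(ip_int),
    )
    return [item['service'] for item in response.get('Items', [])]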
exampleinc.py
zulip/finbot
7
1491
<reponame>zulip/finbot #!/usr/bin/python from money import * c = Company("Example Inc") c.add_flow(FixedCost("Initial Cash", -500000)) c.add_flow(FixedCost("Incorporation", 500)) c.add_flow(ConstantCost("Office", 50000)) c.add_flow(PeriodicCost("Subscription", 4000, "2012-01-05", 14)) c.add_flow(DelayedCost("2012-02-01", ConstantCost("Office", 50000))) c.add_flow(DelayedCost("2012-02-01", FixedCost("Financing", 50000))) c.add_flow(SemiMonthlyCost("Payroll", 4000, "2012-01-01")) c.add_flow(SemiMonthlyWages("Payroll", 6000, "2012-01-01")) print(c) c.cash_monthly_summary("2012-01-01", "2013-07-01")
3.03125
3
guardian/validators.py
dawid1stanek/guardian
0
1492
#!/usr/bin/env python
import socket
import subprocess
import logging

LOGGER = logging.getLogger(__name__)


class ValidatorError(Exception):
    pass


def ping(address):
    try:
        subprocess.check_call(('ping', '-c', '1', '-W', '1', address),
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        LOGGER.info('Ping server %s - OK', address)
    except subprocess.CalledProcessError as e:
        LOGGER.error('Ping server %s - Failed', address)
        raise ValidatorError(e)
ping.short_name = 'PING'


def port(address, port):
    s = socket.socket()
    try:
        s.connect((address, port))
        LOGGER.info('Checking port %s:%d - OK', address, port)
    except socket.error as e:
        LOGGER.error('Checking port %s:%d - Failed', address, port)
        raise ValidatorError(e)
    finally:
        s.close()
port.short_name = 'PORT'
3.046875
3
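A short usage sketch for the two validators above; the host and port values are examples, and the import path assumes the guardian/validators.py layout shown.

# Hypothetical usage of the ping/port validators; host and port are examples.
import logging
from guardian.validators import ping, port, ValidatorError

logging.basicConfig(level=logging.INFO)

for validator, args in [(ping, ('8.8.8.8',)), (port, ('8.8.8.8', 53))]:
    try:
        validator(*args)
    except ValidatorError as error:
        print('{} check failed: {}'.format(validator.short_name, error))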
tests/data/udf_noop.py
Open-EO/openeo-geopyspark-driver
12
1493
<reponame>Open-EO/openeo-geopyspark-driver from openeo.udf import XarrayDataCube def apply_datacube(cube: XarrayDataCube, context: dict) -> XarrayDataCube: return cube
1.664063
2
StateGoHome.py
LHGames-2017/superintelligence
0
1494
from PlayerState import *
from pathFinder import PathFinder
from StateLook4Resources import *


class StateGoHome(PlayerState):
    """ State Implementation: has a resource and goes back home """

    def __init__(self, player):
        self.player = player
        self.player.setTarget(self.player.playerData.HouseLocation)

    def doAction(self):
        origin = self.player.playerData.Position
        target = self.player.target
        moves = PathFinder(self.player.mapView).getPath(origin, target)

        # If the player just dropped the resource at home, look for resources again
        if not self.player.hasResources():
            self.player.state = StateLook4Resources(self.player)
            return create_purchase_action(0)

        return create_move_action(moves[0])

    def toString(self):
        return "StateGoHome"
2.9375
3
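The state classes in this project hand control back and forth by reassigning player.state and returning an action each tick. The snippet below is a self-contained illustration of that pattern only; the classes and strings are stand-ins, not the LHGames API.

# Minimal, self-contained illustration of the state-machine pattern above.
class GoHome:
    def do_action(self, player):
        if not player.has_resources:          # resource already delivered
            player.state = LookForResources()
            return "look for a new resource"
        player.has_resources = False          # pretend we reached home and dropped it off
        return "move towards home"

class LookForResources:
    def do_action(self, player):
        player.has_resources = True           # pretend we mined something
        player.state = GoHome()
        return "move towards a resource"

class Player:
    def __init__(self):
        self.has_resources = True
        self.state = GoHome()

player = Player()
for _ in range(4):
    print(type(player.state).__name__, "->", player.state.do_action(player))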
hoomd/mpcd/test-py/stream_slit_test.py
schwendp/hoomd-blue
2
1495
<reponame>schwendp/hoomd-blue # Copyright (c) 2009-2019 The Regents of the University of Michigan # This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. # Maintainer: mphoward import unittest import numpy as np import hoomd from hoomd import md from hoomd import mpcd # unit tests for mpcd slit streaming geometry class mpcd_stream_slit_test(unittest.TestCase): def setUp(self): # establish the simulation context hoomd.context.initialize() # set the decomposition in z for mpi builds if hoomd.comm.get_num_ranks() > 1: hoomd.comm.decomposition(nz=2) # default testing configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.))) # initialize the system from the starting snapshot snap = mpcd.data.make_snapshot(N=2) snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]] self.s = mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1) # test creation can happen (with all parameters set) def test_create(self): mpcd.stream.slit(H=4., V=0.1, boundary="no_slip", period=2) # test for setting parameters def test_set_params(self): slit = mpcd.stream.slit(H=4.) self.assertAlmostEqual(slit.H, 4.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, "no_slip") self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change H and also ensure other parameters stay the same slit.set_params(H=2.) self.assertAlmostEqual(slit.H, 2.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, "no_slip") self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change V slit.set_params(V=0.1) self.assertAlmostEqual(slit.V, 0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1) # change BCs slit.set_params(boundary="slip") self.assertEqual(slit.boundary, "slip") self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip) # test for invalid boundary conditions being set def test_bad_boundary(self): slit = mpcd.stream.slit(H=4.) slit.set_params(boundary="no_slip") slit.set_params(boundary="slip") with self.assertRaises(ValueError): slit.set_params(boundary="invalid") # test basic stepping behavior with no slip boundary conditions def test_step_noslip(self): mpcd.stream.slit(H=4.) 
# take one step hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step where one particle will now hit the wall hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step, wrapping the second particle through the boundary hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.]) def test_step_moving_wall(self): mpcd.stream.slit(H=4., boundary="no_slip", V=1.0, period=3) # change velocity of lower particle so it is translating relative to wall snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: snap.particles.velocity[1] = [-2.,-1.,-1.] self.s.restore_snapshot(snap) # run one step and check bounce back of particles hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: # the first particle is matched exactly to the wall speed, and so it will translate at # same velocity along +x for 3 steps. It will bounce back in y and z to where it started. # (vx stays the same, and vy and vz flip.) np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.]) # the second particle has y and z velocities flip again, and since it started closer, # it moves relative to original position. 
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.]) # test basic stepping behavior with slip boundary conditions def test_step_slip(self): mpcd.stream.slit(H=4., boundary="slip") # take one step hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step where one particle will now hit the wall hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step, wrapping the second particle through the boundary hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.]) # test that setting the slit size too large raises an error def test_validate_box(self): # initial configuration is invalid slit = mpcd.stream.slit(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # now it should be valid slit.set_params(H=4.) hoomd.run(2) # make sure we can invalidate it again slit.set_params(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # test that particles out of bounds can be caught def test_out_of_bounds(self): slit = mpcd.stream.slit(H=3.8) with self.assertRaises(RuntimeError): hoomd.run(1) slit.set_params(H=3.85) hoomd.run(1) # test that virtual particle filler can be attached, removed, and updated def test_filler(self): # initialization of a filler slit = mpcd.stream.slit(H=4.) slit.set_filler(density=5., kT=1.0, seed=42, type='A') self.assertTrue(slit._filler is not None) # run should be able to setup the filler, although this all happens silently hoomd.run(1) # changing the geometry should still be OK with a run slit.set_params(V=1.0) hoomd.run(1) # changing filler should be allowed slit.set_filler(density=10., kT=1.5, seed=7) self.assertTrue(slit._filler is not None) hoomd.run(1) # assert an error is raised if we set a bad particle type with self.assertRaises(RuntimeError): slit.set_filler(density=5., kT=1.0, seed=42, type='B') # assert an error is raised if we set a bad density with self.assertRaises(RuntimeError): slit.set_filler(density=-1.0, kT=1.0, seed=42) # removing the filler should still allow a run slit.remove_filler() self.assertTrue(slit._filler is None) hoomd.run(1) def tearDown(self): del self.s if __name__ == '__main__': unittest.main(argv = ['test.py', '-v'])
2.1875
2
tests/functional/model_models.py
haoyuchen1992/CourseBuilder
1
1496
# Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functional tests for models.models.""" __author__ = [ '<EMAIL> (<NAME>)', ] import datetime from models import models from tests.functional import actions # Disable complaints about docstrings for self-documenting tests. # pylint: disable-msg=g-missing-docstring class EventEntityTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): event = models.EventEntity(source='source', user_id='1') key = event.put() exported = event.for_export(self.transform) self.assert_blacklisted_properties_removed(event, exported) self.assertEqual('source', event.source) self.assertEqual('transformed_1', exported.user_id) self.assertEqual(key, models.EventEntity.safe_key(key, self.transform)) class PersonalProfileTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly_and_sets_safe_key(self): date_of_birth = datetime.date.today() email = '<EMAIL>' legal_name = 'legal_name' nick_name = 'nick_name' user_id = '1' profile = models.PersonalProfile( date_of_birth=date_of_birth, email=email, key_name=user_id, legal_name=legal_name, nick_name=nick_name) profile.put() exported = profile.for_export(self.transform) self.assert_blacklisted_properties_removed(profile, exported) self.assertEqual( self.transform(user_id), exported.safe_key.name()) class QuestionDAOTestCase(actions.TestBase): """Functional tests for QuestionDAO.""" # Name determined by parent. pylint: disable-msg=g-bad-name def setUp(self): """Sets up datastore contents.""" super(QuestionDAOTestCase, self).setUp() self.used_twice_question_id = 1 self.used_twice_question_dto = models.QuestionDTO( self.used_twice_question_id, {}) self.used_once_question_id = 2 self.used_once_question_dto = models.QuestionDTO( self.used_once_question_id, {}) self.unused_question_id = 3 self.unused_question_dto = models.QuestionDTO( self.unused_question_id, {}) models.QuestionDAO.save_all([ self.used_twice_question_dto, self.used_once_question_dto, self.unused_question_dto]) # Handcoding the dicts. This is dangerous because they're handcoded # elsewhere, the implementations could fall out of sync, and these tests # may then pass erroneously. 
self.first_question_group_description = 'first_question_group' self.first_question_group_id = 4 self.first_question_group_dto = models.QuestionGroupDTO( self.first_question_group_id, {'description': self.first_question_group_description, 'items': [{'question': str(self.used_once_question_id)}]}) self.second_question_group_description = 'second_question_group' self.second_question_group_id = 5 self.second_question_group_dto = models.QuestionGroupDTO( self.second_question_group_id, {'description': self.second_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) self.third_question_group_description = 'third_question_group' self.third_question_group_id = 6 self.third_question_group_dto = models.QuestionGroupDTO( self.third_question_group_id, {'description': self.third_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) models.QuestionGroupDAO.save_all([ self.first_question_group_dto, self.second_question_group_dto, self.third_question_group_dto]) def test_used_by_returns_description_of_single_question_group(self): self.assertEqual( [self.first_question_group_description], models.QuestionDAO.used_by(self.used_once_question_id)) def test_used_by_returns_descriptions_of_multiple_question_groups(self): self.assertEqual( [self.second_question_group_description, self.third_question_group_description], models.QuestionDAO.used_by(self.used_twice_question_id)) def test_used_by_returns_empty_list_for_unused_question(self): not_found_id = 7 self.assertFalse(models.QuestionDAO.load(not_found_id)) self.assertEqual([], models.QuestionDAO.used_by(not_found_id)) class StudentTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): user_id = '1' student = models.Student(key_name='name', user_id='1', is_enrolled=True) key = student.put() exported = student.for_export(self.transform) self.assert_blacklisted_properties_removed(student, exported) self.assertTrue(exported.is_enrolled) self.assertEqual('transformed_1', exported.user_id) self.assertEqual( 'transformed_' + user_id, exported.key_by_user_id.name()) self.assertEqual( models.Student.safe_key(key, self.transform), exported.safe_key) def test_get_key_does_not_transform_by_default(self): user_id = 'user_id' student = models.Student(key_name='name', user_id=user_id) student.put() self.assertEqual(user_id, student.get_key().name()) def test_safe_key_transforms_name(self): key = models.Student(key_name='name').put() self.assertEqual( 'transformed_name', models.Student.safe_key(key, self.transform).name()) class StudentAnswersEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_name(self): student_key = models.Student(key_name='name').put() answers = models.StudentAnswersEntity(key_name=student_key.name()) answers_key = answers.put() self.assertEqual( 'transformed_name', models.StudentAnswersEntity.safe_key( answers_key, self.transform).name()) class StudentPropertyEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_user_id_component(self): user_id = 'user_id' student = models.Student(key_name='<EMAIL>', user_id=user_id) student.put() property_name = 'property-name' student_property_key = models.StudentPropertyEntity.create( student, property_name).put() self.assertEqual( 'transformed_%s-%s' % (user_id, property_name), models.StudentPropertyEntity.safe_key( student_property_key, self.transform).name())
1.96875
2
torchaudio/functional/functional.py
iseessel/audio
0
1497
# -*- coding: utf-8 -*- import io import math import warnings from typing import Optional, Tuple import torch from torch import Tensor from torchaudio._internal import module_utils as _mod_utils import torchaudio __all__ = [ "spectrogram", "griffinlim", "amplitude_to_DB", "DB_to_amplitude", "compute_deltas", "compute_kaldi_pitch", "create_fb_matrix", "create_dct", "compute_deltas", "detect_pitch_frequency", "DB_to_amplitude", "mu_law_encoding", "mu_law_decoding", "complex_norm", "angle", "magphase", "phase_vocoder", 'mask_along_axis', 'mask_along_axis_iid', 'sliding_window_cmn', "spectral_centroid", "apply_codec", ] def spectrogram( waveform: Tensor, pad: int, window: Tensor, n_fft: int, hop_length: int, win_length: int, power: Optional[float], normalized: bool, center: bool = True, pad_mode: str = "reflect", onesided: bool = True ) -> Tensor: r"""Create a spectrogram or a batch of spectrograms from a raw audio signal. The spectrogram can be either magnitude-only or complex. Args: waveform (Tensor): Tensor of audio of dimension (..., time) pad (int): Two sided padding of signal window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT hop_length (int): Length of hop between STFT windows win_length (int): Window size power (float or None): Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 for power, etc. If None, then the complex spectrum is returned instead. normalized (bool): Whether to normalize by magnitude after stft center (bool, optional): whether to pad :attr:`waveform` on both sides so that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. Default: ``True`` pad_mode (string, optional): controls the padding method used when :attr:`center` is ``True``. Default: ``"reflect"`` onesided (bool, optional): controls whether to return half of results to avoid redundancy. Default: ``True`` Returns: Tensor: Dimension (..., freq, time), freq is ``n_fft // 2 + 1`` and ``n_fft`` is the number of Fourier bins, and time is the number of window hops (n_frame). """ if pad > 0: # TODO add "with torch.no_grad():" back when JIT supports it waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant") # pack batch shape = waveform.size() waveform = waveform.reshape(-1, shape[-1]) # default values are consistent with librosa.core.spectrum._spectrogram spec_f = torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True, ) # unpack batch spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if normalized: spec_f /= window.pow(2.).sum().sqrt() if power is not None: if power == 1.0: return spec_f.abs() return spec_f.abs().pow(power) return torch.view_as_real(spec_f) def griffinlim( specgram: Tensor, window: Tensor, n_fft: int, hop_length: int, win_length: int, power: float, normalized: bool, n_iter: int, momentum: float, length: Optional[int], rand_init: bool ) -> Tensor: r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation. Implementation ported from `librosa`. * [1] McFee, Brian, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. "librosa: Audio and music signal analysis in python." In Proceedings of the 14th python in science conference, pp. 18-25. 2015. * [2] <NAME>., <NAME>., & <NAME>. "A fast Griffin-Lim algorithm," IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4), Oct. 2013. 
* [3] <NAME> and <NAME>, "Signal estimation from modified short-time Fourier transform," IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984. Args: specgram (Tensor): A magnitude-only STFT spectrogram of dimension (..., freq, frames) where freq is ``n_fft // 2 + 1``. window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins hop_length (int): Length of hop between STFT windows. ( Default: ``win_length // 2``) win_length (int): Window size. (Default: ``n_fft``) power (float): Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 for power, etc. normalized (bool): Whether to normalize by magnitude after stft. n_iter (int): Number of iteration for phase recovery process. momentum (float): The momentum parameter for fast Griffin-Lim. Setting this to 0 recovers the original Griffin-Lim method. Values near 1 can lead to faster convergence, but above 1 may not converge. length (int or None): Array length of the expected output. rand_init (bool): Initializes phase randomly if True, to zero otherwise. Returns: torch.Tensor: waveform of (..., time), where time equals the ``length`` parameter if given. """ assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum) assert momentum >= 0, 'momentum={} < 0'.format(momentum) if normalized: warnings.warn( "The argument normalized is not used in Griffin-Lim, " "and will be removed in v0.9.0 release. To suppress this warning, " "please use `normalized=False`.") # pack batch shape = specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) specgram = specgram.pow(1 / power) # randomly initialize the phase batch, freq, frames = specgram.size() if rand_init: angles = 2 * math.pi * torch.rand(batch, freq, frames) else: angles = torch.zeros(batch, freq, frames) angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \ .to(dtype=specgram.dtype, device=specgram.device) specgram = specgram.unsqueeze(-1).expand_as(angles) # And initialize the previous iterate to 0 rebuilt = torch.tensor(0.) for _ in range(n_iter): # Store the previous iterate tprev = rebuilt # Invert with our current estimate of the phases inverse = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length).float() # Rebuild the spectrogram rebuilt = torch.view_as_real( torch.stft( input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=True, pad_mode='reflect', normalized=False, onesided=True, return_complex=True, ) ) # Update our phase estimates angles = rebuilt if momentum: angles = angles - tprev.mul_(momentum / (1 + momentum)) angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles)) # Return the final phase estimates waveform = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length) # unpack batch waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) return waveform def amplitude_to_DB( x: Tensor, multiplier: float, amin: float, db_multiplier: float, top_db: Optional[float] = None ) -> Tensor: r"""Turn a spectrogram from the power/amplitude scale to the decibel scale. The output of each tensor in a batch depends on the maximum value of that tensor, and so may return different values for an audio clip split into snippets vs. a full clip. Args: x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take the form `(..., freq, time)`. 
Batched inputs should include a channel dimension and have the form `(batch, channel, freq, time)`. multiplier (float): Use 10. for power and 20. for amplitude amin (float): Number to clamp ``x`` db_multiplier (float): Log10(max(reference value and amin)) top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number is 80. (Default: ``None``) Returns: Tensor: Output tensor in decibel scale """ x_db = multiplier * torch.log10(torch.clamp(x, min=amin)) x_db -= multiplier * db_multiplier if top_db is not None: # Expand batch shape = x_db.size() packed_channels = shape[-3] if x_db.dim() > 2 else 1 x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1]) x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1)) # Repack batch x_db = x_db.reshape(shape) return x_db def DB_to_amplitude( x: Tensor, ref: float, power: float ) -> Tensor: r"""Turn a tensor from the decibel scale to the power/amplitude scale. Args: x (Tensor): Input tensor before being converted to power/amplitude scale. ref (float): Reference which the output will be scaled by. power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude. Returns: Tensor: Output tensor in power/amplitude scale. """ return ref * torch.pow(torch.pow(10.0, 0.1 * x), power) def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float: r"""Convert Hz to Mels. Args: freqs (float): Frequencies in Hz mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: mels (float): Frequency in Mels """ if mel_scale not in ['slaney', 'htk']: raise ValueError('mel_scale should be one of "htk" or "slaney".') if mel_scale == "htk": return 2595.0 * math.log10(1.0 + (freq / 700.0)) # Fill in the linear part f_min = 0.0 f_sp = 200.0 / 3 mels = (freq - f_min) / f_sp # Fill in the log-scale part min_log_hz = 1000.0 min_log_mel = (min_log_hz - f_min) / f_sp logstep = math.log(6.4) / 27.0 if freq >= min_log_hz: mels = min_log_mel + math.log(freq / min_log_hz) / logstep return mels def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor: """Convert mel bin numbers to frequencies. Args: mels (Tensor): Mel frequencies mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: freqs (Tensor): Mels converted in Hz """ if mel_scale not in ['slaney', 'htk']: raise ValueError('mel_scale should be one of "htk" or "slaney".') if mel_scale == "htk": return 700.0 * (10.0**(mels / 2595.0) - 1.0) # Fill in the linear scale f_min = 0.0 f_sp = 200.0 / 3 freqs = f_min + f_sp * mels # And now the nonlinear scale min_log_hz = 1000.0 min_log_mel = (min_log_hz - f_min) / f_sp logstep = math.log(6.4) / 27.0 log_t = (mels >= min_log_mel) freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel)) return freqs def create_fb_matrix( n_freqs: int, f_min: float, f_max: float, n_mels: int, sample_rate: int, norm: Optional[str] = None, mel_scale: str = "htk", ) -> Tensor: r"""Create a frequency bin conversion matrix. Args: n_freqs (int): Number of frequencies to highlight/apply f_min (float): Minimum frequency (Hz) f_max (float): Maximum frequency (Hz) n_mels (int): Number of mel filterbanks sample_rate (int): Sample rate of the audio waveform norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band (area normalization). (Default: ``None``) mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. 
(Default: ``htk``) Returns: Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``) meaning number of frequencies to highlight/apply to x the number of filterbanks. Each column is a filterbank so that assuming there is a matrix A of size (..., ``n_freqs``), the applied result would be ``A * create_fb_matrix(A.size(-1), ...)``. """ if norm is not None and norm != "slaney": raise ValueError("norm must be one of None or 'slaney'") # freq bins # Equivalent filterbank construction by Librosa all_freqs = torch.linspace(0, sample_rate // 2, n_freqs) # calculate mel freq bins m_min = _hz_to_mel(f_min, mel_scale=mel_scale) m_max = _hz_to_mel(f_max, mel_scale=mel_scale) m_pts = torch.linspace(m_min, m_max, n_mels + 2) f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale) # calculate the difference between each mel point and each stft freq point in hertz f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1) slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2) # create overlapping triangles zero = torch.zeros(1) down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels) up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels) fb = torch.max(zero, torch.min(down_slopes, up_slopes)) if norm is not None and norm == "slaney": # Slaney-style mel is scaled to be approx constant energy per channel enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels]) fb *= enorm.unsqueeze(0) if (fb.max(dim=0).values == 0.).any(): warnings.warn( "At least one mel filterbank has all zero values. " f"The value for `n_mels` ({n_mels}) may be set too high. " f"Or, the value for `n_freqs` ({n_freqs}) may be set too low." ) return fb def create_dct( n_mfcc: int, n_mels: int, norm: Optional[str] ) -> Tensor: r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``), normalized depending on norm. Args: n_mfcc (int): Number of mfc coefficients to retain n_mels (int): Number of mel filterbanks norm (str or None): Norm to use (either 'ortho' or None) Returns: Tensor: The transformation matrix, to be right-multiplied to row-wise data of size (``n_mels``, ``n_mfcc``). """ # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n = torch.arange(float(n_mels)) k = torch.arange(float(n_mfcc)).unsqueeze(1) dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels) if norm is None: dct *= 2.0 else: assert norm == "ortho" dct[0] *= 1.0 / math.sqrt(2.0) dct *= math.sqrt(2.0 / float(n_mels)) return dct.t() def mu_law_encoding( x: Tensor, quantization_channels: int ) -> Tensor: r"""Encode signal based on mu-law companding. For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This algorithm assumes the signal has been scaled to between -1 and 1 and returns a signal encoded with values from 0 to quantization_channels - 1. Args: x (Tensor): Input tensor quantization_channels (int): Number of channels Returns: Tensor: Input after mu-law encoding """ mu = quantization_channels - 1.0 if not x.is_floating_point(): x = x.to(torch.float) mu = torch.tensor(mu, dtype=x.dtype) x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu) x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64) return x_mu def mu_law_decoding( x_mu: Tensor, quantization_channels: int ) -> Tensor: r"""Decode mu-law encoded signal. For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an input with values between 0 and quantization_channels - 1 and returns a signal scaled between -1 and 1. 
Args: x_mu (Tensor): Input tensor quantization_channels (int): Number of channels Returns: Tensor: Input after mu-law decoding """ mu = quantization_channels - 1.0 if not x_mu.is_floating_point(): x_mu = x_mu.to(torch.float) mu = torch.tensor(mu, dtype=x_mu.dtype) x = ((x_mu) / mu) * 2 - 1.0 x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu return x def complex_norm( complex_tensor: Tensor, power: float = 1.0 ) -> Tensor: r"""Compute the norm of complex tensor input. Args: complex_tensor (Tensor): Tensor shape of `(..., complex=2)` power (float): Power of the norm. (Default: `1.0`). Returns: Tensor: Power of the normed input tensor. Shape of `(..., )` """ # Replace by torch.norm once issue is fixed # https://github.com/pytorch/pytorch/issues/34279 return complex_tensor.pow(2.).sum(-1).pow(0.5 * power) def angle( complex_tensor: Tensor ) -> Tensor: r"""Compute the angle of complex tensor input. Args: complex_tensor (Tensor): Tensor shape of `(..., complex=2)` Return: Tensor: Angle of a complex tensor. Shape of `(..., )` """ return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0]) def magphase( complex_tensor: Tensor, power: float = 1.0 ) -> Tuple[Tensor, Tensor]: r"""Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase. Args: complex_tensor (Tensor): Tensor shape of `(..., complex=2)` power (float): Power of the norm. (Default: `1.0`) Returns: (Tensor, Tensor): The magnitude and phase of the complex tensor """ mag = complex_norm(complex_tensor, power) phase = angle(complex_tensor) return mag, phase def phase_vocoder( complex_specgrams: Tensor, rate: float, phase_advance: Tensor ) -> Tensor: r"""Given a STFT tensor, speed up in time without modifying pitch by a factor of ``rate``. Args: complex_specgrams (Tensor): Dimension of `(..., freq, time, complex=2)` rate (float): Speed-up factor phase_advance (Tensor): Expected phase advance in each bin. 
Dimension of (freq, 1) Returns: Tensor: Complex Specgrams Stretch with dimension of `(..., freq, ceil(time/rate), complex=2)` Example >>> freq, hop_length = 1025, 512 >>> # (channel, freq, time, complex=2) >>> complex_specgrams = torch.randn(2, freq, 300, 2) >>> rate = 1.3 # Speed up by 30% >>> phase_advance = torch.linspace( >>> 0, math.pi * hop_length, freq)[..., None] >>> x = phase_vocoder(complex_specgrams, rate, phase_advance) >>> x.shape # with 231 == ceil(300 / 1.3) torch.Size([2, 1025, 231, 2]) """ # pack batch shape = complex_specgrams.size() complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:])) time_steps = torch.arange(0, complex_specgrams.size(-2), rate, device=complex_specgrams.device, dtype=complex_specgrams.dtype) alphas = time_steps % 1.0 phase_0 = angle(complex_specgrams[..., :1, :]) # Time Padding complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2]) # (new_bins, freq, 2) complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long()) complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long()) angle_0 = angle(complex_specgrams_0) angle_1 = angle(complex_specgrams_1) norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1) norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1) phase = angle_1 - angle_0 - phase_advance phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi)) # Compute Phase Accum phase = phase + phase_advance phase = torch.cat([phase_0, phase[..., :-1]], dim=-1) phase_acc = torch.cumsum(phase, -1) mag = alphas * norm_1 + (1 - alphas) * norm_0 real_stretch = mag * torch.cos(phase_acc) imag_stretch = mag * torch.sin(phase_acc) complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1) # unpack batch complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch def mask_along_axis_iid( specgrams: Tensor, mask_param: int, mask_value: float, axis: int ) -> Tensor: r""" Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``. Args: specgrams (Tensor): Real spectrograms (batch, channel, freq, time) mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param] mask_value (float): Value to assign to the masked columns axis (int): Axis to apply masking on (2 -> frequency, 3 -> time) Returns: Tensor: Masked spectrograms of dimensions (batch, channel, freq, time) """ if axis != 2 and axis != 3: raise ValueError('Only Frequency and Time masking are supported') device = specgrams.device dtype = specgrams.dtype value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value) # Create broadcastable mask mask_start = min_value[..., None, None] mask_end = (min_value + value)[..., None, None] mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype) # Per batch example masking specgrams = specgrams.transpose(axis, -1) specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value) specgrams = specgrams.transpose(axis, -1) return specgrams def mask_along_axis( specgram: Tensor, mask_param: int, mask_value: float, axis: int ) -> Tensor: r""" Apply a mask along ``axis``. 
Mask will be applied from indices ``[v_0, v_0 + v)``, where ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``. All examples will have the same mask interval. Args: specgram (Tensor): Real spectrogram (channel, freq, time) mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param] mask_value (float): Value to assign to the masked columns axis (int): Axis to apply masking on (1 -> frequency, 2 -> time) Returns: Tensor: Masked spectrogram of dimensions (channel, freq, time) """ # pack batch shape = specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) value = torch.rand(1) * mask_param min_value = torch.rand(1) * (specgram.size(axis) - value) mask_start = (min_value.long()).squeeze() mask_end = (min_value.long() + value.long()).squeeze() assert mask_end - mask_start < mask_param if axis == 1: specgram[:, mask_start:mask_end] = mask_value elif axis == 2: specgram[:, :, mask_start:mask_end] = mask_value else: raise ValueError('Only Frequency and Time masking are supported') # unpack batch specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:]) return specgram def compute_deltas( specgram: Tensor, win_length: int = 5, mode: str = "replicate" ) -> Tensor: r"""Compute delta coefficients of a tensor, usually a spectrogram: .. math:: d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2} where :math:`d_t` is the deltas at time :math:`t`, :math:`c_t` is the spectrogram coeffcients at time :math:`t`, :math:`N` is ``(win_length-1)//2``. Args: specgram (Tensor): Tensor of audio of dimension (..., freq, time) win_length (int, optional): The window length used for computing delta (Default: ``5``) mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``) Returns: Tensor: Tensor of deltas of dimension (..., freq, time) Example >>> specgram = torch.randn(1, 40, 1000) >>> delta = compute_deltas(specgram) >>> delta2 = compute_deltas(delta) """ device = specgram.device dtype = specgram.dtype # pack batch shape = specgram.size() specgram = specgram.reshape(1, -1, shape[-1]) assert win_length >= 3 n = (win_length - 1) // 2 # twice sum of integer squared denom = n * (n + 1) * (2 * n + 1) / 3 specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode) kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1) output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom # unpack batch output = output.reshape(shape) return output def _compute_nccf( waveform: Tensor, sample_rate: int, frame_time: float, freq_low: int ) -> Tensor: r""" Compute Normalized Cross-Correlation Function (NCCF). .. math:: \phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}}, where :math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`, :math:`w` is the waveform, :math:`N` is the length of a frame, :math:`b_i` is the beginning of frame :math:`i`, :math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`. 
""" EPSILON = 10 ** (-9) # Number of lags to check lags = int(math.ceil(sample_rate / freq_low)) frame_size = int(math.ceil(sample_rate * frame_time)) waveform_length = waveform.size()[-1] num_of_frames = int(math.ceil(waveform_length / frame_size)) p = lags + num_of_frames * frame_size - waveform_length waveform = torch.nn.functional.pad(waveform, (0, p)) # Compute lags output_lag = [] for lag in range(1, lags + 1): s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] output_frames = ( (s1 * s2).sum(-1) / (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2) / (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2) ) output_lag.append(output_frames.unsqueeze(-1)) nccf = torch.cat(output_lag, -1) return nccf def _combine_max( a: Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor], thresh: float = 0.99 ) -> Tuple[Tensor, Tensor]: """ Take value from first if bigger than a multiplicative factor of the second, elementwise. """ mask = (a[0] > thresh * b[0]) values = mask * a[0] + ~mask * b[0] indices = mask * a[1] + ~mask * b[1] return values, indices def _find_max_per_frame( nccf: Tensor, sample_rate: int, freq_high: int ) -> Tensor: r""" For each frame, take the highest value of NCCF, apply centered median smoothing, and convert to frequency. Note: If the max among all the lags is very close to the first half of lags, then the latter is taken. """ lag_min = int(math.ceil(sample_rate / freq_high)) # Find near enough max that is smallest best = torch.max(nccf[..., lag_min:], -1) half_size = nccf.shape[-1] // 2 half = torch.max(nccf[..., lag_min:half_size], -1) best = _combine_max(half, best) indices = best[1] # Add back minimal lag indices += lag_min # Add 1 empirical calibration offset indices += 1 return indices def _median_smoothing( indices: Tensor, win_length: int ) -> Tensor: r""" Apply median smoothing to the 1D tensor over the given window. """ # Centered windowed pad_length = (win_length - 1) // 2 # "replicate" padding in any dimension indices = torch.nn.functional.pad( indices, (pad_length, 0), mode="constant", value=0. ) indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1) roll = indices.unfold(-1, win_length, 1) values, _ = torch.median(roll, -1) return values def detect_pitch_frequency( waveform: Tensor, sample_rate: int, frame_time: float = 10 ** (-2), win_length: int = 30, freq_low: int = 85, freq_high: int = 3400, ) -> Tensor: r"""Detect pitch frequency. It is implemented using normalized cross-correlation function and median smoothing. Args: waveform (Tensor): Tensor of audio of dimension (..., freq, time) sample_rate (int): The sample rate of the waveform (Hz) frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``). win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``). freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``). freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``). 
Returns: Tensor: Tensor of freq of dimension (..., frame) """ # pack batch shape = list(waveform.size()) waveform = waveform.reshape([-1] + shape[-1:]) nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low) indices = _find_max_per_frame(nccf, sample_rate, freq_high) indices = _median_smoothing(indices, win_length) # Convert indices to frequency EPSILON = 10 ** (-9) freq = sample_rate / (EPSILON + indices.to(torch.float)) # unpack batch freq = freq.reshape(shape[:-1] + list(freq.shape[-1:])) return freq def sliding_window_cmn( waveform: Tensor, cmn_window: int = 600, min_cmn_window: int = 100, center: bool = False, norm_vars: bool = False, ) -> Tensor: r""" Apply sliding-window cepstral mean (and optionally variance) normalization per utterance. Args: waveform (Tensor): Tensor of audio of dimension (..., freq, time) cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600) min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start). Only applicable if center == false, ignored if center==true (int, default = 100) center (bool, optional): If true, use a window centered on the current frame (to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false) norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false) Returns: Tensor: Tensor of freq of dimension (..., frame) """ input_shape = waveform.shape num_frames, num_feats = input_shape[-2:] waveform = waveform.view(-1, num_frames, num_feats) num_channels = waveform.shape[0] dtype = waveform.dtype device = waveform.device last_window_start = last_window_end = -1 cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cmn_waveform = torch.zeros( num_channels, num_frames, num_feats, dtype=dtype, device=device) for t in range(num_frames): window_start = 0 window_end = 0 if center: window_start = t - cmn_window // 2 window_end = window_start + cmn_window else: window_start = t - cmn_window window_end = t + 1 if window_start < 0: window_end -= window_start window_start = 0 if not center: if window_end > t: window_end = max(t + 1, min_cmn_window) if window_end > num_frames: window_start -= (window_end - num_frames) window_end = num_frames if window_start < 0: window_start = 0 if last_window_start == -1: input_part = waveform[:, window_start: window_end - window_start, :] cur_sum += torch.sum(input_part, 1) if norm_vars: cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :] else: if window_start > last_window_start: frame_to_remove = waveform[:, last_window_start, :] cur_sum -= frame_to_remove if norm_vars: cur_sumsq -= (frame_to_remove ** 2) if window_end > last_window_end: frame_to_add = waveform[:, last_window_end, :] cur_sum += frame_to_add if norm_vars: cur_sumsq += (frame_to_add ** 2) window_frames = window_end - window_start last_window_start = window_start last_window_end = window_end cmn_waveform[:, t, :] = waveform[:, t, :] - cur_sum / window_frames if norm_vars: if window_frames == 1: cmn_waveform[:, t, :] = torch.zeros( num_channels, num_feats, dtype=dtype, device=device) else: variance = cur_sumsq variance = variance / window_frames variance -= ((cur_sum ** 2) / (window_frames ** 2)) variance = torch.pow(variance, -0.5) cmn_waveform[:, t, :] *= variance cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats)) if len(input_shape) == 2: cmn_waveform = 
cmn_waveform.squeeze(0) return cmn_waveform def spectral_centroid( waveform: Tensor, sample_rate: int, pad: int, window: Tensor, n_fft: int, hop_length: int, win_length: int, ) -> Tensor: r""" Compute the spectral centroid for each channel along the time axis. The spectral centroid is defined as the weighted average of the frequency values, weighted by their magnitude. Args: waveform (Tensor): Tensor of audio of dimension (..., time) sample_rate (int): Sample rate of the audio waveform pad (int): Two sided padding of signal window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT hop_length (int): Length of hop between STFT windows win_length (int): Window size Returns: Tensor: Dimension (..., time) """ specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1., normalized=False) freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2, device=specgram.device).reshape((-1, 1)) freq_dim = -2 return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) @_mod_utils.requires_sox() def apply_codec( waveform: Tensor, sample_rate: int, format: str, channels_first: bool = True, compression: Optional[float] = None, encoding: Optional[str] = None, bits_per_sample: Optional[int] = None, ) -> Tensor: r""" Apply codecs as a form of augmentation. Args: waveform (Tensor): Audio data. Must be 2 dimensional. See also ```channels_first```. sample_rate (int): Sample rate of the audio waveform. format (str): File format. channels_first (bool): When True, both the input and output Tensor have dimension ``[channel, time]``. Otherwise, they have dimension ``[time, channel]``. compression (float): Used for formats other than WAV. For mor details see :py:func:`torchaudio.backend.sox_io_backend.save`. encoding (str, optional): Changes the encoding for the supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample (int, optional): Changes the bit depth for the supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns: torch.Tensor: Resulting Tensor. If ``channels_first=True``, it has ``[channel, time]`` else ``[time, channel]``. """ bytes = io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes, waveform, sample_rate, channels_first, compression, format, encoding, bits_per_sample ) bytes.seek(0) augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file( bytes, effects=[["rate", f"{sample_rate}"]], channels_first=channels_first, format=format) return augmented @_mod_utils.requires_kaldi() def compute_kaldi_pitch( waveform: torch.Tensor, sample_rate: float, frame_length: float = 25.0, frame_shift: float = 10.0, min_f0: float = 50, max_f0: float = 400, soft_min_f0: float = 10.0, penalty_factor: float = 0.1, lowpass_cutoff: float = 1000, resample_frequency: float = 4000, delta_pitch: float = 0.005, nccf_ballast: float = 7000, lowpass_filter_width: int = 1, upsample_filter_width: int = 5, max_frames_latency: int = 0, frames_per_chunk: int = 0, simulate_first_pass_online: bool = False, recompute_frame: int = 500, snip_edges: bool = True, ) -> torch.Tensor: """Extract pitch based on method described in [1]. This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi. Args: waveform (Tensor): The input waveform of shape `(..., time)`. sample_rate (float): Sample rate of `waveform`. frame_length (float, optional): Frame length in milliseconds. 
(default: 25.0) frame_shift (float, optional): Frame shift in milliseconds. (default: 10.0) min_f0 (float, optional): Minimum F0 to search for (Hz) (default: 50.0) max_f0 (float, optional): Maximum F0 to search for (Hz) (default: 400.0) soft_min_f0 (float, optional): Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0) penalty_factor (float, optional): Cost factor for FO change. (default: 0.1) lowpass_cutoff (float, optional): Cutoff frequency for LowPass filter (Hz) (default: 1000) resample_frequency (float, optional): Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff. (default: 4000) delta_pitch( float, optional): Smallest relative change in pitch that our algorithm measures. (default: 0.005) nccf_ballast (float, optional): Increasing this factor reduces NCCF for quiet frames (default: 7000) lowpass_filter_width (int, optional): Integer that determines filter width of lowpass filter, more gives sharper filter. (default: 1) upsample_filter_width (int, optional): Integer that determines filter width when upsampling NCCF. (default: 5) max_frames_latency (int, optional): Maximum number of frames of latency that we allow pitch tracking to introduce into the feature processing (affects output only if ``frames_per_chunk > 0`` and ``simulate_first_pass_online=True``) (default: 0) frames_per_chunk (int, optional): The number of frames used for energy normalization. (default: 0) simulate_first_pass_online (bool, optional): If true, the function will output features that correspond to what an online decoder would see in the first pass of decoding -- not the final version of the features, which is the default. (default: False) Relevant if ``frames_per_chunk > 0``. recompute_frame (int, optional): Only relevant for compatibility with online pitch extraction. A non-critical parameter; the frame at which we recompute some of the forward pointers, after revising our estimate of the signal energy. Relevant if ``frames_per_chunk > 0``. (default: 500) snip_edges (bool, optional): If this is set to false, the incomplete frames near the ending edge won't be snipped, so that the number of frames is the file size divided by the frame-shift. This makes different types of features give the same number of frames. (default: True) Returns: Tensor: Pitch feature. Shape: ``(batch, frames 2)`` where the last dimension corresponds to pitch and NCCF. Reference: - A pitch extraction algorithm tuned for automatic speech recognition <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME> 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049. """ shape = waveform.shape waveform = waveform.reshape(-1, shape[-1]) result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform, sample_rate, frame_length, frame_shift, min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast, lowpass_filter_width, upsample_filter_width, max_frames_latency, frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges, ) result = result.reshape(shape[:-1] + result.shape[-2:]) return result
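The masking and delta helpers in the file above are easiest to understand from a call site. The sketch below is not part of the original file; it assumes these functions are importable via `torchaudio.functional` (whose public API they match) and uses synthetic random tensors purely for illustration.

import torch
import torchaudio.functional as F

# Synthetic magnitude spectrograms: (channel, freq, time) and (batch, channel, freq, time).
specgram = torch.randn(2, 201, 400).abs()
specgrams = torch.randn(4, 2, 201, 400).abs()

# One mask interval shared by all channels (axis 2 == time for a 3-D input).
masked_shared = F.mask_along_axis(specgram, mask_param=80, mask_value=0.0, axis=2)

# Independent mask intervals per (batch, channel) example (axis 3 == time for a 4-D input).
masked_iid = F.mask_along_axis_iid(specgrams, mask_param=80, mask_value=0.0, axis=3)

# First- and second-order deltas along the time axis, as in the compute_deltas docstring.
delta = F.compute_deltas(specgram)
delta2 = F.compute_deltas(delta)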
2.453125
2
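As a further hedged illustration of the pitch and normalization utilities above: the sanity check below assumes detect_pitch_frequency and sliding_window_cmn behave as documented (they mirror torchaudio.functional); the 440 Hz tone and random feature matrix are synthetic.

import math
import torch
import torchaudio.functional as F

sample_rate = 16000
t = torch.arange(0, 1.0, 1.0 / sample_rate)
waveform = torch.sin(2 * math.pi * 440.0 * t).unsqueeze(0)   # (1, time): a 1 s, 440 Hz tone

# NCCF-based pitch track; values should sit close to 440 Hz for this signal.
pitch = F.detect_pitch_frequency(waveform, sample_rate)
print(pitch.shape, pitch.mean())

# Sliding-window cepstral mean normalization of a (channel, frames, features) tensor.
feats = torch.randn(1, 300, 13)
normalized = F.sliding_window_cmn(feats, cmn_window=600, center=True)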
src/status_node.py
Faust-Wang/vswarm
21
1498
#!/usr/bin/env python3 from __future__ import absolute_import, division, print_function import curses import sys from collections import deque from datetime import datetime import numpy as np import rospy from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus from geometry_msgs.msg import PoseStamped from mavros_msgs.msg import ExtendedState, PositionTarget, State # StatusText from scipy.spatial.transform import Rotation as R from sensor_msgs.msg import BatteryState, Image, NavSatFix GPS_FIX_DICT = { 0: ('No GPS', curses.COLOR_RED), 1: ('No fix', curses.COLOR_RED), 2: ('2D lock', curses.COLOR_BLUE), 3: ('3D lock', curses.COLOR_BLUE), 4: ('DGPS', curses.COLOR_MAGENTA), 5: ('RTK float', curses.COLOR_YELLOW), 6: ('RTK fix', curses.COLOR_GREEN) } def get_color(color): return curses.color_pair(color) def frequency_from_messages(messages): durations = [] for i in range(len(messages) - 1): duration = messages[i + 1].header.stamp - messages[i].header.stamp durations.append(duration.to_sec()) frequency = 1 / np.mean(durations) if np.isnan(frequency): return 0 return frequency class StatusNode: def __init__(self, screen): rospy.init_node('status_node', argv=sys.argv) self.rate = rospy.get_param('~rate', default=1.0) # Curses setup self.screen = curses.initscr() self.rows, self.cols = self.screen.getmaxyx() height_status = 15 self.status = curses.newwin(height_status, self.cols, 1, 2) # self.console = curses.newwin(self.rows - height_status, self.cols, 12, 2) self.lines = 0 self.text = '' self.screen.keypad(True) curses.curs_set(False) # Hide cursor colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN, curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED, curses.COLOR_WHITE, curses.COLOR_YELLOW] # Curses color setup curses.use_default_colors() for color in colors: curses.init_pair(color, color, -1) # Default variables self.status_battery_perc = None self.state = State() self.state_sub = rospy.Subscriber('mavros/state', State, callback=self.state_callback, queue_size=1) self.battery = BatteryState() self.battery_sub = rospy.Subscriber('mavros/battery', BatteryState, callback=self.battery_callback, queue_size=1) self.extended = ExtendedState() self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState, callback=self.extended_callback, queue_size=1) # self.statustext = StatusText() # self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText, # callback=self.statustext_callback, # queue_size=1) self.gps = NavSatFix() self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix, callback=self.gps_callback, queue_size=1) self.local_pose = PoseStamped() self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped, callback=self.local_pose_callback, queue_size=1) self.global_pose = PoseStamped() self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped, callback=self.global_pose_callback, queue_size=1) self.diagnostics = DiagnosticArray() self.diagnostic_gps = DiagnosticStatus() self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray, callback=self.diagnostics_callback, queue_size=1) self.setpoint = PositionTarget() self.setpoint_sub = rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget, callback=self.setpoint_callback, queue_size=1) self.cameras = ['front', 'right', 'back', 'left'] self.image_subscribers = [] self.images = {c: deque(maxlen=10) for c in self.cameras} for camera in self.cameras: topic = f'camera_{camera}/image_raw' subscriber = rospy.Subscriber(topic, 
Image, callback=self.image_callback, callback_args=camera, queue_size=1, buff_size=2 ** 24) self.image_subscribers.append(subscriber) def battery_callback(self, battery_msg): if battery_msg.location == 'id0': self.battery = battery_msg def state_callback(self, state_msg): self.state = state_msg def extended_callback(self, extended_msg): self.extended = extended_msg def diagnostics_callback(self, diagnostics_msg): for status in diagnostics_msg.status: if 'GPS' in status.name: self.diagnostic_gps = status def gps_callback(self, gps_msg): self.gps = gps_msg def local_pose_callback(self, pose_msg): self.local_pose = pose_msg def global_pose_callback(self, pose_msg): self.global_pose = pose_msg def setpoint_callback(self, setpoint_msg): self.setpoint = setpoint_msg def image_callback(self, image_msg, camera): self.images[camera].append(image_msg) def statustext_callback(self, statustext_msg): screen = self.console time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S') # time_str = datetime.datetime.fromtimestamp(unix_time) text = statustext_msg.text severity = statustext_msg.severity msg = statustext_msg severity_red = [msg.EMERGENCY, msg.ALERT, msg.CRITICAL, msg.ERROR] severity_yellow = [msg.WARNING, msg.NOTICE] severity_neutral = [msg.INFO, msg.DEBUG] color = curses.COLOR_CYAN if severity in severity_red: color = curses.COLOR_RED elif severity in severity_yellow: color = curses.COLOR_YELLOW elif severity in severity_neutral: color = curses.COLOR_WHITE self.text = f'{time_str}: {text} ({color})' # screen.addstr(self.lines, 0, log, get_color(color)) self.lines += 1 screen.refresh() def print_status(self): screen = self.status screen.clear() # rospy.loginfo(status) # print(status) x_tab = 0 x_indent = 14 row = 0 # Battery battery_percentage = int(self.battery.percentage * 100) color = curses.COLOR_CYAN if battery_percentage > 50: color = curses.COLOR_GREEN elif battery_percentage > 25: color = curses.COLOR_YELLOW elif battery_percentage > 0: color = curses.COLOR_RED status_battery = str(battery_percentage) + '%' screen.addstr(row, x_tab, 'Battery: ') screen.addstr(row, x_indent, status_battery, get_color(color)) row += 1 # Armed if self.state.armed: color = curses.COLOR_RED status_armed = 'Yes' else: color = curses.COLOR_GREEN status_armed = 'No' screen.addstr(row, x_tab, 'Armed: ') screen.addstr(row, x_indent, status_armed, get_color(color)) row += 1 # Mode color = curses.COLOR_CYAN mode = self.state.mode if mode.startswith('AUTO'): mode = mode.split('.')[-1] mode = mode.capitalize() if mode == 'Offboard': color = curses.COLOR_RED else: color = curses.COLOR_BLUE if mode == '': mode = 'None' elif mode == 'Posctl': mode = 'Position' elif mode == 'Rtl': mode = 'Return' status_mode = '{}'.format(mode) screen.addstr(row, x_tab, 'Mode: ') screen.addstr(row, x_indent, status_mode, get_color(color)) row += 1 # Extended status if self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR: status_extended = 'Air' color = curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_LANDING: status_extended = 'Landed' color = curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND: status_extended = 'Ground' color = curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF: status_extended = 'Takeoff' color = curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED: status_extended = 'Undefined' color = curses.COLOR_CYAN screen.addstr(row, x_tab, 'State: ') screen.addstr(row, x_indent, 
status_extended, get_color(color)) row += 1 # GPS info satellites = 0 fix_type, color = GPS_FIX_DICT[0] for value in self.diagnostic_gps.values: if value.key == 'Satellites visible': satellites = value.value elif value.key == 'Fix type': fix_type, color = GPS_FIX_DICT[int(value.value)] screen.addstr(row, x_tab, 'GPS info: ') screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color)) row += 2 # GPS pos latitude = self.gps.latitude longitude = self.gps.longitude altitude = round(self.gps.altitude, 2) status_gps = f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)' screen.addstr(row, x_tab, 'GPS pos: ') screen.addstr(row, x_indent, status_gps) row += 1 # Local pose p = self.local_pose.pose.position q = self.local_pose.pose.orientation quaternion = [q.x, q.y, q.z, q.w] try: rot = R.from_quat(quaternion) except ValueError: rot = R.from_euler('zyx', [0.0, 0.0, 0.0]) yaw, pitch, roll = rot.as_euler('zyx', degrees=True) x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2) yaw, pitch, roll = int(yaw), int(pitch), int(roll) screen.addstr(row, x_tab, 'Local pos: ') screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)') row += 1 # Global pose p = self.global_pose.pose.position q = self.global_pose.pose.orientation quaternion = [q.x, q.y, q.z, q.w] try: rot = R.from_quat(quaternion) except ValueError: rot = R.from_euler('zyx', [0.0, 0.0, 0.0]) yaw, pitch, roll = rot.as_euler('zyx', degrees=True) x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2) yaw, pitch, roll = int(yaw), int(pitch), int(roll) screen.addstr(row, x_tab, 'Global pos: ') screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)') row += 1 # Setpoint v = self.setpoint.velocity vx, vy, vz = round(v.x, 2), round(v.y, 2), round(v.z, 2) yaw = int(np.rad2deg(self.setpoint.yaw)) screen.addstr(row, x_tab, 'Setpoint: ') screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)') row += 1 # Cameras freqs = {c: 0 for c in self.cameras} for cam, messages in self.images.items(): freqs[cam] = frequency_from_messages(messages) ff, fr, fb, fl = [int(round(v)) for k, v in freqs.items()] screen.addstr(row, x_tab, 'Cameras: ') screen.addstr(row, x_indent, f'{ff} {fr} {fb} {fl} (front right back left [Hz])') row += 1 screen.refresh() self.screen.refresh() def run(self): rate = rospy.Rate(self.rate) try: while not rospy.is_shutdown(): self.print_status() rate.sleep() except rospy.ROSInterruptException: curses.nocbreak() self.screen.keypad(False) curses.echo() def curses_main(screen): StatusNode(screen).run() def main(): try: curses.wrapper(curses_main) except rospy.ROSInterruptException: pass if __name__ == '__main__': main()
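The status screen above derives per-camera rates from message header stamps via frequency_from_messages. The following self-contained sketch (no ROS required) mirrors that computation with hypothetical stand-in objects; FakeStamp and the 50 ms spacing are invented for illustration only.

from types import SimpleNamespace
import numpy as np

class FakeStamp:
    """Minimal stand-in for rospy.Time: only subtraction and to_sec() are needed here."""
    def __init__(self, seconds):
        self.seconds = seconds
    def __sub__(self, other):
        return FakeStamp(self.seconds - other.seconds)
    def to_sec(self):
        return self.seconds

# Ten fake messages spaced 50 ms apart -> an expected rate of about 20 Hz.
messages = [SimpleNamespace(header=SimpleNamespace(stamp=FakeStamp(i * 0.05))) for i in range(10)]

# Same logic as frequency_from_messages(): mean inter-message duration, inverted.
durations = [(messages[i + 1].header.stamp - messages[i].header.stamp).to_sec()
             for i in range(len(messages) - 1)]
print(1.0 / np.mean(durations))   # ~20.0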
2.140625
2
bin/boxplot_param.py
mo-schmid/MIALab
0
1499
<filename>bin/boxplot_param.py import argparse import os import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pandas as pd from pathlib import Path class ResultParam(): """Result Parameter""" def __init__(self, path: Path, param_str: str): """Initializes a new instance of the Result Parameter Args: path (Path): path to the desired result file param_str (str): string containing the parameters used in the postprocessing """ self.path = path self.param_str = param_str def set_box_format(bp, color): plt.setp(bp['boxes'], color=color) plt.setp(bp['whiskers'], color=color) plt.setp(bp['caps'], color=color) plt.setp(bp['caps'], linewidth=1) plt.setp(bp['medians'], color='red') plt.setp(bp['medians'], linewidth=1.5) plt.setp(bp['fliers'], marker='.') plt.setp(bp['fliers'], markerfacecolor='black') plt.setp(bp['fliers'], alpha=1) def boxplot(file_path: str, data: list, title: str, x_label: str, y_label: str, x_ticks: tuple, min_: float = None, max_: float = None): if len(data) != len(x_ticks): raise ValueError('arguments data and x_ticks need to have same length') fig = plt.figure( figsize=( 2 *1.5, 5*1.5)) # figsize defaults to (width, height) =(6.4, 4.8), # for boxplots, we want the ratio to be inversed ax = fig.add_subplot(111) # create an axes instance (nrows=ncols=index) bp = ax.boxplot(data, widths=0.6) set_box_format(bp, '000') # set and format litle, labels, and ticks ax.set_title(title, fontweight='bold', fontsize=20) ax.set_ylabel(y_label, fontweight='bold', fontsize=18) # ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5) # we don't use the x-label since it should be clear from the x-ticks ax.yaxis.set_tick_params(labelsize=12) ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18, 'fontweight': 'bold'}, rotation=45) # remove frame ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # thicken frame ax.spines['left'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2) # adjust min and max if provided if min_ is not None or max_ is not None: min_original, max_original = ax.get_ylim() min_ = min_ if min_ is not None and min_ < min_original else min_original max_ = max_ if max_ is not None and max_ > max_original else max_original ax.set_ylim(min_, max_) plt.savefig(file_path, bbox_inches="tight") plt.close() def format_data(data, label: str, metric: str): return data[data['LABEL'] == label][metric].values def metric_to_readable_text(metric: str): if metric == 'DICE': return 'Dice coefficient' elif metric == 'HDRFDST': return 'Hausdorff distance (mm)' else: raise ValueError('Metric "{}" unknown'.format(metric)) def main(results: [ResultParam], plot_dir: Path): """generates box plots comparing two or more result sets for all labels Args: results ([ResultParam]): a list of result parameters (Path and description) plot_dir: ath to the desired result folder to store the qq-plots """ metrics = ('DICE', 'HDRFDST') # the metrics we want to plot the results for metrics_yaxis_limits = ((0.0, 1.0), (0.0, 18)) # tuples of y-axis limits (min, max) for each metric. Use None if unknown labels = ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') # the brain structures/tissues you are interested in # load the CSVs. We usually want to compare different methods (e.g. a set of different features), therefore, # we load two CSV (for simplicity, it is the same here) # todo: adapt to your needs to compare different methods (e.g. 
load different CSVs) dfs = [] methods = [] for res in results: dfs.append(pd.read_csv(res.path, sep=';')) methods.append(res.param_str) # todo: read parameter values from text file, use them to plot the information about the parameter # some parameters to improve the plot's readability title = '{}' for label in labels: for metric, (min_, max_) in zip(metrics, metrics_yaxis_limits): boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)), [format_data(df, label, metric) for df in dfs], title.format(label), 'Method', metric_to_readable_text(metric), methods, min_, max_ ) if __name__ == '__main__': results = [] results.append(ResultParam(Path(Path.cwd() / "mia-result/gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv"), "no pp")) results.append(ResultParam(Path(Path.cwd() / "mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv"), "with pp")) main(results, Path(Path.cwd() / 'mia-result/plot_results'))
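A hedged usage sketch for the helpers defined in this script: the Dice values, method labels, and output filename below are synthetic and chosen only to show the expected call shape of boxplot() and metric_to_readable_text().

import numpy as np

rng = np.random.default_rng(42)
dice_no_pp = rng.normal(0.80, 0.05, size=30).clip(0.0, 1.0)
dice_with_pp = rng.normal(0.84, 0.04, size=30).clip(0.0, 1.0)

boxplot('example_Hippocampus_DICE.png',
        [dice_no_pp, dice_with_pp],
        'Hippocampus',
        'Method',
        metric_to_readable_text('DICE'),
        ('no pp', 'with pp'),
        0.0, 1.0)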
3.03125
3