column      type            range / values
code        stringlengths   3 .. 1.05M
repo_name   stringlengths   5 .. 104
path        stringlengths   4 .. 251
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           3 .. 1.05M
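Each row below pairs one Python source file (code) with its repository metadata. As a minimal sketch of how a dump with this schema might be inspected, assuming it is published as a Hugging Face-style dataset (the identifier "example/python-code-dump" is hypothetical, not the real dataset name):

# Minimal sketch, assuming a Hugging Face datasets-style layout.
# "example/python-code-dump" is a hypothetical identifier, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("example/python-code-dump", split="train")

# Each row carries the six columns summarised above.
for row in ds.select(range(3)):
    print(row["repo_name"], row["path"], row["license"], row["size"])
    print(row["code"][:200])  # preview the first 200 characters of the file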
#pylint: disable-msg=R0903,R0904
"""#10026"""

__revision__ = 1

from gtk import VBox
import gtk


class FooButton(gtk.Button):
    """extend gtk.Button"""

    def extend(self):
        """hop"""
        print self
        print gtk.Button
        print VBox
dbbhattacharya/kitsune
vendor/packages/pylint/test/regrtest_data/pygtk_import.py
Python
bsd-3-clause
241
#!/usr/bin/python
# -*- Mode: python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-

import sys
import os
import shutil

from foldersync.storage import Status


class LocalStorage(object):
  def __init__(self):
    pass

  def put(self, localpath, remotepath):
    if not os.path.exists(localpath):
      return
    if os.path.isdir(localpath):
      try:
        os.mkdir(remotepath)
      except IOError:
        pass
    else:
      shutil.copy(localpath, remotepath)

  def stat(self, remotepath):
    try:
      status = os.stat(remotepath)
      return Status(status.st_mtime, status.st_size)
    except OSError:
      return None
lukacu/foldersync
foldersync/storage/local.py
Python
gpl-3.0
650
from office365.runtime.client_object import ClientObject
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.paths.resource_path import ResourcePath
from office365.sharepoint.userprofiles.userProfile import UserProfile


class ProfileLoader(ClientObject):

    def __init__(self, context):
        super(ProfileLoader, self).__init__(
            context, ResourcePath("SP.UserProfiles.ProfileLoader.GetProfileLoader"))

    @staticmethod
    def get_profile_loader(context):
        """
        :type: office365.sharepoint.client_context.ClientContext context
        """
        result = ProfileLoader(context)
        qry = ServiceOperationQuery(result, "GetProfileLoader", None, None, None, result)
        qry.static = True
        context.add_query(qry)
        return result

    def get_user_profile(self):
        result = UserProfile(self.context, ResourcePath("GetUserProfile", self.resource_path))
        qry = ServiceOperationQuery(self, "GetUserProfile", None, None, None, result)
        self.context.add_query(qry)
        return result

    @property
    def entity_type_name(self):
        return "SP.UserProfiles.ProfileLoader"
vgrem/Office365-REST-Python-Client
office365/sharepoint/userprofiles/profileLoader.py
Python
mit
1,190
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import traceback
import os
import pipes
import shutil
import subprocess
import select
import fcntl

from ansible.errors import AnsibleError
from ansible.plugins.connections import ConnectionBase
from ansible.utils.debug import debug


class Connection(ConnectionBase):
    ''' Local based connections '''

    def get_transport(self):
        ''' used to identify this connection object '''
        return 'local'

    def connect(self, port=None):
        ''' connect to the local host; nothing to do here '''

        self._display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._connection_info.remote_addr)
        return self

    def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
        ''' run a command on the local host '''

        debug("in local.exec_command()")
        # su requires to be run from a terminal, and therefore isn't supported here (yet?)
        #if self._connection_info.su:
        #    raise AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        executable = executable.split()[0] if executable else None

        self._display.vvv("%s EXEC %s" % (self._connection_info.remote_addr, cmd))

        # FIXME: cwd= needs to be set to the basedir of the playbook
        debug("opening command with Popen()")
        p = subprocess.Popen(
            cmd,
            shell=isinstance(cmd, basestring),
            executable=executable, #cwd=...
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        debug("done running command with Popen()")

        # FIXME: more su/sudo stuff
        #if self.runner.sudo and sudoable and self.runner.sudo_pass:
        #    fcntl.fcntl(p.stdout, fcntl.F_SETFL,
        #                fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
        #    fcntl.fcntl(p.stderr, fcntl.F_SETFL,
        #                fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
        #    sudo_output = ''
        #    while not sudo_output.endswith(prompt) and success_key not in sudo_output:
        #        rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
        #                                      [p.stdout, p.stderr], self.runner.timeout)
        #        if p.stdout in rfd:
        #            chunk = p.stdout.read()
        #        elif p.stderr in rfd:
        #            chunk = p.stderr.read()
        #        else:
        #            stdout, stderr = p.communicate()
        #            raise AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output)
        #        if not chunk:
        #            stdout, stderr = p.communicate()
        #            raise AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output)
        #        sudo_output += chunk
        #    if success_key not in sudo_output:
        #        p.stdin.write(self.runner.sudo_pass + '\n')
        #    fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        #    fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        debug("getting output with communicate()")
        stdout, stderr = p.communicate()
        debug("done communicating")

        debug("done with local.exec_command()")
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to local '''

        #vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        self._display.vvv("%s PUT %s TO %s" % (self._connection_info.remote_addr, in_path, out_path))
        if not os.path.exists(in_path):
            #raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
            raise AnsibleError("file or module does not exist: %s" % in_path)
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            traceback.print_exc()
            raise AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
        except IOError:
            traceback.print_exc()
            raise AnsibleError("failed to transfer file to %s" % out_path)

    def fetch_file(self, in_path, out_path):
        #vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
        self._display.vvv("%s FETCH %s TO %s" % (self._connection_info.remote_addr, in_path, out_path))
        ''' fetch a file from local to local -- for copatibility '''
        self.put_file(in_path, out_path)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
M0ses/ansible
v2/ansible/plugins/connections/local.py
Python
gpl-3.0
5,509
from crispy_forms.bootstrap import FormActions from crispy_forms.helper import FormHelper from crispy_forms.layout import ButtonHolder, Div, Fieldset, HTML, Layout, Submit from django import forms from django.core.validators import EmailValidator, email_re from django.core.urlresolvers import reverse from django.forms.widgets import PasswordInput, HiddenInput from django.utils.safestring import mark_safe from django.utils.translation import ugettext as _, ugettext_noop, ugettext_lazy from django.template.loader import get_template from django.template import Context from corehq.apps.locations.models import Location from corehq.apps.users.models import CouchUser from corehq.apps.users.util import format_username from corehq.apps.app_manager.models import validate_lang from corehq.apps.commtrack.models import CommTrackUser, Program import re # required to translate inside of a mark_safe tag from django.utils.functional import lazy import six # Python 3 compatibility mark_safe_lazy = lazy(mark_safe, six.text_type) def wrapped_language_validation(value): try: validate_lang(value) except ValueError: raise forms.ValidationError("%s is not a valid language code! Please " "enter a valid two or three digit code." % value) class LanguageField(forms.CharField): """ Adds language code validation to a field """ def __init__(self, *args, **kwargs): super(LanguageField, self).__init__(*args, **kwargs) self.min_length = 2 self.max_length = 3 default_error_messages = { 'invalid': _(u'Please enter a valid two or three digit language code.'), } default_validators = [wrapped_language_validation] class BaseUpdateUserForm(forms.Form): @property def direct_properties(self): return [] def update_user(self, existing_user=None, **kwargs): is_update_successful = False if not existing_user and 'email' in self.cleaned_data: from django.contrib.auth.models import User django_user = User() django_user.username = self.cleaned_data['email'] django_user.save() existing_user = CouchUser.from_django_user(django_user) existing_user.save() is_update_successful = True for prop in self.direct_properties: setattr(existing_user, prop, self.cleaned_data[prop]) is_update_successful = True if is_update_successful: existing_user.save() return is_update_successful def initialize_form(self, existing_user=None, **kwargs): if existing_user is None: return for prop in self.direct_properties: self.initial[prop] = getattr(existing_user, prop, "") class UpdateUserRoleForm(BaseUpdateUserForm): role = forms.ChoiceField(choices=(), required=False) def update_user(self, existing_user=None, domain=None, **kwargs): is_update_successful = super(UpdateUserRoleForm, self).update_user(existing_user) if domain and 'role' in self.cleaned_data: role = self.cleaned_data['role'] try: existing_user.set_role(domain, role) existing_user.save() is_update_successful = True except KeyError: pass return is_update_successful def load_roles(self, role_choices=None, current_role=None): if role_choices is None: role_choices = [] self.fields['role'].choices = role_choices if current_role: self.initial['role'] = current_role class BaseUserInfoForm(forms.Form): first_name = forms.CharField(label=ugettext_lazy('First Name'), max_length=50, required=False) last_name = forms.CharField(label=ugettext_lazy('Last Name'), max_length=50, required=False) email = forms.EmailField(label=ugettext_lazy("E-mail"), max_length=75, required=False) language = forms.ChoiceField( choices=(), initial=None, required=False, help_text=mark_safe_lazy( ugettext_lazy( "<i 
class=\"icon-info-sign\"></i> " "Becomes default language seen in CloudCare and reports (if applicable). " "Supported languages for reports are en, fr (partial), and hin (partial)." ) ) ) def load_language(self, language_choices=None): if language_choices is None: language_choices = [] self.fields['language'].choices = [('', '')] + language_choices class UpdateMyAccountInfoForm(BaseUpdateUserForm, BaseUserInfoForm): email_opt_out = forms.BooleanField( required=False, label="", help_text=ugettext_lazy("Opt out of emails about new features and other CommCare updates.") ) @property def direct_properties(self): return self.fields.keys() class UpdateCommCareUserInfoForm(BaseUserInfoForm, UpdateUserRoleForm): @property def direct_properties(self): indirect_props = ['role'] return [k for k in self.fields.keys() if k not in indirect_props] class RoleForm(forms.Form): def __init__(self, *args, **kwargs): if kwargs.has_key('role_choices'): role_choices = kwargs.pop('role_choices') else: role_choices = () super(RoleForm, self).__init__(*args, **kwargs) self.fields['role'].choices = role_choices class Meta: app_label = 'users' class CommCareAccountForm(forms.Form): """ Form for CommCareAccounts """ # 128 is max length in DB # 25 is domain max length # @{domain}.commcarehq.org adds 16 # left over is 87 and 80 just sounds better max_len_username = 80 username = forms.CharField(max_length=max_len_username, required=True) password = forms.CharField(widget=PasswordInput(), required=True, min_length=1, help_text="Only numbers are allowed in passwords") password_2 = forms.CharField(label='Password (reenter)', widget=PasswordInput(), required=True, min_length=1) domain = forms.CharField(widget=HiddenInput()) phone_number = forms.CharField(max_length=80, required=False) class Meta: app_label = 'users' def __init__(self, *args, **kwargs): super(forms.Form, self).__init__(*args, **kwargs) self.helper = FormHelper() self.helper.layout = Layout( Fieldset( 'Create new Mobile Worker account', 'username', 'password', HTML("{% if only_numeric %}" "<div class=\"control-group\"><div class=\"controls\">" "To enable alphanumeric passwords, go to the " "applications this user will use, go to CommCare " "Settings, and change Password Format to Alphanumeric." "</div></div>" "{% endif %}" ), 'password_2', 'phone_number', Div( Div(HTML("Please enter number, including international code, in digits only."), css_class="controls"), css_class="control-group" ) ), FormActions( ButtonHolder( Submit('submit', 'Create Mobile Worker') ) ) ) def clean_phone_number(self): phone_number = self.cleaned_data['phone_number'] phone_number = re.sub('\s|\+|\-', '', phone_number) if phone_number == '': return None elif not re.match(r'\d+$', phone_number): raise forms.ValidationError(_("%s is an invalid phone number." % phone_number)) return phone_number def clean_username(self): username = self.cleaned_data['username'] if username == 'admin' or username == 'demo_user': raise forms.ValidationError("The username %s is reserved for CommCare." 
% username) return username def clean(self): try: password = self.cleaned_data['password'] password_2 = self.cleaned_data['password_2'] except KeyError: pass else: if password != password_2: raise forms.ValidationError("Passwords do not match") if self.password_format == 'n' and not password.isnumeric(): raise forms.ValidationError("Password is not numeric") try: username = self.cleaned_data['username'] except KeyError: pass else: if len(username) > CommCareAccountForm.max_len_username: raise forms.ValidationError( "Username %s is too long. Must be under %d characters." % (username, CommCareAccountForm.max_len_username)) validate_username('%[email protected]' % username) domain = self.cleaned_data['domain'] username = format_username(username, domain) num_couch_users = len(CouchUser.view("users/by_username", key=username)) if num_couch_users > 0: raise forms.ValidationError("CommCare user already exists") # set the cleaned username to [email protected] self.cleaned_data['username'] = username return self.cleaned_data validate_username = EmailValidator(email_re, _(u'Username contains invalid characters.'), 'invalid') class MultipleSelectionForm(forms.Form): """ Form for selecting groups (used by the group UI on the user page) """ selected_ids = forms.MultipleChoiceField( label="", required=False, ) def __init__(self, *args, **kwargs): self.helper = FormHelper() self.helper.form_tag = False self.helper.add_input(Submit('submit', 'Update')) super(MultipleSelectionForm, self).__init__(*args, **kwargs) class SupplyPointSelectWidget(forms.Widget): def __init__(self, attrs=None, domain=None, id='supply-point'): super(SupplyPointSelectWidget, self).__init__(attrs) self.domain = domain self.id = id def render(self, name, value, attrs=None): return get_template('locations/manage/partials/autocomplete_select_widget.html').render(Context({ 'id': self.id, 'name': name, 'value': value or '', 'query_url': reverse('corehq.apps.commtrack.views.api_query_supply_point', args=[self.domain]), })) class CommtrackUserForm(forms.Form): supply_point = forms.CharField(label='Supply Point:', required=False) program_id = forms.ChoiceField(label="Program", choices=(), required=False) def __init__(self, *args, **kwargs): domain = None if 'domain' in kwargs: domain = kwargs['domain'] del kwargs['domain'] super(CommtrackUserForm, self).__init__(*args, **kwargs) self.fields['supply_point'].widget = SupplyPointSelectWidget(domain=domain) programs = Program.by_domain(domain, wrap=False) choices = list((prog['_id'], prog['name']) for prog in programs) choices.insert(0, ('', '')) self.fields['program_id'].choices = choices def save(self, user): commtrack_user = CommTrackUser.wrap(user.to_json()) location_id = self.cleaned_data['supply_point'] if location_id: loc = Location.get(location_id) commtrack_user.clear_locations() commtrack_user.add_location(loc)
gmimano/commcaretest
corehq/apps/users/forms.py
Python
bsd-3-clause
11,739
import rpyc


class AService(rpyc.Service):
    class exposed_A(object):
        @classmethod
        def exposed_foo(cls, a, b):
            return 17 * a + b


if __name__ == "__main__":
    with rpyc.connect_thread(remote_service = AService) as conn:
        print( conn.root.A.foo(1, 2))
pombredanne/rpyc
issues/issue26.py
Python
mit
291
"""cyme.branch - This is the Branch thread started by the :program:`cyme-branch` program. It starts the HTTP server, the Supervisor, and one or more controllers. """ from __future__ import absolute_import import logging from celery import current_app as celery from celery.utils import LOG_LEVELS, term from cell.g import Event from kombu.log import LogMixin from kombu.utils import gen_unique_id from . import signals from .state import state from .thread import gThread from cyme.utils import find_symbol, instantiate class MockSup(LogMixin): def __init__(self, thread, *args): self.thread = thread def start(self): self.thread.start() def stop(self): return self.thread.stop() class Branch(gThread): controller_cls = '.controller.Controller' httpd_cls = '.httpd.HttpServer' supervisor_cls = '.supervisor.Supervisor' intsup_cls = '.intsup.gSup' _components_ready = {} _components_shutdown = {} _presence_ready = 0 _ready = False def __init__(self, addrport='', id=None, loglevel=logging.INFO, logfile=None, without_httpd=False, numc=2, sup_interval=None, ready_event=None, colored=None, **kwargs): self.id = id or gen_unique_id() if isinstance(addrport, basestring): addr, _, port = addrport.partition(':') addrport = (addr, int(port) if port else 8000) self.addrport = addrport self.connection = celery.broker_connection() self.without_httpd = without_httpd self.logfile = logfile self.loglevel = loglevel self.numc = numc self.ready_event = ready_event self.exit_request = Event() self.colored = colored or term.colored(enabled=False) self.httpd = None gSup = find_symbol(self, self.intsup_cls) if not self.without_httpd: self.httpd = MockSup(instantiate(self, self.httpd_cls, addrport), signals.httpd_ready) self.supervisor = gSup(instantiate(self, self.supervisor_cls, sup_interval), signals.supervisor_ready) self.controllers = [gSup(instantiate(self, self.controller_cls, id='%s.%s' % (self.id, i), connection=self.connection, branch=self), signals.controller_ready) for i in xrange(1, numc + 1)] c = [self.supervisor] + self.controllers + [self.httpd] c = self.components = list(filter(None, c)) self._components_ready = dict(zip([z.thread for z in c], [False] * len(c))) for controller in self.controllers: if hasattr(controller.thread, 'presence'): self._components_ready[controller.thread.presence] = False self._components_shutdown = dict(self._components_ready) super(Branch, self).__init__() def _component_ready(self, sender=None, **kwargs): if not self._ready: self._components_ready[sender] = True if all(self._components_ready.values()): signals.branch_ready.send(sender=self) if self.ready_event: self.ready_event.send() self.ready_event = None self._ready = True def on_ready(self, **kwargs): pass def prepare_signals(self): signals.controller_ready.connect(self._component_ready) signals.httpd_ready.connect(self._component_ready) signals.supervisor_ready.connect(self._component_ready) signals.presence_ready.connect(self._component_ready) signals.branch_ready.connect(self.on_ready) signals.thread_post_shutdown.connect(self._component_shutdown) def run(self): state.is_branch = True signals.branch_startup_request.send(sender=self) self.prepare_signals() self.info('Starting with id %r', self.id) [g.start() for g in self.components] self.exit_request.wait() def stop(self): self.exit_request.send(1) super(Branch, self).stop() def after(self): for component in reversed(self.components): if self._components_ready[component.thread]: try: component.stop() except KeyboardInterrupt: pass except BaseException, exc: 
component.error('Error in shutdown: %r', exc) def _component_shutdown(self, sender, **kwargs): self._components_shutdown[sender] = True if all(self._components_shutdown.values()): signals.branch_shutdown_complete.send(sender=self) def about(self): url = port = None if self.httpd: url, port = self.httpd.thread.url, self.httpd.thread.port port = self.httpd.thread.port if self.httpd else None return {'id': self.id, 'loglevel': LOG_LEVELS[self.loglevel], 'numc': self.numc, 'sup_interval': self.supervisor.interval, 'logfile': self.logfile, 'port': port, 'url': url}
celery/cyme
cyme/branch/__init__.py
Python
bsd-3-clause
5,311
import requests


class GeneralBikeshareFeed(object):
    gbfs_feed = 'http://gbfs.citibikenyc.com/gbfs/gbfs.json'

    def __init__(self):
        """
        {
            "last_updated":1478918732,
            "ttl":10,
            "data":{
                "en":{
                    "feeds":[
                        {
                            "name":"system_information",
                            "url":"https://gbfs.citibikenyc.com/gbfs/en/system_information.json"
                        },
                        {
                            "name":"system_alerts",
                            "url":"https://gbfs.citibikenyc.com/gbfs/en/system_alerts.json"
                        },
                        {
                            "name":"station_information",
                            "url":"https://gbfs.citibikenyc.com/gbfs/en/station_information.json"
                        },
                        {
                            "name":"station_status",
                            "url":"https://gbfs.citibikenyc.com/gbfs/en/station_status.json"
                        },
                        {
                            "name":"system_regions",
                            "url":"https://gbfs.citibikenyc.com/gbfs/en/system_regions.json"
                        }]}}}
        """
        r = requests.get(GeneralBikeshareFeed.gbfs_feed)
        data = r.json()
        self.feeds = dict()
        for feed in data['data']['en']['feeds']:
            self.feeds[feed['name']] = feed['url']

    def system_information(self):
        r = requests.get(self.feeds['system_information'])
        data = r.json()
        return data['data']

    def system_alerts(self):
        r = requests.get(self.feeds['system_alerts'])
        data = r.json()
        return data['data']['alerts']

    def station_information(self):
        r = requests.get(self.feeds['station_information'])
        data = r.json()
        return data['data']['stations']

    def station_status(self):
        r = requests.get(self.feeds['station_status'])
        data = r.json()
        return data['data']['stations']

    def system_regions(self):
        r = requests.get(self.feeds['system_regions'])
        data = r.json()
        return data['data']['regions']
mdprewitt/citiike-gbf
gbf/feed.py
Python
mit
1,973
import logging import os import sys import signal DESTDIR = "/opt/spark-cluster/" logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s") rootLogger = logging.getLogger() rootLogger.setLevel(logging.DEBUG) def sigint_handler(signum, frame): os.system("docker-compose --file {}docker-compose.yml stop".format(DESTDIR)) os.system("docker-compose --file {}docker-compose.yml rm -f".format(DESTDIR)) exit() signal.signal(signal.SIGINT, sigint_handler) def check_dependencies(path): if os.path.exists(path+"requirements.txt"): rootLogger.info("found requirements... \ninstalling...") os.system("pip install -r {}requirements.txt".format(path)) else: print(path+"requirements.txt") try: import pip import logging import datetime import socket import re import requests import subprocess import yaml import docker except ImportError: # check dependencies check_dependencies(DESTDIR) import pip import datetime import socket import re import requests import subprocess import yaml import docker def generate_master_docker(outdir, master_ip, master_port = 7077): spark_master_confs = { "IP": "{}".format(master_ip), "PORT": "{}".format(master_port) } d = {"spark-master": { "image": "spark", "command": "bin/spark-class org.apache.spark.deploy.master.Master -h spark-master", "hostname": "spark-master", "environment": { "SPARK_CONF_DIR": "/conf", "SPARK_PUBLIC_DNS": "{}".format(spark_master_confs["IP"]), "SPARK_MASTER_HOST": "{}".format(spark_master_confs["IP"]), }, "expose" : [ 7001, 7002, 7003, 7004, 7005, 7006, 7077, 6066], "ports" : [ "4040:4040", "6066:6066", "7077:7077", "8080:8080"], "volumes": [ "./conf/spark-master:/conf", "./data:/tmp/data"] } } with open(outdir + 'docker-compose.yml', 'w') as outfile: yaml.dump(d, outfile, default_flow_style=False) def generate_worker_docker(outdir, master_ip, hostnumber): spark_master_confs = { "IP": "{}".format(master_ip), "PORT": "7077" } spark_worker_confs = { "NAME": "spark-worker-{}".format(hostnumber), "IP": "172.16.0.1{}".format(hostnumber), "PORT": "5078", "WEBUI_PORT": "8081", } d = {"{}".format(spark_worker_confs["NAME"]): { "image": "spark", "command": "bin/spark-class org.apache.spark.deploy.worker.Worker spark://{}:{}". 
\ format(spark_master_confs["IP"], spark_master_confs["PORT"]), "hostname": "{}".format(spark_worker_confs["NAME"]), "environment": { "SPARK_CONF_DIR": "/conf", "SPARK_PUBLIC_DNS": "{}".format(spark_master_confs["IP"]), "SPARK_WORKER_CORES": "2", "SPARK_WORKER_MEMORY": "2g", "SPARK_WORKER_PORT": "50{}".format(hostnumber), "SPARK_WORKER_WEBUI_PORT": "80{}".format(hostnumber), "SPARK_MASTER_HOST": "{}".format(spark_master_confs["IP"]), "MASTER": "spark://{}:{}".format(spark_master_confs["IP"], spark_master_confs["PORT"]), }, "expose": [ 7012, 7013, 7014, 7015, 7016, 8881, 5078], "ports": ["8081:8081"], "volumes": [ "./conf/{}:/conf".format(spark_worker_confs["NAME"]), "./data:/tmp/data"] } } with open(outdir + 'docker-compose.yml', 'w') as outfile: yaml.dump(d, outfile, default_flow_style=False) def generate_docker_compose(outdir, master_ip): hostname = socket.gethostname() t = re.match("alcatel(\d+)", hostname) if t: hostnumber = t.group(1) else: raise Exception("unknown hostnumber") s = re.match("172\.16\.0\.1(\d+)", master_ip) if s: master_name = "alcatel" + s.group(1) else: raise Exception("can't identify master's name {}".format(master_ip)) if hostname == master_name: rootLogger.info("identified as master {}".format(hostname)) generate_master_docker(outdir, master_ip) else: rootLogger.info("identified as worker {}".format(hostname)) generate_worker_docker(outdir, master_ip, hostnumber) def maybe_build_docker_image(): global c docker_image_name = "datamining/spark-cluster:latest" c = docker.Client() images = [img['RepoTags'][0] for img in c.images()] if docker_image_name not in images: rootLogger.info("{} not in already built images: {}".format(docker_image_name, images)) url = "https://raw.githubusercontent.com/f-guitart/spark-cluster/master/spark.df" response = requests.get(url) response.raise_for_status() dockerfile = response.content with open("/opt/spark-cluster/spark.df", "wb") as f: f.write(dockerfile) with open("/opt/spark-cluster/spark.df", "rb") as f: response = [line for line in c.build(fileobj=f, rm=True, tag='datamining/spark-cluster')] rootLogger.info(str(response)) else: rootLogger.info("{} already built".format(docker_image_name)) # with open("/opt/spark-cluster/log/docker-build.log","a") as f: # f.writelines("{} - {}".format(datetime.datetime.now(), response)) def maybe_create_directories(path): if os.path.exists(path): new_dirs = ["log"] for d in new_dirs: if not os.path.exists(path+d): os.makedirs(path+d) else: os.makedirs(path) maybe_create_directories(path) def add_log_handlers(logdir, logfilname): fileHandler = logging.FileHandler("{0}/{1}.log".format(logdir, logfilname)) fileHandler.setFormatter(logFormatter) rootLogger.addHandler(fileHandler) consoleHandler = logging.StreamHandler() consoleHandler.setFormatter(logFormatter) rootLogger.addHandler(consoleHandler) def maybe_stop_container(): global c c.containers() def run_docker(docker_dir): compose_out = subprocess.Popen(["docker-compose","-f", docker_dir+"{}".format("docker-compose.yml"), "up"], stdout=sys.stdout) rootLogger.info("{}".format(compose_out)) def main(in_arg): c = None # get role master_ip = in_arg # create dirs maybe_create_directories(DESTDIR) # add log handlers add_log_handlers(DESTDIR+"log/","docker_builder") # download docker image rootLogger.info("building docker image") maybe_build_docker_image() # generate docker compose generate_docker_compose(DESTDIR, master_ip) # launch docker compose maybe_stop_container() status = run_docker(DESTDIR) #notify_status(status) if __name__ == '__main__': 
main(sys.argv[1])
f-guitart/spark-cluster
run_spark_cluster.py
Python
gpl-3.0
7,041
xs = (<caret>1, 2)
smmribeiro/intellij-community
python/testData/intentions/PyConvertCollectionLiteralIntentionTest/convertTupleToSetNotAvailableWithoutSetLiterals.py
Python
apache-2.0
18
import fnmatch import mimetypes import os import re import signal import time import traceback from multiprocessing import Process, JoinableQueue, Queue import chardet import psutil from lib.FileManager.FM import REQUEST_DELAY from lib.FileManager.workers.baseWorkerCustomer import BaseWorkerCustomer from misc.helperUnicode import as_unicode from misc.helpers import kill TIMEOUT_LIMIT = 60 * 60 # 10 min class FindText(BaseWorkerCustomer): NUM_WORKING_PROCESSES = 2 def __init__(self, params, session, *args, **kwargs): super(FindText, self).__init__(*args, **kwargs) self.path = params.get('path', '/') self.session = session self.session = session self.text = params.get('text', '') self.params = params # file queue to be processed by many threads self.file_queue = JoinableQueue(maxsize=0) self.result_queue = Queue(maxsize=0) self.result = [] self.is_alive = { "status": True } self.re_text = re.compile('.*' + fnmatch.translate(self.text)[:-7] + '.*', re.UNICODE | re.IGNORECASE) # remove \Z(?ms) from end of result expression def run(self): try: self.preload() sftp = self.get_sftp_connection(self.session) self.logger.debug("findText started with timeout = %s" % TIMEOUT_LIMIT) time_limit = int(time.time()) + TIMEOUT_LIMIT # Launches a number of worker threads to perform operations using the queue of inputs sftp_managers = [] for i in range(self.NUM_WORKING_PROCESSES): p = Process(target=self.worker, args=(self.re_text, self.file_queue, self.result_queue, time_limit)) p.start() proc = psutil.Process(p.pid) proc.ionice(psutil.IOPRIO_CLASS_IDLE) proc.nice(20) self.logger.debug( "Search worker #%s, set ionice = idle and nice = 20 for pid %s" % ( str(i), str(p.pid))) self.processes.append(p) abs_path = self.path self.logger.debug("FM FindText worker run(), abs_path = %s" % abs_path) if not sftp.exists(abs_path): raise Exception("Provided path not exist") self.on_running(self.status_id, pid=self.pid, pname=self.name) for current, dirs, files in sftp.walk(abs_path): for f in files: try: file_path = os.path.join(current, f) self.file_queue.put(file_path) except UnicodeDecodeError as e: self.logger.error( "UnicodeDecodeError %s, %s" % (str(e), traceback.format_exc())) except IOError as e: self.logger.error("IOError %s, %s" % (str(e), traceback.format_exc())) except Exception as e: self.logger.error( "Exception %s, %s" % (str(e), traceback.format_exc())) while int(time.time()) <= time_limit: self.logger.debug("file_queue size = %s , empty = %s (timeout: %s/%s)" % ( self.file_queue.qsize(), self.file_queue.empty(), str(int(time.time())), time_limit)) if self.file_queue.empty(): self.logger.debug("join() file_queue until workers done jobs") self.file_queue.join() break else: time.sleep(REQUEST_DELAY) if int(time.time()) > time_limit: self.is_alive['status'] = False for sftp in sftp_managers: sftp.conn.close() for p in self.processes: try: self.logger.debug("FM FindText terminate worker process, pid = %s" % p.pid) kill(p.pid, signal.SIGKILL, self.logger) except OSError: self.logger.error( "FindText unable to terminate worker process, pid = %s" % p.pid) if self.is_alive['status'] is True: while not self.result_queue.empty(): file_path = self.result_queue.get() self.result.append(sftp.make_file_info(file_path)) self.on_success(self.status_id, data=self.result, pid=self.pid, pname=self.name) else: result = { "error": True, "message": "Operation timeout exceeded", "traceback": "" } self.on_error(self.status_id, result, pid=self.pid, pname=self.name) except Exception as e: result = { "error": True, "message": 
str(e), "traceback": traceback.format_exc() } self.on_error(self.status_id, result, pid=self.pid, pname=self.name) def worker(self, re_text, file_queue, result_queue, timeout): try: worker_sftp = self.get_sftp_connection(self.session) while int(time.time()) < timeout: if file_queue.empty() is not True: f_path = file_queue.get() try: if not worker_sftp.is_binary(f_path): mime = mimetypes.guess_type(f_path)[0] # исключаем некоторые mime типы из поиска if mime not in ['application/pdf', 'application/rar']: with worker_sftp.open(f_path, 'rb') as fp: for line in fp: try: line = as_unicode(line) except UnicodeDecodeError: charset = chardet.detect(line) if charset.get('encoding') in ['MacCyrillic']: detected = 'windows-1251' else: detected = charset.get('encoding') if detected is None: break try: line = str(line, detected, "replace") except LookupError: pass if re_text.match(line) is not None: result_queue.put(f_path) self.logger.debug("matched file = %s " % f_path) break except UnicodeDecodeError as unicode_e: self.logger.error( "UnicodeDecodeError %s, %s" % (str(unicode_e), traceback.format_exc())) except IOError as io_e: self.logger.error("IOError %s, %s" % (str(io_e), traceback.format_exc())) except Exception as other_e: self.logger.error("Exception %s, %s" % (str(other_e), traceback.format_exc())) finally: file_queue.task_done() else: time.sleep(REQUEST_DELAY) worker_sftp.close() except Exception as e: result = { "error": True, "message": str(e), "traceback": traceback.format_exc() } self.logger.error('SFTP FindText Worker Exception {}'.format(result))
LTD-Beget/sprutio-rpc
lib/FileManager/workers/sftp/findText.py
Python
gpl-3.0
8,040
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. # Copyright (C) NIWA & British Crown (Met Office) & Contributors. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Parse and validate the workflow definition file Set local values of variables to give workflow context before parsing config, i.e for template filters (Jinja2, python ...etc) and possibly needed locally by event handlers. This is needed for both running and non-running workflow parsing (obtaining config/graph info). Potentially task-specific due to different directory paths on different task hosts, however, they are overridden by tasks prior to job submission. Do some consistency checking, then construct task proxy objects and graph structures. """ import contextlib from copy import copy from fnmatch import fnmatchcase import os from pathlib import Path import re import traceback from typing import ( Any, Callable, Dict, List, Mapping, Optional, Set, TYPE_CHECKING, Tuple, Union ) from metomi.isodatetime.data import Calendar from metomi.isodatetime.parsers import DurationParser from metomi.isodatetime.exceptions import IsodatetimeError from metomi.isodatetime.timezone import get_local_time_zone_format from metomi.isodatetime.dumpers import TimePointDumper from cylc.flow import LOG from cylc.flow.c3mro import C3 from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.cfgspec.workflow import RawWorkflowConfig from cylc.flow.cycling.loader import ( get_point, get_point_relative, get_interval, get_interval_cls, get_sequence, get_sequence_cls, init_cyclers, get_dump_format, INTEGER_CYCLING_TYPE, ISO8601_CYCLING_TYPE ) from cylc.flow.cycling.integer import IntegerInterval from cylc.flow.cycling.iso8601 import ingest_time, ISO8601Interval from cylc.flow.exceptions import ( CylcError, WorkflowConfigError, IntervalParsingError, TaskDefError, ParamExpandError, UserInputError ) import cylc.flow.flags from cylc.flow.graph_parser import GraphParser from cylc.flow.listify import listify from cylc.flow.option_parsers import verbosity_to_env from cylc.flow.graphnode import GraphNodeParser from cylc.flow.param_expand import NameExpander from cylc.flow.parsec.exceptions import ItemNotFoundError from cylc.flow.parsec.OrderedDict import OrderedDictWithDefaults from cylc.flow.parsec.util import replicate from cylc.flow.pathutil import ( get_workflow_run_dir, get_workflow_run_log_dir, get_workflow_run_share_dir, get_workflow_run_work_dir, get_workflow_name_from_id ) from cylc.flow.platforms import FORBIDDEN_WITH_PLATFORM from cylc.flow.print_tree import print_tree from cylc.flow.subprocctx import SubFuncContext from cylc.flow.task_events_mgr import ( EventData, get_event_handler_data ) from cylc.flow.task_id import TaskID from cylc.flow.task_outputs import ( TASK_OUTPUT_SUCCEEDED, TaskOutputs ) from cylc.flow.task_trigger import TaskTrigger, Dependency from cylc.flow.taskdef import TaskDef from cylc.flow.unicode_rules import ( TaskNameValidator, TaskOutputValidator, 
XtriggerNameValidator, ) from cylc.flow.wallclock import ( get_current_time_string, set_utc_mode, get_utc_mode) from cylc.flow.workflow_files import NO_TITLE, WorkflowFiles from cylc.flow.xtrigger_mgr import XtriggerManager if TYPE_CHECKING: from optparse import Values from cylc.flow.cycling import IntervalBase, PointBase, SequenceBase RE_CLOCK_OFFSET = re.compile(r'(' + TaskID.NAME_RE + r')(?:\(\s*(.+)\s*\))?') RE_EXT_TRIGGER = re.compile(r'(.*)\s*\(\s*(.+)\s*\)\s*') RE_SEC_MULTI_SEQ = re.compile(r'(?![^(]+\)),') RE_WORKFLOW_ID_VAR = re.compile(r'\${?CYLC_WORKFLOW_(REG_)?ID}?') RE_TASK_NAME_VAR = re.compile(r'\${?CYLC_TASK_NAME}?') RE_VARNAME = re.compile(r'^[a-zA-Z_][\w]*$') def check_varnames(env): """Check a list of env var names for legality. Return a list of bad names (empty implies success). """ bad = [] for varname in env: if not RE_VARNAME.match(varname): bad.append(varname) return bad def interpolate_template(tmpl, params_dict): """Try the string interpolation/formatting operator `%` on a template string with a dictionary of parameters. E.g. 'a_%(foo)d' % {'foo': 12} If it fails, raises ParamExpandError, but if the string does not contain `%(`, it just returns the string. """ if '%(' not in tmpl: return tmpl # User probably not trying to use param template try: return tmpl % params_dict except KeyError: raise ParamExpandError('bad parameter') except TypeError: raise ParamExpandError('wrong data type for parameter') except ValueError: raise ParamExpandError('bad template syntax') # TODO: separate config for run and non-run purposes? class WorkflowConfig: """Class for workflow configuration items and derived quantities.""" CHECK_CIRCULAR_LIMIT = 100 # If no. tasks > this, don't check circular VIS_N_POINTS = 3 CYLC7_GRAPH_COMPAT_MSG = ( "Cylc 7 graph compatibility: making success outputs 'required' (to" " retain failed tasks in the pool) and pre-spawning graph children (to" " replicate Cylc 7 stall behaviour). Please refer to documentation on" " upgrading Cylc 7 graphs to Cylc 8." 
) def __init__( self, workflow: str, fpath: Union[Path, str], options: 'Values', template_vars: Optional[Mapping[str, Any]] = None, is_reload: bool = False, output_fname: Optional[str] = None, xtrigger_mgr: Optional[XtriggerManager] = None, mem_log_func: Optional[Callable[[str], None]] = None, run_dir: Optional[str] = None, log_dir: Optional[str] = None, work_dir: Optional[str] = None, share_dir: Optional[str] = None ) -> None: self.mem_log = mem_log_func if self.mem_log is None: self.mem_log = lambda x: None self.mem_log("config.py:config.py: start init config") self.workflow = workflow # workflow id self.workflow_name = get_workflow_name_from_id(self.workflow) self.fpath = str(fpath) # workflow definition self.fdir = os.path.dirname(fpath) self.run_dir = run_dir or get_workflow_run_dir(self.workflow) self.log_dir = log_dir or get_workflow_run_log_dir(self.workflow) self.share_dir = share_dir or get_workflow_run_share_dir(self.workflow) self.work_dir = work_dir or get_workflow_run_work_dir(self.workflow) self.options = options self.implicit_tasks: Set[str] = set() self.edges: Dict[ 'SequenceBase', Set[Tuple[str, str, bool, bool]] ] = {} self.taskdefs: Dict[str, TaskDef] = {} self.clock_offsets = {} self.expiration_offsets = {} self.ext_triggers = {} # Old external triggers (client/server) self.xtrigger_mgr = xtrigger_mgr self.workflow_polling_tasks = {} # type: ignore # TODO figure out type self.initial_point: 'PointBase' self.start_point: 'PointBase' self.final_point: Optional['PointBase'] = None self.sequences: List['SequenceBase'] = [] self.actual_first_point: Optional['PointBase'] = None self._start_point_for_actual_first_point: Optional['PointBase'] = None self.task_param_vars = {} # type: ignore # TODO figure out type self.custom_runahead_limit: Optional['IntervalBase'] = None self.max_num_active_cycle_points = None # runtime hierarchy dicts keyed by namespace name: self.runtime: Dict[str, dict] = { # TODO figure out type # lists of parent namespaces 'parents': {}, # lists of C3-linearized ancestor namespaces 'linearized ancestors': {}, # lists of first-parent ancestor namespaces 'first-parent ancestors': {}, # lists of all descendant namespaces # (not including the final tasks) 'descendants': {}, # lists of all descendant namespaces from the first-parent # hierarchy (first parents are collapsible in visualization) 'first-parent descendants': {}, } # tasks self.leaves = [] # TODO figure out type # one up from root self.feet = [] # type: ignore # TODO figure out type # Export local environmental workflow context before config parsing. self.process_workflow_env() # parse, upgrade, validate the workflow, but don't expand with default # items self.mem_log("config.py: before RawWorkflowConfig init") if output_fname: output_fname = os.path.expandvars(output_fname) self.pcfg = RawWorkflowConfig( fpath, output_fname, template_vars ) self.mem_log("config.py: after RawWorkflowConfig init") self.mem_log("config.py: before get(sparse=True") self.cfg = self.pcfg.get(sparse=True) self.mem_log("config.py: after get(sparse=True)") if 'scheduler' in self.cfg and 'install' in self.cfg['scheduler']: self.get_validated_rsync_includes() # First check for the essential scheduling section. if 'scheduling' not in self.cfg: raise WorkflowConfigError("missing [scheduling] section.") if 'graph' not in self.cfg['scheduling']: raise WorkflowConfigError("missing [scheduling][[graph]] section.") # (The check that 'graph' is defined is below). # Override the workflow defn with an initial point from the CLI. 
icp_str = getattr(self.options, 'icp', None) if icp_str is not None: self.cfg['scheduling']['initial cycle point'] = icp_str self.prelim_process_graph() # allow test workflows with no [runtime]: if 'runtime' not in self.cfg: self.cfg['runtime'] = OrderedDictWithDefaults() if 'root' not in self.cfg['runtime']: self.cfg['runtime']['root'] = OrderedDictWithDefaults() try: # Ugly hack to avoid templates getting included in parameters parameter_values = { key: value for key, value in self.cfg['task parameters'].items() if key != 'templates' } except KeyError: # (Workflow config defaults not put in yet.) parameter_values = {} try: parameter_templates = self.cfg['task parameters']['templates'] except KeyError: parameter_templates = {} # Check that parameter templates are a section if not hasattr(parameter_templates, 'update'): raise WorkflowConfigError( '[task parameters][templates] is a section. Don\'t use it ' 'as a parameter.' ) # parameter values and templates are normally needed together. self.parameters = (parameter_values, parameter_templates) LOG.debug("Expanding [runtime] namespace lists and parameters") # Set default parameter expansion templates if necessary. for pname, pvalues in parameter_values.items(): if pvalues and pname not in parameter_templates: if any(not isinstance(pvalue, int) for pvalue in pvalues): # Strings, bare parameter values parameter_templates[pname] = r'_%%(%s)s' % pname elif any(pvalue < 0 for pvalue in pvalues): # Integers, with negative value(s) # Prefix values with signs and parameter names parameter_templates[pname] = r'_%s%%(%s)+0%dd' % ( pname, pname, max(len(str(pvalue)) for pvalue in pvalues)) else: # Integers, positive only # Prefix values with parameter names parameter_templates[pname] = r'_%s%%(%s)0%dd' % ( pname, pname, len(str(max(pvalues)))) # Expand parameters in 'special task' lists. if 'special tasks' in self.cfg['scheduling']: for spec, names in self.cfg['scheduling']['special tasks'].items(): self.cfg['scheduling']['special tasks'][spec] = ( self._expand_name_list(names)) # Expand parameters in internal queue member lists. if 'queues' in self.cfg['scheduling']: for queue, cfg in self.cfg['scheduling']['queues'].items(): if 'members' not in cfg: continue self.cfg['scheduling']['queues'][queue]['members'] = ( self._expand_name_list(cfg['members'])) # Check environment variable names and parameter environment templates # Done before inheritance to avoid repetition self.check_env_names() self.check_param_env_tmpls() self.mem_log("config.py: before _expand_runtime") self._expand_runtime() self.mem_log("config.py: after _expand_runtime") self.ns_defn_order = list(self.cfg['runtime']) self.mem_log("config.py: before compute_family_tree") # do sparse inheritance self.compute_family_tree() self.mem_log("config.py: after compute_family_tree") self.mem_log("config.py: before inheritance") self.compute_inheritance() self.mem_log("config.py: after inheritance") # filter task environment variables after inheritance self.filter_env() # Now add config defaults. Items added prior to this end up in the # sparse dict (e.g. parameter-expanded namespaces). self.mem_log("config.py: before get(sparse=False)") self.cfg = self.pcfg.get(sparse=False) self.mem_log("config.py: after get(sparse=False)") # These 2 must be called before call to init_cyclers(self.cfg): self.process_utc_mode() self.process_cycle_point_tz() # after the call to init_cyclers, we can start getting proper points. 
init_cyclers(self.cfg) self.cycling_type = get_interval_cls().get_null().TYPE self.cycle_point_dump_format = get_dump_format(self.cycling_type) # Initial point from workflow definition (or CLI override above). self.process_initial_cycle_point() self.process_start_cycle_point() self.process_final_cycle_point() # Parse special task cycle point offsets, and replace family names. LOG.debug("Parsing [special tasks]") for s_type in self.cfg['scheduling']['special tasks']: result = copy(self.cfg['scheduling']['special tasks'][s_type]) extn = '' for item in self.cfg['scheduling']['special tasks'][s_type]: name = item if s_type == 'external-trigger': match = RE_EXT_TRIGGER.match(item) if match is None: raise WorkflowConfigError( "Illegal %s spec: %s" % (s_type, item) ) name, ext_trigger_msg = match.groups() extn = "(" + ext_trigger_msg + ")" elif s_type in ['clock-trigger', 'clock-expire']: match = RE_CLOCK_OFFSET.match(item) if match is None: raise WorkflowConfigError( "Illegal %s spec: %s" % (s_type, item) ) if ( self.cfg['scheduling']['cycling mode'] != Calendar.MODE_GREGORIAN ): raise WorkflowConfigError( "%s tasks require " "[scheduling]cycling mode=%s" % ( s_type, Calendar.MODE_GREGORIAN) ) name, offset_string = match.groups() if not offset_string: offset_string = "PT0M" if ( cylc.flow.flags.verbosity > 0 and offset_string.startswith("-") ): LOG.warning( "%s offsets are normally positive: %s" % ( s_type, item)) try: offset_interval = ( get_interval(offset_string).standardise()) except IntervalParsingError: raise WorkflowConfigError( "Illegal %s spec: %s" % ( s_type, offset_string)) extn = "(" + offset_string + ")" # Replace family names with members. if name in self.runtime['descendants']: result.remove(item) for member in self.runtime['descendants'][name]: if member in self.runtime['descendants']: # (sub-family) continue result.append(member + extn) if s_type == 'clock-trigger': self.clock_offsets[member] = offset_interval if s_type == 'clock-expire': self.expiration_offsets[member] = offset_interval if s_type == 'external-trigger': self.ext_triggers[member] = ext_trigger_msg elif s_type == 'clock-trigger': self.clock_offsets[name] = offset_interval elif s_type == 'clock-expire': self.expiration_offsets[name] = offset_interval elif s_type == 'external-trigger': self.ext_triggers[name] = self.dequote(ext_trigger_msg) self.cfg['scheduling']['special tasks'][s_type] = result self.process_config_env() self.mem_log("config.py: before load_graph()") self.load_graph() self.mem_log("config.py: after load_graph()") self.process_runahead_limit() if self.run_mode('simulation', 'dummy', 'dummy-local'): self.configure_sim_modes() self.configure_workflow_state_polling_tasks() self._check_task_event_handlers() self._check_special_tasks() # adds to self.implicit_tasks self._check_explicit_cycling() self._check_implicit_tasks() self.validate_namespace_names() # Check that external trigger messages are only used once (they have to # be discarded immediately to avoid triggering the next instance of the # just-triggered task). seen = {} for name, tdef in self.taskdefs.items(): for msg in tdef.external_triggers: if msg not in seen: seen[msg] = name else: LOG.error( "External trigger '%s'\n used in tasks %s and %s." 
% ( msg, name, seen[msg])) raise WorkflowConfigError( "external triggers must be used only once.") self.leaves = self.get_task_name_list() for ancestors in self.runtime['first-parent ancestors'].values(): try: foot = ancestors[-2] # one back from 'root' except IndexError: pass else: if foot not in self.feet: self.feet.append(foot) self.feet.sort() # sort effects get_graph_raw output # Replace workflow and task name in workflow and task URLs. self.cfg['meta']['URL'] = self.cfg['meta']['URL'] % { 'workflow_name': self.workflow} # BACK COMPAT: CYLC_WORKFLOW_NAME # from: # Cylc7 # to: # Cylc8 # remove at: # Cylc9 self.cfg['meta']['URL'] = RE_WORKFLOW_ID_VAR.sub( self.workflow, self.cfg['meta']['URL']) for name, cfg in self.cfg['runtime'].items(): cfg['meta']['URL'] = cfg['meta']['URL'] % { 'workflow_name': self.workflow, 'task_name': name} # BACK COMPAT: CYLC_WORKFLOW_NAME, CYLC_TASK_NAME # from: # Cylc7 # to: # Cylc8 # remove at: # Cylc9 cfg['meta']['URL'] = RE_WORKFLOW_ID_VAR.sub( self.workflow, cfg['meta']['URL']) cfg['meta']['URL'] = RE_TASK_NAME_VAR.sub( name, cfg['meta']['URL']) if getattr(self.options, 'is_validate', False): self.mem_log("config.py: before _check_circular()") self._check_circular() self.mem_log("config.py: after _check_circular()") self.mem_log("config.py: end init config") def prelim_process_graph(self) -> None: """Ensure graph is not empty; set integer cycling mode and icp/fcp = 1 for simplest "R1 = foo" type graphs. """ graphdict = self.cfg['scheduling']['graph'] if not any(graphdict.values()): raise WorkflowConfigError('No workflow dependency graph defined.') if ( 'cycling mode' not in self.cfg['scheduling'] and self.cfg['scheduling'].get('initial cycle point', '1') == '1' and all(item in ['graph', '1', 'R1'] for item in graphdict) ): # Pure acyclic graph, assume integer cycling mode with '1' cycle self.cfg['scheduling']['cycling mode'] = INTEGER_CYCLING_TYPE for key in ('initial cycle point', 'final cycle point'): if key not in self.cfg['scheduling']: self.cfg['scheduling'][key] = '1' def process_utc_mode(self): """Set UTC mode from config or from stored value on restart. Sets: self.cfg['scheduler']['UTC mode'] The UTC mode flag """ cfg_utc_mode = self.cfg['scheduler']['UTC mode'] # Get the original UTC mode if restart: orig_utc_mode = getattr(self.options, 'utc_mode', None) if orig_utc_mode is None: # Not a restart - will save config value if cfg_utc_mode is not None: orig_utc_mode = cfg_utc_mode else: orig_utc_mode = glbl_cfg().get(['scheduler', 'UTC mode']) elif cfg_utc_mode is not None and cfg_utc_mode != orig_utc_mode: LOG.warning( "UTC mode = {0} specified in configuration, but is stored as " "{1} from the initial run. The workflow will continue to use " "UTC mode = {1}" .format(cfg_utc_mode, orig_utc_mode) ) self.cfg['scheduler']['UTC mode'] = orig_utc_mode set_utc_mode(orig_utc_mode) def process_cycle_point_tz(self): """Set the cycle point time zone from config or from stored value on restart. Ensure workflows restart with the same cycle point time zone even after system time zone changes e.g. DST (the value is put in db by Scheduler). 
Sets: self.cfg['scheduler']['cycle point time zone'] """ cfg_cp_tz = self.cfg['scheduler'].get('cycle point time zone') if ( not cylc.flow.flags.cylc7_back_compat and not cfg_cp_tz ): cfg_cp_tz = 'Z' # Get the original workflow run time zone if restart: orig_cp_tz = getattr(self.options, 'cycle_point_tz', None) if orig_cp_tz is None: # Not a restart if cfg_cp_tz is None: if get_utc_mode() is True: orig_cp_tz = 'Z' else: orig_cp_tz = get_local_time_zone_format() else: orig_cp_tz = cfg_cp_tz elif cfg_cp_tz is not None: dmp = TimePointDumper() if dmp.get_time_zone(cfg_cp_tz) != dmp.get_time_zone(orig_cp_tz): LOG.warning( "cycle point time zone = {0} specified in configuration, " "but there is a stored value of {1} from the initial run. " "The workflow will continue to run in {1}" .format(cfg_cp_tz, orig_cp_tz) ) self.cfg['scheduler']['cycle point time zone'] = orig_cp_tz def process_initial_cycle_point(self) -> None: """Validate and set initial cycle point from flow.cylc or options. Sets: self.initial_point self.cfg['scheduling']['initial cycle point'] self.options.icp Raises: WorkflowConfigError - if it fails to validate """ orig_icp = self.cfg['scheduling']['initial cycle point'] if self.cycling_type == INTEGER_CYCLING_TYPE: if orig_icp is None: orig_icp = '1' icp = orig_icp elif self.cycling_type == ISO8601_CYCLING_TYPE: if orig_icp is None: raise WorkflowConfigError( "This workflow requires an initial cycle point.") if orig_icp == "now": icp = get_current_time_string() else: try: icp = ingest_time(orig_icp, get_current_time_string()) except IsodatetimeError as exc: raise WorkflowConfigError(str(exc)) if orig_icp != icp: # now/next()/prev() was used, need to store evaluated point in DB self.options.icp = icp self.initial_point = get_point(icp).standardise() self.cfg['scheduling']['initial cycle point'] = str(self.initial_point) # Validate initial cycle point against any constraints constraints = self.cfg['scheduling']['initial cycle point constraints'] if constraints: valid_icp = False for entry in constraints: possible_pt = get_point_relative( entry, self.initial_point ).standardise() if self.initial_point == possible_pt: valid_icp = True break if not valid_icp: raise WorkflowConfigError( f"Initial cycle point {self.initial_point} does not meet " f"the constraints {constraints}") def process_start_cycle_point(self) -> None: """Set the start cycle point from options. Sets: self.options.startcp self.start_point """ startcp = getattr(self.options, 'startcp', None) starttask = getattr(self.options, 'starttask', None) if startcp is not None and starttask is not None: raise UserInputError( "--start-cycle-point and --start-task are mutually exclusive" ) if startcp: # Start from a point later than initial point. if self.options.startcp == 'now': self.options.startcp = get_current_time_string() self.start_point = get_point(self.options.startcp).standardise() elif starttask: # Start from designated task(s). # Select the earliest start point for use in pre-initial ignore. self.start_point = min( get_point( TaskID.split(taskid)[1] ).standardise() for taskid in self.options.starttask ) else: # Start from the initial point. self.start_point = self.initial_point def process_final_cycle_point(self) -> None: """Validate and set the final cycle point from flow.cylc or options. 
Sets: self.final_point self.cfg['scheduling']['final cycle point'] Raises: WorkflowConfigError - if it fails to validate """ if ( self.cfg['scheduling']['final cycle point'] is not None and not self.cfg['scheduling']['final cycle point'].strip() ): self.cfg['scheduling']['final cycle point'] = None fcp_str = getattr(self.options, 'fcp', None) if fcp_str == 'reload': fcp_str = self.options.fcp = None if fcp_str is None: fcp_str = self.cfg['scheduling']['final cycle point'] if fcp_str is not None: # Is the final "point"(/interval) relative to initial? if self.cycling_type == INTEGER_CYCLING_TYPE: if "P" in fcp_str: # Relative, integer cycling. self.final_point = get_point_relative( self.cfg['scheduling']['final cycle point'], self.initial_point ).standardise() else: with contextlib.suppress(IsodatetimeError): # Relative, ISO8601 cycling. self.final_point = get_point_relative( fcp_str, self.initial_point).standardise() if self.final_point is None: # Must be absolute. self.final_point = get_point(fcp_str).standardise() self.cfg['scheduling']['final cycle point'] = str(self.final_point) if (self.final_point is not None and self.initial_point > self.final_point): raise WorkflowConfigError( f"The initial cycle point:{self.initial_point} is after the " f"final cycle point:{self.final_point}.") # Validate final cycle point against any constraints constraints = self.cfg['scheduling']['final cycle point constraints'] if constraints and self.final_point is not None: valid_fcp = False for entry in constraints: possible_pt = get_point_relative( entry, self.final_point).standardise() if self.final_point == possible_pt: valid_fcp = True break if not valid_fcp: raise WorkflowConfigError( f"Final cycle point {self.final_point} does not " f"meet the constraints {constraints}") def _check_implicit_tasks(self) -> None: """Raise WorkflowConfigError if implicit tasks are found in graph or queue config, unless allowed by config.""" if not self.implicit_tasks: return print_limit = 10 tasks_str = '\n * '.join(list(self.implicit_tasks)[:print_limit]) num = len(self.implicit_tasks) if num > print_limit: tasks_str += f"\n and {num} more" msg = ( "implicit tasks detected (no entry under [runtime]):\n" f" * {tasks_str}" ) if self.cfg['scheduler']['allow implicit tasks']: LOG.debug(msg) return # Check if implicit tasks explicitly disallowed try: is_disallowed = self.pcfg.get( ['scheduler', 'allow implicit tasks'], sparse=True ) is False except ItemNotFoundError: is_disallowed = False if is_disallowed: raise WorkflowConfigError(msg) # Otherwise "[scheduler]allow implicit tasks" is not set msg = ( f"{msg}\n" "To allow implicit tasks, use " f"'{WorkflowFiles.FLOW_FILE}[scheduler]allow implicit tasks'" ) # Allow implicit tasks in Cylc 7 back-compat mode (but not if # rose-suite.conf present, to maintain compat with Rose 2019) if ( Path(self.run_dir, 'rose-suite.conf').is_file() or not cylc.flow.flags.cylc7_back_compat ): raise WorkflowConfigError(msg) LOG.warning(msg) def _check_circular(self): """Check for circular dependence in graph.""" if (len(self.taskdefs) > self.CHECK_CIRCULAR_LIMIT and not getattr(self.options, 'check_circular', False)): LOG.warning( f"Number of tasks is > {self.CHECK_CIRCULAR_LIMIT}; will not " "check graph for circular dependencies. 
To enforce this " "check, use the option --check-circular.") return start_point_str = self.cfg['scheduling']['initial cycle point'] raw_graph = self.get_graph_raw(start_point_str, stop_point_str=None) lhs2rhss = {} # left hand side to right hand sides rhs2lhss = {} # right hand side to left hand sides for lhs, rhs in raw_graph: lhs2rhss.setdefault(lhs, set()) lhs2rhss[lhs].add(rhs) rhs2lhss.setdefault(rhs, set()) rhs2lhss[rhs].add(lhs) self._check_circular_helper(lhs2rhss, rhs2lhss) if rhs2lhss: # Before reporting circular dependence, pick out all the edges with # no outgoings. self._check_circular_helper(rhs2lhss, lhs2rhss) err_msg = '' for rhs, lhss in sorted(rhs2lhss.items()): for lhs in sorted(lhss): err_msg += ' %s => %s' % ( TaskID.get(*lhs), TaskID.get(*rhs)) if err_msg: raise WorkflowConfigError( 'circular edges detected:' + err_msg) @staticmethod def _check_circular_helper(x2ys, y2xs): """Topological elimination. An implementation of Kahn's algorithm for topological sorting, but only use the part for pulling out starter nodes with no incoming edges. See https://en.wikipedia.org/wiki/Topological_sorting x2ys is a map of {x1: [y1, y2, ...], ...} to map edges using x's as keys, such as x1 => y1, x1 => y2, etc y2xs is a map of {y3: [x4, x5, ...], ...} to map edges using y's as keys, such as x4 => y3, x5 => y3, etc """ # Starter x nodes are those with no incoming, i.e. # x nodes that never appear as a y. sxs = set(x2ys).difference(y2xs) while sxs: sx01 = sxs.pop() for y01 in x2ys[sx01]: y2xs[y01].remove(sx01) if not y2xs[y01]: if y01 in x2ys: # No need to look at this again if it does not have any # outgoing. sxs.add(y01) del y2xs[y01] del x2ys[sx01] def _expand_name_list(self, orig_names): """Expand any parameters in lists of names.""" name_expander = NameExpander(self.parameters) exp_names = [] for orig_name in orig_names: exp_names += [name for name, _ in name_expander.expand(orig_name)] return exp_names def _update_task_params(self, task_name, params): """Update the dict of parameters used in a task definition. # Used to expand parameter values in task environments. """ self.task_param_vars.setdefault( task_name, {} ).update( params ) def _expand_runtime(self): """Expand [runtime] name lists or parameterized names. This makes individual runtime namespaces out of any headings that represent multiple namespaces, like [[foo, bar]] or [[foo<m,n>]]. It requires replicating the sparse runtime OrderedDict into a new OrderedDict - we can't just stick expanded names on the end because the order matters (for add-or-override by repeated namespaces). TODO - this will have an impact on memory footprint for large workflows with a lot of runtime config. We should consider ditching OrderedDict and instead using an ordinary dict """ if (not self.parameters[0] and not any(',' in ns for ns in self.cfg['runtime'])): # No parameters, no namespace lists: no expansion needed. 
return newruntime = OrderedDictWithDefaults() name_expander = NameExpander(self.parameters) for namespace_heading, namespace_dict in self.cfg['runtime'].items(): for name, indices in name_expander.expand(namespace_heading): if name not in newruntime: newruntime[name] = OrderedDictWithDefaults() replicate(newruntime[name], namespace_dict) if indices: self._update_task_params(name, indices) new_environ = OrderedDictWithDefaults() if 'environment' in newruntime[name]: new_environ = newruntime[name]['environment'].copy() newruntime[name]['environment'] = new_environ if 'inherit' in newruntime[name]: # Allow inheritance from parameterized namespaces. parents = newruntime[name]['inherit'] origin = 'inherit = %s' % ', '.join(parents) repl_parents = [] for parent in parents: used_indices, expanded = ( name_expander.expand_parent_params( parent, indices, origin) ) repl_parents.append(expanded) if used_indices: self._update_task_params(name, used_indices) newruntime[name]['inherit'] = repl_parents self.cfg['runtime'] = newruntime def validate_namespace_names(self): """Validate task and family names.""" for name in self.implicit_tasks: success, message = TaskNameValidator.validate(name) if not success: raise WorkflowConfigError( f'invalid task name "{name}"\n{message}' ) for name in self.cfg['runtime']: if name == 'root': # root is allowed to be defined in the runtime section continue success, message = TaskNameValidator.validate(name) if not success: raise WorkflowConfigError( f'task/family name {message}\n[runtime][[{name}]]' ) @staticmethod def dequote(s): """Strip quotes off a string.""" if (s[0] == s[-1]) and s.startswith(("'", '"')): return s[1:-1] return s def check_env_names(self): """Check for illegal environment variable names""" bad = {} for label, item in self.cfg['runtime'].items(): for key in ('environment', 'parameter environment templates'): if key in item: res = check_varnames(item[key]) if res: bad[(label, key)] = res if bad: err_msg = "bad env variable names:" for (label, key), names in bad.items(): err_msg += '\nNamespace:\t%s [%s]' % (label, key) for name in names: err_msg += "\n\t\t%s" % name LOG.error(err_msg) raise WorkflowConfigError( "Illegal environment variable name(s) detected") def check_param_env_tmpls(self): """Check for illegal parameter environment templates""" parameter_values = { key: values[0] for key, values in self.parameters[0].items() if values } bads = set() for task_name, task_items in self.cfg['runtime'].items(): if 'environment' not in task_items: continue for name, tmpl in task_items['environment'].items(): try: interpolate_template(tmpl, parameter_values) except ParamExpandError as descr: bads.add((task_name, name, tmpl, descr)) if bads: LOG.warning( 'bad parameter environment template:\n ' + '\n '.join( '[runtime][%s][environment]%s = %s # %s' % bad for bad in sorted(bads) ) ) def filter_env(self): """Filter environment variables after sparse inheritance""" for ns in self.cfg['runtime'].values(): try: oenv = ns['environment'] except KeyError: # no environment to filter continue try: fincl = ns['environment filter']['include'] except KeyError: # empty include-filter means include all fincl = [] try: fexcl = ns['environment filter']['exclude'] except KeyError: # empty exclude-filter means exclude none fexcl = [] if not fincl and not fexcl: # no filtering to do continue nenv = OrderedDictWithDefaults() for key, val in oenv.items(): if (not fincl or key in fincl) and key not in fexcl: nenv[key] = val ns['environment'] = nenv def compute_family_tree(self): 
first_parents = {} demoted = {} for name in self.cfg['runtime']: if name == 'root': self.runtime['parents'][name] = [] first_parents[name] = [] continue # get declared parents, with implicit inheritance from root. pts = self.cfg['runtime'][name].get('inherit', ['root']) if not pts: pts = ['root'] for p in pts: if p == "None": # see just below continue if p not in self.cfg['runtime']: raise WorkflowConfigError( "undefined parent for " + name + ": " + p) if pts[0] == "None": if len(pts) < 2: raise WorkflowConfigError( "null parentage for " + name) demoted[name] = pts[1] pts = pts[1:] first_parents[name] = ['root'] else: first_parents[name] = [pts[0]] self.runtime['parents'][name] = pts if cylc.flow.flags.verbosity > 0 and demoted: log_msg = "First parent(s) demoted to secondary:" for n, p in demoted.items(): log_msg += "\n + %s as parent of '%s'" % (p, n) LOG.debug(log_msg) c3 = C3(self.runtime['parents']) c3_single = C3(first_parents) for name in self.cfg['runtime']: try: self.runtime['linearized ancestors'][name] = c3.mro(name) self.runtime['first-parent ancestors'][name] = ( c3_single.mro(name)) except RecursionError: raise WorkflowConfigError( "circular [runtime] inheritance?") except Exception as exc: # catch inheritance errors # TODO - specialise MRO exceptions raise WorkflowConfigError(str(exc)) for name in self.cfg['runtime']: ancestors = self.runtime['linearized ancestors'][name] for p in ancestors[1:]: if p not in self.runtime['descendants']: self.runtime['descendants'][p] = [] if name not in self.runtime['descendants'][p]: self.runtime['descendants'][p].append(name) first_ancestors = self.runtime['first-parent ancestors'][name] for p in first_ancestors[1:]: if p not in self.runtime['first-parent descendants']: self.runtime['first-parent descendants'][p] = [] if name not in self.runtime['first-parent descendants'][p]: self.runtime['first-parent descendants'][p].append(name) def compute_inheritance(self): LOG.debug("Parsing the runtime namespace hierarchy") # TODO: Note an unused alternative mechanism was removed here # (March 2020). It stored the result of each completed MRO and # re-used these wherever possible. This could be more efficient # for full namespaces in deep hierarchies. We should go back and # look if inheritance computation becomes a problem. results = OrderedDictWithDefaults() # Loop through runtime members, 'root' first. nses = list(self.cfg['runtime']) nses.sort(key=lambda ns: ns != 'root') for ns in nses: # for each namespace ... hierarchy = copy(self.runtime['linearized ancestors'][ns]) hierarchy.reverse() result = OrderedDictWithDefaults() # Go up the linearized MRO from root, replicating or # overriding each namespace element as we go. for name in hierarchy: replicate(result, self.cfg['runtime'][name]) # n_reps += 1 results[ns] = result # replace pre-inheritance namespaces with the post-inheritance result self.cfg['runtime'] = results # uncomment this to compare the simple and efficient methods # print ' Number of namespace replications:', n_reps # def print_inheritance(self): # # (use for debugging) # for foo in self.runtime: # log_msg = '\t' + foo # for item, val in self.runtime[foo].items(): # log_msg += '\t\t' + item + '\t' + val # LOG.info(log_msg) def process_runahead_limit(self): """Extract the runahead limits information.""" limit = self.cfg['scheduling']['runahead limit'] if limit.isdigit(): limit = f'PT{limit}H' LOG.warning( 'Use of a raw number of hours for the runahead limit is ' f'deprecated. 
Use "{limit}" instead') number_limit_regex = re.compile(r'^P\d+$') time_limit_regexes = DurationParser.DURATION_REGEXES if number_limit_regex.fullmatch(limit): self.custom_runahead_limit = IntegerInterval(limit) # Handle "runahead limit = P0": if self.custom_runahead_limit.is_null(): self.custom_runahead_limit = IntegerInterval('P1') elif ( # noqa: SIM106 self.cycling_type == ISO8601_CYCLING_TYPE and any(tlr.fullmatch(limit) for tlr in time_limit_regexes) ): self.custom_runahead_limit = ISO8601Interval(limit) else: raise WorkflowConfigError( f'bad runahead limit "{limit}" for {self.cycling_type} ' 'cycling type') def get_custom_runahead_limit(self): """Return the custom runahead limit (may be None).""" return self.custom_runahead_limit def get_max_num_active_cycle_points(self): """Return the maximum allowed number of pool cycle points.""" return self.max_num_active_cycle_points def get_config(self, args, sparse=False): return self.pcfg.get(args, sparse) def adopt_orphans(self, orphans): # Called by the scheduler after reloading the workflow definition # at run time and finding any live task proxies whose # definitions have been removed from the workflow. Keep them # in the default queue and under the root family, until they # run their course and disappear. for orphan in orphans: self.runtime['linearized ancestors'][orphan] = [orphan, 'root'] def configure_workflow_state_polling_tasks(self): # Check custom script not defined for automatic workflow polling tasks. for l_task in self.workflow_polling_tasks: try: cs = self.pcfg.get(sparse=True)['runtime'][l_task]['script'] except KeyError: pass else: if cs: # (allow explicit blanking of inherited script) raise WorkflowConfigError( "script cannot be defined for automatic" + " workflow polling task '%s':\n%s" % (l_task, cs)) # Generate the automatic scripting. for name, tdef in list(self.taskdefs.items()): if name not in self.workflow_polling_tasks: continue rtc = tdef.rtconfig comstr = ( "cylc workflow-state" f" --task={tdef.workflow_polling_cfg['task']}" " --point=$CYLC_TASK_CYCLE_POINT" ) for key, fmt in [ ('user', ' --%s=%s'), ('host', ' --%s=%s'), ('interval', ' --%s=%d'), ('max-polls', ' --%s=%s'), ('run-dir', ' --%s=%s')]: if rtc['workflow state polling'][key]: comstr += fmt % (key, rtc['workflow state polling'][key]) if rtc['workflow state polling']['message']: comstr += " --message='%s'" % ( rtc['workflow state polling']['message']) else: comstr += " --status=" + tdef.workflow_polling_cfg['status'] comstr += " " + tdef.workflow_polling_cfg['workflow'] script = "echo " + comstr + "\n" + comstr rtc['script'] = script def configure_sim_modes(self): """Adjust task defs for simulation mode and dummy modes.""" for tdef in self.taskdefs.values(): # Compute simulated run time by scaling the execution limit. rtc = tdef.rtconfig limit = rtc['execution time limit'] speedup = rtc['simulation']['speedup factor'] if limit and speedup: sleep_sec = (DurationParser().parse( str(limit)).get_seconds() / speedup) else: sleep_sec = DurationParser().parse( str(rtc['simulation']['default run length']) ).get_seconds() rtc['execution time limit'] = ( sleep_sec + DurationParser().parse(str( rtc['simulation']['time limit buffer'])).get_seconds() ) rtc['job']['simulated run length'] = sleep_sec # Generate dummy scripting. rtc['init-script'] = "" rtc['env-script'] = "" rtc['pre-script'] = "" rtc['post-script'] = "" scr = "sleep %d" % sleep_sec # Dummy message outputs. 
for msg in rtc['outputs'].values(): scr += "\ncylc message '%s'" % msg if rtc['simulation']['fail try 1 only']: arg1 = "true" else: arg1 = "false" arg2 = " ".join(rtc['simulation']['fail cycle points']) scr += "\ncylc__job__dummy_result %s %s || exit 1" % (arg1, arg2) rtc['script'] = scr # All dummy modes should run on platform localhost # All Cylc 7 config items which conflict with platform are removed. for section, key, _ in FORBIDDEN_WITH_PLATFORM: if (section in rtc and key in rtc[section]): rtc[section][key] = None rtc['platform'] = 'localhost' # Disable environment, in case it depends on env-script. rtc['environment'] = {} if tdef.run_mode == 'dummy-local': # Run all dummy tasks on the workflow host. rtc['platform'] = 'localhost' # Simulation mode tasks should fail in which cycle points? f_pts = [] f_pts_orig = rtc['simulation']['fail cycle points'] if 'all' in f_pts_orig: # None for "fail all points". f_pts = None else: # (And [] for "fail no points".) for point_str in f_pts_orig: f_pts.append(get_point(point_str).standardise()) rtc['simulation']['fail cycle points'] = f_pts def get_parent_lists(self): return self.runtime['parents'] def get_first_parent_ancestors(self, pruned=False): if pruned: # prune non-task namespaces from ancestors dict pruned_ancestors = {} for key, val in self.runtime['first-parent ancestors'].items(): if key not in self.taskdefs: continue pruned_ancestors[key] = val return pruned_ancestors else: return self.runtime['first-parent ancestors'] def get_linearized_ancestors(self): return self.runtime['linearized ancestors'] def get_first_parent_descendants(self): return self.runtime['first-parent descendants'] @staticmethod def define_inheritance_tree(tree, hierarchy): """Combine inheritance hierarchies into a tree structure.""" for rt_ in hierarchy: hier = copy(hierarchy[rt_]) hier.reverse() cur_tree = tree for item in hier: if item not in cur_tree: cur_tree[item] = {} cur_tree = cur_tree[item] def add_tree_titles(self, tree): for key, val in tree.items(): if val == {}: if 'title' in self.cfg['runtime'][key]['meta']: tree[key] = self.cfg['runtime'][key]['meta']['title'] else: tree[key] = NO_TITLE elif isinstance(val, dict): self.add_tree_titles(val) def get_namespace_list(self, which): names = [] if which == 'graphed tasks': # tasks used only in the graph names = list(self.taskdefs) elif which == 'all namespaces': # all namespaces names = list(self.cfg['runtime']) elif which == 'all tasks': for ns in self.cfg['runtime']: if ns not in self.runtime['descendants']: # tasks have no descendants names.append(ns) result = {} for ns in names: if 'title' in self.cfg['runtime'][ns]['meta']: # the runtime dict is sparse at this stage. 
result[ns] = self.cfg['runtime'][ns]['meta']['title'] else: # no need to flesh out the full runtime just for title result[ns] = NO_TITLE return result def get_mro(self, ns): try: mro = self.runtime['linearized ancestors'][ns] except KeyError: mro = ["no such namespace: " + ns] return mro def print_first_parent_tree(self, pretty=False, titles=False): # find task namespaces (no descendants) tasks = [] for ns in self.cfg['runtime']: if ns not in self.runtime['descendants']: tasks.append(ns) pruned_ancestors = self.get_first_parent_ancestors(pruned=True) tree = {} self.define_inheritance_tree(tree, pruned_ancestors) padding = '' if titles: self.add_tree_titles(tree) # compute pre-title padding maxlen = 0 for namespace in pruned_ancestors: items = copy(pruned_ancestors[namespace]) items.reverse() for itt, item in enumerate(items): tmp = 2 * itt + 1 + len(item) if itt == 0: tmp -= 1 if tmp > maxlen: maxlen = tmp padding = maxlen * ' ' print_tree(tree, padding=padding, use_unicode=pretty) def process_workflow_env(self): """Workflow context is exported to the local environment.""" for key, value in { **verbosity_to_env(cylc.flow.flags.verbosity), 'CYLC_WORKFLOW_ID': self.workflow, 'CYLC_WORKFLOW_NAME': self.workflow_name, 'CYLC_WORKFLOW_RUN_DIR': self.run_dir, 'CYLC_WORKFLOW_LOG_DIR': self.log_dir, 'CYLC_WORKFLOW_WORK_DIR': self.work_dir, 'CYLC_WORKFLOW_SHARE_DIR': self.share_dir, }.items(): os.environ[key] = value def process_config_env(self): """Set local config derived environment.""" os.environ['CYLC_UTC'] = str(get_utc_mode()) os.environ['CYLC_WORKFLOW_INITIAL_CYCLE_POINT'] = str( self.initial_point ) os.environ['CYLC_WORKFLOW_FINAL_CYCLE_POINT'] = str(self.final_point) os.environ['CYLC_CYCLING_MODE'] = self.cfg['scheduling'][ 'cycling mode'] # Add workflow bin directory to PATH for workflow and event handlers os.environ['PATH'] = os.pathsep.join([ os.path.join(self.fdir, 'bin'), os.environ['PATH']]) def run_mode(self, *reqmodes): """Return the run mode. Combine command line option with configuration setting. If "reqmodes" is specified, return the boolean (mode in reqmodes). Otherwise, return the mode as a str. """ mode = getattr(self.options, 'run_mode', None) if not mode: mode = 'live' if reqmodes: return mode in reqmodes else: return mode def _check_task_event_handlers(self): """Check custom event handler templates can be expanded. Ensures that any %(template_variables)s in task event handlers are present in the data that will be passed to them when called (otherwise they will fail). 
""" for taskdef in self.taskdefs.values(): if taskdef.rtconfig['events']: handler_data = { item.value: '' for item in EventData } handler_data.update( get_event_handler_data(taskdef.rtconfig, self.cfg) ) for key, values in taskdef.rtconfig['events'].items(): if values and ( key == 'handlers' or key.endswith(' handlers')): for handler_template in values: try: handler_template % handler_data except (KeyError, ValueError) as exc: raise WorkflowConfigError( f'bad task event handler template' f' {taskdef.name}:' f' {handler_template}:' f' {repr(exc)}' ) def _check_special_tasks(self): """Check declared special tasks are valid, and detect special implicit tasks""" for task_type in self.cfg['scheduling']['special tasks']: for name in self.cfg['scheduling']['special tasks'][task_type]: if task_type in ['clock-trigger', 'clock-expire', 'external-trigger']: name = name.split('(', 1)[0] if not TaskID.is_valid_name(name): raise WorkflowConfigError( f'Illegal {task_type} task name: {name}') if (name not in self.taskdefs and name not in self.cfg['runtime']): self.implicit_tasks.add(name) def _check_explicit_cycling(self): """Check that inter-cycle offsets refer to cycling tasks. E.G. foo[-P1] => bar requires foo to be defined in the graph somewhere. """ for taskdef in self.taskdefs.values(): taskdef.check_for_explicit_cycling() def get_task_name_list(self): """Return a sorted list of all tasks used in the dependency graph. Note: the sort order may effect get_graph_raw ouput. """ return sorted(self.taskdefs) def generate_edges(self, lexpr, orig_lexpr, left_nodes, right, seq, suicide=False): """Generate edges. Add nodes from this graph section to the abstract graph edges structure. """ conditional = False if '|' in lexpr: # plot conditional triggers differently conditional = True if seq not in self.edges: self.edges[seq] = set() if not left_nodes: # Right is a lone node. self.edges[seq].add((right, None, suicide, conditional)) for left in left_nodes: # if left is None: # continue # TODO - RIGHT CANNOT BE NONE NOW? # if right is not None: # Check for self-edges. if left == right or left.startswith(right + ':'): # (This passes inter-cycle offsets: left[-P1D] => left) # (TODO - but not explicit null offsets like [-P0D]!) if suicide: continue if orig_lexpr != lexpr: LOG.error(f"{orig_lexpr} => {right}") raise WorkflowConfigError( f"self-edge detected: {left} => {right}") self.edges[seq].add((left, right, suicide, conditional)) def generate_taskdef(self, orig_expr, node): """Generate task definition for node.""" name = GraphNodeParser.get_inst().parse(node)[0] taskdef = self.get_taskdef(name, orig_expr) if name in self.workflow_polling_tasks: taskdef.workflow_polling_cfg = { 'workflow': self.workflow_polling_tasks[name][0], 'task': self.workflow_polling_tasks[name][1], 'status': self.workflow_polling_tasks[name][2] } def add_sequence(self, nodes, seq, suicide): """Add valid sequences to taskdefs.""" for node in nodes: name, offset = GraphNodeParser.get_inst().parse(node)[:2] taskdef = self.get_taskdef(name) # Only add sequence to taskdef if explicit (not an offset). if offset: taskdef.used_in_offset_trigger = True elif not suicide: # "foo => !bar" does not define a sequence for bar taskdef.add_sequence(seq) def generate_triggers(self, lexpression, left_nodes, right, seq, suicide, task_triggers): """Create Dependency and TaskTrigger objects. Register dependency with the relevant TaskDef object. """ if not right or not left_nodes: # Lone nodes have no triggers. return # Convert expression to a (nested) list. 
try: expr_list = listify(lexpression) except SyntaxError: raise WorkflowConfigError('Error in expression "%s"' % lexpression) triggers = {} xtrig_labels = set() for left in left_nodes: if left.startswith('@'): xtrig_labels.add(left[1:]) continue # (GraphParseError checked above) (name, offset, output, offset_is_from_icp, offset_is_irregular, offset_is_absolute) = ( GraphNodeParser.get_inst().parse(left)) # Qualifier. outputs = self.cfg['runtime'][name]['outputs'] if outputs and (output in outputs): # Qualifier is a custom task message. qualifier = outputs[output] elif output: if not TaskOutputs.is_valid_std_name(output): raise WorkflowConfigError( f"Undefined custom output: {name}:{output}" ) qualifier = output else: # No qualifier specified => use "succeeded". qualifier = TASK_OUTPUT_SUCCEEDED # Generate TaskTrigger if not already done. key = (name, offset, qualifier, offset_is_irregular, offset_is_absolute, offset_is_from_icp, self.initial_point) try: task_trigger = task_triggers[key] except KeyError: task_trigger = TaskTrigger(*key) task_triggers[key] = task_trigger triggers[left] = task_trigger # (name is left name) self.taskdefs[name].add_graph_child(task_trigger, right, seq) # graph_parents not currently used but might be needed soon: self.taskdefs[right].add_graph_parent(task_trigger, name, seq) # Walk down "expr_list" depth first, and replace any items matching a # key in "triggers" ("left" values) with the trigger. stack = [expr_list] while stack: item_list = stack.pop() for i, item in enumerate(item_list): if isinstance(item, list): stack.append(item) elif item in triggers: item_list[i] = triggers[item] if triggers: dependency = Dependency(expr_list, set(triggers.values()), suicide) self.taskdefs[right].add_dependency(dependency, seq) validator = XtriggerNameValidator.validate for label in self.cfg['scheduling']['xtriggers']: valid, msg = validator(label) if not valid: raise WorkflowConfigError( f'Invalid xtrigger name "{label}" - {msg}' ) for label in xtrig_labels: try: xtrig = self.cfg['scheduling']['xtriggers'][label] except KeyError: if label != 'wall_clock': raise WorkflowConfigError(f"xtrigger not defined: {label}") else: # Allow "@wall_clock" in the graph as an undeclared # zero-offset clock xtrigger. xtrig = SubFuncContext( 'wall_clock', 'wall_clock', [], {}) if (xtrig.func_name == 'wall_clock' and self.cfg['scheduling']['cycling mode'] == ( INTEGER_CYCLING_TYPE)): sig = xtrig.get_signature() raise WorkflowConfigError( f"clock xtriggers need date-time cycling: {label} = {sig}") if self.xtrigger_mgr is None: XtriggerManager.validate_xtrigger(label, xtrig, self.fdir) else: self.xtrigger_mgr.add_trig(label, xtrig, self.fdir) self.taskdefs[right].add_xtrig_label(label, seq) def get_actual_first_point(self, start_point): """Get actual first cycle point for the workflow Get all sequences to adjust the putative start time upward. 
""" if (self._start_point_for_actual_first_point is not None and self._start_point_for_actual_first_point == start_point and self.actual_first_point is not None): return self.actual_first_point self._start_point_for_actual_first_point = start_point adjusted = [] for seq in self.sequences: point = seq.get_first_point(start_point) if point: adjusted.append(point) if len(adjusted) > 0: adjusted.sort() self.actual_first_point = adjusted[0] else: self.actual_first_point = start_point return self.actual_first_point def _get_stop_point(self, start_point, stop_point_str=None): """Get stop point from string value or interval, or return None.""" if stop_point_str is None: stop_point = None elif "P" in stop_point_str: # Is the final point(/interval) relative to initial? if self.cfg['scheduling']['cycling mode'] == 'integer': # Relative, integer cycling. stop_point = get_point_relative( stop_point_str, start_point ).standardise() else: # Relative, ISO8601 cycling. stop_point = get_point_relative( stop_point_str, start_point ).standardise() else: stop_point = get_point(stop_point_str).standardise() return stop_point def get_graph_raw( self, start_point_str=None, stop_point_str=None, grouping=None): """Return concrete graph edges between specified cycle points. Return a family-collapsed graph if the grouping arg is not None: * ['FAM1', 'FAM2']: group (collapse) specified families * ['<all>']: group (collapse) all families above root For validation, return non-suicide edges with left and right nodes. """ start_point = get_point( start_point_str or self.cfg['scheduling']['initial cycle point'] ) stop_point = self._get_stop_point(start_point, stop_point_str) actual_first_point = self.get_actual_first_point(start_point) if grouping is None: grouping = [] elif grouping == ['<all>']: grouping = [ fam for fam in self.runtime["first-parent descendants"].keys() if fam != "root" ] else: for bad in ( set(grouping).difference( self.runtime["first-parent descendants"].keys() ) ): LOG.warning(f"Ignoring undefined family {bad}") grouping.remove(bad) is_validate = getattr( self.options, 'is_validate', False) # this is for _check_circular if is_validate: grouping = [] # Now define the concrete graph edges (pairs of nodes) for plotting. workflow_final_point = get_point( self.cfg['scheduling']['final cycle point']) # For the computed stop point, store VIS_N_POINTS of each sequence, # and then cull later to the first VIS_N_POINTS over all sequences. # For nested closed families, only consider the outermost one fpd = self.runtime['first-parent descendants'] clf_map = {} for name in grouping: if all( name not in fpd[i] for i in grouping ): clf_map[name] = fpd[name] gr_edges = {} start_point_offset_cache = {} point_offset_cache = None for sequence, edges in self.edges.items(): # Get initial cycle point for this sequence point = sequence.get_first_point(start_point) new_points = [] while point is not None: if point not in new_points: new_points.append(point) if stop_point is not None and point > stop_point: # Beyond requested final cycle point. break if (workflow_final_point is not None and point > workflow_final_point): # Beyond workflow final cycle point. break if stop_point is None and len(new_points) > self.VIS_N_POINTS: # Take VIS_N_POINTS cycles from each sequence. break point_offset_cache = {} for left, right, suicide, cond in edges: if is_validate and (not right or suicide): continue if right: r_id = (right, point) else: r_id = None if left.startswith('@'): # @xtrigger node. 
name = left offset_is_from_icp = False offset = None else: name, offset, _, offset_is_from_icp, _, _ = ( GraphNodeParser.get_inst().parse(left)) if offset: if offset_is_from_icp: cache = start_point_offset_cache rel_point = start_point else: cache = point_offset_cache rel_point = point try: l_point = cache[offset] except KeyError: l_point = get_point_relative(offset, rel_point) cache[offset] = l_point else: l_point = point l_id = (name, l_point) if l_id is None and r_id is None: continue if l_id is not None and actual_first_point > l_id[1]: # Check that l_id is not earlier than start time. if (r_id is None or r_id[1] < actual_first_point or is_validate): continue # Pre-initial dependency; # keep right hand node. l_id = r_id r_id = None if point not in gr_edges: gr_edges[point] = [] if is_validate: gr_edges[point].append((l_id, r_id)) else: lstr, rstr = self._close_families(l_id, r_id, clf_map) gr_edges[point].append( (lstr, rstr, None, suicide, cond)) # Increment the cycle point. point = sequence.get_next_point_on_sequence(point) del clf_map del start_point_offset_cache del point_offset_cache GraphNodeParser.get_inst().clear() if stop_point is None: # Prune to VIS_N_POINTS points in total. graph_raw_edges = [] for point in sorted(gr_edges)[:self.VIS_N_POINTS]: graph_raw_edges.extend(gr_edges[point]) else: # Flatten nested list. graph_raw_edges = ( [i for sublist in gr_edges.values() for i in sublist]) graph_raw_edges.sort(key=lambda x: [y if y else '' for y in x[:2]]) return graph_raw_edges def get_node_labels(self, start_point_str=None, stop_point_str=None): """Return dependency graph node labels.""" ret = set() for edge in self.get_graph_raw( start_point_str, stop_point_str, ): left, right = edge[0:2] if left: ret.add(left) if right: ret.add(right) return ret @staticmethod def _close_families(l_id, r_id, clf_map): """Turn (name, point) to 'name.point' for edge. Replace close family members with family nodes if relevant. """ lret = None lname, lpoint = None, None if l_id: lname, lpoint = l_id lret = TaskID.get(lname, lpoint) rret = None rname, rpoint = None, None if r_id: rname, rpoint = r_id rret = TaskID.get(rname, rpoint) for fam_name, fam_members in clf_map.items(): if lname in fam_members and rname in fam_members: # l and r are both members lret = TaskID.get(fam_name, lpoint) rret = TaskID.get(fam_name, rpoint) break elif lname in fam_members: # l is a member lret = TaskID.get(fam_name, lpoint) elif rname in fam_members: # r is a member rret = TaskID.get(fam_name, rpoint) return lret, rret def load_graph(self): """Parse and load dependency graph.""" LOG.debug("Parsing the dependency graph") # Generate a map of *task* members of each family. # Note we could exclude 'root' from this and disallow use of 'root' in # the graph (which would probably be quite reasonable). family_map = { family: [ task for task in tasks if ( task in self.runtime['parents'] and task not in self.runtime['descendants'] ) ] for family, tasks in self.runtime['descendants'].items() if family != 'root' } graphdict = self.cfg['scheduling']['graph'] if 'graph' in graphdict: section = get_sequence_cls().get_async_expr() graphdict[section] = graphdict.pop('graph') icp = self.cfg['scheduling']['initial cycle point'] fcp = self.cfg['scheduling']['final cycle point'] # Make a stack of sections and graphs [(sec1, graph1), ...] sections = [] for section, value in self.cfg['scheduling']['graph'].items(): # Substitute initial and final cycle points. 
if icp: section = section.replace("^", icp) elif "^" in section: raise WorkflowConfigError("Initial cycle point referenced" " (^) but not defined.") if fcp: section = section.replace("$", fcp) elif "$" in section: raise WorkflowConfigError("Final cycle point referenced" " ($) but not defined.") # If the section consists of more than one sequence, split it up. new_sections = RE_SEC_MULTI_SEQ.split(section) if len(new_sections) > 1: for new_section in new_sections: sections.append((new_section.strip(), value)) else: sections.append((section, value)) # Parse and process each graph section. if cylc.flow.flags.cylc7_back_compat: LOG.warning(self.__class__.CYLC7_GRAPH_COMPAT_MSG) task_triggers = {} task_output_opt = {} for section, graph in sections: try: seq = get_sequence(section, icp, fcp) except (AttributeError, TypeError, ValueError, CylcError) as exc: if cylc.flow.flags.verbosity > 1: traceback.print_exc() msg = 'Cannot process recurrence %s' % section msg += ' (initial cycle point=%s)' % icp msg += ' (final cycle point=%s)' % fcp if isinstance(exc, CylcError): msg += ' %s' % exc.args[0] raise WorkflowConfigError(msg) self.sequences.append(seq) parser = GraphParser( family_map, self.parameters, task_output_opt=task_output_opt ) parser.parse_graph(graph) task_output_opt.update(parser.task_output_opt) self.workflow_polling_tasks.update( parser.workflow_state_polling_tasks) self._proc_triggers(parser, seq, task_triggers) self.set_required_outputs(task_output_opt) # Detect use of xtrigger names with '@' prefix (creates a task). overlap = set(self.taskdefs.keys()).intersection( list(self.cfg['scheduling']['xtriggers'])) if overlap: LOG.error(', '.join(overlap)) raise WorkflowConfigError('task and @xtrigger names clash') for tdef in self.taskdefs.values(): tdef.tweak_outputs() def _proc_triggers(self, parser, seq, task_triggers): """Define graph edges, taskdefs, and triggers, from graph sections.""" suicides = 0 for right, val in parser.triggers.items(): for expr, trigs in val.items(): orig = parser.original[right][expr] lefts, suicide = trigs # (lefts, right) e.g.: # for """ # foo|bar => baz # @x => baz # """ # - ([], foo) # - ([], bar) # - (['foo:succeeded', 'bar:succeeded'], baz) # - (['@x'], baz) self.generate_edges(expr, orig, lefts, right, seq, suicide) # Lefts can be null; all appear on RHS once so can generate # taskdefs with right only. Right is never None or @xtrigger. self.generate_taskdef(orig, right) self.add_sequence( [ node for node in lefts + [right] if node and not node.startswith('@') ], seq, suicide ) # RHS quals not needed now (used already for taskdef outputs) right = parser.REC_QUAL.sub('', right) self.generate_triggers( expr, lefts, right, seq, suicide, task_triggers) if suicide: suicides += 1 if suicides and not cylc.flow.flags.cylc7_back_compat: LOG.warning( f"{suicides} suicide triggers detected. These are rarely" " needed in Cylc 8 - have you upgraded from Cylc 7 syntax?" ) def set_required_outputs( self, task_output_opt: Dict[Tuple[str, str], Tuple[bool, bool, bool]] ) -> None: """set optional/required status of parsed task outputs. Args: task_output_opt: {(task, output): (is-optional, default, is_set)} """ for name, taskdef in self.taskdefs.items(): for output in taskdef.outputs: try: optional, _, _ = task_output_opt[(name, output)] except KeyError: # Output not used in graph. continue taskdef.set_required_output(output, not optional) def find_taskdefs(self, name: str) -> List[TaskDef]: """Find TaskDef objects in family "name" or matching "name". 
Return a list of TaskDef objects which: * have names that glob matches "name". * are in a family that glob matches "name". """ ret: List[TaskDef] = [] if name in self.taskdefs: # Match a task name ret.append(self.taskdefs[name]) else: fams = self.runtime['first-parent descendants'] # Match a family name if name in fams: for member in fams[name]: if member in self.taskdefs: ret.append(self.taskdefs[member]) else: # Glob match task names for key, taskdef in self.taskdefs.items(): if fnmatchcase(key, name): ret.append(taskdef) # Glob match family names for key, members in fams.items(): if fnmatchcase(key, name): for member in members: if member in self.taskdefs: ret.append(self.taskdefs[member]) return ret def get_taskdef( self, name: str, orig_expr: Optional[str] = None ) -> TaskDef: """Return an instance of TaskDef for task name.""" if name not in self.taskdefs: if name == 'root': self.implicit_tasks.add(name) elif name not in self.cfg['runtime']: # implicit inheritance from root self.implicit_tasks.add(name) # These can't just be a reference to root runtime as we have to # make some items task-specific: e.g. subst task name in URLs. self.cfg['runtime'][name] = OrderedDictWithDefaults() replicate(self.cfg['runtime'][name], self.cfg['runtime']['root']) if 'root' not in self.runtime['descendants']: # (happens when no runtimes are defined in flow.cylc) self.runtime['descendants']['root'] = [] if 'root' not in self.runtime['first-parent descendants']: # (happens when no runtimes are defined in flow.cylc) self.runtime['first-parent descendants']['root'] = [] self.runtime['parents'][name] = ['root'] self.runtime['linearized ancestors'][name] = [name, 'root'] self.runtime['first-parent ancestors'][name] = [name, 'root'] self.runtime['descendants']['root'].append(name) self.runtime['first-parent descendants']['root'].append(name) self.ns_defn_order.append(name) try: self.taskdefs[name] = self._get_taskdef(name) except TaskDefError as exc: if orig_expr: LOG.error(orig_expr) raise WorkflowConfigError(str(exc)) else: # Record custom message outputs from [runtime]. for output, message in ( self.cfg['runtime'][name]['outputs'].items() ): valid, msg = TaskOutputValidator.validate(message) if not valid: raise WorkflowConfigError( f'Invalid message trigger "' f'[runtime][{name}][outputs]' f'{output} = {message}" - {msg}' ) self.taskdefs[name].add_output(output, message) return self.taskdefs[name] def _get_taskdef(self, name: str) -> TaskDef: """Get the dense task runtime.""" # (TaskDefError caught above) try: rtcfg = self.cfg['runtime'][name] except KeyError: raise WorkflowConfigError("Task not defined: %s" % name) # We may want to put in some handling for cases of changing the # initial cycle via restart (accidentally or otherwise). 
# Get the taskdef object for generating the task proxy class taskd = TaskDef( name, rtcfg, self.run_mode(), self.start_point, self.initial_point) # TODO - put all taskd.foo items in a single config dict if name in self.clock_offsets: taskd.clocktrigger_offset = self.clock_offsets[name] if name in self.expiration_offsets: taskd.expiration_offset = self.expiration_offsets[name] if name in self.ext_triggers: taskd.external_triggers.append(self.ext_triggers[name]) taskd.sequential = ( name in self.cfg['scheduling']['special tasks']['sequential']) taskd.namespace_hierarchy = list( reversed(self.runtime['linearized ancestors'][name])) if name in self.task_param_vars: taskd.param_var.update(self.task_param_vars[name]) return taskd def describe(self, name): """Return title and description of the named task.""" return self.taskdefs[name].describe() def get_ref_log_name(self): """Return path to reference log (for reference test).""" return os.path.join(self.fdir, 'reference.log') def get_expected_failed_tasks(self): """Return list of expected failed tasks. Return: - An empty list if NO task is expected to fail. - A list of NAME.CYCLE for the tasks that are expected to fail in reference test mode. - None if there is no expectation either way. """ if self.options.reftest: return self.cfg['scheduler']['events']['expected task failures'] elif self.options.abort_if_any_task_fails: return [] else: return None def get_validated_rsync_includes(self): """Validate and return items to be included in the file installation""" includes = self.cfg['scheduler']['install'] illegal_includes = [] for include in includes: if include.count("/") > 1: illegal_includes.append(f"{include}") if len(illegal_includes) > 0: raise WorkflowConfigError( "Error in [scheduler] install. " "Directories can only be from the top level, please " "reconfigure:" + str(illegal_includes)[1:-1]) return includes
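

# --- Editorial illustration (not part of the cylc source above) -------------
# WorkflowConfig._check_circular_helper, earlier in this module, describes its
# approach as the "starter node" half of Kahn's topological-sort algorithm:
# repeatedly strip nodes with no incoming edges, then strip nodes with no
# outgoing edges; any edges still standing must lie on a cycle.  The sketch
# below is a minimal, standalone restatement of that idea.  All names and data
# here are hypothetical and exist only to clarify the technique; this is not
# cylc API.
def _demo_find_circular_edges(edges):
    """Return the (x, y) edges of ``edges`` that lie on a cycle."""
    def prune(x2ys, y2xs):
        # Repeatedly drop "starter" x nodes that no longer receive any edge.
        starters = set(x2ys) - set(y2xs)
        while starters:
            x = starters.pop()
            for y in x2ys.pop(x):
                y2xs[y].discard(x)
                if not y2xs[y]:
                    del y2xs[y]
                    if y in x2ys:
                        starters.add(y)

    x2ys, y2xs = {}, {}
    for x, y in edges:
        x2ys.setdefault(x, set()).add(y)
        y2xs.setdefault(y, set()).add(x)
    prune(x2ys, y2xs)  # eliminate everything upstream of any cycle
    prune(y2xs, x2ys)  # then eliminate everything downstream of any cycle
    return sorted((x, y) for x, ys in x2ys.items() for y in ys)


# Example: a -> b -> c -> a is circular; d -> a and c -> e are not.
# _demo_find_circular_edges(
#     [("a", "b"), ("b", "c"), ("c", "a"), ("d", "a"), ("c", "e")])
# returns [("a", "b"), ("b", "c"), ("c", "a")].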
cylc/cylc
cylc/flow/config.py
Python
gpl-3.0
90,031
from jx_bigquery import bigquery from jx_mysql.mysql import (MySQL, sql_query) from jx_mysql.mysql_snowflake_extractor import MySqlSnowflakeExtractor from jx_python import jx from mo_files import File from mo_json import (json2value, value2json) from mo_logs import (Log, constants, startup, strings) from mo_sql import SQL from mo_times import Timer from mo_times.dates import Date from redis import Redis from treeherder.config.settings import REDIS_URL CONFIG_FILE = (File.new_instance(__file__).parent / "extract_jobs.json").abspath class ExtractJobs: def run(self, force=False, restart=False, start=None, merge=False): try: # SETUP LOGGING settings = startup.read_settings(filename=CONFIG_FILE) constants.set(settings.constants) Log.start(settings.debug) self.extract(settings, force, restart, start, merge) except Exception as e: Log.error("could not extract jobs", cause=e) finally: Log.stop() def extract(self, settings, force, restart, start, merge): if not settings.extractor.app_name: Log.error("Expecting an extractor.app_name in config file") # SETUP DESTINATION destination = bigquery.Dataset( dataset=settings.extractor.app_name, kwargs=settings.destination ).get_or_create_table(settings.destination) try: if merge: with Timer("merge shards"): destination.merge_shards() # RECOVER LAST SQL STATE redis = Redis.from_url(REDIS_URL) state = redis.get(settings.extractor.key) if start: state = start, 0 elif restart or not state: state = (0, 0) redis.set(settings.extractor.key, value2json(state).encode("utf8")) else: state = json2value(state.decode("utf8")) last_modified, job_id = state # SCAN SCHEMA, GENERATE EXTRACTION SQL extractor = MySqlSnowflakeExtractor(settings.source) canonical_sql = extractor.get_sql(SQL("SELECT 0")) # ENSURE SCHEMA HAS NOT CHANGED SINCE LAST RUN old_sql = redis.get(settings.extractor.sql) if old_sql and old_sql.decode("utf8") != canonical_sql.sql: if force: Log.warning("Schema has changed") else: Log.error("Schema has changed") redis.set(settings.extractor.sql, canonical_sql.sql.encode("utf8")) # SETUP SOURCE source = MySQL(settings.source.database) while True: Log.note( "Extracting jobs for last_modified={{last_modified|datetime|quote}}, job.id={{job_id}}", last_modified=last_modified, job_id=job_id, ) # Example: job.id ==283890114 # get_ids = ConcatSQL( # (SQL_SELECT, sql_alias(quote_value(283890114), "id")) # ) get_ids = sql_query( { "from": "job", "select": ["id"], "where": { "or": [ {"gt": {"last_modified": Date(last_modified)}}, { "and": [ {"eq": {"last_modified": Date(last_modified)}}, {"gt": {"id": job_id}}, ] }, ] }, "sort": ["last_modified", "id"], "limit": settings.extractor.chunk_size, } ) sql = extractor.get_sql(get_ids) # PULL FROM source, AND PUSH TO destination acc = [] with source.transaction(): cursor = source.query(sql, stream=True, row_tuples=True) extractor.construct_docs(cursor, acc.append, False) if not acc: break # SOME LIMITS PLACES ON STRING SIZE for fl in jx.drill(acc, "job_log.failure_line"): fl.message = strings.limit(fl.message, 10000) destination.extend(acc) # RECORD THE STATE last_doc = acc[-1] last_modified, job_id = last_doc.last_modified, last_doc.id redis.set( settings.extractor.key, value2json((last_modified, job_id)).encode("utf8"), ) if len(acc) < settings.extractor.chunk_size: break except Exception as e: Log.warning("problem with extraction", cause=e) Log.note("done job extraction") try: with Timer("merge shards"): destination.merge_shards() except Exception as e: Log.warning("problem with merge", cause=e) Log.note("done job merge")
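

# --- Editorial illustration (not part of the treeherder source above) -------
# The extract() loop above resumes work by remembering the last processed
# (last_modified, job.id) pair and asking MySQL for rows strictly after it
# ("keyset pagination": last_modified > X OR (last_modified = X AND id > Y)),
# persisting the pair in Redis after each chunk.  The sketch below restates
# that resume logic on a plain in-memory list so the chunking behaviour is
# easy to see.  All names here are hypothetical; this is only a clarifying
# example, not part of the extraction job.
def _demo_keyset_pages(rows, cursor, chunk_size):
    """Yield chunks of (last_modified, id) rows strictly after ``cursor``."""
    rows = sorted(rows)
    while True:
        # Lexicographic tuple comparison expresses the same OR/AND condition
        # built by the sql_query() call in ExtractJobs.extract().
        chunk = [row for row in rows if row > cursor][:chunk_size]
        if not chunk:
            return
        yield chunk
        cursor = chunk[-1]  # the real job stores this pair in Redis


# Example:
# list(_demo_keyset_pages([(1, 1), (1, 2), (2, 1)], cursor=(1, 1), chunk_size=1))
# -> [[(1, 2)], [(2, 1)]]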
KWierso/treeherder
treeherder/extract/extract_jobs.py
Python
mpl-2.0
5,395
# Copyright 2008-2015 Nokia Networks # Copyright 2016- Robot Framework Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import difflib import re import time import token from tokenize import generate_tokens, untokenize from robot.api import logger from robot.errors import (ContinueForLoop, DataError, ExecutionFailed, ExecutionFailures, ExecutionPassed, ExitForLoop, PassExecution, ReturnFromKeyword) from robot.running import Keyword, RUN_KW_REGISTER from robot.running.context import EXECUTION_CONTEXTS from robot.running.usererrorhandler import UserErrorHandler from robot.utils import (DotDict, escape, format_assign_message, get_error_message, get_time, is_falsy, is_integer, is_string, is_truthy, is_unicode, IRONPYTHON, JYTHON, Matcher, normalize, NormalizedDict, parse_time, prepr, RERAISED_EXCEPTIONS, plural_or_not as s, roundup, secs_to_timestr, seq2str, split_from_equals, StringIO, timestr_to_secs, type_name, unic) from robot.utils.asserts import assert_equal, assert_not_equal from robot.variables import (is_list_var, is_var, DictVariableTableValue, VariableTableValue, VariableSplitter, variable_not_found) from robot.version import get_version if JYTHON: from java.lang import String, Number # TODO: Clean-up registering run keyword variants in RF 3.1. # https://github.com/robotframework/robotframework/issues/2190 def run_keyword_variant(resolve): def decorator(method): RUN_KW_REGISTER.register_run_keyword('BuiltIn', method.__name__, resolve, deprecation_warning=False) return method return decorator class _BuiltInBase(object): @property def _context(self): return self._get_context() def _get_context(self, top=False): ctx = EXECUTION_CONTEXTS.current if not top else EXECUTION_CONTEXTS.top if ctx is None: raise RobotNotRunningError('Cannot access execution context') return ctx @property def _namespace(self): return self._get_context().namespace @property def _variables(self): return self._namespace.variables def _matches(self, string, pattern): # Must use this instead of fnmatch when string may contain newlines. matcher = Matcher(pattern, caseless=False, spaceless=False) return matcher.match(string) def _is_true(self, condition): if is_string(condition): condition = self.evaluate(condition, modules='os,sys') return bool(condition) def _log_types(self, *args): self._log_types_at_level('DEBUG', *args) def _log_types_at_level(self, level, *args): msg = ["Argument types are:"] + [self._get_type(a) for a in args] self.log('\n'.join(msg), level) def _get_type(self, arg): # In IronPython type(u'x') is str. We want to report unicode anyway. if is_unicode(arg): return "<type 'unicode'>" return str(type(arg)) class _Converter(_BuiltInBase): def convert_to_integer(self, item, base=None): """Converts the given item to an integer number. If the given item is a string, it is by default expected to be an integer in base 10. There are two ways to convert from other bases: - Give base explicitly to the keyword as ``base`` argument. 
- Prefix the given string with the base so that ``0b`` means binary (base 2), ``0o`` means octal (base 8), and ``0x`` means hex (base 16). The prefix is considered only when ``base`` argument is not given and may itself be prefixed with a plus or minus sign. The syntax is case-insensitive and possible spaces are ignored. Examples: | ${result} = | Convert To Integer | 100 | | # Result is 100 | | ${result} = | Convert To Integer | FF AA | 16 | # Result is 65450 | | ${result} = | Convert To Integer | 100 | 8 | # Result is 64 | | ${result} = | Convert To Integer | -100 | 2 | # Result is -4 | | ${result} = | Convert To Integer | 0b100 | | # Result is 4 | | ${result} = | Convert To Integer | -0x100 | | # Result is -256 | See also `Convert To Number`, `Convert To Binary`, `Convert To Octal`, `Convert To Hex`, and `Convert To Bytes`. """ self._log_types(item) return self._convert_to_integer(item, base) def _convert_to_integer(self, orig, base=None): try: item = self._handle_java_numbers(orig) item, base = self._get_base(item, base) if base: return int(item, self._convert_to_integer(base)) return int(item) except: raise RuntimeError("'%s' cannot be converted to an integer: %s" % (orig, get_error_message())) def _handle_java_numbers(self, item): if not JYTHON: return item if isinstance(item, String): return unic(item) if isinstance(item, Number): return item.doubleValue() return item def _get_base(self, item, base): if not is_string(item): return item, base item = normalize(item) if item.startswith(('-', '+')): sign = item[0] item = item[1:] else: sign = '' bases = {'0b': 2, '0o': 8, '0x': 16} if base or not item.startswith(tuple(bases)): return sign+item, base return sign+item[2:], bases[item[:2]] def convert_to_binary(self, item, base=None, prefix=None, length=None): """Converts the given item to a binary string. The ``item``, with an optional ``base``, is first converted to an integer using `Convert To Integer` internally. After that it is converted to a binary number (base 2) represented as a string such as ``1011``. The returned value can contain an optional ``prefix`` and can be required to be of minimum ``length`` (excluding the prefix and a possible minus sign). If the value is initially shorter than the required length, it is padded with zeros. Examples: | ${result} = | Convert To Binary | 10 | | | # Result is 1010 | | ${result} = | Convert To Binary | F | base=16 | prefix=0b | # Result is 0b1111 | | ${result} = | Convert To Binary | -2 | prefix=B | length=4 | # Result is -B0010 | See also `Convert To Integer`, `Convert To Octal` and `Convert To Hex`. """ return self._convert_to_bin_oct_hex(item, base, prefix, length, 'b') def convert_to_octal(self, item, base=None, prefix=None, length=None): """Converts the given item to an octal string. The ``item``, with an optional ``base``, is first converted to an integer using `Convert To Integer` internally. After that it is converted to an octal number (base 8) represented as a string such as ``775``. The returned value can contain an optional ``prefix`` and can be required to be of minimum ``length`` (excluding the prefix and a possible minus sign). If the value is initially shorter than the required length, it is padded with zeros. Examples: | ${result} = | Convert To Octal | 10 | | | # Result is 12 | | ${result} = | Convert To Octal | -F | base=16 | prefix=0 | # Result is -017 | | ${result} = | Convert To Octal | 16 | prefix=oct | length=4 | # Result is oct0020 | See also `Convert To Integer`, `Convert To Binary` and `Convert To Hex`. 
""" return self._convert_to_bin_oct_hex(item, base, prefix, length, 'o') def convert_to_hex(self, item, base=None, prefix=None, length=None, lowercase=False): """Converts the given item to a hexadecimal string. The ``item``, with an optional ``base``, is first converted to an integer using `Convert To Integer` internally. After that it is converted to a hexadecimal number (base 16) represented as a string such as ``FF0A``. The returned value can contain an optional ``prefix`` and can be required to be of minimum ``length`` (excluding the prefix and a possible minus sign). If the value is initially shorter than the required length, it is padded with zeros. By default the value is returned as an upper case string, but the ``lowercase`` argument a true value (see `Boolean arguments`) turns the value (but not the given prefix) to lower case. Examples: | ${result} = | Convert To Hex | 255 | | | # Result is FF | | ${result} = | Convert To Hex | -10 | prefix=0x | length=2 | # Result is -0x0A | | ${result} = | Convert To Hex | 255 | prefix=X | lowercase=yes | # Result is Xff | See also `Convert To Integer`, `Convert To Binary` and `Convert To Octal`. """ spec = 'x' if is_truthy(lowercase) else 'X' return self._convert_to_bin_oct_hex(item, base, prefix, length, spec) def _convert_to_bin_oct_hex(self, item, base, prefix, length, format_spec): self._log_types(item) ret = format(self._convert_to_integer(item, base), format_spec) prefix = prefix or '' if ret[0] == '-': prefix = '-' + prefix ret = ret[1:] if length: ret = ret.rjust(self._convert_to_integer(length), '0') return prefix + ret def convert_to_number(self, item, precision=None): """Converts the given item to a floating point number. If the optional ``precision`` is positive or zero, the returned number is rounded to that number of decimal digits. Negative precision means that the number is rounded to the closest multiple of 10 to the power of the absolute precision. If a number is equally close to a certain precision, it is always rounded away from zero. Examples: | ${result} = | Convert To Number | 42.512 | | # Result is 42.512 | | ${result} = | Convert To Number | 42.512 | 1 | # Result is 42.5 | | ${result} = | Convert To Number | 42.512 | 0 | # Result is 43.0 | | ${result} = | Convert To Number | 42.512 | -1 | # Result is 40.0 | Notice that machines generally cannot store floating point numbers accurately. This may cause surprises with these numbers in general and also when they are rounded. For more information see, for example, these resources: - http://docs.python.org/2/tutorial/floatingpoint.html - http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition If you need an integer number, use `Convert To Integer` instead. """ self._log_types(item) return self._convert_to_number(item, precision) def _convert_to_number(self, item, precision=None): number = self._convert_to_number_without_precision(item) if precision is not None: number = roundup(number, self._convert_to_integer(precision), return_type=float) return number def _convert_to_number_without_precision(self, item): try: if JYTHON: item = self._handle_java_numbers(item) return float(item) except: error = get_error_message() try: return float(self._convert_to_integer(item)) except RuntimeError: raise RuntimeError("'%s' cannot be converted to a floating " "point number: %s" % (item, error)) def convert_to_string(self, item): """Converts the given item to a Unicode string. 
Uses ``__unicode__`` or ``__str__`` method with Python objects and ``toString`` with Java objects. Use `Encode String To Bytes` and `Decode Bytes To String` keywords in ``String`` library if you need to convert between Unicode and byte strings using different encodings. Use `Convert To Bytes` if you just want to create byte strings. """ self._log_types(item) return self._convert_to_string(item) def _convert_to_string(self, item): return unic(item) def convert_to_boolean(self, item): """Converts the given item to Boolean true or false. Handles strings ``True`` and ``False`` (case-insensitive) as expected, otherwise returns item's [http://docs.python.org/2/library/stdtypes.html#truth|truth value] using Python's ``bool()`` method. """ self._log_types(item) if is_string(item): if item.upper() == 'TRUE': return True if item.upper() == 'FALSE': return False return bool(item) def convert_to_bytes(self, input, input_type='text'): u"""Converts the given ``input`` to bytes according to the ``input_type``. Valid input types are listed below: - ``text:`` Converts text to bytes character by character. All characters with ordinal below 256 can be used and are converted to bytes with same values. Many characters are easiest to represent using escapes like ``\\x00`` or ``\\xff``. Supports both Unicode strings and bytes. - ``int:`` Converts integers separated by spaces to bytes. Similarly as with `Convert To Integer`, it is possible to use binary, octal, or hex values by prefixing the values with ``0b``, ``0o``, or ``0x``, respectively. - ``hex:`` Converts hexadecimal values to bytes. Single byte is always two characters long (e.g. ``01`` or ``FF``). Spaces are ignored and can be used freely as a visual separator. - ``bin:`` Converts binary values to bytes. Single byte is always eight characters long (e.g. ``00001010``). Spaces are ignored and can be used freely as a visual separator. In addition to giving the input as a string, it is possible to use lists or other iterables containing individual characters or numbers. In that case numbers do not need to be padded to certain length and they cannot contain extra spaces. Examples (last column shows returned bytes): | ${bytes} = | Convert To Bytes | hyv\xe4 | | # hyv\\xe4 | | ${bytes} = | Convert To Bytes | \\xff\\x07 | | # \\xff\\x07 | | ${bytes} = | Convert To Bytes | 82 70 | int | # RF | | ${bytes} = | Convert To Bytes | 0b10 0x10 | int | # \\x02\\x10 | | ${bytes} = | Convert To Bytes | ff 00 07 | hex | # \\xff\\x00\\x07 | | ${bytes} = | Convert To Bytes | 5246212121 | hex | # RF!!! | | ${bytes} = | Convert To Bytes | 0000 1000 | bin | # \\x08 | | ${input} = | Create List | 1 | 2 | 12 | | ${bytes} = | Convert To Bytes | ${input} | int | # \\x01\\x02\\x0c | | ${bytes} = | Convert To Bytes | ${input} | hex | # \\x01\\x02\\x12 | Use `Encode String To Bytes` in ``String`` library if you need to convert text to bytes using a certain encoding. New in Robot Framework 2.8.2. """ try: try: ordinals = getattr(self, '_get_ordinals_from_%s' % input_type) except AttributeError: raise RuntimeError("Invalid input type '%s'." 
% input_type) return bytes(bytearray(o for o in ordinals(input))) except: raise RuntimeError("Creating bytes failed: %s" % get_error_message()) def _get_ordinals_from_text(self, input): # https://github.com/IronLanguages/main/issues/1237 if IRONPYTHON and isinstance(input, bytearray): input = bytes(input) for char in input: ordinal = char if is_integer(char) else ord(char) yield self._test_ordinal(ordinal, char, 'Character') def _test_ordinal(self, ordinal, original, type): if 0 <= ordinal <= 255: return ordinal raise RuntimeError("%s '%s' cannot be represented as a byte." % (type, original)) def _get_ordinals_from_int(self, input): if is_string(input): input = input.split() elif is_integer(input): input = [input] for integer in input: ordinal = self._convert_to_integer(integer) yield self._test_ordinal(ordinal, integer, 'Integer') def _get_ordinals_from_hex(self, input): for token in self._input_to_tokens(input, length=2): ordinal = self._convert_to_integer(token, base=16) yield self._test_ordinal(ordinal, token, 'Hex value') def _get_ordinals_from_bin(self, input): for token in self._input_to_tokens(input, length=8): ordinal = self._convert_to_integer(token, base=2) yield self._test_ordinal(ordinal, token, 'Binary value') def _input_to_tokens(self, input, length): if not is_string(input): return input input = ''.join(input.split()) if len(input) % length != 0: raise RuntimeError('Expected input to be multiple of %d.' % length) return (input[i:i+length] for i in range(0, len(input), length)) def create_list(self, *items): """Returns a list containing given items. The returned list can be assigned both to ``${scalar}`` and ``@{list}`` variables. Examples: | @{list} = | Create List | a | b | c | | ${scalar} = | Create List | a | b | c | | ${ints} = | Create List | ${1} | ${2} | ${3} | """ return list(items) @run_keyword_variant(resolve=0) def create_dictionary(self, *items): """Creates and returns a dictionary based on the given ``items``. Items are typically given using the ``key=value`` syntax same way as ``&{dictionary}`` variables are created in the Variable table. Both keys and values can contain variables, and possible equal sign in key can be escaped with a backslash like ``escaped\\=key=value``. It is also possible to get items from existing dictionaries by simply using them like ``&{dict}``. Alternatively items can be specified so that keys and values are given separately. This and the ``key=value`` syntax can even be combined, but separately given items must be first. If same key is used multiple times, the last value has precedence. The returned dictionary is ordered, and values with strings as keys can also be accessed using a convenient dot-access syntax like ``${dict.key}``. Examples: | &{dict} = | Create Dictionary | key=value | foo=bar | | | # key=value syntax | | Should Be True | ${dict} == {'key': 'value', 'foo': 'bar'} | | &{dict2} = | Create Dictionary | key | value | foo | bar | # separate key and value | | Should Be Equal | ${dict} | ${dict2} | | &{dict} = | Create Dictionary | ${1}=${2} | &{dict} | foo=new | | # using variables | | Should Be True | ${dict} == {1: 2, 'key': 'value', 'foo': 'new'} | | Should Be Equal | ${dict.key} | value | | | | # dot-access | This keyword was changed in Robot Framework 2.9 in many ways: - Moved from ``Collections`` library to ``BuiltIn``. - Support also non-string keys in ``key=value`` syntax. - Returned dictionary is ordered and dot-accessible. 
- Old syntax to give keys and values separately was deprecated, but deprecation was later removed in RF 3.0.1. """ separate, combined = self._split_dict_items(items) result = DotDict(self._format_separate_dict_items(separate)) combined = DictVariableTableValue(combined).resolve(self._variables) result.update(combined) return result def _split_dict_items(self, items): separate = [] for item in items: name, value = split_from_equals(item) if value is not None or VariableSplitter(item).is_dict_variable(): break separate.append(item) return separate, items[len(separate):] def _format_separate_dict_items(self, separate): separate = self._variables.replace_list(separate) if len(separate) % 2 != 0: raise DataError('Expected even number of keys and values, got %d.' % len(separate)) return [separate[i:i+2] for i in range(0, len(separate), 2)] class _Verify(_BuiltInBase): def _set_and_remove_tags(self, tags): set_tags = [tag for tag in tags if not tag.startswith('-')] remove_tags = [tag[1:] for tag in tags if tag.startswith('-')] if remove_tags: self.remove_tags(*remove_tags) if set_tags: self.set_tags(*set_tags) def fail(self, msg=None, *tags): """Fails the test with the given message and optionally alters its tags. The error message is specified using the ``msg`` argument. It is possible to use HTML in the given error message, similarly as with any other keyword accepting an error message, by prefixing the error with ``*HTML*``. It is possible to modify tags of the current test case by passing tags after the message. Tags starting with a hyphen (e.g. ``-regression``) are removed and others added. Tags are modified using `Set Tags` and `Remove Tags` internally, and the semantics setting and removing them are the same as with these keywords. Examples: | Fail | Test not ready | | | # Fails with the given message. | | Fail | *HTML*<b>Test not ready</b> | | | # Fails using HTML in the message. | | Fail | Test not ready | not-ready | | # Fails and adds 'not-ready' tag. | | Fail | OS not supported | -regression | | # Removes tag 'regression'. | | Fail | My message | tag | -t* | # Removes all tags starting with 't' except the newly added 'tag'. | See `Fatal Error` if you need to stop the whole test execution. Support for modifying tags was added in Robot Framework 2.7.4 and HTML message support in 2.8. """ self._set_and_remove_tags(tags) raise AssertionError(msg) if msg else AssertionError() def fatal_error(self, msg=None): """Stops the whole test execution. The test or suite where this keyword is used fails with the provided message, and subsequent tests fail with a canned message. Possible teardowns will nevertheless be executed. See `Fail` if you only want to stop one test case unconditionally. """ error = AssertionError(msg) if msg else AssertionError() error.ROBOT_EXIT_ON_FAILURE = True raise error def should_not_be_true(self, condition, msg=None): """Fails if the given condition is true. See `Should Be True` for details about how ``condition`` is evaluated and how ``msg`` can be used to override the default error message. """ if self._is_true(condition): raise AssertionError(msg or "'%s' should not be true." % condition) def should_be_true(self, condition, msg=None): """Fails if the given condition is not true. If ``condition`` is a string (e.g. ``${rc} < 10``), it is evaluated as a Python expression as explained in `Evaluating expressions` and the keyword status is decided based on the result. 
If a non-string item is given, the status is got directly from its [http://docs.python.org/2/library/stdtypes.html#truth|truth value]. The default error message (``<condition> should be true``) is not very informative, but it can be overridden with the ``msg`` argument. Examples: | Should Be True | ${rc} < 10 | | Should Be True | '${status}' == 'PASS' | # Strings must be quoted | | Should Be True | ${number} | # Passes if ${number} is not zero | | Should Be True | ${list} | # Passes if ${list} is not empty | Variables used like ``${variable}``, as in the examples above, are replaced in the expression before evaluation. Variables are also available in the evaluation namespace and can be accessed using special syntax ``$variable``. This is a new feature in Robot Framework 2.9 and it is explained more thoroughly in `Evaluating expressions`. Examples: | Should Be True | $rc < 10 | | Should Be True | $status == 'PASS' | # Expected string must be quoted | Starting from Robot Framework 2.8, `Should Be True` automatically imports Python's [http://docs.python.org/2/library/os.html|os] and [http://docs.python.org/2/library/sys.html|sys] modules that contain several useful attributes: | Should Be True | os.linesep == '\\n' | # Unixy | | Should Be True | os.linesep == '\\r\\n' | # Windows | | Should Be True | sys.platform == 'darwin' | # OS X | | Should Be True | sys.platform.startswith('java') | # Jython | """ if not self._is_true(condition): raise AssertionError(msg or "'%s' should be true." % condition) def should_be_equal(self, first, second, msg=None, values=True): """Fails if the given objects are unequal. Optional ``msg`` and ``values`` arguments specify how to construct the error message if this keyword fails: - If ``msg`` is not given, the error message is ``<first> != <second>``. - If ``msg`` is given and ``values`` gets a true value, the error message is ``<msg>: <first> != <second>``. - If ``msg`` is given and ``values`` gets a false value, the error message is simply ``<msg>``. ``values`` is true by default, but can be turned to false by using, for example, string ``false`` or ``no values``. See `Boolean arguments` section for more details. If both arguments are multiline strings, the comparison is done using `multiline string comparisons`. """ self._log_types_at_info_if_different(first, second) self._should_be_equal(first, second, msg, values) def _should_be_equal(self, first, second, msg, values): if first == second: return include_values = self._include_values(values) if include_values and is_string(first) and is_string(second): self._raise_multi_diff(first, second) assert_equal(first, second, msg, include_values) def _log_types_at_info_if_different(self, first, second): level = 'DEBUG' if type(first) == type(second) else 'INFO' self._log_types_at_level(level, first, second) def _raise_multi_diff(self, first, second): first_lines, second_lines = first.splitlines(), second.splitlines() if len(first_lines) < 3 or len(second_lines) < 3: return self.log("%s\n!=\n%s" % (first, second)) err = 'Multiline strings are different:\n' for line in difflib.unified_diff(first_lines, second_lines, fromfile='first', tofile='second', lineterm=''): err += line + '\n' raise AssertionError(err) def _include_values(self, values): return is_truthy(values) and str(values).upper() != 'NO VALUES' def should_not_be_equal(self, first, second, msg=None, values=True): """Fails if the given objects are equal. 
See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ self._log_types_at_info_if_different(first, second) self._should_not_be_equal(first, second, msg, values) def _should_not_be_equal(self, first, second, msg, values): assert_not_equal(first, second, msg, self._include_values(values)) def should_not_be_equal_as_integers(self, first, second, msg=None, values=True, base=None): """Fails if objects are equal after converting them to integers. See `Convert To Integer` for information how to convert integers from other bases than 10 using ``base`` argument or ``0b/0o/0x`` prefixes. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. See `Should Be Equal As Integers` for some usage examples. """ self._log_types_at_info_if_different(first, second) self._should_not_be_equal(self._convert_to_integer(first, base), self._convert_to_integer(second, base), msg, values) def should_be_equal_as_integers(self, first, second, msg=None, values=True, base=None): """Fails if objects are unequal after converting them to integers. See `Convert To Integer` for information how to convert integers from other bases than 10 using ``base`` argument or ``0b/0o/0x`` prefixes. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. Examples: | Should Be Equal As Integers | 42 | ${42} | Error message | | Should Be Equal As Integers | ABCD | abcd | base=16 | | Should Be Equal As Integers | 0b1011 | 11 | """ self._log_types_at_info_if_different(first, second) self._should_be_equal(self._convert_to_integer(first, base), self._convert_to_integer(second, base), msg, values) def should_not_be_equal_as_numbers(self, first, second, msg=None, values=True, precision=6): """Fails if objects are equal after converting them to real numbers. The conversion is done with `Convert To Number` keyword using the given ``precision``. See `Should Be Equal As Numbers` for examples on how to use ``precision`` and why it does not always work as expected. See also `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ self._log_types_at_info_if_different(first, second) first = self._convert_to_number(first, precision) second = self._convert_to_number(second, precision) self._should_not_be_equal(first, second, msg, values) def should_be_equal_as_numbers(self, first, second, msg=None, values=True, precision=6): """Fails if objects are unequal after converting them to real numbers. The conversion is done with `Convert To Number` keyword using the given ``precision``. Examples: | Should Be Equal As Numbers | ${x} | 1.1 | | # Passes if ${x} is 1.1 | | Should Be Equal As Numbers | 1.123 | 1.1 | precision=1 | # Passes | | Should Be Equal As Numbers | 1.123 | 1.4 | precision=0 | # Passes | | Should Be Equal As Numbers | 112.3 | 75 | precision=-2 | # Passes | As discussed in the documentation of `Convert To Number`, machines generally cannot store floating point numbers accurately. Because of this limitation, comparing floats for equality is problematic and a correct approach to use depends on the context. This keyword uses a very naive approach of rounding the numbers before comparing them, which is both prone to rounding errors and does not work very well if numbers are really big or small. 
For more information about comparing floats, and ideas on how to implement your own context specific comparison algorithm, see http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/. See `Should Not Be Equal As Numbers` for a negative version of this keyword and `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ self._log_types_at_info_if_different(first, second) first = self._convert_to_number(first, precision) second = self._convert_to_number(second, precision) self._should_be_equal(first, second, msg, values) def should_not_be_equal_as_strings(self, first, second, msg=None, values=True): """Fails if objects are equal after converting them to strings. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ self._log_types_at_info_if_different(first, second) first, second = [self._convert_to_string(i) for i in (first, second)] self._should_not_be_equal(first, second, msg, values) def should_be_equal_as_strings(self, first, second, msg=None, values=True): """Fails if objects are unequal after converting them to strings. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. If both arguments are multiline strings, the comparison is done using `multiline string comparisons`. """ self._log_types_at_info_if_different(first, second) first, second = [self._convert_to_string(i) for i in (first, second)] self._should_be_equal(first, second, msg, values) def should_not_start_with(self, str1, str2, msg=None, values=True): """Fails if the string ``str1`` starts with the string ``str2``. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ if str1.startswith(str2): raise AssertionError(self._get_string_msg(str1, str2, msg, values, 'starts with')) def should_start_with(self, str1, str2, msg=None, values=True): """Fails if the string ``str1`` does not start with the string ``str2``. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ if not str1.startswith(str2): raise AssertionError(self._get_string_msg(str1, str2, msg, values, 'does not start with')) def should_not_end_with(self, str1, str2, msg=None, values=True): """Fails if the string ``str1`` ends with the string ``str2``. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ if str1.endswith(str2): raise AssertionError(self._get_string_msg(str1, str2, msg, values, 'ends with')) def should_end_with(self, str1, str2, msg=None, values=True): """Fails if the string ``str1`` does not end with the string ``str2``. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ if not str1.endswith(str2): raise AssertionError(self._get_string_msg(str1, str2, msg, values, 'does not end with')) def should_not_contain(self, container, item, msg=None, values=True): """Fails if ``container`` contains ``item`` one or more times. Works with strings, lists, and anything that supports Python's ``in`` operator. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. 
Examples: | Should Not Contain | ${output} | FAILED | | Should Not Contain | ${some list} | value | """ if item in container: raise AssertionError(self._get_string_msg(container, item, msg, values, 'contains')) def should_contain(self, container, item, msg=None, values=True): """Fails if ``container`` does not contain ``item`` one or more times. Works with strings, lists, and anything that supports Python's ``in`` operator. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. Examples: | Should Contain | ${output} | PASS | | Should Contain | ${some list} | value | """ if item not in container: raise AssertionError(self._get_string_msg(container, item, msg, values, 'does not contain')) def should_contain_x_times(self, item1, item2, count, msg=None): """Fails if ``item1`` does not contain ``item2`` ``count`` times. Works with strings, lists and all objects that `Get Count` works with. The default error message can be overridden with ``msg`` and the actual count is always logged. Examples: | Should Contain X Times | ${output} | hello | 2 | | Should Contain X Times | ${some list} | value | 3 | """ count = self._convert_to_integer(count) x = self.get_count(item1, item2) if not msg: msg = "'%s' contains '%s' %d time%s, not %d time%s." \ % (unic(item1), unic(item2), x, s(x), count, s(count)) self.should_be_equal_as_integers(x, count, msg, values=False) def get_count(self, item1, item2): """Returns and logs how many times ``item2`` is found from ``item1``. This keyword works with Python strings and lists and all objects that either have ``count`` method or can be converted to Python lists. Example: | ${count} = | Get Count | ${some item} | interesting value | | Should Be True | 5 < ${count} < 10 | """ if not hasattr(item1, 'count'): try: item1 = list(item1) except: raise RuntimeError("Converting '%s' to list failed: %s" % (item1, get_error_message())) count = item1.count(item2) self.log('Item found from the first item %d time%s' % (count, s(count))) return count def should_not_match(self, string, pattern, msg=None, values=True): """Fails if the given ``string`` matches the given ``pattern``. Pattern matching is similar as matching files in a shell, and it is always case-sensitive. In the pattern ``*`` matches to anything and ``?`` matches to any single character. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ if self._matches(string, pattern): raise AssertionError(self._get_string_msg(string, pattern, msg, values, 'matches')) def should_match(self, string, pattern, msg=None, values=True): """Fails unless the given ``string`` matches the given ``pattern``. Pattern matching is similar as matching files in a shell, and it is always case-sensitive. In the pattern, ``*`` matches to anything and ``?`` matches to any single character. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ if not self._matches(string, pattern): raise AssertionError(self._get_string_msg(string, pattern, msg, values, 'does not match')) def should_match_regexp(self, string, pattern, msg=None, values=True): """Fails if ``string`` does not match ``pattern`` as a regular expression. Regular expression check is implemented using the Python [http://docs.python.org/2/library/re.html|re module]. 
Python's regular expression syntax is derived from Perl, and it is thus also very similar to the syntax used, for example, in Java, Ruby and .NET. Things to note about the regexp syntax in Robot Framework test data: 1) Backslash is an escape character in the test data, and possible backslashes in the pattern must thus be escaped with another backslash (e.g. ``\\\\d\\\\w+``). 2) Strings that may contain special characters, but should be handled as literal strings, can be escaped with the `Regexp Escape` keyword. 3) The given pattern does not need to match the whole string. For example, the pattern ``ello`` matches the string ``Hello world!``. If a full match is needed, the ``^`` and ``$`` characters can be used to denote the beginning and end of the string, respectively. For example, ``^ello$`` only matches the exact string ``ello``. 4) Possible flags altering how the expression is parsed (e.g. ``re.IGNORECASE``, ``re.MULTILINE``) can be set by prefixing the pattern with the ``(?iLmsux)`` group like ``(?im)pattern``. The available flags are ``i`` (case-insensitive), ``m`` (multiline mode), ``s`` (dotall mode), ``x`` (verbose), ``u`` (Unicode dependent) and ``L`` (locale dependent). If this keyword passes, it returns the portion of the string that matched the pattern. Additionally, the possible captured groups are returned. See the `Should Be Equal` keyword for an explanation on how to override the default error message with the ``msg`` and ``values`` arguments. Examples: | Should Match Regexp | ${output} | \\\\d{6} | # Output contains six numbers | | Should Match Regexp | ${output} | ^\\\\d{6}$ | # Six numbers and nothing more | | ${ret} = | Should Match Regexp | Foo: 42 | (?i)foo: \\\\d+ | | ${match} | ${group1} | ${group2} = | | ... | Should Match Regexp | Bar: 43 | (Foo|Bar): (\\\\d+) | => | ${ret} = 'Foo: 42' | ${match} = 'Bar: 43' | ${group1} = 'Bar' | ${group2} = '43' """ res = re.search(pattern, string) if res is None: raise AssertionError(self._get_string_msg(string, pattern, msg, values, 'does not match')) match = res.group(0) groups = res.groups() if groups: return [match] + list(groups) return match def should_not_match_regexp(self, string, pattern, msg=None, values=True): """Fails if ``string`` matches ``pattern`` as a regular expression. See `Should Match Regexp` for more information about arguments. """ if re.search(pattern, string) is not None: raise AssertionError(self._get_string_msg(string, pattern, msg, values, 'matches')) def get_length(self, item): """Returns and logs the length of the given item as an integer. The item can be anything that has a length, for example, a string, a list, or a mapping. The keyword first tries to get the length with the Python function ``len``, which calls the item's ``__len__`` method internally. If that fails, the keyword tries to call the item's possible ``length`` and ``size`` methods directly. The final attempt is trying to get the value of the item's ``length`` attribute. If all these attempts are unsuccessful, the keyword fails. Examples: | ${length} = | Get Length | Hello, world! | | | Should Be Equal As Integers | ${length} | 13 | | @{list} = | Create List | Hello, | world! | | ${length} = | Get Length | ${list} | | | Should Be Equal As Integers | ${length} | 2 | See also `Length Should Be`, `Should Be Empty` and `Should Not Be Empty`. 
""" length = self._get_length(item) self.log('Length is %d' % length) return length def _get_length(self, item): try: return len(item) except RERAISED_EXCEPTIONS: raise except: try: return item.length() except RERAISED_EXCEPTIONS: raise except: try: return item.size() except RERAISED_EXCEPTIONS: raise except: try: return item.length except RERAISED_EXCEPTIONS: raise except: raise RuntimeError("Could not get length of '%s'." % item) def length_should_be(self, item, length, msg=None): """Verifies that the length of the given item is correct. The length of the item is got using the `Get Length` keyword. The default error message can be overridden with the ``msg`` argument. """ length = self._convert_to_integer(length) actual = self.get_length(item) if actual != length: raise AssertionError(msg or "Length of '%s' should be %d but is %d." % (item, length, actual)) def should_be_empty(self, item, msg=None): """Verifies that the given item is empty. The length of the item is got using the `Get Length` keyword. The default error message can be overridden with the ``msg`` argument. """ if self.get_length(item) > 0: raise AssertionError(msg or "'%s' should be empty." % item) def should_not_be_empty(self, item, msg=None): """Verifies that the given item is not empty. The length of the item is got using the `Get Length` keyword. The default error message can be overridden with the ``msg`` argument. """ if self.get_length(item) == 0: raise AssertionError(msg or "'%s' should not be empty." % item) def _get_string_msg(self, str1, str2, msg, values, delim): default = "'%s' %s '%s'" % (unic(str1), delim, unic(str2)) if not msg: msg = default elif self._include_values(values): msg = '%s: %s' % (msg, default) return msg class _Variables(_BuiltInBase): def get_variables(self, no_decoration=False): """Returns a dictionary containing all variables in the current scope. Variables are returned as a special dictionary that allows accessing variables in space, case, and underscore insensitive manner similarly as accessing variables in the test data. This dictionary supports all same operations as normal Python dictionaries and, for example, Collections library can be used to access or modify it. Modifying the returned dictionary has no effect on the variables available in the current scope. By default variables are returned with ``${}``, ``@{}`` or ``&{}`` decoration based on variable types. Giving a true value (see `Boolean arguments`) to the optional argument ``no_decoration`` will return the variables without the decoration. This option is new in Robot Framework 2.9. Example: | ${example_variable} = | Set Variable | example value | | ${variables} = | Get Variables | | | Dictionary Should Contain Key | ${variables} | \\${example_variable} | | Dictionary Should Contain Key | ${variables} | \\${ExampleVariable} | | Set To Dictionary | ${variables} | \\${name} | value | | Variable Should Not Exist | \\${name} | | | | ${no decoration} = | Get Variables | no_decoration=Yes | | Dictionary Should Contain Key | ${no decoration} | example_variable | Note: Prior to Robot Framework 2.7.4 variables were returned as a custom object that did not support all dictionary methods. """ return self._variables.as_dict(decoration=is_falsy(no_decoration)) @run_keyword_variant(resolve=0) def get_variable_value(self, name, default=None): """Returns variable value or ``default`` if the variable does not exist. The name of the variable can be given either as a normal variable name (e.g. ``${NAME}``) or in escaped format (e.g. 
``\\${NAME}``). Notice that the former has some limitations explained in `Set Suite Variable`. Examples: | ${x} = | Get Variable Value | ${a} | default | | ${y} = | Get Variable Value | ${a} | ${b} | | ${z} = | Get Variable Value | ${z} | | => | ${x} gets value of ${a} if ${a} exists and string 'default' otherwise | ${y} gets value of ${a} if ${a} exists and value of ${b} otherwise | ${z} is set to Python None if it does not exist previously See `Set Variable If` for another keyword to set variables dynamically. """ try: return self._variables[self._get_var_name(name)] except DataError: return self._variables.replace_scalar(default) def log_variables(self, level='INFO'): """Logs all variables in the current scope with given log level.""" variables = self.get_variables() for name in sorted(variables, key=lambda s: s[2:-1].lower()): msg = format_assign_message(name, variables[name], cut_long=False) self.log(msg, level) @run_keyword_variant(resolve=0) def variable_should_exist(self, name, msg=None): """Fails unless the given variable exists within the current scope. The name of the variable can be given either as a normal variable name (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice that the former has some limitations explained in `Set Suite Variable`. The default error message can be overridden with the ``msg`` argument. See also `Variable Should Not Exist` and `Keyword Should Exist`. """ name = self._get_var_name(name) msg = self._variables.replace_string(msg) if msg \ else "Variable %s does not exist." % name try: self._variables[name] except DataError: raise AssertionError(msg) @run_keyword_variant(resolve=0) def variable_should_not_exist(self, name, msg=None): """Fails if the given variable exists within the current scope. The name of the variable can be given either as a normal variable name (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice that the former has some limitations explained in `Set Suite Variable`. The default error message can be overridden with the ``msg`` argument. See also `Variable Should Exist` and `Keyword Should Exist`. """ name = self._get_var_name(name) msg = self._variables.replace_string(msg) if msg \ else "Variable %s exists." % name try: self._variables[name] except DataError: pass else: raise AssertionError(msg) def replace_variables(self, text): """Replaces variables in the given text with their current values. If the text contains undefined variables, this keyword fails. If the given ``text`` contains only a single variable, its value is returned as-is and it can be any object. Otherwise this keyword always returns a string. Example: The file ``template.txt`` contains ``Hello ${NAME}!`` and variable ``${NAME}`` has the value ``Robot``. | ${template} = | Get File | ${CURDIR}/template.txt | | ${message} = | Replace Variables | ${template} | | Should Be Equal | ${message} | Hello Robot! | """ return self._variables.replace_scalar(text) def set_variable(self, *values): """Returns the given values which can then be assigned to a variables. This keyword is mainly used for setting scalar variables. Additionally it can be used for converting a scalar variable containing a list to a list variable or to multiple scalar variables. It is recommended to use `Create List` when creating new lists. Examples: | ${hi} = | Set Variable | Hello, world! 
| | ${hi2} = | Set Variable | I said: ${hi} | | ${var1} | ${var2} = | Set Variable | Hello | world | | @{list} = | Set Variable | ${list with some items} | | ${item1} | ${item2} = | Set Variable | ${list with 2 items} | Variables created with this keyword are available only in the scope where they are created. See `Set Global Variable`, `Set Test Variable` and `Set Suite Variable` for information on how to set variables so that they are available also in a larger scope. """ if len(values) == 0: return '' elif len(values) == 1: return values[0] else: return list(values) @run_keyword_variant(resolve=0) def set_test_variable(self, name, *values): """Makes a variable available everywhere within the scope of the current test. Variables set with this keyword are available everywhere within the scope of the currently executed test case. For example, if you set a variable in a user keyword, it is available both in the test case level and also in all other user keywords used in the current test. Other test cases will not see variables set with this keyword. See `Set Suite Variable` for more information and examples. """ name = self._get_var_name(name) value = self._get_var_value(name, values) self._variables.set_test(name, value) self._log_set_variable(name, value) @run_keyword_variant(resolve=0) def set_suite_variable(self, name, *values): """Makes a variable available everywhere within the scope of the current suite. Variables set with this keyword are available everywhere within the scope of the currently executed test suite. Setting variables with this keyword thus has the same effect as creating them using the Variable table in the test data file or importing them from variable files. Possible child test suites do not see variables set with this keyword by default. Starting from Robot Framework 2.9, that can be controlled by using ``children=<option>`` as the last argument. If the specified ``<option>`` is a non-empty string or any other value considered true in Python, the variable is set also to the child suites. Parent and sibling suites will never see variables set with this keyword. The name of the variable can be given either as a normal variable name (e.g. ``${NAME}``) or in escaped format as ``\\${NAME}`` or ``$NAME``. Variable value can be given using the same syntax as when variables are created in the Variable table. If a variable already exists within the new scope, its value will be overwritten. Otherwise a new variable is created. If a variable already exists within the current scope, the value can be left empty and the variable within the new scope gets the value within the current scope. Examples: | Set Suite Variable | ${SCALAR} | Hello, world! | | Set Suite Variable | ${SCALAR} | Hello, world! 
| children=true | | Set Suite Variable | @{LIST} | First item | Second item | | Set Suite Variable | &{DICT} | key=value | foo=bar | | ${ID} = | Get ID | | Set Suite Variable | ${ID} | To override an existing value with an empty value, use built-in variables ``${EMPTY}``, ``@{EMPTY}`` or ``&{EMPTY}``: | Set Suite Variable | ${SCALAR} | ${EMPTY} | | Set Suite Variable | @{LIST} | @{EMPTY} | # New in RF 2.7.4 | | Set Suite Variable | &{DICT} | &{EMPTY} | # New in RF 2.9 | *NOTE:* If the variable has value which itself is a variable (escaped or not), you must always use the escaped format to set the variable: Example: | ${NAME} = | Set Variable | \\${var} | | Set Suite Variable | ${NAME} | value | # Sets variable ${var} | | Set Suite Variable | \\${NAME} | value | # Sets variable ${NAME} | This limitation applies also to `Set Test Variable`, `Set Global Variable`, `Variable Should Exist`, `Variable Should Not Exist` and `Get Variable Value` keywords. """ name = self._get_var_name(name) if (values and is_string(values[-1]) and values[-1].startswith('children=')): children = self._variables.replace_scalar(values[-1][9:]) children = is_truthy(children) values = values[:-1] else: children = False value = self._get_var_value(name, values) self._variables.set_suite(name, value, children=children) self._log_set_variable(name, value) @run_keyword_variant(resolve=0) def set_global_variable(self, name, *values): """Makes a variable available globally in all tests and suites. Variables set with this keyword are globally available in all test cases and suites executed after setting them. Setting variables with this keyword thus has the same effect as creating from the command line using the options ``--variable`` or ``--variablefile``. Because this keyword can change variables everywhere, it should be used with care. See `Set Suite Variable` for more information and examples. """ name = self._get_var_name(name) value = self._get_var_value(name, values) self._variables.set_global(name, value) self._log_set_variable(name, value) # Helpers def _get_var_name(self, orig): name = self._resolve_possible_variable(orig) try: return self._unescape_variable_if_needed(name) except ValueError: raise RuntimeError("Invalid variable syntax '%s'." % orig) def _resolve_possible_variable(self, name): try: resolved = self._variables.replace_string(name) return self._unescape_variable_if_needed(resolved) except (KeyError, ValueError, DataError): return name def _unescape_variable_if_needed(self, name): if name.startswith('\\'): name = name[1:] if len(name) < 2: raise ValueError if name[0] in '$@&' and name[1] != '{': name = '%s{%s}' % (name[0], name[1:]) if is_var(name): return name # Support for possible internal variables (issue 397) name = '%s{%s}' % (name[0], self.replace_variables(name[2:-1])) if is_var(name): return name raise ValueError def _get_var_value(self, name, values): if not values: return self._variables[name] if name[0] == '$': # We could consider catenating values similarly as when creating # scalar variables in the variable table, but that would require # handling non-string values somehow. For details see # https://github.com/robotframework/robotframework/issues/1919 if len(values) != 1 or VariableSplitter(values[0]).is_list_variable(): raise DataError("Setting list value to scalar variable '%s' " "is not supported anymore. Create list " "variable '@%s' instead." 
% (name, name[1:])) return self._variables.replace_scalar(values[0]) return VariableTableValue(values, name).resolve(self._variables) def _log_set_variable(self, name, value): self.log(format_assign_message(name, value)) class _RunKeyword(_BuiltInBase): # If you use any of these run keyword variants from another library, you # should register those keywords with 'register_run_keyword' method. See # the documentation of that method at the end of this file. There are also # other run keyword variant keywords in BuiltIn which can also be seen # at the end of this file. @run_keyword_variant(resolve=1) def run_keyword(self, name, *args): """Executes the given keyword with the given arguments. Because the name of the keyword to execute is given as an argument, it can be a variable and thus set dynamically, e.g. from a return value of another keyword or from the command line. """ if not is_string(name): raise RuntimeError('Keyword name must be a string.') kw = Keyword(name, args=args) return kw.run(self._context) @run_keyword_variant(resolve=0) def run_keywords(self, *keywords): """Executes all the given keywords in a sequence. This keyword is mainly useful in setups and teardowns when they need to take care of multiple actions and creating a new higher level user keyword would be an overkill. By default all arguments are expected to be keywords to be executed. Examples: | Run Keywords | Initialize database | Start servers | Clear logs | | Run Keywords | ${KW 1} | ${KW 2} | | Run Keywords | @{KEYWORDS} | Starting from Robot Framework 2.7.6, keywords can also be run with arguments using upper case ``AND`` as a separator between keywords. The keywords are executed so that the first argument is the first keyword and proceeding arguments until the first ``AND`` are arguments to it. First argument after the first ``AND`` is the second keyword and proceeding arguments until the next ``AND`` are its arguments. And so on. Examples: | Run Keywords | Initialize database | db1 | AND | Start servers | server1 | server2 | | Run Keywords | Initialize database | ${DB NAME} | AND | Start servers | @{SERVERS} | AND | Clear logs | | Run Keywords | ${KW} | AND | @{KW WITH ARGS} | Notice that the ``AND`` control argument must be used explicitly and cannot itself come from a variable. If you need to use literal ``AND`` string as argument, you can either use variables or escape it with a backslash like ``\\AND``. 
""" self._run_keywords(self._split_run_keywords(list(keywords))) def _run_keywords(self, iterable): errors = [] for kw, args in iterable: try: self.run_keyword(kw, *args) except ExecutionPassed as err: err.set_earlier_failures(errors) raise err except ExecutionFailed as err: errors.extend(err.get_errors()) if not err.can_continue(self._context.in_teardown): break if errors: raise ExecutionFailures(errors) def _split_run_keywords(self, keywords): if 'AND' not in keywords: for name in self._variables.replace_list(keywords): yield name, () else: for name, args in self._split_run_keywords_from_and(keywords): yield name, args def _split_run_keywords_from_and(self, keywords): while 'AND' in keywords: index = keywords.index('AND') yield self._resolve_run_keywords_name_and_args(keywords[:index]) keywords = keywords[index+1:] yield self._resolve_run_keywords_name_and_args(keywords) def _resolve_run_keywords_name_and_args(self, kw_call): kw_call = self._variables.replace_list(kw_call, replace_until=1) if not kw_call: raise DataError('Incorrect use of AND') return kw_call[0], kw_call[1:] @run_keyword_variant(resolve=2) def run_keyword_if(self, condition, name, *args): """Runs the given keyword with the given arguments, if ``condition`` is true. The given ``condition`` is evaluated in Python as explained in `Evaluating expressions`, and ``name`` and ``*args`` have same semantics as with `Run Keyword`. Example, a simple if/else construct: | ${status} | ${value} = | `Run Keyword And Ignore Error` | `My Keyword` | | `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg | | `Run Keyword Unless` | '${status}' == 'PASS' | `Another Action` | In this example, only either `Some Action` or `Another Action` is executed, based on the status of `My Keyword`. Instead of `Run Keyword And Ignore Error` you can also use `Run Keyword And Return Status`. Variables used like ``${variable}``, as in the examples above, are replaced in the expression before evaluation. Variables are also available in the evaluation namespace and can be accessed using special syntax ``$variable``. This is a new feature in Robot Framework 2.9 and it is explained more thoroughly in `Evaluating expressions`. Example: | `Run Keyword If` | $result is None or $result == 'FAIL' | `Keyword` | Starting from Robot version 2.7.4, this keyword supports also optional ELSE and ELSE IF branches. Both of these are defined in ``*args`` and must use exactly format ``ELSE`` or ``ELSE IF``, respectively. ELSE branches must contain first the name of the keyword to execute and then its possible arguments. ELSE IF branches must first contain a condition, like the first argument to this keyword, and then the keyword to execute and its possible arguments. It is possible to have ELSE branch after ELSE IF and to have multiple ELSE IF branches. Given previous example, if/else construct can also be created like this: | ${status} | ${value} = | `Run Keyword And Ignore Error` | My Keyword | | `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg | ELSE | `Another Action` | The return value is the one of the keyword that was executed or None if no keyword was executed (i.e. if ``condition`` was false). Hence, it is recommended to use ELSE and/or ELSE IF branches to conditionally assign return values from keyword to variables (to conditionally assign fixed values to variables, see `Set Variable If`). This is illustrated by the example below: | ${var1} = | `Run Keyword If` | ${rc} == 0 | `Some keyword returning a value` | | ... 
| ELSE IF | 0 < ${rc} < 42 | `Another keyword` | | ... | ELSE IF | ${rc} < 0 | `Another keyword with args` | ${rc} | arg2 | | ... | ELSE | `Final keyword to handle abnormal cases` | ${rc} | | ${var2} = | `Run Keyword If` | ${condition} | `Some keyword` | In this example, ${var2} will be set to None if ${condition} is false. Notice that ``ELSE`` and ``ELSE IF`` control words must be used explicitly and thus cannot come from variables. If you need to use literal ``ELSE`` and ``ELSE IF`` strings as arguments, you can escape them with a backslash like ``\\ELSE`` and ``\\ELSE IF``. Starting from Robot Framework 2.8, Python's [http://docs.python.org/2/library/os.html|os] and [http://docs.python.org/2/library/sys.html|sys] modules are automatically imported when evaluating the ``condition``. Attributes they contain can thus be used in the condition: | `Run Keyword If` | os.sep == '/' | `Unix Keyword` | | ... | ELSE IF | sys.platform.startswith('java') | `Jython Keyword` | | ... | ELSE | `Windows Keyword` | """ args, branch = self._split_elif_or_else_branch(args) if self._is_true(condition): return self.run_keyword(name, *args) return branch() def _split_elif_or_else_branch(self, args): if 'ELSE IF' in args: args, branch = self._split_branch(args, 'ELSE IF', 2, 'condition and keyword') return args, lambda: self.run_keyword_if(*branch) if 'ELSE' in args: args, branch = self._split_branch(args, 'ELSE', 1, 'keyword') return args, lambda: self.run_keyword(*branch) return args, lambda: None def _split_branch(self, args, control_word, required, required_error): index = list(args).index(control_word) branch = self._variables.replace_list(args[index+1:], required) if len(branch) < required: raise DataError('%s requires %s.' % (control_word, required_error)) return args[:index], branch @run_keyword_variant(resolve=2) def run_keyword_unless(self, condition, name, *args): """Runs the given keyword with the given arguments, if ``condition`` is false. See `Run Keyword If` for more information and an example. """ if not self._is_true(condition): return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_and_ignore_error(self, name, *args): """Runs the given keyword with the given arguments and ignores possible error. This keyword returns two values, so that the first is either string ``PASS`` or ``FAIL``, depending on the status of the executed keyword. The second value is either the return value of the keyword or the received error message. See `Run Keyword And Return Status` If you are only interested in the execution status. The keyword name and arguments work as in `Run Keyword`. See `Run Keyword If` for a usage example. Errors caused by invalid syntax, timeouts, or fatal exceptions are not caught by this keyword. Otherwise this keyword itself never fails. Since Robot Framework 2.9, variable errors are caught by this keyword. """ try: return 'PASS', self.run_keyword(name, *args) except ExecutionFailed as err: if err.dont_continue: raise return 'FAIL', unic(err) @run_keyword_variant(resolve=1) def run_keyword_and_return_status(self, name, *args): """Runs the given keyword with given arguments and returns the status as a Boolean value. This keyword returns Boolean ``True`` if the keyword that is executed succeeds and ``False`` if it fails. This is useful, for example, in combination with `Run Keyword If`. If you are interested in the error message or return value, use `Run Keyword And Ignore Error` instead. The keyword name and arguments work as in `Run Keyword`. 
Example: | ${passed} = | `Run Keyword And Return Status` | Keyword | args | | `Run Keyword If` | ${passed} | Another keyword | Errors caused by invalid syntax, timeouts, or fatal exceptions are not caught by this keyword. Otherwise this keyword itself never fails. New in Robot Framework 2.7.6. """ status, _ = self.run_keyword_and_ignore_error(name, *args) return status == 'PASS' @run_keyword_variant(resolve=1) def run_keyword_and_continue_on_failure(self, name, *args): """Runs the keyword and continues execution even if a failure occurs. The keyword name and arguments work as with `Run Keyword`. Example: | Run Keyword And Continue On Failure | Fail | This is a stupid example | | Log | This keyword is executed | The execution is not continued if the failure is caused by invalid syntax, timeout, or fatal exception. Since Robot Framework 2.9, variable errors are caught by this keyword. """ try: return self.run_keyword(name, *args) except ExecutionFailed as err: if not err.dont_continue: err.continue_on_failure = True raise err @run_keyword_variant(resolve=2) def run_keyword_and_expect_error(self, expected_error, name, *args): """Runs the keyword and checks that the expected error occurred. The expected error must be given in the same format as in Robot Framework reports. It can be a pattern containing characters ``?``, which matches to any single character and ``*``, which matches to any number of any characters. ``name`` and ``*args`` have same semantics as with `Run Keyword`. If the expected error occurs, the error message is returned and it can be further processed/tested, if needed. If there is no error, or the error does not match the expected error, this keyword fails. Examples: | Run Keyword And Expect Error | My error | Some Keyword | arg1 | arg2 | | ${msg} = | Run Keyword And Expect Error | * | My KW | | Should Start With | ${msg} | Once upon a time in | Errors caused by invalid syntax, timeouts, or fatal exceptions are not caught by this keyword. Since Robot Framework 2.9, variable errors are caught by this keyword. """ try: self.run_keyword(name, *args) except ExecutionFailed as err: if err.dont_continue: raise error = err else: raise AssertionError("Expected error '%s' did not occur." % expected_error) if not self._matches(unic(error), expected_error): raise AssertionError("Expected error '%s' but got '%s'." % (expected_error, error)) return unic(error) @run_keyword_variant(resolve=2) def repeat_keyword(self, repeat, name, *args): """Executes the specified keyword multiple times. ``name`` and ``args`` define the keyword that is executed similarly as with `Run Keyword`. ``repeat`` specifies how many times (as a count) or how long time (as a timeout) the keyword should be executed. If ``repeat`` is given as count, it specifies how many times the keyword should be executed. ``repeat`` can be given as an integer or as a string that can be converted to an integer. If it is a string, it can have postfix ``times`` or ``x`` (case and space insensitive) to make the expression more explicit. If ``repeat`` is given as timeout, it must be in Robot Framework's time format (e.g. ``1 minute``, ``2 min 3 s``). Using a number alone (e.g. ``1`` or ``1.5``) does not work in this context. If ``repeat`` is zero or negative, the keyword is not executed at all. This keyword fails immediately if any of the execution rounds fails. 
Examples: | Repeat Keyword | 5 times | Go to Previous Page | | Repeat Keyword | ${var} | Some Keyword | arg1 | arg2 | | Repeat Keyword | 2 minutes | Some Keyword | arg1 | arg2 | Specifying ``repeat`` as a timeout is new in Robot Framework 3.0. """ try: count = self._get_repeat_count(repeat) except RuntimeError as err: timeout = self._get_repeat_timeout(repeat) if timeout is None: raise err keywords = self._keywords_repeated_by_timeout(timeout, name, args) else: keywords = self._keywords_repeated_by_count(count, name, args) self._run_keywords(keywords) def _get_repeat_count(self, times, require_postfix=False): times = normalize(str(times)) if times.endswith('times'): times = times[:-5] elif times.endswith('x'): times = times[:-1] elif require_postfix: raise ValueError return self._convert_to_integer(times) def _get_repeat_timeout(self, timestr): try: float(timestr) except ValueError: pass else: return None try: return timestr_to_secs(timestr) except ValueError: return None def _keywords_repeated_by_count(self, count, name, args): if count <= 0: self.log("Keyword '%s' repeated zero times." % name) for i in range(count): self.log("Repeating keyword, round %d/%d." % (i + 1, count)) yield name, args def _keywords_repeated_by_timeout(self, timeout, name, args): if timeout <= 0: self.log("Keyword '%s' repeated zero times." % name) repeat_round = 0 maxtime = time.time() + timeout while time.time() < maxtime: repeat_round += 1 self.log("Repeating keyword, round %d, %s remaining." % (repeat_round, secs_to_timestr(maxtime - time.time(), compact=True))) yield name, args @run_keyword_variant(resolve=3) def wait_until_keyword_succeeds(self, retry, retry_interval, name, *args): """Runs the specified keyword and retries if it fails. ``name`` and ``args`` define the keyword that is executed similarly as with `Run Keyword`. How long to retry running the keyword is defined using ``retry`` argument either as timeout or count. ``retry_interval`` is the time to wait before trying to run the keyword again after the previous run has failed. If ``retry`` is given as timeout, it must be in Robot Framework's time format (e.g. ``1 minute``, ``2 min 3 s``, ``4.5``) that is explained in an appendix of Robot Framework User Guide. If it is given as count, it must have ``times`` or ``x`` postfix (e.g. ``5 times``, ``10 x``). ``retry_interval`` must always be given in Robot Framework's time format. If the keyword does not succeed regardless of retries, this keyword fails. If the executed keyword passes, its return value is returned. Examples: | Wait Until Keyword Succeeds | 2 min | 5 sec | My keyword | argument | | ${result} = | Wait Until Keyword Succeeds | 3x | 200ms | My keyword | All normal failures are caught by this keyword. Errors caused by invalid syntax, test or keyword timeouts, or fatal exceptions (caused e.g. by `Fatal Error`) are not caught. Running the same keyword multiple times inside this keyword can create lots of output and considerably increase the size of the generated output files. Starting from Robot Framework 2.7, it is possible to remove unnecessary keywords from the outputs using ``--RemoveKeywords WUKS`` command line option. Support for specifying ``retry`` as a number of times to retry is a new feature in Robot Framework 2.9. Since Robot Framework 2.9, variable errors are caught by this keyword. 
""" maxtime = count = -1 try: count = self._get_repeat_count(retry, require_postfix=True) except ValueError: timeout = timestr_to_secs(retry) maxtime = time.time() + timeout message = 'for %s' % secs_to_timestr(timeout) else: if count <= 0: raise ValueError('Retry count %d is not positive.' % count) message = '%d time%s' % (count, s(count)) retry_interval = timestr_to_secs(retry_interval) while True: try: return self.run_keyword(name, *args) except ExecutionFailed as err: if err.dont_continue: raise count -= 1 if time.time() > maxtime > 0 or count == 0: raise AssertionError("Keyword '%s' failed after retrying " "%s. The last error was: %s" % (name, message, err)) self._sleep_in_parts(retry_interval) @run_keyword_variant(resolve=1) def set_variable_if(self, condition, *values): """Sets variable based on the given condition. The basic usage is giving a condition and two values. The given condition is first evaluated the same way as with the `Should Be True` keyword. If the condition is true, then the first value is returned, and otherwise the second value is returned. The second value can also be omitted, in which case it has a default value None. This usage is illustrated in the examples below, where ``${rc}`` is assumed to be zero. | ${var1} = | Set Variable If | ${rc} == 0 | zero | nonzero | | ${var2} = | Set Variable If | ${rc} > 0 | value1 | value2 | | ${var3} = | Set Variable If | ${rc} > 0 | whatever | | => | ${var1} = 'zero' | ${var2} = 'value2' | ${var3} = None It is also possible to have 'else if' support by replacing the second value with another condition, and having two new values after it. If the first condition is not true, the second is evaluated and one of the values after it is returned based on its truth value. This can be continued by adding more conditions without a limit. | ${var} = | Set Variable If | ${rc} == 0 | zero | | ... | ${rc} > 0 | greater than zero | less then zero | | | | ${var} = | Set Variable If | | ... | ${rc} == 0 | zero | | ... | ${rc} == 1 | one | | ... | ${rc} == 2 | two | | ... | ${rc} > 2 | greater than two | | ... | ${rc} < 0 | less than zero | Use `Get Variable Value` if you need to set variables dynamically based on whether a variable exist or not. """ values = self._verify_values_for_set_variable_if(list(values)) if self._is_true(condition): return self._variables.replace_scalar(values[0]) values = self._verify_values_for_set_variable_if(values[1:], True) if len(values) == 1: return self._variables.replace_scalar(values[0]) return self.run_keyword('BuiltIn.Set Variable If', *values[0:]) def _verify_values_for_set_variable_if(self, values, default=False): if not values: if default: return [None] raise RuntimeError('At least one value is required') if is_list_var(values[0]): values[:1] = [escape(item) for item in self._variables[values[0]]] return self._verify_values_for_set_variable_if(values) return values @run_keyword_variant(resolve=1) def run_keyword_if_test_failed(self, name, *args): """Runs the given keyword with the given arguments, if the test failed. This keyword can only be used in a test teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. Prior to Robot Framework 2.9 failures in test teardown itself were not detected by this keyword. 
""" test = self._get_test_in_teardown('Run Keyword If Test Failed') if not test.passed: return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_if_test_passed(self, name, *args): """Runs the given keyword with the given arguments, if the test passed. This keyword can only be used in a test teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. Prior to Robot Framework 2.9 failures in test teardown itself were not detected by this keyword. """ test = self._get_test_in_teardown('Run Keyword If Test Passed') if test.passed: return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_if_timeout_occurred(self, name, *args): """Runs the given keyword if either a test or a keyword timeout has occurred. This keyword can only be used in a test teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. """ self._get_test_in_teardown('Run Keyword If Timeout Occurred') if self._context.timeout_occurred: return self.run_keyword(name, *args) def _get_test_in_teardown(self, kwname): ctx = self._context if ctx.test and ctx.in_test_teardown: return ctx.test raise RuntimeError("Keyword '%s' can only be used in test teardown." % kwname) @run_keyword_variant(resolve=1) def run_keyword_if_all_critical_tests_passed(self, name, *args): """Runs the given keyword with the given arguments, if all critical tests passed. This keyword can only be used in suite teardown. Trying to use it in any other place will result in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. """ suite = self._get_suite_in_teardown('Run Keyword If ' 'All Critical Tests Passed') if suite.statistics.critical.failed == 0: return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_if_any_critical_tests_failed(self, name, *args): """Runs the given keyword with the given arguments, if any critical tests failed. This keyword can only be used in a suite teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. """ suite = self._get_suite_in_teardown('Run Keyword If ' 'Any Critical Tests Failed') if suite.statistics.critical.failed > 0: return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_if_all_tests_passed(self, name, *args): """Runs the given keyword with the given arguments, if all tests passed. This keyword can only be used in a suite teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. """ suite = self._get_suite_in_teardown('Run Keyword If All Tests Passed') if suite.statistics.all.failed == 0: return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_if_any_tests_failed(self, name, *args): """Runs the given keyword with the given arguments, if one or more tests failed. This keyword can only be used in a suite teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. 
""" suite = self._get_suite_in_teardown('Run Keyword If Any Tests Failed') if suite.statistics.all.failed > 0: return self.run_keyword(name, *args) def _get_suite_in_teardown(self, kwname): if not self._context.in_suite_teardown: raise RuntimeError("Keyword '%s' can only be used in suite teardown." % kwname) return self._context.suite class _Control(_BuiltInBase): def continue_for_loop(self): """Skips the current for loop iteration and continues from the next. Skips the remaining keywords in the current for loop iteration and continues from the next one. Can be used directly in a for loop or in a keyword that the loop uses. Example: | :FOR | ${var} | IN | @{VALUES} | | | Run Keyword If | '${var}' == 'CONTINUE' | Continue For Loop | | | Do Something | ${var} | See `Continue For Loop If` to conditionally continue a for loop without using `Run Keyword If` or other wrapper keywords. New in Robot Framework 2.8. """ self.log("Continuing for loop from the next iteration.") raise ContinueForLoop() def continue_for_loop_if(self, condition): """Skips the current for loop iteration if the ``condition`` is true. A wrapper for `Continue For Loop` to continue a for loop based on the given condition. The condition is evaluated using the same semantics as with `Should Be True` keyword. Example: | :FOR | ${var} | IN | @{VALUES} | | | Continue For Loop If | '${var}' == 'CONTINUE' | | | Do Something | ${var} | New in Robot Framework 2.8. """ if self._is_true(condition): self.continue_for_loop() def exit_for_loop(self): """Stops executing the enclosing for loop. Exits the enclosing for loop and continues execution after it. Can be used directly in a for loop or in a keyword that the loop uses. Example: | :FOR | ${var} | IN | @{VALUES} | | | Run Keyword If | '${var}' == 'EXIT' | Exit For Loop | | | Do Something | ${var} | See `Exit For Loop If` to conditionally exit a for loop without using `Run Keyword If` or other wrapper keywords. """ self.log("Exiting for loop altogether.") raise ExitForLoop() def exit_for_loop_if(self, condition): """Stops executing the enclosing for loop if the ``condition`` is true. A wrapper for `Exit For Loop` to exit a for loop based on the given condition. The condition is evaluated using the same semantics as with `Should Be True` keyword. Example: | :FOR | ${var} | IN | @{VALUES} | | | Exit For Loop If | '${var}' == 'EXIT' | | | Do Something | ${var} | New in Robot Framework 2.8. """ if self._is_true(condition): self.exit_for_loop() @run_keyword_variant(resolve=0) def return_from_keyword(self, *return_values): """Returns from the enclosing user keyword. This keyword can be used to return from a user keyword with PASS status without executing it fully. It is also possible to return values similarly as with the ``[Return]`` setting. For more detailed information about working with the return values, see the User Guide. This keyword is typically wrapped to some other keyword, such as `Run Keyword If` or `Run Keyword If Test Passed`, to return based on a condition: | Run Keyword If | ${rc} < 0 | Return From Keyword | | Run Keyword If Test Passed | Return From Keyword | It is possible to use this keyword to return from a keyword also inside a for loop. That, as well as returning values, is demonstrated by the `Find Index` keyword in the following somewhat advanced example. Notice that it is often a good idea to move this kind of complicated logic into a test library. 
| ***** Variables ***** | @{LIST} = foo baz | | ***** Test Cases ***** | Example | ${index} = Find Index baz @{LIST} | Should Be Equal ${index} ${1} | ${index} = Find Index non existing @{LIST} | Should Be Equal ${index} ${-1} | | ***** Keywords ***** | Find Index | [Arguments] ${element} @{items} | ${index} = Set Variable ${0} | :FOR ${item} IN @{items} | \\ Run Keyword If '${item}' == '${element}' Return From Keyword ${index} | \\ ${index} = Set Variable ${index + 1} | Return From Keyword ${-1} # Also [Return] would work here. The most common use case, returning based on an expression, can be accomplished directly with `Return From Keyword If`. Both of these keywords are new in Robot Framework 2.8. See also `Run Keyword And Return` and `Run Keyword And Return If`. """ self.log('Returning from the enclosing user keyword.') raise ReturnFromKeyword(return_values) @run_keyword_variant(resolve=1) def return_from_keyword_if(self, condition, *return_values): """Returns from the enclosing user keyword if ``condition`` is true. A wrapper for `Return From Keyword` to return based on the given condition. The condition is evaluated using the same semantics as with `Should Be True` keyword. Given the same example as in `Return From Keyword`, we can rewrite the `Find Index` keyword as follows: | ***** Keywords ***** | Find Index | [Arguments] ${element} @{items} | ${index} = Set Variable ${0} | :FOR ${item} IN @{items} | \\ Return From Keyword If '${item}' == '${element}' ${index} | \\ ${index} = Set Variable ${index + 1} | Return From Keyword ${-1} # Also [Return] would work here. See also `Run Keyword And Return` and `Run Keyword And Return If`. New in Robot Framework 2.8. """ if self._is_true(condition): self.return_from_keyword(*return_values) @run_keyword_variant(resolve=1) def run_keyword_and_return(self, name, *args): """Runs the specified keyword and returns from the enclosing user keyword. The keyword to execute is defined with ``name`` and ``*args`` exactly like with `Run Keyword`. After running the keyword, returns from the enclosing user keyword and passes possible return value from the executed keyword further. Returning from a keyword has exactly same semantics as with `Return From Keyword`. Example: | `Run Keyword And Return` | `My Keyword` | arg1 | arg2 | | # Above is equivalent to: | | ${result} = | `My Keyword` | arg1 | arg2 | | `Return From Keyword` | ${result} | | | Use `Run Keyword And Return If` if you want to run keyword and return based on a condition. New in Robot Framework 2.8.2. """ ret = self.run_keyword(name, *args) self.return_from_keyword(escape(ret)) @run_keyword_variant(resolve=2) def run_keyword_and_return_if(self, condition, name, *args): """Runs the specified keyword and returns from the enclosing user keyword. A wrapper for `Run Keyword And Return` to run and return based on the given ``condition``. The condition is evaluated using the same semantics as with `Should Be True` keyword. Example: | `Run Keyword And Return If` | ${rc} > 0 | `My Keyword` | arg1 | arg2 | | # Above is equivalent to: | | `Run Keyword If` | ${rc} > 0 | `Run Keyword And Return` | `My Keyword ` | arg1 | arg2 | Use `Return From Keyword If` if you want to return a certain value based on a condition. New in Robot Framework 2.8.2. """ if self._is_true(condition): self.run_keyword_and_return(name, *args) def pass_execution(self, message, *tags): """Skips rest of the current test, setup, or teardown with PASS status. 
        This keyword can be used anywhere in the test data, but the place
        where used affects the behavior:

        - When used in any setup or teardown (suite, test or keyword), passes
          that setup or teardown. Possible keyword teardowns of the started
          keywords are executed. Does not affect execution or statuses
          otherwise.
        - When used in a test outside setup or teardown, passes that
          particular test case. Possible test and keyword teardowns are
          executed.

        Possible continuable failures before this keyword is used, as well as
        failures in executed teardowns, will fail the execution.

        It is mandatory to give a message explaining why execution was passed.
        By default the message is considered plain text, but starting it with
        ``*HTML*`` allows using HTML formatting.

        It is also possible to modify test tags passing tags after the message
        similarly as with `Fail` keyword. Tags starting with a hyphen
        (e.g. ``-regression``) are removed and others added. Tags are modified
        using `Set Tags` and `Remove Tags` internally, and the semantics
        setting and removing them are the same as with these keywords.

        Examples:
        | Pass Execution | All features available in this version tested. |
        | Pass Execution | Deprecated test. | deprecated | -regression |

        This keyword is typically wrapped to some other keyword, such as
        `Run Keyword If`, to pass based on a condition. The most common case
        can be handled also with `Pass Execution If`:

        | Run Keyword If    | ${rc} < 0 | Pass Execution | Negative values are cool. |
        | Pass Execution If | ${rc} < 0 | Negative values are cool. |

        Passing execution in the middle of a test, setup or teardown should
        be used with care. In the worst case it leads to tests that skip all
        the parts that could actually uncover problems in the tested
        application. In cases where execution cannot continue due to external
        factors, it is often safer to fail the test case and make it
        non-critical.

        New in Robot Framework 2.8.
        """
        message = message.strip()
        if not message:
            raise RuntimeError('Message cannot be empty.')
        self._set_and_remove_tags(tags)
        log_message, level = self._get_logged_test_message_and_level(message)
        self.log('Execution passed with message:\n%s' % log_message, level)
        raise PassExecution(message)

    @run_keyword_variant(resolve=1)
    def pass_execution_if(self, condition, message, *tags):
        """Conditionally skips rest of the current test, setup, or teardown with PASS status.

        A wrapper for `Pass Execution` to skip rest of the current test,
        setup or teardown based on the given ``condition``. The condition is
        evaluated similarly as with `Should Be True` keyword, and ``message``
        and ``*tags`` have same semantics as with `Pass Execution`.

        Example:
        | :FOR | ${var}            | IN                     | @{VALUES}               |
        |      | Pass Execution If | '${var}' == 'EXPECTED' | Correct value was found |
        |      | Do Something      | ${var}                 |

        New in Robot Framework 2.8.
        """
        if self._is_true(condition):
            message = self._variables.replace_string(message)
            tags = self._variables.replace_list(tags)
            self.pass_execution(message, *tags)


class _Misc(_BuiltInBase):

    def no_operation(self):
        """Does absolutely nothing."""

    def sleep(self, time_, reason=None):
        """Pauses the test executed for the given time.

        ``time`` may be either a number or a time string. Time strings are in
        a format such as ``1 day 2 hours 3 minutes 4 seconds 5 milliseconds``
        or ``1d 2h 3m 4s 5ms``, and they are fully explained in an appendix of
        Robot Framework User Guide. Optional `reason` can be used to explain
        why sleeping is necessary. Both the time slept and the reason are
        logged.
Examples: | Sleep | 42 | | Sleep | 1.5 | | Sleep | 2 minutes 10 seconds | | Sleep | 10s | Wait for a reply | """ seconds = timestr_to_secs(time_) # Python hangs with negative values if seconds < 0: seconds = 0 self._sleep_in_parts(seconds) self.log('Slept %s' % secs_to_timestr(seconds)) if reason: self.log(reason) def _sleep_in_parts(self, seconds): # time.sleep can't be stopped in windows # to ensure that we can signal stop (with timeout) # split sleeping to small pieces endtime = time.time() + float(seconds) while True: remaining = endtime - time.time() if remaining <= 0: break time.sleep(min(remaining, 0.01)) def catenate(self, *items): """Catenates the given items together and returns the resulted string. By default, items are catenated with spaces, but if the first item contains the string ``SEPARATOR=<sep>``, the separator ``<sep>`` is used instead. Items are converted into strings when necessary. Examples: | ${str1} = | Catenate | Hello | world | | | ${str2} = | Catenate | SEPARATOR=--- | Hello | world | | ${str3} = | Catenate | SEPARATOR= | Hello | world | => | ${str1} = 'Hello world' | ${str2} = 'Hello---world' | ${str3} = 'Helloworld' """ if not items: return '' items = [unic(item) for item in items] if items[0].startswith('SEPARATOR='): sep = items[0][len('SEPARATOR='):] items = items[1:] else: sep = ' ' return sep.join(items) def log(self, message, level='INFO', html=False, console=False, repr=False): u"""Logs the given message with the given level. Valid levels are TRACE, DEBUG, INFO (default), HTML, WARN, and ERROR. Messages below the current active log level are ignored. See `Set Log Level` keyword and ``--loglevel`` command line option for more details about setting the level. Messages logged with the WARN or ERROR levels will be automatically visible also in the console and in the Test Execution Errors section in the log file. Logging can be configured using optional ``html``, ``console`` and ``repr`` arguments. They are off by default, but can be enabled by giving them a true value. See `Boolean arguments` section for more information about true and false values. If the ``html`` argument is given a true value, the message will be considered HTML and special characters such as ``<`` in it are not escaped. For example, logging ``<img src="image.png">`` creates an image when ``html`` is true, but otherwise the message is that exact string. An alternative to using the ``html`` argument is using the HTML pseudo log level. It logs the message as HTML using the INFO level. If the ``console`` argument is true, the message will be written to the console where test execution was started from in addition to the log file. This keyword always uses the standard output stream and adds a newline after the written message. Use `Log To Console` instead if either of these is undesirable, If the ``repr`` argument is true, the given item will be passed through a custom version of Python's ``pprint.pformat()`` function before logging it. This is useful, for example, when working with strings or bytes containing invisible characters, or when working with nested data structures. The custom version differs from the standard one so that it omits the ``u`` prefix from Unicode strings and adds ``b`` prefix to byte strings. Examples: | Log | Hello, world! | | | # Normal INFO message. | | Log | Warning, world! | WARN | | # Warning. | | Log | <b>Hello</b>, world! | html=yes | | # INFO message as HTML. | | Log | <b>Hello</b>, world! | HTML | | # Same as above. | | Log | <b>Hello</b>, world! 
| DEBUG | html=true | # DEBUG as HTML. | | Log | Hello, console! | console=yes | | # Log also to the console. | | Log | Hyv\xe4 \\x00 | repr=yes | | # Log ``'Hyv\\xe4 \\x00'``. | See `Log Many` if you want to log multiple messages in one go, and `Log To Console` if you only want to write to the console. Arguments ``html``, ``console``, and ``repr`` are new in Robot Framework 2.8.2. Pprint support when ``repr`` is used is new in Robot Framework 2.8.6, and it was changed to drop the ``u`` prefix and add the ``b`` prefix in Robot Framework 2.9. """ if is_truthy(repr): message = prepr(message, width=80) logger.write(message, level, is_truthy(html)) if is_truthy(console): logger.console(message) @run_keyword_variant(resolve=0) def log_many(self, *messages): """Logs the given messages as separate entries using the INFO level. Supports also logging list and dictionary variable items individually. Examples: | Log Many | Hello | ${var} | | Log Many | @{list} | &{dict} | See `Log` and `Log To Console` keywords if you want to use alternative log levels, use HTML, or log to the console. """ for msg in self._yield_logged_messages(messages): self.log(msg) def _yield_logged_messages(self, messages): for msg in messages: var = VariableSplitter(msg) value = self._variables.replace_scalar(msg) if var.is_list_variable(): for item in value: yield item elif var.is_dict_variable(): for name, value in value.items(): yield '%s=%s' % (name, value) else: yield value def log_to_console(self, message, stream='STDOUT', no_newline=False): """Logs the given message to the console. By default uses the standard output stream. Using the standard error stream is possibly by giving the ``stream`` argument value ``STDERR`` (case-insensitive). By default appends a newline to the logged message. This can be disabled by giving the ``no_newline`` argument a true value (see `Boolean arguments`). Examples: | Log To Console | Hello, console! | | | Log To Console | Hello, stderr! | STDERR | | Log To Console | Message starts here and is | no_newline=true | | Log To Console | continued without newline. | | This keyword does not log the message to the normal log file. Use `Log` keyword, possibly with argument ``console``, if that is desired. New in Robot Framework 2.8.2. """ logger.console(message, newline=is_falsy(no_newline), stream=stream) @run_keyword_variant(resolve=0) def comment(self, *messages): """Displays the given messages in the log file as keyword arguments. This keyword does nothing with the arguments it receives, but as they are visible in the log, this keyword can be used to display simple messages. Given arguments are ignored so thoroughly that they can even contain non-existing variables. If you are interested about variable values, you can use the `Log` or `Log Many` keywords. """ pass def set_log_level(self, level): """Sets the log threshold to the specified level and returns the old level. Messages below the level will not logged. The default logging level is INFO, but it can be overridden with the command line option ``--loglevel``. The available levels: TRACE, DEBUG, INFO (default), WARN, ERROR and NONE (no logging). """ try: old = self._context.output.set_log_level(level) except DataError as err: raise RuntimeError(unic(err)) self._namespace.variables.set_global('${LOG_LEVEL}', level.upper()) self.log('Log level changed from %s to %s.' % (old, level.upper())) return old def reload_library(self, name_or_instance): """Rechecks what keywords the specified library provides. 
Can be called explicitly in the test data or by a library itself when keywords it provides have changed. The library can be specified by its name or as the active instance of the library. The latter is especially useful if the library itself calls this keyword as a method. New in Robot Framework 2.9. """ library = self._namespace.reload_library(name_or_instance) self.log('Reloaded library %s with %s keywords.' % (library.name, len(library))) @run_keyword_variant(resolve=0) def import_library(self, name, *args): """Imports a library with the given name and optional arguments. This functionality allows dynamic importing of libraries while tests are running. That may be necessary, if the library itself is dynamic and not yet available when test data is processed. In a normal case, libraries should be imported using the Library setting in the Setting table. This keyword supports importing libraries both using library names and physical paths. When paths are used, they must be given in absolute format or found from [http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#pythonpath-jythonpath-and-ironpythonpath| search path]. Forward slashes can be used as path separators in all operating systems. It is possible to pass arguments to the imported library and also named argument syntax works if the library supports it. ``WITH NAME`` syntax can be used to give a custom name to the imported library. Examples: | Import Library | MyLibrary | | Import Library | ${CURDIR}/../Library.py | arg1 | named=arg2 | | Import Library | ${LIBRARIES}/Lib.java | arg | WITH NAME | JavaLib | """ try: self._namespace.import_library(name, list(args)) except DataError as err: raise RuntimeError(unic(err)) @run_keyword_variant(resolve=0) def import_variables(self, path, *args): """Imports a variable file with the given path and optional arguments. Variables imported with this keyword are set into the test suite scope similarly when importing them in the Setting table using the Variables setting. These variables override possible existing variables with the same names. This functionality can thus be used to import new variables, for example, for each test in a test suite. The given path must be absolute or found from [http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#pythonpath-jythonpath-and-ironpythonpath| search path]. Forward slashes can be used as path separator regardless the operating system. Examples: | Import Variables | ${CURDIR}/variables.py | | | | Import Variables | ${CURDIR}/../vars/env.py | arg1 | arg2 | | Import Variables | file_from_pythonpath.py | | | """ try: self._namespace.import_variables(path, list(args), overwrite=True) except DataError as err: raise RuntimeError(unic(err)) @run_keyword_variant(resolve=0) def import_resource(self, path): """Imports a resource file with the given path. Resources imported with this keyword are set into the test suite scope similarly when importing them in the Setting table using the Resource setting. The given path must be absolute or found from [http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#pythonpath-jythonpath-and-ironpythonpath| search path]. Forward slashes can be used as path separator regardless the operating system. 
Examples: | Import Resource | ${CURDIR}/resource.txt | | Import Resource | ${CURDIR}/../resources/resource.html | | Import Resource | found_from_pythonpath.robot | """ try: self._namespace.import_resource(path) except DataError as err: raise RuntimeError(unic(err)) def set_library_search_order(self, *search_order): """Sets the resolution order to use when a name matches multiple keywords. The library search order is used to resolve conflicts when a keyword name in the test data matches multiple keywords. The first library (or resource, see below) containing the keyword is selected and that keyword implementation used. If the keyword is not found from any library (or resource), test executing fails the same way as when the search order is not set. When this keyword is used, there is no need to use the long ``LibraryName.Keyword Name`` notation. For example, instead of having | MyLibrary.Keyword | arg | | MyLibrary.Another Keyword | | MyLibrary.Keyword | xxx | you can have | Set Library Search Order | MyLibrary | | Keyword | arg | | Another Keyword | | Keyword | xxx | This keyword can be used also to set the order of keywords in different resource files. In this case resource names must be given without paths or extensions like: | Set Library Search Order | resource | another_resource | *NOTE:* - The search order is valid only in the suite where this keywords is used. - Keywords in resources always have higher priority than keywords in libraries regardless the search order. - The old order is returned and can be used to reset the search order later. - Library and resource names in the search order are both case and space insensitive. """ return self._namespace.set_search_order(search_order) def keyword_should_exist(self, name, msg=None): """Fails unless the given keyword exists in the current scope. Fails also if there are more than one keywords with the same name. Works both with the short name (e.g. ``Log``) and the full name (e.g. ``BuiltIn.Log``). The default error message can be overridden with the ``msg`` argument. See also `Variable Should Exist`. """ try: runner = self._namespace.get_runner(name) except DataError as err: raise AssertionError(msg or unic(err)) if isinstance(runner, UserErrorHandler): raise AssertionError(msg or runner.error) def get_time(self, format='timestamp', time_='NOW'): """Returns the given time in the requested format. *NOTE:* DateTime library added in Robot Framework 2.8.5 contains much more flexible keywords for getting the current date and time and for date and time handling in general. How time is returned is determined based on the given ``format`` string as follows. Note that all checks are case-insensitive. 1) If ``format`` contains the word ``epoch``, the time is returned in seconds after the UNIX epoch (1970-01-01 00:00:00 UTC). The return value is always an integer. 2) If ``format`` contains any of the words ``year``, ``month``, ``day``, ``hour``, ``min``, or ``sec``, only the selected parts are returned. The order of the returned parts is always the one in the previous sentence and the order of words in ``format`` is not significant. The parts are returned as zero-padded strings (e.g. May -> ``05``). 3) Otherwise (and by default) the time is returned as a timestamp string in the format ``2006-02-24 15:08:31``. By default this keyword returns the current local time, but that can be altered using ``time`` argument as explained below. Note that all checks involving strings are case-insensitive. 
1) If ``time`` is a number, or a string that can be converted to a number, it is interpreted as seconds since the UNIX epoch. This documentation was originally written about 1177654467 seconds after the epoch. 2) If ``time`` is a timestamp, that time will be used. Valid timestamp formats are ``YYYY-MM-DD hh:mm:ss`` and ``YYYYMMDD hhmmss``. 3) If ``time`` is equal to ``NOW`` (default), the current local time is used. This time is got using Python's ``time.time()`` function. 4) If ``time`` is equal to ``UTC``, the current time in [http://en.wikipedia.org/wiki/Coordinated_Universal_Time|UTC] is used. This time is got using ``time.time() + time.altzone`` in Python. 5) If ``time`` is in the format like ``NOW - 1 day`` or ``UTC + 1 hour 30 min``, the current local/UTC time plus/minus the time specified with the time string is used. The time string format is described in an appendix of Robot Framework User Guide. Examples (expecting the current local time is 2006-03-29 15:06:21): | ${time} = | Get Time | | | | | ${secs} = | Get Time | epoch | | | | ${year} = | Get Time | return year | | | | ${yyyy} | ${mm} | ${dd} = | Get Time | year,month,day | | @{time} = | Get Time | year month day hour min sec | | | | ${y} | ${s} = | Get Time | seconds and year | | => | ${time} = '2006-03-29 15:06:21' | ${secs} = 1143637581 | ${year} = '2006' | ${yyyy} = '2006', ${mm} = '03', ${dd} = '29' | @{time} = ['2006', '03', '29', '15', '06', '21'] | ${y} = '2006' | ${s} = '21' Examples (expecting the current local time is 2006-03-29 15:06:21 and UTC time is 2006-03-29 12:06:21): | ${time} = | Get Time | | 1177654467 | # Time given as epoch seconds | | ${secs} = | Get Time | sec | 2007-04-27 09:14:27 | # Time given as a timestamp | | ${year} = | Get Time | year | NOW | # The local time of execution | | @{time} = | Get Time | hour min sec | NOW + 1h 2min 3s | # 1h 2min 3s added to the local time | | @{utc} = | Get Time | hour min sec | UTC | # The UTC time of execution | | ${hour} = | Get Time | hour | UTC - 1 hour | # 1h subtracted from the UTC time | => | ${time} = '2007-04-27 09:14:27' | ${secs} = 27 | ${year} = '2006' | @{time} = ['16', '08', '24'] | @{utc} = ['12', '06', '21'] | ${hour} = '11' Support for UTC time was added in Robot Framework 2.7.5 but it did not work correctly until 2.7.7. """ return get_time(format, parse_time(time_)) def evaluate(self, expression, modules=None, namespace=None): """Evaluates the given expression in Python and returns the results. ``expression`` is evaluated in Python as explained in `Evaluating expressions`. ``modules`` argument can be used to specify a comma separated list of Python modules to be imported and added to the evaluation namespace. ``namespace`` argument can be used to pass a custom evaluation namespace as a dictionary. Possible ``modules`` are added to this namespace. This is a new feature in Robot Framework 2.8.4. Variables used like ``${variable}`` are replaced in the expression before evaluation. Variables are also available in the evaluation namespace and can be accessed using special syntax ``$variable``. This is a new feature in Robot Framework 2.9 and it is explained more thoroughly in `Evaluating expressions`. 
Examples (expecting ``${result}`` is 3.14): | ${status} = | Evaluate | 0 < ${result} < 10 | # Would also work with string '3.14' | | ${status} = | Evaluate | 0 < $result < 10 | # Using variable itself, not string representation | | ${random} = | Evaluate | random.randint(0, sys.maxint) | modules=random, sys | | ${ns} = | Create Dictionary | x=${4} | y=${2} | | ${result} = | Evaluate | x*10 + y | namespace=${ns} | => | ${status} = True | ${random} = <random integer> | ${result} = 42 """ variables = self._variables.as_dict(decoration=False) expression = self._handle_variables_in_expression(expression, variables) namespace = self._create_evaluation_namespace(namespace, modules) variables = self._decorate_variables_for_evaluation(variables) try: if not is_string(expression): raise TypeError("Expression must be string, got %s." % type_name(expression)) if not expression: raise ValueError("Expression cannot be empty.") return eval(expression, namespace, variables) except: raise RuntimeError("Evaluating expression '%s' failed: %s" % (expression, get_error_message())) def _handle_variables_in_expression(self, expression, variables): if not is_string(expression): return expression tokens = [] variable_started = seen_variable = False generated = generate_tokens(StringIO(expression).readline) for toknum, tokval, _, _, _ in generated: if variable_started: if toknum == token.NAME: if tokval not in variables: variable_not_found('$%s' % tokval, variables, deco_braces=False) tokval = 'RF_VAR_' + tokval seen_variable = True else: tokens.append((token.ERRORTOKEN, '$')) variable_started = False if toknum == token.ERRORTOKEN and tokval == '$': variable_started = True else: tokens.append((toknum, tokval)) if seen_variable: return untokenize(tokens).strip() return expression def _create_evaluation_namespace(self, namespace, modules): namespace = dict(namespace or {}) modules = modules.replace(' ', '').split(',') if modules else [] namespace.update((m, __import__(m)) for m in modules if m) return namespace def _decorate_variables_for_evaluation(self, variables): decorated = [('RF_VAR_' + name, value) for name, value in variables.items()] return NormalizedDict(decorated, ignore='_') def call_method(self, object, method_name, *args, **kwargs): """Calls the named method of the given object with the provided arguments. The possible return value from the method is returned and can be assigned to a variable. Keyword fails both if the object does not have a method with the given name or if executing the method raises an exception. Support for ``**kwargs`` is new in Robot Framework 2.9. Since that possible equal signs in other arguments must be escaped with a backslash like ``\\=``. Examples: | Call Method | ${hashtable} | put | myname | myvalue | | ${isempty} = | Call Method | ${hashtable} | isEmpty | | | Should Not Be True | ${isempty} | | | | | ${value} = | Call Method | ${hashtable} | get | myname | | Should Be Equal | ${value} | myvalue | | | | Call Method | ${object} | kwargs | name=value | foo=bar | | Call Method | ${object} | positional | escaped\\=equals | """ try: method = getattr(object, method_name) except AttributeError: raise RuntimeError("Object '%s' does not have method '%s'." % (object, method_name)) try: return method(*args, **kwargs) except: raise RuntimeError("Calling method '%s' failed: %s" % (method_name, get_error_message())) def regexp_escape(self, *patterns): """Returns each argument string escaped for use as a regular expression. 
This keyword can be used to escape strings to be used with `Should Match Regexp` and `Should Not Match Regexp` keywords. Escaping is done with Python's ``re.escape()`` function. Examples: | ${escaped} = | Regexp Escape | ${original} | | @{strings} = | Regexp Escape | @{strings} | """ if len(patterns) == 0: return '' if len(patterns) == 1: return re.escape(patterns[0]) return [re.escape(p) for p in patterns] def set_test_message(self, message, append=False): """Sets message for the current test case. If the optional ``append`` argument is given a true value (see `Boolean arguments`), the given ``message`` is added after the possible earlier message by joining the messages with a space. In test teardown this keyword can alter the possible failure message, but otherwise failures override messages set by this keyword. Notice that in teardown the message is available as a built-in variable ``${TEST MESSAGE}``. It is possible to use HTML format in the message by starting the message with ``*HTML*``. Examples: | Set Test Message | My message | | | Set Test Message | is continued. | append=yes | | Should Be Equal | ${TEST MESSAGE} | My message is continued. | | Set Test Message | `*`HTML`*` <b>Hello!</b> | | This keyword can not be used in suite setup or suite teardown. Support for ``append`` was added in Robot Framework 2.7.7 and support for HTML format in 2.8. """ test = self._context.test if not test: raise RuntimeError("'Set Test Message' keyword cannot be used in " "suite setup or teardown.") test.message = self._get_possibly_appended_value(test.message, message, append) if self._context.in_test_teardown: self._variables.set_test("${TEST_MESSAGE}", test.message) message, level = self._get_logged_test_message_and_level(test.message) self.log('Set test message to:\n%s' % message, level) def _get_possibly_appended_value(self, initial, new, append): if not is_unicode(new): new = unic(new) if is_truthy(append) and initial: return '%s %s' % (initial, new) return new def _get_logged_test_message_and_level(self, message): if message.startswith('*HTML*'): return message[6:].lstrip(), 'HTML' return message, 'INFO' def set_test_documentation(self, doc, append=False): """Sets documentation for the current test case. By default the possible existing documentation is overwritten, but this can be changed using the optional ``append`` argument similarly as with `Set Test Message` keyword. The current test documentation is available as a built-in variable ``${TEST DOCUMENTATION}``. This keyword can not be used in suite setup or suite teardown. New in Robot Framework 2.7. Support for ``append`` was added in 2.7.7. """ test = self._context.test if not test: raise RuntimeError("'Set Test Documentation' keyword cannot be " "used in suite setup or teardown.") test.doc = self._get_possibly_appended_value(test.doc, doc, append) self._variables.set_test('${TEST_DOCUMENTATION}', test.doc) self.log('Set test documentation to:\n%s' % test.doc) def set_suite_documentation(self, doc, append=False, top=False): """Sets documentation for the current test suite. By default the possible existing documentation is overwritten, but this can be changed using the optional ``append`` argument similarly as with `Set Test Message` keyword. This keyword sets the documentation of the current suite by default. If the optional ``top`` argument is given a true value (see `Boolean arguments`), the documentation of the top level suite is altered instead. 
The documentation of the current suite is available as a built-in variable ``${SUITE DOCUMENTATION}``. New in Robot Framework 2.7. Support for ``append`` and ``top`` were added in 2.7.7. """ top = is_truthy(top) suite = self._get_context(top).suite suite.doc = self._get_possibly_appended_value(suite.doc, doc, append) self._variables.set_suite('${SUITE_DOCUMENTATION}', suite.doc, top) self.log('Set suite documentation to:\n%s' % suite.doc) def set_suite_metadata(self, name, value, append=False, top=False): """Sets metadata for the current test suite. By default possible existing metadata values are overwritten, but this can be changed using the optional ``append`` argument similarly as with `Set Test Message` keyword. This keyword sets the metadata of the current suite by default. If the optional ``top`` argument is given a true value (see `Boolean arguments`), the metadata of the top level suite is altered instead. The metadata of the current suite is available as a built-in variable ``${SUITE METADATA}`` in a Python dictionary. Notice that modifying this variable directly has no effect on the actual metadata the suite has. New in Robot Framework 2.7.4. Support for ``append`` and ``top`` were added in 2.7.7. """ top = is_truthy(top) if not is_unicode(name): name = unic(name) metadata = self._get_context(top).suite.metadata original = metadata.get(name, '') metadata[name] = self._get_possibly_appended_value(original, value, append) self._variables.set_suite('${SUITE_METADATA}', metadata.copy(), top) self.log("Set suite metadata '%s' to value '%s'." % (name, metadata[name])) def set_tags(self, *tags): """Adds given ``tags`` for the current test or all tests in a suite. When this keyword is used inside a test case, that test gets the specified tags and other tests are not affected. If this keyword is used in a suite setup, all test cases in that suite, recursively, gets the given tags. It is a failure to use this keyword in a suite teardown. The current tags are available as a built-in variable ``@{TEST TAGS}``. See `Remove Tags` if you want to remove certain tags and `Fail` if you want to fail the test case after setting and/or removing tags. """ ctx = self._context if ctx.test: ctx.test.tags.add(tags) ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags)) elif not ctx.in_suite_teardown: ctx.suite.set_tags(tags, persist=True) else: raise RuntimeError("'Set Tags' cannot be used in suite teardown.") self.log('Set tag%s %s.' % (s(tags), seq2str(tags))) def remove_tags(self, *tags): """Removes given ``tags`` from the current test or all tests in a suite. Tags can be given exactly or using a pattern where ``*`` matches anything and ``?`` matches one character. This keyword can affect either one test case or all test cases in a test suite similarly as `Set Tags` keyword. The current tags are available as a built-in variable ``@{TEST TAGS}``. Example: | Remove Tags | mytag | something-* | ?ython | See `Set Tags` if you want to add certain tags and `Fail` if you want to fail the test case after setting and/or removing tags. """ ctx = self._context if ctx.test: ctx.test.tags.remove(tags) ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags)) elif not ctx.in_suite_teardown: ctx.suite.set_tags(remove=tags, persist=True) else: raise RuntimeError("'Remove Tags' cannot be used in suite teardown.") self.log('Removed tag%s %s.' % (s(tags), seq2str(tags))) def get_library_instance(self, name=None, all=False): """Returns the currently active instance of the specified test library. 
        This keyword makes it easy for test libraries to interact with
        other test libraries that have state. This is illustrated by
        the Python example below:

        | from robot.libraries.BuiltIn import BuiltIn
        |
        | def title_should_start_with(expected):
        |     seleniumlib = BuiltIn().get_library_instance('SeleniumLibrary')
        |     title = seleniumlib.get_title()
        |     if not title.startswith(expected):
        |         raise AssertionError("Title '%s' did not start with '%s'"
        |                              % (title, expected))

        It is also possible to use this keyword in the test data and
        pass the returned library instance to another keyword. If a
        library is imported with a custom name, the ``name`` used to get
        the instance must be that name and not the original library name.

        If the optional argument ``all`` is given a true value, then
        a dictionary mapping all library names to instances will be
        returned. This feature is new in Robot Framework 2.9.2.

        Example:
        | &{all libs} = | Get library instance | all=True |
        """
        if is_truthy(all):
            return self._namespace.get_library_instances()
        try:
            return self._namespace.get_library_instance(name)
        except DataError as err:
            raise RuntimeError(unic(err))


class BuiltIn(_Verify, _Converter, _Variables, _RunKeyword, _Control, _Misc):
    """An always available standard library with often needed keywords.

    ``BuiltIn`` is Robot Framework's standard library that provides a set
    of generic keywords needed often. It is imported automatically and thus
    always available. The provided keywords can be used, for example, for
    verifications (e.g. `Should Be Equal`, `Should Contain`), conversions
    (e.g. `Convert To Integer`) and for various other purposes (e.g. `Log`,
    `Sleep`, `Run Keyword If`, `Set Global Variable`).

    == Table of contents ==

    - `HTML error messages`
    - `Evaluating expressions`
    - `Boolean arguments`
    - `Multiline string comparisons`
    - `Shortcuts`
    - `Keywords`

    = HTML error messages =

    Many of the keywords accept an optional error message to use if the
    keyword fails. Starting from Robot Framework 2.8, it is possible to use
    HTML in these messages by prefixing them with ``*HTML*``. See `Fail`
    keyword for a usage example. Notice that using HTML in messages is not
    limited to BuiltIn library but works with any error message.

    = Evaluating expressions =

    Many keywords, such as `Evaluate`, `Run Keyword If` and `Should Be True`,
    accept an expression that is evaluated in Python. These expressions are
    evaluated using Python's
    [https://docs.python.org/2/library/functions.html#eval|eval] function so
    that all Python built-ins like ``len()`` and ``int()`` are available.
    `Evaluate` allows configuring the execution namespace with custom
    modules, and other keywords have
    [https://docs.python.org/2/library/os.html|os] and
    [https://docs.python.org/2/library/sys.html|sys] modules available
    automatically.

    Examples:
    | `Run Keyword If` | os.sep == '/' | Log | Not on Windows |
    | ${random int} =  | `Evaluate`    | random.randint(0, 5) | modules=random |

    When a variable is used in the expression using the normal ``${variable}``
    syntax, its value is replaced before the expression is evaluated. This
    means that the value used in the expression will be the string
    representation of the variable value, not the variable value itself.
    This is not a problem with numbers and other objects that have a string
    representation that can be evaluated directly, but with other objects the
    behavior depends on the string representation. Most importantly, strings
    must always be quoted, and if they can contain newlines, they must be
    triple quoted.
Examples: | `Should Be True` | ${rc} < 10 | Return code greater than 10 | | `Run Keyword If` | '${status}' == 'PASS' | Log | Passed | | `Run Keyword If` | 'FAIL' in '''${output}''' | Log | Output contains FAIL | Starting from Robot Framework 2.9, variables themselves are automatically available in the evaluation namespace. They can be accessed using special variable syntax without the curly braces like ``$variable``. These variables should never be quoted, and in fact they are not even replaced inside strings. Examples: | `Should Be True` | $rc < 10 | Return code greater than 10 | | `Run Keyword If` | $status == 'PASS' | `Log` | Passed | | `Run Keyword If` | 'FAIL' in $output | `Log` | Output contains FAIL | | `Should Be True` | len($result) > 1 and $result[1] == 'OK' | Notice that instead of creating complicated expressions, it is often better to move the logic into a test library. = Boolean arguments = Some keywords accept arguments that are handled as Boolean values true or false. If such an argument is given as a string, it is considered false if it is either empty or case-insensitively equal to ``false`` or ``no``. Keywords verifying something that allow dropping actual and expected values from the possible error message also consider string ``no values`` as false. Other strings are considered true regardless their value, and other argument types are tested using same [http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules as in Python]. True examples: | `Should Be Equal` | ${x} | ${y} | Custom error | values=True | # Strings are generally true. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=yes | # Same as the above. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${TRUE} | # Python ``True`` is true. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${42} | # Numbers other than 0 are true. | False examples: | `Should Be Equal` | ${x} | ${y} | Custom error | values=False | # String ``false`` is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=no | # Also string ``no`` is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${EMPTY} | # Empty string is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${FALSE} | # Python ``False`` is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=no values | # ``no values`` works with ``values`` argument | Note that prior to Robot Framework 2.9 some keywords considered all non-empty strings, including ``false`` and ``no``, to be true. = Multiline string comparisons = `Should Be Equal` and `Should Be Equal As Strings` report the failures using [https://en.wikipedia.org/wiki/Diff_utility#Unified_format|unified diff format] if both strings have more than two lines. New in Robot Framework 2.9.1. Example: | ${first} = | `Catenate` | SEPARATOR=\\n | Not in second | Same | Differs | Same | | ${second} = | `Catenate` | SEPARATOR=\\n | Same | Differs2 | Same | Not in first | | `Should Be Equal` | ${first} | ${second} | Results in the following error message: | Multiline strings are different: | --- first | +++ second | @@ -1,4 +1,4 @@ | -Not in second | Same | -Differs | +Differs2 | Same | +Not in first """ ROBOT_LIBRARY_SCOPE = 'GLOBAL' ROBOT_LIBRARY_VERSION = get_version() class RobotNotRunningError(AttributeError): """Used when something cannot be done because Robot is not running. Based on AttributeError to be backwards compatible with RF < 2.8.5. 
May later be based directly on Exception, so new code should except this exception explicitly. """ pass def register_run_keyword(library, keyword, args_to_process=None, deprecation_warning=True): """Registers 'run keyword' so that its arguments can be handled correctly. *NOTE:* This API will change in RF 3.1. For more information see https://github.com/robotframework/robotframework/issues/2190. Use with `deprecation_warning=False` to avoid related deprecation warnings. 1) Why is this method needed Keywords running other keywords internally (normally using `Run Keyword` or some variants of it in BuiltIn) must have the arguments meant to the internally executed keyword handled specially to prevent processing them twice. This is done ONLY for keywords registered using this method. If the register keyword has same name as any keyword from Robot Framework standard libraries, it can be used without getting warnings. Normally there is a warning in such cases unless the keyword is used in long format (e.g. MyLib.Keyword). Keywords executed by registered run keywords can be tested in dry-run mode if they have 'name' argument which takes the name of the executed keyword. 2) How to use this method `library` is the name of the library where the registered keyword is implemented. `keyword` can be either a function or method implementing the keyword, or name of the implemented keyword as a string. `args_to_process` is needed when `keyword` is given as a string, and it defines how many of the arguments to the registered keyword must be processed normally. When `keyword` is a method or function, this information is got directly from it so that varargs (those specified with syntax '*args') are not processed but others are. 3) Examples from robot.libraries.BuiltIn import BuiltIn, register_run_keyword def my_run_keyword(name, *args): # do something return BuiltIn().run_keyword(name, *args) # Either one of these works register_run_keyword(__name__, my_run_keyword) register_run_keyword(__name__, 'My Run Keyword', 1) ------------- from robot.libraries.BuiltIn import BuiltIn, register_run_keyword class MyLibrary: def my_run_keyword_if(self, expression, name, *args): # do something return BuiltIn().run_keyword_if(expression, name, *args) # Either one of these works register_run_keyword('MyLibrary', MyLibrary.my_run_keyword_if) register_run_keyword('MyLibrary', 'my_run_keyword_if', 2) """ RUN_KW_REGISTER.register_run_keyword(library, keyword, args_to_process, deprecation_warning)
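

# --- Added usage sketch, not part of the original module ---
# A hedged illustration of the pattern documented above for driving BuiltIn
# keywords from Python library code: read a built-in variable and log it.
# The helper class and method names are assumptions for illustration only;
# BuiltIn, get_variable_value and log come from the library defined above.
# Calling the method requires an active Robot Framework execution context,
# otherwise RobotNotRunningError is raised.
class _ExampleBuiltInUser(object):
    """Illustrative helper only; not part of the public BuiltIn API."""

    def log_current_suite_source(self):
        builtin = BuiltIn()
        # 'Get Variable Value' and 'Log' are standard BuiltIn keywords.
        source = builtin.get_variable_value('${SUITE SOURCE}')
        builtin.log('Running tests from %s' % source, level='INFO')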
jaloren/robotframework
src/robot/libraries/BuiltIn.py
Python
apache-2.0
146,969
#!/usr/bin/env python # Google Code Jam 2017. Round 1B # Problem B. Stable Neigh-bors # # * Problem # You are lucky enough to own N pet unicorns. Each of your unicorns has # either one or two of the following kinds of hairs in its mane: red hairs, # yellow hairs, and blue hairs. The color of a mane depends on exactly which # sorts of colored hairs it contains: # - A mane with only one color of hair appears to be that color. For example, # a mane with only blue hairs is blue. # - A mane with red and yellow hairs appears orange. # - A mane with yellow and blue hairs appears green. # - A mane with red and blue hairs appears violet. # You have R, O, Y, G, B, and V unicorns with red, orange, yellow, green, # blue, and violet manes, respectively. # You have just built a circular stable with N stalls, arranged in a ring # such that each stall borders two other stalls. You would like to put exactly # one of your unicorns in each of these stalls. However, unicorns need to feel # rare and special, so no unicorn can be next to another unicorn that shares at # least one of the hair colors in its mane. For example, a unicorn with an # orange mane cannot be next to a unicorn with a violet mane, since both of # those manes have red hairs. Similarly, a unicorn with a green mane cannot be # next to a unicorn with a yellow mane, since both of those have yellow hairs. # Is it possible to place all of your unicorns? If so, provide any one # arrangement. # # * Input # The first line of the input gives the number of test cases, T. T test cases # follow. Each consists of one line with seven integers: N, R, O, Y, G, B, # and V. # # * Output # For each test case, output one line containing Case #x: y, where x is the # test case number (starting from 1) and y is IMPOSSIBLE if it is not # possible to place all the unicorns, or a string of N characters # representing the placements of unicorns in stalls, starting at a point of # your choice and reading clockwise around the circle. Use R to represent # each unicorn with a red mane, O to represent each unicorn with an orange # mane, and so on with Y, G, B, and V. This arrangement must obey the rules # described in the statement above. # If multiple arrangements are possible, you may print any of them. # # * Limits # 1 ≤ T ≤ 100. # 3 ≤ N ≤ 1000. # R + O + Y + G + B + V = N. # 0 ≤ Z for each Z in {R, O, Y, G, B, V}. # Small dataset: O = G = V = 0. # (Each unicorn has only one hair color in its mane.) # Large dataset: No restrictions beyond the general limits. # (Each unicorn may have either one or two hair colors in its # mane.) # # * Sample # Input Output # 4 # 6 2 0 2 0 2 0 Case #1: RYBRBY # 3 1 0 2 0 0 0 Case #2: IMPOSSIBLE # 6 2 0 1 1 2 0 Case #3: YBRGRB # 4 0 0 2 0 0 2 Case #4: YVYV # Note that the last two sample cases would not appear in the Small dataset. # For sample case #1, there are many possible answers; for example, another # is BYBRYR. Note that BYRYRB would not be a valid answer; remember that the # stalls form a ring, and the first touches the last! # In sample case #2, there are only three stalls, and each stall is a # neighbor of the other two, so the two unicorns with yellow manes would # have to be neighbors, which is not allowed. # For sample case #3, note that arranging the unicorns in the same color # pattern as the Google logo (BRYBGR) would not be valid, since a unicorn # with a blue mane would be a neighbor of a unicorn with a green mane, # and both of those manes share blue hairs. 
# In sample case #4, no two unicorns with yellow manes can be neighbors, # and no two unicorns with violet manes can be neighbors. __author__ = "Krzysztof Kutt" __copyright__ = "Copyright 2017, Krzysztof Kutt" import sys import io def are_equal(elem1, elem2): if elem1 == elem2: return True if elem1 in ["O", "G", "V"] and elem2 in ["O", "G", "V"]: return True elems = [] for elem_ in [elem1, elem2]: if elem_ == "O": elems.append("R") elems.append("Y") elif elem_ == "G": elems.append("B") elems.append("Y") elif elem_ == "V": elems.append("B") elems.append("R") else: elems.append(elem_) elems.sort() for i in range(1, len(elems)): if elems[i-1] == elems[i]: return True return False def check_sequence(sequence): for i in range(len(sequence)-1): if are_equal(sequence[i], sequence[i+1]): return False if are_equal(sequence[-1], sequence[0]): return False return True def arrange_unicorns(count, unicorns): """ Check if arrangement for the set of unicorns is possible (accordingly to the specification) :param count: number of unicorns :param unicorns: list of [R, O, Y, G, B, V] :return: arrangement or "IMPOSSIBLE" if it is impossible to do this """ R, O, Y, G, B, V = unicorns RR = V + R + O YY = O + Y + G BB = G + B + V if RR > count/2 or YY > count/2 or BB > count/2: # if there are more that the half of unicorns in the one color, # there is no possibility to arrange them return "IMPOSSIBLE" letters = ["R", "O", "Y", "G", "B", "V"] # generate empty stable sequence = [""] * count # current stall current = 0 # start with the color that have the biggest number (to ensure that the # last won't be near the first one from this color) if RR > YY and RR > BB: index = 5 elif YY > RR and YY > BB: index = 1 elif BB > RR and BB > YY: index = 3 else: index = 0 for uni in range(1, len(unicorns)): if unicorns[uni] > unicorns[index]: index = uni while index not in [1, 3, 5]: index = index-1 if index-1 >= 0 else 5 # print("Index {} = {}".format(index, letters[index])) # set them every 2 places so the ones with the same color (even if they # have two colors!) won't be neighbours for _1 in range(len(unicorns)): for _2 in range(unicorns[index]): sequence[current] = letters[index] current = current+2 if current+2 < count else 1 index = index+1 if index+1 < len(unicorns) else 0 if not check_sequence(sequence): # print("{} {}".format(unicorns, count/2)) # FIXME It does not work! My solution sometimes generates bad # sequences (the first and the last one are the same; sometimes it is # moved, but the problem is always the same) return "IMPOSSIBLE" return "".join(sequence) if __name__ == '__main__': # FIXME Comment this line before sending to Google! sys.stdin = io.StringIO("".join(open("sample.in", "r").readlines())) t = int(input()) # read a line with a single integer for case in range(1, t + 1): unicorns_ = [int(s) for s in input().split(" ")] print("Case #{}: {}".format(case, arrange_unicorns(unicorns_[0], unicorns_[1:])))
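

# --- Added self-check sketch, not part of the original solution ---
# Runs arrange_unicorns on the four sample cases quoted in the problem
# statement above and validates every non-IMPOSSIBLE answer with
# check_sequence. Handy for reproducing the failures mentioned in the FIXME
# inside arrange_unicorns. Only the sample data is taken from the problem
# statement; the helper name and output format are assumptions.
def _self_check():
    samples = [
        (6, [2, 0, 2, 0, 2, 0]),  # solvable, e.g. RYBRBY
        (3, [1, 0, 2, 0, 0, 0]),  # IMPOSSIBLE
        (6, [2, 0, 1, 1, 2, 0]),  # solvable, e.g. YBRGRB
        (4, [0, 0, 2, 0, 0, 2]),  # solvable, e.g. YVYV
    ]
    for count, unicorns in samples:
        result = arrange_unicorns(count, unicorns)
        valid = result == "IMPOSSIBLE" or check_sequence(result)
        print("N={} {} -> {} (valid ring: {})".format(count, unicorns,
                                                      result, valid))
# Call _self_check() manually (e.g. from an interactive session) to run it.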
kkutt/codejam
codejam_2017/round_1b/stableneighbors/StableNeighBors.py
Python
mit
7,160
########################################################### # # Copyright (c) 2005, Southpaw Technology # All Rights Reserved # # PROPRIETARY INFORMATION. This software is proprietary to # Southpaw Technology, and is not to be reproduced, transmitted, # or disclosed in any way without written permission. # # # __all__ = ['CsvExportWdg', 'CsvImportWdg'] import csv, os import string import datetime from pyasm.biz import CsvParser, File, Project from pyasm.search import Search, SObjectFactory, SearchType, SearchKey from pyasm.command import Command, FileUpload from pyasm.web import HtmlElement, SpanWdg, DivWdg, Table, WebContainer, Widget, FloatDivWdg from pyasm.widget import CheckboxWdg, IconSubmitWdg, HiddenRowToggleWdg, HiddenWdg, WidgetConfigView, ProdIconButtonWdg, TextWdg, TextAreaWdg, IconWdg, ProgressWdg, HintWdg, SelectWdg from pyasm.common import Common, Environment, TacticException from tactic.ui.common import BaseRefreshWdg from misc_input_wdg import SearchTypeSelectWdg from upload_wdg import SimpleUploadWdg from button_new_wdg import ActionButtonWdg from swap_display_wdg import SwapDisplayWdg class CsvExportWdg(BaseRefreshWdg): def get_args_keys(my): return {'search_type': 'Search Type', \ 'view': 'View of the search type', \ 'related_view': 'Related View of search type', 'mode': 'export mode',\ 'selected_search_keys': 'Selected Search Keys', 'search_class': 'Custom search class used', } def init(my): my.search_type = my.kwargs.get('search_type') # reconstruct the full search_type if it's base SType if my.search_type.find("?") == -1: project_code = Project.get_project_code() my.search_type = SearchType.build_search_type(my.search_type, project_code) my.view = my.kwargs.get('view') my.element_names = my.kwargs.get('element_names') my.related_view = my.kwargs.get('related_view') my.search_class = my.kwargs.get('search_class') my.search_view = my.kwargs.get('search_view') my.simple_search_view = my.kwargs.get('simple_search_view') my.mode = my.kwargs.get('mode') my.close_cbfn = my.kwargs.get('close_cbfn') my.input_search_keys = my.kwargs.get('selected_search_keys') my.selected_search_keys = [] my.error_msg = '' my.search_type_list = [] my.is_test = my.kwargs.get('test') == True my.table = None def check(my): if my.mode == 'export_matched': from tactic.ui.panel import TableLayoutWdg my.table = TableLayoutWdg(search_type=my.search_type, view=my.view,\ show_search_limit='false', search_limit=-1, search_view=my.search_view,\ search_class=my.search_class, simple_search_view=my.simple_search_view, init_load_num=-1) my.table.handle_search() search_objs = my.table.sobjects my.selected_search_keys = SearchKey.get_by_sobjects(search_objs, use_id=True) return True for sk in my.input_search_keys: st = SearchKey.extract_search_type(sk) if st not in my.search_type_list: my.search_type_list.append(st) id = SearchKey.extract_id(sk) if id == '-1': continue my.selected_search_keys.append(sk) if len(my.search_type_list) > 1: my.check_passed = False my.error_msg = 'More than 1 search type is selected. Please keep the selection to one type only.' return False if not my.search_type_list and my.mode == 'export_selected': my.check_passed = False my.error_msg = 'Search type cannot be identified. Please select a valid item.' 
return False return True def get_display(my): top = my.top top.add_color("background", "background") top.add_color("color", "color") top.add_style("padding: 10px") top.add_style("min-width: 400px") from tactic.ui.app import HelpButtonWdg help_wdg = HelpButtonWdg(alias="exporting-csv-data") top.add(help_wdg) help_wdg.add_style("float: right") help_wdg.add_style("margin-top: -3px") if not my.check(): top.add(DivWdg('Error: %s' %my.error_msg)) top.add(HtmlElement.br(2)) return super(CsvExportWdg, my).get_display() if my.search_type_list and my.search_type_list[0] != my.search_type: st = SearchType.get(my.search_type_list[0]) title_div =DivWdg('Exporting related items [%s]' % st.get_title()) top.add(title_div) top.add(HtmlElement.br()) my.search_type = my.search_type_list[0] my.view = my.related_view if my.mode != 'export_all': num = len(my.selected_search_keys) else: search = Search(my.search_type) num = search.get_count() msg_div = DivWdg('Total: %s items to export'% num) msg_div.add_style("font-size: 12px") msg_div.add_style("font-weight: bold") msg_div.add_style('margin-left: 4px') top.add(msg_div) if num > 300: msg_div.add_behavior({'type':'load', 'cbjs_action': "spt.alert('%s items are about to be exported. It may take a while.')" %num}) top.add(HtmlElement.br()) div = DivWdg(css='spt_csv_export', id='csv_export_action') div.add_color("background", "background", -10) div.add_style("padding: 10px") div.add_style("margin: 5px") div.add_styles('max-height: 350px; overflow: auto') table = Table( css='minimal') table.add_color("color", "color") div.add(table) table.set_id('csv_export_table') table.center() cb_name = 'csv_column_name' master_cb = CheckboxWdg('master_control') master_cb.set_checked() master_cb.add_behavior({'type': 'click_up', 'propagate_evt': True, 'cbjs_action': ''' var inputs = spt.api.Utility.get_inputs(bvr.src_el.getParent('.spt_csv_export'),'%s'); for (var i = 0; i < inputs.length; i++) inputs[i].checked = !inputs[i].checked; ''' %cb_name}) span = SpanWdg('Select Columns To Export') span.add_style('font-weight','600') table.add_row_cell(span) table.add_row_cell(HtmlElement.br()) tr = table.add_row() tr.add_style('border-bottom: 1px groove #777') td = table.add_cell(master_cb) label = HtmlElement.i('toggle all') label.add_style('color: #888') table.add_cell(label) col1 = table.add_col() col1.add_style('width: 35px') col2 = table.add_col() if not my.search_type or not my.view: return table # use overriding element names and derived titles if available config = WidgetConfigView.get_by_search_type(my.search_type, my.view) if my.element_names and config: filtered_columns = my.element_names titles = [] for name in my.element_names: title = config.get_element_title(name) titles.append(title) else: # excluding FunctionalTableElement filtered_columns = [] titles = [] if not config: columns = search.get_columns() filtered_columns = columns titles = ['n/a'] * len(filtered_columns) else: columns = config.get_element_names() filtered_columns = columns titles = config.get_element_titles() """ # commented out until it is decided 2.5 widgets will # use this class to differentiate between reg and functional element from pyasm.widget import FunctionalTableElement for column in columns: widget = config.get_display_widget(column) if isinstance(widget, FunctionalTableElement): continue filtered_columns.append(column) """ for idx, column in enumerate(filtered_columns): table.add_row() cb = CheckboxWdg(cb_name) cb.set_option('value', column) cb.set_checked() table.add_cell(cb) title = 
titles[idx] table.add_cell('<b>%s</b> (%s) '%(title, column)) action_div = DivWdg() widget = DivWdg() table.add_row_cell(widget) widget.add_style("margin: 20px 0 10px 0px") cb = CheckboxWdg('include_id', label=" Include ID") cb.set_default_checked() widget.add(cb) hint = HintWdg('To update entries with specific ID later, please check this option. For new inserts in this or other table later on, uncheck this option.') widget.add(hint) label = string.capwords(my.mode.replace('_', ' ')) button = ActionButtonWdg(title=label, size='l') is_export_all = my.mode == 'export_all' button.add_behavior({ 'type': "click_up", 'cbfn_action': 'spt.dg_table_action.csv_export', 'element': 'csv_export', 'column_names': 'csv_column_name', 'search_type': my.search_type, 'view': my.view, 'search_keys' : my.selected_search_keys, 'is_export_all' : is_export_all }) my.close_action = "var popup = bvr.src_el.getParent('.spt_popup');spt.popup.close(popup)" if my.close_action: close_button = ActionButtonWdg(title='Close') close_button.add_behavior({ 'type': "click", 'cbjs_action': my.close_action }) table = Table() action_div.add(table) table.center() table.add_row() td = table.add_cell(button) td.add_style("width: 130px") table.add_cell(close_button) action_div.add("<br clear='all'/>") top.add(div) top.add(HtmlElement.br()) top.add(action_div) if my.is_test: rtn_data = {'columns': my.element_names, 'count': len(my.selected_search_keys)} if my.mode == 'export_matched': rtn_data['sql'] = my.table.search_wdg.search.get_statement() from pyasm.common import jsondumps rtn_data = jsondumps(rtn_data) return rtn_data return top class CsvImportWdg(BaseRefreshWdg): def get_args_keys(my): return { 'search_type': 'Search Type to import'} def init(my): web = WebContainer.get_web() my.is_refresh = my.kwargs.get('is_refresh') my.search_type = my.kwargs.get('search_type') if not my.search_type: my.search_type = web.get_form_value('search_type_filter') my.close_cbfn = my.kwargs.get('close_cbfn') my.data = web.get_form_value("data") my.web_url = web.get_form_value("web_url") my.file_path = None if my.web_url: import urllib2 response = urllib2.urlopen(my.web_url) csv = response.read() my.file_path = "/tmp/test.csv" f = open(my.file_path, 'w') f.write(csv) f.close() if not my.file_path: my.file_path = web.get_form_value('file_path') if not my.file_path: ticket = web.get_form_value('html5_ticket') if not ticket: ticket = web.get_form_value('csv_import|ticket') file_name = web.get_form_value('file_name') if my.data: if not file_name: file_name = "%s.csv" % ticket my.file_path = '%s/%s' %(web.get_upload_dir(ticket=ticket), file_name) f = open(my.file_path, "wb") f.write(my.data) f.close() elif file_name: my.file_path = '%s/%s' %(web.get_upload_dir(ticket=ticket), file_name) def get_display(my): widget = DivWdg() if my.kwargs.get("is_refresh") == 'true': from tactic.ui.widget import TitleWdg title = TitleWdg(name_of_title='Import CSV',help_alias='importing-csv-data') widget.add(title) widget.add_style('padding: 10px') widget.add_style('font-size: 12px') #widget.add_border() widget.add_color('color','color') widget.add_color('background','background') widget.add_class("spt_import_top") inner = DivWdg() widget.add(inner) inner.add( my.get_first_row_wdg() ) inner.add(ProgressWdg()) if my.is_refresh: return inner else: return widget def get_upload_wdg(my): '''get search type select and upload wdg''' key = 'csv_import' widget = DivWdg(css='spt_import_csv') widget.add_color('color','color') widget.add_color('background','background') 
widget.add_style('width: 600px') # get the search type stype_div = DivWdg() widget.add(stype_div) # DEPRECATED # handle new search_types """ new_search_type = CheckboxWdg("new_search_type_checkbox") new_search_type.add_event("onclick", "toggle_display('new_search_type_div')") new_search_type_div = DivWdg() new_search_type_div.set_id("new_search_type_div") name_input = TextWdg("asset_name") title = TextWdg("asset_title") description = TextAreaWdg("asset_description") table = Table() table.set_id('csv_main_body') table.add_style("margin: 10px 10px") table.add_col().set_attr('width','140') table.add_col().set_attr('width','400') table.add_row() table.add_header("Search Type: ").set_attr('align','left') table.add_cell(name_input) table.add_row() table.add_header("Title: ").set_attr('align','left') table.add_cell(title) table.add_row() table.add_header("Description: ").set_attr('align','left') table.add_cell(description) new_search_type_div.add(table) new_search_type_div.add_style("display: none") #widget.add(new_search_type_div) """ show_stype_select = my.kwargs.get("show_stype_select") if show_stype_select in ['true',True] or not my.search_type: title = DivWdg("<b>Select sType to import data into:</b>&nbsp;&nbsp;") stype_div.add( title ) title.add_style("float: left") search_type_select = SearchTypeSelectWdg("search_type_filter", mode=SearchTypeSelectWdg.ALL) search_type_select.add_empty_option("-- Select --") if not search_type_select.get_value(): search_type_select.set_value(my.search_type) search_type_select.set_persist_on_submit() stype_div.add(search_type_select) search_type_select.add_behavior( {'type': 'change', \ 'cbjs_action': "spt.panel.load('csv_import_main','%s', {}, {\ 'search_type_filter': bvr.src_el.value});" %(Common.get_full_class_name(my)) } ) else: hidden = HiddenWdg("search_type_filter") stype_div.add(hidden) hidden.set_value(my.search_type) if my.search_type: sobj = None try: sobj = SObjectFactory.create(my.search_type) except ImportError: widget.add(HtmlElement.br()) widget.add(SpanWdg('WARNING: Import Error encountered. 
Please choose another search type.', css='warning')) return widget required_columns = sobj.get_required_columns() if required_columns: widget.add(HtmlElement.br()) req_span = SpanWdg("Required Columns: ", css='med') req_span.add_color('color','color') widget.add(req_span) #required_columns = ['n/a'] req_span.add(', '.join(required_columns)) widget.add( HtmlElement.br() ) if my.file_path: hidden = HiddenWdg("file_path", my.file_path) widget.add(hidden) if my.web_url: file_span = FloatDivWdg('URL: <i>%s</i>&nbsp;&nbsp;&nbsp;' %my.web_url, css='med') else: if not my.data: file_span = FloatDivWdg('File uploaded: <i>%s</i>&nbsp;&nbsp;&nbsp;' %os.path.basename(my.file_path), css='med') else: lines = len(my.data.split("\n")) file_span = FloatDivWdg("Uploaded [%s] lines of entries: &nbsp; " % lines) file_span.add_color('color','color') file_span.add_style('margin: 8px 0 0 10px') file_span.add_style('font-size: 14px') widget.add(file_span) button = ActionButtonWdg(title='Change') button.add_style('float','left') button.add_behavior( {'type': 'click_up', \ 'cbjs_action': "spt.panel.load('csv_import_main','%s', {}, {\ 'search_type_filter': '%s'});" %(Common.get_full_class_name(my), my.search_type) } ) widget.add(button) widget.add("<br clear='all'/>") widget.add(HtmlElement.br()) return widget widget.add_style("overflow-y: auto") msg = DivWdg() widget.add(msg) msg.add_border() msg.add_style("width: 500px") msg.add_color("background", "background3") msg.add_style("padding: 30px") msg.add_style("margin: 10 auto") #msg.add_style("text-align: center") msg.add( "<div style='float: left; padding-top: 6px; margin-right: 105px'><b>Upload a csv file: </b></div>") ticket = Environment.get_security().get_ticket_key() on_complete = '''var server = TacticServerStub.get(); var file = spt.html5upload.get_file(); if (file) { var file_name = file.name; // clean up the file name the way it is done in the server //file_name = spt.path.get_filesystem_name(file_name); var server = TacticServerStub.get(); var class_name = 'tactic.ui.widget.CsvImportWdg'; var values = spt.api.Utility.get_input_values('csv_import_main'); values['is_refresh'] = true; values['file_name'] = file_name; values['html5_ticket'] = '%s'; try { var info = spt.panel.load('csv_import_main', class_name, {}, values); spt.app_busy.hide(); } catch(e) { spt.alert(spt.exception.handler(e)); } } else { alert('Error: file object cannot be found.') } spt.app_busy.hide();'''%ticket from tactic.ui.input import UploadButtonWdg browse = UploadButtonWdg(name='new_csv_upload', title="Browse", tip="Click to choose a csv file",\ on_complete=on_complete, ticket=ticket) browse.add_style('float: left') msg.add(browse) # this is now only used in the copy and paste Upload button for backward-compatibility upload_wdg = SimpleUploadWdg(key=key, show_upload=False) upload_wdg.add_style('display: none') msg.add(upload_wdg) msg.add("<br/>") msg.add("<div style='margin: 30px; text-align: center'>-- OR --</div>") msg.add("<b>Published URL: </b><br/>") from tactic.ui.input import TextInputWdg text = TextInputWdg(name="web_url") text.add_style("width: 100%") msg.add(text) msg.add("<div style='margin: 30px; text-align: center'>-- OR --</div>") msg.add("<b>Copy and Paste from a Spreadsheet: </b><br/>") text = TextAreaWdg("data") text.add_style('width: 100%') text.add_style('height: 100px') text.add_class("spt_import_cut_paste") msg.add(text) msg.add("<br/>"*3) button = ActionButtonWdg(title="Parse") button.add_style("margin: 5px auto") msg.add(button) button.add_behavior( { 'type': 
'click_up', 'cbjs_action': ''' var top = bvr.src_el.getParent(".spt_import_top"); var el = top.getElement(".spt_import_cut_paste"); var value = el.value; var csv = []; // convert to a csv file! lines = value.split("\\n"); for (var i = 0; i < lines.length; i++) { if (lines[i] == '') { continue; } var parts = lines[i].split("\\t"); var new_line = []; for (var j = 0; j < parts.length; j++) { if (parts[j] == '') { new_line.push(''); } else { new_line.push('"'+parts[j]+'"'); } } new_line = new_line.join(","); csv.push(new_line); } csv = csv.join("\\n") /* // FIXME: need to get a local temp directory var applet = spt.Applet.get(); var path = spt.browser.os_is_Windows() ? "C:/sthpw/copy_n_paste.csv" : "/tmp/sthpw/copy_n_paste.csv"; applet.create_file(path, csv); // upload the file applet.upload_file(path) applet.rmtree(path); var top = bvr.src_el.getParent(".spt_import_csv"); var hidden = top.getElement(".spt_upload_hidden"); hidden.value = path; var file_name = spt.path.get_basename(hidden.value); file_name = spt.path.get_filesystem_name(file_name); */ var class_name = 'tactic.ui.widget.CsvImportWdg'; var values = spt.api.Utility.get_input_values('csv_import_main'); values['is_refresh'] = true; //values['file_name'] = file_name; values['data'] = csv; var info = spt.panel.load('csv_import_main', class_name, {}, values); ''' } ) return widget def get_first_row_wdg(my): # read the csv file #my.file_path = "" div = DivWdg(id='csv_import_main') div.add_class('spt_panel') div.add( my.get_upload_wdg() ) if not my.search_type: return div if not my.file_path: return div if not my.file_path.endswith(".csv"): div.add('<br/>') div.add( "Uploaded file [%s] is not a csv file. Refreshing in 3 seconds. . ."% os.path.basename(my.file_path)) div.add_behavior( {'type': 'load', \ 'cbjs_action': "setTimeout(function() {spt.panel.load('csv_import_main','%s', {}, {\ 'search_type_filter': '%s'});}, 3000);" %(Common.get_full_class_name(my), my.search_type) } ) return div if not os.path.exists(my.file_path): raise TacticException("Path '%s' does not exist" % my.file_path) div.add(HtmlElement.br(2)) # NOT NEEDED: clear the widget settings before drawing #expr = "@SOBJECT(sthpw/wdg_settings['key','EQ','pyasm.widget.input_wdg.CheckboxWdg|column_enabled_']['login','$LOGIN']['project_code','$PROJECT'])" #sobjs = Search.eval(expr) #for sobj in sobjs: # sobj.delete(log=False) div.add( HtmlElement.b("The following is taken from the first line in the uploaded csv file. 
Select the appropriate column to match.") ) div.add(HtmlElement.br()) """ text = HtmlElement.b("Make sure you have all the required columns** in the csv.") text.add_style('text-align: left') div.add(text) """ div.add(HtmlElement.br(2)) option_div_top = DivWdg() option_div_top.add_color('color','color') option_div_top.add_color('background','background', -5) option_div_top.add_style("padding: 10px") option_div_top.add_border() option_div_top.add_style("width: auto") swap = SwapDisplayWdg(title="Parsing Options") option_div_top.add(swap) option_div_top.add_style("margin-right: 30px") my.search_type_obj = SearchType.get(my.search_type) option_div = DivWdg() swap.set_content_id(option_div.set_unique_id() ) option_div.add_style("display: none") option_div.add_style('margin-left: 14px') option_div.add_style('margin-top: 10px') option_div.add_style("font-weight: bold") option_div_top.add(option_div) # first row and second row #option_div.add( HtmlElement.br() ) option_div.add(SpanWdg("Use Title Row: ", css='med')) title_row_checkbox = CheckboxWdg("has_title") title_row_checkbox.set_default_checked() title_row_checkbox.add_behavior({'type' : 'click_up', 'propagate_evt': 'true', 'cbjs_action': "spt.panel.refresh('preview_data',\ spt.api.Utility.get_input_values('csv_import_main'))"}) option_div.add(title_row_checkbox) option_div.add( HintWdg("Set this to use the first row as a title row to match up columns in the database") ) option_div.add( HtmlElement.br(2) ) option_div.add(SpanWdg("Use Lowercase Title: ", css='med')) lower_title_checkbox = CheckboxWdg("lowercase_title") lower_title_checkbox.add_behavior({'type' : 'click_up', 'propagate_evt': 'true', 'cbjs_action': "spt.panel.refresh('preview_data',\ spt.api.Utility.get_input_values('csv_import_main'))"}) option_div.add(lower_title_checkbox) option_div.add( HtmlElement.br(2) ) option_div.add(SpanWdg("Sample Data Row: ", css='med')) data_row_text = SelectWdg("data_row") data_row_text.set_option('values', '1|2|3|4|5') data_row_text.set_value('1') data_row_text.add_behavior({'type' : 'change', 'cbjs_action': "spt.panel.refresh('preview_data',\ spt.api.Utility.get_input_values('csv_import_main'))"}) option_div.add(data_row_text) option_div.add( HintWdg("Set this as a sample data row for display here") ) option_div.add( HtmlElement.br(2) ) # encoder option_div.add(SpanWdg("Encoder: ", css='med')) select_wdg = SelectWdg('encoder') select_wdg.set_option('values', ['','utf-8', 'iso_8859-1']) select_wdg.set_option('labels', ['ASCII (default)','UTF-8','Excel ISO 8859-1']) select_wdg.add_behavior({'type' : 'change', 'cbjs_action': "spt.panel.refresh('preview_data',\ spt.api.Utility.get_input_values('csv_import_main'))"}) option_div.add(select_wdg) option_div.add( HtmlElement.br(2) ) option_div.add(SpanWdg("Identifying Column: ", css='med')) select_wdg = SelectWdg('id_col') select_wdg.set_option('empty','true') #columns = my.search_type_obj.get_columns() columns = SearchType.get_columns(my.search_type) # make sure it starts off with id, code where applicable if 'code' in columns: columns.remove('code') columns.insert(0, 'code') if 'id' in columns: columns.remove('id') columns.insert(0, 'id') select_wdg.set_option('values', columns) option_div.add(select_wdg) option_div.add( HintWdg("Set which column to use for identifying an item to update during CSV Import") ) option_div.add( HtmlElement.br(2) ) # triggers mode option_div.add(SpanWdg("Triggers: ", css='med')) select_wdg = SelectWdg('triggers_mode') select_wdg.set_option('values', ['','False', 'True', 
'none']) select_wdg.set_option('labels', ['- Select -','Internal Triggers Only','All Triggers','No Triggers']) select_wdg.add_behavior({'type' : 'change', 'cbjs_action': "spt.panel.refresh('preview_data',\ spt.api.Utility.get_input_values('csv_import_main'))"}) option_div.add(select_wdg) option_div.add( HtmlElement.br(2) ) div.add(option_div_top) my.has_title = title_row_checkbox.is_checked() # need to somehow specify defaults for columns div.add(my.get_preview_wdg()) return div def get_preview_wdg(my): preview = PreviewDataWdg(file_path=my.file_path, search_type = my.search_type) return preview class PreviewDataWdg(BaseRefreshWdg): def init(my): my.is_refresh = my.kwargs.get('is_refresh') my.file_path = my.kwargs.get('file_path') my.search_type = my.kwargs.get('search_type') my.search_type_obj = SearchType.get(my.search_type) web = WebContainer.get_web() my.encoder = web.get_form_value('encoder') title_row_checkbox = CheckboxWdg("has_title") my.has_title = title_row_checkbox.is_checked() lowercase_title_checkbox = CheckboxWdg("lowercase_title") my.lowercase_title = lowercase_title_checkbox.is_checked() def get_column_preview(my, div): # parse the first fow csv_parser = CsvParser(my.file_path) if my.has_title: csv_parser.set_has_title_row(True) else: csv_parser.set_has_title_row(False) if my.lowercase_title: csv_parser.set_lowercase_title(True) if my.encoder: csv_parser.set_encoder(my.encoder) try: csv_parser.parse() # that can be all kinds of encoding/decoding exception except Exception, e: # possibly incompatible encoder selected, use the default instead. # Let the user pick it. span = SpanWdg('WARNING: The selected encoder is not compatible with your csv file. Please choose the proper one (e.g. UTF-8). Refer to the documentation/tutorial on how to save your csv file with UTF-8 encoding if you have special characters in it.', css='warning') div.add(SpanWdg(e.__str__())) div.add(HtmlElement.br()) div.add(span, 'warning') return div csv_titles = csv_parser.get_titles() # for 2nd guess of similar column titles processed_csv_titles = [x.replace(' ', '_').lower() for x in csv_titles] csv_data = csv_parser.get_data() web = WebContainer.get_web() data_row = web.get_form_value('data_row') if not csv_data: div.add(SpanWdg('Your csv file seems to be empty', css='warning')) return div if not data_row: data_row = 0 else: try: data_row = int(data_row) data_row -= 1 except ValueError: data_row = 0 if data_row >= len(csv_data): data_row = len(csv_data)-1 #data_row_text.set_value(data_row) div.add( IconWdg("Important", IconWdg.CREATE) ) div.add("Use the sample row to match which columns the data will be imported into TACTIC<br/><br/>") #table = Table(css='spt_csv_table') table = Table() table.add_color('background','background') table.add_color('color','color') table.add_style("width: 100%") table.set_attr("cellpadding", "7") table.add_border() table.add_row() cb = CheckboxWdg('csv_row') cb.set_default_checked() js = ''' var cbs = bvr.src_el.getParent('.spt_csv_table').getElements('.spt_csv_row'); for (i=0; i < cbs.length; i++){ if (!cbs[i].getAttribute('special')) cbs[i].checked = bvr.src_el.checked; }''' cb.add_behavior({'type': 'click_up', 'propagate_evt': True, 'cbjs_action': js}) th = table.add_header(cb) th.add_gradient("background", "background") th = table.add_header("CSV Column Value") th.add_gradient("background", "background") th.add_class('smaller') th = table.add_header("TACTIC Column") th.add_gradient("background", "background") th.add_class('smaller') th = table.add_header("Create New 
Column") th.add_style('min-width: 100px') th.add_gradient("background", "background") th.add_class('smaller') columns = SearchType.get_columns(my.search_type) sobj = SObjectFactory.create(my.search_type) required_columns = sobj.get_required_columns() row = csv_data[data_row] labels = [] my.num_columns = len(row) hidden = HiddenWdg("num_columns", my.num_columns) div.add(hidden) for column in columns: if column in required_columns: label = '%s**'%column else: label = column labels.append(label) columns.append("(note)") labels.append("(Note)") skipped_columns = [] new_col_indices = [] for j, cell in enumerate(row): # skip extra empty title if j >= len(csv_titles): skipped_columns.append(str(j)) continue column_select = SelectWdg("column_%s" % j) is_new_column = True use_processed = False # only set the value if it is actually in there if csv_titles[j] in columns: column_select.set_option("default", csv_titles[j]) is_new_column = False elif processed_csv_titles[j] in columns: column_select.set_option("default", processed_csv_titles[j]) is_new_column = False use_processed = True sel_val = column_select.get_value() table.add_row() cb = CheckboxWdg('column_enabled_%s' %j) cb.set_default_checked() #cb.set_persistence() cb.set_persist_on_submit() cb.add_class('spt_csv_row') # disable the id column by default if csv_titles[j] in columns and csv_titles[j] == 'id': cb.set_option('special','true') cb.add_behavior({'type':'click_up', #'propagate_evt': True, 'cbjs_action': '''spt.alert('The id column is not meant to be imported. It can only be chosen as an Identifying Column for update purpose.'); bvr.src_el.checked = false;'''}) else: # if it is not a new column, and column selected is empty, we don't select the checkbox by default if sel_val != '' or is_new_column or not my.is_refresh: cb.set_default_checked() table.add_cell(cb) td = table.add_cell(cell) td.add_style("padding: 3px") # this is an optimal max width td.add_style('max-width: 600px') column_select.add_behavior({'type': "change", 'cbjs_action': '''if (bvr.src_el.value !='') { set_display_off('new_column_div_%s'); } else { set_display_on('new_column_div_%s') }; spt.panel.refresh('preview_data', spt.api.Utility.get_input_values('csv_import_main')); '''% (j,j)}) column_select.add_empty_option("(New Column)") column_select.set_persist_on_submit() column_select.set_option("values", columns) column_select.set_option("labels", labels) display = column_select.get_buffer_display() td = table.add_cell( display ) if csv_titles[j] != 'id': if my.is_refresh: if sel_val != '': td.add_color('background','background2') else: if not is_new_column: td.add_color('background','background2') #if is_new_column: if True: # this star is not necessary, and could be misleading if one checks off Use TItle Row #td.add(" <b style='color: red'>*</b>") # new property new_column_div = DivWdg() if sel_val: new_column_div.add_style("display", "none") else: new_column_div.add_style("display", "block") new_column_div.set_id("new_column_div_%s" % j) td = table.add_cell( new_column_div ) if sel_val == '': td.add_color('background','background2') new_col_indices.append(j) text = TextWdg("new_column_%s" % j) text.add_style('border-color: #8DA832') text.set_persist_on_submit() if my.has_title: if use_processed: new_title = processed_csv_titles[j] else: new_title = csv_titles[j] text.set_value(new_title) # prefer to use bg color instead of OR to notify which one is used """ or_span = SpanWdg(" OR ", css='med') or_span.add_color('color','color') new_column_div.add(or_span) """ 
new_column_div.add( text ) if skipped_columns: div.add(SpanWdg('WARNING: Some titles are empty or there are too many data cells. Column index [%s] '\ 'are skipped.' %','.join(skipped_columns), css='warning')) div.add(HtmlElement.br(2)) div.add(table) # Analyze data. It will try to create a timestamp, then integer, then float, then varchar, then text column for idx in new_col_indices: column_types = {} data_cell_list = [] my.CHECK = 5 column_type = '' for k, row in enumerate(csv_data): if k >= len(row): data = '' else: data = row[idx] if data.strip() == '': continue if my.CHECK == 5: column_type = my._check_timestamp(data) if my.CHECK == 4: column_type = my._check_integer(data) if my.CHECK == 3: column_type = my._check_float(data) if my.CHECK == 2: column_type = my._check_varchar(data) # TEST: use democracy to determine type column_type = my._check_timestamp(data) if not column_type: column_type = my._check_integer(data) if not column_type: column_type = my._check_float(data) if not column_type: column_type = my._check_varchar(data) if column_types.get(column_type) == None: column_types[column_type] = 1 else: column_types[column_type] = column_types[column_type] + 1 # max 30 per analysis if k > 30: break largest = 0 for key, num in column_types.items(): if num > largest: column_type = key largest = num #table.add_cell(column_type) hidden = HiddenWdg('new_column_type_%s' %idx, value=column_type) div.add(hidden) def get_display(my): widget = DivWdg(id='preview_data') widget.add_style('padding: 6px') my.set_as_panel(widget) widget.add(SpanWdg(), 'warning') widget.add(HtmlElement.br(2)) my.get_column_preview(widget) web = WebContainer.get_web() csv_parser = CsvParser(my.file_path) if my.encoder: csv_parser.set_encoder(my.encoder) try: csv_parser.parse() # that can be all kinds of encoding/decoding exception except Exception, e: # possibly incompatible encoder selected, use the default instead. # Let the user pick it. span = SpanWdg('WARNING: The selected encoder is not compatible with your csv file. Please choose the proper one. 
Refer to the documentation/tutorial on how to save your csv file with UTF-8 encoding if you have special characters in it.', css='warning') widget.add(span, 'warning') return widget #csv_parser.set_encoder(None) #csv_parser.parse() csv_titles = csv_parser.get_titles() csv_data = csv_parser.get_data() columns = [] num_columns = len(csv_titles) for i in range(0, num_columns): column = web.get_form_value("column_%s" % i) if column: pass #column = csv_titles[i] else: column = web.get_form_value("new_column_%s" % i) columns.append(column) response_div = DivWdg(css='spt_cmd_response') #response_div.add_style('color','#F0C956') response_div.add_color('background','background3') response_div.add_color('color','color3') response_div.add_style('padding: 30px') response_div.add_style('display: none') widget.add(HtmlElement.br()) widget.add(response_div) widget.add(HtmlElement.br(2)) sobject_title = my.search_type_obj.get_title() div = DivWdg(css='spt_csv_sample') widget.add(div) h3 = DivWdg("Preview Data") #h3.add_border() h3.add_color('color','color') #h3.add_gradient('background','background', -5) h3.add("<hr style='dashed'/>") h3.add_style("padding: 5px") h3.add_style("font-weight: bold") h3.add_style("margin-left: -20px") h3.add_style("margin-right: -20px") div.add(h3) div.add("<br/>") refresh_button = ActionButtonWdg(title="Refresh") refresh_button.add_behavior({'type' : 'click_up', 'cbjs_action': "spt.panel.refresh('preview_data',\ spt.api.Utility.get_input_values('csv_import_main'))"}) refresh_button.add_style("float: left") div.add(refresh_button) import_button = ActionButtonWdg(title="Import") import_button.set_id('CsvImportButton') import_button.add_behavior({ 'type':'click_up', 'top_id':'csv_import_main', 'cbjs_action':''' //spt.dg_table_action.csv_import(bvr); var project = spt.Environment.get().get_project(); var my_search_type = bvr.search_type; var top_id = bvr.top_id; values = spt.api.Utility.get_input_values(top_id); var server = TacticServerStub.get(); var class_name = 'pyasm.command.CsvImportCmd'; var rtn = ''; var response_div = bvr.src_el.getParent('.spt_panel').getElement('.spt_cmd_response'); spt.app_busy.show("Importing Data"); var has_error = false; try { rtn = server.execute_cmd(class_name, {}, values); rtn.description = rtn.description.replace(/\\n/g,'<br/>'); response_div.innerHTML = rtn.description; var src_el = bvr.src_el; setTimeout(function() {spt.hide(bvr.src_el.getParent('.spt_panel').getElement('.spt_csv_sample'));}, 500); } catch (e) { var err_message = spt.exception.handler(e); spt.error(err_message); err_message = err_message.replace(/\\n/g,'<br/>'); response_div.innerHTML = 'Error: ' + err_message; response_div.setStyle("display", ""); has_error = true; } if (!has_error) { var popup = bvr.src_el.getParent(".spt_popup"); if (popup) { popup.destroy(); } } spt.app_busy.hide(); ''' }) import_button.add_style("float: left") div.add( import_button ) div.add(HtmlElement.br(clear='all')) div.add(HtmlElement.br(clear='all')) message_div = DivWdg("The following table will be imported into <b>%s</b> (Showing Max: 100)" % sobject_title) message_div.add_color('color','color') message_div.add_style('margin-left: 14px') div.add(message_div) widget.add(HtmlElement.br()) table_div = DivWdg() widget.add(table_div) table_div.add_style("max-width: 800px") table_div.add_style("overflow-x: auto") # draw the actual table of data table = Table() table_div.add(table) table.add_color('background','background') table.add_color('color','color') table.add_border() 
table.set_attr("cellpadding", "3") #table.add_attr('border','1') table.add_style("width: 100%") table.add_row() for i, title in enumerate(columns): if not title: title = "<b style='color:red'>*</b>" th = table.add_header(title) th.add_style("min-width: 100px") th.add_gradient("background", "background", -5) th.add_style("padding: 3px") th.add_style("text-align: left") for i, row in enumerate(csv_data): if i > 100: break tr = table.add_row() if i % 2: tr.add_color("background", "background") else: tr.add_color("background", "background3") for j, cell in enumerate(row): column_type = web.get_form_value("new_column_type_%s" % j) td = table.add_cell(cell) if column_type == 'timestamp' and not my._check_timestamp(cell): td.add_style("color: red") if column_type == 'integer' and not my._check_integer(cell): td.add_style("color: red") if column_type == 'float' and not my._check_float(cell): td.add_style("color: red") return widget # ensure this is not a partial date, which should be treated as a regular integer def _parse_date(my, dt_str): from dateutil import parser dt = parser.parse(dt_str, default=datetime.datetime(1900, 1, 1)).date() dt2 = parser.parse(dt_str, default=datetime.datetime(1901, 2, 2)).date() if dt == dt2: return dt else: return None def _check_varchar(my, data): column_type = None if len(data) <= 256: column_type = 'varchar(256)' else: my.CHECK = 1 column_type = 'text' return column_type def _check_integer(my, data): column_type = None try: int(data) column_type = 'integer' except ValueError, e: my.CHECK = 3 return column_type def _check_float(my, data): column_type = None try: float(data) column_type = 'float' except ValueError, e: my.CHECK = 2 return column_type def _check_timestamp(my, data): column_type = None try: timestamp = my._parse_date(data) if timestamp: column_type = 'timestamp' else: # if it is just some number instead of a real date or timestamp column_type = my._check_integer(data) if column_type: my.CHECK = 4 else: my.CHECK = 3 except Exception, e: my.CHECK = 4 return column_type
talha81/TACTIC-DEV
src/tactic/ui/widget/data_export_wdg.py
Python
epl-1.0
50,022
q = int(input())
for _ in range(q):
    n = int(input())
    a = list(map(int, input().split()))
    a.sort()
    # after sorting, the sticks must come in equal pairs: the even-indexed
    # and odd-indexed elements have to match up
    a2 = [a[i] for i in range(0, len(a), 2)]
    a3 = [a[i+1] for i in range(0, len(a), 2)]
    if a2 != a3:
        print("NO")
        continue
    # pair the smallest values with the largest ones and check that every
    # product is the same
    a3.reverse()
    areas = [ai * aj for ai, aj in zip(a2, a3)]
    if areas == [areas[0]] * 2 * n:
        print("YES")
    else:
        print("NO")
mathemage/CompetitiveProgramming
codeforces/div3/1203/B/B.py
Python
mit
410
# Copyright FuseSoC contributors # Licensed under the 2-Clause BSD License, see LICENSE for details. # SPDX-License-Identifier: BSD-2-Clause import pytest def test_deptree(tmp_path): import os from fusesoc.config import Config from fusesoc.coremanager import CoreManager from fusesoc.edalizer import Edalizer from fusesoc.librarymanager import Library from fusesoc.vlnv import Vlnv flags = {"tool": "icarus"} tests_dir = os.path.dirname(__file__) deptree_cores_dir = os.path.join(tests_dir, "capi2_cores", "deptree") lib = Library("deptree", deptree_cores_dir) cm = CoreManager(Config()) cm.add_library(lib) root_core = cm.get_core(Vlnv("::deptree-root")) # This is an array of (child, parent) core name tuples and # is used for checking that the flattened list of core # names is consistent with the dependencies. dependencies = ( # Dependencies of the root core ("::deptree-child3:0", "::deptree-root:0"), ("::deptree-child2:0", "::deptree-root:0"), ("::deptree-child1:0", "::deptree-root:0"), ("::deptree-child-a:0", "::deptree-root:0"), # Dependencies of child1 core ("::deptree-child3:0", "::deptree-child1:0"), # Dependencies of child-a core ("::deptree-child4:0", "::deptree-child-a:0"), ) # The ordered files that we expect from each core. expected_core_files = { "::deptree-child3:0": ( "child3-fs1-f1.sv", "child3-fs1-f2.sv", ), "::deptree-child2:0": ( "child2-fs1-f1.sv", "child2-fs1-f2.sv", ), "::deptree-child1:0": ( "child1-fs1-f1.sv", "child1-fs1-f2.sv", ), "::deptree-child4:0": ("child4.sv",), "::deptree-child-a:0": ( # Files from filesets are always included before any # files from generators with "position: append". # This is because generated files are often dependent on files # that are not generated, and it convenient to be able to # include them in the same core. "child-a2.sv", "generated-child-a.sv", "generated-child-a-append.sv", ), "::deptree-root:0": ( "root-fs1-f1.sv", "root-fs1-f2.sv", "root-fs2-f1.sv", "root-fs2-f2.sv", ), } # Use Edalizer to get the files. # This is necessary because we need to run generators. work_root = str(tmp_path / "work") os.mkdir(work_root) edalizer = Edalizer( toplevel=root_core.name, flags=flags, work_root=work_root, core_manager=cm, ) edam = edalizer.run() # Check dependency tree (after running all generators) deps = cm.get_depends(root_core.name, {}) deps_names = [str(c) for c in deps] all_core_names = set() for child, parent in dependencies: assert child in deps_names assert parent in deps_names all_core_names.add(child) all_core_names.add(parent) # Confirm that we don't have any extra or missing core names. assert all_core_names == set(deps_names) # Make sure there are no repeats in deps_names assert len(all_core_names) == len(deps_names) # Now work out what order we expect to get the filenames. # The order of filenames within each core in deterministic. # Each fileset in order. Followed by each generator in order. # The order between the cores is taken the above `dep_names`. 
expected_filenames = [] # A generator-created core with "position: first" expected_filenames.append("generated-child-a-first.sv") for dep_name in deps_names: expected_filenames += list(expected_core_files[dep_name]) # A generator-created core with "position: last" expected_filenames.append("generated-child-a-last.sv") edalized_filenames = [os.path.basename(f["name"]) for f in edam["files"]] assert edalized_filenames == expected_filenames def test_copyto(): import os import tempfile from fusesoc.config import Config from fusesoc.coremanager import CoreManager from fusesoc.edalizer import Edalizer from fusesoc.librarymanager import Library from fusesoc.vlnv import Vlnv flags = {"tool": "icarus"} work_root = tempfile.mkdtemp(prefix="copyto_") core_dir = os.path.join(os.path.dirname(__file__), "cores", "misc", "copytocore") lib = Library("misc", core_dir) cm = CoreManager(Config()) cm.add_library(lib) core = cm.get_core(Vlnv("::copytocore")) edalizer = Edalizer( toplevel=core.name, flags=flags, core_manager=cm, work_root=work_root, export_root=None, system_name=None, ) edam = edalizer.run() assert edam["files"] == [ { "file_type": "user", "core": "::copytocore:0", "name": "copied.file", }, { "file_type": "tclSource", "core": "::copytocore:0", "name": "subdir/another.file", }, ] assert os.path.exists(os.path.join(work_root, "copied.file")) assert os.path.exists(os.path.join(work_root, "subdir", "another.file")) def test_export(): import os import tempfile from fusesoc.config import Config from fusesoc.coremanager import CoreManager from fusesoc.edalizer import Edalizer from fusesoc.librarymanager import Library from fusesoc.vlnv import Vlnv flags = {"tool": "icarus"} build_root = tempfile.mkdtemp(prefix="export_") export_root = os.path.join(build_root, "exported_files") work_root = os.path.join(build_root, "work") core_dir = os.path.join(os.path.dirname(__file__), "cores") cm = CoreManager(Config()) cm.add_library(Library("cores", core_dir)) core = cm.get_core(Vlnv("::wb_intercon")) edalizer = Edalizer( toplevel=core.name, flags=flags, core_manager=cm, work_root=work_root, export_root=export_root, system_name=None, ) edalizer.run() for f in [ "wb_intercon_1.0/dummy_icarus.v", "wb_intercon_1.0/bench/wb_mux_tb.v", "wb_intercon_1.0/bench/wb_upsizer_tb.v", "wb_intercon_1.0/bench/wb_intercon_tb.v", "wb_intercon_1.0/bench/wb_arbiter_tb.v", "wb_intercon_1.0/rtl/verilog/wb_data_resize.v", "wb_intercon_1.0/rtl/verilog/wb_mux.v", "wb_intercon_1.0/rtl/verilog/wb_arbiter.v", "wb_intercon_1.0/rtl/verilog/wb_upsizer.v", ]: assert os.path.isfile(os.path.join(export_root, f))
olofk/fusesoc
tests/test_coremanager.py
Python
bsd-2-clause
6,691
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models from ..._vendor import _convert_request from ...operations._managed_instance_long_term_retention_policies_operations import build_create_or_update_request_initial, build_get_request, build_list_by_database_request T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class ManagedInstanceLongTermRetentionPoliciesOperations: """ManagedInstanceLongTermRetentionPoliciesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.sql.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace_async async def get( self, resource_group_name: str, managed_instance_name: str, database_name: str, policy_name: Union[str, "_models.ManagedInstanceLongTermRetentionPolicyName"], **kwargs: Any ) -> "_models.ManagedInstanceLongTermRetentionPolicy": """Gets a managed database's long term retention policy. :param resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :type resource_group_name: str :param managed_instance_name: The name of the managed instance. :type managed_instance_name: str :param database_name: The name of the database. :type database_name: str :param policy_name: The policy name. Should always be Default. 
:type policy_name: str or ~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicyName :keyword callable cls: A custom type or function that will be passed the direct response :return: ManagedInstanceLongTermRetentionPolicy, or the result of cls(response) :rtype: ~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicy :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedInstanceLongTermRetentionPolicy"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_request( resource_group_name=resource_group_name, managed_instance_name=managed_instance_name, database_name=database_name, policy_name=policy_name, subscription_id=self._config.subscription_id, template_url=self.get.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('ManagedInstanceLongTermRetentionPolicy', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/backupLongTermRetentionPolicies/{policyName}'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, managed_instance_name: str, database_name: str, policy_name: Union[str, "_models.ManagedInstanceLongTermRetentionPolicyName"], parameters: "_models.ManagedInstanceLongTermRetentionPolicy", **kwargs: Any ) -> Optional["_models.ManagedInstanceLongTermRetentionPolicy"]: cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ManagedInstanceLongTermRetentionPolicy"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(parameters, 'ManagedInstanceLongTermRetentionPolicy') request = build_create_or_update_request_initial( resource_group_name=resource_group_name, managed_instance_name=managed_instance_name, database_name=database_name, policy_name=policy_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._create_or_update_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize('ManagedInstanceLongTermRetentionPolicy', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/backupLongTermRetentionPolicies/{policyName}'} # type: ignore @distributed_trace_async async def begin_create_or_update( self, resource_group_name: str, managed_instance_name: str, database_name: str, policy_name: Union[str, "_models.ManagedInstanceLongTermRetentionPolicyName"], parameters: "_models.ManagedInstanceLongTermRetentionPolicy", **kwargs: Any ) -> AsyncLROPoller["_models.ManagedInstanceLongTermRetentionPolicy"]: """Sets a managed database's long term retention policy. :param resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :type resource_group_name: str :param managed_instance_name: The name of the managed instance. :type managed_instance_name: str :param database_name: The name of the database. :type database_name: str :param policy_name: The policy name. Should always be Default. :type policy_name: str or ~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicyName :param parameters: The long term retention policy info. :type parameters: ~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicy :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either ManagedInstanceLongTermRetentionPolicy or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicy] :raises: ~azure.core.exceptions.HttpResponseError """ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedInstanceLongTermRetentionPolicy"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, managed_instance_name=managed_instance_name, database_name=database_name, policy_name=policy_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('ManagedInstanceLongTermRetentionPolicy', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/backupLongTermRetentionPolicies/{policyName}'} # type: ignore @distributed_trace def list_by_database( self, resource_group_name: str, managed_instance_name: str, database_name: str, **kwargs: Any ) -> AsyncIterable["_models.ManagedInstanceLongTermRetentionPolicyListResult"]: """Gets a database's long term retention policy. :param resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :type resource_group_name: str :param managed_instance_name: The name of the managed instance. :type managed_instance_name: str :param database_name: The name of the database. 
:type database_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ManagedInstanceLongTermRetentionPolicyListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicyListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedInstanceLongTermRetentionPolicyListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_by_database_request( resource_group_name=resource_group_name, managed_instance_name=managed_instance_name, database_name=database_name, subscription_id=self._config.subscription_id, template_url=self.list_by_database.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_database_request( resource_group_name=resource_group_name, managed_instance_name=managed_instance_name, database_name=database_name, subscription_id=self._config.subscription_id, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("ManagedInstanceLongTermRetentionPolicyListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_by_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/backupLongTermRetentionPolicies'} # type: ignore
Azure/azure-sdk-for-python
sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_managed_instance_long_term_retention_policies_operations.py
Python
mit
16,070
from __future__ import division, unicode_literals from ml.similarity import pre_process, TfidfCluster from textblob import TextBlob as tb import nltk, re, pprint import nltk.chunk from nltk.corpus import twitter_samples from nltk import tag from nltk.corpus import wordnet from nltk.corpus.reader.wordnet import POS_LIST import math from stemming.porter2 import stem from nltk import ngrams as _ngrams import json from nltk.corpus import stopwords from linguistic import pattern class TestNLTK(object): def setup(self): pass def teardown(self): pass def test_named_entity_extraction(self): data_problem = [ """ If you are navigating through the list of open tabs inside the All+Tabs panel and you wanna filter by a term you have to select the search text field first. It would be nice if any entered character is automatically routed to the search field and the filter gets applied. """, """ In maximized mode there is something like 3 pixels padding on the right side of "All tabs" panel. It doesn't exist on the left side of panel and in not maximized mode. """, """ When you have the All+Tabs panel open it would be great if you can press Cmd/Ctrl+F to focus the search text field. Right now the panel gets hidden and the Find toolbar is shown without focus. IMO using the command inside the All+Tabs panel would make more sense. """, """ Steps to reproduce: Nothing... had multiple windows and tiles open... for about 4 hours Actual results: Crashed without warning """, """ Firefox crashes at leat 6 times a day. Installed latest version but still crashing. Goes very slow before it crashes. """, """ Steps to reproduce: W have installed Firefox 18 (as we did with all previous version) on Solaris 10 SPAC 64b Actual results: When we tried to start it form a console, it crashed with a message: Segmentation fault. And it produced a core dump Expected results: Firefox should have open correctly """, """ screen shots: Aurora 7.0a2 top, FF 3.6.18 bottom User Agent: Mozilla/5.0 (X11; Linux i686 on x86_64; rv:7.0a2) Gecko/20110709 Firefox/7.0a2 Build ID: 20110709042004 Steps to reproduce: Start Aurora with a new Profile just to be sure Open Menu Preferences Select Advanced | Encryption , View Certificates Select tab certificates, scroll down to Thawte Select first Thawte Certificate, click "view..." button (Actually the problem is not specific to this single certificate. I tried many more built-in root certificates and they all had the same issue.) Actual results: Certificate Viewer opens, it says "could not verify this certificate for unknown reasons" Expected results: Certificate Viewer opens, it should say "this certificates has been verified for the following uses" and a list of uses. """, """ I agree with the utility of this feature it's just going to take some serious work. This is a pretty hacky area of the code that is screaming for a rewrite. I'll slate this for 1.1 """ ] data_analyses = [ """"All Tabs" panel code removed in bug 670684""", """"All Tabs" panel code removed in bug 670684""", """"All Tabs" panel code removed in bug 670684""", """ Please provide the crash ID from about:crashes. Are you able to reproduce it consistently? """, """Please post the related Report IDs from about:crashes.""", """ Do you see crash report IDs in about:crashes? """, """ I agree with the utility of this feature it's just going to take some serious work. This is a pretty hacky area of the code that is screaming for a rewrite. 
I'll slate this for 1.1 """, """ This is caused by bug 479393: We are expecting that CERT_VerifyCertificateNow will return SECSuccess if at least one of the given usages is valid. However, it actually returns SECFailure unless all of the given usages are valid. I remember that Kai's original patch was correctly ignoring the return value of CERT_VerifyCertificateNow, but he "corrected" it when I told him we should check the return value. My bad. :( The fix is simple: restore the error detection logic to the way it was done before: """ ] # problems = data_problem[3:6] # analyses = data_analyses[3:6] problems = data_problem[0:3] analyses = data_analyses[0:3] lp = pattern.LinguisticPattern(problems, analyses) lp.named_entities(labels=['NP']) result = lp.find_ngrams_patterns() for p in result: print p # from nltk.util import ngrams # from nltk.corpus import gutenberg # # gut_ngrams = (ngram for sent in gutenberg.sents() for ngram in # ngrams(sent, 3, pad_left=True, pad_right=True, right_pad_symbol='EOS', left_pad_symbol="BOS")) # freq_dist = nltk.FreqDist(gut_ngrams) # kneser_ney = nltk.KneserNeyProbDist(freq_dist) # # prob_sum = 0 # for i in kneser_ney.samples(): # if i[0] == "I" and i[1] == "confess": # prob_sum += kneser_ney.prob(i) # print "{0}:{1}".format(i, kneser_ney.prob(i)) # print prob_sum # def find_ngram_to(n, entity, problem): # # ngrams = _ngrams(problem.lower().split(), n) # result = [gram for gram in ngrams if entity in gram] # return result # # # # # def find_ngrams(entity, problems): # result = {} # for x in xrange(5, 6): # result[x] = {} # for i, problem in enumerate(problems): # key = "p" + str(i + 1) # result[x][key] = find_ngram_to(x, entity, problem) # # break # # return result # # # # # # # # # def tf(word, blob): # return blob.words.count(word) / len(blob.words) # # # def n_containing(word, bloblist): # return sum(1 for blob in bloblist if word in blob.words) # # # def idf(word, bloblist): # y = (1 + n_containing(word, bloblist)) # x = math.log(len(bloblist) / y) # return x # # # def tfidf(word, blob, bloblist): # _tf = tf(word, blob) # _idf = idf(word, bloblist) # # return _tf * _idf # # # def tag_override(sent): # force_tags = {'crashes': 'VBZ'} # tagged_words = nltk.pos_tag(sent) # new_tagged_words = [(word, force_tags.get(word, tag)) for word, tag in tagged_words] # return new_tagged_words # # # def get_pos(tag): # if tag: # s = str(tag).lower()[0] # if s in POS_LIST: # return s # # return None # # # def transform(tree): # return (tree.leaves(), tree.label()) # # # def disambiguity(word, tag): # words = wordnet.synsets(word, get_pos(tag)) # if words: # value = words[0] # return value._lemma_names[0] # else: # return word # clusters = [ # { # "id": 1, # "problems": data_problem[0:3], # "analyses": data_analyses[0:3] # }, # { # "id": 2, # "problems": data_problem[3:6], # "analyses": data_analyses[3:6] # }, # { # "id": 3, # "problems": data_problem[6:7], # "analyses": data_analyses[6:7] # }, # { # "id": 4, # "problems": data_problem[7:8], # "analyses": data_analyses[7:8] # }, # ]
marquesarthur/BugAnalysisRecommender
dataset/tests/test_nltk_ne_verbs.py
Python
mit
8,055
# -*- coding: utf-8 -*- # Copyright (c) 2017, Frappe Technologies and Contributors # See license.txt from __future__ import unicode_literals import unittest class TestGoogleMapsSettings(unittest.TestCase): pass
ESS-LLP/frappe
frappe/integrations/doctype/google_maps_settings/test_google_maps_settings.py
Python
mit
213
# Author: Alexandre Gramfort <[email protected]> # Fabian Pedregosa <[email protected]> # # License: BSD 3 clause import numpy as np from scipy import sparse import warnings from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.linear_model.base import LinearRegression from sklearn.linear_model.base import center_data, sparse_center_data from sklearn.utils import check_random_state from sklearn.datasets.samples_generator import make_sparse_uncorrelated from sklearn.datasets.samples_generator import make_regression def test_linear_regression(): # Test LinearRegression on a simple dataset. # a simple dataset X = [[1], [2]] Y = [1, 2] clf = LinearRegression() clf.fit(X, Y) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.predict(X), [1, 2]) # test it also for degenerate input X = [[1]] Y = [0] clf = LinearRegression() clf.fit(X, Y) assert_array_almost_equal(clf.coef_, [0]) assert_array_almost_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.predict(X), [0]) def test_linear_regression_n_jobs(): """ Test for the n_jobs parameter on the fit method and the constructor """ X = [[1], [2]] Y = [1, 2] clf = LinearRegression() with warnings.catch_warnings(record=True): clf_fit = clf.fit(X, Y, 4) assert_equal(clf_fit.n_jobs, clf.n_jobs) assert_equal(clf.n_jobs, 1) def test_fit_intercept(): # Test assertions on betas shape. X2 = np.array([[0.38349978, 0.61650022], [0.58853682, 0.41146318]]) X3 = np.array([[0.27677969, 0.70693172, 0.01628859], [0.08385139, 0.20692515, 0.70922346]]) y = np.array([1, 1]) lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y) lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y) lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y) lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y) assert_equal(lr2_with_intercept.coef_.shape, lr2_without_intercept.coef_.shape) assert_equal(lr3_with_intercept.coef_.shape, lr3_without_intercept.coef_.shape) assert_equal(lr2_without_intercept.coef_.ndim, lr3_without_intercept.coef_.ndim) def test_linear_regression_sparse(random_state=0): "Test that linear regression also works with sparse data" random_state = check_random_state(random_state) for i in range(10): n = 100 X = sparse.eye(n, n) beta = random_state.rand(n) y = X * beta[:, np.newaxis] ols = LinearRegression() ols.fit(X, y.ravel()) assert_array_almost_equal(beta, ols.coef_ + ols.intercept_) assert_array_almost_equal(ols.residues_, 0) def test_linear_regression_multiple_outcome(random_state=0): "Test multiple-outcome linear regressions" X, y = make_regression(random_state=random_state) Y = np.vstack((y, y)).T n_features = X.shape[1] clf = LinearRegression(fit_intercept=True) clf.fit((X), Y) assert_equal(clf.coef_.shape, (2, n_features)) Y_pred = clf.predict(X) clf.fit(X, y) y_pred = clf.predict(X) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) def test_linear_regression_sparse_multiple_outcome(random_state=0): "Test multiple-outcome linear regressions with sparse data" random_state = check_random_state(random_state) X, y = make_sparse_uncorrelated(random_state=random_state) X = sparse.coo_matrix(X) Y = np.vstack((y, y)).T n_features = X.shape[1] ols = LinearRegression() ols.fit(X, Y) assert_equal(ols.coef_.shape, (2, n_features)) Y_pred = ols.predict(X) ols.fit(X, y.ravel()) y_pred = ols.predict(X) assert_array_almost_equal(np.vstack((y_pred, 
y_pred)).T, Y_pred, decimal=3) def test_center_data(): n_samples = 200 n_features = 2 rng = check_random_state(0) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) expected_X_mean = np.mean(X, axis=0) # XXX: currently scaled to variance=n_samples expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0]) expected_y_mean = np.mean(y, axis=0) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(X_mean, np.zeros(n_features)) assert_array_almost_equal(y_mean, 0) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt, X) assert_array_almost_equal(yt, y) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=False) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt, X - expected_X_mean) assert_array_almost_equal(yt, y - expected_y_mean) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, expected_X_std) assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std) assert_array_almost_equal(yt, y - expected_y_mean) def test_center_data_multioutput(): n_samples = 200 n_features = 3 n_outputs = 2 rng = check_random_state(0) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples, n_outputs) expected_y_mean = np.mean(y, axis=0) args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))] for center, X in args: _, yt, _, y_mean, _ = center(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(y_mean, np.zeros(n_outputs)) assert_array_almost_equal(yt, y) _, yt, _, y_mean, _ = center(X, y, fit_intercept=True, normalize=False) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(yt, y - y_mean) _, yt, _, y_mean, _ = center(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(yt, y - y_mean) def test_center_data_weighted(): n_samples = 200 n_features = 2 rng = check_random_state(0) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) sample_weight = rng.rand(n_samples) expected_X_mean = np.average(X, axis=0, weights=sample_weight) expected_y_mean = np.average(y, axis=0, weights=sample_weight) # XXX: if normalize=True, should we expect a weighted standard deviation? 
# Currently not weighted, but calculated with respect to weighted mean # XXX: currently scaled to variance=n_samples expected_X_std = (np.sqrt(X.shape[0]) * np.mean((X - expected_X_mean) ** 2, axis=0) ** .5) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=False, sample_weight=sample_weight) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt, X - expected_X_mean) assert_array_almost_equal(yt, y - expected_y_mean) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=True, sample_weight=sample_weight) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, expected_X_std) assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std) assert_array_almost_equal(yt, y - expected_y_mean) def test_sparse_center_data(): n_samples = 200 n_features = 2 rng = check_random_state(0) # random_state not supported yet in sparse.rand X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng X = X.tolil() y = rng.rand(n_samples) XA = X.toarray() # XXX: currently scaled to variance=n_samples expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0]) Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(X_mean, np.zeros(n_features)) assert_array_almost_equal(y_mean, 0) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt.A, XA) assert_array_almost_equal(yt, y) Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y, fit_intercept=True, normalize=False) assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) assert_array_almost_equal(y_mean, np.mean(y, axis=0)) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt.A, XA) assert_array_almost_equal(yt, y - np.mean(y, axis=0)) Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) assert_array_almost_equal(y_mean, np.mean(y, axis=0)) assert_array_almost_equal(X_std, expected_X_std) assert_array_almost_equal(Xt.A, XA / expected_X_std) assert_array_almost_equal(yt, y - np.mean(y, axis=0)) def test_csr_sparse_center_data(): # Test output format of sparse_center_data, when input is csr X, y = make_regression() X[X < 2.5] = 0.0 csr = sparse.csr_matrix(X) csr_, y, _, _, _ = sparse_center_data(csr, y, True) assert_equal(csr_.getformat(), 'csr')
uglyboxer/linear_neuron
net-p3/lib/python3.5/site-packages/sklearn/linear_model/tests/test_base.py
Python
mit
10,447
from django.conf.urls import url from . import views app_name = 'osf' urlpatterns = [ url(r'^reviews/$', views.ReviewActionListCreate.as_view(), name=views.ReviewActionListCreate.view_name), url(r'^requests/$', views.NodeRequestActionCreate.as_view(), name=views.NodeRequestActionCreate.view_name), url(r'^(?P<action_id>\w+)/$', views.ActionDetail.as_view(), name=views.ActionDetail.view_name), ]
binoculars/osf.io
api/actions/urls.py
Python
apache-2.0
412
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implementation of a Multilayer Perceptron for classification.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from .models_base import glorot from .models_base import Model import numpy as np import tensorflow as tf class MLP(Model): """Multilayer Perceptron for binary and multi-class classification. Attributes: output_dim: Integer representing the number of classes. hidden_sizes: List containing the sizes of the hidden layers. activation: An activation function to apply to the output of each hidden layer. aggregation: String representing an aggregation operation that could be applied to the inputs. Valid options: None, `add`. If None, then no aggregation is performed. If `add`, the first half of the features dimension is added to the second half (see the `_aggregate` function for details). hidden_aggregation: A tuple or list of integers representing the number of hidden units in each layer of the projection network described above. is_binary_classification: Boolean specifying if this is model for binary classification. If so, it uses a different loss function and returns predictions with a single dimension, batch size. name: String representing the model name. """ def __init__(self, output_dim, hidden_sizes, activation=tf.nn.leaky_relu, aggregation=None, hidden_aggregation=(), is_binary_classification=False, name='MLP'): super(MLP, self).__init__( aggregation=aggregation, hidden_aggregation=hidden_aggregation, activation=activation) self.output_dim = output_dim self.hidden_sizes = hidden_sizes self.is_binary_classification = is_binary_classification self.name = name def _construct_layers(self, inputs): """Creates all hidden layers of the model, before the prediction layer. Args: inputs: A tensor containing the model inputs. The first dimension is the batch size. Returns: A tuple containing the encoded representation of the inputs and a dictionary of regularization parameters. """ reg_params = {} # Reshape inputs in case they are not of shape (batch_size, features). num_features = np.prod(inputs.shape[1:]) inputs = tf.reshape(inputs, [-1, num_features]) hidden = inputs for layer_index, output_size in enumerate(self.hidden_sizes): input_size = hidden.get_shape().dims[-1].value weights_name = 'W_' + str(layer_index) weights = tf.get_variable( name=weights_name, initializer=glorot((input_size, output_size)), use_resource=True) reg_params[weights_name] = weights biases = tf.get_variable( 'b_' + str(layer_index), initializer=tf.zeros([output_size], dtype=tf.float32), use_resource=True) hidden = self.activation(tf.nn.xw_plus_b(hidden, weights, biases)) return hidden, reg_params def get_encoding_and_params(self, inputs, **unused_kwargs): """Creates the model hidden representations and prediction ops. For this model, the hidden representation is the last layer of the MLP, before the logit computation. The predictions are unnormalized logits. 
Args: inputs: A tensor containing the model inputs. The first dimension is the batch size. **unused_kwargs: Other unused keyword arguments. Returns: encoding: A tensor containing an encoded batch of samples. The first dimension corresponds to the batch size. all_vars: A dictionary mapping from variable name to TensorFlow op containing all variables used in this model. reg_params: A dictionary mapping from a variable name to a Tensor of parameters which will be used for regularization. """ # Build layers. with tf.variable_scope(self.name): if isinstance(inputs, (tuple, list)): with tf.variable_scope('encoding'): hidden1, reg_params = self._construct_layers(inputs[0]) with tf.variable_scope('encoding', reuse=True): hidden2, _ = self._construct_layers(inputs[1]) hidden = self._aggregate((hidden1, hidden2)) else: with tf.variable_scope('encoding'): hidden, reg_params = self._construct_layers(inputs) # Store model variables for easy access. variables = tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_default_graph().get_name_scope()) all_vars = {var.name: var for var in variables} return hidden, all_vars, reg_params def get_predictions_and_params(self, encoding, is_train, **kwargs): """Creates the model prediction op. For this model, the hidden representation is the last layer of the MLP, before the logit computation. The predictions are unnormalized logits. Args: encoding: A tensor containing the model inputs. The first dimension is the batch size. is_train: A placeholder representing a boolean value that specifies if this model will be used for training or for test. **kwargs: Other keyword arguments. Returns: predictions: A tensor of logits. For multiclass classification its shape is (num_samples, num_classes), where the second dimension contains a logit per class. For binary classification, its shape is (num_samples,), where each element is the probability of class 1 for that sample. all_vars: A dictionary mapping from variable name to TensorFlow op containing all variables used in this model. reg_params: A dictionary mapping from a variable name to a Tensor of parameters which will be used for regularization. """ reg_params = {} # Build layers. with tf.variable_scope(self.name + '/prediction'): input_size = encoding.get_shape().dims[-1].value weights = tf.get_variable( 'W_outputs', initializer=glorot((input_size, self.output_dim)), use_resource=True) reg_params['W_outputs'] = weights biases = tf.get_variable( 'b_outputs', initializer=tf.zeros([self.output_dim], dtype=tf.float32), use_resource=True) predictions = tf.nn.xw_plus_b(encoding, weights, biases, name='predictions') if self.is_binary_classification: predictions = predictions[:, 0] # Store model variables for easy access. variables = tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_default_graph().get_name_scope()) all_vars = {var.name: var for var in variables} return predictions, all_vars, reg_params def get_loss(self, predictions, targets, name_scope='loss', reg_params=None, **kwargs): """Returns a loss between the provided targets and predictions. For binary classification, this loss is sigmoid cross entropy. For multi-class classification, it is softmax cross entropy. A weight decay loss is also added to the parameters passed in reg_params. Args: predictions: A tensor of predictions. For multiclass classification its shape is (num_samples, num_classes), where the second dimension contains a logit per class. 
For binary classification, its shape is (num_samples,), where each element is the probability of class 1 for that sample. targets: A tensor of targets of shape (num_samples,), where each row contains the label index of the corresponding sample. name_scope: A string containing the name scope used in TensorFlow. reg_params: A dictonary of parameters, mapping from name to parameter, for the variables to be included in the weight decay loss. If None, no weight decay is applied. **kwargs: Keyword arguments, potentially containing the weight of the regularization term, passed under the name `weight_decay`. If this is not provided, it defaults to 0.0. Returns: loss: The cummulated loss value. """ reg_params = reg_params if reg_params is not None else {} weight_decay = kwargs['weight_decay'] if 'weight_decay' in kwargs else None with tf.name_scope(name_scope): # Cross entropy error. if self.is_binary_classification: loss = tf.reduce_sum( tf.nn.sigmoid_cross_entropy_with_logits( labels=targets, logits=predictions)) else: loss = tf.losses.softmax_cross_entropy(targets, predictions) # Weight decay loss. if weight_decay is not None: for var in reg_params.values(): loss = loss + weight_decay * tf.nn.l2_loss(var) return loss def normalize_predictions(self, predictions): """Converts predictions to probabilities. Args: predictions: A tensor of logits. For multiclass classification its shape is (num_samples, num_classes), where the second dimension contains a logit per class. For binary classification, its shape is (num_samples,), where each element is the probability of class 1 for that sample. Returns: A tensor of the same shape as predictions, with values between [0, 1] representing probabilities. """ if self.is_binary_classification: return tf.nn.sigmoid(predictions) return tf.nn.softmax(predictions, axis=-1)
tensorflow/neural-structured-learning
research/gam/gam/models/mlp.py
Python
apache-2.0
10,172
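The MLP class above splits graph construction into separate encoding, prediction, and loss calls. The sketch below shows one way those pieces might be wired together in TF1-style code; the input shape, class count, and weight-decay value are illustrative assumptions, not part of the original file.

import tensorflow as tf

# Hypothetical wiring of the MLP defined above (shapes and hyperparameters are assumptions).
model = MLP(output_dim=10, hidden_sizes=[128, 64])

inputs = tf.placeholder(tf.float32, shape=(None, 784))    # assumed flattened input features
targets = tf.placeholder(tf.float32, shape=(None, 10))    # assumed one-hot labels
is_train = tf.placeholder(tf.bool, shape=())

# Encoding -> logits -> loss, mirroring the three methods of the class.
encoding, enc_vars, enc_reg = model.get_encoding_and_params(inputs)
logits, pred_vars, pred_reg = model.get_predictions_and_params(encoding, is_train)
reg_params = dict(enc_reg, **pred_reg)
loss = model.get_loss(logits, targets, reg_params=reg_params, weight_decay=1e-4)
probabilities = model.normalize_predictions(logits)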
import cv2 cap = cv2.VideoCapture('/path/to/video') # open the input video n = 360 # save every nth frame frame_idx = 0 while cap.isOpened(): ret, frame = cap.read() # read the next video frame if not ret: break # stop once the video is exhausted cv2.imshow('Video Annotation', frame) if frame_idx % n == 0: cv2.imwrite("frame%d.jpg" % frame_idx, frame) # save every nth frame frame_idx += 1 if cv2.waitKey(10) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows()
karanchawla/100DaysofCode
day1/video_to_images.py
Python
mit
408
# Copyright 2014 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example OpenHTF test logic. Run with (your virtualenv must be activated first): python all_the_things.py """ import time import os.path import openhtf as htf from openhtf import util from openhtf.util import units from openhtf.plugs import user_input from openhtf.output import callbacks from openhtf.output.callbacks import console_summary from openhtf.output.callbacks import json_factory from examples import example_plugs @htf.plug(example=example_plugs.ExamplePlug) @htf.plug(frontend_aware=example_plugs.ExampleFrontendAwarePlug) def example_monitor(example, frontend_aware): time.sleep(.2) frontend_aware.increment() return example.increment() @htf.measures( htf.Measurement( 'widget_type').matches_regex(r'.*Widget$').doc( '''This measurement tracks the type of widgets.'''), htf.Measurement( 'widget_color').doc('Color of the widget'), htf.Measurement('widget_size').in_range(1, 4).doc('Size of widget')) @htf.measures('specified_as_args', docstring='Helpful docstring', units=units.HERTZ, validators=[util.validators.matches_regex('Measurement')]) @htf.plug(example=example_plugs.ExamplePlug) @htf.plug(prompts=user_input.UserInput) def hello_world(test, example, prompts): """A hello world test phase.""" test.logger.info('Hello World!') test.measurements.widget_type = prompts.prompt( 'What\'s the widget type? (Hint: try `MyWidget` to PASS)', text_input=True) if test.measurements.widget_type == 'raise': raise Exception() test.measurements.widget_color = 'Black' test.measurements.widget_size = 3 test.measurements.specified_as_args = 'Measurement args specified directly' test.logger.info('Plug value: %s', example.increment()) # Timeout if this phase takes longer than 10 seconds. 
@htf.TestPhase(timeout_s=10) @htf.measures( *(htf.Measurement( 'level_%s' % i) for i in ['none', 'some', 'all'])) @htf.monitors('monitor_measurement', example_monitor) def set_measurements(test): """Test phase that sets a measurement.""" test.measurements.level_none = 0 time.sleep(1) test.measurements.level_some = 8 time.sleep(1) test.measurements.level_all = 9 time.sleep(1) level_all = test.get_measurement('level_all') assert level_all.value == 9 @htf.measures( htf.Measurement('dimensions').with_dimensions(units.HERTZ), htf.Measurement('lots_of_dims').with_dimensions( units.HERTZ, units.SECOND, htf.Dimension(description='my_angle', unit=units.RADIAN))) def dimensions(test): for dim in range(5): test.measurements.dimensions[dim] = 1 << dim for x, y, z in zip(range(1, 5), range(21, 25), range (101, 105)): test.measurements.lots_of_dims[x, y, z] = x + y + z @htf.measures( htf.Measurement('replaced_min_only').in_range('{min}', 5, type=int), htf.Measurement('replaced_max_only').in_range(0, '{max}', type=int), htf.Measurement('replaced_min_max').in_range('{min}', '{max}', type=int), ) def measures_with_args(test, min, max): test.measurements.replaced_min_only = 1 test.measurements.replaced_max_only = 1 test.measurements.replaced_min_max = 1 def attachments(test): test.attach('test_attachment', 'This is test attachment data.'.encode('utf-8')) test.attach_from_file( os.path.join(os.path.dirname(__file__), 'example_attachment.txt')) test_attachment = test.get_attachment('test_attachment') assert test_attachment.data == b'This is test attachment data.' @htf.TestPhase(run_if=lambda: False) def skip_phase(test): """Don't run this phase.""" def analysis(test): level_all = test.get_measurement('level_all') assert level_all.value == 9 test_attachment = test.get_attachment('test_attachment') assert test_attachment.data == b'This is test attachment data.' lots_of_dims = test.get_measurement('lots_of_dims') assert lots_of_dims.value.value == [ (1, 21, 101, 123), (2, 22, 102, 126), (3, 23, 103, 129), (4, 24, 104, 132) ] test.logger.info('Pandas datafram of lots_of_dims \n:%s', lots_of_dims.value.to_dataframe()) def teardown(test): test.logger.info('Running teardown') if __name__ == '__main__': test = htf.Test( htf.PhaseGroup.with_teardown(teardown)( hello_world, set_measurements, dimensions, attachments, skip_phase, measures_with_args.with_args(min=1, max=4), analysis, ), # Some metadata fields, these in particular are used by mfg-inspector, # but you can include any metadata fields. test_name='MyTest', test_description='OpenHTF Example Test', test_version='1.0.0') test.add_output_callbacks(callbacks.OutputToFile( './{dut_id}.{metadata[test_name]}.{start_time_millis}.pickle')) test.add_output_callbacks(json_factory.OutputToJSON( './{dut_id}.{metadata[test_name]}.{start_time_millis}.json', indent=4)) test.add_output_callbacks(console_summary.ConsoleSummary()) # Example of how to output to testrun protobuf format and save to disk then # upload. Replace json_file with your JSON-formatted private key downloaded # from Google Developers Console when you created the Service Account you # intend to use, or name it 'my_private_key.json'. # inspector = (mfg_inspector.MfgInspector # .from_json(json.load(json_file))) # .set_converter(test_runs_converter.test_run_from_test_record)) # test.add_output_callbacks( # inspector.save_to_disk('./{dut_id}.{start_time_millis}.pb'), # inspector.upload()) test.execute(test_start=user_input.prompt_for_test_start())
jettisonjoe/openhtf
examples/all_the_things.py
Python
apache-2.0
6,199
import pytest import os import pkg_resources import numpy as np from psoap import orbit_astrometry from psoap import constants as C import matplotlib.pyplot as plt import matplotlib # Create plots of all of the orbits from astropy.io import ascii # Create plots of all of the orbits # If it doesn't already exist, create a "plots/basic/" directory outdir = "tests/plots/41Dra/" if not os.path.exists(outdir): print("Creating ", outdir) os.makedirs(outdir) # Load the Tokovinin data sets for radial velocity and astrometry rv_data_fname = pkg_resources.resource_filename("psoap", "data/41Dra/rv.dat") rv_data = ascii.read(rv_data_fname) ind_A = (rv_data["comp"] == "a") ind_B = (rv_data["comp"] == "b") rv_jds_A = rv_data["JD"][ind_A] vAs_data = rv_data["RV"][ind_A] vAs_err = rv_data["err"][ind_A] rv_jds_B = rv_data["JD"][ind_B] vBs_data = rv_data["RV"][ind_B] vBs_err = rv_data["err"][ind_B] # Sort to separate vA and vB from each other astro_data_fname = pkg_resources.resource_filename("psoap", "data/41Dra/astro.dat") astro_data = ascii.read(astro_data_fname) rho_data = astro_data["rho"] rho_err = 0.003 theta_data = astro_data["theta"] theta_err = 0.1 astro_jds = astro_data["JD"] def test_data(): # Make a plot of the astrometric data on the sky fig, ax = plt.subplots(nrows=1) xs = rho_data * np.cos(theta_data * np.pi/180) ys = rho_data * np.sin(theta_data * np.pi/180) ax.plot(xs, ys, ".") ax.set_xlabel("North") ax.set_ylabel("East") ax.plot(0,0, "k*") ax.set_aspect("equal", "datalim") fig.savefig(outdir + "data_astro.png") dpc = 44.6 # pc # Orbital elements for 41 Dra a = 0.0706 * dpc # [AU] e = 0.9754 i = 49.7 # [deg] omega = 127.31 # omega_1 Omega = 1.9 + 180 # [deg] T0 = 2449571.037 # [Julian Date] M_2 = 1.20 # [M_sun] M_tot = 1.28 + M_2 # [M_sun] gamma = 5.76 # [km/s] P = np.sqrt(4 * np.pi**2 / (C.G * M_tot * C.M_sun) * (a * C.AU)**3) / (24 * 3600) # [day] # Pick a span of dates for the observations dates_obs = np.linspace(2446630, 2452010, num=300) # [day] # Pick a span of dates for one period dates = np.linspace(T0, T0 + P, num=600) # Initialize the orbit orb = orbit_astrometry.Binary(a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=dates) full_dict = orb.get_full_orbit() vAs, vBs, XYZ_As, XYZ_Bs, XYZ_ABs, xy_As, xy_Bs, xy_ABs = [full_dict[key] for key in ("vAs", "vBs", "XYZ_As", "XYZ_Bs", "XYZ_ABs", "xy_As", "xy_Bs", "xy_ABs")] polar_dict = orb.get_orbit() vAs, vBs, rho_ABs, theta_ABs = [polar_dict[key] for key in ("vAs", "vBs", "rhos", "thetas")] # Convert to sky coordinates, using distance alpha_dec_As = XYZ_As/dpc # [arcsec] alpha_dec_Bs = XYZ_Bs/dpc # [arcsec] alpha_dec_ABs = XYZ_ABs/dpc # [arcsec] rho_ABs = rho_ABs/dpc # [arcsec] peri_A = orb._get_periastron_A()/dpc peri_B = orb._get_periastron_B()/dpc peri_BA = orb._get_periastron_BA()/dpc asc_A = orb._get_node_A()/dpc asc_B = orb._get_node_B()/dpc asc_BA = orb._get_node_BA()/dpc # Since we are plotting vs one date, we need to plot the dots using a color scale so we can figure them out along the orbit. 
# Set a colorscale for the lnprobs cmap_primary = matplotlib.cm.get_cmap("Blues") cmap_secondary = matplotlib.cm.get_cmap("Oranges") norm = matplotlib.colors.Normalize(vmin=np.min(dates), vmax=np.max(dates)) # Determine colors based on the ending lnprob of each walker def plot_points(ax, dates, xs, ys, primary): for date, x, y in zip(dates, xs, ys): if primary: c = cmap_primary(norm(date)) else: c = cmap_secondary(norm(date)) ax.plot(x, y, "o", color=c, mew=0.1, ms=3, mec="k") # Then, we will make 3D plots of the orbit so that we can square with what we think is happening. # The final crowning grace will be a 3D matplotlib plot of the orbital path. def test_B_rel_A(): # Plot the Orbits fig, ax = plt.subplots(nrows=1, figsize=(5,5)) plot_points(ax, dates, alpha_dec_ABs[:,0], alpha_dec_ABs[:,1], False) ax.plot(0,0, "*k", ms=2) ax.plot(peri_BA[0], peri_BA[1], "ko", ms=3) ax.plot(asc_BA[0], asc_BA[1], "o", color="C2", ms=3) ax.set_xlabel(r"$\Delta \delta$ mas") ax.set_ylabel(r"$\Delta \alpha \cos \delta $ mas") ax.set_aspect("equal", "datalim") fig.savefig(outdir + "orbit_B_rel_A.png") # Make a series of astrometric plots from different angles. def test_AB_Z(): # Now plot A and B together, viewed from the Z axis fig, ax = plt.subplots(nrows=1, figsize=(5,5)) ax.plot(0,0, "ok", ms=2) plot_points(ax, dates, alpha_dec_As[:,0], alpha_dec_As[:,1], True) plot_points(ax, dates, alpha_dec_Bs[:,0], alpha_dec_Bs[:,1], False) ax.plot(peri_A[0], peri_A[1], "ko", ms=3) ax.plot(peri_B[0], peri_B[1], "ko", ms=3) ax.plot(asc_A[0], asc_A[1], "^", color="C0", ms=3) ax.plot(asc_B[0], asc_B[1], "^", color="C1", ms=3) ax.set_xlabel(r"$\Delta \delta$ mas") ax.set_ylabel(r"$\Delta \alpha \cos \delta$ mas") ax.set_aspect("equal", "datalim") fig.subplots_adjust(left=0.15, right=0.85, bottom=0.15, top=0.85) # Plot A and B together, viewed from the observer (along -Z axis). 
fig.savefig(outdir + "orbit_AB_Z.png") def test_AB_X(): # Now plot A and B together, viewed from the X axis # This means Y will form the "X" axis, or North # And Z will form the Y axis, or towards observer fig, ax = plt.subplots(nrows=1, figsize=(5,5)) ax.plot(0,0, "ok", ms=2) plot_points(ax, dates, alpha_dec_As[:,1], alpha_dec_As[:,2], True) plot_points(ax, dates, alpha_dec_Bs[:,1], alpha_dec_Bs[:,2], False) ax.plot(peri_A[1], peri_A[2], "ko", ms=3) ax.plot(peri_B[1], peri_B[2], "ko", ms=3) ax.plot(asc_A[1], asc_A[2], "^", color="C0", ms=3) ax.plot(asc_B[1], asc_B[2], "^", color="C1", ms=3) ax.set_xlabel(r"$\Delta \alpha \cos \delta$ mas") ax.set_ylabel(r"$\Delta Z$ mas (towards observer)") ax.axhline(0, ls=":", color="k") ax.set_aspect("equal", "datalim") fig.savefig(outdir + "orbit_AB_X.png") def test_AB_Y(): # Now plot A and B together, viewed from the Y axis # This means Z will form the "X" axis, or towards the observer # And X will form the Y axis, or East fig, ax = plt.subplots(nrows=1, figsize=(5,5)) ax.plot(0,0, "ok", ms=2) plot_points(ax, dates, alpha_dec_As[:,2], alpha_dec_As[:,0], True) plot_points(ax, dates, alpha_dec_Bs[:,2], alpha_dec_Bs[:,0], False) ax.plot(peri_A[2], peri_A[0], "ko", ms=3) ax.plot(peri_B[2], peri_B[0], "ko", ms=3) ax.plot(asc_A[2], asc_A[0], "^", color="C0", ms=3) ax.plot(asc_B[2], asc_B[0], "^", color="C1", ms=3) ax.axvline(0, ls=":", color="k") ax.set_xlabel(r"$\Delta Z$ mas (towards observer)") ax.set_ylabel(r"$\Delta \delta$ mas") ax.set_aspect("equal", "datalim") fig.savefig(outdir + "orbit_AB_Y.png") def test_RV(): # Plot velocities, rho, and theta as function of time for one period fig, ax = plt.subplots(nrows=4, sharex=True, figsize=(8,8)) ax[0].plot(dates, vAs) # ax[0].errorbar(rv_jds_A, vAs_data, yerr=vAs_err, ls="") # ax[0].plot(rv_jds_A, vAs_data, "k.") ax[0].set_ylabel(r"$v_A$ km/s") ax[1].plot(dates, vBs) # ax[1].errorbar(rv_jds_B, vBs_data, yerr=vBs_err, ls="") # ax[1].plot(rv_jds_B, vBs_data, "k.") ax[1].set_ylabel(r"$v_B$ km/s") ax[2].plot(dates, rho_ABs) # ax[2].errorbar(astro_jds, rho_data, yerr=rho_err, ls="") # ax[2].plot(astro_jds, rho_data, "k.") ax[2].set_ylabel(r"$\rho_\mathrm{AB}$ [mas]") ax[3].plot(dates, theta_ABs) # ax[3].errorbar(astro_jds, theta_data, yerr=theta_err, ls="") # ax[3].plot(astro_jds, theta_data, "k.") ax[3].set_ylabel(r"$\theta$ [deg]") ax[-1].set_xlabel("date") fig.savefig(outdir + "orbit_vel_rho_theta_one_period.png", dpi=400) # Now make a 3D Orbit and pop it up def test_3D_plane_rel(): # Plot the orbits in the plane fig, ax = plt.subplots(nrows=1, figsize=(5,5)) plot_points(ax, dates, xy_ABs[:,0], xy_ABs[:,1], False) ax.plot(0,0, "*k", ms=10) ax.set_xlabel(r"$X$ [AU]") ax.set_ylabel(r"$Y$ [AU]") ax.set_aspect("equal", "datalim") fig.savefig(outdir + "orbit_B_rel_A_plane.png") def test_3D_plane(): fig, ax = plt.subplots(nrows=1, figsize=(5,5)) plot_points(ax, dates, xy_As[:,0], xy_As[:,1], True) plot_points(ax, dates, xy_Bs[:,0], xy_Bs[:,1], False) ax.plot(0,0, "ko", ms=10) ax.set_xlabel(r"$X$ [AU]") ax.set_ylabel(r"$Y$ [AU]") ax.set_aspect("equal", "datalim") fig.savefig(outdir + "orbit_AB_plane.png") # Redo this using a finer space series of dates spanning the full series of observations. 
# Pick a span of dates for the observations dates = np.linspace(2446630, 2452010, num=3000) # [day] orb = orbit_astrometry.Binary(a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=dates) polar_dict = orb.get_orbit() vAs, vBs, rho_ABs, theta_ABs = [polar_dict[key] for key in ("vAs", "vBs", "rhos", "thetas")] # Convert to sky coordinates, using distance rho_ABs = rho_ABs/dpc # [arcsec] def test_vel_rho_theta(): # Plot velocities, rho, and theta as function of time fig, ax = plt.subplots(nrows=4, sharex=True, figsize=(12,8)) ax[0].plot(dates, vAs) ax[0].errorbar(rv_jds_A, vAs_data, yerr=vAs_err, ls="") ax[0].plot(rv_jds_A, vAs_data, "k.") ax[0].set_ylabel(r"$v_A$ km/s") ax[1].plot(dates, vBs) ax[1].errorbar(rv_jds_B, vBs_data, yerr=vBs_err, ls="") ax[1].plot(rv_jds_B, vBs_data, "k.") ax[1].set_ylabel(r"$v_B$ km/s") ax[2].plot(dates, rho_ABs) ax[2].errorbar(astro_jds, rho_data, yerr=rho_err, ls="") ax[2].plot(astro_jds, rho_data, "k.") ax[2].set_ylabel(r"$\rho_\mathrm{AB}$ [mas]") ax[3].plot(dates, theta_ABs) ax[3].errorbar(astro_jds, theta_data, yerr=theta_err, ls="") ax[3].plot(astro_jds, theta_data, "k.") ax[3].set_ylabel(r"$\theta$ [deg]") ax[-1].set_xlabel("date") fig.savefig(outdir + "orbit_vel_rho_theta.png", dpi=400) def test_phase_RV(): # Plot the phase-folded RVs fig, ax = plt.subplots(nrows=1, figsize=(8,8)) P = orb.P T0 = orb.T0 print("P", P) print("K_A", orb.K) print("q", orb.q) phase = ((dates - T0) % (2*P)) / P rv_phase_A = ((rv_jds_A - T0) % P) / P rv_phase_B = ((rv_jds_B - T0) % P) / P # sort according to phase ind = np.argsort(phase) ax.plot(phase[ind], vAs[ind], color="k") ax.plot(phase[ind], vBs[ind], color="r", ls="--") ax.plot(rv_phase_A, vAs_data, "k.") ax.plot(rv_phase_B, vBs_data, "r.") ax.set_xlabel("phase") ax.set_ylabel(r"$v$ [km/s]") ax.set_xlim(-0.2, 1.2) fig.savefig(outdir + "orbit_rv_phase.png", dpi=300) def test_RV_zoomed(): # remake a zoomed version of this figure to compare to the plot in Tokovinin. P = orb.P T0 = orb.T0 print("P", P) print("K_A", orb.K) print("q", orb.q) phase = ((dates - T0) % (2*P)) / P rv_phase_A = ((rv_jds_A - T0) % P) / P rv_phase_B = ((rv_jds_B - T0) % P) / P # sort according to phase ind = np.argsort(phase) fig, ax = plt.subplots(nrows=1, figsize=(8,8)) ax.plot(phase[ind], vAs[ind], color="k") ax.plot(phase[ind], vBs[ind], color="r", ls="--") indA = rv_phase_A < 0.05 indB = rv_phase_B < 0.05 # Shift the points so they show up on our plot rv_phase_A[indA] = 1.0 + rv_phase_A[indA] rv_phase_B[indB] = 1.0 + rv_phase_B[indB] ax.plot(rv_phase_A, vAs_data, "k.") ax.plot(rv_phase_B, vBs_data, "r.") ax.set_xlabel("phase") ax.set_ylabel(r"$v$ [km/s]") ax.set_xlim(0.98, 1.03) fig.savefig(outdir + "orbit_rv_phase_zoom.png", dpi=300) plt.close('all')
iancze/PSOAP
tests/test_orbit_astrometry_41Dra.py
Python
mit
11,690
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ The L{_response} module contains constants for all standard HTTP codes, along with a mapping to the corresponding phrases. """ from __future__ import division, absolute_import import string from twisted.trial import unittest from twisted.web import _responses class ResponseTests(unittest.TestCase): def test_constants(self): """ All constants besides C{RESPONSES} defined in L{_response} are integers and are keys in C{RESPONSES}. """ for sym in dir(_responses): if sym == 'RESPONSES': continue if all((c == '_' or c in string.ascii_uppercase) for c in sym): val = getattr(_responses, sym) self.assertIsInstance(val, int) self.assertIn(val, _responses.RESPONSES)
Architektor/PySnip
venv/lib/python2.7/site-packages/twisted/web/test/test_web__responses.py
Python
gpl-3.0
877
# Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from st2common.util.monkey_patch import monkey_patch monkey_patch() import ssl import random import unittest2 import eventlet from bson.objectid import ObjectId from kombu.mixins import ConsumerMixin from kombu import Exchange from kombu import Queue from oslo_config import cfg from st2common.transport.publishers import PoolPublisher from st2common.transport.utils import _get_ssl_kwargs from st2common.transport import utils as transport_utils from st2common.models.db.liveaction import LiveActionDB __all__ = ["TransportUtilsTestCase"] class QueueConsumer(ConsumerMixin): def __init__(self, connection, queue): self.connection = connection self.queue = queue self.received_messages = [] def get_consumers(self, Consumer, channel): return [ Consumer( queues=[self.queue], accept=["pickle"], callbacks=[self.process_task] ) ] def process_task(self, body, message): self.received_messages.append((body, message)) message.ack() class TransportUtilsTestCase(unittest2.TestCase): def tearDown(self): super(TransportUtilsTestCase, self).tearDown() cfg.CONF.set_override(name="compression", group="messaging", override=None) def test_publish_compression(self): live_action_db = LiveActionDB() live_action_db.id = ObjectId() live_action_db.status = "succeeded" live_action_db.action = "core.local" live_action_db.result = {"foo": "bar"} exchange = Exchange("st2.execution.test", type="topic") queue_name = "test-" + str(random.randint(1, 10000)) queue = Queue( name=queue_name, exchange=exchange, routing_key="#", auto_delete=True ) publisher = PoolPublisher() with transport_utils.get_connection() as connection: connection.connect() watcher = QueueConsumer(connection=connection, queue=queue) watcher_thread = eventlet.greenthread.spawn(watcher.run) # Give it some time to start up since we are publishing on a new queue eventlet.sleep(0.5) self.assertEqual(len(watcher.received_messages), 0) # 1. Verify compression is off as a default publisher.publish(payload=live_action_db, exchange=exchange) eventlet.sleep(0.2) self.assertEqual(len(watcher.received_messages), 1) self.assertEqual( watcher.received_messages[0][1].properties["content_type"], "application/x-python-serialize", ) self.assertEqual( watcher.received_messages[0][1].properties["content_encoding"], "binary" ) self.assertEqual( watcher.received_messages[0][1].properties["application_headers"], {} ) self.assertEqual(watcher.received_messages[0][0].id, live_action_db.id) # 2. 
Verify config level option is used cfg.CONF.set_override(name="compression", group="messaging", override="zstd") publisher.publish(payload=live_action_db, exchange=exchange) eventlet.sleep(0.2) self.assertEqual(len(watcher.received_messages), 2) self.assertEqual( watcher.received_messages[1][1].properties["content_type"], "application/x-python-serialize", ) self.assertEqual( watcher.received_messages[1][1].properties["content_encoding"], "binary" ) self.assertEqual( watcher.received_messages[1][1].properties["application_headers"], {"compression": "application/zstd"}, ) self.assertEqual(watcher.received_messages[1][0].id, live_action_db.id) # 2. Verify argument level option is used and has precedence over config one cfg.CONF.set_override(name="compression", group="messaging", override="zstd") publisher.publish(payload=live_action_db, exchange=exchange, compression="gzip") eventlet.sleep(0.2) self.assertEqual(len(watcher.received_messages), 3) self.assertEqual( watcher.received_messages[2][1].properties["content_type"], "application/x-python-serialize", ) self.assertEqual( watcher.received_messages[2][1].properties["content_encoding"], "binary" ) self.assertEqual( watcher.received_messages[2][1].properties["application_headers"], {"compression": "application/x-gzip"}, ) self.assertEqual(watcher.received_messages[2][0].id, live_action_db.id) watcher_thread.kill() def test_get_ssl_kwargs(self): # 1. No SSL kwargs provided ssl_kwargs = _get_ssl_kwargs() self.assertEqual(ssl_kwargs, {}) # 2. ssl kwarg provided ssl_kwargs = _get_ssl_kwargs(ssl=True) self.assertEqual(ssl_kwargs, {"ssl": True}) # 3. ssl_keyfile provided ssl_kwargs = _get_ssl_kwargs(ssl_keyfile="/tmp/keyfile") self.assertEqual(ssl_kwargs, {"ssl": True, "keyfile": "/tmp/keyfile"}) # 4. ssl_certfile provided ssl_kwargs = _get_ssl_kwargs(ssl_certfile="/tmp/certfile") self.assertEqual(ssl_kwargs, {"ssl": True, "certfile": "/tmp/certfile"}) # 5. ssl_ca_certs provided ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs="/tmp/ca_certs") self.assertEqual(ssl_kwargs, {"ssl": True, "ca_certs": "/tmp/ca_certs"}) # 6. ssl_ca_certs and ssl_cert_reqs combinations ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs="/tmp/ca_certs", ssl_cert_reqs="none") self.assertEqual( ssl_kwargs, {"ssl": True, "ca_certs": "/tmp/ca_certs", "cert_reqs": ssl.CERT_NONE}, ) ssl_kwargs = _get_ssl_kwargs( ssl_ca_certs="/tmp/ca_certs", ssl_cert_reqs="optional" ) self.assertEqual( ssl_kwargs, {"ssl": True, "ca_certs": "/tmp/ca_certs", "cert_reqs": ssl.CERT_OPTIONAL}, ) ssl_kwargs = _get_ssl_kwargs( ssl_ca_certs="/tmp/ca_certs", ssl_cert_reqs="required" ) self.assertEqual( ssl_kwargs, {"ssl": True, "ca_certs": "/tmp/ca_certs", "cert_reqs": ssl.CERT_REQUIRED}, )
StackStorm/st2
st2common/tests/unit/test_transport.py
Python
apache-2.0
6,878
#!/usr/bin/env python # # Copyright 2010 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, blks2 import scipy, pylab def main(): N = 1000000 fs = 8000 freqs = [100, 200, 300, 400, 500] nchans = 7 sigs = list() for fi in freqs: s = gr.sig_source_c(fs, gr.GR_SIN_WAVE, fi, 1) sigs.append(s) taps = gr.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100) print "Num. Taps = %d (taps per filter = %d)" % (len(taps), len(taps)/nchans) filtbank = gr.pfb_synthesis_filterbank_ccf(nchans, taps) head = gr.head(gr.sizeof_gr_complex, N) snk = gr.vector_sink_c() tb = gr.top_block() tb.connect(filtbank, head, snk) for i,si in enumerate(sigs): tb.connect(si, (filtbank, i)) tb.run() if 1: f1 = pylab.figure(1) s1 = f1.add_subplot(1,1,1) s1.plot(snk.data()[1000:]) fftlen = 2048 f2 = pylab.figure(2) s2 = f2.add_subplot(1,1,1) winfunc = scipy.blackman s2.psd(snk.data()[10000:], NFFT=fftlen, Fs = nchans*fs, noverlap=fftlen/4, window = lambda d: d*winfunc(fftlen)) pylab.show() if __name__ == "__main__": main()
pgoeser/gnuradio
gnuradio-examples/python/pfb/synth_filter.py
Python
gpl-3.0
2,042
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. # MIT License. See license.txt from __future__ import unicode_literals import webnotes, json from webnotes.widgets import reportview @webnotes.whitelist() def get_data(module, doctypes='[]'): doctypes = json.loads(doctypes) return { "reports": get_report_list(module), "item_count": get_count(doctypes) } def get_count(doctypes): count = {} can_read = webnotes.user.get_can_read() for d in doctypes: if d in can_read: count[d] = get_doctype_count_from_table(d) return count def get_doctype_count_from_table(doctype): try: count = reportview.execute(doctype, fields=["count(*)"], as_list=True)[0][0] except Exception, e: if e.args[0]==1146: count = None else: raise e return count def get_report_list(module): """return list on new style reports for modules""" return webnotes.conn.sql(""" select distinct tabReport.name, tabReport.ref_doctype as doctype, if((tabReport.report_type='Query Report' or tabReport.report_type='Script Report'), 1, 0) as is_query_report from `tabReport`, `tabDocType` where tabDocType.module=%s and tabDocType.name = tabReport.ref_doctype and tabReport.docstatus in (0, NULL) and ifnull(tabReport.is_standard, "No")="No" and ifnull(tabReport.disabled,0) != 1 order by tabReport.name""", module, as_dict=True)
rohitw1991/latestadbwnf
webnotes/widgets/moduleview.py
Python
mit
1,363
import asyncio from functools import partial class AsyncWrapper: """Proxy that exposes a wrapped object's blocking methods as awaitable calls executed in an executor.""" def __init__(self, target_instance, executor=None): self._target_inst = target_instance self._loop = asyncio.get_event_loop() self._executor = executor def __getattribute__(self, name): try: return super().__getattribute__(name) except AttributeError: # fall back to the wrapped instance and defer its method to the executor method = self._target_inst.__getattribute__(name) return partial(self._async_wrapper, method) async def _async_wrapper(self, method, *args, **kwargs): blocking_call = partial(method, *args, **kwargs) return await self._loop.run_in_executor(self._executor, blocking_call)
KeepSafe/translation-real-time-validaton
notifier/executor.py
Python
apache-2.0
708
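Because the wrapper above works purely through attribute interception, a usage sketch may help; the Slow class, its fetch method, and the thread-pool size below are illustrative assumptions.

import asyncio
import time
from concurrent.futures import ThreadPoolExecutor

class Slow:
    def fetch(self, n):
        time.sleep(1)      # stand-in for blocking I/O
        return n * 2

async def main():
    wrapper = AsyncWrapper(Slow(), executor=ThreadPoolExecutor(max_workers=2))
    result = await wrapper.fetch(21)   # Slow.fetch runs in the thread pool
    print(result)                      # -> 42

asyncio.get_event_loop().run_until_complete(main())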
#!/usr/bin/env python import sys import argparse import logging from Bio import SeqIO logging.basicConfig(level=logging.INFO) log = logging.getLogger() def drop_id(fasta_file=None): for rec in SeqIO.parse(fasta_file, "fasta"): rec.description = "" ind = str(rec.seq).find("##") if ( ind != -1 ): # This method causes mid-file comments (such as from Apollo sequences) to be appended to the end of the previous sequence rec.seq = rec.seq[0:ind] yield rec if __name__ == "__main__": parser = argparse.ArgumentParser(description="Identify shine-dalgarno sequences") parser.add_argument("fasta_file", type=argparse.FileType("r"), help="Genbank file") args = parser.parse_args() for rec in drop_id(**vars(args)): SeqIO.write([rec], sys.stdout, "fasta")
TAMU-CPT/galaxy-tools
tools/fasta/fasta_remove_id.py
Python
gpl-3.0
846
import sqlalchemy from sqlalchemy import Column, Integer, String from sqlalchemy.orm import mapper, sessionmaker import subprocess class PygrationState(object): '''Python object representing the state table''' def __init__(self, migration=None, step_id=None, step_name=None): self.migration = migration self.step_id = step_id self.step_name = step_name self.sequence = None self.add_state = None self.simdrop_state = None self.drop_state = None def __repr__(self): return "<PygrationState(%s, %s)>" % (self.migration, self.step_id) class Table(object): metadata = sqlalchemy.MetaData() engine = None pygration_state = None @classmethod def define(cls, schema=None): cls.pygration_state = sqlalchemy.Table('pygration_state', cls.metadata , Column('migration', String(length=160), primary_key=True) , Column('step_id', String(length=160), primary_key=True) , Column('step_name', String(length=160)) , Column('sequence', Integer) , Column('add_state', String(length=16)) , Column('simdrop_state', String(length=16)) , Column('drop_state', String(length=16)) , schema=schema ) class FileLoader(object): '''Object for running SQL from a file on the file system''' def __init__(self, binary, args = [], formatting_dict = {}): self._binary = binary self._args = [arg.format(filename="{filename}", **formatting_dict) for arg in args] def __call__(self, filename): args = [arg.format(filename=filename) for arg in self._args] print self._binary, args subprocess.check_call([self._binary] + args) def open(url=None, drivername=None, schema=None, username=None, password=None, host=None, port=None, database=None, query=None): """Open the DB through a SQLAlchemy engine. Returns an open session. """ if url is None and drivername is None: raise Exception("Either a url or a driver name is required to open a db connection") if url is None: url = sqlalchemy.engine.url.URL(drivername = drivername, username = username, password = password, host = host, port = port, database = database, query = query) Table.engine = sqlalchemy.create_engine(url) Table.metadata.bind = Table.engine Session = sessionmaker() Session.configure(bind=Table.engine) session = Session() Table.define(schema) mapper(PygrationState, Table.pygration_state) return session
mdg/pygrate
pygration/db.py
Python
apache-2.0
2,877
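A short sketch of how the open() helper and FileLoader above might be driven follows; the connection settings, schema, and psql arguments are illustrative assumptions rather than values taken from the project.

# Open a session against a hypothetical PostgreSQL instance and inspect the state table.
session = open(drivername="postgresql", username="pygration", password="secret",
               host="localhost", port=5432, database="appdb", schema="public")
for state in session.query(PygrationState).order_by(PygrationState.sequence):
    print state.migration, state.step_id, state.add_state

# Run a SQL file through the psql client; {filename} is substituted at call time.
loader = FileLoader("psql", args=["-d", "{database}", "-f", "{filename}"],
                    formatting_dict={"database": "appdb"})
loader("migrations/001_add_users.sql")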
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tacker.sol_refactored.objects import base from tacker.sol_refactored.objects import fields # NFV-SOL 003 # - v3.3.1 5.5.3.13 (API version: 2.0.0) @base.TackerObjectRegistry.register class AffectedVnfcV2(base.TackerObject, base.TackerObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.StringField(nullable=False), 'vduId': fields.StringField(nullable=False), 'vnfdId': fields.StringField(nullable=True), 'changeType': fields.EnumField( valid_values=[ 'ADDED', 'REMOVED', 'MODIFIED', 'TEMPORARY', ], nullable=False, ), 'computeResource': fields.ObjectField( 'ResourceHandle', nullable=False), 'resourceDefinitionId': fields.StringField(nullable=True), 'zoneId': fields.StringField(nullable=True), 'metadata': fields.KeyValuePairsField(nullable=True), 'affectedVnfcCpIds': fields.ListOfStringsField(nullable=True), 'addedStorageResourceIds': fields.ListOfStringsField(nullable=True), 'removedStorageResourceIds': fields.ListOfStringsField(nullable=True), }
openstack/tacker
tacker/sol_refactored/objects/v2/affected_vnfc.py
Python
apache-2.0
1,888
# Copyright 2008-2009 WebDriver committers # Copyright 2008-2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Exceptions that may happen in all the webdriver code.""" class WebDriverException(Exception): def __init__(self, msg=None, screen=None, stacktrace=None): self.msg = msg self.screen = screen self.stacktrace = stacktrace def __str__(self): exception_msg = "Message: %s " % repr(self.msg) if self.screen is not None: exception_msg = "%s; Screenshot: available via screen " \ % exception_msg if self.stacktrace is not None: exception_msg = "%s; Stacktrace: %s " \ % (exception_msg, str(self.stacktrace)) return exception_msg class ErrorInResponseException(WebDriverException): """An error has occurred on the server side. This may happen when communicating with the firefox extension or the remote driver server.""" def __init__(self, response, msg): WebDriverException.__init__(self, msg) self.response = response class InvalidSwitchToTargetException(WebDriverException): """The frame or window target to be switched doesn't exist.""" pass class NoSuchFrameException(InvalidSwitchToTargetException): pass class NoSuchWindowException(InvalidSwitchToTargetException): pass class NoSuchElementException(WebDriverException): """find_element_by_* can't find the element.""" pass class NoSuchAttributeException(WebDriverException): """find_element_by_* can't find the element.""" pass class StaleElementReferenceException(WebDriverException): """Indicates that a reference to an element is now "stale" --- the element no longer appears on the DOM of the page.""" pass class InvalidElementStateException(WebDriverException): pass class NoAlertPresentException(WebDriverException): pass class ElementNotVisibleException(InvalidElementStateException): """Thrown to indicate that although an element is present on the DOM, it is not visible, and so is not able to be interacted with.""" pass class ElementNotSelectableException(InvalidElementStateException): pass class InvalidCookieDomainException(WebDriverException): """Thrown when attempting to add a cookie under a different domain than the current URL.""" pass class UnableToSetCookieException(WebDriverException): """Thrown when a driver fails to set a cookie.""" pass class RemoteDriverServerException(WebDriverException): pass class TimeoutException(WebDriverException): """Thrown when a command does not complete in enough time.""" pass class UnexpectedTagNameException(WebDriverException): """Thrown when a support class did not get an expected web element""" pass class InvalidSelectiorException(NoSuchElementException): """ Thrown when the selector which is used to find an element does not return a WebElement. Currently this only happens when the selector is an xpath expression is used which is either syntactically invalid (i.e. it is not a xpath expression) or the expression does not select WebElements (e.g. "count(//input)"). """ pass class ImeNotAvailableException(WebDriverException): """ Indicates that IME support is not available. 
This exception is thrown for every IME-related method call if IME support is not available on the machine. """ pass class ImeActivationFailedException(WebDriverException): """ Indicates that activating an IME engine has failed. """ pass
softak/webfaction_demo
vendor-local/lib/python/selenium/common/exceptions.py
Python
bsd-3-clause
4,099
#ImportModules import ShareYourSystem as SYS #Definition a Getter MyGetter=SYS.GetterClass() MyGetter.MyInt=1 #print print("MyGetter['MyFloat'] is ") print(MyGetter['MyFloat']) print('\n') #print print("MyGetter['MyInterfacer'] is ") print(MyGetter['MyInterfacer']) print('\n') #print print("MyGetter.get('MyStr',_NewBool=False).GettedValueVariable is ") print(MyGetter.get('MyStr',_NewBool=False).GettedValueVariable) print('\n') #print print('MyGetter is ') SYS._print(MyGetter)
Ledoux/ShareYourSystem
Pythonlogy/build/lib/ShareYourSystem/Standards/Itemizers/Getter/05_ExampleDoc.py
Python
mit
491
#!/usr/bin/env python # Copyright 2009-2016 Thomas Paviot ([email protected]) ## # This file is part of pythonOCC. ## # pythonOCC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. ## # pythonOCC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. ## # You should have received a copy of the GNU Lesser General Public License # along with pythonOCC. If not, see <http://www.gnu.org/licenses/>. import logging import os import sys from typing import Any, Callable, List, Optional, Tuple from OCC import VERSION from OCC.Display.backend import load_backend, get_qt_modules from OCC.Display.OCCViewer import OffscreenRenderer log = logging.getLogger(__name__) def check_callable(_callable: Callable) -> None: if not callable(_callable): raise AssertionError("The function supplied is not callable") def init_display( backend_str: Optional[str] = None, size: Optional[Tuple[int, int]] = (1024, 768), display_triedron: Optional[bool] = True, background_gradient_color1: Optional[List[int]] = [206, 215, 222], background_gradient_color2: Optional[List[int]] = [128, 128, 128], ): """This function loads and initialize a GUI using either wx, pyq4, pyqt5 or pyside. If ever the environment variable PYTHONOCC_OFFSCREEN_RENDERER, then the GUI is simply ignored and an offscreen renderer is returned. init_display returns 4 objects : * display : an instance of Viewer3d ; * start_display : a function (the GUI mainloop) ; * add_menu : a function that creates a menu in the GUI * add_function_to_menu : adds a menu option In case an offscreen renderer is returned, start_display and add_menu are ignored, i.e. an empty function is returned (named do_nothing). add_function_to_menu just execute the function taken as a paramter. Note : the offscreen renderer is used on the travis side. """ if size is None: # prevent size to being None (mypy) raise AssertionError("window size cannot be None") if os.getenv("PYTHONOCC_OFFSCREEN_RENDERER") == "1": # create the offscreen renderer offscreen_renderer = OffscreenRenderer() def do_nothing(*kargs: Any, **kwargs: Any) -> None: """takes as many parameters as you want, ans does nothing """ pass def call_function(s, func: Callable) -> None: """A function that calls another function. Helpfull to bypass add_function_to_menu. 
s should be a string """ check_callable(func) log.info("Execute %s :: %s menu fonction" % (s, func.__name__)) func() log.info("done") # returns empty classes and functions return offscreen_renderer, do_nothing, do_nothing, call_function used_backend = load_backend(backend_str) # wxPython based simple GUI if used_backend == "wx": import wx from OCC.Display.wxDisplay import wxViewer3d print("wxPython backend - ", wx.version()) class AppFrame(wx.Frame): def __init__(self, parent): wx.Frame.__init__( self, parent, -1, "pythonOCC-%s 3d viewer ('wx' backend)" % VERSION, style=wx.DEFAULT_FRAME_STYLE, size=size, ) self.canva = wxViewer3d(self) self.menuBar = wx.MenuBar() self._menus = {} self._menu_methods = {} def add_menu(self, menu_name: str) -> None: _menu = wx.Menu() self.menuBar.Append(_menu, "&" + menu_name) self.SetMenuBar(self.menuBar) self._menus[menu_name] = _menu def add_function_to_menu(self, menu_name: str, _callable: Callable) -> None: # point on curve _id = wx.NewId() check_callable(_callable) try: self._menus[menu_name].Append( _id, _callable.__name__.replace("_", " ").lower() ) except KeyError: raise ValueError("the menu item %s does not exist" % menu_name) self.Bind(wx.EVT_MENU, _callable, id=_id) app = wx.App(False) win = AppFrame(None) win.Show(True) wx.SafeYield() win.canva.InitDriver() app.SetTopWindow(win) display = win.canva._display def add_menu(*args, **kwargs) -> None: win.add_menu(*args, **kwargs) def add_function_to_menu(*args, **kwargs) -> None: win.add_function_to_menu(*args, **kwargs) def start_display() -> None: app.MainLoop() # Qt based simple GUI elif "qt" in used_backend: from OCC.Display.qtDisplay import qtViewer3d QtCore, QtGui, QtWidgets, QtOpenGL = get_qt_modules() # check Qt version qt_version = None if hasattr(QtCore, "QT_VERSION_STR"): # PyQt5 qt_version = QtCore.QT_VERSION_STR elif hasattr(QtCore, "__version__"): # PySide2 qt_version = QtCore.__version__ print("%s backend - Qt version %s" % (used_backend, qt_version)) class MainWindow(QtWidgets.QMainWindow): def __init__(self, *args: Any) -> None: QtWidgets.QMainWindow.__init__(self, *args) self.canva = qtViewer3d(self) self.setWindowTitle( "pythonOCC-%s 3d viewer ('%s' backend)" % (VERSION, used_backend) ) self.setCentralWidget(self.canva) if sys.platform != "darwin": self.menu_bar = self.menuBar() else: # create a parentless menubar # see: http://stackoverflow.com/questions/11375176/qmenubar-and-qmenu-doesnt-show-in-mac-os-x?lq=1 # noticeable is that the menu ( alas ) is created in the # topleft of the screen, just # next to the apple icon # still does ugly things like showing the "Python" menu in # bold self.menu_bar = QtWidgets.QMenuBar() self._menus = {} self._menu_methods = {} # place the window in the center of the screen, at half the # screen size self.centerOnScreen() def centerOnScreen(self) -> None: """Centers the window on the screen.""" resolution = QtWidgets.QApplication.desktop().screenGeometry() x = (resolution.width() - self.frameSize().width()) / 2 y = (resolution.height() - self.frameSize().height()) / 2 self.move(x, y) def add_menu(self, menu_name: str) -> None: _menu = self.menu_bar.addMenu("&" + menu_name) self._menus[menu_name] = _menu def add_function_to_menu(self, menu_name: str, _callable: Callable) -> None: check_callable(_callable) try: _action = QtWidgets.QAction( _callable.__name__.replace("_", " ").lower(), self ) # if not, the "exit" action is now shown... 
_action.setMenuRole(QtWidgets.QAction.NoRole) _action.triggered.connect(_callable) self._menus[menu_name].addAction(_action) except KeyError: raise ValueError("the menu item %s does not exist" % menu_name) # following couple of lines is a tweak to enable ipython --gui='qt' app = QtWidgets.QApplication.instance() # checks if QApplication already exists if not app: # create QApplication if it doesnt exist app = QtWidgets.QApplication(sys.argv) win = MainWindow() win.resize(size[0] - 1, size[1] - 1) win.show() win.centerOnScreen() win.canva.InitDriver() win.resize(size[0], size[1]) win.canva.qApp = app display = win.canva._display def add_menu(*args, **kwargs) -> None: win.add_menu(*args, **kwargs) def add_function_to_menu(*args, **kwargs) -> None: win.add_function_to_menu(*args, **kwargs) def start_display() -> None: win.raise_() # make the application float to the top app.exec_() if display_triedron: display.display_triedron() if background_gradient_color1 and background_gradient_color2: # background gradient display.set_bg_gradient_color( background_gradient_color1, background_gradient_color2 ) return display, start_display, add_menu, add_function_to_menu
tpaviot/pythonocc-core
src/Display/SimpleGui.py
Python
lgpl-3.0
9307
import os
import re
from queue import Queue
from shutil import which
from unittest.case import skipIf

from bears.python.PyLintBear import PyLintBear
from tests.LocalBearTestHelper import LocalBearTestHelper
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting


@skipIf(which('pylint') is None, 'PyLint is not installed')
class PyLintBearTest(LocalBearTestHelper):
    def setUp(self):
        self.section = Section("test section")
        self.uut = PyLintBear(self.section, Queue())
        self.test_file = os.path.join(os.path.dirname(__file__),
                                      "test_files",
                                      "pylint_test.py")
        self.rc_file = os.path.join(os.path.dirname(__file__),
                                    "test_files",
                                    "pylint_config")

    def test_run(self):
        self.section.append(Setting("pylint_disable", ""))
        self.check_validity(
            self.uut,
            [],  # Doesn't matter, pylint will parse the file
            self.test_file,
            valid=False)

        # This is a special case because there's only one result yielded.
        # This was a bug once where the last result got ignored.
        self.section.append(Setting("pylint_disable", "E0211,W0611,C0111"))
        self.check_validity(self.uut, [], self.test_file, valid=False)

        self.section.append(
            Setting("pylint_disable", "E0211,W0611,C0111,W0311"))
        self.check_validity(self.uut, [], self.test_file)

        self.section.append(Setting("pylint_disable", "all"))
        self.check_validity(self.uut, [], self.test_file)

        self.section.append(Setting("pylint_enable", "C0111"))
        self.check_validity(self.uut, [], self.test_file, valid=False)

        self.section.append(Setting("pylint_cli_options", "--disable=all"))
        self.check_validity(self.uut, [], self.test_file)

    def test_rcfile(self):
        self.section.append(Setting("pylint_rcfile", re.escape(self.rc_file)))
        self.check_validity(self.uut, [], self.test_file)
sals1275/coala-bears
tests/python/PyLintBearTest.py
Python
agpl-3.0
2109
import itertools import os import pprint import re from math import isclose import lmfit import numpy as np import pandas as pd import peakutils as pku from ImagingReso.resonance import Resonance import ImagingReso._utilities as reso_util from cerberus import Validator import matplotlib.pyplot as plt x_type_list = ['energy', 'lambda', 'time', 'number'] y_type_list = ['transmission', 'attenuation'] t_unit_list = ['s', 'ms', 'us', 'ns'] peak_type_list = ['indexed', 'all'] # peak_type_list = ['indexed', 'all', 'none'] index_level_list = ['iso', 'ele'] peak_model_list = ['Gaussian', 'Lorentzian'] def check_if_in_list(name, name_list): if name not in name_list: raise ValueError("'{}' is not valid, only support: '{}'".format(name, name_list)) def convert_energy_to(x_type, x, offset_us=None, source_to_detector_m=None, t_unit='us', num_offset=0, time_resolution_us=None, t_start_us=None): check_if_in_list(x_type, x_type_list) check_if_in_list(t_unit, t_unit_list) if x_type == 'lambda': x = reso_util.ev_to_angstroms(x) if x_type == 'time': if offset_us is None: raise ValueError("'offset_us=' is required when x_type='time'") if source_to_detector_m is None: raise ValueError("'source_to_detector_m=' is required when x_type='time'") x = reso_util.ev_to_s(offset_us=offset_us, source_to_detector_m=source_to_detector_m, array=x) x = convert_s_to(x=x, t_unit=t_unit) if x_type == 'number': if time_resolution_us is not None: x = reso_util.ev_to_image_number(offset_us=offset_us, source_to_detector_m=source_to_detector_m, array=x, time_resolution_us=time_resolution_us, t_start_us=t_start_us) # x = x + num_offset else: x = np.array(range(len(x))) + num_offset return x def convert_attenuation_to(y_type, y): check_if_in_list(y_type, y_type_list) if y_type == 'transmission': y = 1 - y return np.array(y) def convert_s_to(x, t_unit): if t_unit == 'ns': _x = x * 1e9 elif t_unit == 'us': _x = x * 1e6 elif t_unit == 'ms': _x = x * 1e3 else: _x = x return _x def convert_exp_peak_df(peak_df: pd.DataFrame, x_type, t_unit): check_if_in_list(x_type, x_type_list) check_if_in_list(t_unit, t_unit_list) if x_type == 'energy': assert 'x' in peak_df.columns _x = peak_df['x'] elif x_type == 'lambda': assert 'x_A' in peak_df.columns _x = peak_df['x_A'] elif x_type == 'time': assert 'x_s' in peak_df.columns _x = peak_df['x_s'] _x = convert_s_to(x=_x, t_unit=t_unit) else: assert 'x_num_o' in peak_df.columns _x = peak_df['x_num_o'] return _x.values # np.array def check_and_make_dir(current_path, name): _dir_path = os.path.join(current_path, name) if not os.path.exists(_dir_path): os.makedirs(_dir_path) print("Folder: '{}' has been created ".format(_dir_path)) return _dir_path def load_txt_csv(path_to_file): """ Load and format data from .txt or .csv files :param path_to_file: :return: pd.Dataframe """ # Error for file format and existence _format = path_to_file[-4:] if _format not in ['.txt', '.csv']: raise ValueError("File must be in the format of '.txt' or '.csv'") if os.path.exists(path_to_file) is False: raise ValueError( "Can not locate file '{}' in '{}' ".format(os.path.basename(path_to_file), os.path.dirname(path_to_file))) _sep = ',' df = pd.read_csv(path_to_file, sep=_sep, header=None) if type(df[0][0]) is str: # if the first element is still a str, use ',' to pd.read_csv if df[0][0].count('\t') != 0: _sep = '\t' df = pd.read_csv(path_to_file, sep=_sep, header=None) if type(df[0][0]) is str: # if the first element is still a str, skip the row of the 'X' 'Y' axis labels df = pd.read_csv(path_to_file, sep=_sep, header=None, 
skiprows=1) if list(df[0][:4]) == [1, 2, 3, 4]: df[0] = df[1] df.drop(df.columns[1], axis=1, inplace=True) return df def get_foil_density_gcm3(length_mm, width_mm, thickness_mm, mass_g): """ Get density from mass/(L*W*H) :param length_mm: :param width_mm: :param thickness_mm: :param mass_g: :return: density in g/cm^3 """ _mm3_to_cm3 = 0.001 density_gcm3 = mass_g / (length_mm * width_mm * thickness_mm * _mm3_to_cm3) return density_gcm3 def set_plt(ax, fig_title, grid, x_type, y_type, t_unit, logx, logy): check_if_in_list(x_type, x_type_list) check_if_in_list(y_type, y_type_list) ax.set_title(fig_title) if x_type == 'energy': ax.set_xlabel('Energy (eV)') elif x_type == 'lambda': ax.set_xlabel('Wavelength (\u212B)') elif x_type == 'number': ax.set_xlabel('Image number (#)') else: check_if_in_list(t_unit, t_unit_list) if t_unit == 'us': ax.set_xlabel('Time of flight (\u03BCs)') else: ax.set_xlabel('Time of flight ({})'.format(t_unit)) if y_type == 'attenuation': ax.set_ylabel('Neutron attenuation') else: ax.set_ylabel('Neutron transmission') ax.legend(loc='best') # ax1.legend(bbox_to_anchor=(1., 1), loc=2, borderaxespad=0.) # ax1.legend(bbox_to_anchor=(0, 0.93, 1., .102), loc=3, borderaxespad=0.) if grid: # ax1.set_xticks(np.arange(0, 100, 10)) # ax1.set_yticks(np.arange(0, 1., 0.1)) ax.grid() if logx: ax.set_xscale('log') if logy: ax.set_yscale('log') return ax def rm_envelope(y, deg, max_it=None, tol=None): envelope = pku.envelope(y=y, deg=deg, max_it=max_it, tol=tol) # return y + y.max() - envelope return y / envelope class Items(object): """ A easier way to specify layers/elements/isotopes for in plot()/export() """ def __init__(self, o_reso, database='ENDF_VIII'): self.o_reso = o_reso self.shaped_list = None self.database = database def shaped(self, items_list): _shaped_list = [] for _raw_path_to_plot in items_list: if type(_raw_path_to_plot) is not list: if '*' in _raw_path_to_plot: _shaped_list = _shaped_list + _fill_iso_to_items(name=_raw_path_to_plot, stack=self.o_reso.stack, database=self.database) else: _shaped_list.append(_shape_items(_raw_path_to_plot)) else: if len(_raw_path_to_plot) == 1: _raw_path_to_plot = _shape_items(_raw_path_to_plot[0]) _shaped_list.append(_raw_path_to_plot) # Clean duplicates in list _shaped_list = _rm_duplicated_items(_shaped_list) self.shaped_list = _shaped_list return _shaped_list def values(self, y_axis_type='attenuation'): # plot specified from 'items_to_plot' if self.shaped_list is None: raise ValueError("'.shaped_list' is empty, please run '.shaped()' first.") if y_axis_type != 'sigma': _stack = self.o_reso.stack_signal else: _stack = self.o_reso.stack_sigma y_axis_type = 'sigma_b' y_axis_tag = y_axis_type _y_axis_dict = {} for _each_path in self.shaped_list: _label = _each_path[-1] if len(_each_path) == 3: _y_axis_dict[_label] = _stack[_each_path[0]][_each_path[1]][_each_path[2]][y_axis_tag] elif len(_each_path) == 2: _y_axis_dict[_label] = _stack[_each_path[0]][_each_path[1]][y_axis_tag] else: raise ValueError("Format error of '{}', should be in the form of " "['layer', 'element'] or ['layer', 'element', 'isotope']") return _y_axis_dict def _shape_items(name): # input is not structured as required by ImagingReso if type(name) is not str: raise ValueError("'{}' entered is not a string.".format(name)) if len(name) == 0: raise ValueError("'{}' entered has no length.".format(name)) _path_of_input = [] if any(str.isdigit(i) for i in name) is True: # isotopes _parsed = re.findall(r'([A-Z][a-z]*)(\d*)', name) _element_str = _parsed[0][0] _number_str = 
re.findall('\d+', name)[0] _isotope_str = _number_str + '-' + _element_str _path_of_input.append(_element_str) _path_of_input.append(_element_str) _path_of_input.append(_isotope_str) else: # elements if len(name) > 2: raise ValueError("'{}' entered is not a single element symbol.".format(name)) if len(name) == 1: if name.isupper() is False: name = name.upper() _path_of_input.append(name) _path_of_input.append(name) if len(name) == 2: if name[0].isupper() and name[1].islower() is True: _path_of_input.append(name) _path_of_input.append(name) else: raise ValueError("'{}' entered is not a valid element symbol.".format(name)) return _path_of_input def _fill_iso_to_items(name, stack=None, database='ENDF_VII'): if '*' not in name: raise ValueError("'*' is needed to retrieve all isotopes of '{}' ".format(name)) else: ele_name = name.replace('*', '') if stack is None: o_reso = Resonance(database=database) o_reso.add_layer(formula=ele_name, thickness=1) stack = o_reso.stack iso_list = stack[ele_name][ele_name]['isotopes']['list'] _path_to_iso = [] for _each_iso in iso_list: _path_to_iso.append(_shape_items(_each_iso)) return _path_to_iso def _rm_duplicated_items(raw): raw.sort() cleaned_list = list(raw for raw, _ in itertools.groupby(raw)) return cleaned_list # def almostequatl class Layer(object): def __init__(self): self.info = {} def add_Layer(self, layers): for _each_layer in list(layers.info.keys()): self.add_layer(layer=layers.info[_each_layer]['layer'], thickness_mm=layers.info[_each_layer]['thickness'], density_gcm3=layers.info[_each_layer]['density']) def add_layer(self, layer, thickness_mm, density_gcm3=None): # Input Validation _input = {'layer': layer, 'thickness': thickness_mm, 'density': density_gcm3, } schema = {'layer': {'type': 'string', 'required': True, }, 'thickness': {'type': 'number', 'required': True, }, 'density': {'type': 'number', 'required': True, 'nullable': True, }, } v = Validator(schema) if v.validate(_input) is False: raise ValueError(v.errors) _formula = re.findall(r'([A-Z][a-z]*)(\d*)', layer) _elements = [] for _element in _formula: _single_element = list(_element)[0] _elements.append(_single_element) # raise error if input is contains more than one element for single layer. if len(_elements) > 1: raise ValueError("Please enter single element as layer in string. 
Example: 'Gd' or 'U'") if density_gcm3 is not None: self.info[layer] = {'layer': layer, 'thickness': {'value': thickness_mm, 'units': 'mm', }, 'density': {'value': density_gcm3, 'units': 'g/cm3', }, 'molar_mass': {'value': None, 'units': None, }, 'molar_conc': {'value': None, 'units': None, }, } else: self.info[layer] = {'layer': layer, 'thickness': {'value': thickness_mm, 'units': 'mm', }, 'density': {'value': np.NaN, 'units': 'g/cm3', }, 'molar_mass': {'value': None, 'units': None, }, 'molar_conc': {'value': None, 'units': None, }, } def pprint(self): pprint.pprint(self.info) def find_peak(y, x=None, x_name='x_num', y_name='y', thres=0.015, min_dist=1, imprv_reso=False): if x is None: x = np.array(range(len(y))) # Note: weirdly, indexes have to be reset here to get correct peak locations x = np.array(x) y = np.array(y) _index = pku.indexes(y=y, thres=thres, min_dist=min_dist) if len(_index) != 0: _peak_y = list(y[_index]) if imprv_reso is False: _peak_x = list(x[_index]) else: _peak_x = list(pku.interpolate(x, y, ind=_index)) else: # No peaks detected _peak_y = [] _peak_x = [] peak_df = pd.DataFrame() peak_df[x_name] = _peak_x peak_df[y_name] = _peak_y peak_df.sort_values([x_name], inplace=True) peak_df.reset_index(inplace=True, drop=True) return peak_df def index_peak(peak_dict, peak_map_dict, rel_tol): num_peak_indexed = 0 _peak_map = peak_map_dict['peak_map'] _peak_df = peak_dict['df'] _names = _peak_map.keys() peak_map_indexed = {} for _peak_name in _names: _df = pd.DataFrame() _df_ideal = pd.DataFrame() peak_map_indexed[_peak_name] = {} _peak_x = _peak_map[_peak_name]['ideal']['x'] _peak_y = _peak_map[_peak_name]['ideal']['y'] _x_num_indexed_list = [] _x_indexed_list = [] _y_indexed_list = [] _x_ideal_list = [] _y_ideal_list = [] for _i in range(len(_peak_df['x'])): for _j in range(len(_peak_x)): if peak_map_dict['y_type'] == 'attenuation': if isclose(_peak_x[_j], _peak_df['x'][_i], rel_tol=rel_tol) and _peak_y[_j] >= _peak_df['y'][_i]: _x_num_indexed_list.append(_peak_df['x_num'][_i]) _x_indexed_list.append(_peak_df['x'][_i]) _y_indexed_list.append(_peak_df['y'][_i]) _x_ideal_list.append(_peak_x[_j]) _y_ideal_list.append(_peak_y[_j]) else: if isclose(_peak_x[_j], _peak_df['x'][_i], rel_tol=rel_tol) and _peak_y[_j] <= _peak_df['y'][_i]: _x_num_indexed_list.append(_peak_df['x_num'][_i]) _x_indexed_list.append(_peak_df['x'][_i]) _y_indexed_list.append(_peak_df['y'][_i]) _x_ideal_list.append(_peak_x[_j]) _y_ideal_list.append(_peak_y[_j]) num_peak_indexed += len(_x_indexed_list) _df['x_num'] = _x_num_indexed_list _df['x'] = _x_indexed_list _df['y'] = _y_indexed_list _df_ideal['x'] = _x_ideal_list _df_ideal['y'] = _y_ideal_list peak_map_indexed[_peak_name]['exp'] = _df peak_map_indexed[_peak_name]['ideal'] = _df_ideal peak_map_indexed_dict = { 'peak_map_indexed': peak_map_indexed, 'x_type': peak_map_dict['x_type'], 'y_type': peak_map_dict['y_type'], } return peak_map_indexed_dict class ResoPeak(object): def __init__(self, y, x, y_type, x_type, img_num): """ Initialization """ self.peak_dict = None self.peak_map_indexed_dict = None self.y_type = y_type self.x_type = x_type self.shape_report = None self.prefix_list = None self.x = x self.y = y self.img_num = img_num def find_peak(self, thres, min_dist, imprv_reso: bool): _peak_dict = self._find_peak(y=self.y, x=self.x, thres=thres, min_dist=min_dist, imprv_reso=imprv_reso) _peak_dict['x_type'] = self.x_type _peak_dict['df']['y'] = convert_attenuation_to(y_type=self.y_type, y=_peak_dict['df']['y']) _peak_dict['y_type'] = self.y_type 
self.peak_dict = _peak_dict return _peak_dict def _find_peak(self, y: np.array, thres, min_dist, imprv_reso: bool, x=None): """""" if x is None: x = np.array(range(len(y))) else: x = np.array(x) if x.shape != y.shape: raise ValueError("The length ({}) of 'x' is not equal the length ({}) of 'y'".format(len(x), len(y))) peak_index = pku.indexes(y=y, thres=thres, min_dist=min_dist) if len(peak_index) != 0: _peak_x_num = self.img_num[peak_index] _peak_y = y[peak_index] if imprv_reso: _peak_x = pku.interpolate(x, y, ind=peak_index) else: _peak_x = x[peak_index] else: # No peaks detected _peak_x_num = [] _peak_x = [] _peak_y = [] peak_df = pd.DataFrame() peak_df['x_num'] = _peak_x_num peak_df['x'] = _peak_x peak_df['y'] = _peak_y peak_dict = { 'df': peak_df } self.peak_dict = peak_dict return peak_dict def index_peak(self, peak_map_dict, rel_tol): if self.peak_dict is None: raise ValueError("Please identify peak use 'Peak.find()' before indexing.") self.peak_map_indexed_dict = index_peak(peak_dict=self.peak_dict, peak_map_dict=peak_map_dict, rel_tol=rel_tol) def analyze(self, report=False, fit_model='Lorentzian'): check_if_in_list(fit_model, peak_model_list) # print(self.img_num) _peak_map_indexed_dict = self.peak_map_indexed_dict _peak_map_indexed = _peak_map_indexed_dict['peak_map_indexed'] _y = self.y _x = self.img_num model = lmfit.models.GaussianModel(prefix='bkg_') pars = model.guess(_y, x=_x) self.prefix_list = [] for _ele in _peak_map_indexed.keys(): if '-' not in _ele: for _ind in range(len(_peak_map_indexed[_ele]['exp'])): _prefix = _ele + '_' + str(_ind) + '_' if fit_model == 'Gaussian': _model = lmfit.models.GaussianModel(prefix=_prefix) else: # fit_model == 'Lorentzian': _model = lmfit.models.LorentzianModel(prefix=_prefix) _center = _peak_map_indexed[_ele]['exp']['x_num'][_ind] pars.update(_model.make_params()) pars[_prefix + 'amplitude'].value = 1 pars[_prefix + 'center'].set(_center, min=_center - 100, max=_center + 100) # pars[_prefix + 'center'].set(_center) pars[_prefix + 'sigma'].set(2.0, min=0.5, max=20) # pars[_prefix + 'sigma'].set(2.0) model += _model self.prefix_list.append(_prefix) _out = model.fit(_y, pars, x=_x) self.shape_report = _out self.__fwhm() self.__fill_img_num_to_peak_map_indexed() print("+------------ Peak analysis ------------+\n{} peak fitting:".format(fit_model)) print("{}\n".format(self.fwhm_df)) if report is True: print(_out.fit_report()) def plot_fit(self): if self.shape_report is not None: self.shape_report.plot() plt.show() else: print("Peaks have not been fitted. 
Please run 'Peak.analyze()' before plotting.") def __fwhm(self): _fwhm_df = pd.DataFrame() # generate ele list for _fwhm_df _ele_list = [_ele_name.split('_')[0] for _ele_name in self.prefix_list] _prefix_list = self.prefix_list _values = self.shape_report.__dict__['params'].valuesdict() pars_center_name = [_i + 'center' for _i in _prefix_list] pars_fwhm_name = [_i + 'fwhm' for _i in _prefix_list] pars_center_value = [_values[_name] for _name in pars_center_name] pars_fwhm_value = [_values[_name] for _name in pars_fwhm_name] _fwhm_df['ele_name'] = _ele_list _fwhm_df['center_val'] = pars_center_value _fwhm_df['fwhm_val'] = pars_fwhm_value _fwhm_df.sort_values(['center_val'], inplace=True) _fwhm_df.reset_index(inplace=True, drop=True) self.fwhm_df = _fwhm_df def __fill_img_num_to_peak_map_indexed(self): _peak_map_indexed = self.peak_map_indexed_dict['peak_map_indexed'] _fwhm_df = self.fwhm_df _df = pd.DataFrame() _df['x_num'] = self.img_num _df['x'] = self.x _df['y'] = self.y _df.set_index('x_num', inplace=True) for _ele in _peak_map_indexed.keys(): _peak_map_indexed[_ele]['peak_span'] = {} _img_num_list = [] _peak_span_df = pd.DataFrame() for _ind in range(len(_fwhm_df)): if _fwhm_df['ele_name'][_ind] == _ele: half_fwhm = _fwhm_df['fwhm_val'][_ind] / 2 _min = _fwhm_df['center_val'][_ind] - half_fwhm # _min = _fwhm_df['center_val'][_ind] - half_fwhm + self.x_num_gap _max = _fwhm_df['center_val'][_ind] + half_fwhm # _max = _fwhm_df['center_val'][_ind] + half_fwhm + self.x_num_gap _min = int(np.floor(_min)) _max = int(np.ceil(_max)) + 1 _img_num_list += [a for a in range(_min, _max)] _peak_span_df['x_num'] = _img_num_list _peak_span_df['x'] = list(_df['x'].reindex(_img_num_list)) _peak_span_df['y'] = list(_df['y'].reindex(_img_num_list)) _peak_span_df['y'] = convert_attenuation_to(y_type=self.y_type, y=_peak_span_df['y']) _peak_map_indexed[_ele]['peak_span'] = _peak_span_df self.peak_map_indexed_dict['peak_map_indexed'] = _peak_map_indexed # def a_new_decorator(a_func): # @wraps(a_func) # def wrapTheFunction(): # print("I am doing some boring work before executing a_func()") # a_func() # print("I am doing some boring work after executing a_func()") # # return wrapTheFunction # # # @a_new_decorator # def a_function_requiring_decoration(): # """Hey yo! 
Decorate me!""" # print("I am the function which needs some decoration to " # "remove my foul smell") # # # class Plot(object): # def __init__(self, logfile='out.log'): # self.logfile = logfile # # def __call__(self, func): # log_string = func.__name__ + " was called" # print(log_string) # # Open the logfile and append # with open(self.logfile, 'a') as opened_file: # # Now we log to the specified logfile # opened_file.write(log_string + '\n') # # Now, send a notification # self.notify() # # def notify(self): # # logit only logs, no more # pass # # # class Export(object): # def __init__(self, logfile='out.log'): # self.logfile = logfile # # def __call__(self, func): # log_string = func.__name__ + " was called" # print(log_string) # # Open the logfile and append # with open(self.logfile, 'a') as opened_file: # # Now we log to the specified logfile # opened_file.write(log_string + '\n') # # Now, send a notification # self.notify() # # def notify(self): # # logit only logs, no more # pass # # # class Logit(object): # def __init__(self, logfile='out.log'): # self.logfile = logfile # # def __call__(self, func): # log_string = func.__name__ + " was called" # print(log_string) # # Open the logfile and append # with open(self.logfile, 'a') as opened_file: # # Now we log to the specified logfile # opened_file.write(log_string + '\n') # # Now, send a notification # self.notify() # # def notify(self): # # logit only logs, no more # pass
ornlneutronimaging/ResoFit
ResoFit/_utilities.py
Python
bsd-3-clause
25822
import numpy as np from explauto.utils import rand_bounds, bounds_min_max, softmax_choice, prop_choice from explauto.utils.config import make_configuration from learning_module import LearningModule class Supervisor(object): def __init__(self, config, model_babbling="random", n_motor_babbling=0, explo_noise=0.1, choice_eps=0.2, proba_imitate=0.5, tau=1.): self.config = config self.model_babbling = model_babbling self.n_motor_babbling = n_motor_babbling self.explo_noise = explo_noise self.choice_eps = choice_eps self.proba_imitate = proba_imitate self.conf = make_configuration(**config) self.t = 0 self.modules = {} self.chosen_modules = [] self.goals = [] self.cp_evolution = {} self.pp_evolution = {} self.mid_control = None self.last_cmd = None # Define motor and sensory spaces: m_ndims = self.conf.m_ndims # number of motor parameters self.arm_n_dims = 21 self.diva_n_dims = 28 self.m_arm = range(self.arm_n_dims) self.m_diva = range(self.arm_n_dims,self.arm_n_dims + self.diva_n_dims) self.m_space = range(m_ndims) self.c_dims = range(m_ndims, m_ndims+10) self.s_hand = range(m_ndims+10, m_ndims+20) self.s_tool = range(m_ndims+20, m_ndims+30) self.s_toy1 = range(m_ndims+30, m_ndims+40) self.s_toy2 = range(m_ndims+40, m_ndims+50) self.s_toy3 = range(m_ndims+50, m_ndims+60) self.s_self_sound = range(m_ndims+60, m_ndims+70) self.s_caregiver_sound = range(m_ndims+70, m_ndims+80) #self.s_caregiver = range(m_ndims+70, m_ndims+80) self.s_spaces = dict(s_hand=self.s_hand, s_tool=self.s_tool, s_toy1=self.s_toy1, s_toy2=self.s_toy2, s_toy3=self.s_toy3, s_self_sound=self.s_self_sound, s_caregiver_sound=self.s_caregiver_sound) # Create the learning modules: self.modules['mod1'] = LearningModule("mod1", self.m_arm, self.s_hand, self.conf, explo_noise=self.explo_noise) self.modules['mod2'] = LearningModule("mod2", self.m_arm, self.s_tool, self.conf, explo_noise=self.explo_noise) self.modules['mod3'] = LearningModule("mod3", self.m_arm, self.c_dims[2:4] + self.s_toy1, self.conf, context_mode=dict(mode='mcs', context_dims=[2, 3], context_n_dims=2, context_sensory_bounds=[[-1.]*2,[1.]*2]), explo_noise=self.explo_noise) self.modules['mod4'] = LearningModule("mod4", self.m_arm, self.c_dims[4:6] + self.s_toy2, self.conf, context_mode=dict(mode='mcs', context_dims=[4, 5], context_n_dims=2, context_sensory_bounds=[[-1.]*2,[1.]*2]), explo_noise=self.explo_noise) self.modules['mod5'] = LearningModule("mod5", self.m_arm, self.c_dims[6:8] + self.s_toy3, self.conf, context_mode=dict(mode='mcs', context_dims=[6, 7], context_n_dims=2, context_sensory_bounds=[[-1.]*2,[1.]*2]), explo_noise=self.explo_noise) self.modules['mod6'] = LearningModule("mod6", self.m_arm, self.c_dims[2:8] + self.s_caregiver_sound, self.conf, context_mode=dict(mode='mcs', context_dims=[2, 3, 4, 5, 6, 7], context_n_dims=6, context_sensory_bounds=[[-1.]*6,[1.]*6]), explo_noise=self.explo_noise) self.modules['mod10'] = LearningModule("mod10", self.m_diva, self.c_dims[2:4] + self.s_toy1, self.conf, context_mode=dict(mode='mcs', context_dims=[2, 3], context_n_dims=2, context_sensory_bounds=[[-1.]*2,[1.]*2]), explo_noise=self.explo_noise) self.modules['mod11'] = LearningModule("mod11", self.m_diva, self.c_dims[4:6] + self.s_toy2, self.conf, context_mode=dict(mode='mcs', context_dims=[4, 5], context_n_dims=2, context_sensory_bounds=[[-1.]*2,[1.]*2]), explo_noise=self.explo_noise) self.modules['mod12'] = LearningModule("mod12", self.m_diva, self.c_dims[6:8] + self.s_toy3, self.conf, context_mode=dict(mode='mcs', context_dims=[6, 7], context_n_dims=2, 
context_sensory_bounds=[[-1.]*2,[1.]*2]), explo_noise=self.explo_noise) self.modules['mod13'] = LearningModule("mod13", self.m_diva, self.s_self_sound, self.conf, explo_noise=self.explo_noise) self.modules['mod14'] = LearningModule("mod14", self.m_diva, self.s_caregiver_sound, self.conf, imitate=["mod6", "mod14"], explo_noise=self.explo_noise) for mid in self.modules.keys(): self.cp_evolution[mid] = [] self.pp_evolution[mid] = [] self.count_arm = 0 self.count_diva = 0 self.mids = ["mod"+ str(i) for i in range(1, 15) if "mod"+ str(i) in self.modules.keys()] def mid2motor_space(self, mid): if mid in ["mod"+ str(i) for i in range(1, 8)]: return "arm" else: return "diva" def save(self): sm_data = {} im_data = {} for mid in self.modules.keys(): sm_data[mid] = self.modules[mid].sensorimotor_model.save() im_data[mid] = self.modules[mid].interest_model.save() return { #"sm_data":sm_data, #"im_data":im_data, #"goals":self.goals, "chosen_modules":np.array(self.chosen_modules, dtype=np.int8), #"cp_evolution":self.cp_evolution, #"pp_evolution":self.pp_evolution, "imitated_sounds":self.modules['mod14'].imitated_sounds} def choose_babbling_module(self): interests = {} for mid in self.modules.keys(): interests[mid] = self.modules[mid].interest() if self.model_babbling == 'random': mid = np.random.choice(interests.keys()) elif self.model_babbling == 'hand_object_sound': if np.random.random() < 1. / 3.: mid = 'mod1' elif np.random.random() < 1. / 2.: mid = np.random.choice(['mod2', 'mod3', 'mod4', 'mod5', 'mod10', 'mod11', 'mod12']) else: mid = np.random.choice(['mod6', 'mod13', 'mod14']) elif self.model_babbling == 'object_sound': if np.random.random() < 1. / 2.: mid = np.random.choice(['mod1', 'mod2', 'mod3', 'mod4', 'mod5', 'mod10', 'mod11', 'mod12']) else: mid = np.random.choice(['mod6', 'mod13', 'mod14']) elif self.model_babbling == 'greedy': if np.random.random() < self.choice_eps: mid = np.random.choice(interests.keys()) else: mid = max(interests, key=interests.get) elif self.model_babbling == 'softmax': temperature = self.choice_eps w = interests.values() mid = self.modules.keys()[softmax_choice(w, temperature)] elif self.model_babbling == 'prop': w = interests.values() mid = self.modules.keys()[prop_choice(w, eps=self.choice_eps)] self.chosen_modules.append(int(mid[3:])) return mid def eval_mode(self): self.sm_modes = {} for mod in self.modules.values(): self.sm_modes[mod.mid] = mod.sensorimotor_model.mode mod.sensorimotor_model.mode = 'exploit' def learning_mode(self): for mod in self.modules.values(): mod.sensorimotor_model.mode = self.sm_modes[mod.mid] def motor_primitive(self, m): return m def sensory_primitive(self, s): return s def get_m(self, ms): return ms[self.conf.m_dims] def get_s(self, ms): return ms[self.conf.s_dims] def motor_babbling(self, arm=False, audio=False): self.m = rand_bounds(self.conf.m_bounds)[0] if not self.modules.has_key("mod13"): arm = True if not self.modules.has_key("mod1"): audio = True if arm: r = 1. self.last_cmd = "arm" elif audio: r = 0. self.last_cmd = "diva" else: r = np.random.random() if r < 0.5: self.m[:self.arm_n_dims] = 0. self.last_cmd = "diva" else: self.m[self.arm_n_dims:] = 0. 
self.last_cmd = "arm" return self.m def set_ms(self, m, s): return np.array(list(m) + list(s)) def update_sensorimotor_models(self, ms): for mid in self.modules.keys(): if self.last_cmd == self.mid2motor_space(mid): self.modules[mid].update_sm(self.modules[mid].get_m(ms), self.modules[mid].get_s(ms)) def produce(self, context): if self.t < self.n_motor_babbling: self.mid_control = None self.chosen_modules.append(0) self.goals.append(None) return self.motor_babbling() else: mid = self.choose_babbling_module() # print "babbling module:", mid, # if mid == "mod3": # print "violet toy" # elif mid == "mod4": # print "green toy" # elif mid == "mod5": # print "yellow toy" # else: # print self.mid_control = mid mid_c = self.modules[mid].get_c(context) if self.modules[mid].context_mode else None if self.modules[mid].imitate is not None: m = self.modules[mid].produce(context=mid_c, imitate_sm=[self.modules[m].sm.model.imodel.fmodel.dataset for m in self.modules[mid].imitate]) else: m = self.modules[mid].produce(context=mid_c) self.goals.append(np.array(self.modules[mid].x, dtype=np.float16)) if self.mid2motor_space(mid) == "arm": self.last_cmd = "arm" self.m = list(m) + [0.]*self.diva_n_dims self.count_arm += 1 else: self.last_cmd = "diva" self.m = [0.]*self.arm_n_dims + list(m) self.count_diva += 1 return self.m def perceive(self, s): s = self.sensory_primitive(s) ms = self.set_ms(self.m, s) self.update_sensorimotor_models(ms) if self.mid_control is not None: self.modules[self.mid_control].update_im(self.modules[self.mid_control].get_m(ms), self.modules[self.mid_control].get_s(ms)) self.t = self.t + 1 if self.t % 100 == 0: for mid in self.modules.keys(): self.cp_evolution[mid].append(self.modules[mid].interest_model.current_competence_progress) self.pp_evolution[mid].append(self.modules[mid].interest_model.current_prediction_progress) if self.t % 1000 == 0: self.print_stats() def print_stats(self): print "\n----------------\nAgent Statistics\n----------------\n" print "#Iterations:", self.t print for mid in self.mids: print "# Chosen module", mid, ":", self.chosen_modules.count(int(mid[3:])) print for mid in self.mids: print "Competence progress of", mid, ": " if mid in ["mod10", "mod11", "mod12", "mod13", "mod14"] else " : ", self.modules[mid].interest_model.current_competence_progress print for mid in self.mids: print "Prediction progress of", mid, ": " if mid in ["mod10", "mod11", "mod12", "mod13", "mod14"] else " : ", self.modules[mid].interest_model.current_prediction_progress print "#Arm trials", self.count_arm print "#Vocal trials", self.count_diva print
sebastien-forestier/PLAY2017
play/learning/supervisor.py
Python
gpl-3.0
11933
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator
from django.http import Http404
from django.template.response import TemplateResponse

from wagtail.core.models import Page


def content_type_use(request, content_type_app_name, content_type_model_name):
    try:
        content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
    except ContentType.DoesNotExist:
        raise Http404

    page_class = content_type.model_class()

    # page_class must be a Page type and not some other random model
    if not issubclass(page_class, Page):
        raise Http404

    pages = page_class.objects.all()

    paginator = Paginator(pages, per_page=10)
    pages = paginator.get_page(request.GET.get('p'))

    return TemplateResponse(request, 'wagtailadmin/pages/content_type_use.html', {
        'pages': pages,
        'app_name': content_type_app_name,
        'content_type': content_type,
        'page_class': page_class,
    })
FlipperPA/wagtail
wagtail/admin/views/pages/usage.py
Python
bsd-3-clause
1032
#!/usr/bin/env python import sys import gzip import paddle.v2 as paddle ### Parameters word_vector_dim = 620 latent_chain_dim = 1000 beam_size = 5 max_length = 50 def seq2seq_net(source_dict_dim, target_dict_dim, generating=False): ''' Define the network structure of NMT, including encoder and decoder. :param source_dict_dim: size of source dictionary :type source_dict_dim : int :param target_dict_dim: size of target dictionary :type target_dict_dim: int ''' decoder_size = encoder_size = latent_chain_dim #### Encoder src_word_id = paddle.layer.data( name='source_language_word', type=paddle.data_type.integer_value_sequence(source_dict_dim)) src_embedding = paddle.layer.embedding( input=src_word_id, size=word_vector_dim) # use bidirectional_gru encoded_vector = paddle.networks.bidirectional_gru( input=src_embedding, size=encoder_size, fwd_act=paddle.activation.Tanh(), fwd_gate_act=paddle.activation.Sigmoid(), bwd_act=paddle.activation.Tanh(), bwd_gate_act=paddle.activation.Sigmoid(), return_seq=True) #### Decoder encoder_last = paddle.layer.last_seq(input=encoded_vector) encoder_last_projected = paddle.layer.mixed( size=decoder_size, act=paddle.activation.Tanh(), input=paddle.layer.full_matrix_projection(input=encoder_last)) # gru step def gru_decoder_without_attention(enc_vec, current_word): ''' Step function for gru decoder :param enc_vec: encoded vector of source language :type enc_vec: layer object :param current_word: current input of decoder :type current_word: layer object ''' decoder_mem = paddle.layer.memory( name='gru_decoder', size=decoder_size, boot_layer=encoder_last_projected) context = paddle.layer.last_seq(input=enc_vec) decoder_inputs = paddle.layer.mixed( size=decoder_size * 3, input=[ paddle.layer.full_matrix_projection(input=context), paddle.layer.full_matrix_projection(input=current_word) ]) gru_step = paddle.layer.gru_step( name='gru_decoder', act=paddle.activation.Tanh(), gate_act=paddle.activation.Sigmoid(), input=decoder_inputs, output_mem=decoder_mem, size=decoder_size) out = paddle.layer.mixed( size=target_dict_dim, bias_attr=True, act=paddle.activation.Softmax(), input=paddle.layer.full_matrix_projection(input=gru_step)) return out decoder_group_name = "decoder_group" group_input1 = paddle.layer.StaticInput(input=encoded_vector, is_seq=True) group_inputs = [group_input1] if not generating: trg_embedding = paddle.layer.embedding( input=paddle.layer.data( name='target_language_word', type=paddle.data_type.integer_value_sequence(target_dict_dim)), size=word_vector_dim, param_attr=paddle.attr.ParamAttr(name='_target_language_embedding')) group_inputs.append(trg_embedding) decoder = paddle.layer.recurrent_group( name=decoder_group_name, step=gru_decoder_without_attention, input=group_inputs) lbl = paddle.layer.data( name='target_language_next_word', type=paddle.data_type.integer_value_sequence(target_dict_dim)) cost = paddle.layer.classification_cost(input=decoder, label=lbl) return cost else: trg_embedding = paddle.layer.GeneratedInput( size=target_dict_dim, embedding_name='_target_language_embedding', embedding_size=word_vector_dim) group_inputs.append(trg_embedding) beam_gen = paddle.layer.beam_search( name=decoder_group_name, step=gru_decoder_without_attention, input=group_inputs, bos_id=0, eos_id=1, beam_size=beam_size, max_length=max_length) return beam_gen def train(source_dict_dim, target_dict_dim): ''' Training function for NMT :param source_dict_dim: size of source dictionary :type source_dict_dim: int :param target_dict_dim: size of target dictionary :type 
target_dict_dim: int ''' # initialize model cost = seq2seq_net(source_dict_dim, target_dict_dim) parameters = paddle.parameters.create(cost) # define optimize method and trainer optimizer = paddle.optimizer.RMSProp( learning_rate=1e-3, gradient_clipping_threshold=10.0, regularization=paddle.optimizer.L2Regularization(rate=8e-4)) trainer = paddle.trainer.SGD( cost=cost, parameters=parameters, update_equation=optimizer) # define data reader wmt14_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.wmt14.train(source_dict_dim), buf_size=8192), batch_size=55) # define event_handler callback def event_handler(event): if isinstance(event, paddle.event.EndIteration): if event.batch_id % 100 == 0 and event.batch_id > 0: with gzip.open('models/nmt_without_att_params_batch_%d.tar.gz' % event.batch_id, 'w') as f: parameters.to_tar(f) if event.batch_id % 10 == 0: print "\nPass %d, Batch %d, Cost %f, %s" % ( event.pass_id, event.batch_id, event.cost, event.metrics) else: sys.stdout.write('.') sys.stdout.flush() # start to train trainer.train( reader=wmt14_reader, event_handler=event_handler, num_passes=2) def generate(source_dict_dim, target_dict_dim, init_models_path): ''' Generating function for NMT :param source_dict_dim: size of source dictionary :type source_dict_dim: int :param target_dict_dim: size of target dictionary :type target_dict_dim: int :param init_models_path: path for inital model :type init_models_path: string ''' # load data samples for generation gen_creator = paddle.dataset.wmt14.gen(source_dict_dim) gen_data = [] for item in gen_creator(): gen_data.append((item[0], )) beam_gen = seq2seq_net(source_dict_dim, target_dict_dim, True) with gzip.open(init_models_path) as f: parameters = paddle.parameters.Parameters.from_tar(f) # prob is the prediction probabilities, and id is the prediction word. beam_result = paddle.infer( output_layer=beam_gen, parameters=parameters, input=gen_data, field=['prob', 'id']) # get the dictionary src_dict, trg_dict = paddle.dataset.wmt14.get_dict(source_dict_dim) # the delimited element of generated sequences is -1, # the first element of each generated sequence is the sequence length seq_list, seq = [], [] for w in beam_result[1]: if w != -1: seq.append(w) else: seq_list.append(' '.join([trg_dict.get(w) for w in seq[1:]])) seq = [] prob = beam_result[0] for i in xrange(len(gen_data)): print "\n*******************************************************\n" print "src:", ' '.join([src_dict.get(w) for w in gen_data[i][0]]), "\n" for j in xrange(beam_size): print "prob = %f:" % (prob[i][j]), seq_list[i * beam_size + j] def usage_helper(): print "Please specify training/generating phase!" print "Usage: python nmt_without_attention_v2.py --train/generate" exit(1) def main(): if not (len(sys.argv) == 2): usage_helper() if sys.argv[1] == '--train': generating = False elif sys.argv[1] == '--generate': generating = True else: usage_helper() # initialize paddle paddle.init(use_gpu=False, trainer_count=1) source_language_dict_dim = 30000 target_language_dict_dim = 30000 if generating: # modify this path to speicify a trained model. init_models_path = 'models/nmt_without_att_params_batch_1800.tar.gz' if not os.path.exists(init_models_path): print "trained model cannot be found." exit(1) generate(source_language_dict_dim, target_language_dict_dim, init_models_path) else: if not os.path.exists('./models'): os.system('mkdir ./models') train(source_language_dict_dim, target_language_dict_dim) if __name__ == '__main__': main()
zhaopu7/models
nmt_without_attention/nmt_without_attention.py
Python
apache-2.0
8713
#!/usr/bin/env python3

import torch

from .. import settings


def pivoted_cholesky(matrix, max_iter, error_tol=None):
    from ..lazy import lazify, LazyTensor

    batch_shape = matrix.shape[:-2]
    matrix_shape = matrix.shape[-2:]

    if error_tol is None:
        error_tol = settings.preconditioner_tolerance.value()

    # Need to get diagonals. This is easy if it's a LazyTensor, since
    # LazyTensor.diag() operates in batch mode.
    matrix = lazify(matrix)
    matrix_diag = matrix._approx_diag()

    # Make sure max_iter isn't bigger than the matrix
    max_iter = min(max_iter, matrix_shape[-1])

    # What we're returning
    L = torch.zeros(*batch_shape, max_iter, matrix_shape[-1], dtype=matrix.dtype, device=matrix.device)
    orig_error = torch.max(matrix_diag, dim=-1)[0]
    errors = torch.norm(matrix_diag, 1, dim=-1) / orig_error

    # The permutation
    permutation = torch.arange(0, matrix_shape[-1], dtype=torch.long, device=matrix_diag.device)
    permutation = permutation.repeat(*batch_shape, 1)

    # Get batch indices
    batch_iters = [
        torch.arange(0, size, dtype=torch.long, device=matrix_diag.device)
        .unsqueeze_(-1)
        .repeat(torch.Size(batch_shape[:i]).numel(), torch.Size(batch_shape[i + 1 :]).numel())
        .view(-1)
        for i, size in enumerate(batch_shape)
    ]

    m = 0
    while (m == 0) or (m < max_iter and torch.max(errors) > error_tol):
        permuted_diags = torch.gather(matrix_diag, -1, permutation[..., m:])
        max_diag_values, max_diag_indices = torch.max(permuted_diags, -1)
        max_diag_indices = max_diag_indices + m

        # Swap pi_m and pi_i in each row, where pi_i is the element of the permutation
        # corresponding to the max diagonal element
        old_pi_m = permutation[..., m].clone()
        permutation[..., m].copy_(permutation.gather(-1, max_diag_indices.unsqueeze(-1)).squeeze_(-1))
        permutation.scatter_(-1, max_diag_indices.unsqueeze(-1), old_pi_m.unsqueeze(-1))
        pi_m = permutation[..., m].contiguous()

        L_m = L[..., m, :]  # Will be all zeros -- should we use torch.zeros?
        L_m.scatter_(-1, pi_m.unsqueeze(-1), max_diag_values.sqrt().unsqueeze_(-1))

        row = matrix[(*batch_iters, pi_m.view(-1), slice(None, None, None))]
        if isinstance(row, LazyTensor):
            row = row.evaluate()
        row = row.view(*batch_shape, matrix_shape[-1])

        if m + 1 < matrix_shape[-1]:
            pi_i = permutation[..., m + 1 :].contiguous()

            L_m_new = row.gather(-1, pi_i)
            if m > 0:
                L_prev = L[..., :m, :].gather(-1, pi_i.unsqueeze(-2).repeat(*(1 for _ in batch_shape), m, 1))
                update = L[..., :m, :].gather(-1, pi_m.view(*pi_m.shape, 1, 1).repeat(*(1 for _ in batch_shape), m, 1))
                L_m_new -= torch.sum(update * L_prev, dim=-2)

            L_m_new /= L_m.gather(-1, pi_m.unsqueeze(-1))
            L_m.scatter_(-1, pi_i, L_m_new)

            matrix_diag_current = matrix_diag.gather(-1, pi_i)
            matrix_diag.scatter_(-1, pi_i, matrix_diag_current - L_m_new ** 2)
            L[..., m, :] = L_m

            errors = torch.norm(matrix_diag.gather(-1, pi_i), 1, dim=-1) / orig_error
        m = m + 1

    return L[..., :m, :].transpose(-1, -2).contiguous()
jrg365/gpytorch
gpytorch/utils/pivoted_cholesky.py
Python
mit
3313
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*- ### BEGIN LICENSE # Copyright (c) 2012, Peter Levi <[email protected]> # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License version 3, as published # by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranties of # MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR # PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see <http://www.gnu.org/licenses/>. ### END LICENSE # This is the preferences dialog. from gi.repository import Gtk, Gdk, GObject, GdkPixbuf # pylint: disable=E0611 import io import stat import threading import subprocess from variety.Util import Util from variety import Texts from variety.plugins.IQuoteSource import IQuoteSource from variety_lib import varietyconfig from variety_lib.varietyconfig import get_data_file from variety.FolderChooser import FolderChooser from variety.Options import Options from variety.AddFlickrDialog import AddFlickrDialog from variety.AddMediaRssDialog import AddMediaRssDialog from variety.AddRedditDialog import AddRedditDialog from variety.EditFavoriteOperationsDialog import EditFavoriteOperationsDialog from variety.SmartFeaturesConfirmationDialog import SmartFeaturesConfirmationDialog from variety.LoginOrRegisterDialog import LoginOrRegisterDialog from variety.AddWallhavenDialog import AddWallhavenDialog from variety import _, _u import os import logging import random random.seed() logger = logging.getLogger('variety') from variety_lib.PreferencesDialog import PreferencesDialog UNREMOVEABLE_TYPES = [ Options.SourceType.FAVORITES, Options.SourceType.FETCHED, Options.SourceType.DESKTOPPR, Options.SourceType.BING, Options.SourceType.UNSPLASH, Options.SourceType.APOD, Options.SourceType.EARTH, Options.SourceType.RECOMMENDED, Options.SourceType.LATEST, ] EDITABLE_TYPES = [ Options.SourceType.FLICKR, Options.SourceType.MEDIA_RSS, Options.SourceType.WALLHAVEN, Options.SourceType.REDDIT, ] class PreferencesVarietyDialog(PreferencesDialog): __gtype_name__ = "PreferencesVarietyDialog" def finish_initializing(self, builder, parent): # pylint: disable=E1002 """Set up the preferences dialog""" super(PreferencesVarietyDialog, self).finish_initializing(builder, parent) # Bind each preference widget to gsettings # widget = self.builder.get_object('example_entry') # settings.bind("example", widget, "text", Gio.SettingsBindFlags.DEFAULT) if Gdk.Screen.get_default().get_height() < 750: self.ui.sources_scrolled_window.set_size_request(0, 0) self.ui.hosts_scrolled_window.set_size_request(0, 0) self.ui.tips_scrolled_window.set_size_request(0, 0) PreferencesVarietyDialog.add_image_preview(self.ui.icon_chooser, 64) self.loading = False self.dl_chooser = FolderChooser(self.ui.download_folder_chooser, self.on_downloaded_changed) self.fav_chooser = FolderChooser(self.ui.favorites_folder_chooser, self.on_favorites_changed) self.fetched_chooser = FolderChooser(self.ui.fetched_folder_chooser, self.on_fetched_changed) self.copyto_chooser = FolderChooser(self.ui.copyto_folder_chooser, self.on_copyto_changed) self.slideshow_custom_chooser = FolderChooser(self.ui.slideshow_custom_chooser, self.delayed_apply) try: from varietyslideshow import varietyslideshow except: self.ui.notebook.remove_page(2) 
self.reload() def fill_smart_profile_url(self, msg): if '%SMART_PROFILE_URL%' in msg: profile_url = self.parent.smart.get_profile_url() msg = msg.replace('%SMART_PROFILE_URL%', profile_url) if profile_url else "" return msg def update_status_message(self): msg = "" if self.parent.server_options: try: msg_dict = self.parent.server_options.get("status_message", {}) ver = varietyconfig.get_version() if ver in msg_dict: msg = msg_dict[ver].strip() elif "*" in msg_dict: msg = msg_dict["*"].strip() msg = self.fill_smart_profile_url(msg) except Exception: logger.exception(lambda: "Could not parse status message") msg = "" self.set_status_message(msg) def set_status_message(self, msg): def _update_ui(): self.ui.status_message.set_visible(msg) self.ui.status_message.set_markup(msg) GObject.idle_add(_update_ui) def reload(self): try: logger.info(lambda: "Reloading preferences dialog") self.loading = True self.options = Options() self.options.read() self.ui.autostart.set_active(os.path.isfile(os.path.expanduser(u"~/.config/autostart/variety.desktop"))) self.ui.change_enabled.set_active(self.options.change_enabled) self.set_change_interval(self.options.change_interval) self.ui.change_on_start.set_active(self.options.change_on_start) self.ui.safe_mode.set_active(self.options.safe_mode) self.ui.download_enabled.set_active(self.options.download_enabled) self.set_download_interval(self.options.download_interval) self.dl_chooser.set_folder(os.path.expanduser(self.options.download_folder)) self.update_real_download_folder() self.ui.quota_enabled.set_active(self.options.quota_enabled) self.ui.quota_size.set_text(str(self.options.quota_size)) self.fav_chooser.set_folder(os.path.expanduser(self.options.favorites_folder)) self.fetched_chooser.set_folder(os.path.expanduser(self.options.fetched_folder)) self.ui.clipboard_enabled.set_active(self.options.clipboard_enabled) self.ui.clipboard_use_whitelist.set_active(self.options.clipboard_use_whitelist) self.ui.clipboard_hosts.get_buffer().set_text('\n'.join(self.options.clipboard_hosts)) if self.options.icon == "Light": self.ui.icon.set_active(0) elif self.options.icon == "Dark": self.ui.icon.set_active(1) elif self.options.icon == "Current": self.ui.icon.set_active(2) elif self.options.icon == "None": self.ui.icon.set_active(4) else: self.ui.icon.set_active(3) self.ui.icon_chooser.set_filename(self.options.icon) if self.options.favorites_operations == [["/", "Copy"]]: self.ui.favorites_operations.set_active(0) elif self.options.favorites_operations == [["/", "Move"]]: self.ui.favorites_operations.set_active(1) elif self.options.favorites_operations == [["/", "Both"]]: self.ui.favorites_operations.set_active(2) else: self.ui.favorites_operations.set_active(3) self.favorites_operations = self.options.favorites_operations self.ui.smart_enabled.set_active(self.options.smart_enabled) self.ui.sync_enabled.set_active(self.options.sync_enabled) self.ui.stats_enabled.set_active(self.options.stats_enabled) self.ui.facebook_show_dialog.set_active(self.options.facebook_show_dialog) self.ui.copyto_enabled.set_active(self.options.copyto_enabled) self.copyto_chooser.set_folder(self.parent.get_actual_copyto_folder()) self.ui.desired_color_enabled.set_active(self.options.desired_color_enabled) self.ui.desired_color.set_color(Gdk.Color(red = 160 * 256, green = 160 * 256, blue = 160 * 256)) c = self.options.desired_color if c: self.ui.desired_color.set_color(Gdk.Color(red = c[0] * 256, green = c[1] * 256, blue = c[2] * 256)) 
self.ui.min_size_enabled.set_active(self.options.min_size_enabled) min_sizes = [50, 80, 100] index = 0 while min_sizes[index] < self.options.min_size and index < len(min_sizes) - 1: index += 1 self.ui.min_size.set_active(index) self.ui.landscape_enabled.set_active(self.options.use_landscape_enabled) self.ui.lightness_enabled.set_active(self.options.lightness_enabled) self.ui.lightness.set_active(0 if self.options.lightness_mode == Options.LightnessMode.DARK else 1) self.ui.min_rating_enabled.set_active(self.options.min_rating_enabled) self.ui.min_rating.set_active(self.options.min_rating - 1) self.ui.clock_enabled.set_active(self.options.clock_enabled) self.ui.clock_font.set_font_name(self.options.clock_font) self.ui.clock_date_font.set_font_name(self.options.clock_date_font) self.ui.quotes_enabled.set_active(self.options.quotes_enabled) self.ui.quotes_font.set_font_name(self.options.quotes_font) c = self.options.quotes_text_color self.ui.quotes_text_color.set_color(Gdk.Color(red = c[0] * 256, green = c[1] * 256, blue = c[2] * 256)) c = self.options.quotes_bg_color self.ui.quotes_bg_color.set_color(Gdk.Color(red = c[0] * 256, green = c[1] * 256, blue = c[2] * 256)) self.ui.quotes_bg_opacity.set_value(self.options.quotes_bg_opacity) self.ui.quotes_text_shadow.set_active(self.options.quotes_text_shadow) self.ui.quotes_tags.set_text(self.options.quotes_tags) self.ui.quotes_authors.set_text(self.options.quotes_authors) self.ui.quotes_change_enabled.set_active(self.options.quotes_change_enabled) self.set_quotes_change_interval(self.options.quotes_change_interval) self.ui.quotes_width.set_value(self.options.quotes_width) self.ui.quotes_hpos.set_value(self.options.quotes_hpos) self.ui.quotes_vpos.set_value(self.options.quotes_vpos) self.ui.slideshow_sources_enabled.set_active(self.options.slideshow_sources_enabled) self.ui.slideshow_favorites_enabled.set_active(self.options.slideshow_favorites_enabled) self.ui.slideshow_downloads_enabled.set_active(self.options.slideshow_downloads_enabled) self.ui.slideshow_custom_enabled.set_active(self.options.slideshow_custom_enabled) self.slideshow_custom_chooser.set_folder(os.path.expanduser(self.options.slideshow_custom_folder)) if self.options.slideshow_sort_order == "Random": self.ui.slideshow_sort_order.set_active(0) elif self.options.slideshow_sort_order == "Name, asc": self.ui.slideshow_sort_order.set_active(1) elif self.options.slideshow_sort_order == "Name, desc": self.ui.slideshow_sort_order.set_active(2) elif self.options.slideshow_sort_order == "Date, asc": self.ui.slideshow_sort_order.set_active(3) elif self.options.slideshow_sort_order == "Date, desc": self.ui.slideshow_sort_order.set_active(4) else: self.ui.slideshow_sort_order.set_active(0) self.ui.slideshow_monitor.remove_all() self.ui.slideshow_monitor.append_text(_('All')) screen = Gdk.Screen.get_default() for i in range(0, screen.get_n_monitors()): geo = screen.get_monitor_geometry(i) self.ui.slideshow_monitor.append_text('%d - %s, %dx%d' % (i + 1, screen.get_monitor_plug_name(i), geo.width, geo.height)) self.ui.slideshow_monitor.set_active(0) try: self.ui.slideshow_monitor.set_active(int(self.options.slideshow_monitor)) except: self.ui.slideshow_monitor.set_active(0) if self.options.slideshow_mode == "Fullscreen": self.ui.slideshow_mode.set_active(0) elif self.options.slideshow_mode == "Desktop": self.ui.slideshow_mode.set_active(1) elif self.options.slideshow_mode == "Maximized": self.ui.slideshow_mode.set_active(2) elif self.options.slideshow_mode == "Window": 
self.ui.slideshow_mode.set_active(3) else: self.ui.slideshow_mode.set_active(0) self.ui.slideshow_seconds.set_value(self.options.slideshow_seconds) self.ui.slideshow_fade.set_value(self.options.slideshow_fade) self.ui.slideshow_zoom.set_value(self.options.slideshow_zoom) self.ui.slideshow_pan.set_value(self.options.slideshow_pan) self.ui.sources.get_model().clear() for s in self.options.sources: self.ui.sources.get_model().append(self.source_to_model_row(s)) if not hasattr(self, "enabled_toggled_handler_id"): self.enabled_toggled_handler_id = self.ui.sources_enabled_checkbox_renderer.connect( "toggled", self.source_enabled_toggled, self.ui.sources.get_model()) #self.ui.sources.get_selection().connect("changed", self.on_sources_selection_changed) if hasattr(self, "filter_checkboxes"): for cb in self.filter_checkboxes: self.ui.filters_grid.remove(cb) cb.destroy() self.filter_checkboxes = [] self.filter_name_to_checkbox = {} for i, f in enumerate(self.options.filters): cb = Gtk.CheckButton(Texts.FILTERS.get(f[1], f[1])) self.filter_name_to_checkbox[f[1]] = cb cb.connect("toggled", self.delayed_apply) cb.set_visible(True) cb.set_active(f[0]) cb.set_margin_right(20) self.ui.filters_grid.attach(cb, i % 4, i // 4, 1, 1) self.filter_checkboxes.append(cb) if hasattr(self, "quotes_sources_checkboxes"): for cb in self.quotes_sources_checkboxes: self.ui.quotes_sources_grid.remove(cb) cb.destroy() self.quotes_sources_checkboxes = [] for i, p in enumerate(self.parent.jumble.get_plugins(IQuoteSource)): cb = Gtk.CheckButton(p['info']['name']) cb.connect("toggled", self.delayed_apply) cb.set_visible(True) cb.set_tooltip_text(p['info']['description']) cb.set_active(p['info']['name'] not in self.options.quotes_disabled_sources) cb.set_margin_right(20) self.ui.quotes_sources_grid.attach(cb, i % 4, i // 4, 1, 1) self.quotes_sources_checkboxes.append(cb) self.ui.tips_buffer.set_text('\n\n'.join(Texts.TIPS)) try: with io.open(get_data_file("ui/changes.txt")) as f: self.ui.changes_buffer.set_text(f.read()) except Exception: logger.warning(lambda: "Missing ui/changes.txt file") self.on_smart_user_updated() self.on_change_enabled_toggled() self.on_download_enabled_toggled() self.on_sources_selection_changed() self.on_desired_color_enabled_toggled() self.on_min_size_enabled_toggled() self.on_lightness_enabled_toggled() self.on_min_rating_enabled_toggled() self.on_copyto_enabled_toggled() self.on_quotes_change_enabled_toggled() self.on_icon_changed() self.on_favorites_operations_changed() self.update_clipboard_state() self.build_add_button_menu() self.update_status_message() finally: # To be sure we are completely loaded, pass via two hops: first delay, then idle_add: def _finish_loading(): self.loading = False def _idle_finish_loading(): GObject.idle_add(_finish_loading) timer = threading.Timer(1, _idle_finish_loading) timer.start() def on_add_button_clicked(self, widget=None): def position(*args, **kwargs): button_alloc = self.ui.add_button.get_allocation() window_pos = self.ui.add_button.get_window().get_position() return button_alloc.x + window_pos[0], button_alloc.y + button_alloc.height + window_pos[1], True self.add_menu.popup(None, self.ui.add_button, position, None, 0, Gtk.get_current_event_time()) def on_remove_sources_clicked(self, widget=None): def position(*args, **kwargs): button_alloc = self.ui.remove_sources.get_allocation() window_pos = self.ui.remove_sources.get_window().get_position() return button_alloc.x + window_pos[0], button_alloc.y + button_alloc.height + window_pos[1], True 
self.build_remove_button_menu().popup(None, self.ui.remove_sources, position, None, 0, Gtk.get_current_event_time()) def build_add_button_menu(self): self.add_menu = Gtk.Menu() items = [ (_("Images"), self.on_add_images_clicked), (_("Folders"), self.on_add_folders_clicked), '-', (_("Flickr"), self.on_add_flickr_clicked), (_("Wallhaven.cc"), self.on_add_wallhaven_clicked), (_("Reddit"), self.on_add_reddit_clicked), (_("Media RSS"), self.on_add_mediarss_clicked), ] for x in items: if x == '-': item = Gtk.SeparatorMenuItem.new() else: item = Gtk.MenuItem() item.set_label(x[0]) item.connect("activate", x[1]) self.add_menu.append(item) self.add_menu.show_all() def build_remove_button_menu(self): model, rows = self.ui.sources.get_selection().get_selected_rows() has_downloaders = False for row in rows: type = Options.str_to_type(model[row][1]) if type in Options.SourceType.dl_types and type not in UNREMOVEABLE_TYPES: has_downloaders = True self.remove_menu = Gtk.Menu() item1 = Gtk.MenuItem() item1.set_label(_("Remove the source, keep the files") if len(rows) == 1 else _("Remove the sources, keep the files")) item1.connect("activate", self.remove_sources) self.remove_menu.append(item1) item2 = Gtk.MenuItem() def _remove_with_files(widget=None): self.remove_sources(delete_files=True) item2.set_label(_("Remove the source and delete the downloaded files") if len(rows) == 1 else _("Remove the sources and delete the downloaded files")) item2.connect("activate", _remove_with_files) item2.set_sensitive(has_downloaders) self.remove_menu.append(item2) self.remove_menu.show_all() return self.remove_menu def source_enabled_toggled(self, widget, path, model): row = model[path] row[0] = not row[0] self.on_row_enabled_state_changed(row) def on_row_enabled_state_changed(self, row): # Special case when enabling the Earth downloader: if row[0] and row[1] == Options.type_to_str(Options.SourceType.EARTH): updated = False if not self.ui.change_enabled.get_active(): self.ui.change_enabled.set_active(True) updated = True if self.get_change_interval() > 30 * 60: self.set_change_interval(30 * 60) updated = True if not self.ui.download_enabled.get_active(): self.ui.download_enabled.set_active(True) updated = True if self.get_download_interval() > 30 * 60: self.set_download_interval(30 * 60) updated = True if updated: self.parent.show_notification( _("World Sunlight Map enabled"), _("Using the World Sunlight Map requires both downloading and changing " "enabled at intervals of 30 minutes or less. 
Settings were adjusted automatically.")) # special case when enabling the Recommended or Latest downloader: elif row[0] and row[1] in (Options.type_to_str(Options.SourceType.RECOMMENDED),) and \ not self.parent.options.smart_enabled: row[0] = False self.dialog = SmartFeaturesConfirmationDialog() def _on_ok(button): self.parent.options.smart_enabled = self.dialog.ui.smart_enabled.get_active() self.parent.options.write() self.ui.smart_enabled.set_active(self.parent.options.smart_enabled) if self.parent.options.smart_enabled: row[0] = True self.dialog.ui.btn_ok.connect("clicked", _on_ok) self.dialog.run() self.dialog.destroy() self.dialog = None def set_time(self, interval, text, time_unit, times=(1, 60, 60 * 60, 24 * 60 * 60)): if interval < 5: interval = 5 x = len(times) - 1 while times[x] > interval: x -= 1 text.set_text(str(interval // times[x])) time_unit.set_active(x) return def set_change_interval(self, seconds): self.set_time(seconds, self.ui.change_interval_text, self.ui.change_interval_time_unit) def set_download_interval(self, seconds): self.set_time(seconds, self.ui.download_interval_text, self.ui.download_interval_time_unit, times=(60, 60 * 60, 24 * 60 * 60)) def set_quotes_change_interval(self, seconds): self.set_time(seconds, self.ui.quotes_change_interval_text, self.ui.quotes_change_interval_time_unit) def read_time(self, text_entry, time_unit_combo, minimum, default): result = default try: interval = int(text_entry.get_text()) tree_iter = time_unit_combo.get_active_iter() if tree_iter: model = time_unit_combo.get_model() time_unit_seconds = model[tree_iter][1] result = interval * time_unit_seconds if result < minimum: result = minimum except Exception: logger.exception(lambda: "Could not understand interval") return result def get_change_interval(self): return self.read_time( self.ui.change_interval_text, self.ui.change_interval_time_unit, 5, self.options.change_interval) def get_download_interval(self): return self.read_time( self.ui.download_interval_text, self.ui.download_interval_time_unit, 60, self.options.download_interval) def get_quotes_change_interval(self): return self.read_time( self.ui.quotes_change_interval_text, self.ui.quotes_change_interval_time_unit, 10, self.options.quotes_change_interval) @staticmethod def add_image_preview(chooser, size = 250): preview = Gtk.Image() chooser.set_preview_widget(preview) def update_preview(c): try: file = chooser.get_preview_filename() pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(file, size, size) preview.set_from_pixbuf(pixbuf) chooser.set_preview_widget_active(True) except Exception: chooser.set_preview_widget_active(False) chooser.connect("update-preview", update_preview) def on_add_images_clicked(self, widget=None): chooser = Gtk.FileChooserDialog(_("Add Images"), parent=self, action=Gtk.FileChooserAction.OPEN, buttons=[_("Cancel"), Gtk.ResponseType.CANCEL, _("Add"), Gtk.ResponseType.OK]) self.dialog = chooser PreferencesVarietyDialog.add_image_preview(chooser) chooser.set_select_multiple(True) chooser.set_local_only(True) filter = Gtk.FileFilter() filter.set_name(_("Images")) for s in ["jpg", "jpeg", "png", "bmp", "tiff", "svg"]: filter.add_pattern("*." + s) filter.add_pattern("*." 
+ s.upper()) chooser.add_filter(filter) response = chooser.run() if response == Gtk.ResponseType.OK: images = list(chooser.get_filenames()) images = [f for f in images if Util.is_image(f) and os.path.isfile(f)] self.add_sources(Options.SourceType.IMAGE, images) self.dialog = None chooser.destroy() def on_add_folders_clicked(self, widget=None): chooser = Gtk.FileChooserDialog(_("Add Folders - Only add the root folders, subfolders are searched recursively"), parent=self, action=Gtk.FileChooserAction.SELECT_FOLDER, buttons=[_("Cancel"), Gtk.ResponseType.CANCEL, _("Add"), Gtk.ResponseType.OK]) self.dialog = chooser chooser.set_select_multiple(True) chooser.set_local_only(True) response = chooser.run() if response == Gtk.ResponseType.OK: folders = list(chooser.get_filenames()) folders = [f for f in folders if os.path.isdir(f)] self.add_sources(Options.SourceType.FOLDER, folders) self.dialog = None chooser.destroy() def add_sources(self, type, locations): self.ui.sources.get_selection().unselect_all() existing = {} for i, r in enumerate(self.ui.sources.get_model()): if r[1] == Options.type_to_str(type): if type == Options.SourceType.FOLDER: existing[os.path.normpath(_u(r[2]))] = r, i else: existing[self.model_row_to_source(r)[2]] = r, i newly_added = 0 for f in locations: if type == Options.SourceType.FOLDER or type == Options.SourceType.IMAGE: f = os.path.normpath(f) elif type in UNREMOVEABLE_TYPES: f = list(existing.keys())[0] if existing else None # reuse the already existing location, do not add another one if not f in existing: self.ui.sources.get_model().append(self.source_to_model_row([True, type, f])) self.ui.sources.get_selection().select_path(len(self.ui.sources.get_model()) - 1) self.ui.sources.scroll_to_cell(len(self.ui.sources.get_model()) - 1, None, False, 0, 0) newly_added += 1 else: logger.info(lambda: "Source already exists, activating it: " + f) existing[f][0][0] = True self.ui.sources.get_selection().select_path(existing[f][1]) self.ui.sources.scroll_to_cell(existing[f][1], None, False, 0, 0) return newly_added def focus_source_and_image(self, source, image): self.ui.notebook.set_current_page(0) self.ui.sources.get_selection().unselect_all() for i, r in enumerate(self.ui.sources.get_model()): if self.model_row_to_source(r)[1:] == source[1:]: self.focused_image = image self.ui.sources.get_selection().select_path(i) self.ui.sources.scroll_to_cell(i, None, False, 0, 0) return def remove_sources(self, widget=None, delete_files = False): model, rows = self.ui.sources.get_selection().get_selected_rows() if delete_files: for row in rows: type = Options.str_to_type(model[row][1]) if type in Options.SourceType.dl_types and type not in UNREMOVEABLE_TYPES: source = self.model_row_to_source(model[row]) self.parent.delete_files_of_source(source) # store the treeiters from paths iters = [] for row in rows: if Options.str_to_type(model[row][1]) not in UNREMOVEABLE_TYPES: iters.append(model.get_iter(row)) # remove the rows (treeiters) for i in iters: if i is not None: model.remove(i) def on_source_doubleclicked(self, tree_view, row_index, arg4=None): self.edit_source(self.ui.sources.get_model()[row_index]) def on_edit_source_clicked(self, widget=None): model, rows = self.ui.sources.get_selection().get_selected_rows() if len(rows) == 1: self.edit_source(model[rows[0]]) def on_use_clicked(self, widget=None): model, rows = self.ui.sources.get_selection().get_selected_rows() for row in model: row[0] = False for path in rows: model[path][0] = True for row in model: #TODO we trigger for all rows, 
though some of them don't actually change state - but no problem for now self.on_row_enabled_state_changed(row) self.on_sources_selection_changed() def edit_source(self, edited_row): type = Options.str_to_type(edited_row[1]) if type == Options.SourceType.IMAGE or type == Options.SourceType.FOLDER: subprocess.call(["xdg-open", os.path.realpath(_u(edited_row[2]))]) elif type == Options.SourceType.FAVORITES: subprocess.call(["xdg-open", self.parent.options.favorites_folder]) elif type == Options.SourceType.FETCHED: subprocess.call(["xdg-open", self.parent.options.fetched_folder]) elif type in EDITABLE_TYPES: if type == Options.SourceType.FLICKR: self.dialog = AddFlickrDialog() elif type == Options.SourceType.WALLHAVEN: self.dialog = AddWallhavenDialog() elif type == Options.SourceType.REDDIT: self.dialog = AddRedditDialog() elif type == Options.SourceType.MEDIA_RSS: self.dialog = AddMediaRssDialog() self.dialog.set_edited_row(edited_row) self.show_dialog(self.dialog) def on_sources_selection_changed(self, widget=None): model, rows = self.ui.sources.get_selection().get_selected_rows() enabled = set(i for i, row in enumerate(model) if row[0]) selected = set(row.get_indices()[0] for row in rows) self.ui.use_button.set_sensitive(selected and enabled != selected) if hasattr(self, "previous_selection") and rows == self.previous_selection: return self.previous_selection = rows self.ui.edit_source.set_sensitive(False) self.ui.edit_source.set_label(_("Edit...")) if len(rows) == 1: source = model[rows[0]] type = Options.str_to_type(source[1]) if type == Options.SourceType.IMAGE: self.ui.edit_source.set_sensitive(True) self.ui.edit_source.set_label(_("View Image")) elif type in [Options.SourceType.FOLDER, Options.SourceType.FAVORITES, Options.SourceType.FETCHED]: self.ui.edit_source.set_sensitive(True) self.ui.edit_source.set_label(_("Open Folder")) elif type in EDITABLE_TYPES: self.ui.edit_source.set_sensitive(True) self.ui.edit_source.set_label(_("Edit...")) def timer_func(): self.show_thumbs(list(model[row] for row in rows)) if hasattr(self, "show_timer") and self.show_timer: self.show_timer.cancel() self.show_timer = threading.Timer(0.3, timer_func) self.show_timer.start() for row in rows: if Options.str_to_type(model[row][1]) in UNREMOVEABLE_TYPES: self.ui.remove_sources.set_sensitive(False) return self.ui.remove_sources.set_sensitive(len(rows) > 0) def model_row_to_source(self, row): return [row[0], Options.str_to_type(row[1]), Texts.SOURCES[row[1]][0] if row[1] in Texts.SOURCES else _u(row[2])] def source_to_model_row(self, s): srctype = Options.type_to_str(s[1]) return [s[0], srctype, s[2] if not srctype in Texts.SOURCES else Texts.SOURCES[srctype][1]] def show_thumbs(self, source_rows, pin=False, thumbs_type=None): try: if not source_rows: return self.parent.thumbs_manager.hide(gdk_thread=False, force=True) images = [] folders = [] image_count = 0 for row in source_rows: if not row: continue type = Options.str_to_type(row[1]) if type == Options.SourceType.IMAGE: image_count += 1 images.append(_u(row[2])) else: folder = self.parent.get_folder_of_source(self.model_row_to_source(row)) image_count += sum(1 for f in Util.list_files(folders=(folder,), filter_func=Util.is_image, max_files=1, randomize=False)) folders.append(folder) if image_count > -1: folder_images = list(Util.list_files(folders=folders, filter_func=Util.is_image, max_files=1000)) random.shuffle(folder_images) to_show = images + folder_images[:100] if hasattr(self, "focused_image") and self.focused_image is not None: try: 
to_show.remove(self.focused_image) except Exception: pass to_show.insert(0, self.focused_image) self.focused_image = None self.parent.thumbs_manager.show( to_show, gdk_thread=False, screen=self.get_screen(), folders=folders, type=thumbs_type) if pin: self.parent.thumbs_manager.pin() if thumbs_type: self.parent.update_indicator(is_gtk_thread=False, auto_changed=False) except Exception: logger.exception(lambda: "Could not create thumbs window:") def on_add_mediarss_clicked(self, widget=None): self.show_dialog(AddMediaRssDialog()) def on_add_reddit_clicked(self, widget=None): self.show_dialog(AddRedditDialog()) def on_add_flickr_clicked(self, widget=None): self.show_dialog(AddFlickrDialog()) def on_add_wallhaven_clicked(self, widget=None): self.show_dialog(AddWallhavenDialog()) def show_dialog(self, dialog): self.dialog = dialog self.dialog.parent = self self.dialog.set_transient_for(self) response = self.dialog.run() if response != Gtk.ResponseType.OK: if self.dialog: self.dialog.destroy() self.dialog = None def on_add_dialog_okay(self, source_type, location, edited_row): if edited_row: edited_row[2] = location else: self.add_sources(source_type, [location]) self.dialog = None def close(self): self.ui.error_downloaded.set_label("") self.ui.error_favorites.set_label("") self.ui.error_fetched.set_label("") self.hide() self.parent.trigger_download() self.on_destroy() self.parent.show_usage_stats_notice() def on_save_clicked(self, widget): self.delayed_apply() self.close() def delayed_apply(self, widget=None, *arg): if not self.loading: self.delayed_apply_with_interval(0.1) def delayed_apply_slow(self, widget=None, *arg): if not self.loading: self.delayed_apply_with_interval(1) def delayed_apply_with_interval(self, interval): if not self.loading: if hasattr(self, "apply_timer") and self.apply_timer: self.apply_timer.cancel() self.apply_timer = None self.apply_timer = threading.Timer(interval, self.apply) self.apply_timer.start() def apply(self): try: logger.info(lambda: "Applying preferences") self.options = Options() self.options.read() self.options.change_enabled = self.ui.change_enabled.get_active() self.options.change_on_start = self.ui.change_on_start.get_active() self.options.change_interval = self.get_change_interval() self.options.safe_mode = self.ui.safe_mode.get_active() self.options.download_enabled = self.ui.download_enabled.get_active() self.options.download_interval = self.get_download_interval() self.options.quota_enabled = self.ui.quota_enabled.get_active() try: self.options.quota_size = int(self.ui.quota_size.get_text()) if self.options.quota_size < 50: self.options.quota_size = 50 except Exception: logger.exception(lambda: "Could not understand quota size") if os.access(self.dl_chooser.get_folder(), os.W_OK): self.options.download_folder = self.dl_chooser.get_folder() if os.access(self.fav_chooser.get_folder(), os.W_OK): self.options.favorites_folder = self.fav_chooser.get_folder() self.options.favorites_operations = self.favorites_operations self.options.sources = [] for r in self.ui.sources.get_model(): self.options.sources.append(self.model_row_to_source(r)) if os.access(self.fetched_chooser.get_folder(), os.W_OK): self.options.fetched_folder = self.fetched_chooser.get_folder() self.options.clipboard_enabled = self.ui.clipboard_enabled.get_active() self.options.clipboard_use_whitelist = self.ui.clipboard_use_whitelist.get_active() buf = self.ui.clipboard_hosts.get_buffer() self.options.clipboard_hosts = Util.split(_u(buf.get_text(buf.get_start_iter(), buf.get_end_iter(), 
False))) if self.ui.icon.get_active() == 0: self.options.icon = "Light" elif self.ui.icon.get_active() == 1: self.options.icon = "Dark" elif self.ui.icon.get_active() == 2: self.options.icon = "Current" elif self.ui.icon.get_active() == 4: self.options.icon = "None" elif self.ui.icon.get_active() == 3: file = _u(self.ui.icon_chooser.get_filename()) if file and os.access(file, os.R_OK): self.options.icon = file else: self.options.icon = "Light" if self.ui.favorites_operations.get_active() == 0: self.options.favorites_operations = [["/", "Copy"]] elif self.ui.favorites_operations.get_active() == 1: self.options.favorites_operations = [["/", "Move"]] elif self.ui.favorites_operations.get_active() == 2: self.options.favorites_operations = [["/", "Both"]] elif self.ui.favorites_operations.get_active() == 3: # will be set in the favops editor dialog pass self.options.smart_enabled = self.ui.smart_enabled.get_active() if self.ui.sync_enabled.get_sensitive(): self.options.sync_enabled = self.ui.sync_enabled.get_active() self.options.stats_enabled = self.ui.stats_enabled.get_active() self.options.facebook_show_dialog = self.ui.facebook_show_dialog.get_active() self.options.copyto_enabled = self.ui.copyto_enabled.get_active() copyto = os.path.normpath(self.copyto_chooser.get_folder()) if copyto == os.path.normpath(self.parent.get_actual_copyto_folder('Default')): self.options.copyto_folder = 'Default' else: self.options.copyto_folder = copyto self.options.desired_color_enabled = self.ui.desired_color_enabled.get_active() c = self.ui.desired_color.get_color() self.options.desired_color = (c.red // 256, c.green // 256, c.blue // 256) self.options.min_size_enabled = self.ui.min_size_enabled.get_active() try: self.options.min_size = int(self.ui.min_size.get_active_text()) except Exception: pass self.options.use_landscape_enabled = self.ui.landscape_enabled.get_active() self.options.lightness_enabled = self.ui.lightness_enabled.get_active() self.options.lightness_mode = \ Options.LightnessMode.DARK if self.ui.lightness.get_active() == 0 else Options.LightnessMode.LIGHT self.options.min_rating_enabled = self.ui.min_rating_enabled.get_active() try: self.options.min_rating = int(self.ui.min_rating.get_active_text()) except Exception: pass self.options.clock_enabled = self.ui.clock_enabled.get_active() self.options.clock_font = _u(self.ui.clock_font.get_font_name()) self.options.clock_date_font = _u(self.ui.clock_date_font.get_font_name()) self.options.quotes_enabled = self.ui.quotes_enabled.get_active() self.options.quotes_font = _u(self.ui.quotes_font.get_font_name()) c = self.ui.quotes_text_color.get_color() self.options.quotes_text_color = (c.red // 256, c.green // 256, c.blue // 256) c = self.ui.quotes_bg_color.get_color() self.options.quotes_bg_color = (c.red // 256, c.green // 256, c.blue // 256) self.options.quotes_bg_opacity = max(0, min(100, int(self.ui.quotes_bg_opacity.get_value()))) self.options.quotes_text_shadow = self.ui.quotes_text_shadow.get_active() self.options.quotes_tags = _u(self.ui.quotes_tags.get_text()) self.options.quotes_authors = _u(self.ui.quotes_authors.get_text()) self.options.quotes_change_enabled = self.ui.quotes_change_enabled.get_active() self.options.quotes_change_interval = self.get_quotes_change_interval() self.options.quotes_width = max(0, min(100, int(self.ui.quotes_width.get_value()))) self.options.quotes_hpos = max(0, min(100, int(self.ui.quotes_hpos.get_value()))) self.options.quotes_vpos = max(0, min(100, int(self.ui.quotes_vpos.get_value()))) 
self.options.quotes_disabled_sources = [ cb.get_label() for cb in self.quotes_sources_checkboxes if not cb.get_active()] for f in self.options.filters: f[0] = self.filter_name_to_checkbox[f[1]].get_active() self.options.slideshow_sources_enabled = self.ui.slideshow_sources_enabled.get_active() self.options.slideshow_favorites_enabled = self.ui.slideshow_favorites_enabled.get_active() self.options.slideshow_downloads_enabled = self.ui.slideshow_downloads_enabled.get_active() self.options.slideshow_custom_enabled = self.ui.slideshow_custom_enabled.get_active() if os.access(self.slideshow_custom_chooser.get_folder(), os.R_OK): self.options.slideshow_custom_folder = self.slideshow_custom_chooser.get_folder() if self.ui.slideshow_sort_order.get_active() == 0: self.options.slideshow_sort_order = "Random" elif self.ui.slideshow_sort_order.get_active() == 1: self.options.slideshow_sort_order = "Name, asc" elif self.ui.slideshow_sort_order.get_active() == 2: self.options.slideshow_sort_order = "Name, desc" elif self.ui.slideshow_sort_order.get_active() == 3: self.options.slideshow_sort_order = "Date, asc" elif self.ui.slideshow_sort_order.get_active() == 4: self.options.slideshow_sort_order = "Date, desc" if self.ui.slideshow_monitor.get_active() == 0: self.options.slideshow_monitor = "All" else: self.options.slideshow_monitor = self.ui.slideshow_monitor.get_active() if self.ui.slideshow_mode.get_active() == 0: self.options.slideshow_mode = "Fullscreen" elif self.ui.slideshow_mode.get_active() == 1: self.options.slideshow_mode = "Desktop" elif self.ui.slideshow_mode.get_active() == 2: self.options.slideshow_mode = "Maximized" elif self.ui.slideshow_mode.get_active() == 3: self.options.slideshow_mode = "Window" self.options.slideshow_seconds = max(0.5, float(self.ui.slideshow_seconds.get_value())) self.options.slideshow_fade = max(0, min(1, float(self.ui.slideshow_fade.get_value()))) self.options.slideshow_zoom = max(0, min(1, float(self.ui.slideshow_zoom.get_value()))) self.options.slideshow_pan = max(0, min(0.2, float(self.ui.slideshow_pan.get_value()))) self.options.write() if not self.parent.running: return self.parent.reload_config() self.update_autostart() except Exception: if self.parent.running: logger.exception(lambda: "Error while applying preferences") dialog = Gtk.MessageDialog(self, Gtk.DialogFlags.MODAL, Gtk.MessageType.ERROR, Gtk.ButtonsType.OK, "An error occurred while saving preferences.\n" "Please run from a terminal with the -v flag and try again.") dialog.set_title("Oops") dialog.run() dialog.destroy() def update_autostart(self): file = os.path.expanduser(u"~/.config/autostart/variety.desktop") if not self.ui.autostart.get_active(): try: if os.path.exists(file): logger.info(lambda: "Removing autostart entry") os.unlink(file) except Exception: logger.exception(lambda: "Could not remove autostart entry variety.desktop") else: if not os.path.exists(file): self.parent.create_autostart_entry() def on_change_enabled_toggled(self, widget = None): self.ui.change_interval_text.set_sensitive(self.ui.change_enabled.get_active()) self.ui.change_interval_time_unit.set_sensitive(self.ui.change_enabled.get_active()) def on_quotes_change_enabled_toggled(self, widget = None): self.ui.quotes_change_interval_text.set_sensitive(self.ui.quotes_change_enabled.get_active()) self.ui.quotes_change_interval_time_unit.set_sensitive(self.ui.quotes_change_enabled.get_active()) def on_download_enabled_toggled(self, widget = None): active = self.ui.download_enabled.get_active() 
self.ui.download_interval_text.set_sensitive(active) self.ui.download_interval_time_unit.set_sensitive(active) self.ui.download_folder_chooser.set_sensitive(active) self.ui.quota_enabled.set_sensitive(active) self.ui.quota_size.set_sensitive(active) self.on_quota_enabled_toggled() def on_quota_enabled_toggled(self, widget = None): active = self.ui.download_enabled.get_active() and self.ui.quota_enabled.get_active() self.ui.quota_size.set_sensitive(active) def on_desired_color_enabled_toggled(self, widget = None): self.ui.desired_color.set_sensitive(self.ui.desired_color_enabled.get_active()) def on_min_size_enabled_toggled(self, widget = None): self.ui.min_size.set_sensitive(self.ui.min_size_enabled.get_active()) self.ui.min_size_label.set_sensitive(self.ui.min_size_enabled.get_active()) def on_min_rating_enabled_toggled(self, widget = None): self.ui.min_rating.set_sensitive(self.ui.min_rating_enabled.get_active()) def on_lightness_enabled_toggled(self, widget = None): self.ui.lightness.set_sensitive(self.ui.lightness_enabled.get_active()) def on_smart_enabled_toggled(self, widget=None): self.on_smart_user_updated() if not self.ui.smart_enabled.get_active(): for s in self.parent.options.sources: if s[1] in (Options.SourceType.RECOMMENDED,) and s[0]: self.parent.show_notification(_("Recommended images source disabled")) s[0] = False self.parent.options.write() for i, r in enumerate(self.ui.sources.get_model()): if Options.str_to_type(r[1]) in (Options.SourceType.RECOMMENDED,): r[0] = False elif not self.parent.smart.user: def _f(): self.parent.smart.load_user(create_if_missing=True) threading.Timer(0, _f).start() def on_destroy(self, widget = None): if hasattr(self, "dialog") and self.dialog: try: self.dialog.destroy() except Exception: pass for chooser in (self.dl_chooser, self.fav_chooser, self.fetched_chooser): try: chooser.destroy() except Exception: pass self.parent.thumbs_manager.hide(gdk_thread=True, force=False) def on_downloaded_changed(self, widget=None): self.delayed_apply() if not os.access(self.dl_chooser.get_folder(), os.W_OK): self.ui.error_downloaded.set_label(_("No write permissions")) else: self.ui.error_downloaded.set_label("") if not self.loading and self.ui.quota_enabled.get_active(): self.ui.quota_enabled.set_active(False) self.parent.show_notification( _("Limit disabled"), _("Changing the download folder automatically turns off the size limit to prevent from accidental data loss"), important=True) def update_real_download_folder(self): if not Util.same_file_paths(self.parent.options.download_folder, self.parent.real_download_folder): self.ui.real_download_folder.set_visible(True) self.ui.real_download_folder.set_text(_("Actual download folder: %s ") % self.parent.real_download_folder) def on_favorites_changed(self, widget=None): self.delayed_apply() if not os.access(self.fav_chooser.get_folder(), os.W_OK): self.ui.error_favorites.set_label(_("No write permissions")) else: self.ui.error_favorites.set_label("") def on_fetched_changed(self, widget=None): self.delayed_apply() if not os.access(self.fetched_chooser.get_folder(), os.W_OK): self.ui.error_fetched.set_label(_("No write permissions")) else: self.ui.error_fetched.set_label("") def update_clipboard_state(self, widget=None): self.ui.clipboard_use_whitelist.set_sensitive(self.ui.clipboard_enabled.get_active()) # keep the hosts list always enabled - user can wish to add hosts even when monitoring is not enabled - if undesired, uncomment below: # 
self.ui.clipboard_hosts.set_sensitive(self.ui.clipboard_enabled.get_active() and self.ui.clipboard_use_whitelist.get_active()) def on_edit_favorites_operations_clicked(self, widget=None): self.dialog = EditFavoriteOperationsDialog() self.dialog.set_transient_for(self) buf = self.dialog.ui.textbuffer buf.set_text('\n'.join(':'.join(x) for x in self.favorites_operations)) if self.dialog.run() == Gtk.ResponseType.OK: text = _u(buf.get_text(buf.get_start_iter(), buf.get_end_iter(), False)) self.favorites_operations = list([x.strip().split(':') for x in text.split('\n') if x]) self.delayed_apply() self.dialog.destroy() self.dialog = None def on_icon_changed(self, widget=None): self.ui.icon_chooser.set_visible(self.ui.icon.get_active() == 3) def on_favorites_operations_changed(self, widget=None): self.ui.edit_favorites_operations.set_visible(self.ui.favorites_operations.get_active() == 3) def on_copyto_enabled_toggled(self, widget=None): self.copyto_chooser.set_sensitive(self.ui.copyto_enabled.get_active()) self.ui.copyto_use_default.set_sensitive(self.ui.copyto_enabled.get_active()) self.on_copyto_changed() def on_copyto_changed(self): self.ui.copyto_faq_link.set_sensitive(self.ui.copyto_enabled.get_active()) if self.ui.copyto_enabled.get_active() and self.copyto_chooser.get_folder(): folder = self.copyto_chooser.get_folder() self.ui.copyto_use_default.set_sensitive(folder != self.parent.get_actual_copyto_folder('Default')) under_encrypted = Util.is_home_encrypted() and folder.startswith(os.path.expanduser('~') + '/') self.ui.copyto_encrypted_note.set_visible(under_encrypted) can_write = os.access(self.parent.get_actual_copyto_folder(folder), os.W_OK) can_read = os.stat(folder).st_mode | stat.S_IROTH self.ui.copyto_faq_link.set_visible(can_write and can_read and not under_encrypted) self.ui.copyto_permissions_box.set_visible(not can_write or not can_read) self.ui.copyto_write_permissions_warning.set_visible(not can_write) self.ui.copyto_read_permissions_warning.set_visible(not can_read) else: self.ui.copyto_faq_link.set_visible(True) self.ui.copyto_encrypted_note.set_visible(False) self.ui.copyto_permissions_box.set_visible(False) self.delayed_apply() def on_copyto_use_default_clicked(self, widget=None): self.copyto_chooser.set_folder(self.parent.get_actual_copyto_folder('Default')) self.on_copyto_changed() def on_copyto_fix_permissions_clicked(self, widget=None): folder = self.copyto_chooser.get_folder() can_write = os.access(self.parent.get_actual_copyto_folder(folder), os.W_OK) can_read = os.stat(folder).st_mode | stat.S_IROTH mode = 'a+' if not can_read: mode += 'r' if not can_write: mode += 'w' try: Util.superuser_exec("chmod", mode, folder) except Exception: logger.exception(lambda: "Could not adjust copyto folder permissions") self.parent.show_notification( _("Could not adjust permissions"), _('You may try manually running this command:\nsudo chmod %s "%s"') % (mode, folder)) self.on_copyto_changed() def on_btn_login_register_clicked(self, widget=None): if hasattr(self, 'dialog') and self.dialog and isinstance(self.dialog, LoginOrRegisterDialog): return login_dialog = LoginOrRegisterDialog() login_dialog.set_smart(self.parent.smart) self.show_dialog(login_dialog) def close_login_register_dialog(self): if hasattr(self, "dialog") and self.dialog and isinstance(self.dialog, LoginOrRegisterDialog): def _close(): self.dialog.destroy() self.dialog = None GObject.idle_add(_close) def on_smart_user_updated(self, create_user_attempts=0): self.update_status_message() sync_allowed = 
self.ui.smart_enabled.get_active() and self.parent.smart.is_registered() self.ui.sync_enabled.set_sensitive(sync_allowed) self.ui.sync_login_note.set_visible(not sync_allowed) if not sync_allowed: self.ui.sync_enabled.set_active(False) else: self.ui.sync_enabled.set_active(self.options.sync_enabled) if self.parent.smart.user: self.ui.box_smart_connecting.set_visible(False) self.ui.box_smart_user.set_visible(True) username = self.parent.smart.user.get("username") self.ui.smart_username.set_markup(_('Logged in as: ') + '<a href="%s">%s</a>' % ( self.parent.smart.get_profile_url(), username or _('Anonymous'))) self.ui.btn_login_register.set_label(_('Login or register') if not bool(username) else _('Switch user')) self.ui.smart_register_note.set_visible(not bool(username)) else: if not self.ui.smart_enabled.get_active(): self.ui.box_smart_connecting.set_visible(False) self.ui.box_smart_user.set_visible(False) elif create_user_attempts == 0: def _create_user(): def _start(): self.ui.smart_spinner.set_visible(True) self.ui.smart_spinner.start() self.ui.smart_connect_error.set_visible(False) self.ui.box_smart_connecting.set_visible(True) GObject.idle_add(_start) try: self.parent.smart.load_user(create_if_missing=True) self.on_smart_user_updated(create_user_attempts + 1) except IOError: def _fail(): self.ui.smart_spinner.set_visible(False) self.ui.smart_connect_error.set_visible(True) GObject.idle_add(_fail) finally: def _stop(): self.ui.smart_spinner.set_visible(False) GObject.idle_add(_stop) threading.Timer(0, _create_user).start() def on_btn_slideshow_reset_clicked(self, widget=None): self.ui.slideshow_seconds.set_value(6) self.ui.slideshow_fade.set_value(0.4) self.ui.slideshow_zoom.set_value(0.2) self.ui.slideshow_pan.set_value(0.05) def on_btn_slideshow_start_clicked(self, widget=None): self.apply() self.parent.on_start_slideshow()
GLolol/variety
variety/PreferencesVarietyDialog.py
Python
gpl-3.0
58,437
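The preferences dialog above never writes options directly from widget signals; delayed_apply_with_interval cancels any pending threading.Timer and schedules apply() afresh, so a burst of UI changes collapses into a single save. Below is a minimal, self-contained sketch of that cancel-and-reschedule debounce idea; DebouncedApply and on_apply are made-up names and nothing here is taken from Variety itself.

import threading
import time


class DebouncedApply(object):
    """Coalesce bursts of change notifications into one apply call."""

    def __init__(self, on_apply, interval=0.1):
        self.on_apply = on_apply
        self.interval = interval
        self._timer = None
        self._lock = threading.Lock()

    def notify(self, *args):
        # Cancel whatever is pending and restart the countdown; only the
        # last notification in a burst survives long enough to fire.
        with self._lock:
            if self._timer is not None:
                self._timer.cancel()
            self._timer = threading.Timer(self.interval, self.on_apply)
            self._timer.start()


if __name__ == "__main__":
    debounced = DebouncedApply(lambda: print("options written"), interval=0.2)
    for _ in range(10):        # ten rapid "toggled" signals ...
        debounced.notify()
    time.sleep(0.5)            # ... produce exactly one write

Run as a script, the ten notify() calls print "options written" once, which is the behaviour the dialog relies on while the user is still clicking through checkboxes.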
""" HTML parsing library based on the WHATWG "HTML5" specification. The parser is designed to be compatible with existing HTML found in the wild and implements well-defined error recovery that is largely compatible with modern desktop web browsers. Example usage: import html5lib f = open("my_document.html") tree = html5lib.parse(f) """ from __future__ import absolute_import, division, unicode_literals from .html5parser import HTMLParser, parse, parseFragment from .treebuilders import getTreeBuilder from .treewalkers import getTreeWalker from .serializer import serialize __all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", "getTreeWalker", "serialize"] # this has to be at the top level, see how setup.py parses this __version__ = "0.99999999-dev"
ordbogen/html5lib-python
html5lib/__init__.py
Python
mit
788
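The html5lib __init__ module above only re-exports the parser's public entry points. A short usage sketch of those entry points follows; the HTML snippets are invented, and the tree="etree" keyword mirrors what appears to be the default tree builder in this version, so treat the exact serializer options as assumptions rather than a definitive reference.

import html5lib

# Parse a whole document; an xml.etree-based tree is returned by default,
# with browser-style error recovery applied to the sloppy markup.
doc = html5lib.parse("<!DOCTYPE html><title>Demo</title><p>Hello <b>world")

# Parse a fragment and serialize it back out; the unclosed <b> is repaired.
fragment = html5lib.parseFragment("<p>Hello <b>world")
print(html5lib.serialize(fragment, tree="etree"))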
import sys


def setup(core, object):
    object.setStfFilename('static_item_n')
    object.setStfName('item_wookiee_gloves_02_01')
    object.setDetailFilename('static_item_d')
    object.setDetailName('item_wookiee_gloves_02_01')
    object.setIntAttribute('cat_stat_mod_bonus.@stat_n:agility_modified', 3)
    object.setStringAttribute('class_required', 'Smuggler')
    return
ProjectSWGCore/NGECore2
scripts/object/tangible/wearables/wookiee/item_smuggler_gloves_02_01.py
Python
lgpl-3.0
357
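The item script above is a pure configuration hook: the server calls setup(core, object) and the script pushes display-string names and item attributes onto the tangible object. A throwaway stand-in makes the effect easy to inspect; StubObject below is purely illustrative (not part of NGECore2) and assumes the script's setup function is in scope, for example pasted into the same module.

class StubObject(object):
    """Records the calls a setup() hook makes on a tangible object."""

    def __init__(self):
        self.fields = {}
        self.attributes = {}

    def setStfFilename(self, value):
        self.fields['stf_filename'] = value

    def setStfName(self, value):
        self.fields['stf_name'] = value

    def setDetailFilename(self, value):
        self.fields['detail_filename'] = value

    def setDetailName(self, value):
        self.fields['detail_name'] = value

    def setIntAttribute(self, key, value):
        self.attributes[key] = value

    def setStringAttribute(self, key, value):
        self.attributes[key] = value


if __name__ == "__main__":
    obj = StubObject()
    setup(None, obj)        # 'core' is unused by this particular script
    print(obj.fields)       # the four STF/detail names set above
    print(obj.attributes)   # the agility bonus and class requirement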
#!/usr/bin/env python """This script allows to create arbitrarily large files with the desired combination of groups, tables per group and rows per table. Issue "python stress-test3.py" without parameters for a help on usage. """ import gc import sys from time import perf_counter as clock from time import process_time as cpuclock import tables as tb class Test(tb.IsDescription): ngroup = tb.Int32Col(pos=1) ntable = tb.Int32Col(pos=2) nrow = tb.Int32Col(pos=3) string = tb.StringCol(500, pos=4) def createFileArr(filename, ngroups, ntables, nrows): # First, create the groups # Open a file in "w"rite mode fileh = tb.open_file(filename, mode="w", title="PyTables Stress Test") for k in range(ngroups): # Create the group fileh.create_group("/", 'group%04d' % k, "Group %d" % k) fileh.close() return (0, 4) def readFileArr(filename, ngroups, recsize, verbose): rowsread = 0 for ngroup in range(ngroups): fileh = tb.open_file(filename, mode="r", root_uep='group%04d' % ngroup) # Get the group group = fileh.root ntable = 0 if verbose: print("Group ==>", group) for table in fileh.list_nodes(group, 'Array'): if verbose > 1: print("Array ==>", table) print("Rows in", table._v_pathname, ":", table.shape) arr = table.read() rowsread += len(arr) ntable += 1 # Close the file (eventually destroy the extended type) fileh.close() return (rowsread, 4, 0) def createFile(filename, ngroups, ntables, nrows, complevel, complib, recsize): # First, create the groups # Open a file in "w"rite mode fileh = tb.open_file(filename, mode="w", title="PyTables Stress Test") for k in range(ngroups): # Create the group group = fileh.create_group("/", 'group%04d' % k, "Group %d" % k) fileh.close() # Now, create the tables rowswritten = 0 for k in range(ngroups): fileh = tb.open_file(filename, mode="a", root_uep='group%04d' % k) # Get the group group = fileh.root for j in range(ntables): # Create a table table = fileh.create_table(group, 'table%04d' % j, Test, 'Table%04d' % j, tb.Filters(complevel, complib), nrows) rowsize = table.rowsize # Get the row object associated with the new table row = table.row # Fill the table for i in range(nrows): row['ngroup'] = k row['ntable'] = j row['nrow'] = i row.append() rowswritten += nrows table.flush() # Close the file fileh.close() return (rowswritten, rowsize) def readFile(filename, ngroups, recsize, verbose): # Open the HDF5 file in read-only mode rowsread = 0 for ngroup in range(ngroups): fileh = tb.open_file(filename, mode="r", root_uep='group%04d' % ngroup) # Get the group group = fileh.root ntable = 0 if verbose: print("Group ==>", group) for table in fileh.list_nodes(group, 'Table'): rowsize = table.rowsize buffersize = table.rowsize * table.nrowsinbuf if verbose > 1: print("Table ==>", table) print("Max rows in buf:", table.nrowsinbuf) print("Rows in", table._v_pathname, ":", table.nrows) print("Buffersize:", table.rowsize * table.nrowsinbuf) print("MaxTuples:", table.nrowsinbuf) nrow = 0 for row in table: try: assert row["ngroup"] == ngroup assert row["ntable"] == ntable assert row["nrow"] == nrow except: print("Error in group: %d, table: %d, row: %d" % (ngroup, ntable, nrow)) print("Record ==>", row) nrow += 1 assert nrow == table.nrows rowsread += table.nrows ntable += 1 # Close the file (eventually destroy the extended type) fileh.close() return (rowsread, rowsize, buffersize) def dump_garbage(): """show us waht the garbage is about.""" # Force collection print("\nGARBAGE:") gc.collect() print("\nGARBAGE OBJECTS:") for x in gc.garbage: s = str(x) #if len(s) > 80: s = s[:77] + "..." 
print(type(x), "\n ", s) if __name__ == "__main__": import getopt try: import psyco psyco_imported = 1 except: psyco_imported = 0 usage = """usage: %s [-d debug] [-v level] [-p] [-r] [-w] [-l complib] [-c complevel] [-g ngroups] [-t ntables] [-i nrows] file -d debugging level -v verbosity level -p use "psyco" if available -a use Array objects instead of Table -r only read test -w only write test -l sets the compression library to be used ("zlib", "lzo", "ucl", "bzip2") -c sets a compression level (do not set it or 0 for no compression) -g number of groups hanging from "/" -t number of tables per group -i number of rows per table """ try: opts, pargs = getopt.getopt(sys.argv[1:], 'd:v:parwl:c:g:t:i:') except: sys.stderr.write(usage) sys.exit(0) # if we pass too much parameters, abort if len(pargs) != 1: sys.stderr.write(usage) sys.exit(0) # default options ngroups = 5 ntables = 5 nrows = 100 verbose = 0 debug = 0 recsize = "medium" testread = 1 testwrite = 1 usepsyco = 0 usearray = 0 complevel = 0 complib = "zlib" # Get the options for option in opts: if option[0] == '-d': debug = int(option[1]) if option[0] == '-v': verbose = int(option[1]) if option[0] == '-p': usepsyco = 1 if option[0] == '-a': usearray = 1 elif option[0] == '-r': testwrite = 0 elif option[0] == '-w': testread = 0 elif option[0] == '-l': complib = option[1] elif option[0] == '-c': complevel = int(option[1]) elif option[0] == '-g': ngroups = int(option[1]) elif option[0] == '-t': ntables = int(option[1]) elif option[0] == '-i': nrows = int(option[1]) if debug: gc.enable() gc.set_debug(gc.DEBUG_LEAK) # Catch the hdf5 file passed as the last argument file = pargs[0] print("Compression level:", complevel) if complevel > 0: print("Compression library:", complib) if testwrite: t1 = clock() cpu1 = cpuclock() if psyco_imported and usepsyco: psyco.bind(createFile) if usearray: (rowsw, rowsz) = createFileArr(file, ngroups, ntables, nrows) else: (rowsw, rowsz) = createFile(file, ngroups, ntables, nrows, complevel, complib, recsize) t2 = clock() cpu2 = cpuclock() tapprows = t2 - t1 cpuapprows = cpu2 - cpu1 print(f"Rows written: {rowsw} Row size: {rowsz}") print( f"Time writing rows: {tapprows:.3f} s (real) " f"{cpuapprows:.3f} s (cpu) {cpuapprows / tapprows:.0%}") print(f"Write rows/sec: {rowsw / tapprows}") print(f"Write KB/s : {rowsw * rowsz / (tapprows * 1024):.0f}") if testread: t1 = clock() cpu1 = cpuclock() if psyco_imported and usepsyco: psyco.bind(readFile) if usearray: (rowsr, rowsz, bufsz) = readFileArr(file, ngroups, recsize, verbose) else: (rowsr, rowsz, bufsz) = readFile(file, ngroups, recsize, verbose) t2 = clock() cpu2 = cpuclock() treadrows = t2 - t1 cpureadrows = cpu2 - cpu1 print(f"Rows read: {rowsw} Row size: {rowsz}, Buf size: {bufsz}") print( f"Time reading rows: {treadrows:.3f} s (real) " f"{cpureadrows:.3f} s (cpu) {cpureadrows / treadrows:.0%}") print(f"Read rows/sec: {rowsr / treadrows}") print(f"Read KB/s : {rowsr * rowsz / (treadrows * 1024):.0f}") # Show the dirt if debug > 1: dump_garbage()
avalentino/PyTables
bench/stress-test3.py
Python
bsd-3-clause
8,612
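The stress-test script above drives a small set of PyTables calls: open_file, create_group, create_table with an IsDescription subclass, per-row append, and a verification pass on read. A stripped-down sketch of that write/read cycle follows; the file name and counts are made up for illustration, and the sketch is not a substitute for the benchmark's command-line options.

import tables as tb


class Record(tb.IsDescription):
    ngroup = tb.Int32Col(pos=1)
    nrow = tb.Int32Col(pos=2)


# Write a tiny file: 2 groups, one table per group, 10 rows per table.
with tb.open_file("tiny-stress.h5", mode="w", title="Tiny stress test") as fileh:
    for k in range(2):
        group = fileh.create_group("/", "group%04d" % k, "Group %d" % k)
        table = fileh.create_table(group, "table0000", Record, "Table 0")
        row = table.row
        for i in range(10):
            row["ngroup"] = k
            row["nrow"] = i
            row.append()
        table.flush()

# Read it back, checking the same invariants the benchmark asserts.
with tb.open_file("tiny-stress.h5", mode="r") as fileh:
    for group in fileh.root:
        for table in fileh.list_nodes(group, "Table"):
            for nrow, r in enumerate(table):
                assert r["nrow"] == nrow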
""" Objects for dealing with polynomials. This module provides a number of objects (mostly functions) useful for dealing with polynomials, including a `Polynomial` class that encapsulates the usual arithmetic operations. (General information on how this module represents and works with polynomial objects is in the docstring for its "parent" sub-package, `numpy.polynomial`). Constants --------- - `polydomain` -- Polynomial default domain, [-1,1]. - `polyzero` -- (Coefficients of the) "zero polynomial." - `polyone` -- (Coefficients of the) constant polynomial 1. - `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``. Arithmetic ---------- - `polyadd` -- add two polynomials. - `polysub` -- subtract one polynomial from another. - `polymulx` -- multiply a polynomial in ``P_i(x)`` by ``x``. - `polymul` -- multiply two polynomials. - `polydiv` -- divide one polynomial by another. - `polypow` -- raise a polynomial to a positive integer power. - `polyval` -- evaluate a polynomial at given points. - `polyval2d` -- evaluate a 2D polynomial at given points. - `polyval3d` -- evaluate a 3D polynomial at given points. - `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product. - `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product. Calculus -------- - `polyder` -- differentiate a polynomial. - `polyint` -- integrate a polynomial. Misc Functions -------------- - `polyfromroots` -- create a polynomial with specified roots. - `polyroots` -- find the roots of a polynomial. - `polyvalfromroots` -- evaluate a polynomial at given points from roots. - `polyvander` -- Vandermonde-like matrix for powers. - `polyvander2d` -- Vandermonde-like matrix for 2D power series. - `polyvander3d` -- Vandermonde-like matrix for 3D power series. - `polycompanion` -- companion matrix in power series form. - `polyfit` -- least-squares fit returning a polynomial. - `polytrim` -- trim leading coefficients from a polynomial. - `polyline` -- polynomial representing given straight line. Classes ------- - `Polynomial` -- polynomial class. See Also -------- `numpy.polynomial` """ from __future__ import division, absolute_import, print_function __all__ = [ 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d'] import warnings import numpy as np import numpy.linalg as la from numpy.core.multiarray import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase polytrim = pu.trimcoef # # These are constant arrays are of integer type so as to be compatible # with the widest range of other types, such as Decimal. # # Polynomial default domain. polydomain = np.array([-1, 1]) # Polynomial coefficients representing zero. polyzero = np.array([0]) # Polynomial coefficients representing one. polyone = np.array([1]) # Polynomial coefficients representing the identity x. polyx = np.array([0, 1]) # # Polynomial series functions # def polyline(off, scl): """ Returns an array representing a linear polynomial. Parameters ---------- off, scl : scalars The "y-intercept" and "slope" of the line, respectively. Returns ------- y : ndarray This module's representation of the linear polynomial ``off + scl*x``. 
See Also -------- chebline Examples -------- >>> from numpy.polynomial import polynomial as P >>> P.polyline(1,-1) array([ 1, -1]) >>> P.polyval(1, P.polyline(1,-1)) # should be 0 0.0 """ if scl != 0: return np.array([off, scl]) else: return np.array([off]) def polyfromroots(roots): """ Generate a monic polynomial with given roots. Return the coefficients of the polynomial .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), where the `r_n` are the roots specified in `roots`. If a zero has multiplicity n, then it must appear in `roots` n times. For instance, if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear in any order. If the returned coefficients are `c`, then .. math:: p(x) = c_0 + c_1 * x + ... + x^n The coefficient of the last term is 1 for monic polynomials in this form. Parameters ---------- roots : array_like Sequence containing the roots. Returns ------- out : ndarray 1-D array of the polynomial's coefficients If all the roots are real, then `out` is also real, otherwise it is complex. (see Examples below). See Also -------- chebfromroots, legfromroots, lagfromroots, hermfromroots hermefromroots Notes ----- The coefficients are determined by multiplying together linear factors of the form `(x - r_i)`, i.e. .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) where ``n == len(roots) - 1``; note that this implies that `1` is always returned for :math:`a_n`. Examples -------- >>> from numpy.polynomial import polynomial as P >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x array([ 0., -1., 0., 1.]) >>> j = complex(0,1) >>> P.polyfromroots((-j,j)) # complex returned, though values are real array([1.+0.j, 0.+0.j, 1.+0.j]) """ return pu._fromroots(polyline, polymul, roots) def polyadd(c1, c2): """ Add one polynomial to another. Returns the sum of two polynomials `c1` + `c2`. The arguments are sequences of coefficients from lowest order term to highest, i.e., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. Parameters ---------- c1, c2 : array_like 1-D arrays of polynomial coefficients ordered from low to high. Returns ------- out : ndarray The coefficient array representing their sum. See Also -------- polysub, polymulx, polymul, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> sum = P.polyadd(c1,c2); sum array([4., 4., 4.]) >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) 28.0 """ return pu._add(c1, c2) def polysub(c1, c2): """ Subtract one polynomial from another. Returns the difference of two polynomials `c1` - `c2`. The arguments are sequences of coefficients from lowest order term to highest, i.e., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. Parameters ---------- c1, c2 : array_like 1-D arrays of polynomial coefficients ordered from low to high. Returns ------- out : ndarray Of coefficients representing their difference. See Also -------- polyadd, polymulx, polymul, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> P.polysub(c1,c2) array([-2., 0., 2.]) >>> P.polysub(c2,c1) # -P.polysub(c1,c2) array([ 2., 0., -2.]) """ return pu._sub(c1, c2) def polymulx(c): """Multiply a polynomial by x. Multiply the polynomial `c` by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of polynomial coefficients ordered from low to high. 
Returns ------- out : ndarray Array representing the result of the multiplication. See Also -------- polyadd, polysub, polymul, polydiv, polypow Notes ----- .. versionadded:: 1.5.0 """ # c is a trimmed copy [c] = pu.as_series([c]) # The zero series needs special treatment if len(c) == 1 and c[0] == 0: return c prd = np.empty(len(c) + 1, dtype=c.dtype) prd[0] = c[0]*0 prd[1:] = c return prd def polymul(c1, c2): """ Multiply one polynomial by another. Returns the product of two polynomials `c1` * `c2`. The arguments are sequences of coefficients, from lowest order term to highest, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` Parameters ---------- c1, c2 : array_like 1-D arrays of coefficients representing a polynomial, relative to the "standard" basis, and ordered from lowest order term to highest. Returns ------- out : ndarray Of the coefficients of their product. See Also -------- polyadd, polysub, polymulx, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> P.polymul(c1,c2) array([ 3., 8., 14., 8., 3.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) ret = np.convolve(c1, c2) return pu.trimseq(ret) def polydiv(c1, c2): """ Divide one polynomial by another. Returns the quotient-with-remainder of two polynomials `c1` / `c2`. The arguments are sequences of coefficients, from lowest order term to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. Parameters ---------- c1, c2 : array_like 1-D arrays of polynomial coefficients ordered from low to high. Returns ------- [quo, rem] : ndarrays Of coefficient series representing the quotient and remainder. See Also -------- polyadd, polysub, polymulx, polymul, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> P.polydiv(c1,c2) (array([3.]), array([-8., -4.])) >>> P.polydiv(c2,c1) (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0: raise ZeroDivisionError() # note: this is more efficient than `pu._div(polymul, c1, c2)` lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: return c1[:1]*0, c1 elif lc2 == 1: return c1/c2[-1], c1[:1]*0 else: dlen = lc1 - lc2 scl = c2[-1] c2 = c2[:-1]/scl i = dlen j = lc1 - 1 while i >= 0: c1[i:j] -= c2*c1[j] i -= 1 j -= 1 return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) def polypow(c, pow, maxpower=None): """Raise a polynomial to a power. Returns the polynomial `c` raised to the power `pow`. The argument `c` is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series ``1 + 2*x + 3*x**2.`` Parameters ---------- c : array_like 1-D array of array of series coefficients ordered from low to high degree. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Power series of power. See Also -------- polyadd, polysub, polymulx, polymul, polydiv Examples -------- >>> from numpy.polynomial import polynomial as P >>> P.polypow([1,2,3], 2) array([ 1., 4., 10., 12., 9.]) """ # note: this is more efficient than `pu._pow(polymul, c1, c2)`, as it # avoids calling `as_series` repeatedly return pu._pow(np.convolve, c, pow, maxpower) def polyder(c, m=1, scl=1, axis=0): """ Differentiate a polynomial. Returns the polynomial coefficients `c` differentiated `m` times along `axis`. 
At each iteration the result is multiplied by `scl` (the scaling factor is for use in a linear change of variable). The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like Array of polynomial coefficients. If c is multidimensional the different axis correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Number of derivatives taken, must be non-negative. (Default: 1) scl : scalar, optional Each differentiation is multiplied by `scl`. The end result is multiplication by ``scl**m``. This is for use in a linear change of variable. (Default: 1) axis : int, optional Axis over which the derivative is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- der : ndarray Polynomial coefficients of the derivative. See Also -------- polyint Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3 >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2 array([ 2., 6., 12.]) >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24 array([24.]) >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2 array([ -2., -6., -12.]) >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x array([ 6., 24.]) """ c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': # astype fails with NA c = c + 0.0 cdt = c.dtype cnt = pu._deprecate_as_int(m, "the order of derivation") iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: c = c[:1]*0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=cdt) for j in range(n, 0, -1): der[j - 1] = j*c[j] c = der c = np.moveaxis(c, 0, iaxis) return c def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): """ Integrate a polynomial. Returns the polynomial coefficients `c` integrated `m` times from `lbnd` along `axis`. At each iteration the resulting series is **multiplied** by `scl` and an integration constant, `k`, is added. The scaling factor is for use in a linear change of variable. ("Buyer beware": note that, depending on what one is doing, one may want `scl` to be the reciprocal of what one might expect; for more information, see the Notes section below.) The argument `c` is an array of coefficients, from low to high degree along each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like 1-D array of polynomial coefficients, ordered from low to high. m : int, optional Order of integration, must be positive. (Default: 1) k : {[], list, scalar}, optional Integration constant(s). The value of the first integral at zero is the first value in the list, the value of the second integral at zero is the second value, etc. If ``k == []`` (the default), all constants are set to zero. If ``m == 1``, a single scalar can be given instead of a list. lbnd : scalar, optional The lower bound of the integral. (Default: 0) scl : scalar, optional Following each integration the result is *multiplied* by `scl` before the integration constant is added. 
(Default: 1) axis : int, optional Axis over which the integral is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- S : ndarray Coefficient array of the integral. Raises ------ ValueError If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or ``np.ndim(scl) != 0``. See Also -------- polyder Notes ----- Note that the result of each integration is *multiplied* by `scl`. Why is this important to note? Say one is making a linear change of variable :math:`u = ax + b` in an integral relative to `x`. Then :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - perhaps not what one would have first thought. Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = (1,2,3) >>> P.polyint(c) # should return array([0, 1, 1, 1]) array([0., 1., 1., 1.]) >>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) array([ 0. , 0. , 0. , 0.16666667, 0.08333333, # may vary 0.05 ]) >>> P.polyint(c,k=3) # should return array([3, 1, 1, 1]) array([3., 1., 1., 1.]) >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1]) array([6., 1., 1., 1.]) >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2]) array([ 0., -2., -2., -2.]) """ c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': # astype doesn't preserve mask attribute. c = c + 0.0 cdt = c.dtype if not np.iterable(k): k = [k] cnt = pu._deprecate_as_int(m, "the order of integration") iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") if np.ndim(lbnd) != 0: raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c k = list(k) + [0]*(cnt - len(k)) c = np.moveaxis(c, iaxis, 0) for i in range(cnt): n = len(c) c *= scl if n == 1 and np.all(c[0] == 0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) tmp[0] = c[0]*0 tmp[1] = c[0] for j in range(1, n): tmp[j + 1] = c[j]/(j + 1) tmp[0] += k[i] - polyval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) return c def polyval(x, c, tensor=True): """ Evaluate a polynomial at points x. If `c` is of length `n + 1`, this function returns the value .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n The parameter `x` is converted to an array only if it is a tuple or a list, otherwise it is treated as a scalar. In either case, either `x` or its elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If `c` is multidimensional, then the shape of the result depends on the value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that scalars have shape (,). Trailing zeros in the coefficients will be used in the evaluation, so they should be avoided if efficiency is a concern. Parameters ---------- x : array_like, compatible object If `x` is a list or tuple, it is converted to an ndarray, otherwise it is left unchanged and treated as a scalar. In either case, `x` or its elements must support addition and multiplication with with themselves and with the elements of `c`. c : array_like Array of coefficients ordered so that the coefficients for terms of degree n are contained in c[n]. If `c` is multidimensional the remaining indices enumerate multiple polynomials. 
In the two dimensional case the coefficients may be thought of as stored in the columns of `c`. tensor : boolean, optional If True, the shape of the coefficient array is extended with ones on the right, one for each dimension of `x`. Scalars have dimension 0 for this action. The result is that every column of coefficients in `c` is evaluated for every element of `x`. If False, `x` is broadcast over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. .. versionadded:: 1.7.0 Returns ------- values : ndarray, compatible object The shape of the returned array is described above. See Also -------- polyval2d, polygrid2d, polyval3d, polygrid3d Notes ----- The evaluation uses Horner's method. Examples -------- >>> from numpy.polynomial.polynomial import polyval >>> polyval(1, [1,2,3]) 6.0 >>> a = np.arange(4).reshape(2,2) >>> a array([[0, 1], [2, 3]]) >>> polyval(a, [1,2,3]) array([[ 1., 6.], [17., 34.]]) >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients >>> coef array([[0, 1], [2, 3]]) >>> polyval([1,2], coef, tensor=True) array([[2., 4.], [4., 7.]]) >>> polyval([1,2], coef, tensor=False) array([2., 7.]) """ c = np.array(c, ndmin=1, copy=0) if c.dtype.char in '?bBhHiIlLqQpP': # astype fails with NA c = c + 0.0 if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: c = c.reshape(c.shape + (1,)*x.ndim) c0 = c[-1] + x*0 for i in range(2, len(c) + 1): c0 = c[-i] + c0*x return c0 def polyvalfromroots(x, r, tensor=True): """ Evaluate a polynomial specified by its roots at points x. If `r` is of length `N`, this function returns the value .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n) The parameter `x` is converted to an array only if it is a tuple or a list, otherwise it is treated as a scalar. In either case, either `x` or its elements must support multiplication and addition both with themselves and with the elements of `r`. If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If `r` is multidimensional, then the shape of the result depends on the value of `tensor`. If `tensor is ``True`` the shape will be r.shape[1:] + x.shape; that is, each polynomial is evaluated at every value of `x`. If `tensor` is ``False``, the shape will be r.shape[1:]; that is, each polynomial is evaluated only for the corresponding broadcast value of `x`. Note that scalars have shape (,). .. versionadded:: 1.12 Parameters ---------- x : array_like, compatible object If `x` is a list or tuple, it is converted to an ndarray, otherwise it is left unchanged and treated as a scalar. In either case, `x` or its elements must support addition and multiplication with with themselves and with the elements of `r`. r : array_like Array of roots. If `r` is multidimensional the first index is the root index, while the remaining indices enumerate multiple polynomials. For instance, in the two dimensional case the roots of each polynomial may be thought of as stored in the columns of `r`. tensor : boolean, optional If True, the shape of the roots array is extended with ones on the right, one for each dimension of `x`. Scalars have dimension 0 for this action. The result is that every column of coefficients in `r` is evaluated for every element of `x`. If False, `x` is broadcast over the columns of `r` for the evaluation. This keyword is useful when `r` is multidimensional. The default value is True. Returns ------- values : ndarray, compatible object The shape of the returned array is described above. 
See Also -------- polyroots, polyfromroots, polyval Examples -------- >>> from numpy.polynomial.polynomial import polyvalfromroots >>> polyvalfromroots(1, [1,2,3]) 0.0 >>> a = np.arange(4).reshape(2,2) >>> a array([[0, 1], [2, 3]]) >>> polyvalfromroots(a, [-1, 0, 1]) array([[-0., 0.], [ 6., 24.]]) >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients >>> r # each column of r defines one polynomial array([[-2, -1], [ 0, 1]]) >>> b = [-2, 1] >>> polyvalfromroots(b, r, tensor=True) array([[-0., 3.], [ 3., 0.]]) >>> polyvalfromroots(b, r, tensor=False) array([-0., 0.]) """ r = np.array(r, ndmin=1, copy=0) if r.dtype.char in '?bBhHiIlLqQpP': r = r.astype(np.double) if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray): if tensor: r = r.reshape(r.shape + (1,)*x.ndim) elif x.ndim >= r.ndim: raise ValueError("x.ndim must be < r.ndim when tensor == False") return np.prod(x - r, axis=0) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). This function returns the value .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j The parameters `x` and `y` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than two dimensions, ones are implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `(x, y)`, where `x` and `y` must have the same shape. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j is contained in `c[i,j]`. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points formed with pairs of corresponding values from `x` and `y`. See Also -------- polyval, polygrid2d, polyval3d, polygrid3d Notes ----- .. versionadded:: 1.7.0 """ return pu._valnd(polyval, c, x, y) def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j where the points `(a, b)` consist of all pairs formed by taking `a` from `x` and `b` from `y`. The resulting points form a grid with `x` in the first dimension and `y` in the second. The parameters `x` and `y` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than two dimensions, ones are implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape + y.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points in the Cartesian product of `x` and `y`. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. 
c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points in the Cartesian product of `x` and `y`. See Also -------- polyval, polyval2d, polyval3d, polygrid3d Notes ----- .. versionadded:: 1.7.0 """ return pu._gridnd(polyval, c, x, y) def polyval3d(x, y, z, c): """ Evaluate a 3-D polynomial at points (x, y, z). This function returns the values: .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than 3 dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape. Parameters ---------- x, y, z : array_like, compatible object The three dimensional series is evaluated at the points `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If any of `x`, `y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and if it isn't an ndarray it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension greater than 3 the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the multidimensional polynomial on points formed with triples of corresponding values from `x`, `y`, and `z`. See Also -------- polyval, polyval2d, polygrid2d, polygrid3d Notes ----- .. versionadded:: 1.7.0 """ return pu._valnd(polyval, c, x, y, z) def polygrid3d(x, y, z, c): """ Evaluate a 3-D polynomial on the Cartesian product of x, y and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k where the points `(a, b, c)` consist of all triples formed by taking `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form a grid with `x` in the first dimension, `y` in the second, and `z` in the third. The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than three dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape + y.shape + z.shape. Parameters ---------- x, y, z : array_like, compatible objects The three dimensional series is evaluated at the points in the Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. 
Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points in the Cartesian product of `x` and `y`. See Also -------- polyval, polyval2d, polygrid2d, polyval3d Notes ----- .. versionadded:: 1.7.0 """ return pu._gridnd(polyval, c, x, y, z) def polyvander(x, deg): """Vandermonde matrix of given degree. Returns the Vandermonde matrix of degree `deg` and sample points `x`. The Vandermonde matrix is defined by .. math:: V[..., i] = x^i, where `0 <= i <= deg`. The leading indices of `V` index the elements of `x` and the last index is the power of `x`. If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and ``polyval(x, c)`` are the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of polynomials of the same degree and sample points. Parameters ---------- x : array_like Array of points. The dtype is converted to float64 or complex128 depending on whether any of the elements are complex. If `x` is scalar it is converted to a 1-D array. deg : int Degree of the resulting matrix. Returns ------- vander : ndarray. The Vandermonde matrix. The shape of the returned matrix is ``x.shape + (deg + 1,)``, where the last index is the power of `x`. The dtype will be the same as the converted `x`. See Also -------- polyvander2d, polyvander3d """ ideg = pu._deprecate_as_int(deg, "deg") if ideg < 0: raise ValueError("deg must be non-negative") x = np.array(x, copy=0, ndmin=1) + 0.0 dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) v[0] = x*0 + 1 if ideg > 0: v[1] = x for i in range(2, ideg + 1): v[i] = v[i-1]*x return np.moveaxis(v, 0, -1) def polyvander2d(x, y, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample points `(x, y)`. The pseudo-Vandermonde matrix is defined by .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j, where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of `V` index the points `(x, y)` and the last index encodes the powers of `x` and `y`. If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of 2-D polynomials of the same degrees and sample points. Parameters ---------- x, y : array_like Arrays of point coordinates, all of the same shape. The dtypes will be converted to either float64 or complex128 depending on whether any of the elements are complex. Scalars are converted to 1-D arrays. deg : list of ints List of maximum degrees of the form [x_deg, y_deg]. Returns ------- vander2d : ndarray The shape of the returned matrix is ``x.shape + (order,)``, where :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same as the converted `x` and `y`. See Also -------- polyvander, polyvander3d, polyval2d, polyval3d """ return pu._vander2d(polyvander, x, y, deg) def polyvander3d(x, y, z, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, then The pseudo-Vandermonde matrix is defined by .. 
math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k, where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading indices of `V` index the points `(x, y, z)` and the last index encodes the powers of `x`, `y`, and `z`. If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns of `V` correspond to the elements of a 3-D coefficient array `c` of shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of 3-D polynomials of the same degrees and sample points. Parameters ---------- x, y, z : array_like Arrays of point coordinates, all of the same shape. The dtypes will be converted to either float64 or complex128 depending on whether any of the elements are complex. Scalars are converted to 1-D arrays. deg : list of ints List of maximum degrees of the form [x_deg, y_deg, z_deg]. Returns ------- vander3d : ndarray The shape of the returned matrix is ``x.shape + (order,)``, where :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will be the same as the converted `x`, `y`, and `z`. See Also -------- polyvander, polyvander3d, polyval2d, polyval3d Notes ----- .. versionadded:: 1.7.0 """ return pu._vander3d(polyvander, x, y, z, deg) def polyfit(x, y, deg, rcond=None, full=False, w=None): """ Least-squares fit of a polynomial to data. Return the coefficients of a polynomial of degree `deg` that is the least squares fit to the data values `y` given at points `x`. If `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple fits are done, one for each column of `y`, and the resulting coefficients are stored in the corresponding columns of a 2-D return. The fitted polynomial(s) are in the form .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n, where `n` is `deg`. Parameters ---------- x : array_like, shape (`M`,) x-coordinates of the `M` sample (data) points ``(x[i], y[i])``. y : array_like, shape (`M`,) or (`M`, `K`) y-coordinates of the sample points. Several sets of sample points sharing the same x-coordinates can be (independently) fit with one call to `polyfit` by passing in for `y` a 2-D array that contains one data set per column. deg : int or 1-D array_like Degree(s) of the fitting polynomials. If `deg` is a single integer all terms up to and including the `deg`'th term are included in the fit. For NumPy versions >= 1.11.0 a list of integers specifying the degrees of the terms to include may be used instead. rcond : float, optional Relative condition number of the fit. Singular values smaller than `rcond`, relative to the largest singular value, will be ignored. The default value is ``len(x)*eps``, where `eps` is the relative precision of the platform's float type, about 2e-16 in most cases. full : bool, optional Switch determining the nature of the return value. When ``False`` (the default) just the coefficients are returned; when ``True``, diagnostic information from the singular value decomposition (used to solve the fit's matrix equation) is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. .. 
versionadded:: 1.5.0 Returns ------- coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) Polynomial coefficients ordered from low to high. If `y` was 2-D, the coefficients in column `k` of `coef` represent the polynomial fit to the data in `y`'s `k`-th column. [residuals, rank, singular_values, rcond] : list These values are only returned if `full` = True resid -- sum of squared residuals of the least squares fit rank -- the numerical rank of the scaled Vandermonde matrix sv -- singular values of the scaled Vandermonde matrix rcond -- value of `rcond`. For more details, see `linalg.lstsq`. Raises ------ RankWarning Raised if the matrix in the least-squares fit is rank deficient. The warning is only raised if `full` == False. The warnings can be turned off by: >>> import warnings >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- chebfit, legfit, lagfit, hermfit, hermefit polyval : Evaluates a polynomial. polyvander : Vandermonde matrix for powers. linalg.lstsq : Computes a least-squares fit from the matrix. scipy.interpolate.UnivariateSpline : Computes spline fits. Notes ----- The solution is the coefficients of the polynomial `p` that minimizes the sum of the weighted squared errors .. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, where the :math:`w_j` are the weights. This problem is solved by setting up the (typically) over-determined matrix equation: .. math :: V(x) * c = w * y, where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the coefficients to be solved for, `w` are the weights, and `y` are the observed values. This equation is then solved using the singular value decomposition of `V`. If some of the singular values of `V` are so small that they are neglected (and `full` == ``False``), a `RankWarning` will be raised. This means that the coefficient values may be poorly determined. Fitting to a lower order polynomial will usually get rid of the warning (but may not be what you want, of course; if you have independent reason(s) for choosing the degree which isn't working, you may have to: a) reconsider those reasons, and/or b) reconsider the quality of your data). The `rcond` parameter can also be set to a value smaller than its default, but the resulting fit may be spurious and have large contributions from roundoff error. Polynomial fits using double precision tend to "fail" at about (polynomial) degree 20. Fits using Chebyshev or Legendre series are generally better conditioned, but much can still depend on the distribution of the sample points and the smoothness of the data. If the quality of the fit is inadequate, splines may be a good alternative. Examples -------- >>> np.random.seed(123) >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise" >>> c, stats = P.polyfit(x,y,3,full=True) >>> np.random.seed(123) >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 
1 array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary >>> stats # note the large SSR, explaining the rather poor results [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary 0.28853036]), 1.1324274851176597e-014] Same thing without the added noise >>> y = x**3 - x >>> c, stats = P.polyfit(x,y,3,full=True) >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00]) >>> stats # note the minuscule SSR [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary 0.50443316, 0.28853036]), 1.1324274851176597e-014] """ return pu._fit(polyvander, x, y, deg, rcond, full, w) def polycompanion(c): """ Return the companion matrix of c. The companion matrix for power series cannot be made symmetric by scaling the basis, so this function differs from those for the orthogonal polynomials. Parameters ---------- c : array_like 1-D array of polynomial coefficients ordered from low to high degree. Returns ------- mat : ndarray Companion matrix of dimensions (deg, deg). Notes ----- .. versionadded:: 1.7.0 """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: return np.array([[-c[0]/c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) bot = mat.reshape(-1)[n::n+1] bot[...] = 1 mat[:, -1] -= c[:-1]/c[-1] return mat def polyroots(c): """ Compute the roots of a polynomial. Return the roots (a.k.a. "zeros") of the polynomial .. math:: p(x) = \\sum_i c[i] * x^i. Parameters ---------- c : 1-D array_like 1-D array of polynomial coefficients. Returns ------- out : ndarray Array of the roots of the polynomial. If all the roots are real, then `out` is also real, otherwise it is complex. See Also -------- chebroots Notes ----- The root estimates are obtained as the eigenvalues of the companion matrix, Roots far from the origin of the complex plane may have large errors due to the numerical instability of the power series for such values. Roots with multiplicity greater than 1 will also show larger errors as the value of the series near such points is relatively insensitive to errors in the roots. Isolated roots near the origin can be improved by a few iterations of Newton's method. Examples -------- >>> import numpy.polynomial.polynomial as poly >>> poly.polyroots(poly.polyfromroots((-1,0,1))) array([-1., 0., 1.]) >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype dtype('float64') >>> j = complex(0,1) >>> poly.polyroots(poly.polyfromroots((-j,0,j))) array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) # may vary """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: return np.array([], dtype=c.dtype) if len(c) == 2: return np.array([-c[0]/c[1]]) # rotated companion matrix reduces error m = polycompanion(c)[::-1,::-1] r = la.eigvals(m) r.sort() return r # # polynomial class # class Polynomial(ABCPolyBase): """A power series class. The Polynomial class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the attributes and methods listed in the `ABCPolyBase` documentation. Parameters ---------- coef : array_like Polynomial coefficients in order of increasing degree, i.e., ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``. domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. 
        The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].

        .. versionadded:: 1.6.0

    """
    # Virtual Functions
    _add = staticmethod(polyadd)
    _sub = staticmethod(polysub)
    _mul = staticmethod(polymul)
    _div = staticmethod(polydiv)
    _pow = staticmethod(polypow)
    _val = staticmethod(polyval)
    _int = staticmethod(polyint)
    _der = staticmethod(polyder)
    _fit = staticmethod(polyfit)
    _line = staticmethod(polyline)
    _roots = staticmethod(polyroots)
    _fromroots = staticmethod(polyfromroots)

    # Virtual properties
    nickname = 'poly'
    domain = np.array(polydomain)
    window = np.array(polydomain)
    basis_name = None

    @staticmethod
    def _repr_latex_term(i, arg_str, needs_parens):
        if needs_parens:
            arg_str = r'\left({}\right)'.format(arg_str)
        if i == 0:
            return '1'
        elif i == 1:
            return arg_str
        else:
            return '{}^{{{}}}'.format(arg_str, i)
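

# ----------------------------------------------------------------------
# Editor's illustrative sketch -- not part of the upstream NumPy module.
# A minimal check of three relationships documented above, assuming ``np``
# is NumPy (imported at the top of this module, as in the upstream file)
# and using this module's own polyval/polyvander/polyder/polyint/polyfit.
if __name__ == '__main__':
    x = np.linspace(-1, 1, 9)
    c = np.array([1.0, 2.0, 3.0])                 # 1 + 2*x + 3*x**2

    # 1) Vandermonde equivalence: dot(V, c) matches polyval(x, c) up to
    #    roundoff, as noted in the polyvander docstring.
    V = polyvander(x, len(c) - 1)
    assert np.allclose(np.dot(V, c), polyval(x, c))

    # 2) polyder undoes polyint; for a change of variable u = a*x the
    #    polyint Notes say to pass the reciprocal scaling, scl = 1/a.
    a = 2.0
    assert np.allclose(polyder(polyint(c, scl=1.0/a), scl=a), c)

    # 3) Fitting noiseless samples of the polynomial recovers its
    #    coefficients, illustrating the least-squares setup in polyfit.
    assert np.allclose(polyfit(x, polyval(x, c), 2), c)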
shoyer/numpy
numpy/polynomial/polynomial.py
Python
bsd-3-clause
48,632
from __future__ import division import sys sys.path.append('../spherepy') import spherepy as sp #TODO: Change all xrange instances to range #and do a 'from six.moves import range' here from six.moves import xrange fs = """ c[n, m] ======= 2: {7} {4} {2} {6} {8} 1: {3} {1} {5} 0: {0} n ------------- ------------- ------------- ------------- ------------- m = -2 m = -1 m = 0 m = 1 m = 2 """ def _tiny_rep(c): sr = "{0:.2}".format(c) if sr[0] == '(': sr = sr[1:-1] return sr c = sp.random_coefs(4,4) c[0,0] = 1 sa = [] cfit = c[0:2,:] cvec = cfit._vec for val in cvec: sa.append(_tiny_rep(val)) while len(sa) < 9: sa.append("") for n in range(0,9): sa[n] = sa[n].center(13) print fs.format(sa[0],sa[1],sa[2],sa[3],sa[4],sa[5],sa[6],sa[7],sa[8]) c = sp.random_coefs(4,1) cc = c[0:2,:] p = sp.ispht(cc,50,60) #sp.plot_sphere_mag(p)
rdireen/spherepy
examples/test_plot.py
Python
gpl-3.0
1,000
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # PyNurseryRhymesDemo # The MIT License # # Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Here is the procedure to submit updates to PyPI # =============================================== # # 1. Register to PyPI: # # $ python3 setup.py register # # 2. Upload the source distribution: # # $ python3 setup.py sdist upload from jdhp_distutils_demo import __version__ as VERSION from distutils.core import setup # See : http://pypi.python.org/pypi?%3Aaction=list_classifiers CLASSIFIERS = ['Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Topic :: Software Development :: Libraries'] # You can either specify manually the list of packages to include in the # distribution or use "setuptools.find_packages()" to include them # automatically with a recursive search (from the root directory of the # project). #PACKAGES = find_packages() PACKAGES = ['jdhp_distutils_demo'] SCRIPTS = ['scripts/distutils-demo-nox', 'scripts/distutils-demo'] README_FILE = 'README.rst' def get_long_description(): with open(README_FILE, 'r') as fd: desc = fd.read() return desc setup(author='Jeremie DECOCK', author_email='[email protected]', maintainer='Jeremie DECOCK', maintainer_email='[email protected]', name='jdhp-distutils-demo', description='A snippet to test distutils and PyPI', long_description=get_long_description(), url='http://www.jdhp.org/', download_url='http://www.jdhp.org/',# where the package may be downloaded scripts=SCRIPTS, classifiers=CLASSIFIERS, #license='MIT license', # Useless if license is already in CLASSIFIERS packages=PACKAGES, version=VERSION)
jeremiedecock/snippets
python/distutils/example_without_dependency/setup.py
Python
mit
3,069
from __future__ import unicode_literals

from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible


# Forward declared intermediate model
@python_2_unicode_compatible
class Membership(models.Model):
    person = models.ForeignKey('Person')
    group = models.ForeignKey('Group')
    price = models.IntegerField(default=100)

    def __str__(self):
        return "%s is a member of %s" % (self.person.name, self.group.name)


# using custom id column to test ticket #11107
@python_2_unicode_compatible
class UserMembership(models.Model):
    id = models.AutoField(db_column='usermembership_id', primary_key=True)
    user = models.ForeignKey(User)
    group = models.ForeignKey('Group')
    price = models.IntegerField(default=100)

    def __str__(self):
        return "%s is a user and member of %s" % (self.user.username, self.group.name)


@python_2_unicode_compatible
class Person(models.Model):
    name = models.CharField(max_length=128)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Group(models.Model):
    name = models.CharField(max_length=128)
    # Membership object defined as a class
    members = models.ManyToManyField(Person, through=Membership)
    user_members = models.ManyToManyField(User, through='UserMembership')

    def __str__(self):
        return self.name


# A set of models that use a non-abstract inherited model as the 'through' model.
class A(models.Model):
    a_text = models.CharField(max_length=20)


class ThroughBase(models.Model):
    a = models.ForeignKey(A)
    b = models.ForeignKey('B')


class Through(ThroughBase):
    extra = models.CharField(max_length=20)


class B(models.Model):
    b_text = models.CharField(max_length=20)
    a_list = models.ManyToManyField(A, through=Through)


# Using to_field on the through model
@python_2_unicode_compatible
class Car(models.Model):
    make = models.CharField(max_length=20, unique=True, null=True)
    drivers = models.ManyToManyField('Driver', through='CarDriver')

    def __str__(self):
        return "%s" % self.make


@python_2_unicode_compatible
class Driver(models.Model):
    name = models.CharField(max_length=20, unique=True, null=True)

    def __str__(self):
        return "%s" % self.name

    class Meta:
        ordering = ('name',)


@python_2_unicode_compatible
class CarDriver(models.Model):
    car = models.ForeignKey('Car', to_field='make')
    driver = models.ForeignKey('Driver', to_field='name')

    def __str__(self):
        return "pk=%s car=%s driver=%s" % (str(self.pk), self.car, self.driver)
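

# Editor's illustrative sketch -- not part of the original regression-test
# models. A minimal usage example of the explicit ``through`` models above,
# assuming a configured Django test database where these models are migrated;
# the names used here are invented for illustration only.
def _example_through_usage():
    jim = Person.objects.create(name="Jim")
    rock = Group.objects.create(name="Rock")
    # Rows in the intermediate table are created via the Membership model,
    # which is where the extra ``price`` column lives.
    Membership.objects.create(person=jim, group=rock, price=50)
    assert list(rock.members.all()) == [jim]
    assert Membership.objects.get(person=jim, group=rock).price == 50
    # CarDriver uses ``to_field``, so its FK columns store Car.make and
    # Driver.name rather than the primary keys.
    honda = Car.objects.create(make="Honda")
    ryan = Driver.objects.create(name="Ryan")
    CarDriver.objects.create(car=honda, driver=ryan)
    assert list(honda.drivers.all()) == [ryan]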
openhatch/new-mini-tasks
vendor/packages/Django/tests/regressiontests/m2m_through_regress/models.py
Python
apache-2.0
2,634
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.http import HttpResponseRedirect

from .api import Base, ExampleAuthenticated, ExampleAdmin
#from config import settings
from django.conf import settings

admin.autodiscover()

urlpatterns = patterns('',
    url(r'^api/v1/searchmanager/', include('apps.searchmanager.urls')),
    url(r'^api/v1/eventsmanager/', include('apps.eventsmanager.urls')),
    url(r'^api/v1/metricsmanager/', include('apps.metricsmanager.urls')),
    url(r'^api/v1/datasetmanager/', include('apps.datasetmanager.urls')),
    url(r'^api/v1/visualizationsmanager/', include('apps.visualizationsmanager.urls')),
    url(r'^api/v1/indicatorservice/', include('apps.indicatorservice.urls')),
    url(r'^api/v1/auth/', include('apps.common.urls')),
    url(r'^api/v1/references/', include('apps.referencepool.urls')),
    url(r'^api/v1/$', Base.as_view()),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^docs/', include('rest_framework_swagger.urls'), name='swagger'),
    url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.PC_SERVICES['references']['MEDIA_URL'],}),
    url(r'^example/auth/anyuser$', ExampleAuthenticated.as_view()),
    url(r'^example/auth/adminuser$', ExampleAdmin.as_view()),

    # For the time being redirect to swagger
    url(r'^$', lambda x: HttpResponseRedirect('/api/v1'))
)
almey/policycompass-services
policycompass_services/urls.py
Python
agpl-3.0
1,413
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-10-18 18:43
from __future__ import unicode_literals

from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations, models
import django.db.models.deletion


def set_planrepository(apps, schema_editor):
    Build = apps.get_model('build', 'Build')
    for build in Build.objects.all().iterator():
        try:
            build.planrepo = build.plan.planrepository_set.get(repo = build.repo)
        except ObjectDoesNotExist:
            continue
        build.save()


class Migration(migrations.Migration):

    dependencies = [
        ('plan', '0020_auto_20181018_1636'),
        ('build', '0018_remove_build_schedule'),
    ]

    operations = [
        migrations.AddField(
            model_name='build',
            name='planrepo',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='builds', to='plan.PlanRepository'),
            preserve_default=False,
        ),
        migrations.RunPython(set_planrepository),
    ]
SalesforceFoundation/mrbelvedereci
metaci/build/migrations/0019_build_planrepo.py
Python
bsd-3-clause
1,072
import json import os import sys import webbrowser import code import click from plaintable import Table from rst2ansi import rst2ansi import cumulusci from cumulusci.core.config import ConnectedAppOAuthConfig from cumulusci.core.config import FlowConfig from cumulusci.core.config import OrgConfig from cumulusci.core.config import ScratchOrgConfig from cumulusci.core.config import ServiceConfig from cumulusci.core.config import TaskConfig from cumulusci.core.config import YamlGlobalConfig from cumulusci.core.config import YamlProjectConfig from cumulusci.core.exceptions import ApexTestException from cumulusci.core.exceptions import FlowNotFoundError from cumulusci.core.exceptions import KeychainConnectedAppNotFound from cumulusci.core.exceptions import KeychainKeyNotFound from cumulusci.salesforce_api.exceptions import MetadataApiError from cumulusci.salesforce_api.exceptions import MetadataComponentFailure from cumulusci.core.exceptions import NotInProject from cumulusci.core.exceptions import ProjectConfigNotFound from cumulusci.core.exceptions import ScratchOrgException from cumulusci.core.exceptions import ServiceNotConfigured from cumulusci.core.exceptions import TaskNotFoundError from cumulusci.core.exceptions import TaskOptionsError from cumulusci.core.exceptions import TaskRequiresSalesforceOrg from cumulusci.core.utils import import_class from cumulusci.utils import doc_task from cumulusci.oauth.salesforce import CaptureSalesforceOAuth from logger import init_logger def pretty_dict(data): if not data: return '' return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': ')) class CliConfig(object): def __init__(self): self.global_config = None self.project_config = None self.keychain = None init_logger() self._load_global_config() self._load_project_config() self._load_keychain() self._add_repo_to_path() def _add_repo_to_path(self): if self.project_config: sys.path.append(self.project_config.repo_root) def _load_global_config(self): try: self.global_config = YamlGlobalConfig() except NotInProject as e: raise click.UsageError(e.message) def _load_project_config(self): try: self.project_config = self.global_config.get_project_config() except ProjectConfigNotFound: pass except NotInProject as e: raise click.UsageError(e.message) def _load_keychain(self): self.keychain_key = os.environ.get('CUMULUSCI_KEY') if self.project_config: keychain_class = os.environ.get( 'CUMULUSCI_KEYCHAIN_CLASS', self.project_config.cumulusci__keychain, ) self.keychain_class = import_class(keychain_class) self.keychain = self.keychain_class(self.project_config, self.keychain_key) self.project_config.set_keychain(self.keychain) try: CLI_CONFIG = CliConfig() except click.UsageError as e: click.echo(e.message) sys.exit(1) pass_config = click.make_pass_decorator(CliConfig, ensure=True) def check_connected_app(config): check_keychain(config) if not config.keychain.get_connected_app(): raise click.UsageError("Please use the 'org config_connected_app' command to configure the OAuth Connected App to use for this project's keychain") def check_keychain(config): check_project_config(config) if config.project_config.keychain and config.project_config.keychain.encrypted and not config.keychain_key: raise click.UsageError('You must set the environment variable CUMULUSCI_KEY with the encryption key to be used for storing org credentials') def check_project_config(config): if not config.project_config: raise click.UsageError('No project configuration found. 
You can use the "project init" command to initilize the project for use with CumulusCI') # Root command @click.group('cli') @pass_config def cli(config): pass @click.command(name='version', help='Print the current version of CumulusCI') def version(): click.echo(cumulusci.__version__) @click.command(name='shell', help='Drop into a python shell') @pass_config @click.pass_context def shell(ctx,config): code.interact(local=dict(globals(), **locals())) # Top Level Groups @click.group('project', help="Commands for interacting with project repository configurations") @pass_config def project(config): pass @click.group('org', help="Commands for connecting and interacting with Salesforce orgs") @pass_config def org(config): pass @click.group('task', help="Commands for finding and running tasks for a project") @pass_config def task(config): pass @click.group('flow', help="Commands for finding and running flows for a project") @pass_config def flow(config): pass @click.group('service',help="Commands for connecting services to the keychain") @pass_config def service(config): pass cli.add_command(project) cli.add_command(org) cli.add_command(task) cli.add_command(flow) cli.add_command(version) cli.add_command(shell) cli.add_command(service) # Commands for group: project @click.command(name='init', help="Initialize a new project for use with the cumulusci toolbelt", ) @click.option('--name', help="The project's package name", prompt=True, ) @click.option('--package-name', help="The project's package name", prompt=True, ) @click.option('--package-namespace', help="The project's package namespace", prompt=True, ) @click.option('--package-api-version', help="The Salesforce API verson for the package", prompt=True, default=CLI_CONFIG.global_config.project__package__api_version, ) @click.option('--git-prefix-feature', help="The branch prefix for all feature branches", prompt=True, default=CLI_CONFIG.global_config.project__git__prefix_feature, ) @click.option('--git-default-branch', help="The default branch in the repository", prompt=True, default=CLI_CONFIG.global_config.project__git__default_branch, ) @click.option('--git-prefix-beta', help="The tag prefix for beta release tags", prompt=True, default=CLI_CONFIG.global_config.project__git__prefix_beta, ) @click.option('--git-prefix-release', help="The tag prefix for production release tags", prompt=True, default=CLI_CONFIG.global_config.project__git__prefix_release, ) @click.option('--test-name-match', help="The SOQL format like query for selecting Apex tests. 
% is wildcard", prompt=True, default=CLI_CONFIG.global_config.project__test__name_match, ) @pass_config def project_init(config, name, package_name, package_namespace, package_api_version, git_prefix_feature, git_default_branch, git_prefix_beta, git_prefix_release, test_name_match): if not os.path.isdir('.git'): click.echo("You are not in the root of a Git repository") if os.path.isfile('cumulusci.yml'): click.echo("This project already has a cumulusci.yml file") yml_config = [] # project: yml_config.append('project:') yml_config.append(' name: {}'.format(name)) # package: package_config = [] if package_name and package_name != config.global_config.project__package__name: package_config.append(' name: {}'.format(package_name)) if package_namespace and package_namespace != config.global_config.project__package__namespace: package_config.append(' namespace: {}'.format(package_namespace)) if package_api_version and package_api_version != config.global_config.project__package__api_version: package_config.append(' api_version: {}'.format(package_api_version)) if package_config: yml_config.append(' package:') yml_config.extend(package_config) # git: git_config = [] if git_prefix_feature and git_prefix_feature != config.global_config.project__git__prefix_feature: git_config.append(' prefix_feature: {}'.format(git_prefix_feature)) if git_default_branch and git_default_branch != config.global_config.project__git__default_branch: git_config.append(' default_branch: {}'.format(git_default_branch)) if git_prefix_beta and git_prefix_beta != config.global_config.project__git__prefix_beta: git_config.append(' prefix_beta: {}'.format(git_prefix_beta)) if git_prefix_release and git_prefix_release != config.global_config.project__git__prefix_release: git_config.append(' prefix_release: {}'.format(git_prefix_release)) if git_config: yml_config.append(' git:') yml_config.extend(git_config) # test: test_config = [] if test_name_match and test_name_match != config.global_config.project__test__name_match: test_config.append(' name_match: {}'.format(test_name_match)) if test_config: yml_config.append(' test:') yml_config.extend(test_config) yml_config.append('') with open('cumulusci.yml','w') as f_yml: f_yml.write('\n'.join(yml_config)) click.echo("Your project is now initialized for use with CumulusCI") click.echo("You can use the project edit command to edit the project's config file") @click.command(name='info', help="Display information about the current project's configuration") @pass_config def project_info(config): check_project_config(config) click.echo(pretty_dict(config.project_config.project)) @click.command(name='list', help="List projects and their locations") @pass_config def project_list(config): pass @click.command(name='cd', help="Change to the project's directory") @pass_config def project_cd(config): pass project.add_command(project_init) project.add_command(project_info) #project.add_command(project_list) #project.add_command(project_cd) # Commands for group: service @click.command(name='list', help='List services available for configuration and use') @pass_config def service_list(config): headers = ['service','description','is_configured'] data = [] for serv,schema in config.project_config.services.iteritems(): is_configured = '' if serv in config.keychain.list_services(): is_configured = '* ' data.append((serv,schema['description'],is_configured)) table = Table(data, headers) click.echo(table) class ConnectServiceCommand(click.MultiCommand): def list_commands(self, ctx): """ list the 
services that can be configured """ config = ctx.ensure_object(CliConfig) return sorted(config.project_config.services.keys()) def _build_param(self, attribute, details): req = details['required'] return click.Option(('--{0}'.format(attribute),), prompt=req, required=req) def get_command(self, ctx, name): config = ctx.ensure_object(CliConfig) attributes = getattr( config.project_config, 'services__{0}__attributes'.format(name) ).iteritems() params = [self._build_param(attr,cnfg) for attr, cnfg in attributes] params.append(click.Option(('--project',),is_flag=True)) @click.pass_context def callback(ctx,project=False,*args, **kwargs): check_keychain(config) serv_conf = dict((k, v) for k, v in kwargs.iteritems() if v!=None) # remove None values config.keychain.set_service(name, ServiceConfig(serv_conf), project) if project: click.echo('{0} is now configured for this project'.format(name)) else: click.echo('{0} is now configured for global use'.format(name)) ret = click.Command(name, params=params, callback=callback) return ret @click.command(cls=ConnectServiceCommand,name='connect',help='Connect a CumulusCI task service') @click.pass_context def service_connect(ctx, *args, **kvargs): pass @click.command(name='show',help='Show the details of a connected service') @click.argument('service_name') @pass_config def service_show(config,service_name): check_keychain(config) try: service_config = config.keychain.get_service(service_name) click.echo(pretty_dict(service_config.config)) except ServiceNotConfigured: click.echo('{0} is not configured for this project. Use service connect {0} to configure.'.format(service_name)) service.add_command(service_connect) service.add_command(service_list) service.add_command(service_show) # Commands for group: org @click.command(name='browser', help="Opens a browser window and logs into the org using the stored OAuth credentials") @click.argument('org_name') @pass_config def org_browser(config, org_name): check_connected_app(config) org_config = config.project_config.get_org(org_name) org_config.refresh_oauth_token(config.keychain.get_connected_app()) webbrowser.open(org_config.start_url) # Save the org config in case it was modified config.keychain.set_org(org_name, org_config) @click.command(name='connect', help="Connects a new org's credentials using OAuth Web Flow") @click.argument('org_name') @click.option('--sandbox', is_flag=True, help="If set, connects to a Salesforce sandbox org") @click.option('--login-url', help='If set, login to this hostname.', default= 'https://login.salesforce.com') @click.option('--global-org', help='Set True if org should be used by any project', is_flag=True) @pass_config def org_connect(config, org_name, sandbox, login_url, global_org): check_connected_app(config) connected_app = config.keychain.get_connected_app() if sandbox: login_url = 'https://test.salesforce.com' oauth_capture = CaptureSalesforceOAuth( client_id = connected_app.client_id, client_secret = connected_app.client_secret, callback_url = connected_app.callback_url, auth_site = login_url, scope = 'web full refresh_token' ) oauth_dict = oauth_capture() org_config = OrgConfig(oauth_dict) org_config.load_userinfo() config.keychain.set_org(org_name, org_config, global_org) @click.command(name='default', help="Sets an org as the default org for tasks and flows") @click.argument('org_name') @click.option('--unset', is_flag=True, help="Unset the org as the default org leaving no default org selected") @pass_config def org_default(config, org_name, unset): 
check_connected_app(config) if unset: org = config.keychain.unset_default_org() click.echo('{} is no longer the default org. No default org set.'.format(org_name)) else: org = config.keychain.set_default_org(org_name) click.echo('{} is now the default org'.format(org_name)) @click.command(name='info', help="Display information for a connected org") @click.argument('org_name') @pass_config def org_info(config, org_name): check_connected_app(config) org_config = config.keychain.get_org(org_name) org_config.refresh_oauth_token(config.keychain.get_connected_app()) click.echo(pretty_dict(org_config.config)) # Save the org config in case it was modified config.keychain.set_org(org_name, org_config) @click.command(name='list', help="Lists the connected orgs for the current project") @pass_config def org_list(config): check_connected_app(config) data = [] headers = ['org','is_default'] for org in config.project_config.list_orgs(): org_config = config.project_config.get_org(org) if org_config.default: data.append((org, '*')) else: data.append((org, '')) table = Table(data, headers) click.echo(table) @click.command(name='scratch', help="Connects a Salesforce DX Scratch Org to the keychain") @click.argument('config_name') @click.argument('org_name') @click.option('--delete', is_flag=True, help="If set, triggers a deletion of the current scratch org. This can be used to reset the org as the org configuration remains to regenerate the org on the next task run.") @pass_config def org_scratch(config, config_name, org_name, delete): check_connected_app(config) scratch_configs = getattr(config.project_config, 'orgs__scratch') if not scratch_configs: raise click.UsageError( 'No scratch org configs found in cumulusci.yml') scratch_config = scratch_configs.get(config_name) if not scratch_config: raise click.UsageError( 'No scratch org config named {} found in the cumulusci.yml file'.format(config_name) ) org_config = ScratchOrgConfig(scratch_config) config.keychain.set_org(org_name, org_config) @click.command(name='scratch_delete', help="Deletes a Salesforce DX Scratch Org leaving the config in the keychain for regeneration") @click.argument('org_name') @pass_config def org_scratch_delete(config, org_name): check_connected_app(config) org_config = config.keychain.get_org(org_name) if not org_config.scratch: raise click.UsageError('Org {} is not a scratch org'.format(org_name)) try: org_config.delete_org() except ScratchOrgException as e: exception = click.UsageError(e.message) config.keychain.set_org(org_name, org_config) @click.command(name='connected_app', help="Displays the ConnectedApp info used for OAuth connections") @pass_config def org_connected_app(config): check_connected_app(config) click.echo(pretty_dict(config.keychain.get_connected_app().config)) @click.command(name='config_connected_app', help="Configures the connected app used for connecting to Salesforce orgs") @click.option('--client_id', help="The Client ID from the connected app", prompt=True) @click.option('--client_secret', help="The Client Secret from the connected app", prompt=True, hide_input=True) @click.option('--callback_url', help="The callback_url configured on the Connected App", default='http://localhost:8080/callback') @click.option('--project', help='Set if storing encrypted keychain file in project directory', is_flag=True) @pass_config def org_config_connected_app(config, client_id, client_secret, callback_url, project): check_keychain(config) app_config = ConnectedAppOAuthConfig() app_config.config = { 'client_id': 
client_id, 'client_secret': client_secret, 'callback_url': callback_url, } config.keychain.set_connected_app(app_config, project) org.add_command(org_browser) org.add_command(org_config_connected_app) org.add_command(org_connect) org.add_command(org_connected_app) org.add_command(org_default) org.add_command(org_info) org.add_command(org_list) org.add_command(org_scratch) org.add_command(org_scratch_delete) # Commands for group: task @click.command(name='list', help="List available tasks for the current context") @pass_config def task_list(config): check_project_config(config) data = [] headers = ['task', 'description'] for task in config.project_config.list_tasks(): data.append((task['name'], task['description'])) table = Table(data, headers) click.echo(table) @click.command(name='doc', help="Exports RST format documentation for all tasks") @pass_config def task_doc(config): config_src = config.global_config for name, options in config_src.tasks.items(): task_config = TaskConfig(options) doc = doc_task(name, task_config) click.echo(doc) click.echo('') @click.command(name='info', help="Displays information for a task") @click.argument('task_name') @pass_config def task_info(config, task_name): check_project_config(config) task_config = getattr(config.project_config, 'tasks__{}'.format(task_name)) if not task_config: raise TaskNotFoundError('Task not found: {}'.format(task_name)) task_config = TaskConfig(task_config) click.echo(rst2ansi(doc_task(task_name, task_config))) @click.command(name='run', help="Runs a task") @click.argument('task_name') @click.option('--org', help="Specify the target org. By default, runs against the current default org") @click.option('-o', nargs=2, multiple=True, help="Pass task specific options for the task as '-o option value'. You can specify more than one option by using -o more than once.") @click.option('--debug', is_flag=True, help="Drops into pdb, the Python debugger, on an exception") @pass_config def task_run(config, task_name, org, o, debug): # Check environment check_keychain(config) # Get necessary configs if org: org_config = config.project_config.get_org(org) else: org, org_config = config.project_config.keychain.get_default_org() task_config = getattr(config.project_config, 'tasks__{}'.format(task_name)) if not task_config: raise TaskNotFoundError('Task not found: {}'.format(task_name)) # Get the class to look up options class_path = task_config.get('class_path') task_class = import_class(class_path) # Parse command line options and add to task config if o: if 'options' not in task_config: task_config['options'] = {} for option in o: name = option[0] value = option[1] # Validate the option if name not in task_class.task_options: raise click.UsageError( 'Option "{}" is not available for task {}'.format( name, task_name, ), ) # Override the option in the task config task_config['options'][name] = value task_config = TaskConfig(task_config) exception = None # Create and run the task try: task = task_class(config.project_config, task_config, org_config = org_config) except TaskRequiresSalesforceOrg as e: exception = click.UsageError('This task requires a salesforce org. 
Use org default <name> to set a default org or pass the org name with the --org option') except TaskOptionsError as e: exception = click.UsageError(e.message) except Exception as e: if debug: import pdb import traceback traceback.print_exc() pdb.post_mortem() else: raise if not exception: try: task() except TaskOptionsError as e: exception = click.UsageError(e.message) except ApexTestException as e: exception = click.ClickException('Failed: ApexTestFailure') except MetadataComponentFailure as e: exception = click.ClickException('Failed: MetadataComponentFailure') except MetadataApiError as e: exception = click.ClickException('Failed: MetadataApiError') except Exception as e: if debug: import pdb import traceback traceback.print_exc() pdb.post_mortem() else: raise # Save the org config in case it was modified in the task if org and org_config: config.keychain.set_org(org, org_config) if exception: raise exception # Add the task commands to the task group task.add_command(task_doc) task.add_command(task_info) task.add_command(task_list) task.add_command(task_run) # Commands for group: flow @click.command(name='list', help="List available flows for the current context") @pass_config def flow_list(config): check_project_config(config) data = [] headers = ['flow', 'description'] for flow in config.project_config.flows: description = getattr(config.project_config, 'flows__{}__description'.format(flow)) data.append((flow, description)) table = Table(data, headers) click.echo(table) @click.command(name='info', help="Displays information for a flow") @click.argument('flow_name') @pass_config def flow_info(config, flow_name): check_project_config(config) flow = getattr(config.project_config, 'flows__{}'.format(flow_name)) if not flow: raise FlowNotFoundError('Flow not found: {}'.format(flow_name)) click.echo(pretty_dict(flow)) @click.command(name='run', help="Runs a flow") @click.argument('flow_name') @click.option('--org', help="Specify the target org. By default, runs against the current default org") @click.option('--delete-org', is_flag=True, help="If set, deletes the scratch org after the flow completes") @click.option('--debug', is_flag=True, help="Drops into pdb, the Python debugger, on an exception") @pass_config def flow_run(config, flow_name, org, delete_org, debug): # Check environment check_keychain(config) # Get necessary configs if org: org_config = config.project_config.get_org(org) else: org, org_config = config.project_config.keychain.get_default_org() if delete_org and not org_config.scratch: raise click.UsageError('--delete-org can only be used with a scratch org') flow = getattr(config.project_config, 'flows__{}'.format(flow_name)) if not flow: raise FlowNotFoundError('Flow not found: {}'.format(flow_name)) flow_config = FlowConfig(flow) if not flow_config.config: raise click.UsageError('No configuration found for flow {}'.format(flow_name)) # Get the class to look up options class_path = flow_config.config.get('class_path', 'cumulusci.core.flows.BaseFlow') flow_class = import_class(class_path) exception = None # Create the flow and handle initialization exceptions try: flow = flow_class(config.project_config, flow_config, org_config) except TaskRequiresSalesforceOrg as e: exception = click.UsageError('This flow requires a salesforce org. 
Use org default <name> to set a default org or pass the org name with the --org option') except TaskOptionsError as e: exception = click.UsageError(e.message) except Exception as e: if debug: import pdb import traceback traceback.print_exc() pdb.post_mortem() else: raise if not exception: # Run the flow and handle exceptions try: flow() except TaskOptionsError as e: exception = click.UsageError(e.message) except ApexTestException as e: exception = click.ClickException('Failed: ApexTestException') except MetadataComponentFailure as e: exception = click.ClickException('Failed: MetadataComponentFailure') except MetadataApiError as e: exception = click.ClickException('Failed: MetadataApiError') except Exception as e: if debug: import pdb import traceback traceback.print_exc() pdb.post_mortem() else: raise # Delete the scratch org if --delete-org was set if delete_org: try: org_config.delete_org() except Exception as e: click.echo('Scratch org deletion failed. Ignoring the error below to complete the flow:') click.echo(e.message) # Save the org config in case it was modified in a task if org and org_config: config.keychain.set_org(org, org_config) if exception: raise exception flow.add_command(flow_list) flow.add_command(flow_info) flow.add_command(flow_run)
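# Hedged, standalone sketch (not part of the original cli.py): it only
# illustrates the option handling performed by task_run above -- each
# '-o name value' pair is checked against the task class's declared
# task_options before it overrides the task config. The option names and
# values below are invented for the example.
_example_task_options = {'path': {}, 'managed': {}}   # assumed schema from a task class
_example_task_config = {'options': {}}
for _name, _value in [('path', 'src'), ('managed', 'True')]:
    if _name not in _example_task_options:
        raise ValueError('Option "{}" is not available'.format(_name))
    _example_task_config['options'][_name] = _value
# _example_task_config == {'options': {'path': 'src', 'managed': 'True'}}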
Joble/CumulusCI
cumulusci/cli/cli.py
Python
bsd-3-clause
27,515
# -*- coding: utf-8 -*- """ Jinja Documentation Extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for automatically documenting filters and tests. :copyright: Copyright 2008 by Armin Ronacher. :license: BSD. """ import collections import os import re import inspect import jinja2 from itertools import islice from types import BuiltinFunctionType from docutils import nodes from docutils.statemachine import ViewList from sphinx.ext.autodoc import prepare_docstring from sphinx.application import TemplateBridge from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic from jinja2 import Environment, FileSystemLoader def parse_rst(state, content_offset, doc): node = nodes.section() # hack around title style bookkeeping surrounding_title_styles = state.memo.title_styles surrounding_section_level = state.memo.section_level state.memo.title_styles = [] state.memo.section_level = 0 state.nested_parse(doc, content_offset, node, match_titles=1) state.memo.title_styles = surrounding_title_styles state.memo.section_level = surrounding_section_level return node.children class JinjaStyle(Style): title = 'Jinja Style' default_style = "" styles = { Comment: 'italic #aaaaaa', Comment.Preproc: 'noitalic #B11414', Comment.Special: 'italic #505050', Keyword: 'bold #B80000', Keyword.Type: '#808080', Operator.Word: 'bold #B80000', Name.Builtin: '#333333', Name.Function: '#333333', Name.Class: 'bold #333333', Name.Namespace: 'bold #333333', Name.Entity: 'bold #363636', Name.Attribute: '#686868', Name.Tag: 'bold #686868', Name.Decorator: '#686868', String: '#AA891C', Number: '#444444', Generic.Heading: 'bold #000080', Generic.Subheading: 'bold #800080', Generic.Deleted: '#aa0000', Generic.Inserted: '#00aa00', Generic.Error: '#aa0000', Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.Prompt: '#555555', Generic.Output: '#888888', Generic.Traceback: '#aa0000', Error: '#F00 bg:#FAA' } _sig_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*(\(.*?\))') def format_function(name, aliases, func): lines = inspect.getdoc(func).splitlines() signature = '()' if isinstance(func, BuiltinFunctionType): match = _sig_re.match(lines[0]) if match is not None: del lines[:1 + bool(lines and not lines[0])] signature = match.group(1) else: try: argspec = inspect.getargspec(func) if getattr(func, 'environmentfilter', False) or \ getattr(func, 'contextfilter', False) or \ getattr(func, 'evalcontextfilter', False): del argspec[0][0] signature = inspect.formatargspec(*argspec) except: pass result = ['.. 
function:: %s%s' % (name, signature), ''] result.extend(' ' + line for line in lines) if aliases: result.extend(('', ' :aliases: %s' % ', '.join( '``%s``' % x for x in sorted(aliases)))) return result def dump_functions(mapping): def directive(dirname, arguments, options, content, lineno, content_offset, block_text, state, state_machine): reverse_mapping = {} for name, func in mapping.items(): reverse_mapping.setdefault(func, []).append(name) filters = [] for func, names in reverse_mapping.items(): aliases = sorted(names, key=lambda x: len(x)) name = aliases.pop() filters.append((name, aliases, func)) filters.sort() result = ViewList() for name, aliases, func in filters: for item in format_function(name, aliases, func): result.append(item, '<jinjaext>') node = nodes.paragraph() state.nested_parse(result, content_offset, node) return node.children return directive from jinja2.defaults import DEFAULT_FILTERS, DEFAULT_TESTS jinja_filters = dump_functions(DEFAULT_FILTERS) jinja_tests = dump_functions(DEFAULT_TESTS) def jinja_nodes(dirname, arguments, options, content, lineno, content_offset, block_text, state, state_machine): from jinja2.nodes import Node doc = ViewList() def walk(node, indent): p = ' ' * indent sig = ', '.join(node.fields) doc.append(p + '.. autoclass:: %s(%s)' % (node.__name__, sig), '') if node.abstract: members = [] for key, name in node.__dict__.items(): if not key.startswith('_') and \ not hasattr(node.__base__, key) and isinstance(name, collections.Callable): members.append(key) if members: members.sort() doc.append('%s :members: %s' % (p, ', '.join(members)), '') if node.__base__ != object: doc.append('', '') doc.append('%s :Node type: :class:`%s`' % (p, node.__base__.__name__), '') doc.append('', '') children = node.__subclasses__() children.sort(key=lambda x: x.__name__.lower()) for child in children: walk(child, indent) walk(Node, 0) return parse_rst(state, content_offset, doc) def inject_toc(app, doctree, docname): titleiter = iter(doctree.traverse(nodes.title)) try: # skip first title, we are not interested in that one next(titleiter) title = next(titleiter) # and check if there is at least another title next(titleiter) except StopIteration: return tocnode = nodes.section('') tocnode['classes'].append('toc') toctitle = nodes.section('') toctitle['classes'].append('toctitle') toctitle.append(nodes.title(text='Table Of Contents')) tocnode.append(toctitle) tocnode += doctree.document.settings.env.get_toc_for(docname)[0][1] title.parent.insert(title.parent.children.index(title), tocnode) def setup(app): app.add_directive('jinjafilters', jinja_filters, 0, (0, 0, 0)) app.add_directive('jinjatests', jinja_tests, 0, (0, 0, 0)) app.add_directive('jinjanodes', jinja_nodes, 0, (0, 0, 0)) # uncomment for inline toc. links are broken unfortunately ##app.connect('doctree-resolved', inject_toc)
jackTheRipper/iotrussia
web_server/lib/jinja2-master/docs/jinjaext.py
Python
gpl-2.0
6,923
# # Copyright John Reid 2007, 2008 # """ Code to build HMM models of PSSMs of various Markov orders. """ import hmm, numpy, pickle def _load_model_parameters(model, f): parameters = pickle.load(f) if len(parameters) != len(model.parameters): raise RuntimeError('Different number of parameters in file (%d) and in model (%d)' % (len(parameters), len(model.parameters))) for i, t in enumerate(parameters): model.parameters[i] = t def _dump_model_parameters(model, f): pickle.dump(list(model.parameters), f) class ModelBuilder(object): """ Helps to build models over order n alphabets """ def __init__(self, order, alphabet_size=4): self.order = order self.alphabet_size = alphabet_size self.M = alphabet_size ** (order + 1) self.converter = hmm.MarkovOrderConverter(alphabet_size, order) def new_model_by_states(self): "@return: A new hmm.ModelByStates." return hmm.ModelByStates(self.M, self.order) def add_fully_parameterised_state(self, model, pi = 1.0, emission_dist = None): """ Adds a state with a different emission parameter for each of self.M possible order-n output characters """ # add the state state = model.add_state(pi = model.add_parameter(pi)) # allocate the parameters to the emissions for m in xrange(self.M): if None != emission_dist: assert len(emission_dist) == self.M state.b[m] = model.add_parameter(emission_dist[m]) else: state.b[m] = model.add_parameter( 1.0 / self.M ) def add_order_0_parameterised_state(self, model, pi = 1.0, emission_dist = None): """ Adds a state with shared emission parameters for each of the self.M possible order-n output characters that represent the same order-0 character """ # add the state state = model.add_state(pi = model.add_parameter(pi)) # from IPython.Shell import IPShellEmbed # ipshell = IPShellEmbed() # ipshell() # add the parameters for the emissions if None != emission_dist: assert len(emission_dist) == self.alphabet_size params = [ model.add_parameter( x ) for x in emission_dist ] else: params = [ model.add_parameter( 1.0 / self.M ) for i in xrange(self.alphabet_size) ] # allocate the parameters to the emissions for m in xrange(self.M): state.b[m] = params[m % self.alphabet_size] return state def add_order_0_rev_comp_state(self, model, forward_state, pi = 1.0): """ Adds a state with shared emission parameters for each of the self.M possible order-n output characters that represent the same order-0 character. This state's output is the reverse complement of the given state """ # add the state state = model.add_state(pi = model.add_parameter(pi)) # allocate the parameters to the emissions for m in xrange(self.M): state.b[m] = forward_state.b[self.alphabet_size - 1 - (m % self.alphabet_size)] return state def create_uniform_background_model(self): """ @return: A HMM with one mosaic with uniform emission probabilities. """ model = hmm.ModelByStates(self.M, self.order) self.add_fully_parameterised_state( model, emission_dist = numpy.ones(self.M)/4. 
) transition_param = model.add_parameter(1.0) model.states[0].add_successor(model.states[0], transition_param) return model def create_background_mosaic_model(self, num_mosaics, p_transition, dirichlet_prior_strength): """ Create a mosaic model """ model = hmm.ModelByStates(self.M, self.order) transition_param = model.add_parameter(p_transition) no_transition_param = model.add_parameter(1.0 - p_transition) for i in xrange(num_mosaics): self.add_fully_parameterised_state( model, emission_dist = hmm.dirichlet_draw(numpy.ones(self.M)*dirichlet_prior_strength) ) for state_1 in model.states: for state_2 in model.states: if state_1 == state_2: state_1.add_successor(state_2, no_transition_param) else: state_1.add_successor(state_2, transition_param) return model def load_background_mosaic_model(self, f): ''' Load a background model from the given file (or filename) ''' if isinstance(f, str): f = open(f) # how many mosaics? num_mosaics = pickle.load(f) # create a model of the desired structure model = self.create_background_mosaic_model(num_mosaics, 0.0, 1.0) # load the parameters into the model _load_model_parameters(model, f) return model def dump_background_mosaic_model(self, model, f): ''' Dump a background model into the given file (or filename) ''' if isinstance(f, str): f = open(f, 'w') # how many mosaics? pickle.dump(model.N, f) # load the parameters into the model _dump_model_parameters(model, f) def create_background_model(order, N): return hmm.pssm.ModelBuilder(order).create_background_mosaic_model(N, .01, 100.0)
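# Hedged, standalone illustration (not part of the original model_builder.py):
# shows the parameter tying used by add_order_0_parameterised_state above. An
# order-n model over a 4-letter alphabet emits M = 4 ** (order + 1) symbols,
# but every symbol m shares the emission parameter of its order-0 character
# m % 4, leaving only 4 free emission parameters per state. The parameter
# names are placeholders, not real hmm objects.
_order, _alphabet_size = 2, 4
_M = _alphabet_size ** (_order + 1)                     # 64 order-2 symbols
_params = ['p_a', 'p_c', 'p_g', 'p_t']                  # one shared parameter per base
_tied = [_params[_m % _alphabet_size] for _m in range(_M)]
assert len(_tied) == _M and set(_tied) == set(_params)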
JohnReid/biopsy
Python/hmm/pssm/model_builder.py
Python
mit
5,519
import unittest import numpy as np import numpy.testing as np_test from pgmpy.inference import VariableElimination from pgmpy.inference import BeliefPropagation from pgmpy.models import BayesianModel, MarkovModel from pgmpy.models import JunctionTree from pgmpy.factors import TabularCPD from pgmpy.factors import Factor from pgmpy.extern.six.moves import range class TestVariableElimination(unittest.TestCase): def setUp(self): self.bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'), ('J', 'L'), ('G', 'L')]) cpd_a = TabularCPD('A', 2, values=[[0.2], [0.8]]) cpd_r = TabularCPD('R', 2, values=[[0.4], [0.6]]) cpd_j = TabularCPD('J', 2, values=[[0.9, 0.6, 0.7, 0.1], [0.1, 0.4, 0.3, 0.9]], evidence=['A', 'R'], evidence_card=[2, 2]) cpd_q = TabularCPD('Q', 2, values=[[0.9, 0.2], [0.1, 0.8]], evidence=['J'], evidence_card=[2]) cpd_l = TabularCPD('L', 2, values=[[0.9, 0.45, 0.8, 0.1], [0.1, 0.55, 0.2, 0.9]], evidence=['J', 'G'], evidence_card=[2, 2]) cpd_g = TabularCPD('G', 2, values=[[0.6], [0.4]]) self.bayesian_model.add_cpds(cpd_a, cpd_g, cpd_j, cpd_l, cpd_q, cpd_r) self.bayesian_inference = VariableElimination(self.bayesian_model) # All the values that are used for comparision in the all the tests are # found using SAMIAM (assuming that it is correct ;)) def test_query_single_variable(self): query_result = self.bayesian_inference.query(['J']) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.416, 0.584])) def test_query_multiple_variable(self): query_result = self.bayesian_inference.query(['Q', 'J']) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.416, 0.584])) np_test.assert_array_almost_equal(query_result['Q'].values, np.array([0.4912, 0.5088])) def test_query_single_variable_with_evidence(self): query_result = self.bayesian_inference.query(variables=['J'], evidence={'A': 0, 'R': 1}) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.60, 0.40])) def test_query_multiple_variable_with_evidence(self): query_result = self.bayesian_inference.query(variables=['J', 'Q'], evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1}) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.818182, 0.181818])) np_test.assert_array_almost_equal(query_result['Q'].values, np.array([0.772727, 0.227273])) def test_query_multiple_times(self): # This just tests that the models are not getting modified while querying them query_result = self.bayesian_inference.query(['J']) query_result = self.bayesian_inference.query(['J']) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.416, 0.584])) query_result = self.bayesian_inference.query(['Q', 'J']) query_result = self.bayesian_inference.query(['Q', 'J']) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.416, 0.584])) np_test.assert_array_almost_equal(query_result['Q'].values, np.array([0.4912, 0.5088])) query_result = self.bayesian_inference.query(variables=['J'], evidence={'A': 0, 'R': 1}) query_result = self.bayesian_inference.query(variables=['J'], evidence={'A': 0, 'R': 1}) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.60, 0.40])) query_result = self.bayesian_inference.query(variables=['J', 'Q'], evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1}) query_result = self.bayesian_inference.query(variables=['J', 'Q'], evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1}) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.818182, 0.181818])) np_test.assert_array_almost_equal(query_result['Q'].values, np.array([0.772727, 
0.227273])) def test_max_marginal(self): np_test.assert_almost_equal(self.bayesian_inference.max_marginal(), 0.1659, decimal=4) def test_max_marginal_var(self): np_test.assert_almost_equal(self.bayesian_inference.max_marginal(['G']), 0.5714, decimal=4) def test_max_marginal_var1(self): np_test.assert_almost_equal(self.bayesian_inference.max_marginal(['G', 'R']), 0.4055, decimal=4) def test_max_marginal_var2(self): np_test.assert_almost_equal(self.bayesian_inference.max_marginal(['G', 'R', 'A']), 0.3260, decimal=4) def test_map_query(self): map_query = self.bayesian_inference.map_query() self.assertDictEqual(map_query, {'A': 1, 'R': 1, 'J': 1, 'Q': 1, 'G': 0, 'L': 0}) def test_map_query_with_evidence(self): map_query = self.bayesian_inference.map_query(['A', 'R', 'L'], {'J': 0, 'Q': 1, 'G': 0}) self.assertDictEqual(map_query, {'A': 1, 'R': 0, 'L': 0}) def test_induced_graph(self): induced_graph = self.bayesian_inference.induced_graph(['G', 'Q', 'A', 'J', 'L', 'R']) result_edges = sorted([sorted(x) for x in induced_graph.edges()]) self.assertEqual([['A', 'J'], ['A', 'R'], ['G', 'J'], ['G', 'L'], ['J', 'L'], ['J', 'Q'], ['J', 'R'], ['L', 'R']], result_edges) def test_induced_width(self): result_width = self.bayesian_inference.induced_width(['G', 'Q', 'A', 'J', 'L', 'R']) self.assertEqual(2, result_width) def tearDown(self): del self.bayesian_inference del self.bayesian_model class TestVariableEliminationMarkov(unittest.TestCase): def setUp(self): # It is just a moralised version of the above Bayesian network so all the results are same. Only factors # are under consideration for inference so this should be fine. self.markov_model = MarkovModel([('A', 'J'), ('R', 'J'), ('J', 'Q'), ('J', 'L'), ('G', 'L'), ('A', 'R'), ('J', 'G')]) factor_a = TabularCPD('A', 2, values=[[0.2], [0.8]]).to_factor() factor_r = TabularCPD('R', 2, values=[[0.4], [0.6]]).to_factor() factor_j = TabularCPD('J', 2, values=[[0.9, 0.6, 0.7, 0.1], [0.1, 0.4, 0.3, 0.9]], evidence=['A', 'R'], evidence_card=[2, 2]).to_factor() factor_q = TabularCPD('Q', 2, values=[[0.9, 0.2], [0.1, 0.8]], evidence=['J'], evidence_card=[2]).to_factor() factor_l = TabularCPD('L', 2, values=[[0.9, 0.45, 0.8, 0.1], [0.1, 0.55, 0.2, 0.9]], evidence=['J', 'G'], evidence_card=[2, 2]).to_factor() factor_g = TabularCPD('G', 2, [[0.6], [0.4]]).to_factor() self.markov_model.add_factors(factor_a, factor_r, factor_j, factor_q, factor_l, factor_g) self.markov_inference = VariableElimination(self.markov_model) # All the values that are used for comparision in the all the tests are # found using SAMIAM (assuming that it is correct ;)) def test_query_single_variable(self): query_result = self.markov_inference.query(['J']) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.416, 0.584])) def test_query_multiple_variable(self): query_result = self.markov_inference.query(['Q', 'J']) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.416, 0.584])) np_test.assert_array_almost_equal(query_result['Q'].values, np.array([0.4912, 0.5088])) def test_query_single_variable_with_evidence(self): query_result = self.markov_inference.query(variables=['J'], evidence={'A': 0, 'R': 1}) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.60, 0.40])) def test_query_multiple_variable_with_evidence(self): query_result = self.markov_inference.query(variables=['J', 'Q'], evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1}) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.818182, 0.181818])) 
np_test.assert_array_almost_equal(query_result['Q'].values, np.array([0.772727, 0.227273])) def test_query_multiple_times(self): # This just tests that the models are not getting modified while querying them query_result = self.markov_inference.query(['J']) query_result = self.markov_inference.query(['J']) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.416, 0.584])) query_result = self.markov_inference.query(['Q', 'J']) query_result = self.markov_inference.query(['Q', 'J']) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.416, 0.584])) np_test.assert_array_almost_equal(query_result['Q'].values, np.array([0.4912, 0.5088])) query_result = self.markov_inference.query(variables=['J'], evidence={'A': 0, 'R': 1}) query_result = self.markov_inference.query(variables=['J'], evidence={'A': 0, 'R': 1}) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.60, 0.40])) query_result = self.markov_inference.query(variables=['J', 'Q'], evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1}) query_result = self.markov_inference.query(variables=['J', 'Q'], evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1}) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.818182, 0.181818])) np_test.assert_array_almost_equal(query_result['Q'].values, np.array([0.772727, 0.227273])) def test_max_marginal(self): np_test.assert_almost_equal(self.markov_inference.max_marginal(), 0.1659, decimal=4) def test_max_marginal_var(self): np_test.assert_almost_equal(self.markov_inference.max_marginal(['G']), 0.5714, decimal=4) def test_max_marginal_var1(self): np_test.assert_almost_equal(self.markov_inference.max_marginal(['G', 'R']), 0.4055, decimal=4) def test_max_marginal_var2(self): np_test.assert_almost_equal(self.markov_inference.max_marginal(['G', 'R', 'A']), 0.3260, decimal=4) def test_map_query(self): map_query = self.markov_inference.map_query() self.assertDictEqual(map_query, {'A': 1, 'R': 1, 'J': 1, 'Q': 1, 'G': 0, 'L': 0}) def test_map_query_with_evidence(self): map_query = self.markov_inference.map_query(['A', 'R', 'L'], {'J': 0, 'Q': 1, 'G': 0}) self.assertDictEqual(map_query, {'A': 1, 'R': 0, 'L': 0}) def test_induced_graph(self): induced_graph = self.markov_inference.induced_graph(['G', 'Q', 'A', 'J', 'L', 'R']) result_edges = sorted([sorted(x) for x in induced_graph.edges()]) self.assertEqual([['A', 'J'], ['A', 'R'], ['G', 'J'], ['G', 'L'], ['J', 'L'], ['J', 'Q'], ['J', 'R'], ['L', 'R']], result_edges) def test_induced_width(self): result_width = self.markov_inference.induced_width(['G', 'Q', 'A', 'J', 'L', 'R']) self.assertEqual(2, result_width) def tearDown(self): del self.markov_inference del self.markov_model class TestBeliefPropagation(unittest.TestCase): def setUp(self): self.junction_tree = JunctionTree([(('A', 'B'), ('B', 'C')), (('B', 'C'), ('C', 'D'))]) phi1 = Factor(['A', 'B'], [2, 3], range(6)) phi2 = Factor(['B', 'C'], [3, 2], range(6)) phi3 = Factor(['C', 'D'], [2, 2], range(4)) self.junction_tree.add_factors(phi1, phi2, phi3) self.bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'), ('J', 'L'), ('G', 'L')]) cpd_a = TabularCPD('A', 2, values=[[0.2], [0.8]]) cpd_r = TabularCPD('R', 2, values=[[0.4], [0.6]]) cpd_j = TabularCPD('J', 2, values=[[0.9, 0.6, 0.7, 0.1], [0.1, 0.4, 0.3, 0.9]], evidence=['A', 'R'], evidence_card=[2, 2]) cpd_q = TabularCPD('Q', 2, values=[[0.9, 0.2], [0.1, 0.8]], evidence=['J'], evidence_card=[2]) cpd_l = TabularCPD('L', 2, values=[[0.9, 0.45, 0.8, 0.1], [0.1, 0.55, 0.2, 0.9]], evidence=['J', 'G'], 
evidence_card=[2, 2]) cpd_g = TabularCPD('G', 2, values=[[0.6], [0.4]]) self.bayesian_model.add_cpds(cpd_a, cpd_g, cpd_j, cpd_l, cpd_q, cpd_r) def test_calibrate_clique_belief(self): belief_propagation = BeliefPropagation(self.junction_tree) belief_propagation.calibrate() clique_belief = belief_propagation.get_clique_beliefs() phi1 = Factor(['A', 'B'], [2, 3], range(6)) phi2 = Factor(['B', 'C'], [3, 2], range(6)) phi3 = Factor(['C', 'D'], [2, 2], range(4)) b_A_B = phi1 * (phi3.marginalize(['D'], inplace=False) * phi2).marginalize(['C'], inplace=False) b_B_C = phi2 * (phi1.marginalize(['A'], inplace=False) * phi3.marginalize(['D'], inplace=False)) b_C_D = phi3 * (phi1.marginalize(['A'], inplace=False) * phi2).marginalize(['B'], inplace=False) np_test.assert_array_almost_equal(clique_belief[('A', 'B')].values, b_A_B.values) np_test.assert_array_almost_equal(clique_belief[('B', 'C')].values, b_B_C.values) np_test.assert_array_almost_equal(clique_belief[('C', 'D')].values, b_C_D.values) def test_calibrate_sepset_belief(self): belief_propagation = BeliefPropagation(self.junction_tree) belief_propagation.calibrate() sepset_belief = belief_propagation.get_sepset_beliefs() phi1 = Factor(['A', 'B'], [2, 3], range(6)) phi2 = Factor(['B', 'C'], [3, 2], range(6)) phi3 = Factor(['C', 'D'], [2, 2], range(4)) b_B = (phi1 * (phi3.marginalize(['D'], inplace=False) * phi2).marginalize(['C'], inplace=False)).marginalize(['A'], inplace=False) b_C = (phi2 * (phi1.marginalize(['A'], inplace=False) * phi3.marginalize(['D'], inplace=False))).marginalize(['B'], inplace=False) np_test.assert_array_almost_equal(sepset_belief[frozenset((('A', 'B'), ('B', 'C')))].values, b_B.values) np_test.assert_array_almost_equal(sepset_belief[frozenset((('B', 'C'), ('C', 'D')))].values, b_C.values) def test_max_calibrate_clique_belief(self): belief_propagation = BeliefPropagation(self.junction_tree) belief_propagation.max_calibrate() clique_belief = belief_propagation.get_clique_beliefs() phi1 = Factor(['A', 'B'], [2, 3], range(6)) phi2 = Factor(['B', 'C'], [3, 2], range(6)) phi3 = Factor(['C', 'D'], [2, 2], range(4)) b_A_B = phi1 * (phi3.maximize(['D'], inplace=False) * phi2).maximize(['C'], inplace=False) b_B_C = phi2 * (phi1.maximize(['A'], inplace=False) * phi3.maximize(['D'], inplace=False)) b_C_D = phi3 * (phi1.maximize(['A'], inplace=False) * phi2).maximize(['B'], inplace=False) np_test.assert_array_almost_equal(clique_belief[('A', 'B')].values, b_A_B.values) np_test.assert_array_almost_equal(clique_belief[('B', 'C')].values, b_B_C.values) np_test.assert_array_almost_equal(clique_belief[('C', 'D')].values, b_C_D.values) def test_max_calibrate_sepset_belief(self): belief_propagation = BeliefPropagation(self.junction_tree) belief_propagation.max_calibrate() sepset_belief = belief_propagation.get_sepset_beliefs() phi1 = Factor(['A', 'B'], [2, 3], range(6)) phi2 = Factor(['B', 'C'], [3, 2], range(6)) phi3 = Factor(['C', 'D'], [2, 2], range(4)) b_B = (phi1 * (phi3.maximize(['D'], inplace=False) * phi2).maximize(['C'], inplace=False)).maximize(['A'], inplace=False) b_C = (phi2 * (phi1.maximize(['A'], inplace=False) * phi3.maximize(['D'], inplace=False))).maximize(['B'], inplace=False) np_test.assert_array_almost_equal(sepset_belief[frozenset((('A', 'B'), ('B', 'C')))].values, b_B.values) np_test.assert_array_almost_equal(sepset_belief[frozenset((('B', 'C'), ('C', 'D')))].values, b_C.values) # All the values that are used for comparision in the all the tests are # found using SAMIAM (assuming that it is correct ;)) def 
test_query_single_variable(self): belief_propagation = BeliefPropagation(self.bayesian_model) query_result = belief_propagation.query(['J']) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.416, 0.584])) def test_query_multiple_variable(self): belief_propagation = BeliefPropagation(self.bayesian_model) query_result = belief_propagation.query(['Q', 'J']) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.416, 0.584])) np_test.assert_array_almost_equal(query_result['Q'].values, np.array([0.4912, 0.5088])) def test_query_single_variable_with_evidence(self): belief_propagation = BeliefPropagation(self.bayesian_model) query_result = belief_propagation.query(variables=['J'], evidence={'A': 0, 'R': 1}) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.60, 0.40])) def test_query_multiple_variable_with_evidence(self): belief_propagation = BeliefPropagation(self.bayesian_model) query_result = belief_propagation.query(variables=['J', 'Q'], evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1}) np_test.assert_array_almost_equal(query_result['J'].values, np.array([0.818182, 0.181818])) np_test.assert_array_almost_equal(query_result['Q'].values, np.array([0.772727, 0.227273])) def test_map_query(self): belief_propagation = BeliefPropagation(self.bayesian_model) map_query = belief_propagation.map_query() self.assertDictEqual(map_query, {'A': 1, 'R': 1, 'J': 1, 'Q': 1, 'G': 0, 'L': 0}) def test_map_query_with_evidence(self): belief_propagation = BeliefPropagation(self.bayesian_model) map_query = belief_propagation.map_query(['A', 'R', 'L'], {'J': 0, 'Q': 1, 'G': 0}) self.assertDictEqual(map_query, {'A': 1, 'R': 0, 'L': 0}) def tearDown(self): del self.junction_tree del self.bayesian_model
anaviltripathi/pgmpy
pgmpy/tests/test_inference/test_ExactInference.py
Python
mit
21,305
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Port files to Starboard (or do a lot of the leg work)."""

import logging
import os
import re
import string
import sys

# Prefix for system functions.
_SYSTEM_FUNCTIONS_PREFIX = 'XML_'

# Prefix and suffix for system header.
_SYSTEM_HEADER_PREFIX = '#ifdef'
_SYSTEM_HEADER_PREFIX_TEMPLATE = string.Template('#ifdef HAVE_${header_name}_H')
_SYSTEM_HEADER_SUFFIX = '#endif'

# Includes that should be wrapped by .
_INCLUDES_TO_WRAP = [
]

# References to these symbols should be wrapped with the prefix so that we
# can redirect them for Starboard.
_SYSTEM_FUNCTIONS = [
    r'abort',
    r'assert',
    r'free',
    r'malloc',
    r'realloc',
]

# Matches all "system" include lines. Note that this does NOT match include
# lines where there is more than one space between the # and the 'include', in
# an attempt to avoid touching includes that are already conditional.
_RE_INCLUDE = re.compile(r'^#\s?include <.*>$')

_RE_INCLUDE_TO_WRAP = re.compile(
    r'^#\s*include <(' + r'|'.join(_INCLUDES_TO_WRAP) + r').*>')

# Matches system function calls that should be wrapped.
_RE_SYSTEM_FUNCTION = re.compile(
    r'(?<!\w)(?!\>)(' + r'|'.join(_SYSTEM_FUNCTIONS) + r')(?=[(])')


def _IsSystemInclude(line):
  return _RE_INCLUDE.match(line) and not _RE_INCLUDE_EXCEPTION.match(line)


def _ReplaceSystemFunction(match):
  return _SYSTEM_FUNCTIONS_PREFIX + match.group(1).upper()


def _ReplaceIncludeToWrap(match):
  return _SYSTEM_FUNCTIONS_PREFIX + match.group(1).upper()


def _Replace(line):
  replaced_line = _RE_SYSTEM_FUNCTION.sub(_ReplaceSystemFunction, line)
  return replaced_line


def PortFile(input_file_path):
  """Reads a source file, ports it, and writes it again.

  Args:
    input_file_path: The path to the source file to port.

  Returns:
    0 on success.
  """
  lines = None
  with open(input_file_path, 'r') as f:
    lines = f.read().splitlines()

  output_lines = []
  logging.info('Porting: %s', input_file_path)
  for line_number in range(0, len(lines)):
    previous_line = lines[line_number - 1] if line_number > 0 else ''
    line = lines[line_number]

    result = _RE_INCLUDE_TO_WRAP.match(line)
    if result:
      if not previous_line.startswith(_SYSTEM_HEADER_PREFIX):
        output_lines.append(_SYSTEM_HEADER_PREFIX_TEMPLATE.substitute(
            header_name=result.group(1).upper()))
        output_lines.append(line)
        output_lines.append(_SYSTEM_HEADER_SUFFIX)
      else:
        output_lines.append(line)
      continue

    replaced_line = _Replace(line)
    if replaced_line != line:
      logging.debug('line %6d: %s', line_number, replaced_line)
    output_lines.append(replaced_line)

  os.unlink(input_file_path)
  with open(input_file_path, 'w') as f:
    f.write('\n'.join(output_lines) + '\n')
  return 0


def PortAll(file_list):
  result = 0
  for file_path in file_list:
    this_result = PortFile(file_path)
    if this_result != 0:
      result = this_result
  print '\n'
  return result


if __name__ == '__main__':
  logging.basicConfig(format='%(levelname)-7s:%(message)s', level=logging.INFO)
  sys.exit(PortAll(sys.argv[1:]))
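# Hedged, standalone illustration (not part of the original port.py): shows
# the effect of the _RE_SYSTEM_FUNCTION substitution defined above on one
# invented C source line (it relies on the module-level names defined earlier).
_SAMPLE_LINE = 'buf = malloc(size); free(old);'
_PORTED_LINE = _RE_SYSTEM_FUNCTION.sub(_ReplaceSystemFunction, _SAMPLE_LINE)
# _PORTED_LINE == 'buf = XML_MALLOC(size); XML_FREE(old);'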
youtube/cobalt
third_party/libxml/port.py
Python
bsd-3-clause
3,830
from django import forms


class WakeOnLan(forms.Form):
    computer_name = forms.CharField(max_length=30, required=False,
                                    help_text='Enter your ip or hostname')
Oleh-Hrebchuk/WakeOnLanDjango
wakeonlanapp/forms.py
Python
gpl-3.0
161
# Google Home page example # # Author: Dnpwwo, 2017 - 2018 # # Demonstrates HTTP/HTTPS connectivity. # After connection it performs a GET on www.google.com and receives a 302 (Page Moved) response # It then does a subsequent GET on the Location specified in the 302 response and receives a 200 response. # """ <plugin key="Google" name="Google Home page example" author="Dnpwwo" version="2.2.7" externallink="https://www.google.com"> <description> <h2>Google Home page example</h2><br/> Will hit the supplied URL every 5 heartbeats in the request protocol. Redirects are handled. </description> <params> <param field="Address" label="IP Address" width="200px" required="true" default="www.google.com"/> <param field="Mode1" label="Protocol" width="75px"> <options> <option label="HTTPS" value="443"/> <option label="HTTP" value="80" default="true" /> </options> </param> <param field="Mode6" label="Debug" width="150px"> <options> <option label="None" value="0" default="true" /> <option label="Python Only" value="2"/> <option label="Basic Debugging" value="62"/> <option label="Basic+Messages" value="126"/> <option label="Connections Only" value="16"/> <option label="Connections+Python" value="18"/> <option label="Connections+Queue" value="144"/> <option label="All" value="-1"/> </options> </param> </params> </plugin> """ import Domoticz class BasePlugin: httpConn = None runAgain = 6 disconnectCount = 0 sProtocol = "HTTP" def __init__(self): return def onStart(self): if Parameters["Mode6"] != "0": Domoticz.Debugging(int(Parameters["Mode6"])) DumpConfigToLog() if (Parameters["Mode1"] == "443"): self.sProtocol = "HTTPS" self.httpConn = Domoticz.Connection(Name=self.sProtocol+" Test", Transport="TCP/IP", Protocol=self.sProtocol, Address=Parameters["Address"], Port=Parameters["Mode1"]) self.httpConn.Connect() def onStop(self): Domoticz.Log("onStop - Plugin is stopping.") def onConnect(self, Connection, Status, Description): if (Status == 0): Domoticz.Debug("Google connected successfully.") sendData = { 'Verb' : 'GET', 'URL' : '/', 'Headers' : { 'Content-Type': 'text/xml; charset=utf-8', \ 'Connection': 'keep-alive', \ 'Accept': 'Content-Type: text/html; charset=UTF-8', \ 'Host': Parameters["Address"]+":"+Parameters["Mode1"], \ 'User-Agent':'Domoticz/1.0' } } Connection.Send(sendData) else: Domoticz.Log("Failed to connect ("+str(Status)+") to: "+Parameters["Address"]+":"+Parameters["Mode1"]+" with error: "+Description) def onMessage(self, Connection, Data): DumpHTTPResponseToLog(Data) strData = Data["Data"].decode("utf-8", "ignore") Status = int(Data["Status"]) LogMessage(strData) if (Status == 200): if ((self.disconnectCount & 1) == 1): Domoticz.Log("Good Response received from Google, Disconnecting.") self.httpConn.Disconnect() else: Domoticz.Log("Good Response received from Google, Dropping connection.") self.httpConn = None self.disconnectCount = self.disconnectCount + 1 elif (Status == 302): Domoticz.Log("Google returned a Page Moved Error.") sendData = { 'Verb' : 'GET', 'URL' : Data["Headers"]["Location"], 'Headers' : { 'Content-Type': 'text/xml; charset=utf-8', \ 'Connection': 'keep-alive', \ 'Accept': 'Content-Type: text/html; charset=UTF-8', \ 'Host': Parameters["Address"]+":"+Parameters["Mode1"], \ 'User-Agent':'Domoticz/1.0' }, } Connection.Send(sendData) elif (Status == 400): Domoticz.Error("Google returned a Bad Request Error.") elif (Status == 500): Domoticz.Error("Google returned a Server Error.") else: Domoticz.Error("Google returned a status: "+str(Status)) def onCommand(self, Unit, Command, 
Level, Hue): Domoticz.Debug("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level)) def onDisconnect(self, Connection): Domoticz.Log("onDisconnect called for connection to: "+Connection.Address+":"+Connection.Port) def onHeartbeat(self): #Domoticz.Trace(True) if (self.httpConn != None and (self.httpConn.Connecting() or self.httpConn.Connected())): Domoticz.Debug("onHeartbeat called, Connection is alive.") else: self.runAgain = self.runAgain - 1 if self.runAgain <= 0: if (self.httpConn == None): self.httpConn = Domoticz.Connection(Name=self.sProtocol+" Test", Transport="TCP/IP", Protocol=self.sProtocol, Address=Parameters["Address"], Port=Parameters["Mode1"]) self.httpConn.Connect() self.runAgain = 6 else: Domoticz.Debug("onHeartbeat called, run again in "+str(self.runAgain)+" heartbeats.") #Domoticz.Trace(False) global _plugin _plugin = BasePlugin() def onStart(): global _plugin _plugin.onStart() def onStop(): global _plugin _plugin.onStop() def onConnect(Connection, Status, Description): global _plugin _plugin.onConnect(Connection, Status, Description) def onMessage(Connection, Data): global _plugin _plugin.onMessage(Connection, Data) def onCommand(Unit, Command, Level, Hue): global _plugin _plugin.onCommand(Unit, Command, Level, Hue) def onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile): global _plugin _plugin.onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile) def onDisconnect(Connection): global _plugin _plugin.onDisconnect(Connection) def onHeartbeat(): global _plugin _plugin.onHeartbeat() # Generic helper functions def LogMessage(Message): if Parameters["Mode6"] == "File": f = open(Parameters["HomeFolder"]+"http.html","w") f.write(Message) f.close() Domoticz.Log("File written") def DumpConfigToLog(): for x in Parameters: if Parameters[x] != "": Domoticz.Debug( "'" + x + "':'" + str(Parameters[x]) + "'") Domoticz.Debug("Device count: " + str(len(Devices))) for x in Devices: Domoticz.Debug("Device: " + str(x) + " - " + str(Devices[x])) Domoticz.Debug("Device ID: '" + str(Devices[x].ID) + "'") Domoticz.Debug("Device Name: '" + Devices[x].Name + "'") Domoticz.Debug("Device nValue: " + str(Devices[x].nValue)) Domoticz.Debug("Device sValue: '" + Devices[x].sValue + "'") Domoticz.Debug("Device LastLevel: " + str(Devices[x].LastLevel)) return def DumpHTTPResponseToLog(httpResp, level=0): if (level==0): Domoticz.Debug("HTTP Details ("+str(len(httpResp))+"):") indentStr = "" for x in range(level): indentStr += "----" if isinstance(httpResp, dict): for x in httpResp: if not isinstance(httpResp[x], dict) and not isinstance(httpResp[x], list): Domoticz.Debug(indentStr + ">'" + x + "':'" + str(httpResp[x]) + "'") else: Domoticz.Debug(indentStr + ">'" + x + "':") DumpHTTPResponseToLog(httpResp[x], level+1) elif isinstance(httpResp, list): for x in httpResp: Domoticz.Debug(indentStr + "['" + x + "']") else: Domoticz.Debug(indentStr + ">'" + x + "':'" + str(httpResp[x]) + "'")
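# Hedged, standalone sketch (not part of the original HTTP.py plugin): mirrors
# how onMessage above builds the follow-up request after a 302 -- only the URL
# changes, taken from the Location header. The response shape and host below
# are assumed for the example.
_redirect = {'Status': '302', 'Headers': {'Location': '/moved-here'}}
_follow_up = {
    'Verb': 'GET',
    'URL': _redirect['Headers']['Location'],
    'Headers': {'Host': 'www.google.com:80', 'User-Agent': 'Domoticz/1.0'},
}
# _follow_up['URL'] == '/moved-here'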
gordonb3/domoticz
plugins/examples/HTTP.py
Python
gpl-3.0
8,304
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.

"""Invenio module for depositing metadata using workflows."""

import os
import sys

from setuptools import setup
from setuptools.command.test import test as TestCommand

readme = open('README.rst').read()
history = open('CHANGES.rst').read()

requirements = [
    'Flask>=0.10.1',
    'six>=1.7.2',
    'idutils>=0.1.0',
    'invenio-formatter>=0.2.0',
    'invenio-oauth2server>=0.1.0',
    'invenio-pidstore>=0.1.0',
    'invenio-records>=0.3.0',
    'invenio-workflows>=0.1.0',
]

test_requirements = [
    'unittest2>=1.1.0',
    'Flask_Testing>=0.4.1',
    'pytest>=2.7.0',
    'pytest_cov>=1.8.0,<2.0.0',
    'pytest_pep8>=1.0.6',
    'coverage>=3.7.1',
]


class PyTest(TestCommand):

    """PyTest Test."""

    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        """Init pytest."""
        TestCommand.initialize_options(self)
        self.pytest_args = []
        try:
            from ConfigParser import ConfigParser
        except ImportError:
            from configparser import ConfigParser
        config = ConfigParser()
        config.read('pytest.ini')
        self.pytest_args = config.get('pytest', 'addopts').split(' ')

    def finalize_options(self):
        """Finalize pytest."""
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        """Run tests."""
        # import here, cause outside the eggs aren't loaded
        import pytest
        import _pytest.config
        pm = _pytest.config.get_plugin_manager()
        pm.consider_setuptools_entrypoints()
        errno = pytest.main(self.pytest_args)
        sys.exit(errno)

# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio_deposit', 'version.py'), 'rt') as fp:
    exec(fp.read(), g)
    version = g['__version__']

setup(
    name='invenio-deposit',
    version=version,
    description=__doc__,
    long_description=readme + '\n\n' + history,
    keywords='invenio TODO',
    license='GPLv2',
    author='CERN',
    author_email='[email protected]',
    url='https://github.com/inveniosoftware/invenio-deposit',
    packages=[
        'invenio_deposit',
    ],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=requirements,
    extras_require={
        'docs': [
            'Sphinx>=1.3',
            'sphinx_rtd_theme>=0.1.7'
        ],
        'tests': test_requirements
    },
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python :: 2',
        # 'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        # 'Programming Language :: Python :: 3',
        # 'Programming Language :: Python :: 3.3',
        # 'Programming Language :: Python :: 3.4',
        'Development Status :: 1 - Planning',
    ],
    tests_require=test_requirements,
    cmdclass={'test': PyTest},
)
dset0x/invenio-deposit
setup.py
Python
gpl-2.0
4,296
import decimal import logging from decimal import Decimal from django.db import models, transaction from django.contrib.auth.models import User, Group from django.db.models.signals import post_save # pre_save, from django.dispatch import receiver LOGGER = logging.getLogger(__name__) # Model Product class Product(models.Model): name = models.CharField(max_length=64) description = models.TextField(max_length=512, null=True, blank=True) # Starting cost cost = models.DecimalField(max_digits=8, decimal_places=2) # Publication date post_date = models.DateField(auto_now_add=True) # Last modification date update_date = models.DateField(auto_now=True) # Users who finance or make the product first_owners = models.ManyToManyField(User, through='Ownership') is_public = models.BooleanField(default=True) # Group (in case of private product) private_group = models.OneToOneField(Group, null=True, blank=True) # Step for progressive refund step = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True) def __str__(self): return self.name @property def nb_use(self): return self.use_set.count() @property def price(self): # next use price nb_use = self.nb_use step = self.step cost = self.cost if step and nb_use * step < cost: price = step else: price = cost / (1 + nb_use) price = price.quantize(Decimal('0.01'), decimal.ROUND_UP) return price def recompute_use_balances(self, hint_user=None): """ Main function of orent app refund owners and/or previous users taking care that : - owners have been payed back as soon as possible - every users spend the same amount of money by use. """ nb_use = self.nb_use price = self.price step = self.step cost = self.cost # If caller gives us a user on which to work on, # avoid a back-and-forth trip on the database by using # it directly. Problem spotted during test-cases write. hint_user_id = None if hint_user is None else hint_user.id def impact_all_uses(what): """ Refund all previous users""" for use in self.use_set.all(): if use.user_id == hint_user_id: profil = hint_user.profil else: profil = use.user.profil profil.balance += what profil.save() def impact_first_owner(what): """ Refund first owners. 
""" if self.first_owner_id == hint_user_id: profil = hint_user.profil else: profil = self.first_owner.profil profil.balance += what profil.save() if step: # use don't allow to reach cost if (nb_use + 1) * step < cost: # for ownership in self.ownership_set.all() # ratio = ownership.ratio # total_refunded = nb_use * price * ratio # total_refunded = total_refunded.quantize(Decimal('0.01'), decimal.ROUND_DOWN) # total_refund = (nb_use + 1)* price * ratio # total_refund = total_refund.quantize(Decimal('0.01'), decimal.ROUND_DOWN) # what = total_refund - total_refunded # impact_first_owner(what) impact_first_owner(price) # use allow to reach cost if nb_use * step < cost <= (nb_use + 1) * step: # for ownership in self.ownership_set.all() # ratio = ownership.ratio # total_refunded = nb_use * price * ratio # total_refunded = total_refunded.quantize(Decimal('0.01'), decimal.ROUND_DOWN) # total_refund = cost * ratio # total_refund = total_refund.quantize(Decimal('0.01'), decimal.ROUND_DOWN) # what = total_refund - total_refunded # impact_first_owner(what) # refund first owner up to the cost impact_first_owner(cost - nb_use * step) # previous use allowed to reach cost if (nb_use - 1) * step < cost <= nb_use * step: # refund previous users # taking into account overage of cost passing impact_all_uses(step - price) # cost have been reached for 2 uses or more if cost <= (nb_use - 1) * step: previous_price = cost / nb_use previous_price = previous_price.quantize(Decimal('0.01'), decimal.ROUND_UP) impact_all_uses(previous_price - price) else: if nb_use: # product have been used previous_price = cost / nb_use previous_price = previous_price.quantize(Decimal('0.01'), decimal.ROUND_UP) impact_all_uses(previous_price - price) else: # product have never been used impact_first_owner(price) # complete add of a use def add_use_for(self, user): with transaction.atomic(): self.recompute_use_balances(hint_user=user) # HEADS UP: user needs to be created *AFTER* recomputation # for the new user not to be refunded with his/her # own user. profil = user.profil profil.balance -= self.price profil.save() self.use_set.create(user=user) """ def check(self): # Vérifier private_group factory_private_group_name = 'ppg@{}'.format(self.id) if self.private_group: group = self.private_group if group.name != factory_private_group_name: group.name = factory_private_group_name group.save() else: group = Group(name=factory_private_group_name) group.save() self.first_owner.groups.add(group) self.private_group = group """ class Use(models.Model): # related_name='uses' : uses à la place de use_set product = models.ForeignKey(Product) user = models.ForeignKey(User) date = models.DateField(auto_now_add=True) class Profil(models.Model): user = models.OneToOneField(User) balance = models.DecimalField(max_digits=8, decimal_places=2, default=0) class Ownership(models.Model): product = models.ForeignKey(Product) user = models.ForeignKey(User) ratio = models.DecimalField(max_digits=3, decimal_places=2) # SIGNAUX # Group creation for new product @receiver(post_save, sender=Product) def create_group_for_product(sender, instance, created, **kwargs): if created: # product.check() group = Group(name='ppg@{}'.format(instance.id)) group.save() instance.first_owner.groups.add(group) instance.private_group = group # Profil creation for new user @receiver(post_save, sender=User) def create_profil_for_user(sender, instance, created, **kwargs): if created: Profil.objects.create(user=instance)
0rent/0rent
orentapp/models.py
Python
agpl-3.0
7,518
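# Hedged, standalone sketch (appended after the orentapp/models.py entry above,
# not part of the original file): it reproduces the pricing rule of
# Product.price without Django. With no step configured, the next use costs
# cost / (1 + nb_use), rounded up to the cent, so the product's cost ends up
# split evenly across its users.
import decimal
from decimal import Decimal

def _next_price(cost, nb_use, step=None):
    if step and nb_use * step < cost:
        return step
    price = cost / (1 + nb_use)
    return price.quantize(Decimal('0.01'), decimal.ROUND_UP)

# _next_price(Decimal('30.00'), 0) == Decimal('30.00')   # first user fronts the cost
# _next_price(Decimal('30.00'), 2) == Decimal('10.00')   # a third use costs a third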
#!/usr/bin/env python ''' WebSocket client based on ws4py Initial design inspired by: https://ws4py.readthedocs.org/en/latest/sources/clienttutorial/ Another library Autobahn worth checking: http://autobahn.ws/python/ ''' import json import os import sys import time import logging import argparse from collections import namedtuple try: from ws4py.client import WebSocketBaseClient from ws4py.manager import WebSocketManager from ws4py import format_addresses, configure_logger except ImportError: print("ws4py is required to run cthun_ws_test " "(try 'sudo pip install ws4py')") sys.exit(1) # Tokens CONNECTION_CHECK_INTERVAL = 2 # [s] SEND_INTERVAL = 0.001 # [s] # Globals logger = None # Errors class RequestError(Exception): ''' Exception due to an invalid request ''' pass # Client class EchoClient(WebSocketBaseClient): def __init__(self, url, mgr, ca_certs = None, keyfile = None, certfile = None): self._mgr = mgr WebSocketBaseClient.__init__(self, url, ssl_options = { "ca_certs" : ca_certs, "keyfile" : keyfile, "certfile" : certfile }) def handshake_ok(self): logger.info("Opening %s" % format_addresses(self)) self._mgr.add(self) def received_message(self, msg): logger.info("### Received a message: %s" % msg) # Configuration ScriptOptions = namedtuple('ScriptOptions', ['url', 'concurrency', 'interval', 'verbose', 'message', 'json_file', 'num', 'ca', 'key', 'cert']) SCRIPT_DESCRIPTION = '''WebSocket client to execute load tests.''' def parseCommandLine(argv): # Define parser = argparse.ArgumentParser(description = SCRIPT_DESCRIPTION) parser.add_argument("url", help = "server url (ex. ws//:localhost:8080/hello)"), parser.add_argument("concurrency", help = "number of concurrent connections") parser.add_argument("-m", "--message", help = "message to be sent to the server", default = None) parser.add_argument("-j", "--json_file", help = "file containing the json message to be sent", default = None) parser.add_argument("-n", "--num", help = "number of messages to be sent", default = 1) parser.add_argument("-i", "--interval", help = "connection check interval; default: %d s" % CONNECTION_CHECK_INTERVAL, default = CONNECTION_CHECK_INTERVAL) parser.add_argument("--ca", help = "ca pem", default = None) parser.add_argument("--key", help = "client key", default = None) parser.add_argument("--cert", help = "client cert", default = None) parser.add_argument("-v", "--verbose", help = "verbose", action = "store_true") # Parse and validate args = parser.parse_args(argv[1:]) try: concurrency = int(args.concurrency) if concurrency < 1: raise ValueError() except ValueError: raise RequestError('concurerncy must be a positive integer') if not args.url.startswith("ws"): raise RequestError('invalid url') try: interval = int(args.interval) if interval < 0: raise ValueError() except ValueError: raise RequestError('interval must be a positive integer') abs_json_file = None if args.json_file: abs_json_file = os.path.abspath(args.json_file) if not os.path.isfile(abs_json_file): raise RequestError("%s does not exist" % abs_json_file) num = None if any([args.message, args.json_file]): try: num = int(args.num) if num < 0: raise ValueError() except ValueError: raise RequestError('invalid number of messages') else: num = 0 return ScriptOptions(args.url, concurrency, interval, args.verbose, args.message, abs_json_file, num, args.ca, args.key, args.cert) def getJsonFromFile(file_path): with open(file_path) as f: json_content = f.read() return json.read(json_content.strip()) def getMessage(script_options): if 
script_options.json_file is not None: return getJsonFromFile(script_options.json_file) return script_options.message # The actual script def run(script_options): global logger level = logging.DEBUG if script_options.verbose else logging.INFO logger = configure_logger(level = level) mgr = WebSocketManager() try: mgr.start() clients = [] # Connect for connection_idx in range(script_options.concurrency): client = EchoClient(script_options.url, mgr, script_options.ca, script_options.key, script_options.cert) client.connect() clients.append(client) logger.info("%d clients are connected" % (connection_idx + 1)) # Send msg = getMessage(script_options) if msg: msg = json.write(msg) logger.info("Sending messages (num=%d):\n%s", script_options.num, msg) for client in clients: for _ in range(script_options.num): client.send(msg) time.sleep(SEND_INTERVAL) logger.info("Done sending") # Sleep before disconnecting logger.info("Sleeping for %d s before disconnecting", script_options.interval) time.sleep(script_options.interval) except KeyboardInterrupt: logger.info("Interrupted by user") finally: logger.info("Disconnecting!") mgr.close_all(code = 1000, message = "Client is closing the connection") mgr.stop() mgr.join() def main(argv = None): if argv is None: argv = sys.argv try: script_options = parseCommandLine(argv) print script_options run(script_options) except RequestError as e: print("Invalid request: %s" % e) return 1 except Exception as e: logger.exception(str(e)) return 1 return 0 if __name__ == '__main__': sys.exit(main())
MikaelSmith/cpp-pcp-client
scripts/cthun_test.py
Python
apache-2.0
6,944
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import uuid import mock from oslo_config import cfg from oslo_utils import timeutils from testtools import matchers from keystone import assignment from keystone import auth from keystone.common import authorization from keystone import config from keystone import exception from keystone.models import token_model from keystone.tests import unit as tests from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database from keystone import token from keystone.token import provider from keystone import trust CONF = cfg.CONF TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id HOST_URL = 'http://keystone:5001' def _build_user_auth(token=None, user_id=None, username=None, password=None, tenant_id=None, tenant_name=None, trust_id=None): """Build auth dictionary. It will create an auth dictionary based on all the arguments that it receives. """ auth_json = {} if token is not None: auth_json['token'] = token if username or password: auth_json['passwordCredentials'] = {} if username is not None: auth_json['passwordCredentials']['username'] = username if user_id is not None: auth_json['passwordCredentials']['userId'] = user_id if password is not None: auth_json['passwordCredentials']['password'] = password if tenant_name is not None: auth_json['tenantName'] = tenant_name if tenant_id is not None: auth_json['tenantId'] = tenant_id if trust_id is not None: auth_json['trust_id'] = trust_id return auth_json class AuthTest(tests.TestCase): def setUp(self): self.useFixture(database.Database()) super(AuthTest, self).setUp() self.load_backends() self.load_fixtures(default_fixtures) self.context_with_remote_user = {'environment': {'REMOTE_USER': 'FOO', 'AUTH_TYPE': 'Negotiate'}} self.empty_context = {'environment': {}} self.controller = token.controllers.Auth() def assertEqualTokens(self, a, b, enforce_audit_ids=True): """Assert that two tokens are equal. Compare two tokens except for their ids. This also truncates the time in the comparison. 
""" def normalize(token): token['access']['token']['id'] = 'dummy' del token['access']['token']['expires'] del token['access']['token']['issued_at'] del token['access']['token']['audit_ids'] return token self.assertCloseEnoughForGovernmentWork( timeutils.parse_isotime(a['access']['token']['expires']), timeutils.parse_isotime(b['access']['token']['expires'])) self.assertCloseEnoughForGovernmentWork( timeutils.parse_isotime(a['access']['token']['issued_at']), timeutils.parse_isotime(b['access']['token']['issued_at'])) if enforce_audit_ids: self.assertIn(a['access']['token']['audit_ids'][0], b['access']['token']['audit_ids']) self.assertThat(len(a['access']['token']['audit_ids']), matchers.LessThan(3)) self.assertThat(len(b['access']['token']['audit_ids']), matchers.LessThan(3)) return self.assertDictEqual(normalize(a), normalize(b)) class AuthBadRequests(AuthTest): def test_no_external_auth(self): """Verify that _authenticate_external() raises exception if N/A.""" self.assertRaises( token.controllers.ExternalAuthNotApplicable, self.controller._authenticate_external, context={}, auth={}) def test_empty_remote_user(self): """Verify that _authenticate_external() raises exception if REMOTE_USER is set as the empty string. """ context = {'environment': {'REMOTE_USER': ''}} self.assertRaises( token.controllers.ExternalAuthNotApplicable, self.controller._authenticate_external, context=context, auth={}) def test_no_token_in_auth(self): """Verify that _authenticate_token() raises exception if no token.""" self.assertRaises( exception.ValidationError, self.controller._authenticate_token, None, {}) def test_no_credentials_in_auth(self): """Verify that _authenticate_local() raises exception if no creds.""" self.assertRaises( exception.ValidationError, self.controller._authenticate_local, None, {}) def test_empty_username_and_userid_in_auth(self): """Verify that empty username and userID raises ValidationError.""" self.assertRaises( exception.ValidationError, self.controller._authenticate_local, None, {'passwordCredentials': {'password': 'abc', 'userId': '', 'username': ''}}) def test_authenticate_blank_request_body(self): """Verify sending empty json dict raises the right exception.""" self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, {}) def test_authenticate_blank_auth(self): """Verify sending blank 'auth' raises the right exception.""" body_dict = _build_user_auth() self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_authenticate_invalid_auth_content(self): """Verify sending invalid 'auth' raises the right exception.""" self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, {'auth': 'abcd'}) def test_authenticate_user_id_too_large(self): """Verify sending large 'userId' raises the right exception.""" body_dict = _build_user_auth(user_id='0' * 65, username='FOO', password='foo2') self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_username_too_large(self): """Verify sending large 'username' raises the right exception.""" body_dict = _build_user_auth(username='0' * 65, password='foo2') self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_tenant_id_too_large(self): """Verify sending large 'tenantId' raises the right exception.""" body_dict = _build_user_auth(username='FOO', password='foo2', tenant_id='0' * 65) self.assertRaises(exception.ValidationSizeError, 
self.controller.authenticate, {}, body_dict) def test_authenticate_tenant_name_too_large(self): """Verify sending large 'tenantName' raises the right exception.""" body_dict = _build_user_auth(username='FOO', password='foo2', tenant_name='0' * 65) self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_token_too_large(self): """Verify sending large 'token' raises the right exception.""" body_dict = _build_user_auth(token={'id': '0' * 8193}) self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) def test_authenticate_password_too_large(self): """Verify sending large 'password' raises the right exception.""" length = CONF.identity.max_password_length + 1 body_dict = _build_user_auth(username='FOO', password='0' * length) self.assertRaises(exception.ValidationSizeError, self.controller.authenticate, {}, body_dict) class AuthWithToken(AuthTest): def test_unscoped_token(self): """Verify getting an unscoped token with password creds.""" body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate({}, body_dict) self.assertNotIn('tenant', unscoped_token['access']['token']) def test_auth_invalid_token(self): """Verify exception is raised if invalid token.""" body_dict = _build_user_auth(token={"id": uuid.uuid4().hex}) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_bad_formatted_token(self): """Verify exception is raised if invalid token.""" body_dict = _build_user_auth(token={}) self.assertRaises( exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_auth_unscoped_token_no_project(self): """Verify getting an unscoped token with an unscoped token.""" body_dict = _build_user_auth( username='FOO', password='foo2') unscoped_token = self.controller.authenticate({}, body_dict) body_dict = _build_user_auth( token=unscoped_token["access"]["token"]) unscoped_token_2 = self.controller.authenticate({}, body_dict) self.assertEqualTokens(unscoped_token, unscoped_token_2) def test_auth_unscoped_token_project(self): """Verify getting a token in a tenant with an unscoped token.""" # Add a role in so we can check we get this back self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], self.role_member['id']) # Get an unscoped token body_dict = _build_user_auth( username='FOO', password='foo2') unscoped_token = self.controller.authenticate({}, body_dict) # Get a token on BAR tenant using the unscoped token body_dict = _build_user_auth( token=unscoped_token["access"]["token"], tenant_name="BAR") scoped_token = self.controller.authenticate({}, body_dict) tenant = scoped_token["access"]["token"]["tenant"] roles = scoped_token["access"]["metadata"]["roles"] self.assertEqual(self.tenant_bar['id'], tenant["id"]) self.assertThat(roles, matchers.Contains(self.role_member['id'])) def test_auth_token_project_group_role(self): """Verify getting a token in a tenant with group roles.""" # Add a v2 style role in so we can check we get this back self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.tenant_bar['id'], self.role_member['id']) # Now create a group role for this user as well domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} self.resource_api.create_domain(domain1['id'], domain1) new_group = {'domain_id': domain1['id'], 'name': uuid.uuid4().hex} new_group = self.identity_api.create_group(new_group) 
self.identity_api.add_user_to_group(self.user_foo['id'], new_group['id']) self.assignment_api.create_grant( group_id=new_group['id'], project_id=self.tenant_bar['id'], role_id=self.role_admin['id']) # Get a scoped token for the tenant body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") scoped_token = self.controller.authenticate({}, body_dict) tenant = scoped_token["access"]["token"]["tenant"] roles = scoped_token["access"]["metadata"]["roles"] self.assertEqual(self.tenant_bar['id'], tenant["id"]) self.assertIn(self.role_member['id'], roles) self.assertIn(self.role_admin['id'], roles) def test_belongs_to_no_tenant(self): r = self.controller.authenticate( {}, auth={ 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'] } }) unscoped_token_id = r['access']['token']['id'] self.assertRaises( exception.Unauthorized, self.controller.validate_token, dict(is_admin=True, query_string={'belongsTo': 'BAR'}), token_id=unscoped_token_id) def test_belongs_to(self): body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") scoped_token = self.controller.authenticate({}, body_dict) scoped_token_id = scoped_token['access']['token']['id'] self.assertRaises( exception.Unauthorized, self.controller.validate_token, dict(is_admin=True, query_string={'belongsTo': 'me'}), token_id=scoped_token_id) self.assertRaises( exception.Unauthorized, self.controller.validate_token, dict(is_admin=True, query_string={'belongsTo': 'BAR'}), token_id=scoped_token_id) def test_token_auth_with_binding(self): self.config_fixture.config(group='token', bind=['kerberos']) body_dict = _build_user_auth() unscoped_token = self.controller.authenticate( self.context_with_remote_user, body_dict) # the token should have bind information in it bind = unscoped_token['access']['token']['bind'] self.assertEqual('FOO', bind['kerberos']) body_dict = _build_user_auth( token=unscoped_token['access']['token'], tenant_name='BAR') # using unscoped token without remote user context fails self.assertRaises( exception.Unauthorized, self.controller.authenticate, self.empty_context, body_dict) # using token with remote user context succeeds scoped_token = self.controller.authenticate( self.context_with_remote_user, body_dict) # the bind information should be carried over from the original token bind = scoped_token['access']['token']['bind'] self.assertEqual('FOO', bind['kerberos']) def test_deleting_role_revokes_token(self): role_controller = assignment.controllers.Role() project1 = {'id': 'Project1', 'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID} self.resource_api.create_project(project1['id'], project1) role_one = {'id': 'role_one', 'name': uuid.uuid4().hex} self.role_api.create_role(role_one['id'], role_one) self.assignment_api.add_role_to_user_and_project( self.user_foo['id'], project1['id'], role_one['id']) no_context = {} # Get a scoped token for the tenant body_dict = _build_user_auth( username=self.user_foo['name'], password=self.user_foo['password'], tenant_name=project1['name']) token = self.controller.authenticate(no_context, body_dict) # Ensure it is valid token_id = token['access']['token']['id'] self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=token_id) # Delete the role, which should invalidate the token role_controller.delete_role( dict(is_admin=True, query_string={}), role_one['id']) # Check the token is now invalid self.assertRaises( exception.TokenNotFound, self.controller.validate_token, 
dict(is_admin=True, query_string={}), token_id=token_id) def test_only_original_audit_id_is_kept(self): context = {} def get_audit_ids(token): return token['access']['token']['audit_ids'] # get a token body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate(context, body_dict) starting_audit_id = get_audit_ids(unscoped_token)[0] self.assertIsNotNone(starting_audit_id) # get another token to ensure the correct parent audit_id is set body_dict = _build_user_auth(token=unscoped_token["access"]["token"]) unscoped_token_2 = self.controller.authenticate(context, body_dict) audit_ids = get_audit_ids(unscoped_token_2) self.assertThat(audit_ids, matchers.HasLength(2)) self.assertThat(audit_ids[-1], matchers.Equals(starting_audit_id)) # get another token from token 2 and ensure the correct parent # audit_id is set body_dict = _build_user_auth(token=unscoped_token_2["access"]["token"]) unscoped_token_3 = self.controller.authenticate(context, body_dict) audit_ids = get_audit_ids(unscoped_token_3) self.assertThat(audit_ids, matchers.HasLength(2)) self.assertThat(audit_ids[-1], matchers.Equals(starting_audit_id)) def test_revoke_by_audit_chain_id_original_token(self): self.config_fixture.config(group='token', revoke_by_id=False) context = {} # get a token body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate(context, body_dict) token_id = unscoped_token['access']['token']['id'] # get a second token body_dict = _build_user_auth(token=unscoped_token["access"]["token"]) unscoped_token_2 = self.controller.authenticate(context, body_dict) token_2_id = unscoped_token_2['access']['token']['id'] self.token_provider_api.revoke_token(token_id, revoke_chain=True) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_id) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_2_id) def test_revoke_by_audit_chain_id_chained_token(self): self.config_fixture.config(group='token', revoke_by_id=False) context = {} # get a token body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate(context, body_dict) token_id = unscoped_token['access']['token']['id'] # get a second token body_dict = _build_user_auth(token=unscoped_token["access"]["token"]) unscoped_token_2 = self.controller.authenticate(context, body_dict) token_2_id = unscoped_token_2['access']['token']['id'] self.token_provider_api.revoke_token(token_2_id, revoke_chain=True) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_id) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_2_id) def _mock_audit_info(self, parent_audit_id): # NOTE(morgainfainberg): The token model and other cases that are # extracting the audit id expect 'None' if the audit id doesn't # exist. This ensures that the audit_id is None and the # audit_chain_id will also return None. 
return [None, None] def test_revoke_with_no_audit_info(self): self.config_fixture.config(group='token', revoke_by_id=False) context = {} with mock.patch.object(provider, 'audit_info', self._mock_audit_info): # get a token body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate(context, body_dict) token_id = unscoped_token['access']['token']['id'] # get a second token body_dict = _build_user_auth( token=unscoped_token['access']['token']) unscoped_token_2 = self.controller.authenticate(context, body_dict) token_2_id = unscoped_token_2['access']['token']['id'] self.token_provider_api.revoke_token(token_id, revoke_chain=True) revoke_events = self.revoke_api.list_events() self.assertThat(revoke_events, matchers.HasLength(1)) revoke_event = revoke_events[0].to_dict() self.assertIn('expires_at', revoke_event) self.assertEqual(unscoped_token_2['access']['token']['expires'], revoke_event['expires_at']) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_id) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_2_id) # get a new token, with no audit info body_dict = _build_user_auth(username='FOO', password='foo2') unscoped_token = self.controller.authenticate(context, body_dict) token_id = unscoped_token['access']['token']['id'] # get a second token body_dict = _build_user_auth( token=unscoped_token['access']['token']) unscoped_token_2 = self.controller.authenticate(context, body_dict) token_2_id = unscoped_token_2['access']['token']['id'] # Revoke by audit_id, no audit_info means both parent and child # token are revoked. self.token_provider_api.revoke_token(token_id) revoke_events = self.revoke_api.list_events() self.assertThat(revoke_events, matchers.HasLength(2)) revoke_event = revoke_events[1].to_dict() self.assertIn('expires_at', revoke_event) self.assertEqual(unscoped_token_2['access']['token']['expires'], revoke_event['expires_at']) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_id) self.assertRaises(exception.TokenNotFound, self.token_provider_api.validate_v2_token, token_id=token_2_id) class AuthWithPasswordCredentials(AuthTest): def test_auth_invalid_user(self): """Verify exception is raised if invalid user.""" body_dict = _build_user_auth( username=uuid.uuid4().hex, password=uuid.uuid4().hex) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_valid_user_invalid_password(self): """Verify exception is raised if invalid password.""" body_dict = _build_user_auth( username="FOO", password=uuid.uuid4().hex) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_empty_password(self): """Verify exception is raised if empty password.""" body_dict = _build_user_auth( username="FOO", password="") self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, body_dict) def test_auth_no_password(self): """Verify exception is raised if empty password.""" body_dict = _build_user_auth(username="FOO") self.assertRaises( exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_authenticate_blank_password_credentials(self): """Sending empty dict as passwordCredentials raises a 400 error.""" body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'} self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, body_dict) def 
test_authenticate_no_username(self): """Verify skipping username raises the right exception.""" body_dict = _build_user_auth(password="pass", tenant_name="demo") self.assertRaises(exception.ValidationError, self.controller.authenticate, {}, body_dict) def test_bind_without_remote_user(self): self.config_fixture.config(group='token', bind=['kerberos']) body_dict = _build_user_auth(username='FOO', password='foo2', tenant_name='BAR') token = self.controller.authenticate({}, body_dict) self.assertNotIn('bind', token['access']['token']) def test_change_default_domain_id(self): # If the default_domain_id config option is not the default then the # user in auth data is from the new default domain. # 1) Create a new domain. new_domain_id = uuid.uuid4().hex new_domain = { 'description': uuid.uuid4().hex, 'enabled': True, 'id': new_domain_id, 'name': uuid.uuid4().hex, } self.resource_api.create_domain(new_domain_id, new_domain) # 2) Create user "foo" in new domain with different password than # default-domain foo. new_user_password = uuid.uuid4().hex new_user = { 'name': self.user_foo['name'], 'domain_id': new_domain_id, 'password': new_user_password, 'email': '[email protected]', } new_user = self.identity_api.create_user(new_user) # 3) Update the default_domain_id config option to the new domain self.config_fixture.config(group='identity', default_domain_id=new_domain_id) # 4) Authenticate as "foo" using the password in the new domain. body_dict = _build_user_auth( username=self.user_foo['name'], password=new_user_password) # The test is successful if this doesn't raise, so no need to assert. self.controller.authenticate({}, body_dict) class AuthWithRemoteUser(AuthTest): def test_unscoped_remote_authn(self): """Verify getting an unscoped token with external authn.""" body_dict = _build_user_auth( username='FOO', password='foo2') local_token = self.controller.authenticate( {}, body_dict) body_dict = _build_user_auth() remote_token = self.controller.authenticate( self.context_with_remote_user, body_dict) self.assertEqualTokens(local_token, remote_token, enforce_audit_ids=False) def test_unscoped_remote_authn_jsonless(self): """Verify that external auth with invalid request fails.""" self.assertRaises( exception.ValidationError, self.controller.authenticate, {'REMOTE_USER': 'FOO'}, None) def test_scoped_remote_authn(self): """Verify getting a token with external authn.""" body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name='BAR') local_token = self.controller.authenticate( {}, body_dict) body_dict = _build_user_auth( tenant_name='BAR') remote_token = self.controller.authenticate( self.context_with_remote_user, body_dict) self.assertEqualTokens(local_token, remote_token, enforce_audit_ids=False) def test_scoped_nometa_remote_authn(self): """Verify getting a token with external authn and no metadata.""" body_dict = _build_user_auth( username='TWO', password='two2', tenant_name='BAZ') local_token = self.controller.authenticate( {}, body_dict) body_dict = _build_user_auth(tenant_name='BAZ') remote_token = self.controller.authenticate( {'environment': {'REMOTE_USER': 'TWO'}}, body_dict) self.assertEqualTokens(local_token, remote_token, enforce_audit_ids=False) def test_scoped_remote_authn_invalid_user(self): """Verify that external auth with invalid user fails.""" body_dict = _build_user_auth(tenant_name="BAR") self.assertRaises( exception.Unauthorized, self.controller.authenticate, {'environment': {'REMOTE_USER': uuid.uuid4().hex}}, body_dict) def test_bind_with_kerberos(self): 
self.config_fixture.config(group='token', bind=['kerberos']) body_dict = _build_user_auth(tenant_name="BAR") token = self.controller.authenticate(self.context_with_remote_user, body_dict) self.assertEqual('FOO', token['access']['token']['bind']['kerberos']) def test_bind_without_config_opt(self): self.config_fixture.config(group='token', bind=['x509']) body_dict = _build_user_auth(tenant_name='BAR') token = self.controller.authenticate(self.context_with_remote_user, body_dict) self.assertNotIn('bind', token['access']['token']) class AuthWithTrust(AuthTest): def setUp(self): super(AuthWithTrust, self).setUp() self.trust_controller = trust.controllers.TrustV3() self.auth_v3_controller = auth.controllers.Auth() self.trustor = self.user_foo self.trustee = self.user_two self.assigned_roles = [self.role_member['id'], self.role_browser['id']] for assigned_role in self.assigned_roles: self.assignment_api.add_role_to_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) self.sample_data = {'trustor_user_id': self.trustor['id'], 'trustee_user_id': self.trustee['id'], 'project_id': self.tenant_bar['id'], 'impersonation': True, 'roles': [{'id': self.role_browser['id']}, {'name': self.role_member['name']}]} def config_overrides(self): super(AuthWithTrust, self).config_overrides() self.config_fixture.config(group='trust', enabled=True) def _create_auth_context(self, token_id): token_ref = token_model.KeystoneToken( token_id=token_id, token_data=self.token_provider_api.validate_token(token_id)) auth_context = authorization.token_to_auth_context(token_ref) return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context}, 'token_id': token_id, 'host_url': HOST_URL} def create_trust(self, trust_data, trustor_name, expires_at=None, impersonation=True): username = trustor_name password = 'foo2' unscoped_token = self.get_unscoped_token(username, password) context = self._create_auth_context( unscoped_token['access']['token']['id']) trust_data_copy = copy.deepcopy(trust_data) trust_data_copy['expires_at'] = expires_at trust_data_copy['impersonation'] = impersonation return self.trust_controller.create_trust( context, trust=trust_data_copy)['trust'] def get_unscoped_token(self, username, password='foo2'): body_dict = _build_user_auth(username=username, password=password) return self.controller.authenticate({}, body_dict) def build_v2_token_request(self, username, password, trust, tenant_id=None): if not tenant_id: tenant_id = self.tenant_bar['id'] unscoped_token = self.get_unscoped_token(username, password) unscoped_token_id = unscoped_token['access']['token']['id'] request_body = _build_user_auth(token={'id': unscoped_token_id}, trust_id=trust['id'], tenant_id=tenant_id) return request_body def test_create_trust_bad_data_fails(self): unscoped_token = self.get_unscoped_token(self.trustor['name']) context = self._create_auth_context( unscoped_token['access']['token']['id']) bad_sample_data = {'trustor_user_id': self.trustor['id'], 'project_id': self.tenant_bar['id'], 'roles': [{'id': self.role_browser['id']}]} self.assertRaises(exception.ValidationError, self.trust_controller.create_trust, context, trust=bad_sample_data) def test_create_trust_no_roles(self): unscoped_token = self.get_unscoped_token(self.trustor['name']) context = {'token_id': unscoped_token['access']['token']['id']} self.sample_data['roles'] = [] self.assertRaises(exception.Forbidden, self.trust_controller.create_trust, context, trust=self.sample_data) def test_create_trust(self): expires_at = 
timeutils.strtime(timeutils.utcnow() + datetime.timedelta(minutes=10), fmt=TIME_FORMAT) new_trust = self.create_trust(self.sample_data, self.trustor['name'], expires_at=expires_at) self.assertEqual(self.trustor['id'], new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], new_trust['trustee_user_id']) role_ids = [self.role_browser['id'], self.role_member['id']] self.assertTrue(timeutils.parse_strtime(new_trust['expires_at'], fmt=TIME_FORMAT)) self.assertIn('%s/v3/OS-TRUST/' % HOST_URL, new_trust['links']['self']) self.assertIn('%s/v3/OS-TRUST/' % HOST_URL, new_trust['roles_links']['self']) for role in new_trust['roles']: self.assertIn(role['id'], role_ids) def test_create_trust_expires_bad(self): self.assertRaises(exception.ValidationTimeStampError, self.create_trust, self.sample_data, self.trustor['name'], expires_at="bad") self.assertRaises(exception.ValidationTimeStampError, self.create_trust, self.sample_data, self.trustor['name'], expires_at="") self.assertRaises(exception.ValidationTimeStampError, self.create_trust, self.sample_data, self.trustor['name'], expires_at="Z") def test_create_trust_without_project_id(self): """Verify that trust can be created without project id and token can be generated with that trust. """ unscoped_token = self.get_unscoped_token(self.trustor['name']) context = self._create_auth_context( unscoped_token['access']['token']['id']) self.sample_data['project_id'] = None self.sample_data['roles'] = [] new_trust = self.trust_controller.create_trust( context, trust=self.sample_data)['trust'] self.assertEqual(self.trustor['id'], new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], new_trust['trustee_user_id']) self.assertIs(new_trust['impersonation'], True) auth_response = self.fetch_v2_token_from_trust(new_trust) token_user = auth_response['access']['user'] self.assertEqual(token_user['id'], new_trust['trustor_user_id']) def test_get_trust(self): unscoped_token = self.get_unscoped_token(self.trustor['name']) context = {'token_id': unscoped_token['access']['token']['id'], 'host_url': HOST_URL} new_trust = self.trust_controller.create_trust( context, trust=self.sample_data)['trust'] trust = self.trust_controller.get_trust(context, new_trust['id'])['trust'] self.assertEqual(self.trustor['id'], trust['trustor_user_id']) self.assertEqual(self.trustee['id'], trust['trustee_user_id']) role_ids = [self.role_browser['id'], self.role_member['id']] for role in new_trust['roles']: self.assertIn(role['id'], role_ids) def test_create_trust_no_impersonation(self): new_trust = self.create_trust(self.sample_data, self.trustor['name'], expires_at=None, impersonation=False) self.assertEqual(self.trustor['id'], new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], new_trust['trustee_user_id']) self.assertIs(new_trust['impersonation'], False) auth_response = self.fetch_v2_token_from_trust(new_trust) token_user = auth_response['access']['user'] self.assertEqual(token_user['id'], new_trust['trustee_user_id']) # TODO(ayoung): Endpoints def test_create_trust_impersonation(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) self.assertEqual(self.trustor['id'], new_trust['trustor_user_id']) self.assertEqual(self.trustee['id'], new_trust['trustee_user_id']) self.assertIs(new_trust['impersonation'], True) auth_response = self.fetch_v2_token_from_trust(new_trust) token_user = auth_response['access']['user'] self.assertEqual(token_user['id'], new_trust['trustor_user_id']) def test_token_from_trust_wrong_user_fails(self): 
new_trust = self.create_trust(self.sample_data, self.trustor['name']) request_body = self.build_v2_token_request('FOO', 'foo2', new_trust) self.assertRaises(exception.Forbidden, self.controller.authenticate, {}, request_body) def test_token_from_trust_wrong_project_fails(self): for assigned_role in self.assigned_roles: self.assignment_api.add_role_to_user_and_project( self.trustor['id'], self.tenant_baz['id'], assigned_role) new_trust = self.create_trust(self.sample_data, self.trustor['name']) request_body = self.build_v2_token_request('TWO', 'two2', new_trust, self.tenant_baz['id']) self.assertRaises(exception.Forbidden, self.controller.authenticate, {}, request_body) def fetch_v2_token_from_trust(self, trust): request_body = self.build_v2_token_request('TWO', 'two2', trust) auth_response = self.controller.authenticate({}, request_body) return auth_response def fetch_v3_token_from_trust(self, trust, trustee): v3_password_data = { 'identity': { "methods": ["password"], "password": { "user": { "id": trustee["id"], "password": trustee["password"] } } }, 'scope': { 'project': { 'id': self.tenant_baz['id'] } } } auth_response = (self.auth_v3_controller.authenticate_for_token ({'environment': {}, 'query_string': {}}, v3_password_data)) token = auth_response.headers['X-Subject-Token'] v3_req_with_trust = { "identity": { "methods": ["token"], "token": {"id": token}}, "scope": { "OS-TRUST:trust": {"id": trust['id']}}} token_auth_response = (self.auth_v3_controller.authenticate_for_token ({'environment': {}, 'query_string': {}}, v3_req_with_trust)) return token_auth_response def test_create_v3_token_from_trust(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee) trust_token_user = auth_response.json['token']['user'] self.assertEqual(self.trustor['id'], trust_token_user['id']) trust_token_trust = auth_response.json['token']['OS-TRUST:trust'] self.assertEqual(trust_token_trust['id'], new_trust['id']) self.assertEqual(self.trustor['id'], trust_token_trust['trustor_user']['id']) self.assertEqual(self.trustee['id'], trust_token_trust['trustee_user']['id']) trust_token_roles = auth_response.json['token']['roles'] self.assertEqual(2, len(trust_token_roles)) def test_v3_trust_token_get_token_fails(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee) trust_token = auth_response.headers['X-Subject-Token'] v3_token_data = {'identity': { 'methods': ['token'], 'token': {'id': trust_token} }} self.assertRaises( exception.Forbidden, self.auth_v3_controller.authenticate_for_token, {'environment': {}, 'query_string': {}}, v3_token_data) def test_token_from_trust(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) auth_response = self.fetch_v2_token_from_trust(new_trust) self.assertIsNotNone(auth_response) self.assertEqual(2, len(auth_response['access']['metadata']['roles']), "user_foo has three roles, but the token should" " only get the two roles specified in the trust.") def assert_token_count_for_trust(self, trust, expected_value): tokens = self.token_provider_api._persistence._list_tokens( self.trustee['id'], trust_id=trust['id']) token_count = len(tokens) self.assertEqual(expected_value, token_count) def test_delete_tokens_for_user_invalidates_tokens_from_trust(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) self.assert_token_count_for_trust(new_trust, 0) 
self.fetch_v2_token_from_trust(new_trust) self.assert_token_count_for_trust(new_trust, 1) self.token_provider_api._persistence.delete_tokens_for_user( self.trustee['id']) self.assert_token_count_for_trust(new_trust, 0) def test_token_from_trust_cant_get_another_token(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) auth_response = self.fetch_v2_token_from_trust(new_trust) trust_token_id = auth_response['access']['token']['id'] request_body = _build_user_auth(token={'id': trust_token_id}, tenant_id=self.tenant_bar['id']) self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_delete_trust_revokes_token(self): unscoped_token = self.get_unscoped_token(self.trustor['name']) new_trust = self.create_trust(self.sample_data, self.trustor['name']) context = self._create_auth_context( unscoped_token['access']['token']['id']) self.fetch_v2_token_from_trust(new_trust) trust_id = new_trust['id'] tokens = self.token_provider_api._persistence._list_tokens( self.trustor['id'], trust_id=trust_id) self.assertEqual(1, len(tokens)) self.trust_controller.delete_trust(context, trust_id=trust_id) tokens = self.token_provider_api._persistence._list_tokens( self.trustor['id'], trust_id=trust_id) self.assertEqual(0, len(tokens)) def test_token_from_trust_with_no_role_fails(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) for assigned_role in self.assigned_roles: self.assignment_api.remove_role_from_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) request_body = self.build_v2_token_request('TWO', 'two2', new_trust) self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_expired_trust_get_token_fails(self): expiry = "1999-02-18T10:10:00Z" new_trust = self.create_trust(self.sample_data, self.trustor['name'], expiry) request_body = self.build_v2_token_request('TWO', 'two2', new_trust) self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_token_from_trust_with_wrong_role_fails(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) self.assignment_api.add_role_to_user_and_project( self.trustor['id'], self.tenant_bar['id'], self.role_other['id']) for assigned_role in self.assigned_roles: self.assignment_api.remove_role_from_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) request_body = self.build_v2_token_request('TWO', 'two2', new_trust) self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_do_not_consume_remaining_uses_when_get_token_fails(self): trust_data = copy.deepcopy(self.sample_data) trust_data['remaining_uses'] = 3 new_trust = self.create_trust(trust_data, self.trustor['name']) for assigned_role in self.assigned_roles: self.assignment_api.remove_role_from_user_and_project( self.trustor['id'], self.tenant_bar['id'], assigned_role) request_body = self.build_v2_token_request('TWO', 'two2', new_trust) self.assertRaises(exception.Forbidden, self.controller.authenticate, {}, request_body) unscoped_token = self.get_unscoped_token(self.trustor['name']) context = self._create_auth_context( unscoped_token['access']['token']['id']) trust = self.trust_controller.get_trust(context, new_trust['id'])['trust'] self.assertEqual(3, trust['remaining_uses']) def test_v2_trust_token_contains_trustor_user_id_and_impersonation(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) auth_response = 
self.fetch_v2_token_from_trust(new_trust) self.assertEqual(new_trust['trustee_user_id'], auth_response['access']['trust']['trustee_user_id']) self.assertEqual(new_trust['trustor_user_id'], auth_response['access']['trust']['trustor_user_id']) self.assertEqual(new_trust['impersonation'], auth_response['access']['trust']['impersonation']) self.assertEqual(new_trust['id'], auth_response['access']['trust']['id']) validate_response = self.controller.validate_token( context=dict(is_admin=True, query_string={}), token_id=auth_response['access']['token']['id']) self.assertEqual( new_trust['trustee_user_id'], validate_response['access']['trust']['trustee_user_id']) self.assertEqual( new_trust['trustor_user_id'], validate_response['access']['trust']['trustor_user_id']) self.assertEqual( new_trust['impersonation'], validate_response['access']['trust']['impersonation']) self.assertEqual( new_trust['id'], validate_response['access']['trust']['id']) def disable_user(self, user): user['enabled'] = False self.identity_api.update_user(user['id'], user) def test_trust_get_token_fails_if_trustor_disabled(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) request_body = self.build_v2_token_request(self.trustee['name'], self.trustee['password'], new_trust) self.disable_user(self.trustor) self.assertRaises( exception.Forbidden, self.controller.authenticate, {}, request_body) def test_trust_get_token_fails_if_trustee_disabled(self): new_trust = self.create_trust(self.sample_data, self.trustor['name']) request_body = self.build_v2_token_request(self.trustee['name'], self.trustee['password'], new_trust) self.disable_user(self.trustee) self.assertRaises( exception.Unauthorized, self.controller.authenticate, {}, request_body) class TokenExpirationTest(AuthTest): @mock.patch.object(timeutils, 'utcnow') def _maintain_token_expiration(self, mock_utcnow): """Token expiration should be maintained after re-auth & validation.""" now = datetime.datetime.utcnow() mock_utcnow.return_value = now r = self.controller.authenticate( {}, auth={ 'passwordCredentials': { 'username': self.user_foo['name'], 'password': self.user_foo['password'] } }) unscoped_token_id = r['access']['token']['id'] original_expiration = r['access']['token']['expires'] mock_utcnow.return_value = now + datetime.timedelta(seconds=1) r = self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=unscoped_token_id) self.assertEqual(original_expiration, r['access']['token']['expires']) mock_utcnow.return_value = now + datetime.timedelta(seconds=2) r = self.controller.authenticate( {}, auth={ 'token': { 'id': unscoped_token_id, }, 'tenantId': self.tenant_bar['id'], }) scoped_token_id = r['access']['token']['id'] self.assertEqual(original_expiration, r['access']['token']['expires']) mock_utcnow.return_value = now + datetime.timedelta(seconds=3) r = self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=scoped_token_id) self.assertEqual(original_expiration, r['access']['token']['expires']) def test_maintain_uuid_token_expiration(self): self.config_fixture.config( group='token', provider='keystone.token.providers.uuid.Provider') self._maintain_token_expiration() class AuthCatalog(tests.SQLDriverOverrides, AuthTest): """Tests for the catalog provided in the auth response.""" def config_files(self): config_files = super(AuthCatalog, self).config_files() # We need to use a backend that supports disabled endpoints, like the # SQL backend. 
config_files.append(tests.dirs.tests_conf('backend_sql.conf')) return config_files def _create_endpoints(self): def create_region(**kwargs): ref = {'id': uuid.uuid4().hex} ref.update(kwargs) self.catalog_api.create_region(ref) return ref def create_endpoint(service_id, region, **kwargs): id_ = uuid.uuid4().hex ref = { 'id': id_, 'interface': 'public', 'region_id': region, 'service_id': service_id, 'url': 'http://localhost/%s' % uuid.uuid4().hex, } ref.update(kwargs) self.catalog_api.create_endpoint(id_, ref) return ref # Create a service for use with the endpoints. def create_service(**kwargs): id_ = uuid.uuid4().hex ref = { 'id': id_, 'name': uuid.uuid4().hex, 'type': uuid.uuid4().hex, } ref.update(kwargs) self.catalog_api.create_service(id_, ref) return ref enabled_service_ref = create_service(enabled=True) disabled_service_ref = create_service(enabled=False) region = create_region() # Create endpoints enabled_endpoint_ref = create_endpoint( enabled_service_ref['id'], region['id']) create_endpoint( enabled_service_ref['id'], region['id'], enabled=False, interface='internal') create_endpoint( disabled_service_ref['id'], region['id']) return enabled_endpoint_ref def test_auth_catalog_disabled_endpoint(self): """On authenticate, get a catalog that excludes disabled endpoints.""" endpoint_ref = self._create_endpoints() # Authenticate body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") token = self.controller.authenticate({}, body_dict) # Check the catalog self.assertEqual(1, len(token['access']['serviceCatalog'])) endpoint = token['access']['serviceCatalog'][0]['endpoints'][0] self.assertEqual( 1, len(token['access']['serviceCatalog'][0]['endpoints'])) exp_endpoint = { 'id': endpoint_ref['id'], 'publicURL': endpoint_ref['url'], 'region': endpoint_ref['region_id'], } self.assertEqual(exp_endpoint, endpoint) def test_validate_catalog_disabled_endpoint(self): """On validate, get back a catalog that excludes disabled endpoints.""" endpoint_ref = self._create_endpoints() # Authenticate body_dict = _build_user_auth( username='FOO', password='foo2', tenant_name="BAR") token = self.controller.authenticate({}, body_dict) # Validate token_id = token['access']['token']['id'] validate_ref = self.controller.validate_token( dict(is_admin=True, query_string={}), token_id=token_id) # Check the catalog self.assertEqual(1, len(token['access']['serviceCatalog'])) endpoint = validate_ref['access']['serviceCatalog'][0]['endpoints'][0] self.assertEqual( 1, len(token['access']['serviceCatalog'][0]['endpoints'])) exp_endpoint = { 'id': endpoint_ref['id'], 'publicURL': endpoint_ref['url'], 'region': endpoint_ref['region_id'], } self.assertEqual(exp_endpoint, endpoint) class NonDefaultAuthTest(tests.TestCase): def test_add_non_default_auth_method(self): self.config_fixture.config(group='auth', methods=['password', 'token', 'custom']) config.setup_authentication() self.assertTrue(hasattr(CONF.auth, 'custom'))
jumpstarter-io/keystone
keystone/tests/unit/test_auth.py
Python
apache-2.0
56,629
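The test module above drives the v2.0 token controller with plain auth dictionaries built by its _build_user_auth helper. As a quick orientation aid, the standalone sketch below is not part of the jumpstarter-io/keystone repository; the function name build_user_auth_sketch is invented for illustration, and the FOO/foo2/BAR values simply mirror the fixtures used in the tests. It only reproduces the shape of the payload those tests assert on.

# Illustrative sketch only: shows the shape of the v2.0 'auth' body that
# the _build_user_auth helper in test_auth.py assembles. Runnable on its own.

def build_user_auth_sketch(username=None, password=None, token=None,
                           tenant_name=None, trust_id=None):
    """Assemble a v2.0 auth body the same way the test helper does."""
    auth = {}
    if token is not None:
        auth['token'] = token
    if username or password:
        creds = {}
        if username is not None:
            creds['username'] = username
        if password is not None:
            creds['password'] = password
        auth['passwordCredentials'] = creds
    if tenant_name is not None:
        auth['tenantName'] = tenant_name
    if trust_id is not None:
        auth['trust_id'] = trust_id
    return auth


if __name__ == '__main__':
    # A project-scoped password request, as used by tests such as
    # test_auth_unscoped_token_project (FOO / foo2 / BAR fixtures).
    print(build_user_auth_sketch(username='FOO', password='foo2',
                                 tenant_name='BAR'))
    # e.g. {'passwordCredentials': {'username': 'FOO', 'password': 'foo2'},
    #       'tenantName': 'BAR'}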
#! /usr/bin/env python # coding = utf-8 __author__ = 'jeff.yu' from random import choice from string import lowercase from string import digits import datetime """ {"time_stamp":"2014-01-02T01:10:00Z", "click_id":"43306130-ff65-436b-9851-1e2650000021", "campaign_id":"t_571", "offer_id":109, "ref_site":"", "site":"", "click_time":"2014-01-02T01:10:00Z", "cost_per_click":10.264, "payout":4.491, "real_ip":"147.12.35.1", "proxy_ip":"172.30.14.11", "device_id":18, "os_id":149, "carrier_id":1, "mobile_brand_id":3, "screen_h":6, "screen_w":8, "screen_id":7, "city_id":2, "brand_id":1, "model_id":4, "country_id":3, "state_id":4, "conversion_time":"2014-01-02T01:10:00Z", "event":1, "sub1":"night", "sub2":"", "sub3":"", "sub4":"", "sub5":"", "sub6":"", "sub7":"", "sub8":"", "click":0, "lp_click":0, "conversion":1} """ def n_choice(num): seq = lowercase + digits result = '' for i in range(num): result += choice(seq) return result class DataProducer(object): def __init__(self, campaign_id): self.timestamp = '' self.campaign_id = campaign_id self.click_id = '' self.offer_id = '' self.ref_site = '' self.site = '' self.click_time = '' self.cost_per_click = '' self.payout = '' self.real_ip = '' self.proxy_ip = '' self.device_id = '' self.os_id = '' self.carrier_id = '' self.screen_id = '' self.screen_h = '' self.screen_w = '' self.mobile_brand_id = '' self.model_id = '' self.brand_id = '' self.country_id = '' self.city_id = '' self.state_id = '' self.conversion_time = '' self.event = '' self.sub1 = '' self.sub2 = '' self.sub3 = '' self.sub4 = '' self.sub5 = '' self.sub6 = '' self.sub7 = '' self.sub8 = '' self.click = '' self.lp_click = '' self.conversion = '' def get_data(self): pass def set_timestamp(self): """ :return:2014-01-02T01:10:00Z """ year = choice((2013, 2014)) max_month = 12 if year == 2014: max_month = datetime.date.today().month month = choice(range(1, max_month)) day = choice(range(1, 29)) hour = choice(range(0, 24)) minute = choice(range(0, 60)) second = choice(range(0, 60)) self.timestamp = "{0}-{1}-{2}T{3}:{4}:{5}Z".format(year, month, day, hour, minute, second) def get_time_stamp(self): return self.timestamp def set_click_id(self): self.click_id = "{0}-{1}-{2}-{3}-{4}".format(n_choice(8), n_choice(4), n_choice(4), n_choice(4), n_choice(12)) def get_click_id(self): return self.click_id def get_campaign_id(self): return self.campaign_id def set_offer_id(self): if self.lp_click == 1: self.offer_id = -1 else: self.offer_id = choice(('2205', '2206', '2207', '2208', '2209')) def get_offer_id(self): return self.offer_id def set_ref_site(self): self.ref_site = choice(('http://www.oracle.com', 'http://www.google.com', 'http://www.taobao.com', 'http://www.apple.com', 'http://www.yahoo.com', 'http://www.alibaba.com')) def get_ref_site(self): return self.ref_site def set_site(self): self.site = self.get_ref_site().split("//")[1] def get_site(self): return self.site def set_click_time(self): self.click_time = self.timestamp def get_click_time(self): return self.click_time def set_cost_per_click(self): self.cost_per_click = choice((10.27, 12.23, 22.33, 9.22, 9.88, 14.43)) def get_cost_per_click(self): return self.cost_per_click def set_payout(self): self.payout = choice((10.27, 12.23, 22.33, 9.22, 9.88, 14.43)) def get_payout(self): return self.payout def set_real_ip(self): self.real_ip = choice(('43.32.189.23', '123.98.88.12', '10.1.15.44', '12.99.33.121', '10.1.5.99', '79.50.32.11')) def get_real_ip(self): return self.real_ip def set_proxy_ip(self): self.proxy_ip = choice(('43.32.189.23', 
'123.98.88.12', '10.1.15.44', '12.99.33.121', '10.1.5.99', '79.50.32.11')) def get_proxy_ip(self): return self.proxy_ip def set_device_id(self): self.device_id = choice((1, 2, 3)) def get_device_id(self): return self.device_id def set_os_id(self): self.os_id = choice((1, 2, 3)) def get_os_id(self): return self.os_id def set_carrier_id(self): self.carrier_id = choice((1, 2, 3, 4)) def get_carrier_id(self): return self.campaign_id def set_mobile_brand_id(self): self.mobile_brand_id = choice((1, 2, 3, 4)) def get_mobile_brand_id(self): return self.mobile_brand_id def set_screen_h(self): self.screen_h = choice((1280, 960, 480, 320)) def get_screen_h(self): return self.screen_h def set_screen_w(self): self.screen_w = choice((1280, 960, 480, 320)) def get_screen_w(self): return self.screen_w def set_screen_id(self): self.screen_id = choice((1, 2, 3, 4)) def get_screen_id(self): return self.screen_id def set_city_id(self): self.city_id = choice(range(1, 50)) def get_city_id(self): return self.city_id def set_brand_id(self): self.brand_id = choice(range(1, 50)) def get_brand_id(self): return self.brand_id def set_model_id(self): self.model_id= choice(range(1, 50)) def get_model_id(self): return self.model_id def set_country_id(self): self.country_id= choice(range(1, 50)) def get_country_id(self): return self.country_id def set_state_id(self): self.state_id= choice(range(1, 50)) def get_state_id(self): return self.state_id def set_conversion_time(self): if self.conversion == 1: self.conversion_time = self.timestamp def get_conversion_time(self): return self.conversion_time def set_event(self): self.event= choice(range(1, 50)) def get_event(self): return self.event def set_sub1(self): self.sub1 = "sub1_" + n_choice(4) def get_sub1(self): return self.sub1 def set_sub2(self): self.sub2 = "sub2_" + n_choice(4) def get_sub2(self): return self.sub2 def set_sub3(self): self.sub3 = "sub3_" + n_choice(4) def get_sub3(self): return self.sub3 def set_sub4(self): self.sub4 = "sub4_" + n_choice(4) def get_sub4(self): return self.sub4 def set_sub5(self): self.sub5 = "sub5_" + n_choice(4) def get_sub5(self): return self.sub5 def set_sub6(self): self.sub6 = "sub6_" + n_choice(4) def get_sub6(self): return self.sub6 def set_sub7(self): self.sub7 = "sub7_" + n_choice(4) def get_sub7(self): return self.sub7 def set_sub8(self): self.sub8 = "sub8_" + n_choice(4) def get_sub8(self): return self.sub8 def set_click(self): self.click = 1 def get_click(self): return self.click def set_lp_click(self): self.lp_click = choice((0, 1)) def get_lp_click(self): return self.lp_click def set_conversion(self): self.conversion = choice((0, 1)) def get_conversion(self): return self.conversion def set_others(self): self.set_timestamp() self.set_click_id() self.set_offer_id() self.set_brand_id() self.set_carrier_id() self.set_city_id() self.set_click_time() self.set_conversion_time() self.set_cost_per_click() self.set_country_id() self.set_device_id() self.set_device_id() self.set_screen_h() self.set_screen_id() self.set_screen_w() self.set_ref_site() self.set_state_id() self.set_site() self.set_sub1() self.set_sub2() self.set_sub3() self.set_sub4() self.set_sub5() self.set_sub6() self.set_sub7() self.set_sub8() self.set_real_ip() self.set_proxy_ip() self.set_payout() self.set_os_id() self.set_mobile_brand_id() self.set_model_id()
yfsuse/Traxex
com/yeahmobi/common/dataProducer.py
Python
gpl-2.0
8,757
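Two details in dataProducer.py above are easy to miss: set_timestamp interpolates bare integers, so it emits values like 2013-3-7T4:5:9Z rather than the zero-padded 2014-01-02T01:10:00Z shown in the module docstring, and get_carrier_id returns self.campaign_id instead of self.carrier_id. The standalone sketch below is not part of the yfsuse/Traxex code; the name random_timestamp is invented for illustration. It shows one way to produce the zero-padded form with the standard datetime API.

# Illustrative sketch only -- not part of the Traxex repository. Produces a
# zero-padded "%Y-%m-%dT%H:%M:%SZ" string of the kind the module docstring
# above uses as its sample data.
import datetime
from random import choice


def random_timestamp():
    """Return a random timestamp string such as '2014-01-02T01:10:00Z'."""
    year = choice((2013, 2014))
    month = choice(range(1, 13))
    day = choice(range(1, 29))      # 1..28 keeps every month valid
    hour = choice(range(0, 24))
    minute = choice(range(0, 60))
    second = choice(range(0, 60))
    stamp = datetime.datetime(year, month, day, hour, minute, second)
    return stamp.strftime('%Y-%m-%dT%H:%M:%SZ')


if __name__ == '__main__':
    print(random_timestamp())       # e.g. 2013-07-04T09:05:08Z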
# # Copyright (c) 2008--2015 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # # # rhn-ssl-tool openssl.cnf style file manipulation class # ## FIXME: the logic here is *WAY* too complicated. Need to simplify -taw ## language imports from __future__ import print_function import os import sys import copy import time import socket ## local imports from spacewalk.common.fileutils import cleanupNormPath, rotateFile, rhn_popen, cleanupAbsPath from certs.sslToolLib import getMachineName, daysTil18Jan2038, incSerial, fixSerial from rhn.i18n import sstr # defaults where we can see them (NOTE: directory is figured at write time) CERT_PATH = '/usr/share/rhn/certs/' BUILD_DIR = cleanupNormPath('./ssl-build', dotYN=1) HOSTNAME = socket.gethostname() MACHINENAME = getMachineName(HOSTNAME) CA_KEY_NAME = 'RHN-ORG-PRIVATE-SSL-KEY' CA_CRT_NAME = 'RHN-ORG-TRUSTED-SSL-CERT' CA_CRT_RPM_NAME = CA_CRT_NAME.lower() BASE_SERVER_RPM_NAME = 'rhn-org-httpd-ssl-key-pair' BASE_SERVER_TAR_NAME = 'rhn-org-httpd-ssl-archive' LEGACY_CA_KEY_NAME = 'ca.key' LEGACY_CA_CRT_NAME = 'RHNS-CORP-CA-CERT' LEGACY_SERVER_RPM_NAME1 = 'rhns-ssl-cert' LEGACY_SERVER_RPM_NAME2 = 'rhn-httpd-ssl-key-pair' LEGACY_CA_CERT_RPM_NAME = 'rhns-ca-cert' CA_OPENSSL_CNF_NAME = 'rhn-ca-openssl.cnf' SERVER_OPENSSL_CNF_NAME = 'rhn-server-openssl.cnf' MD = 'sha256' CRYPTO = '-des3' def getOption(options, opt): """ fetch the value of an options object item without blowing up upon obvious errors """ assert opt.find('-') == -1 if not options: return None if opt in options.__dict__: #print 'XXX opt, options.__dict__[opt]', opt, options.__dict__[opt] return options.__dict__[opt] else: return None def setOption(options, opt, value): """ set the value of an options object item without blowing up upon obvious errors """ if not options: return if opt in options.__dict__: options.__dict__[opt] = value def getStartDate_aWeekAgo(): """ for SSL cert/key generation, returns now, minus 1 week just in case weird time zone issues get in the way of a working cert/key. 
format: YYMMDDHHMMSSZ where Z is the capital letter Z """ aweek = 24*60*60*7 return time.strftime("%y%m%d%H%M%S", time.gmtime(time.time()-aweek)) + 'Z' _defs = \ { '--dir' : BUILD_DIR, '--ca-key' : 'RHN-ORG-PRIVATE-SSL-KEY', '--ca-cert' : 'RHN-ORG-TRUSTED-SSL-CERT', '--cert-expiration' : int(daysTil18Jan2038()), '--startdate' : getStartDate_aWeekAgo(), '--server-key' : 'server.key', '--server-cert-req' : 'server.csr', '--server-cert' : 'server.crt', '--jabberd-ssl-cert': 'server.pem', '--set-country' : 'US', '--set-common-name' : "", # these two will never appear '--set-hostname' : HOSTNAME, # at the same time on the CLI '--ca-cert-rpm' : CA_CRT_RPM_NAME, '--server-rpm' : BASE_SERVER_RPM_NAME+'-'+MACHINENAME, '--server-tar' : BASE_SERVER_TAR_NAME+'-'+MACHINENAME, '--rpm-packager' : None, '--rpm-vendor' : None, } _defsCa = copy.copy(_defs) _defsCa.update( { '--set-state' : '', '--set-city' : '', '--set-org' : '', '--set-org-unit' : '', '--set-email' : '', }) _defsServer = copy.copy(_defs) _defsServer.update( { '--set-state' : 'North Carolina', '--set-city' : 'Raleigh', '--set-org' : 'Example Corp. Inc.', '--set-org-unit' : 'unit', '--set-email' : '[email protected]', }) DEFS = _defsServer def reInitDEFS(caYN=0): global DEFS if caYN: DEFS.update(_defsCa) else: DEFS.update(_defsServer) def figureDEFS_dirs(options): """ figure out the directory defaults (after options being at least parsed once). """ global DEFS ## fix up the --dir setting DEFS['--dir'] = getOption(options, 'dir') or DEFS['--dir'] or '.' DEFS['--dir'] = cleanupNormPath(DEFS['--dir'], dotYN=1) ## fix up the --set-hostname and MACHINENAME settings DEFS['--set-hostname'] = getOption(options, 'set_hostname') \ or DEFS['--set-hostname'] \ or socket.gethostname() global MACHINENAME MACHINENAME = getMachineName(DEFS['--set-hostname']) ## remap to options object setOption(options, 'dir', DEFS['--dir']) setOption(options, 'set_hostname', DEFS['--set-hostname']) def figureDEFS_CA(options): """ figure out the defaults (after options being at least parsed once) for the CA key-pair(set) variables. 
""" global DEFS if not getOption(options, 'ca_key'): # the various default names for CA keys (a hierarchy) for possibility in (CA_KEY_NAME, 'ca.key', 'cakey.pem'): if os.path.exists(os.path.join(DEFS['--dir'], possibility)): DEFS['--ca-key'] = possibility break DEFS['--ca-key'] = os.path.basename(getOption(options, 'ca_key') or DEFS['--ca-key']) DEFS['--ca-cert'] = os.path.basename(getOption(options, 'ca_cert') or DEFS['--ca-cert']) # the various default names for CA keys and certs if not getOption(options, 'ca_cert'): if DEFS['--ca-key'] == CA_KEY_NAME: DEFS['--ca-cert'] = CA_CRT_NAME elif DEFS['--ca-key'] == 'ca.key': DEFS['--ca-cert'] = 'ca.crt' elif DEFS['--ca-key'] == 'cakey.pem': DEFS['--ca-cert'] = 'cacert.pem' else: DEFS['--ca-cert'] = 'ca.crt' DEFS['--cert-expiration'] = getOption(options, 'cert_expiration') \ or int(daysTil18Jan2038()) DEFS['--ca-cert-rpm'] = getOption(options, 'ca_cert_rpm') \ or CA_CRT_RPM_NAME DEFS['--rpm-packager'] = getOption(options, 'rpm_packager') DEFS['--rpm-vendor'] = getOption(options, 'rpm_vendor') if '--cert-expiration' in DEFS: # nothing under 1 day or over # days til 18Jan2038 if DEFS['--cert-expiration'] < 1: DEFS['--cert-expiration'] = 1 _maxdays = int(daysTil18Jan2038()) # already rounded if DEFS['--cert-expiration'] > _maxdays: DEFS['--cert-expiration'] = _maxdays # remap to options object setOption(options, 'ca_key', DEFS['--ca-key']) setOption(options, 'ca_cert', DEFS['--ca-cert']) setOption(options, 'cert_expiration', DEFS['--cert-expiration']) setOption(options, 'ca_cert_rpm', DEFS['--ca-cert-rpm']) def figureDEFS_server(options): """ figure out the defaults (after options being at least parsed once) for the server key-pair(set) variables. """ global DEFS DEFS['--server-key'] = os.path.basename(getOption(options, 'server_key') \ or DEFS['--server-key'] or 'server.key') DEFS['--server-cert-req'] = \ os.path.basename(getOption(options, 'server_cert_req') \ or DEFS['--server-cert-req'] or 'server.csr') DEFS['--server-cert'] = os.path.basename(getOption(options, 'server_cert')\ or DEFS['--server-cert'] or 'server.crt') DEFS['--cert-expiration'] = getOption(options, 'cert_expiration') \ or int(daysTil18Jan2038()) # already rounded DEFS['--server-rpm'] = getOption(options, 'server_rpm') \ or BASE_SERVER_RPM_NAME+'-'+MACHINENAME DEFS['--server-tar'] = getOption(options, 'server_tar') \ or BASE_SERVER_TAR_NAME+'-'+MACHINENAME DEFS['--rpm-packager'] = getOption(options, 'rpm_packager') DEFS['--rpm-vendor'] = getOption(options, 'rpm_vendor') if '--cert-expiration' in DEFS: # nothing under 1 day or over # days til 18Jan2038 if DEFS['--cert-expiration'] < 1: DEFS['--cert-expiration'] = 1 _maxdays = int(daysTil18Jan2038()) # already rounded if DEFS['--cert-expiration'] > _maxdays: DEFS['--cert-expiration'] = _maxdays # remap to options object setOption(options, 'server_key', DEFS['--server-key']) setOption(options, 'server_cert_req', DEFS['--server-cert-req']) setOption(options, 'server_cert', DEFS['--server-cert']) setOption(options, 'cert_expiration', DEFS['--cert-expiration']) setOption(options, 'server_rpm', DEFS['--server-rpm']) setOption(options, 'server_tar', DEFS['--server-tar']) def figureDEFS_distinguishing(options): """ figure out the defaults (after options being at least parsed once) for the distinguishing variables (C, ST, L, O, OU, CN, emailAddress) First from config file, then from commanline. 
""" global DEFS #if options: # print 'XXX options.__dict__.keys()', options.__dict__.keys() #print 'XXX figureDEFS_distinguishing()' ## map the config file settings to the DEFS object conf = {} caYN = '--gen-ca-cert' in sys.argv or '--gen-ca' in sys.argv if caYN: conf = ConfigFile(os.path.join(DEFS['--dir'], CA_OPENSSL_CNF_NAME)).parse() else: conf = ConfigFile(os.path.join(DEFS['--dir'], MACHINENAME, SERVER_OPENSSL_CNF_NAME)).parse() mapping = { 'C' : ('--set-country',), 'ST' : ('--set-state',), 'L' : ('--set-city',), 'O' : ('--set-org',), 'OU' : ('--set-org-unit',), 'CN' : ('--set-common-name', '--set-hostname'), #'CN' : ('--set-common-name',), 'emailAddress' : ('--set-email',), } # map config file settings to DEFS (see mapping dict above) for key in conf.keys(): #print 'XXX KEY', key, repr(mapping[key]) for v in mapping[key]: DEFS[v] = conf[key] #print 'XXX DEFS["%s"]' % v, '=', conf[key] ## map commanline options to the DEFS object if getOption(options, 'set_country') is not None: DEFS['--set-country'] = getOption(options, 'set_country') if getOption(options, 'set_state') is not None: DEFS['--set-state'] = getOption(options, 'set_state') if getOption(options, 'set_city') is not None: DEFS['--set-city'] = getOption(options, 'set_city') if getOption(options, 'set_org') is not None: DEFS['--set-org'] = getOption(options, 'set_org') if getOption(options, 'set_org_unit') is not None: DEFS['--set-org-unit'] = getOption(options, 'set_org_unit') if getOption(options, 'set_common_name') is not None: DEFS['--set-common-name'] = getOption(options, 'set_common_name') if getOption(options, 'set_hostname') is not None: DEFS['--set-hostname'] = getOption(options, 'set_hostname') if getOption(options, 'set_email') is not None: DEFS['--set-email'] = getOption(options, 'set_email') DEFS['--set-cname'] = getOption(options, 'set_cname') # this is list # remap to options object setOption(options, 'set_country', DEFS['--set-country']) setOption(options, 'set_state', DEFS['--set-state']) setOption(options, 'set_city', DEFS['--set-city']) setOption(options, 'set_org', DEFS['--set-org']) setOption(options, 'set_org_unit', DEFS['--set-org-unit']) setOption(options, 'set_common_name', DEFS['--set-common-name']) #setOption(options, 'set_hostname', DEFS['--set-hostname']) setOption(options, 'set_email', DEFS['--set-email']) setOption(options, 'set_cname', DEFS['--set-cname']) CONF_TEMPLATE_CA = """\ # rhn-ca-openssl.cnf #--------------------------------------------------------------------------- # RHN Management {Satellite,Proxy} autogenerated openSSL configuration file. #--------------------------------------------------------------------------- [ ca ] default_ca = CA_default [ CA_default ] default_bits = 2048 x509_extensions = ca_x509_extensions dir = %s database = $dir/index.txt serial = $dir/serial # how closely we follow policy policy = policy_optional copy_extensions = copy [ policy_optional ] countryName = optional stateOrProvinceName = optional organizationName = optional organizationalUnitName = optional commonName = optional emailAddress = optional #--------------------------------------------------------------------------- [ req ] default_bits = 2048 distinguished_name = req_distinguished_name prompt = no x509_extensions = req_ca_x509_extensions [ req_distinguished_name ] %s [ req_ca_x509_extensions ] basicConstraints = CA:true keyUsage = digitalSignature, keyEncipherment, keyCertSign extendedKeyUsage = serverAuth, clientAuth # PKIX recommendations harmless if included in all certificates. 
nsComment = "RHN SSL Tool Generated Certificate" subjectKeyIdentifier = hash authorityKeyIdentifier = keyid, issuer:always [ req_server_x509_extensions ] basicConstraints = CA:false keyUsage = digitalSignature, keyEncipherment extendedKeyUsage = serverAuth, clientAuth nsCertType = server # PKIX recommendations harmless if included in all certificates. nsComment = "RHN SSL Tool Generated Certificate" subjectKeyIdentifier = hash authorityKeyIdentifier = keyid, issuer:always #=========================================================================== """ CONF_TEMPLATE_SERVER = """\ # rhn-server-openssl.cnf #--------------------------------------------------------------------------- # RHN Management {Satellite,Proxy} autogenerated openSSL configuration file. #--------------------------------------------------------------------------- [ req ] default_bits = 2048 distinguished_name = req_distinguished_name prompt = no x509_extensions = req_server_x509_extensions req_extensions = v3_req [ req_distinguished_name ] %s [ req_server_x509_extensions ] basicConstraints = CA:false keyUsage = digitalSignature, keyEncipherment extendedKeyUsage = serverAuth, clientAuth nsCertType = server # PKIX recommendations harmless if included in all certificates. nsComment = "RHN SSL Tool Generated Certificate" subjectKeyIdentifier = hash authorityKeyIdentifier = keyid, issuer:always [ v3_req ] # Extensions to add to a certificate request basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment # Some CAs do not yet support subjectAltName in CSRs. # Instead the additional names are form entries on web # pages where one requests the certificate... subjectAltName = @alt_names [alt_names] %s #=========================================================================== """ def gen_req_alt_names(d, hostname): """ generates the alt_names section of the *-openssl.cnf file """ i = 0 result = '' dnsname = [ hostname ] if '--set-cname' in d and d['--set-cname']: dnsname.extend(d['--set-cname']) for name in dnsname: i += 1 result += "DNS.%d = %s\n" % (i, name) return result def gen_req_distinguished_name(d): """ generates the rhn_distinguished section of the *-openssl.cnf file """ s = "" keys = ('C', 'ST', 'L', 'O', 'OU', 'CN', 'emailAddress') for key in keys: if key in d and d[key].strip(): s = s + key + (24-len(key))*' ' + '= %s\n' % d[key].strip() else: s = s + '#' + key + (24-len(key))*' ' + '= ""\n' return s def figureSerial(caCertFilename, serialFilename, indexFilename): """ for our purposes we allow the same serial number for server certs BUT WE DO NOT ALLOW server certs and CA certs to share the same serial number. We blow away the index.txt file each time because we are less concerned with matching serials/signatures between server.crt's. 
""" # what serial # is the ca cert using (we need to increment from that) ret, outstream, errstream = rhn_popen(['/usr/bin/openssl', 'x509', '-noout', '-serial', '-in', caCertFilename]) out = sstr(outstream.read()) outstream.close() errstream.read() errstream.close() assert not ret caSerial = out.strip().split('=') assert len(caSerial) > 1 caSerial = caSerial[1] caSerial = eval('0x'+caSerial) # initialize the serial value (starting at whatever is in # serialFilename or 1) serial = 1 if os.path.exists(serialFilename): serial = open(serialFilename, 'r').read().strip() if serial: serial = eval('0x'+serial) else: serial = 1 # make sure it is at least 1 more than the CA's serial code always # REMEMBER: openssl will incremented the serial number each time # as well. if serial <= caSerial: serial = incSerial(hex(caSerial)) serial = eval('0x' + serial) serial = fixSerial(hex(serial)) # create the serial file if it doesn't exist # write the digits to this file open(serialFilename, 'w').write(serial+'\n') os.chmod(serialFilename, int('0600',8)) # truncate the index.txt file. Less likely to have unneccessary clashes. open(indexFilename, 'w') os.chmod(indexFilename, int('0600',8)) return serial class ConfigFile: def __init__(self, filename=None): self.filename = filename if self.filename is None: self.filename = SERVER_OPENSSL_CNF_NAME if os.path.exists(os.path.join(DEFS['--dir'], 'rhn_openssl.cnf')): self.filename = os.path.join(DEFS['--dir'], "rhn_openssl.cnf") elif os.path.exists(os.path.join(DEFS['--dir'], 'openssl.cnf')): self.filename = os.path.join(DEFS['--dir'], "openssl.cnf") self.filename = cleanupAbsPath(self.filename) def parse(self): """ yank all the pertinent ssl data from a previously generated openssl.cnf. NOTE: we get a limited sampling of info here. We have no concept of the [ some heading ] divisions in the rhn_openssl.cnf file. """ d = {} try: fo = open(self.filename, 'r') except: return d line = fo.readline() while line: if line.strip() == '[ req_distinguished_name ]': break line = fo.readline() #genKeys = ['dir'] #caKeys = ['private_key', 'certificate',] keys = ['C', 'ST', 'L', 'O', 'OU', 'CN', 'emailAddress', ] # ] + caKeys + genKeys for s in fo.readlines(): s = s.strip() if len(s) > 2 and s[0]=='[' and s[-1]==']': break split = s.split() if not split or len(split) < 3: continue if split[0] not in keys: continue split = s.split('=') if len(split) != 2: continue for i in range(len(split)): split[i] = split[i].strip() d[split[0]] = split[1] return d def updateLegacy(self, newdir=None, verbosity=1): """ in slightly older formatted ca_openssl.cnf files, there was no dir setting seperate from the database and serial settings. This function fixes that setup. Most of the time this function short-circuits early. """ try: fo = open(self.filename, 'r') except: return if newdir is None: newdir = os.path.dirname(self.filename) newfile = "" in_CA_defaultYN = 0 dirSetYN = 0 line = fo.readline() while line: cleanLine = line.strip() # is this a label? 
isLabelYN = 0 if cleanLine \ and (cleanLine[0], cleanLine[-1]) == ('[',']'): isLabelYN = 1 if cleanLine == '[ CA_default ]': # we don't care much until we hit this label in_CA_defaultYN = 1 elif isLabelYN: in_CA_defaultYN = 0 # hit another label if in_CA_defaultYN: vector = line.split('=') if len(vector) == 2: key = vector[0].strip() if key == 'dir': # we should be OK - short-circuit return if key in ('database', 'serial'): # we never hit a "dir" key if not dirSetYN: newfile = newfile + """\ dir = %s database = $dir/index.txt serial = $dir/serial """ % newdir dirSetYN = 1 line = fo.readline() continue newfile = newfile + line line = fo.readline() try: rotated = rotateFile(filepath=self.filename, verbosity=verbosity) if verbosity>=0 and rotated: print("Rotated: %s --> %s" % (os.path.basename(self.filename), os.path.basename(rotated))) except ValueError: pass fo = open(self.filename, 'w') fo.write(newfile) fo.close() os.chmod(self.filename, int('0600',8)) return dirSetYN def updateDir(self, newdir=None, verbosity=0): """ changes the CA configuration file's directory setting (if need be) in place. Touches nothing else. """ if self.updateLegacy(newdir): return try: fo = open(self.filename, 'r') except: return olddir = '' if newdir is None: newdir = os.path.dirname(self.filename) newfile = "" hit_CA_defaultYN = 0 line = fo.readline() while line: if line.strip() == '[ CA_default ]': # we don't care much until we hit this label hit_CA_defaultYN = 1 if hit_CA_defaultYN: vector = line.split('=') if len(vector) == 2: key, value = vector if key.strip() == 'dir': value = value.strip() olddir = value line = '%s= %s\n' % (key, newdir) hit_CA_defaultYN = 0 if newdir == olddir: # nothing to do return newfile = newfile + line line = fo.readline() try: rotated = rotateFile(filepath=self.filename, verbosity=verbosity) if verbosity>=0 and rotated: print("Rotated: %s --> %s" % (os.path.basename(self.filename), os.path.basename(rotated))) except ValueError: pass fo = open(self.filename, 'w') fo.write(newfile) fo.close() os.chmod(self.filename, int('0600',8)) def save(self, d, caYN=0, verbosity=0): """ d == commandline dictionary """ mapping = { '--set-country' : 'C', '--set-state' : 'ST', '--set-city' : 'L', '--set-org' : 'O', '--set-org-unit' : 'OU', '--set-common-name' : 'CN', # these two will never occur at the '--set-hostname' : 'CN', # same time '--set-email' : 'emailAddress', } rdn = {} for k in d.keys(): if k in mapping: rdn[mapping[k]] = d[k].strip() openssl_cnf = '' if caYN: openssl_cnf = CONF_TEMPLATE_CA % ( os.path.dirname(self.filename)+'/', gen_req_distinguished_name(rdn), ) else: openssl_cnf = CONF_TEMPLATE_SERVER \ % (gen_req_distinguished_name(rdn), gen_req_alt_names(d, rdn['CN'])) try: rotated = rotateFile(filepath=self.filename,verbosity=verbosity) if verbosity>=0 and rotated: print("Rotated: %s --> %s" % (os.path.basename(self.filename), os.path.basename(rotated))) except ValueError: pass fo = open(self.filename, 'w') fo.write(openssl_cnf) fo.close() os.chmod(self.filename, int('0600',8)) return openssl_cnf ## ## generated RPM "configuration" dumping ground: ## POST_UNINSTALL_SCRIPT = """\ if [ \$1 = 0 ]; then # The following steps are copied from mod_ssl's postinstall scriptlet # Make sure the permissions are okay umask 077 if [ ! -f /etc/httpd/conf/ssl.key/server.key ] ; then /usr/bin/openssl genrsa -rand /proc/apm:/proc/cpuinfo:/proc/dma:/proc/filesystems:/proc/interrupts:/proc/ioports:/proc/pci:/proc/rtc:/proc/uptime 1024 > /etc/httpd/conf/ssl.key/server.key 2> /dev/null fi if [ ! 
-f /etc/httpd/conf/ssl.crt/server.crt ] ; then cat << EOF | /usr/bin/openssl req -new -key /etc/httpd/conf/ssl.key/server.key -x509 -days 365 -out /etc/httpd/conf/ssl.crt/server.crt 2>/dev/null -- SomeState SomeCity SomeOrganization SomeOrganizationalUnit localhost.localdomain [email protected] EOF fi /sbin/service httpd graceful exit 0 fi """ SERVER_RPM_SUMMARY = "Organizational server (httpd) SSL key-pair/key-set." CA_CERT_RPM_SUMMARY = ("Organizational public SSL CA certificate " "(client-side).") #===============================================================================
mcalmer/spacewalk
spacewalk/certs-tools/sslToolConfig.py
Python
gpl-2.0
26,685
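# Illustrative usage sketch for the sslToolConfig helpers above (not part of the
# original file): it shows how ConfigFile.save() assembles a server openssl.cnf
# from the distinguished-name and alt-name helpers.  The module import name and
# all sample values (org, hostname, cnames, output path) are assumptions.
from sslToolConfig import (CONF_TEMPLATE_SERVER, gen_req_distinguished_name,
                           gen_req_alt_names)

rdn = {
    'C': 'US', 'ST': 'NC', 'L': 'Raleigh',
    'O': 'Example Org', 'OU': 'IT',
    'CN': 'proxy.example.com',
    'emailAddress': 'ssl-admin@example.com',
}
opts = {'--set-cname': ['proxy', 'proxy.internal.example.com']}  # extra DNS.<n> entries

openssl_cnf = CONF_TEMPLATE_SERVER % (
    gen_req_distinguished_name(rdn),          # fills [ req_distinguished_name ]
    gen_req_alt_names(opts, rdn['CN']),       # fills [alt_names] (CN plus cnames)
)
open('rhn-server-openssl.cnf', 'w').write(openssl_cnf)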
# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re from neutron_lib.utils import helpers from oslo_log import log as logging import six from neutron._i18n import _, _LE, _LW from neutron.agent.linux import ip_link_support from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.agent import pci_lib LOG = logging.getLogger(__name__) class PciOsWrapper(object): """OS wrapper for checking virtual functions""" DEVICE_PATH = "/sys/class/net/%s/device" PCI_PATH = "/sys/class/net/%s/device/virtfn%s/net" VIRTFN_FORMAT = r"^virtfn(?P<vf_index>\d+)" VIRTFN_REG_EX = re.compile(VIRTFN_FORMAT) @classmethod def scan_vf_devices(cls, dev_name): """Scan os directories to get VF devices @param dev_name: pf network device name @return: list of virtual functions """ vf_list = [] dev_path = cls.DEVICE_PATH % dev_name if not os.path.isdir(dev_path): LOG.error(_LE("Failed to get devices for %s"), dev_name) raise exc.InvalidDeviceError(dev_name=dev_name, reason=_("Device not found")) file_list = os.listdir(dev_path) for file_name in file_list: pattern_match = cls.VIRTFN_REG_EX.match(file_name) if pattern_match: vf_index = int(pattern_match.group("vf_index")) file_path = os.path.join(dev_path, file_name) if os.path.islink(file_path): file_link = os.readlink(file_path) pci_slot = os.path.basename(file_link) vf_list.append((pci_slot, vf_index)) return vf_list @classmethod def is_assigned_vf(cls, dev_name, vf_index): """Check if VF is assigned. Checks if a given vf index of a given device name is assigned by checking the relevant path in the system: VF is assigned if: Direct VF: PCI_PATH does not exist. Macvtap VF: macvtap@<vf interface> interface exists in ip link show @param dev_name: pf network device name @param vf_index: vf index """ path = cls.PCI_PATH % (dev_name, vf_index) try: ifname_list = os.listdir(path) except OSError: # PCI_PATH does not exist means that the DIRECT VF assigend return True # Note(moshele) kernel < 3.13 doesn't create symbolic link # for macvtap interface. Therefore we workaround it # by parsing ip link show and checking if macvtap interface exists for ifname in ifname_list: if pci_lib.PciDeviceIPWrapper.is_macvtap_assigned(ifname): return True return False class EmbSwitch(object): """Class to manage logical embedded switch entity. Embedded Switch object is logical entity representing all VFs connected to same physical network Each physical network is mapped to PF network device interface, meaning all its VF, excluding the devices in exclude_device list. 
@ivar pci_slot_map: dictionary for mapping each pci slot to vf index @ivar pci_dev_wrapper: pci device wrapper """ def __init__(self, phys_net, dev_name, exclude_devices): """Constructor @param phys_net: physical network @param dev_name: network device name @param exclude_devices: list of pci slots to exclude """ self.phys_net = phys_net self.dev_name = dev_name self.pci_slot_map = {} self.pci_dev_wrapper = pci_lib.PciDeviceIPWrapper(dev_name) self._load_devices(exclude_devices) def _load_devices(self, exclude_devices): """Load devices from driver and filter if needed. @param exclude_devices: excluded devices mapping device_name: pci slots """ scanned_pci_list = PciOsWrapper.scan_vf_devices(self.dev_name) for pci_slot, vf_index in scanned_pci_list: if pci_slot not in exclude_devices: self.pci_slot_map[pci_slot] = vf_index def get_pci_slot_list(self): """Get list of VF addresses.""" return self.pci_slot_map.keys() def get_assigned_devices_info(self): """Get assigned Virtual Functions mac and pci slot information and populates vf_to_pci_slot mappings @return: list of VF pair (mac address, pci slot) """ vf_to_pci_slot_mapping = {} assigned_devices_info = [] for pci_slot, vf_index in self.pci_slot_map.items(): if not PciOsWrapper.is_assigned_vf(self.dev_name, vf_index): continue vf_to_pci_slot_mapping[vf_index] = pci_slot if vf_to_pci_slot_mapping: vf_to_mac_mapping = self.pci_dev_wrapper.get_assigned_macs( list(vf_to_pci_slot_mapping.keys())) for vf_index, mac in vf_to_mac_mapping.items(): pci_slot = vf_to_pci_slot_mapping[vf_index] assigned_devices_info.append((mac, pci_slot)) return assigned_devices_info def get_device_state(self, pci_slot): """Get device state. @param pci_slot: Virtual Function address """ vf_index = self._get_vf_index(pci_slot) return self.pci_dev_wrapper.get_vf_state(vf_index) def set_device_state(self, pci_slot, state): """Set device state. @param pci_slot: Virtual Function address @param state: link state """ vf_index = self._get_vf_index(pci_slot) return self.pci_dev_wrapper.set_vf_state(vf_index, state) def set_device_rate(self, pci_slot, rate_type, rate_kbps): """Set device rate: rate (max_tx_rate), min_tx_rate @param pci_slot: Virtual Function address @param rate_type: device rate name type. Could be 'rate' and 'min_tx_rate'. @param rate_kbps: device rate in kbps """ vf_index = self._get_vf_index(pci_slot) #NOTE(ralonsoh): ip link sets rate in Mbps therefore we need to convert #the rate_kbps value from kbps to Mbps. #Zero means to disable the rate so the lowest rate available is 1Mbps. 
#Floating numbers are not allowed if rate_kbps > 0 and rate_kbps < 1000: rate_mbps = 1 else: rate_mbps = helpers.round_val(rate_kbps / 1000.0) log_dict = { 'rate_mbps': rate_mbps, 'rate_kbps': rate_kbps, 'vf_index': vf_index, 'rate_type': rate_type } if rate_kbps % 1000 != 0: LOG.debug("'%(rate_type)s' for SR-IOV ports is counted in Mbps; " "setting %(rate_mbps)s Mbps limit for port %(vf_index)s " "instead of %(rate_kbps)s kbps", log_dict) else: LOG.debug("Setting %(rate_mbps)s Mbps limit for port %(vf_index)s", log_dict) return self.pci_dev_wrapper.set_vf_rate(vf_index, rate_type, rate_mbps) def _get_vf_index(self, pci_slot): vf_index = self.pci_slot_map.get(pci_slot) if vf_index is None: LOG.warning(_LW("Cannot find vf index for pci slot %s"), pci_slot) raise exc.InvalidPciSlotError(pci_slot=pci_slot) return vf_index def set_device_spoofcheck(self, pci_slot, enabled): """Set device spoofchecking @param pci_slot: Virtual Function address @param enabled: True to enable spoofcheck, False to disable """ vf_index = self.pci_slot_map.get(pci_slot) if vf_index is None: raise exc.InvalidPciSlotError(pci_slot=pci_slot) return self.pci_dev_wrapper.set_vf_spoofcheck(vf_index, enabled) def get_pci_device(self, pci_slot): """Get mac address for given Virtual Function address @param pci_slot: pci slot @return: MAC address of virtual function """ vf_index = self.pci_slot_map.get(pci_slot) mac = None if vf_index is not None: if PciOsWrapper.is_assigned_vf(self.dev_name, vf_index): macs = self.pci_dev_wrapper.get_assigned_macs([vf_index]) mac = macs.get(vf_index) return mac class ESwitchManager(object): """Manages logical Embedded Switch entities for physical network.""" def __new__(cls): # make it a singleton if not hasattr(cls, '_instance'): cls._instance = super(ESwitchManager, cls).__new__(cls) cls.emb_switches_map = {} cls.pci_slot_map = {} return cls._instance def device_exists(self, device_mac, pci_slot): """Verify if device exists. Check if a device mac exists and matches the given VF pci slot @param device_mac: device mac @param pci_slot: VF address """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: return True return False def get_assigned_devices_info(self, phys_net=None): """Get all assigned devices. Get all assigned devices belongs to given embedded switch @param phys_net: physical network, if none get all assigned devices @return: set of assigned VFs (mac address, pci slot) pair """ if phys_net: eswitch_objects = self.emb_switches_map.get(phys_net, set()) else: eswitch_objects = set() for eswitch_list in self.emb_switches_map.values(): eswitch_objects |= set(eswitch_list) assigned_devices = set() for embedded_switch in eswitch_objects: for device in embedded_switch.get_assigned_devices_info(): assigned_devices.add(device) return assigned_devices def get_device_state(self, device_mac, pci_slot): """Get device state. 
Get the device state (up/True or down/False) @param device_mac: device mac @param pci_slot: VF PCI slot @return: device state (True/False) None if failed """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: return embedded_switch.get_device_state(pci_slot) return False def set_device_max_rate(self, device_mac, pci_slot, max_kbps): """Set device max rate Sets the device max rate in kbps @param device_mac: device mac @param pci_slot: pci slot @param max_kbps: device max rate in kbps """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_rate( pci_slot, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, max_kbps) def set_device_min_tx_rate(self, device_mac, pci_slot, min_kbps): """Set device min_tx_rate Sets the device min_tx_rate in kbps @param device_mac: device mac @param pci_slot: pci slot @param max_kbps: device min_tx_rate in kbps """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_rate( pci_slot, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_MIN_TX_RATE, min_kbps) def set_device_state(self, device_mac, pci_slot, admin_state_up): """Set device state Sets the device state (up or down) @param device_mac: device mac @param pci_slot: pci slot @param admin_state_up: device admin state True/False """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_state(pci_slot, admin_state_up) def set_device_spoofcheck(self, device_mac, pci_slot, enabled): """Set device spoofcheck Sets device spoofchecking (enabled or disabled) @param device_mac: device mac @param pci_slot: pci slot @param enabled: device spoofchecking """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_spoofcheck(pci_slot, enabled) # Note(edan): discover_devices method will be removed # with 'physical_device_mappings' def discover_devices(self, device_mappings, exclude_devices): """Discover which Virtual functions to manage. Discover devices, and create embedded switch object for network device @param device_mappings: device mapping physical_network:device_name @param exclude_devices: excluded devices mapping device_name: pci slots """ if exclude_devices is None: exclude_devices = {} for phys_net, dev_names in six.iteritems(device_mappings): for dev_name in dev_names: self._create_emb_switch(phys_net, dev_name, exclude_devices.get(dev_name, set())) # Note(edan): phys_net param will be removed with # 'physical_device_mappings' config option. def _create_emb_switch(self, phys_net, dev_name, exclude_devices): embedded_switch = EmbSwitch(phys_net, dev_name, exclude_devices) self.emb_switches_map.setdefault(phys_net, []).append(embedded_switch) for pci_slot in embedded_switch.get_pci_slot_list(): self.pci_slot_map[pci_slot] = embedded_switch def _get_emb_eswitch(self, device_mac, pci_slot): """Get embedded switch. 
Get embedded switch by pci slot and validate pci has device mac @param device_mac: device mac @param pci_slot: pci slot """ embedded_switch = self.pci_slot_map.get(pci_slot) if embedded_switch: used_device_mac = embedded_switch.get_pci_device(pci_slot) if used_device_mac != device_mac: LOG.warning(_LW("device pci mismatch: %(device_mac)s " "- %(pci_slot)s"), {"device_mac": device_mac, "pci_slot": pci_slot}) embedded_switch = None return embedded_switch def clear_max_rate(self, pci_slot): """Clear the VF "rate" parameter Clear the "rate" configuration from VF by setting it to 0. @param pci_slot: VF PCI slot """ self._clear_rate(pci_slot, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE) def clear_min_tx_rate(self, pci_slot): """Clear the VF "min_tx_rate" parameter Clear the "min_tx_rate" configuration from VF by setting it to 0. @param pci_slot: VF PCI slot """ self._clear_rate(pci_slot, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_MIN_TX_RATE) def _clear_rate(self, pci_slot, rate_type): """Clear the VF rate parameter specified in rate_type Clear the rate configuration from VF by setting it to 0. @param pci_slot: VF PCI slot @param rate_type: rate to clear ('rate', 'min_tx_rate') """ #NOTE(Moshe Levi): we don't use the self._get_emb_eswitch here, because #when clearing the VF it may be not assigned. This happens when #libvirt releases the VF back to the hypervisor on delete VM. Therefore #we should just clear the VF rate according to pci_slot no matter #if VF is assigned or not. embedded_switch = self.pci_slot_map.get(pci_slot) if embedded_switch: #NOTE(Moshe Levi): check the pci_slot is not assigned to some #other port before resetting the rate. if embedded_switch.get_pci_device(pci_slot) is None: embedded_switch.set_device_rate(pci_slot, rate_type, 0) else: LOG.warning(_LW("VF with PCI slot %(pci_slot)s is already " "assigned; skipping reset for '%(rate_type)s' " "device configuration parameter"), {'pci_slot': pci_slot, 'rate_type': rate_type}) else: LOG.error(_LE("PCI slot %(pci_slot)s has no mapping to Embedded " "Switch; skipping"), {'pci_slot': pci_slot})
cloudbase/neutron
neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py
Python
apache-2.0
17,105
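# Illustrative usage sketch for ESwitchManager above (not part of the original
# module): roughly how the SR-IOV NIC agent drives it.  The physical network
# name, PF interface name and rate value are made-up samples, and the calls
# only do something useful on a host with SR-IOV VFs and root privileges.
from neutron.plugins.ml2.drivers.mech_sriov.agent.eswitch_manager import (
    ESwitchManager,
)

eswitch_mgr = ESwitchManager()                                  # singleton
eswitch_mgr.discover_devices({'physnet1': ['enp3s0f0']},        # phys_net -> PF names
                             {'enp3s0f0': set()})               # no excluded PCI slots

# (mac, pci_slot) pairs for every VF currently assigned to an instance
for mac, pci_slot in eswitch_mgr.get_assigned_devices_info('physnet1'):
    if eswitch_mgr.device_exists(mac, pci_slot):
        eswitch_mgr.set_device_state(mac, pci_slot, True)       # admin link up
        eswitch_mgr.set_device_max_rate(mac, pci_slot, 10000)   # cap at 10 Mbps (kbps arg)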
VERSION = (0, 5, 1)

from .decorators import job
from .queues import enqueue, get_connection, get_queue, get_scheduler
from .workers import get_worker
meteozond/django-rq
django_rq/__init__.py
Python
mit
151
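# Illustrative usage sketch for the helpers re-exported above (not part of the
# package): typical django_rq usage inside a Django project.  It assumes
# RQ_QUEUES is configured in settings and a Redis server is running; the task
# function and its argument are made-up examples.
import django_rq
from django_rq import job

@job('default')                      # queue name; .delay() enqueues the call
def send_welcome_email(user_id):
    print('sending welcome mail to user %s' % user_id)

send_welcome_email.delay(42)                  # enqueue through the decorator
django_rq.enqueue(send_welcome_email, 43)     # or enqueue any callable directly

queue = django_rq.get_queue('default')        # underlying rq.Queue
worker = django_rq.get_worker('default')      # rq Worker bound to that queue
# worker.work()  # blocks and starts processing queued jobs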
# Software License Agreement (BSD License) # # Copyright (c) 2013, Eric Perko # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the names of the authors nor the names of their # affiliated organizations may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import math import rospy from sensor_msgs.msg import NavSatFix, NavSatStatus, TimeReference from geometry_msgs.msg import TwistStamped from libjavad_navsat_driver.checksum_utils import check_nmea_checksum import libjavad_navsat_driver.parser class RosNMEADriver(object): def __init__(self): self.fix_pub = rospy.Publisher('fix', NavSatFix, queue_size=1) self.vel_pub = rospy.Publisher('vel', TwistStamped, queue_size=1) self.time_ref_pub = rospy.Publisher('time_reference', TimeReference, queue_size=1) self.time_ref_source = rospy.get_param('~time_ref_source', None) self.use_RMC = rospy.get_param('~useRMC', False) # Returns True if we successfully did something with the passed in # nmea_string def add_sentence(self, nmea_string, frame_id, timestamp=None): if not check_nmea_checksum(nmea_string): rospy.logwarn("Received a sentence with an invalid checksum. " + "Sentence was: %s" % repr(nmea_string)) return False parsed_sentence = libjavad_navsat_driver.parser.parse_nmea_sentence(nmea_string) if not parsed_sentence: rospy.logdebug("Failed to parse NMEA sentence. 
Sentece was: %s" % nmea_string) return False if timestamp: current_time = timestamp else: current_time = rospy.get_rostime() current_fix = NavSatFix() current_fix.header.stamp = current_time current_fix.header.frame_id = frame_id current_time_ref = TimeReference() current_time_ref.header.stamp = current_time current_time_ref.header.frame_id = frame_id if self.time_ref_source: current_time_ref.source = self.time_ref_source else: current_time_ref.source = frame_id if not self.use_RMC and 'GGA' in parsed_sentence: data = parsed_sentence['GGA'] gps_qual = data['fix_type'] if gps_qual == 0: current_fix.status.status = NavSatStatus.STATUS_NO_FIX elif gps_qual == 1: current_fix.status.status = NavSatStatus.STATUS_FIX elif gps_qual == 2: current_fix.status.status = NavSatStatus.STATUS_SBAS_FIX elif gps_qual in (4, 5): current_fix.status.status = NavSatStatus.STATUS_GBAS_FIX else: current_fix.status.status = NavSatStatus.STATUS_NO_FIX current_fix.status.service = NavSatStatus.SERVICE_GPS current_fix.header.stamp = current_time latitude = data['latitude'] if data['latitude_direction'] == 'S': latitude = -latitude current_fix.latitude = latitude longitude = data['longitude'] if data['longitude_direction'] == 'W': longitude = -longitude current_fix.longitude = longitude hdop = data['hdop'] current_fix.position_covariance[0] = hdop ** 2 current_fix.position_covariance[4] = hdop ** 2 current_fix.position_covariance[8] = (2 * hdop) ** 2 # FIXME current_fix.position_covariance_type = \ NavSatFix.COVARIANCE_TYPE_APPROXIMATED # Altitude is above ellipsoid, so adjust for mean-sea-level #altitude = data['altitude'] + data['mean_sea_level'] altitude = data['altitude'] current_fix.altitude = altitude self.fix_pub.publish(current_fix) if not math.isnan(data['utc_time']): current_time_ref.time_ref = rospy.Time.from_sec(data['utc_time']) self.time_ref_pub.publish(current_time_ref) elif 'RMC' in parsed_sentence: data = parsed_sentence['RMC'] # Only publish a fix from RMC if the use_RMC flag is set. if self.use_RMC: if data['fix_valid']: current_fix.status.status = NavSatStatus.STATUS_FIX else: current_fix.status.status = NavSatStatus.STATUS_NO_FIX current_fix.status.service = NavSatStatus.SERVICE_GPS latitude = data['latitude'] if data['latitude_direction'] == 'S': latitude = -latitude current_fix.latitude = latitude longitude = data['longitude'] if data['longitude_direction'] == 'W': longitude = -longitude current_fix.longitude = longitude current_fix.altitude = float('NaN') current_fix.position_covariance_type = \ NavSatFix.COVARIANCE_TYPE_UNKNOWN self.fix_pub.publish(current_fix) if not math.isnan(data['utc_time']): current_time_ref.time_ref = rospy.Time.from_sec(data['utc_time']) self.time_ref_pub.publish(current_time_ref) # Publish velocity from RMC regardless, since GGA doesn't provide it. 
if data['fix_valid']: current_vel = TwistStamped() current_vel.header.stamp = current_time current_vel.header.frame_id = frame_id current_vel.twist.linear.x = data['speed'] * \ math.sin(data['true_course']) current_vel.twist.linear.y = data['speed'] * \ math.cos(data['true_course']) current_vel.twist.angular.z = data['true_course'] self.vel_pub.publish(current_vel) else: return False """Helper method for getting the frame_id with the correct TF prefix""" @staticmethod def get_frame_id(): frame_id = rospy.get_param('~frame_id', 'gps') if frame_id[0] != "/": """Add the TF prefix""" prefix = "" prefix_param = rospy.search_param('tf_prefix') if prefix_param: prefix = rospy.get_param(prefix_param) if prefix[0] != "/": prefix = "/%s" % prefix return "%s/%s" % (prefix, frame_id) else: return frame_id
suzlab/Autoware
ros/src/sensing/drivers/gnss/packages/javad_navsat_driver/lib/libjavad_navsat_driver/driver.py
Python
bsd-3-clause
7,841
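# Illustrative usage sketch for RosNMEADriver above (not part of the library):
# a minimal serial-reader node in the spirit of the package's scripts.  The
# port/baud parameters are sample values and pyserial is assumed to be
# installed; a real node would add error handling for serial failures.
import serial
import rospy
from libjavad_navsat_driver.driver import RosNMEADriver

rospy.init_node('javad_navsat_driver')
port = rospy.get_param('~port', '/dev/ttyUSB0')
baud = rospy.get_param('~baud', 115200)

frame_id = RosNMEADriver.get_frame_id()
driver = RosNMEADriver()

gps = serial.Serial(port=port, baudrate=baud, timeout=2)
try:
    while not rospy.is_shutdown():
        sentence = gps.readline().strip()
        if sentence:
            driver.add_sentence(sentence, frame_id)
finally:
    gps.close()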
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import babel.dates import collections from datetime import datetime, timedelta from dateutil import parser from dateutil import rrule from dateutil.relativedelta import relativedelta import logging from operator import itemgetter import pytz import re import time import uuid from odoo import api, fields, models from odoo import tools from odoo.tools.translate import _ from odoo.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT from odoo.exceptions import UserError, ValidationError _logger = logging.getLogger(__name__) VIRTUALID_DATETIME_FORMAT = "%Y%m%d%H%M%S" def calendar_id2real_id(calendar_id=None, with_date=False): """ Convert a "virtual/recurring event id" (type string) into a real event id (type int). E.g. virtual/recurring event id is 4-20091201100000, so it will return 4. :param calendar_id: id of calendar :param with_date: if a value is passed to this param it will return dates based on value of withdate + calendar_id :return: real event id """ if calendar_id and isinstance(calendar_id, (basestring)): res = filter(None, calendar_id.split('-')) if len(res) == 2: real_id = res[0] if with_date: real_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT, time.strptime(res[1], VIRTUALID_DATETIME_FORMAT)) start = datetime.strptime(real_date, DEFAULT_SERVER_DATETIME_FORMAT) end = start + timedelta(hours=with_date) return (int(real_id), real_date, end.strftime(DEFAULT_SERVER_DATETIME_FORMAT)) return int(real_id) return calendar_id and int(calendar_id) or calendar_id def get_real_ids(ids): if isinstance(ids, (basestring, int, long)): return calendar_id2real_id(ids) if isinstance(ids, (list, tuple)): return [calendar_id2real_id(_id) for _id in ids] def real_id2calendar_id(record_id, date): return '%s-%s' % (record_id, date.strftime(VIRTUALID_DATETIME_FORMAT)) def is_calendar_id(record_id): return len(str(record_id).split('-')) != 1 class Contacts(models.Model): _name = 'calendar.contacts' user_id = fields.Many2one('res.users', 'Me', default=lambda self: self.env.user) partner_id = fields.Many2one('res.partner', 'Employee', required=True) active = fields.Boolean('Active', default=True) @api.model def unlink_from_partner_id(self, partner_id): return self.search([('partner_id', '=', partner_id)]).unlink() class Attendee(models.Model): """ Calendar Attendee Information """ _name = 'calendar.attendee' _rec_name = 'common_name' _description = 'Attendee information' def _default_access_token(self): return uuid.uuid4().hex STATE_SELECTION = [ ('needsAction', 'Needs Action'), ('tentative', 'Uncertain'), ('declined', 'Declined'), ('accepted', 'Accepted'), ] state = fields.Selection(STATE_SELECTION, string='Status', readonly=True, default='needsAction', help="Status of the attendee's participation") common_name = fields.Char('Common name', compute='_compute_common_name', store=True) partner_id = fields.Many2one('res.partner', 'Contact', readonly="True") email = fields.Char('Email', help="Email of Invited Person") availability = fields.Selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly="True") access_token = fields.Char('Invitation Token', default=_default_access_token) event_id = fields.Many2one('calendar.event', 'Meeting linked', ondelete='cascade') @api.depends('partner_id', 'partner_id.name', 'email') def _compute_common_name(self): for attendee in self: attendee.common_name = attendee.partner_id.name or attendee.email @api.onchange('partner_id') def 
_onchange_partner_id(self): """ Make entry on email and availability on change of partner_id field. """ self.email = self.partner_id.email @api.model def create(self, values): if not values.get("email") and values.get("common_name"): common_nameval = values.get("common_name").split(':') email = filter(lambda x: x.__contains__('@'), common_nameval) # TODO JEM : should be refactored values['email'] = email and email[0] or '' values['common_name'] = values.get("common_name") return super(Attendee, self).create(values) @api.multi def copy(self, default=None): raise UserError(_('You cannot duplicate a calendar attendee.')) @api.multi def _send_mail_to_attendees(self, template_xmlid, force_send=False): """ Send mail for event invitation to event attendees. :param template_xmlid: xml id of the email template to use to send the invitation :param force_send: if set to True, the mail(s) will be sent immediately (instead of the next queue processing) """ res = False if self.env['ir.config_parameter'].get_param('calendar.block_mail') or self._context.get("no_mail_to_attendees"): return res calendar_view = self.env.ref('calendar.view_calendar_event_calendar') invitation_template = self.env.ref(template_xmlid) # get ics file for all meetings ics_files = self.mapped('event_id').get_ics_file() # prepare rendering context for mail template colors = { 'needsAction': 'grey', 'accepted': 'green', 'tentative': '#FFFF00', 'declined': 'red' } rendering_context = dict(self._context) rendering_context.update({ 'color': colors, 'action_id': self.env['ir.actions.act_window'].search([('view_id', '=', calendar_view.id)], limit=1).id, 'dbname': self._cr.dbname, 'base_url': self.env['ir.config_parameter'].get_param('web.base.url', default='http://localhost:8069') }) invitation_template = invitation_template.with_context(rendering_context) # send email with attachments mails_to_send = self.env['mail.mail'] for attendee in self: if attendee.email or attendee.partner_id.email: ics_file = ics_files.get(attendee.event_id.id) mail_id = invitation_template.send_mail(attendee.id) vals = {} if ics_file: vals['attachment_ids'] = [(0, 0, {'name': 'invitation.ics', 'datas_fname': 'invitation.ics', 'datas': str(ics_file).encode('base64')})] vals['model'] = None # We don't want to have the mail in the tchatter while in queue! vals['res_id'] = False current_mail = self.env['mail.mail'].browse(mail_id) current_mail.mail_message_id.write(vals) mails_to_send |= current_mail if force_send and mails_to_send: res = mails_to_send.send() return res @api.multi def do_tentative(self): """ Makes event invitation as Tentative. """ return self.write({'state': 'tentative'}) @api.multi def do_accept(self): """ Marks event invitation as Accepted. """ result = self.write({'state': 'accepted'}) for attendee in self: attendee.event_id.message_post(body=_("%s has accepted invitation") % (attendee.common_name), subtype="calendar.subtype_invitation") return result @api.multi def do_decline(self): """ Marks event invitation as Declined. 
""" res = self.write({'state': 'declined'}) for attendee in self: attendee.event_id.message_post(body=_("%s has declined invitation") % (attendee.common_name), subtype="calendar.subtype_invitation") return res class AlarmManager(models.AbstractModel): _name = 'calendar.alarm_manager' def get_next_potential_limit_alarm(self, alarm_type, seconds=None, partner_id=None): result = {} delta_request = """ SELECT rel.calendar_event_id, max(alarm.duration_minutes) AS max_delta,min(alarm.duration_minutes) AS min_delta FROM calendar_alarm_calendar_event_rel AS rel LEFT JOIN calendar_alarm AS alarm ON alarm.id = rel.calendar_alarm_id WHERE alarm.type = %s GROUP BY rel.calendar_event_id """ base_request = """ SELECT cal.id, cal.start - interval '1' minute * calcul_delta.max_delta AS first_alarm, CASE WHEN cal.recurrency THEN cal.final_date - interval '1' minute * calcul_delta.min_delta ELSE cal.stop - interval '1' minute * calcul_delta.min_delta END as last_alarm, cal.start as first_event_date, CASE WHEN cal.recurrency THEN cal.final_date ELSE cal.stop END as last_event_date, calcul_delta.min_delta, calcul_delta.max_delta, cal.rrule AS rule FROM calendar_event AS cal RIGHT JOIN calcul_delta ON calcul_delta.calendar_event_id = cal.id """ filter_user = """ RIGHT JOIN calendar_event_res_partner_rel AS part_rel ON part_rel.calendar_event_id = cal.id AND part_rel.res_partner_id = %s """ # Add filter on alarm type tuple_params = (alarm_type,) # Add filter on partner_id if partner_id: base_request += filter_user tuple_params += (partner_id, ) # Upper bound on first_alarm of requested events first_alarm_max_value = "" if seconds is None: # first alarm in the future + 3 minutes if there is one, now otherwise first_alarm_max_value = """ COALESCE((SELECT MIN(cal.start - interval '1' minute * calcul_delta.max_delta) FROM calendar_event cal RIGHT JOIN calcul_delta ON calcul_delta.calendar_event_id = cal.id WHERE cal.start - interval '1' minute * calcul_delta.max_delta > now() at time zone 'utc' ) + interval '3' minute, now() at time zone 'utc')""" else: # now + given seconds first_alarm_max_value = "(now() at time zone 'utc' + interval '%s' second )" tuple_params += (seconds,) self._cr.execute(""" WITH calcul_delta AS (%s) SELECT * FROM ( %s WHERE cal.active = True ) AS ALL_EVENTS WHERE ALL_EVENTS.first_alarm < %s AND ALL_EVENTS.last_event_date > (now() at time zone 'utc') """ % (delta_request, base_request, first_alarm_max_value), tuple_params) for event_id, first_alarm, last_alarm, first_meeting, last_meeting, min_duration, max_duration, rule in self._cr.fetchall(): result[event_id] = { 'event_id': event_id, 'first_alarm': first_alarm, 'last_alarm': last_alarm, 'first_meeting': first_meeting, 'last_meeting': last_meeting, 'min_duration': min_duration, 'max_duration': max_duration, 'rrule': rule } return result def do_check_alarm_for_one_date(self, one_date, event, event_maxdelta, in_the_next_X_seconds, alarm_type, after=False, missing=False): """ Search for some alarms in the interval of time determined by some parameters (after, in_the_next_X_seconds, ...) 
:param one_date: date of the event to check (not the same that in the event browse if recurrent) :param event: Event browse record :param event_maxdelta: biggest duration from alarms for this event :param in_the_next_X_seconds: looking in the future (in seconds) :param after: if not False: will return alert if after this date (date as string - todo: change in master) :param missing: if not False: will return alert even if we are too late :param notif: Looking for type notification :param mail: looking for type email """ result = [] # TODO: remove event_maxdelta and if using it if one_date - timedelta(minutes=(missing and 0 or event_maxdelta)) < datetime.now() + timedelta(seconds=in_the_next_X_seconds): # if an alarm is possible for this date for alarm in event.alarm_ids: if alarm.type == alarm_type and \ one_date - timedelta(minutes=(missing and 0 or alarm.duration_minutes)) < datetime.now() + timedelta(seconds=in_the_next_X_seconds) and \ (not after or one_date - timedelta(minutes=alarm.duration_minutes) > fields.Datetime.from_string(after)): alert = { 'alarm_id': alarm.id, 'event_id': event.id, 'notify_at': one_date - timedelta(minutes=alarm.duration_minutes), } result.append(alert) return result @api.model def get_next_mail(self): now = fields.Datetime.now() last_notif_mail = self.env['ir.config_parameter'].sudo().get_param('calendar.last_notif_mail', default=now) try: cron = self.env['ir.model.data'].sudo().get_object('calendar', 'ir_cron_scheduler_alarm') except ValueError: _logger.error("Cron for " + self._name + " can not be identified !") return False interval_to_second = { "weeks": 7 * 24 * 60 * 60, "days": 24 * 60 * 60, "hours": 60 * 60, "minutes": 60, "seconds": 1 } if cron.interval_type not in interval_to_second: _logger.error("Cron delay can not be computed !") return False cron_interval = cron.interval_number * interval_to_second[cron.interval_type] all_meetings = self.get_next_potential_limit_alarm('email', seconds=cron_interval) for meeting in self.env['calendar.event'].browse(all_meetings.keys()): max_delta = all_meetings[meeting.id]['max_duration'] if meeting.recurrency: at_least_one = False last_found = False for one_date in meeting._get_recurrent_date_by_event(): in_date_format = one_date.replace(tzinfo=None) last_found = self.do_check_alarm_for_one_date(in_date_format, meeting, max_delta, 0, 'email', after=last_notif_mail, missing=True) for alert in last_found: self.do_mail_reminder(alert) at_least_one = True # if it's the first alarm for this recurrent event if at_least_one and not last_found: # if the precedent event had an alarm but not this one, we can stop the search for this event break else: in_date_format = datetime.strptime(meeting.start, DEFAULT_SERVER_DATETIME_FORMAT) last_found = self.do_check_alarm_for_one_date(in_date_format, meeting, max_delta, 0, 'email', after=last_notif_mail, missing=True) for alert in last_found: self.do_mail_reminder(alert) self.env['ir.config_parameter'].sudo().set_param('calendar.last_notif_mail', now) @api.model def get_next_notif(self): partner = self.env.user.partner_id all_notif = [] if not partner: return [] all_meetings = self.get_next_potential_limit_alarm('notification', partner_id=partner.id) time_limit = 3600 * 24 # return alarms of the next 24 hours for event_id in all_meetings: max_delta = all_meetings[event_id]['max_duration'] meeting = self.env['calendar.event'].browse(event_id) if meeting.recurrency: b_found = False last_found = False for one_date in meeting._get_recurrent_date_by_event(): in_date_format = 
one_date.replace(tzinfo=None) last_found = self.do_check_alarm_for_one_date(in_date_format, meeting, max_delta, time_limit, 'notification', after=partner.calendar_last_notif_ack) if last_found: for alert in last_found: all_notif.append(self.do_notif_reminder(alert)) if not b_found: # if it's the first alarm for this recurrent event b_found = True if b_found and not last_found: # if the precedent event had alarm but not this one, we can stop the search fot this event break else: in_date_format = fields.Datetime.from_string(meeting.start) last_found = self.do_check_alarm_for_one_date(in_date_format, meeting, max_delta, time_limit, 'notification', after=partner.calendar_last_notif_ack) if last_found: for alert in last_found: all_notif.append(self.do_notif_reminder(alert)) return all_notif def do_mail_reminder(self, alert): meeting = self.env['calendar.event'].browse(alert['event_id']) alarm = self.env['calendar.alarm'].browse(alert['alarm_id']) result = False if alarm.type == 'email': result = meeting.attendee_ids._send_mail_to_attendees('calendar.calendar_template_meeting_reminder', force_send=True) return result def do_notif_reminder(self, alert): alarm = self.env['calendar.alarm'].browse(alert['alarm_id']) meeting = self.env['calendar.event'].browse(alert['event_id']) if alarm.type == 'notification': message = meeting.display_time delta = alert['notify_at'] - datetime.now() delta = delta.seconds + delta.days * 3600 * 24 return { 'event_id': meeting.id, 'title': meeting.name, 'message': message, 'timer': delta, 'notify_at': fields.Datetime.to_string(alert['notify_at']), } def notify_next_alarm(self, partner_ids): """ Sends through the bus the next alarm of given partners """ notifications = [] users = self.env['res.users'].search([('partner_id', 'in', tuple(partner_ids))]) for user in users: notif = self.sudo(user.id).get_next_notif() notifications.append([(self._cr.dbname, 'calendar.alarm', user.partner_id.id), notif]) if len(notifications) > 0: self.env['bus.bus'].sendmany(notifications) class Alarm(models.Model): _name = 'calendar.alarm' _description = 'Event alarm' @api.depends('interval', 'duration') def _compute_duration_minutes(self): for alarm in self: if alarm.interval == "minutes": alarm.duration_minutes = alarm.duration elif alarm.interval == "hours": alarm.duration_minutes = alarm.duration * 60 elif alarm.interval == "days": alarm.duration_minutes = alarm.duration * 60 * 24 else: alarm.duration_minutes = 0 _interval_selection = {'minutes': 'Minute(s)', 'hours': 'Hour(s)', 'days': 'Day(s)'} name = fields.Char('Name', required=True) type = fields.Selection([('notification', 'Notification'), ('email', 'Email')], 'Type', required=True, default='email') duration = fields.Integer('Amount', required=True, default=1) interval = fields.Selection(list(_interval_selection.iteritems()), 'Unit', required=True, default='hours') duration_minutes = fields.Integer('Duration in minutes', compute='_compute_duration_minutes', store=True, help="Duration in minutes") @api.onchange('duration', 'interval') def _onchange_duration_interval(self): display_interval = self._interval_selection.get(self.interval, '') self.name = str(self.duration) + ' ' + display_interval def _update_cron(self): try: cron = self.env['ir.model.data'].sudo().get_object('calendar', 'ir_cron_scheduler_alarm') except ValueError: return False return cron.toggle(model=self._name, domain=[('type', '=', 'email')]) @api.model def create(self, values): result = super(Alarm, self).create(values) self._update_cron() return result 
@api.multi def write(self, values): result = super(Alarm, self).write(values) self._update_cron() return result @api.multi def unlink(self): result = super(Alarm, self).unlink() self._update_cron() return result class MeetingType(models.Model): _name = 'calendar.event.type' _description = 'Meeting Type' name = fields.Char('Name', required=True) _sql_constraints = [ ('name_uniq', 'unique (name)', "Tag name already exists !"), ] class Meeting(models.Model): """ Model for Calendar Event Special context keys : - `no_mail_to_attendees` : disabled sending email to attendees when creating/editing a meeting """ _name = 'calendar.event' _description = "Event" _order = "id desc" _inherit = ["mail.thread", "ir.needaction_mixin"] @api.model def _default_partners(self): """ When active_model is res.partner, the current partners should be attendees """ partners = self.env.user.partner_id active_id = self._context.get('active_id') if self._context.get('active_model') == 'res.partner' and active_id: if active_id not in partners.ids: partners |= self.env['res.partner'].browse(active_id) return partners @api.multi def _get_recurrent_dates_by_event(self): """ Get recurrent start and stop dates based on Rule string""" start_dates = self._get_recurrent_date_by_event(date_field='start') stop_dates = self._get_recurrent_date_by_event(date_field='stop') return zip(start_dates, stop_dates) @api.multi def _get_recurrent_date_by_event(self, date_field='start'): """ Get recurrent dates based on Rule string and all event where recurrent_id is child date_field: the field containing the reference date information for recurrency computation """ self.ensure_one() if date_field in self._fields.keys() and self._fields[date_field].type in ('date', 'datetime'): reference_date = self[date_field] else: reference_date = self.start def todate(date): val = parser.parse(''.join((re.compile('\d')).findall(date))) ## Dates are localized to saved timezone if any, else current timezone. if not val.tzinfo: val = pytz.UTC.localize(val) return val.astimezone(timezone) timezone = pytz.timezone(self._context.get('tz') or 'UTC') event_date = pytz.UTC.localize(fields.Datetime.from_string(reference_date)) # Add "+hh:mm" timezone if not event_date: event_date = datetime.now() if self.allday and self.rrule and 'UNTIL' in self.rrule and 'Z' not in self.rrule: rset1 = rrule.rrulestr(str(self.rrule), dtstart=event_date.replace(tzinfo=None), forceset=True, ignoretz=True) else: # Convert the event date to saved timezone (or context tz) as it'll # define the correct hour/day asked by the user to repeat for recurrence. event_date = event_date.astimezone(timezone) # transform "+hh:mm" timezone rset1 = rrule.rrulestr(str(self.rrule), dtstart=event_date, forceset=True, tzinfos={}) recurring_meetings = self.search([('recurrent_id', '=', self.id), '|', ('active', '=', False), ('active', '=', True)]) for meeting in recurring_meetings: rset1._exdate.append(todate(meeting.recurrent_id_date)) return [d.astimezone(pytz.UTC) if d.tzinfo else d for d in rset1] @api.multi def _get_recurrency_end_date(self): """ Return the last date a recurring event happens, according to its end_type. 
""" self.ensure_one() data = self.read(['final_date', 'recurrency', 'rrule_type', 'count', 'end_type', 'stop', 'interval'])[0] if not data.get('recurrency'): return False end_type = data.get('end_type') final_date = data.get('final_date') if end_type == 'count' and all(data.get(key) for key in ['count', 'rrule_type', 'stop', 'interval']): count = (data['count'] + 1) * data['interval'] delay, mult = { 'daily': ('days', 1), 'weekly': ('days', 7), 'monthly': ('months', 1), 'yearly': ('years', 1), }[data['rrule_type']] deadline = fields.Datetime.from_string(data['stop']) return deadline + relativedelta(**{delay: count * mult}) return final_date @api.multi def _find_my_attendee(self): """ Return the first attendee where the user connected has been invited from all the meeting_ids in parameters. """ self.ensure_one() for attendee in self.attendee_ids: if self.env.user.partner_id == attendee.partner_id: return attendee return False @api.model def _get_date_formats(self): """ get current date and time format, according to the context lang :return: a tuple with (format date, format time) """ lang = self._context.get("lang") lang_params = {} if lang: record_lang = self.env['res.lang'].search([("code", "=", lang)], limit=1) lang_params = { 'date_format': record_lang.date_format, 'time_format': record_lang.time_format } # formats will be used for str{f,p}time() which do not support unicode in Python 2, coerce to str format_date = lang_params.get("date_format", '%B-%d-%Y').encode('utf-8') format_time = lang_params.get("time_format", '%I-%M %p').encode('utf-8') return (format_date, format_time) @api.model def _get_recurrent_fields(self): return ['byday', 'recurrency', 'final_date', 'rrule_type', 'month_by', 'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa', 'su', 'day', 'week_list'] @api.model def _get_display_time(self, start, stop, zduration, zallday): """ Return date and time (from to from) based on duration with timezone in string. Eg : 1) if user add duration for 2 hours, return : August-23-2013 at (04-30 To 06-30) (Europe/Brussels) 2) if event all day ,return : AllDay, July-31-2013 """ timezone = self._context.get('tz') if not timezone: timezone = self.env.user.partner_id.tz or 'UTC' timezone = tools.ustr(timezone).encode('utf-8') # make safe for str{p,f}time() # get date/time format according to context format_date, format_time = self.with_context(tz=timezone)._get_date_formats() # convert date and time into user timezone date = fields.Datetime.context_timestamp(self.with_context(tz=timezone), fields.Datetime.from_string(start)) date_deadline = fields.Datetime.context_timestamp(self.with_context(tz=timezone), fields.Datetime.from_string(stop)) # convert into string the date and time, using user formats date_str = date.strftime(format_date) time_str = date.strftime(format_time) if zallday: display_time = _("AllDay , %s") % (date_str) elif zduration < 24: duration = date + timedelta(hours=zduration) display_time = _("%s at (%s To %s) (%s)") % (date_str, time_str, duration.strftime(format_time), timezone) else: display_time = _("%s at %s To\n %s at %s (%s)") % (date_str, time_str, date_deadline.strftime(format_date), date_deadline.strftime(format_time), timezone) return display_time def _get_duration(self, start, stop): """ Get the duration value between the 2 given dates. 
""" if start and stop: diff = fields.Datetime.from_string(stop) - fields.Datetime.from_string(start) if diff: duration = float(diff.days) * 24 + (float(diff.seconds) / 3600) return round(duration, 2) return 0.0 name = fields.Char('Meeting Subject', required=True, states={'done': [('readonly', True)]}) state = fields.Selection([('draft', 'Unconfirmed'), ('open', 'Confirmed')], string='Status', readonly=True, track_visibility='onchange', default='draft') is_attendee = fields.Boolean('Attendee', compute='_compute_attendee') attendee_status = fields.Selection(Attendee.STATE_SELECTION, string='Attendee Status', compute='_compute_attendee') display_time = fields.Char('Event Time', compute='_compute_display_time') display_start = fields.Char('Date', compute='_compute_display_start', store=True) start = fields.Datetime('Start', required=True, help="Start date of an event, without time for full days events") stop = fields.Datetime('Stop', required=True, help="Stop date of an event, without time for full days events") allday = fields.Boolean('All Day', states={'done': [('readonly', True)]}, default=False) start_date = fields.Date('Start Date', compute='_compute_dates', inverse='_inverse_dates', store=True, states={'done': [('readonly', True)]}, track_visibility='onchange') start_datetime = fields.Datetime('Start DateTime', compute='_compute_dates', inverse='_inverse_dates', store=True, states={'done': [('readonly', True)]}, track_visibility='onchange') stop_date = fields.Date('End Date', compute='_compute_dates', inverse='_inverse_dates', store=True, states={'done': [('readonly', True)]}, track_visibility='onchange') stop_datetime = fields.Datetime('End Datetime', compute='_compute_dates', inverse='_inverse_dates', store=True, states={'done': [('readonly', True)]}, track_visibility='onchange') # old date_deadline duration = fields.Float('Duration', states={'done': [('readonly', True)]}) description = fields.Text('Description', states={'done': [('readonly', True)]}) privacy = fields.Selection([('public', 'Everyone'), ('private', 'Only me'), ('confidential', 'Only internal users')], 'Privacy', default='public', states={'done': [('readonly', True)]}, oldname="class") location = fields.Char('Location', states={'done': [('readonly', True)]}, track_visibility='onchange', help="Location of Event") show_as = fields.Selection([('free', 'Free'), ('busy', 'Busy')], 'Show Time as', states={'done': [('readonly', True)]}, default='busy') # RECURRENCE FIELD rrule = fields.Char('Recurrent Rule', compute='_compute_rrule', inverse='_inverse_rrule', store=True) rrule_type = fields.Selection([ ('daily', 'Day(s)'), ('weekly', 'Week(s)'), ('monthly', 'Month(s)'), ('yearly', 'Year(s)') ], string='Recurrency', states={'done': [('readonly', True)]}, help="Let the event automatically repeat at that interval") recurrency = fields.Boolean('Recurrent', help="Recurrent Meeting") recurrent_id = fields.Integer('Recurrent ID') recurrent_id_date = fields.Datetime('Recurrent ID date') end_type = fields.Selection([ ('count', 'Number of repetitions'), ('end_date', 'End date') ], string='Recurrence Termination', default='count') interval = fields.Integer(string='Repeat Every', default=1, help="Repeat every (Days/Week/Month/Year)") count = fields.Integer(string='Repeat', help="Repeat x times", default=1) mo = fields.Boolean('Mon') tu = fields.Boolean('Tue') we = fields.Boolean('Wed') th = fields.Boolean('Thu') fr = fields.Boolean('Fri') sa = fields.Boolean('Sat') su = fields.Boolean('Sun') month_by = fields.Selection([ ('date', 'Date of 
month'), ('day', 'Day of month') ], string='Option', default='date', oldname='select1') day = fields.Integer('Date of month', default=1) week_list = fields.Selection([ ('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'Wednesday'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday'), ('SU', 'Sunday') ], string='Weekday') byday = fields.Selection([ ('1', 'First'), ('2', 'Second'), ('3', 'Third'), ('4', 'Fourth'), ('5', 'Fifth'), ('-1', 'Last') ], string='By day') final_date = fields.Date('Repeat Until') user_id = fields.Many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}, default=lambda self: self.env.user) color_partner_id = fields.Integer("Color index of creator", compute='_compute_color_partner', store=False) active = fields.Boolean('Active', default=True, help="If the active field is set to false, it will allow you to hide the event alarm information without removing it.") categ_ids = fields.Many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags') attendee_ids = fields.One2many('calendar.attendee', 'event_id', 'Participant', ondelete='cascade') partner_ids = fields.Many2many('res.partner', 'calendar_event_res_partner_rel', string='Attendees', states={'done': [('readonly', True)]}, default=_default_partners) alarm_ids = fields.Many2many('calendar.alarm', 'calendar_alarm_calendar_event_rel', string='Reminders', ondelete="restrict", copy=False) @api.multi def _compute_attendee(self): for meeting in self: attendee = meeting._find_my_attendee() meeting.is_attendee = bool(attendee) meeting.attendee_status = attendee.state if attendee else 'needsAction' @api.multi def _compute_display_time(self): for meeting in self: meeting.display_time = self._get_display_time(meeting.start, meeting.stop, meeting.duration, meeting.allday) @api.multi @api.depends('allday', 'start_date', 'start_datetime') def _compute_display_start(self): for meeting in self: meeting.display_start = meeting.start_date if meeting.allday else meeting.start_datetime @api.multi @api.depends('allday', 'start', 'stop') def _compute_dates(self): """ Adapt the value of start_date(time)/stop_date(time) according to start/stop fields and allday. Also, compute the duration for not allday meeting ; otherwise the duration is set to zero, since the meeting last all the day. 
""" for meeting in self: if meeting.allday: meeting.start_date = meeting.start meeting.start_datetime = False meeting.stop_date = meeting.stop meeting.stop_datetime = False meeting.duration = 0.0 else: meeting.start_date = False meeting.start_datetime = meeting.start meeting.stop_date = False meeting.stop_datetime = meeting.stop meeting.duration = self._get_duration(meeting.start, meeting.stop) @api.multi def _inverse_dates(self): for meeting in self: if meeting.allday: tz = pytz.timezone(self.env.user.tz) if self.env.user.tz else pytz.utc enddate = fields.Datetime.from_string(meeting.stop_date) enddate = tz.localize(enddate) enddate = enddate.replace(hour=18) enddate = enddate.astimezone(pytz.utc) meeting.stop = fields.Datetime.to_string(enddate) startdate = fields.Datetime.from_string(meeting.start_date) startdate = tz.localize(startdate) # Add "+hh:mm" timezone startdate = startdate.replace(hour=8) # Set 8 AM in localtime startdate = startdate.astimezone(pytz.utc) # Convert to UTC meeting.start = fields.Datetime.to_string(startdate) else: meeting.start = meeting.start_datetime meeting.stop = meeting.stop_datetime @api.depends('byday', 'recurrency', 'final_date', 'rrule_type', 'month_by', 'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa', 'su', 'day', 'week_list') def _compute_rrule(self): """ Gets Recurrence rule string according to value type RECUR of iCalendar from the values given. :return dictionary of rrule value. """ for meeting in self: if meeting.recurrency: meeting.rrule = meeting._rrule_serialize() else: meeting.rrule = '' @api.multi def _inverse_rrule(self): for meeting in self: if meeting.rrule: data = self._rrule_default_values() data['recurrency'] = True data.update(self._rrule_parse(meeting.rrule, data, meeting.start)) meeting.update(data) @api.multi def _compute_color_partner(self): for meeting in self: meeting.color_partner_id = meeting.user_id.partner_id.id @api.constrains('start_datetime', 'stop_datetime', 'start_date', 'stop_date') def _check_closing_date(self): for meeting in self: if meeting.start_datetime and meeting.stop_datetime and meeting.stop_datetime < meeting.start_datetime: raise ValidationError(_('Ending datetime cannot be set before starting datetime.')) if meeting.start_date and meeting.stop_date and meeting.stop_date < meeting.start_date: raise ValidationError(_('Ending date cannot be set before starting date.')) @api.onchange('start_datetime', 'duration') def _onchange_duration(self): if self.start_datetime: start = fields.Datetime.from_string(self.start_datetime) self.start = self.start_datetime self.stop = fields.Datetime.to_string(start + timedelta(hours=self.duration)) #################################################### # Calendar Business, Reccurency, ... #################################################### @api.multi def get_ics_file(self): """ Returns iCalendar file for the event invitation. :returns a dict of .ics file content for each meeting """ result = {} def ics_datetime(idate, allday=False): if idate: if allday: return fields.Date.from_string(idate) else: return fields.Datetime.from_string(idate).replace(tzinfo=pytz.timezone('UTC')) return False try: # FIXME: why isn't this in CalDAV? import vobject except ImportError: _logger.warning("The `vobject` Python module is not installed, so iCal file generation is unavailable. 
Use 'pip install vobject' to install it") return result for meeting in self: cal = vobject.iCalendar() event = cal.add('vevent') if not meeting.start or not meeting.stop: raise UserError(_("First you have to specify the date of the invitation.")) event.add('created').value = ics_datetime(time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)) event.add('dtstart').value = ics_datetime(meeting.start, meeting.allday) event.add('dtend').value = ics_datetime(meeting.stop, meeting.allday) event.add('summary').value = meeting.name if meeting.description: event.add('description').value = meeting.description if meeting.location: event.add('location').value = meeting.location if meeting.rrule: event.add('rrule').value = meeting.rrule if meeting.alarm_ids: for alarm in meeting.alarm_ids: valarm = event.add('valarm') interval = alarm.interval duration = alarm.duration trigger = valarm.add('TRIGGER') trigger.params['related'] = ["START"] if interval == 'days': delta = timedelta(days=duration) elif interval == 'hours': delta = timedelta(hours=duration) elif interval == 'minutes': delta = timedelta(minutes=duration) trigger.value = delta valarm.add('DESCRIPTION').value = alarm.name or 'Odoo' for attendee in meeting.attendee_ids: attendee_add = event.add('attendee') attendee_add.value = 'MAILTO:' + (attendee.email or '') result[meeting.id] = cal.serialize() return result @api.multi def create_attendees(self): current_user = self.env.user result = {} for meeting in self: alreay_meeting_partners = meeting.attendee_ids.mapped('partner_id') meeting_attendees = self.env['calendar.attendee'] meeting_partners = self.env['res.partner'] for partner in meeting.partner_ids.filtered(lambda partner: partner not in alreay_meeting_partners): values = { 'partner_id': partner.id, 'email': partner.email, 'event_id': meeting.id, } # current user don't have to accept his own meeting if partner == self.env.user.partner_id: values['state'] = 'accepted' attendee = self.env['calendar.attendee'].create(values) meeting_attendees |= attendee meeting_partners |= partner if meeting_attendees: to_notify = meeting_attendees.filtered(lambda a: a.email != current_user.email) to_notify._send_mail_to_attendees('calendar.calendar_template_meeting_invitation') meeting.write({'attendee_ids': [(4, meeting_attendee.id) for meeting_attendee in meeting_attendees]}) if meeting_partners: meeting.message_subscribe(partner_ids=meeting_partners.ids) # We remove old attendees who are not in partner_ids now. 
all_partners = meeting.partner_ids all_partner_attendees = meeting.attendee_ids.mapped('partner_id') old_attendees = meeting.attendee_ids partners_to_remove = all_partner_attendees + meeting_partners - all_partners attendees_to_remove = self.env["calendar.attendee"] if partners_to_remove: attendees_to_remove = self.env["calendar.attendee"].search([('partner_id', 'in', partners_to_remove.ids), ('event_id', '=', meeting.id)]) attendees_to_remove.unlink() result[meeting.id] = { 'new_attendees': meeting_attendees, 'old_attendees': old_attendees, 'removed_attendees': attendees_to_remove, 'removed_partners': partners_to_remove } return result @api.multi def get_search_fields(self, order_fields, r_date=None): sort_fields = {} for field in order_fields: if field == 'id' and r_date: sort_fields[field] = real_id2calendar_id(self.id, r_date) else: sort_fields[field] = self[field] if isinstance(self[field], models.BaseModel): name_get = self[field].name_get() if len(name_get) and len(name_get[0]) >= 2: sort_fields[field] = name_get[0][1] if r_date: sort_fields['sort_start'] = r_date.strftime(VIRTUALID_DATETIME_FORMAT) else: display_start = self.display_start sort_fields['sort_start'] = display_start.replace(' ', '').replace('-', '') if display_start else False return sort_fields @api.multi def get_recurrent_ids(self, domain, order=None): """ Gives virtual event ids for recurring events. This method gives ids of dates that comes between start date and end date of calendar views :param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which the events should be sorted """ if order: order_fields = [field.split()[0] for field in order.split(',')] else: # fallback on self._order defined on the model order_fields = [field.split()[0] for field in self._order.split(',')] if 'id' not in order_fields: order_fields.append('id') result_data = [] result = [] for meeting in self: if not meeting.recurrency or not meeting.rrule: result.append(meeting.id) result_data.append(meeting.get_search_fields(order_fields)) continue rdates = meeting._get_recurrent_dates_by_event() for r_start_date, r_stop_date in rdates: # fix domain evaluation # step 1: check date and replace expression by True or False, replace other expressions by True # step 2: evaluation of & and | # check if there are one False pile = [] ok = True r_date = r_start_date # default for empty domain for arg in domain: if str(arg[0]) in ('start', 'stop', 'final_date'): if str(arg[0]) == 'start': r_date = r_start_date else: r_date = r_stop_date if arg[2] and len(arg[2]) > len(r_date.strftime(DEFAULT_SERVER_DATE_FORMAT)): dformat = DEFAULT_SERVER_DATETIME_FORMAT else: dformat = DEFAULT_SERVER_DATE_FORMAT if (arg[1] == '='): ok = r_date.strftime(dformat) == arg[2] if (arg[1] == '>'): ok = r_date.strftime(dformat) > arg[2] if (arg[1] == '<'): ok = r_date.strftime(dformat) < arg[2] if (arg[1] == '>='): ok = r_date.strftime(dformat) >= arg[2] if (arg[1] == '<='): ok = r_date.strftime(dformat) <= arg[2] if (arg[1] == '!='): ok = r_date.strftime(dformat) != arg[2] pile.append(ok) elif str(arg) == str('&') or str(arg) == str('|'): pile.append(arg) else: pile.append(True) pile.reverse() new_pile = [] for item in pile: if not isinstance(item, basestring): res = item elif str(item) == str('&'): first = new_pile.pop() second = new_pile.pop() res = first and second elif str(item) == str('|'): first = new_pile.pop() second = new_pile.pop() res = first or second new_pile.append(res) if [True for item in new_pile if not item]: continue 
result_data.append(meeting.get_search_fields(order_fields, r_date=r_start_date)) if order_fields: uniq = lambda it: collections.OrderedDict((id(x), x) for x in it).values() def comparer(left, right): for fn, mult in comparers: result = cmp(fn(left), fn(right)) if result: return mult * result return 0 sort_params = [key.split()[0] if key[-4:].lower() != 'desc' else '-%s' % key.split()[0] for key in (order or self._order).split(',')] sort_params = uniq([comp if comp not in ['start', 'start_date', 'start_datetime'] else 'sort_start' for comp in sort_params]) sort_params = uniq([comp if comp not in ['-start', '-start_date', '-start_datetime'] else '-sort_start' for comp in sort_params]) comparers = [((itemgetter(col[1:]), -1) if col[0] == '-' else (itemgetter(col), 1)) for col in sort_params] ids = [r['id'] for r in sorted(result_data, cmp=comparer)] return ids @api.multi def _rrule_serialize(self): """ Compute rule string according to value type RECUR of iCalendar :return: string containing recurring rule (empty if no rule) """ if self.interval and self.interval < 0: raise UserError(_('interval cannot be negative.')) if self.count and self.count <= 0: raise UserError(_('Event recurrence interval cannot be negative.')) def get_week_string(freq): weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su'] if freq == 'weekly': byday = [field.upper() for field in weekdays if self[field]] if byday: return ';BYDAY=' + ','.join(byday) return '' def get_month_string(freq): if freq == 'monthly': if self.month_by == 'date' and (self.day < 1 or self.day > 31): raise UserError(_("Please select a proper day of the month.")) if self.month_by == 'day' and self.byday and self.week_list: # Eg : Second Monday of the month return ';BYDAY=' + self.byday + self.week_list elif self.month_by == 'date': # Eg : 16th of the month return ';BYMONTHDAY=' + str(self.day) return '' def get_end_date(): end_date_new = ''.join((re.compile('\d')).findall(self.final_date)) + 'T235959Z' if self.final_date else False return (self.end_type == 'count' and (';COUNT=' + str(self.count)) or '') +\ ((end_date_new and self.end_type == 'end_date' and (';UNTIL=' + end_date_new)) or '') freq = self.rrule_type # day/week/month/year result = '' if freq: interval_srting = self.interval and (';INTERVAL=' + str(self.interval)) or '' result = 'FREQ=' + freq.upper() + get_week_string(freq) + interval_srting + get_end_date() + get_month_string(freq) return result def _rrule_default_values(self): return { 'byday': False, 'recurrency': False, 'final_date': False, 'rrule_type': False, 'month_by': False, 'interval': 0, 'count': False, 'end_type': False, 'mo': False, 'tu': False, 'we': False, 'th': False, 'fr': False, 'sa': False, 'su': False, 'day': False, 'week_list': False } def _rrule_parse(self, rule_str, data, date_start): day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su'] rrule_type = ['yearly', 'monthly', 'weekly', 'daily'] rule = rrule.rrulestr(rule_str, dtstart=fields.Datetime.from_string(date_start)) if rule._freq > 0 and rule._freq < 4: data['rrule_type'] = rrule_type[rule._freq] data['count'] = rule._count data['interval'] = rule._interval data['final_date'] = rule._until and rule._until.strftime(DEFAULT_SERVER_DATETIME_FORMAT) #repeat weekly if rule._byweekday: for i in xrange(0, 7): if i in rule._byweekday: data[day_list[i]] = True data['rrule_type'] = 'weekly' #repeat monthly by nweekday ((weekday, weeknumber), ) if rule._bynweekday: data['week_list'] = day_list[list(rule._bynweekday)[0][0]].upper() data['byday'] = 
str(list(rule._bynweekday)[0][1]) data['month_by'] = 'day' data['rrule_type'] = 'monthly' if rule._bymonthday: data['day'] = list(rule._bymonthday)[0] data['month_by'] = 'date' data['rrule_type'] = 'monthly' #repeat yearly but for openerp it's monthly, take same information as monthly but interval is 12 times if rule._bymonth: data['interval'] = data['interval'] * 12 #FIXEME handle forever case #end of recurrence #in case of repeat for ever that we do not support right now if not (data.get('count') or data.get('final_date')): data['count'] = 100 if data.get('count'): data['end_type'] = 'count' else: data['end_type'] = 'end_date' return data @api.multi def get_interval(self, interval, tz=None): """ Format and localize some dates to be used in email templates :param string interval: Among 'day', 'month', 'dayname' and 'time' indicating the desired formatting :param string tz: Timezone indicator (optional) :return unicode: Formatted date or time (as unicode string, to prevent jinja2 crash) """ self.ensure_one() date = fields.Datetime.from_string(self.start) if tz: timezone = pytz.timezone(tz or 'UTC') date = date.replace(tzinfo=pytz.timezone('UTC')).astimezone(timezone) if interval == 'day': # Day number (1-31) result = unicode(date.day) elif interval == 'month': # Localized month name and year result = babel.dates.format_date(date=date, format='MMMM y', locale=self._context.get('lang') or 'en_US') elif interval == 'dayname': # Localized day name result = babel.dates.format_date(date=date, format='EEEE', locale=self._context.get('lang') or 'en_US') elif interval == 'time': # Localized time dummy, format_time = self._get_date_formats() result = tools.ustr(date.strftime(format_time + " %Z")) return result @api.multi def get_display_time_tz(self, tz=False): """ get the display_time of the meeting, forcing the timezone. This method is called from email template, to not use sudo(). 
""" self.ensure_one() if tz: self = self.with_context(tz=tz) return self._get_display_time(self.start, self.stop, self.duration, self.allday) @api.multi def detach_recurring_event(self, values=None): """ Detach a virtual recurring event by duplicating the original and change reccurent values :param values : dict of value to override on the detached event """ if not values: values = {} real_id = calendar_id2real_id(self.id) meeting_origin = self.browse(real_id) data = self.read(['allday', 'start', 'stop', 'rrule', 'duration'])[0] data['start_date' if data['allday'] else 'start_datetime'] = data['start'] data['stop_date' if data['allday'] else 'stop_datetime'] = data['stop'] if data.get('rrule'): data.update( values, recurrent_id=real_id, recurrent_id_date=data.get('start'), rrule_type=False, rrule='', recurrency=False, final_date=datetime.strptime(data.get('start'), DEFAULT_SERVER_DATETIME_FORMAT if data['allday'] else DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=values.get('duration', False) or data.get('duration')) ) # do not copy the id if data.get('id'): del data['id'] return meeting_origin.copy(default=data) @api.multi def action_detach_recurring_event(self): meeting = self.detach_recurring_event() return { 'type': 'ir.actions.act_window', 'res_model': 'calendar.event', 'view_mode': 'form', 'res_id': meeting.id, 'target': 'current', 'flags': {'form': {'action_buttons': True, 'options': {'mode': 'edit'}}} } @api.multi def action_sendmail(self): email = self.env.user.email if email: for meeting in self: meeting.attendee_ids._send_mail_to_attendees('calendar.calendar_template_meeting_invitation') return True #################################################### # Messaging #################################################### # shows events of the day for this user @api.model def _needaction_domain_get(self): return [ ('stop', '<=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 23:59:59')), ('start', '>=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 00:00:00')), ('user_id', '=', self.env.user.id), ] @api.multi def _get_message_unread(self): id_map = {x: calendar_id2real_id(x) for x in self.ids} real = self.browse(set(id_map.values())) super(Meeting, real)._get_message_unread() for event in self: if event.id == id_map[event.id]: continue rec = self.browse(id_map[event.id]) event.message_unread_counter = rec.message_unread_counter event.message_unread = rec.message_unread @api.multi def _get_message_needaction(self): id_map = {x: calendar_id2real_id(x) for x in self.ids} real = self.browse(set(id_map.values())) super(Meeting, real)._get_message_needaction() for event in self: if event.id == id_map[event.id]: continue rec = self.browse(id_map[event.id]) event.message_needaction_counter = rec.message_needaction_counter event.message_needaction = rec.message_needaction @api.multi @api.returns('self', lambda value: value.id) def message_post(self, **kwargs): thread_id = self.id if isinstance(self.id, basestring): thread_id = get_real_ids(self.id) if self.env.context.get('default_date'): context = dict(self.env.context) del context['default_date'] self = self.with_context(context) return super(Meeting, self.browse(thread_id)).message_post(**kwargs) @api.multi def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None, force=True): records = self.browse(get_real_ids(self.ids)) return super(Meeting, records).message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids, force=force) @api.multi def message_unsubscribe(self, partner_ids=None, 
channel_ids=None): records = self.browse(get_real_ids(self.ids)) return super(Meeting, records).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) #################################################### # ORM Overrides #################################################### @api.multi def get_metadata(self): real = self.browse(set({x: calendar_id2real_id(x) for x in self.ids}.values())) return super(Meeting, real).get_metadata() @api.model def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None): for arg in args: if arg[0] == 'id': for n, calendar_id in enumerate(arg[2]): if isinstance(calendar_id, basestring): arg[2][n] = calendar_id.split('-')[0] return super(Meeting, self)._name_search(name=name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid) @api.multi def write(self, values): # compute duration, only if start and stop are modified if not 'duration' in values and 'start' in values and 'stop' in values: values['duration'] = self._get_duration(values['start'], values['stop']) # process events one by one for meeting in self: # special write of complex IDS real_ids = [] new_ids = [] if not is_calendar_id(meeting.id): real_ids = [int(meeting.id)] else: real_event_id = calendar_id2real_id(meeting.id) # if we are setting the recurrency flag to False or if we are only changing fields that # should be only updated on the real ID and not on the virtual (like message_follower_ids): # then set real ids to be updated. blacklisted = any(key in values for key in ('start', 'stop', 'active')) if not values.get('recurrency', True) or not blacklisted: real_ids = [real_event_id] else: data = meeting.read(['start', 'stop', 'rrule', 'duration'])[0] if data.get('rrule'): new_ids = meeting.with_context(dont_notify=True).detach_recurring_event(values).ids # to prevent multiple notify_next_alarm new_meetings = self.browse(new_ids) real_meetings = self.browse(real_ids) all_meetings = real_meetings + new_meetings super(Meeting, real_meetings).write(values) # set end_date for calendar searching if any(field in values for field in ['recurrency', 'end_type', 'count', 'rrule_type', 'start', 'stop']): for real_meeting in real_meetings: if real_meeting.recurrency and real_meeting.end_type in ('count', unicode('count')): final_date = real_meeting._get_recurrency_end_date() super(Meeting, real_meeting).write({'final_date': final_date}) attendees_create = False if values.get('partner_ids', False): attendees_create = all_meetings.with_context(dont_notify=True).create_attendees() # to prevent multiple notify_next_alarm # Notify attendees if there is an alarm on the modified event, or if there was an alarm # that has just been removed, as it might have changed their next event notification if not self._context.get('dont_notify'): if len(meeting.alarm_ids) > 0 or values.get('alarm_ids'): partners_to_notify = meeting.partner_ids.ids event_attendees_changes = attendees_create and real_ids and attendees_create[real_ids[0]] if event_attendees_changes: partners_to_notify.append(event_attendees_changes['removed_partners'].ids) self.env['calendar.alarm_manager'].notify_next_alarm(partners_to_notify) if (values.get('start_date') or values.get('start_datetime')) and values.get('active', True): for current_meeting in all_meetings: if attendees_create: attendees_create = attendees_create[current_meeting.id] attendee_to_email = attendees_create['old_attendees'] - attendees_create['removed_attendees'] else: attendee_to_email = current_meeting.attendee_ids if 
attendee_to_email: attendee_to_email._send_mail_to_attendees('calendar.calendar_template_meeting_changedate') return True @api.model def create(self, values): if not 'user_id' in values: # Else bug with quick_create when we are filter on an other user values['user_id'] = self.env.user.id # compute duration, if not given if not 'duration' in values: values['duration'] = self._get_duration(values['start'], values['stop']) meeting = super(Meeting, self).create(values) final_date = meeting._get_recurrency_end_date() # `dont_notify=True` in context to prevent multiple notify_next_alarm meeting.with_context(dont_notify=True).write({'final_date': final_date}) meeting.with_context(dont_notify=True).create_attendees() # Notify attendees if there is an alarm on the created event, as it might have changed their # next event notification if not self._context.get('dont_notify'): if len(meeting.alarm_ids) > 0: self.env['calendar.alarm_manager'].notify_next_alarm(meeting.partner_ids.ids) return meeting @api.multi def export_data(self, fields_to_export, raw_data=False): """ Override to convert virtual ids to ids """ records = self.browse(set(get_real_ids(self.ids))) return super(Meeting, records).export_data(fields_to_export, raw_data) @api.model def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True): if 'date' in groupby: raise UserError(_('Group by date is not supported, use the calendar view instead.')) return super(Meeting, self.with_context(virtual_id=False)).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy) @api.multi def read(self, fields=None, load='_classic_read'): fields2 = fields and fields[:] or None EXTRAFIELDS = ('privacy', 'user_id', 'duration', 'allday', 'start', 'start_date', 'start_datetime', 'rrule') for f in EXTRAFIELDS: if fields and (f not in fields): fields2.append(f) select = map(lambda x: (x, calendar_id2real_id(x)), self.ids) real_events = self.browse([real_id for calendar_id, real_id in select]) real_data = super(Meeting, real_events).read(fields=fields2, load=load) real_data = dict((d['id'], d) for d in real_data) result = [] for calendar_id, real_id in select: res = real_data[real_id].copy() ls = calendar_id2real_id(calendar_id, with_date=res and res.get('duration', 0) > 0 and res.get('duration') or 1) if not isinstance(ls, (basestring, int, long)) and len(ls) >= 2: res['start'] = ls[1] res['stop'] = ls[2] if res['allday']: res['start_date'] = ls[1] res['stop_date'] = ls[2] else: res['start_datetime'] = ls[1] res['stop_datetime'] = ls[2] if 'display_time' in fields: res['display_time'] = self._get_display_time(ls[1], ls[2], res['duration'], res['allday']) res['id'] = calendar_id result.append(res) for r in result: if r['user_id']: user_id = type(r['user_id']) in (tuple, list) and r['user_id'][0] or r['user_id'] partner_id = self.env.user.partner_id.id if user_id == self.env.user.id or partner_id in r.get("partner_ids", []): continue if r['privacy'] == 'private': for f in r.keys(): recurrent_fields = self._get_recurrent_fields() public_fields = list(set(recurrent_fields + ['id', 'allday', 'start', 'stop', 'display_start', 'display_stop', 'duration', 'user_id', 'state', 'interval', 'count', 'recurrent_id_date', 'rrule'])) if f not in public_fields: if isinstance(r[f], list): r[f] = [] else: r[f] = False if f == 'name': r[f] = _('Busy') for r in result: for k in EXTRAFIELDS: if (k in r) and (fields and (k not in fields)): del r[k] return result @api.multi def unlink(self, can_be_deleted=True): # Get 
concerned attendees to notify them if there is an alarm on the unlinked events, # as it might have changed their next event notification events = self.search([('id', 'in', self.ids), ('alarm_ids', '!=', False)]) partner_ids = events.mapped('partner_ids').ids records_to_exclude = self.env['calendar.event'] records_to_unlink = self.env['calendar.event'] for meeting in self: if can_be_deleted and not is_calendar_id(meeting.id): # if ID REAL if meeting.recurrent_id: records_to_exclude |= meeting else: # int() required because 'id' from calendar view is a string, since it can be calendar virtual id records_to_unlink |= self.browse(int(meeting.id)) else: records_to_exclude |= meeting result = False if records_to_unlink: result = super(Meeting, records_to_unlink).unlink() if records_to_exclude: result = records_to_exclude.with_context(dont_notify=True).write({'active': False}) # Notify the concerned attendees (must be done after removing the events) self.env['calendar.alarm_manager'].notify_next_alarm(partner_ids) return result @api.model def search(self, args, offset=0, limit=0, order=None, count=False): if self._context.get('mymeetings'): args += [('partner_ids', 'in', self.env.user.partner_id.ids)] new_args = [] for arg in args: new_arg = arg if arg[0] in ('stop_date', 'stop_datetime', 'stop',) and arg[1] == ">=": if self._context.get('virtual_id', True): new_args += ['|', '&', ('recurrency', '=', 1), ('final_date', arg[1], arg[2])] elif arg[0] == "id": new_arg = (arg[0], arg[1], get_real_ids(arg[2])) new_args.append(new_arg) if not self._context.get('virtual_id', True): return super(Meeting, self).search(new_args, offset=offset, limit=limit, order=order, count=count) if any(arg[0] == 'start' for arg in args) and \ not any(arg[0] in ('stop', 'final_date') for arg in args): # domain with a start filter but with no stop clause should be extended # e.g. start=2017-01-01, count=5 => virtual occurences must be included in ('start', '>', '2017-01-02') start_args = new_args new_args = [] for arg in start_args: new_arg = arg if arg[0] in ('start_date', 'start_datetime', 'start',): new_args += ['|', '&', ('recurrency', '=', 1), ('final_date', arg[1], arg[2])] new_args.append(new_arg) # offset, limit, order and count must be treated separately as we may need to deal with virtual ids events = super(Meeting, self).search(new_args, offset=0, limit=0, order=None, count=False) events = self.browse(events.get_recurrent_ids(args, order=order)) if count: return len(events) elif limit: return events[offset: offset + limit] return events @api.multi def copy(self, default=None): self.ensure_one() default = default or {} return super(Meeting, self.browse(calendar_id2real_id(self.id))).copy(default)
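# Illustrative note (not part of the original module): _rrule_serialize() above
# concatenates the FREQ, BYDAY/BYMONTHDAY, INTERVAL and COUNT/UNTIL parts into
# an iCalendar RECUR string. For example, a recurrent meeting with
# rrule_type='weekly', mo=True, we=True, interval=2, end_type='count' and
# count=10 would serialize to:
#
#     FREQ=WEEKLY;BYDAY=MO,WE;INTERVAL=2;COUNT=10
#
# and _rrule_parse() maps such a string back onto the same recurrency fields.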
hip-odoo/odoo
addons/calendar/models/calendar.py
Python
agpl-3.0
72,599
# Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. """ Test cases for dirdbm module. """ from twisted.trial import unittest from twisted.persisted import dirdbm import os, shutil, glob class DirDbmTestCase(unittest.TestCase): def setUp(self): self.path = self.mktemp() self.dbm = dirdbm.open(self.path) self.items = (('abc', 'foo'), ('/lalal', '\000\001'), ('\000\012', 'baz')) def tearDown(self): shutil.rmtree(self.path) def testAll(self): k = "//==".decode("base64") self.dbm[k] = "a" self.dbm[k] = "a" self.assertEquals(self.dbm[k], "a") def testRebuildInteraction(self): from twisted.persisted import dirdbm from twisted.python import rebuild s = dirdbm.Shelf('dirdbm.rebuild.test') s['key'] = 'value' rebuild.rebuild(dirdbm) # print s['key'] def testDbm(self): d = self.dbm # insert keys keys = [] values = [] for k, v in self.items: d[k] = v keys.append(k) values.append(v) keys.sort() values.sort() # check they exist for k, v in self.items: assert d.has_key(k), "has_key() failed" assert d[k] == v, "database has wrong value" # check non existent key try: d["XXX"] except KeyError: pass else: assert 0, "didn't raise KeyError on non-existent key" # check keys(), values() and items() dbkeys = list(d.keys()) dbvalues = list(d.values()) dbitems = list(d.items()) dbkeys.sort() dbvalues.sort() dbitems.sort() items = list(self.items) items.sort() assert keys == dbkeys, ".keys() output didn't match: %s != %s" % (repr(keys), repr(dbkeys)) assert values == dbvalues, ".values() output didn't match: %s != %s" % (repr(values), repr(dbvalues)) assert items == dbitems, "items() didn't match: %s != %s" % (repr(items), repr(dbitems)) copyPath = self.mktemp() d2 = d.copyTo(copyPath) copykeys = list(d.keys()) copyvalues = list(d.values()) copyitems = list(d.items()) copykeys.sort() copyvalues.sort() copyitems.sort() assert dbkeys == copykeys, ".copyTo().keys() didn't match: %s != %s" % (repr(dbkeys), repr(copykeys)) assert dbvalues == copyvalues, ".copyTo().values() didn't match: %s != %s" % (repr(dbvalues), repr(copyvalues)) assert dbitems == copyitems, ".copyTo().items() didn't match: %s != %s" % (repr(dbkeys), repr(copyitems)) d2.clear() assert len(d2.keys()) == len(d2.values()) == len(d2.items()) == 0, ".clear() failed" shutil.rmtree(copyPath) # delete items for k, v in self.items: del d[k] assert not d.has_key(k), "has_key() even though we deleted it" assert len(d.keys()) == 0, "database has keys" assert len(d.values()) == 0, "database has values" assert len(d.items()) == 0, "database has items" def testModificationTime(self): import time # the mtime value for files comes from a different place than the # gettimeofday() system call. On linux, gettimeofday() can be # slightly ahead (due to clock drift which gettimeofday() takes into # account but which open()/write()/close() do not), and if we are # close to the edge of the next second, time.time() can give a value # which is larger than the mtime which results from a subsequent # write(). I consider this a kernel bug, but it is beyond the scope # of this test. Thus we keep the range of acceptability to 3 seconds time. 
# -warner self.dbm["k"] = "v" self.assert_(abs(time.time() - self.dbm.getModificationTime("k")) <= 3) def testRecovery(self): """DirDBM: test recovery from directory after a faked crash""" k = self.dbm._encode("key1") f = open(os.path.join(self.path, k + ".rpl"), "wb") f.write("value") f.close() k2 = self.dbm._encode("key2") f = open(os.path.join(self.path, k2), "wb") f.write("correct") f.close() f = open(os.path.join(self.path, k2 + ".rpl"), "wb") f.write("wrong") f.close() f = open(os.path.join(self.path, "aa.new"), "wb") f.write("deleted") f.close() dbm = dirdbm.DirDBM(self.path) assert dbm["key1"] == "value" assert dbm["key2"] == "correct" assert not glob.glob(os.path.join(self.path, "*.new")) assert not glob.glob(os.path.join(self.path, "*.rpl")) class ShelfTestCase(DirDbmTestCase): def setUp(self): self.path = self.mktemp() self.dbm = dirdbm.Shelf(self.path) self.items = (('abc', 'foo'), ('/lalal', '\000\001'), ('\000\012', 'baz'), ('int', 12), ('float', 12.0), ('tuple', (None, 12))) testCases = [DirDbmTestCase, ShelfTestCase]
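# Illustrative sketch (not part of the original test module): the DirDBM API
# exercised above behaves like a persistent, dict-like store backed by a
# directory; the path below is an arbitrary example.
#
#     from twisted.persisted import dirdbm
#     d = dirdbm.open('/tmp/example-db')
#     d['key'] = 'value'
#     assert d['key'] == 'value'
#     del d['key']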
tquilian/exeNext
twisted/test/test_dirdbm.py
Python
gpl-2.0
5,257
#!/usr/bin/env python #coding: UTF-8 # # (c) 2014 Samuel Groß <[email protected]> # import os import sys import importlib from time import sleep try: import concurrent.futures except ImportError: pass # # Functions # def warn(msg): sys.stderr.write('WARNING: ' + msg + '\n') def err(msg): sys.stderr.write('ERROR: ' + msg + '\n') def log(msg): print(msg) # # Decorators # def add_attr(f, attributes): for k, v in attributes.items(): setattr(f, k, v) def source(name): def decorate(f): def add_source_to_events_wrapper(*args, **kwargs): res = f(*args, **kwargs) events = set() for e in res: e.source = name events.add(e) return events add_attr(add_source_to_events_wrapper, {'mod_type': 'source', 'name': name}) return add_source_to_events_wrapper return decorate def sink(name): def decorate(f): add_attr(f, {'mod_type': 'sink', 'name': name}) return f return decorate # # Classes # class Event: def __init__(self, start, end, name, source='', description=''): self.start = start.replace(second=0, microsecond=0) self.end = end.replace(second=0, microsecond=0) self.name = name self.source = source self.description = description def __str__(self): return '[{}] {}: {} - {}'.format( self.source, self.name, self.start.strftime('%d.%m.%Y %H:%M'), self.end.strftime('%d.%m.%Y %H:%M')) def __eq__(self, other): return (self.start == other.start and self.end == other.end and self.name.lower() == other.name.lower() and self.source == other.source) def __hash__(self): return (hash(self.start) ^ hash(self.end) ^ hash(self.name.lower) ^ hash(self.source)) class Sekretaer: DELAY = 60*60 def __init__(self, config): log("[sekretaer] Initializing...") self.config = config # TODO check for and load pickled event set self.events = set() if sys.version_info[:2] >= (3, 2): self.fetch = self.fetch_par else: self.fetch = self.fetch_seq self.sources = [] self.sinks = [] self.load_modules() log("[sekretaer] Ready for work") def work(self): """Main loop.""" while True: events = self.fetch() events -= self.events if events: self.process(events) self.events |= events self.cleanup() sleep(self.DELAY) def fetch_seq(self): """Fetch new events from all enabled sources sequentially (for older python versions).""" events = set() for f in self.sources: try: events |= f(self.config.get(f.name)) except Exception as e: warn("Caught Exception: {}".format(e)) return events def fetch_par(self): """Fetch new events from all enabled sources in parallel.""" events = set() with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: futures = [executor.submit(f, self.config.get(f.name)) for f in self.sources] for future in concurrent.futures.as_completed(futures): try: events |= future.result() except Exception as e: warn("Caught Exception: {}".format(e)) return events def process(self, events): """Process new events.""" for f in self.sinks: f(self.config.get(f.name), events) def cleanup(self): """Remove old events from the set.""" pass def load_modules(self): """Load enabled modules from the modules folder.""" log("[sekretaer] Loading modules...") modules = [] working_dir = os.path.dirname(os.path.abspath(__file__)) modules_dir = os.path.join(working_dir, 'modules') for filename in os.listdir(modules_dir): if filename.endswith('.py') and not filename.startswith('_'): modules.append(filename[:-3]) for module_name in modules: try: module = importlib.import_module('sekretaer.modules.' 
+ module_name) except Exception as e: warn("[sekretaer] Failed to load module '{}': {}".format(module_name, e)) continue functions = [getattr(module, a) for a in dir(module) if not a.startswith('_') and callable(getattr(module, a))] for f in functions: if hasattr(f, 'mod_type'): if f.mod_type == 'source' and f.name in self.config['sources_enabled']: self.sources.append(f) elif f.mod_type == 'sink' and f.name in self.config['sinks_enabled']: self.sinks.append(f) loaded_modules = set(map(lambda x: x.name, self.sources + self.sinks)) for missing_module in set(self.config['sources_enabled'] + self.config['sinks_enabled']) - loaded_modules: warn("[sekretaer] Module '{}' could not be loaded".format(missing_module)) log("[sekretaer] {} modules loaded".format(len(self.sources) + len(self.sinks))) def shutdown(self): """Store the current state to disk and quit.""" log("[sekretaer] Shutting down") pass
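# Illustrative sketch (not part of the original module): a minimal source
# plugin as load_modules() above expects to find it, e.g. in a hypothetical
# file sekretaer/modules/example.py, enabled via the 'sources_enabled' config
# list. The plugin name and the returned dummy event are placeholders.
#
#     from datetime import datetime, timedelta
#     from sekretaer.core import source, Event
#
#     @source('example')
#     def fetch_events(config):
#         # 'config' is whatever self.config.get('example') returns (may be None).
#         now = datetime.now()
#         return {Event(now, now + timedelta(hours=1), 'Dummy appointment')}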
saelo/sekretaer
sekretaer/core.py
Python
mit
5,590
import cProfile

import data_loader
import data_manipulator
import data_saver
import neural_net


def main():
    # Load the CIFAR-10 test and train batches via the project's data_loader.
    test_batch, train_batch = data_loader.load_data()
    # Label preprocessing for both batches (see data_manipulator.categorize).
    data_manipulator.categorize(train_batch, test_batch)
    # Continue training from the saved weights file and store the updated
    # weights under the next epoch-tagged file name.
    model = neural_net.get_trained_model(train_batches=train_batch,
                                         test_batch=test_batch,
                                         weights_in='weights/1024_1024_256_64_epochs_45',
                                         weights_out='weights/1024_1024_256_64_epochs_50')
    # Predict labels for the test batch and write them to a CSV results file.
    predictions = neural_net.get_predictions(model, test_batch)
    data_saver.save_results("results/result.csv", predictions)


def profiling():
    # Run main() under cProfile, sorted by total time per function.
    cProfile.run('main()', sort='tottime')


if __name__ == "__main__":
    main()
maciewar/AGH-Deep-Learning-CIFAR10
cifar.py
Python
mit
764
# -*- coding: utf-8 -*- # Copyright (c) 2013 Red Hat, Inc. # # This software is licensed to you under the GNU General Public # License as published by the Free Software Foundation; either version # 2 of the License (GPLv2) or (at your option) any later version. # There is NO WARRANTY for this software, express or implied, # including the implied warranties of MERCHANTABILITY, # NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should # have received a copy of GPLv2 along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. import unittest import mock from pulp.client.commands.schedule import CreateScheduleCommand, ListScheduleCommand, \ DeleteScheduleCommand, UpdateScheduleCommand, NextRunCommand from pulp_node import constants from pulp_node.extensions.admin import sync_schedules from pulp_node.extensions.admin.options import NODE_ID_OPTION, MAX_BANDWIDTH_OPTION, MAX_CONCURRENCY_OPTION NODE_ID = 'node-1' MAX_BANDWIDTH = 12345 MAX_CONCURRENCY = 321 class CommandTests(unittest.TestCase): def setUp(self): super(CommandTests, self).setUp() self.context = mock.MagicMock() def test_list_schedule_command(self): command = sync_schedules.NodeListScheduleCommand(self.context) self.assertTrue(isinstance(command, ListScheduleCommand)) self.assertTrue(NODE_ID_OPTION in command.options) self.assertEqual(command.description, sync_schedules.DESC_LIST) self.assertTrue(isinstance(command.strategy, sync_schedules.NodeSyncScheduleStrategy)) def test_create_schedule_command(self): command = sync_schedules.NodeCreateScheduleCommand(self.context) self.assertTrue(isinstance(command, CreateScheduleCommand)) self.assertTrue(NODE_ID_OPTION in command.options) self.assertTrue(MAX_BANDWIDTH_OPTION in command.options) self.assertTrue(MAX_CONCURRENCY_OPTION in command.options) self.assertEqual(command.description, sync_schedules.DESC_CREATE) self.assertTrue(isinstance(command.strategy, sync_schedules.NodeSyncScheduleStrategy)) def test_delete_schedule_command(self): command = sync_schedules.NodeDeleteScheduleCommand(self.context) self.assertTrue(isinstance(command, DeleteScheduleCommand)) self.assertTrue(NODE_ID_OPTION in command.options) self.assertEqual(command.description, sync_schedules.DESC_DELETE) self.assertTrue(isinstance(command.strategy, sync_schedules.NodeSyncScheduleStrategy)) def test_update_schedule_command(self): command = sync_schedules.NodeUpdateScheduleCommand(self.context) self.assertTrue(isinstance(command, UpdateScheduleCommand)) self.assertTrue(NODE_ID_OPTION in command.options) self.assertEqual(command.description, sync_schedules.DESC_UPDATE) self.assertTrue(isinstance(command.strategy, sync_schedules.NodeSyncScheduleStrategy)) def test_next_run_command(self): command = sync_schedules.NodeNextRunCommand(self.context) self.assertTrue(isinstance(command, NextRunCommand)) self.assertTrue(NODE_ID_OPTION in command.options) self.assertEqual(command.description, sync_schedules.DESC_NEXT_RUN) self.assertTrue(isinstance(command.strategy, sync_schedules.NodeSyncScheduleStrategy)) class NodeSyncScheduleStrategyTests(unittest.TestCase): def setUp(self): super(NodeSyncScheduleStrategyTests, self).setUp() self.context = mock.MagicMock() self.api = mock.MagicMock() self.strategy = sync_schedules.NodeSyncScheduleStrategy(self.context) self.strategy.api = self.api def test_create(self): # Test schedule = '1900-01-01' failure_threshold = 5 enabled = True kwargs = { NODE_ID_OPTION.keyword: NODE_ID, MAX_BANDWIDTH_OPTION.keyword: MAX_BANDWIDTH, 
MAX_CONCURRENCY_OPTION.keyword: MAX_CONCURRENCY } self.strategy.create_schedule(schedule, failure_threshold, enabled, kwargs) # Verify expected_units = [dict(type_id='node', unit_key=None)] options = { constants.MAX_DOWNLOAD_BANDWIDTH_KEYWORD: MAX_BANDWIDTH, constants.MAX_DOWNLOAD_CONCURRENCY_KEYWORD: MAX_CONCURRENCY, } self.api.add_schedule.assert_called_once_with( sync_schedules.SYNC_OPERATION, NODE_ID, schedule, expected_units, failure_threshold, enabled, options) def test_delete(self): # Test schedule_id = 'abcdef' kwargs = {sync_schedules.NODE_ID_OPTION.keyword : NODE_ID} self.strategy.delete_schedule(schedule_id, kwargs) # Verify self.api.delete_schedule.assert_called_once_with(sync_schedules.SYNC_OPERATION, NODE_ID, schedule_id) def test_retrieve(self): # Test kwargs = {sync_schedules.NODE_ID_OPTION.keyword : NODE_ID} self.strategy.retrieve_schedules(kwargs) # Verify self.api.list_schedules.assert_called_once_with(sync_schedules.SYNC_OPERATION, NODE_ID) def test_update(self): # Test schedule_id = 'abcdef' kwargs = {sync_schedules.NODE_ID_OPTION.keyword : NODE_ID, 'extra' : 'e'} self.strategy.update_schedule(schedule_id, **kwargs) # Verify self.api.update_schedule.assert_called_once_with(sync_schedules.SYNC_OPERATION, NODE_ID, schedule_id, **{'extra' : 'e'})
beav/pulp
nodes/test/unit/test_extensions_sync_schedules.py
Python
gpl-2.0
5,703
"""Test file anonymization.""" # Copyright 2018 Intentionet # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from testfixtures import LogCapture from netconan.anonymize_files import anonymize_file, anonymize_files _INPUT_CONTENTS = """ # Intentionet's sensitive test file ip address 192.168.2.1 255.255.255.255 my hash is $1$salt$ABCDEFGHIJKLMNOPQRS password foobar """ _REF_CONTENTS = """ # a4daba's fd8607 test file ip address 192.168.139.13 255.255.255.255 my hash is $1$0000$CxUUGIrqPb7GaB5midrQZ. password netconanRemoved1 """ _SALT = "TESTSALT" _SENSITIVE_WORDS = [ "intentionet", "sensitive", ] def test_anonymize_files_bad_input_empty(tmpdir): """Test anonymize_files with empty input dir.""" input_dir = tmpdir.mkdir("input") output_dir = tmpdir.mkdir("output") with pytest.raises(ValueError, match="Input directory is empty"): anonymize_files( str(input_dir), str(output_dir), True, True, salt=_SALT, sensitive_words=_SENSITIVE_WORDS, ) def test_anonymize_files_bad_input_missing(tmpdir): """Test anonymize_files with non-existent input.""" filename = "test.txt" input_file = tmpdir.join(filename) output_file = tmpdir.mkdir("out").join(filename) with pytest.raises(ValueError, match="Input does not exist"): anonymize_files( str(input_file), str(output_file), True, True, salt=_SALT, sensitive_words=_SENSITIVE_WORDS, ) def test_anonymize_files_bad_output_file(tmpdir): """Test anonymize_files when output 'file' already exists but is a dir.""" filename = "test.txt" input_file = tmpdir.join(filename) input_file.write(_INPUT_CONTENTS) output_file = tmpdir.mkdir("out").mkdir(filename) with pytest.raises(ValueError, match="Cannot write output file.*"): anonymize_file(str(input_file), str(output_file)) # Anonymizing files should complete okay, because it skips the errored file with LogCapture() as log_capture: anonymize_files( str(input_file), str(output_file), True, True, salt=_SALT, sensitive_words=_SENSITIVE_WORDS, ) # Confirm the correct message is logged log_capture.check_present( ("root", "ERROR", "Failed to anonymize file {}".format(str(input_file))) ) # Confirm the exception info was also logged assert "Cannot write output file; output file is a directory" in str( log_capture.records[-1].exc_info[1] ) def test_anonymize_files_bad_output_dir(tmpdir): """Test anonymize_files when output 'dir' already exists but is a file.""" filename = "test.txt" input_dir = tmpdir.mkdir("input") input_dir.join(filename).write(_INPUT_CONTENTS) output_file = tmpdir.join("out") output_file.write("blah") with pytest.raises(ValueError, match="Output path must be a directory.*"): anonymize_files( str(input_dir), str(output_file), True, True, salt=_SALT, sensitive_words=_SENSITIVE_WORDS, ) def test_anonymize_files_dir(tmpdir): """Test anonymize_files with a file in root of input dir.""" filename = "test.txt" input_dir = tmpdir.mkdir("input") input_dir.join(filename).write(_INPUT_CONTENTS) output_dir = tmpdir.mkdir("output") output_file = output_dir.join(filename) anonymize_files( str(input_dir), str(output_dir), 
True, True, salt=_SALT, sensitive_words=_SENSITIVE_WORDS, ) # Make sure output file exists and matches the ref assert os.path.isfile(str(output_file)) assert read_file(str(output_file)) == _REF_CONTENTS def test_anonymize_files_dir_skip_hidden(tmpdir): """Test that file starting with '.' is skipped.""" filename = ".test.txt" input_dir = tmpdir.mkdir("input") input_file = input_dir.join(filename) input_file.write(_INPUT_CONTENTS) output_dir = tmpdir.mkdir("output") output_file = output_dir.join(filename) anonymize_files( str(input_dir), str(output_dir), True, True, salt=_SALT, sensitive_words=_SENSITIVE_WORDS, ) # Make sure output file does not exist assert not os.path.exists(str(output_file)) def test_anonymize_files_dir_nested(tmpdir): """Test anonymize_files with files in nested dirs i.e. not at root of input dir.""" filename = "test.txt" input_dir = tmpdir.mkdir("input") input_dir.mkdir("subdir1").join(filename).write(_INPUT_CONTENTS) input_dir.mkdir("subdir2").mkdir("subsubdir").join(filename).write(_INPUT_CONTENTS) output_dir = tmpdir.mkdir("output") output_file_1 = output_dir.join("subdir1").join(filename) output_file_2 = output_dir.join("subdir2").join("subsubdir").join(filename) anonymize_files( str(input_dir), str(output_dir), True, True, salt=_SALT, sensitive_words=_SENSITIVE_WORDS, ) # Make sure both output files exists and match the ref assert os.path.isfile(str(output_file_1)) assert read_file(str(output_file_1)) == _REF_CONTENTS assert os.path.isfile(str(output_file_2)) assert read_file(str(output_file_2)) == _REF_CONTENTS def test_anonymize_files_file(tmpdir): """Test anonymize_files with input file instead of dir.""" filename = "test.txt" input_file = tmpdir.join(filename) input_file.write(_INPUT_CONTENTS) output_file = tmpdir.mkdir("out").join(filename) anonymize_files( str(input_file), str(output_file), True, True, salt=_SALT, sensitive_words=_SENSITIVE_WORDS, ) # Make sure output file exists and matches the ref assert os.path.isfile(str(output_file)) assert read_file(str(output_file)) == _REF_CONTENTS def read_file(file_path): """Read and return contents of file at specified path.""" with open(file_path, "r") as f: return f.read()
intentionet/netconan
tests/unit/test_anonymize_files.py
Python
apache-2.0
6,740
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement import os import json import sys import getopt import logging from shadowsocks.common import to_bytes, to_str, IPNetwork from shadowsocks import encrypt VERBOSE_LEVEL = 5 verbose = 0 def check_python(): info = sys.version_info if info[0] == 2 and not info[1] >= 6: print('Python 2.6+ required') sys.exit(1) elif info[0] == 3 and not info[1] >= 3: print('Python 3.3+ required') sys.exit(1) elif info[0] not in [2, 3]: print('Python version not supported') sys.exit(1) def print_exception(e): global verbose logging.error(e) if verbose > 0: import traceback traceback.print_exc() def print_shadowsocks(): version = '' try: import pkg_resources version = pkg_resources.get_distribution('shadowsocks').version except Exception: pass print('Shadowsocks %s' % version) def find_config(): config_path = 'config.json' if os.path.exists(config_path): return config_path config_path = os.path.join(os.path.dirname(__file__), '../', 'config.json') if os.path.exists(config_path): return config_path return None def check_config(config, is_local): if config.get('daemon', None) == 'stop': # no need to specify configuration for daemon stop return if is_local and not config.get('password', None): logging.error('password not specified') print_help(is_local) sys.exit(2) if not is_local and not config.get('password', None) \ and not config.get('port_password', None): logging.error('password or port_password not specified') print_help(is_local) sys.exit(2) if 'local_port' in config: config['local_port'] = int(config['local_port']) if 'server_port' in config and type(config['server_port']) != list: config['server_port'] = int(config['server_port']) if config.get('local_address', '') in [b'0.0.0.0']: logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe') if config.get('server', '') in ['127.0.0.1', 'localhost']: logging.warn('warning: server set to listen on %s:%s, are you sure?' % (to_str(config['server']), config['server_port'])) if (config.get('method', '') or '').lower() == 'table': logging.warn('warning: table is not safe; please use a safer cipher, ' 'like AES-256-CFB') if (config.get('method', '') or '').lower() == 'rc4': logging.warn('warning: RC4 is not safe; please use a safer cipher, ' 'like AES-256-CFB') if config.get('timeout', 300) < 100: logging.warn('warning: your timeout %d seems too short' % int(config.get('timeout'))) if config.get('timeout', 300) > 600: logging.warn('warning: your timeout %d seems too long' % int(config.get('timeout'))) if config.get('password') in [b'mypassword']: logging.error('DON\'T USE DEFAULT PASSWORD! 
Please change it in your ' 'config.json!') sys.exit(1) if config.get('user', None) is not None: if os.name != 'posix': logging.error('user can be used only on Unix') sys.exit(1) encrypt.try_cipher(config['password'], config['method']) def get_config(is_local): global verbose logging.basicConfig(level=logging.INFO, format='%(levelname)-s: %(message)s') if is_local: shortopts = 'hd:s:b:p:k:l:m:c:t:vq' longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=', 'version'] else: shortopts = 'hd:s:p:k:m:c:t:vq' longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=', 'forbidden-ip=', 'user=', 'version'] try: config_path = find_config() optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts) for key, value in optlist: if key == '-c': config_path = value if config_path: logging.info('loading config from %s' % config_path) with open(config_path, 'rb') as f: try: config = json.loads(f.read().decode('utf8'), object_hook=_decode_dict) except ValueError as e: logging.error('found an error in config.json: %s', e.message) sys.exit(1) else: config = {} optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts) v_count = 0 for key, value in optlist: if key == '-p': config['server_port'] = int(value) elif key == '-k': config['password'] = to_bytes(value) elif key == '-l': config['local_port'] = int(value) elif key == '-s': config['server'] = to_str(value) elif key == '-m': config['method'] = to_str(value) elif key == '-b': config['local_address'] = to_str(value) elif key == '-v': v_count += 1 # '-vv' turns on more verbose mode config['verbose'] = v_count elif key == '-t': config['timeout'] = int(value) elif key == '--fast-open': config['fast_open'] = True elif key == '--workers': config['workers'] = int(value) elif key == '--user': config['user'] = to_str(value) elif key == '--forbidden-ip': config['forbidden_ip'] = to_str(value).split(',') elif key in ('-h', '--help'): if is_local: print_local_help() else: print_server_help() sys.exit(0) elif key == '--version': print_shadowsocks() sys.exit(0) elif key == '-d': config['daemon'] = to_str(value) elif key == '--pid-file': config['pid-file'] = to_str(value) elif key == '--log-file': config['log-file'] = to_str(value) elif key == '-q': v_count -= 1 config['verbose'] = v_count except getopt.GetoptError as e: print(e, file=sys.stderr) print_help(is_local) sys.exit(2) if not config: logging.error('config not specified') print_help(is_local) sys.exit(2) config['password'] = to_bytes(config.get('password', b'')) config['method'] = to_str(config.get('method', 'aes-256-cfb')) config['port_password'] = config.get('port_password', None) config['timeout'] = int(config.get('timeout', 300)) config['fast_open'] = config.get('fast_open', False) config['workers'] = config.get('workers', 1) config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid') config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log') config['workers'] = config.get('workers', 1) config['verbose'] = config.get('verbose', False) config['local_address'] = to_str(config.get('local_address', '127.0.0.1')) config['local_port'] = config.get('local_port', 1080) if is_local: if config.get('server', None) is None: logging.error('server addr not specified') print_local_help() sys.exit(2) else: config['server'] = to_str(config['server']) else: config['server'] = to_str(config.get('server', '0.0.0.0')) try: config['forbidden_ip'] = \ IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128')) except Exception as e: logging.error(e) sys.exit(2) 
config['server_port'] = config.get('server_port', 8388) logging.getLogger('').handlers = [] logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE') if config['verbose'] >= 2: level = VERBOSE_LEVEL elif config['verbose'] == 1: level = logging.DEBUG elif config['verbose'] == -1: level = logging.WARN elif config['verbose'] <= -2: level = logging.ERROR else: level = logging.INFO verbose = config['verbose'] logging.basicConfig(level=level, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') check_config(config, is_local) return config def print_help(is_local): if is_local: print_local_help() else: print_server_help() def print_local_help(): print('''usage: sslocal [OPTION]... A fast tunnel proxy that helps you bypass firewalls. You can supply configurations via either config file or command line arguments. Proxy options: -c CONFIG path to config file -s SERVER_ADDR server address -p SERVER_PORT server port, default: 8388 -b LOCAL_ADDR local binding address, default: 127.0.0.1 -l LOCAL_PORT local port, default: 1080 -k PASSWORD password -m METHOD encryption method, default: aes-256-cfb -t TIMEOUT timeout in seconds, default: 300 --fast-open use TCP_FASTOPEN, requires Linux 3.7+ General options: -h, --help show this help message and exit -d start/stop/restart daemon mode --pid-file PID_FILE pid file for daemon mode --log-file LOG_FILE log file for daemon mode --user USER username to run as -v, -vv verbose mode -q, -qq quiet mode, only show warnings/errors --version show version information Online help: <https://github.com/shadowsocks/shadowsocks> ''') def print_server_help(): print('''usage: ssserver [OPTION]... A fast tunnel proxy that helps you bypass firewalls. You can supply configurations via either config file or command line arguments. Proxy options: -c CONFIG path to config file -s SERVER_ADDR server address, default: 0.0.0.0 -p SERVER_PORT server port, default: 8388 -k PASSWORD password -m METHOD encryption method, default: aes-256-cfb -t TIMEOUT timeout in seconds, default: 300 --fast-open use TCP_FASTOPEN, requires Linux 3.7+ --workers WORKERS number of workers, available on Unix/Linux --forbidden-ip IPLIST comma seperated IP list forbidden to connect General options: -h, --help show this help message and exit -d start/stop/restart daemon mode --pid-file PID_FILE pid file for daemon mode --log-file LOG_FILE log file for daemon mode --user USER username to run as -v, -vv verbose mode -q, -qq quiet mode, only show warnings/errors --version show version information Online help: <https://github.com/shadowsocks/shadowsocks> ''') def _decode_list(data): rv = [] for item in data: if hasattr(item, 'encode'): item = item.encode('utf-8') elif isinstance(item, list): item = _decode_list(item) elif isinstance(item, dict): item = _decode_dict(item) rv.append(item) return rv def _decode_dict(data): rv = {} for key, value in data.items(): if hasattr(value, 'encode'): value = value.encode('utf-8') elif isinstance(value, list): value = _decode_list(value) elif isinstance(value, dict): value = _decode_dict(value) rv[key] = value return rv
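# Illustrative sketch (not part of the original file): a minimal config.json
# accepted by get_config()/check_config() above; all values are placeholder
# examples, not recommendations.
#
#     {
#         "server": "0.0.0.0",
#         "server_port": 8388,
#         "local_address": "127.0.0.1",
#         "local_port": 1080,
#         "password": "use-a-strong-password-here",
#         "method": "aes-256-cfb",
#         "timeout": 300
#     }
#
# It would be loaded with e.g. `sslocal -c config.json` or `ssserver -c config.json`.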
meowlab/shadowsocks-comment
shadowsocks/shell.py
Python
apache-2.0
12,526
from .dual import DualGraph from .graph import Graph, NetworkGraph from .graph_convention import ConventionConverter, GraphConvention from .hex import DualHexGraph, TriGraph from .radial import DualRadialGraph, RadialGraph from .structured_quad import ( DualRectilinearGraph, DualStructuredQuadGraph, DualUniformRectilinearGraph, RectilinearGraph, StructuredQuadGraph, UniformRectilinearGraph, ) from .voronoi import DelaunayGraph, DualVoronoiGraph __all__ = [ "Graph", "NetworkGraph", "DualGraph", "StructuredQuadGraph", "RectilinearGraph", "UniformRectilinearGraph", "DualUniformRectilinearGraph", "DualRectilinearGraph", "DualStructuredQuadGraph", "DelaunayGraph", "DualVoronoiGraph", "TriGraph", "DualHexGraph", "RadialGraph", "DualRadialGraph", "ConventionConverter", "GraphConvention", ]
cmshobe/landlab
landlab/graph/__init__.py
Python
mit
887
#!/usr/bin/env python from __future__ import print_function import requests import requests_cache import bz2 import configparser import re from pprint import pprint #from cStringIO import StringIO from io import BytesIO import tarfile # only other sort-of-trustable mirror # BASE_URL = "http://mirrors.dotsrc.org/cygwin/" # the only one that has https and is reasonably fast BASE_URL = "https://mirrors.kernel.org/sourceware/cygwin/" requests_cache.install_cache('cache', backend='sqlite') def fetch_package_list(): """ Fetch and return package list from cygwin mirror. :rtype: str """ filename = "setup.bz2" url = BASE_URL + "x86/" + filename r = requests.get(url) ret = bz2.decompress(r.content) return ret.decode('utf-8') def get_http_mirror_list(): url = "https://sourceware.org/cygwin/mirrors.lst" r = requests.get(url) #lines = unicode(r.content.decode('utf-8')).strip().split("\n") lines = r.content.strip().split("\n") ret = [] for line in lines: uri_base, hostname, continent, country = line.split(";") if uri_base.startswith("http://"): ret.append(uri_base) return ret def _get_cygwin_version_data(s): ini_part = s.split("\n\n@ ", 1)[0] config = configparser.ConfigParser(empty_lines_in_values=False) config.read_string(u"[DEFAULT]\n" + ini_part) d = {} for key, val in config['DEFAULT'].items(): d[key] = val return d def _get_package_list(s): """ Convert setup.ini text file into list string for each package. :type s: str """ #print(type(s)) #print(s[:1000].split("@")) #pprint(s.split("\\n\\n@ ")) ret = s.split("\n\n@ ")[1:] #print(type(ret)) return ret def _parse_package(s): """ Given text of one package -> return dict contatining data. :type s: str :rtype: dict """ d = {} _rem = s #pprint(_rem) package_name, _rem = s.split("\n", 1) d[u'name'] = package_name # for: sdesc, ldesc, message -> saves and removes them def save_sdesc(m): if m is not None: d[u"sdesc"] = m.group(1).replace("\n", " ") return '' def save_ldesc(m): if m is not None: d[u"ldesc"] = m.group(1).replace("\n", " ") return '' def save_message(m): if m is not None: d[u"message"] = m.group(1).replace("\n", " ") return '' _rem = re.sub(r'sdesc: "(.[^"]+)"\n', save_sdesc, _rem, re.DOTALL) _rem = re.sub(r'ldesc: "(.[^"]+)"\n', save_ldesc, _rem, re.DOTALL | re.MULTILINE) # newline after: " _rem = re.sub(r'ldesc: "(?:\s+)(.[^"]+)"\n', save_ldesc, _rem, re.DOTALL | re.MULTILINE) _rem += "\n" _rem = re.sub(r'message: (\S+) "(.[^"]*)"\n', save_message, _rem, re.DOTALL) # remainder of package data should be correctly formatted # [prev] sections are skipped config = configparser.ConfigParser(empty_lines_in_values=False) config.read_string(u"[DEFAULT]\n" + _rem) for key, val in config['DEFAULT'].items(): d[key] = val return d def parse_package(s): """ Post processing for dictionary -> turn and 'install', 'source' into full urls, 'requires' to list. 
""" d = _parse_package(s) if 'requires' in d: d[u"requires"] = d.get('requires').split(" ") else: d[u"requires"] = [] if 'install' in d: d[u"install"] = BASE_URL + d.get('install').split(" ", 1)[0] else: d[u"install"] = "" if 'source' in d: d[u"source"] = BASE_URL + d.get('source').split(" ", 1)[0] else: d[u"source"] = "" return d # def install(package_name): def install_package(url): #url1 = "https://mirrors.kernel.org/sourceware/cygwin/x86/release/base-cygwin/base-cygwin-3.3-1.tar.bz2" #url2 = "https://mirrors.kernel.org/sourceware/cygwin/x86/release/cygwin/cygwin-1.7.27-2.tar.xz" r = requests.get(url) str_f = BytesIO(r.content) tf = tarfile.open(fileobj=str_f) tf.extractall("./root") def get_packages(): """ :rtype: dict """ data = fetch_package_list() package_strs = _get_package_list(data) ret = {} for el in package_strs: p = parse_package(el) package_name = p['name'] ret[package_name] = p return ret def resolve_deps(initial_selection): """ Returns list of all package names, that should be installed in order to resolve all dependencies for specific package. """ packages = get_packages() deps = set() assert type(initial_selection) in [str, list] if type(initial_selection) == list: for el in initial_selection: deps.add(el) else: deps.add(initial_selection) def add_deps(package_name): for dependent_package_name in packages[package_name]['requires']: deps.add(dependent_package_name) while True: pre_len = len(deps) for package in list(deps): add_deps(package) after_len = len(deps) if pre_len == after_len: break return deps if __name__ == "__main__": package_names = resolve_deps("cygwin") print(package_names) packages = get_packages() for package_name in package_names: url = packages[package_name]['install'] print(url) install_package(url)
iljau/cyg_fetcher
fetch_package_list.py
Python
mit
5,401
# pylint: disable=g-bad-file-header # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the SWIG-wrapped quantize training rewriting.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf class PywrapQuantizeTrainingTest(tf.test.TestCase): # Mainly to verify the python interface is working. # More tests for this function can be found in the related c++ tests. def testQuantizeTraining(self): with tf.Session() as sess: a = tf.constant(6.0, shape=[1, 1]) b = tf.constant(7.0, shape=[1, 1]) c = tf.matmul(a, b, name='matmul') self.assertEquals(len(sess.graph_def.node), 3) result = tf.train.do_quantize_training_on_graphdef( sess.graph_def, 8) # 2 convert ops are added so it should have 5 nodes now. self.assertEquals(len(result.node), 5) self.assertEquals(c.eval(), 42) if __name__ == '__main__': tf.test.main()
dhalleine/tensorflow
tensorflow/python/client/quantize_training_test.py
Python
apache-2.0
1,626
from django.contrib.auth import get_user_model from rest_framework_json_api import relations from rest_framework_json_api.serializers import DurationField, IntegerField, Serializer from timed.projects.models import Customer, Project, Task from timed.serializers import TotalTimeRootMetaMixin class YearStatisticSerializer(TotalTimeRootMetaMixin, Serializer): duration = DurationField() year = IntegerField() class Meta: resource_name = "year-statistics" class MonthStatisticSerializer(TotalTimeRootMetaMixin, Serializer): duration = DurationField() year = IntegerField() month = IntegerField() class Meta: resource_name = "month-statistics" class CustomerStatisticSerializer(TotalTimeRootMetaMixin, Serializer): duration = DurationField() customer = relations.ResourceRelatedField( source="task__project__customer", model=Customer, read_only=True ) included_serializers = {"customer": "timed.projects.serializers.CustomerSerializer"} class Meta: resource_name = "customer-statistics" class ProjectStatisticSerializer(TotalTimeRootMetaMixin, Serializer): duration = DurationField() project = relations.ResourceRelatedField( source="task__project", model=Project, read_only=True ) included_serializers = {"project": "timed.projects.serializers.ProjectSerializer"} class Meta: resource_name = "project-statistics" class TaskStatisticSerializer(TotalTimeRootMetaMixin, Serializer): duration = DurationField(read_only=True) task = relations.ResourceRelatedField(model=Task, read_only=True) included_serializers = {"task": "timed.projects.serializers.TaskSerializer"} class Meta: resource_name = "task-statistics" class UserStatisticSerializer(TotalTimeRootMetaMixin, Serializer): duration = DurationField(read_only=True) user = relations.ResourceRelatedField(model=get_user_model(), read_only=True) included_serializers = {"user": "timed.employment.serializers.UserSerializer"} class Meta: resource_name = "user-statistics"
adfinis-sygroup/timed-backend
timed/reports/serializers.py
Python
agpl-3.0
2,106
from django.core.management.base import BaseCommand, CommandError from optparse import make_option from django_extensions.management.modelviz import generate_dot class Command(BaseCommand): option_list = BaseCommand.option_list + ( make_option('--disable-fields', '-d', action='store_true', dest='disable_fields', help='Do not show the class member fields'), make_option('--group-models', '-g', action='store_true', dest='group_models', help='Group models together respective to their application'), make_option('--all-applications', '-a', action='store_true', dest='all_applications', help='Automatically include all applications from INSTALLED_APPS'), make_option('--output', '-o', action='store', dest='outputfile', help='Render output file. Type of output dependend on file extensions. Use png or jpg to render graph to image.'), make_option('--layout', '-l', action='store', dest='layout', default='dot', help='Layout to be used by GraphViz for visualization. Layouts: circo dot fdp neato nop nop1 nop2 twopi'), make_option('--verbose-names', '-n', action='store_true', dest='verbose_names', help='Use verbose_name of models and fields'), make_option('--language', '-L', action='store', dest='language', help='Specify language used for verbose_name localization'), make_option('--exclude-columns', '-x', action='store', dest='exclude_columns', help='Exclude specific column(s) from the graph. Can also load exclude list from file.'), make_option('--exclude-models', '-X', action='store', dest='exclude_models', help='Exclude specific model(s) from the graph. Can also load exclude list from file.'), make_option('--inheritance', '-e', action='store_true', dest='inheritance', help='Include inheritance arrows'), ) help = ("Creates a GraphViz dot file for the specified app names. You can pass multiple app names and they will all be combined into a single model. Output is usually directed to a dot file.") args = "[appname]" label = 'application name' requires_model_validation = True can_import_settings = True def handle(self, *args, **options): if len(args) < 1 and not options['all_applications']: raise CommandError("need one or more arguments for appname") dotdata = generate_dot(args, **options) if options['outputfile']: self.render_output(dotdata, **options) else: self.print_output(dotdata) def print_output(self, dotdata): print(dotdata.encode('utf-8')) def render_output(self, dotdata, **kwargs): try: import pygraphviz except ImportError: raise CommandError("You need to install pygraphviz python module") vizdata = ' '.join(dotdata.split("\n")).strip().encode('utf-8') version = pygraphviz.__version__.rstrip("-svn") try: if tuple(int(v) for v in version.split('.')) < (0, 36): # HACK around old/broken AGraph before version 0.36 (ubuntu ships with this old version) import tempfile tmpfile = tempfile.NamedTemporaryFile() tmpfile.write(vizdata) tmpfile.seek(0) vizdata = tmpfile.name except ValueError: pass graph = pygraphviz.AGraph(vizdata) graph.layout(prog=kwargs['layout']) graph.draw(kwargs['outputfile'])
shash/IconDB
django_extensions/management/commands/graph_models.py
Python
agpl-3.0
3,657
# Copyright 2018 ForgeFlow, S.L. (https://www.forgeflow.com) # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html). from odoo import api, models class OutstandingStatement(models.AbstractModel): """Model of Outstanding Statement""" _inherit = "statement.common" _name = "report.partner_statement.outstanding_statement" _description = "Partner Outstanding Statement" def _display_lines_sql_q1(self, partners, date_end, account_type): partners = tuple(partners) return str( self._cr.mogrify( """ SELECT l.id, m.name AS move_id, l.partner_id, l.date, l.name, l.blocked, l.currency_id, l.company_id, CASE WHEN l.ref IS NOT NULL THEN l.ref ELSE m.ref END as ref, CASE WHEN (l.currency_id is not null AND l.amount_currency > 0.0) THEN avg(l.amount_currency) ELSE avg(l.debit) END as debit, CASE WHEN (l.currency_id is not null AND l.amount_currency < 0.0) THEN avg(-l.amount_currency) ELSE avg(l.credit) END as credit, (abs(COALESCE(l.balance, 0.0)) + sum( coalesce(pr.pr_sign, 0.0) * coalesce(pr.amount, 0.0)) ) * sign(COALESCE(l.balance, 0.0)) AS open_amount, (abs(COALESCE(l.amount_currency, 0.0)) + sum( coalesce(pr.pr_sign, 0.0) * CASE WHEN pr.currency_id IS NOT NULL AND pr.currency_id = l.currency_id THEN coalesce(pr.amount_currency, 0.0) WHEN cur.id IS NOT NULL AND ROUND( abs(COALESCE(l.balance, 0.0)), cur.decimal_places) > 0.0 THEN ROUND(coalesce(pr.amount, 0.0) * COALESCE(l.amount_currency, 0.0) / NULLIF(l.balance, 0.0), cur.decimal_places) ELSE ROUND(coalesce(pr.amount, 0.0) * COALESCE(( SELECT r.rate FROM res_currency_rate r JOIN account_move_line aml ON pr.credit_move_id = aml.id WHERE r.currency_id = l.currency_id AND r.name <= aml.date AND (r.company_id IS NULL OR r.company_id = l.company_id) ORDER BY r.company_id, r.name DESC LIMIT 1), 1.0), cur.decimal_places) END) ) * sign(COALESCE(l.amount_currency, 0.0)) AS open_amount_currency, CASE WHEN l.date_maturity is null THEN l.date ELSE l.date_maturity END as date_maturity FROM ( SELECT l.*, CASE WHEN l.debit = 0.0 AND l.credit = 0.0 AND l.currency_id IS NOT NULL AND ROUND(COALESCE(l.amount_currency, 0.0), cur.decimal_places) > 0.0 THEN 1 WHEN l.debit = 0.0 AND l.credit = 0.0 AND l.currency_id IS NOT NULL AND ROUND(COALESCE(l.amount_currency, 0.0), cur.decimal_places) < 0.0 THEN -1 WHEN l.balance > 0.0 THEN 1 ELSE -1 END as sign FROM account_move_line l LEFT JOIN res_currency cur ON cur.id = l.currency_id ) l JOIN account_move m ON l.move_id = m.id LEFT JOIN res_currency cur ON cur.id = l.currency_id LEFT JOIN LATERAL (SELECT pr.*, CASE WHEN pr.credit_move_id = l.id THEN l.sign ELSE -l.sign END AS pr_sign FROM account_partial_reconcile pr WHERE pr.max_date <= %(date_end)s AND ( (pr.debit_move_id = l.id) OR (pr.credit_move_id = l.id)) ) as pr ON TRUE WHERE l.partner_id IN %(partners)s AND l.account_internal_type = %(account_type)s AND ( (pr.id IS NOT NULL AND pr.max_date <= %(date_end)s) OR (pr.id IS NULL) ) AND l.date <= %(date_end)s AND m.state IN ('posted') GROUP BY l.id, l.partner_id, m.name, l.date, l.date_maturity, l.name, CASE WHEN l.ref IS NOT NULL THEN l.ref ELSE m.ref END, l.blocked, l.currency_id, l.balance, l.amount_currency, l.company_id """, locals(), ), "utf-8", ) def _display_lines_sql_q2(self): return str( self._cr.mogrify( """ SELECT Q1.partner_id, Q1.currency_id, Q1.move_id, Q1.date, Q1.date_maturity, Q1.debit, Q1.credit, Q1.name, Q1.ref, Q1.blocked, Q1.company_id, CASE WHEN Q1.currency_id is not null THEN Q1.open_amount_currency ELSE Q1.open_amount END as open_amount FROM Q1 """, locals(), ), "utf-8", ) def 
_display_lines_sql_q3(self, company_id): return str( self._cr.mogrify( """ SELECT Q2.partner_id, Q2.move_id, Q2.date, Q2.date_maturity, Q2.name, Q2.ref, Q2.debit, Q2.credit, Q2.debit-Q2.credit AS amount, blocked, COALESCE(Q2.currency_id, c.currency_id) AS currency_id, Q2.open_amount FROM Q2 JOIN res_company c ON (c.id = Q2.company_id) JOIN res_currency cur ON cur.id = COALESCE(Q2.currency_id, c.currency_id) WHERE c.id = %(company_id)s AND round(Q2.open_amount, cur.decimal_places) != 0.0 """, locals(), ), "utf-8", ) def _get_account_display_lines( self, company_id, partner_ids, date_start, date_end, account_type ): res = dict(map(lambda x: (x, []), partner_ids)) partners = tuple(partner_ids) # pylint: disable=E8103 self.env.cr.execute( """ WITH Q1 as (%s), Q2 AS (%s), Q3 AS (%s) SELECT partner_id, currency_id, move_id, date, date_maturity, debit, credit, amount, open_amount, name, ref, blocked FROM Q3 ORDER BY date, date_maturity, move_id""" % ( self._display_lines_sql_q1(partners, date_end, account_type), self._display_lines_sql_q2(), self._display_lines_sql_q3(company_id), ) ) for row in self.env.cr.dictfetchall(): res[row.pop("partner_id")].append(row) return res @api.model def _get_report_values(self, docids, data=None): if not data: data = {} if "company_id" not in data: wiz = self.env["outstanding.statement.wizard"].with_context( active_ids=docids, model="res.partner" ) data.update(wiz.create({})._prepare_statement()) data["amount_field"] = "open_amount" return super()._get_report_values(docids, data)
OCA/account-financial-reporting
partner_statement/report/outstanding_statement.py
Python
agpl-3.0
7,210
from bigsi.bloom.bloomfilter import generate_hashes from bigsi.bloom.bloomfilter import BloomFilter
Phelimb/cbg
bigsi/bloom/__init__.py
Python
mit
100
import wave import struct import math SAMPLE_LEN = 15000 noise_output = wave.open('noise.wav', 'w') noise_output.setparams((2, 2, 44100, 0, 'NONE', 'not compressed')) for i in range(0, SAMPLE_LEN): value = int(math.sin(i)*400) packed_value = struct.pack('h', value) noise_output.writeframes(packed_value) noise_output.writeframes(packed_value) noise_output.close()
dasMalle/AScriptADay2016
January/10-NoiseCreator/sound.py
Python
gpl-2.0
401
import sys import xbmc import platform def get_platform(): ret = { "arch": sys.maxsize > 2 ** 32 and "x64" or "x86", "os": "", "version": platform.release() } if xbmc.getCondVisibility("system.platform.android"): ret["os"] = "android" if "arm" in platform.machine() or "aarch" in platform.machine(): ret["arch"] = "arm" elif xbmc.getCondVisibility("system.platform.linux"): ret["os"] = "linux" if "aarch" in platform.machine() or "arm64" in platform.machine(): if xbmc.getCondVisibility("system.platform.linux.raspberrypi"): ret["arch"] = "armv7" else: ret["arch"] = "arm64" elif "armv7" in platform.machine(): ret["arch"] = "armv7" elif "arm" in platform.machine(): ret["arch"] = "arm" elif xbmc.getCondVisibility("system.platform.xbox"): ret["os"] = "windows" ret["arch"] = "x64" elif xbmc.getCondVisibility("system.platform.windows"): ret["os"] = "windows" if platform.machine().endswith('64'): ret["arch"] = "x64" elif xbmc.getCondVisibility("system.platform.osx"): ret["os"] = "darwin" ret["arch"] = "x64" elif xbmc.getCondVisibility("system.platform.ios"): ret["os"] = "ios" ret["arch"] = "arm" return ret PLATFORM = get_platform()
afedchin/xbmctorrent
resources/site-packages/xbmctorrent/osarch.py
Python
gpl-3.0
1,418
from collections import OrderedDict GENDERS = ( 'male', 'female', 'unknown' ) AFFECTED_STATUSES = ( 'affected', 'unaffected', 'unknown' ) class Individual(): def __init__(self, indiv_id, **kwargs): self.indiv_id = indiv_id self.project_id = kwargs.get('project_id', '.') self.family_id = kwargs.get('family_id', '.') self.paternal_id = kwargs.get('paternal_id', '.') self.maternal_id = kwargs.get('maternal_id', '.') self.gender = kwargs.get('gender', '.') self.affected_status = kwargs.get('affected_status', '.') def toJSON(self): return { 'indiv_id': self.indiv_id, 'project_id': self.project_id, 'family_id': self.family_id, 'paternal_id': self.paternal_id, 'maternal_id': self.maternal_id, 'gender': self.gender, 'affected_status': self.affected_status, } class Family(): def __init__(self, family_id, individuals, **kwargs): self.family_id = family_id self.project_id = kwargs.get('project_id', '.') indiv_ids = [i.indiv_id for i in individuals] if len(indiv_ids) != len(set(indiv_ids)): raise Exception('Individual IDs are not unique') self.individuals = {indiv.indiv_id: indiv for indiv in individuals} def toJSON(self): return { 'family_id': self.family_id, 'project_id': self.project_id, 'individuals': {indiv.indiv_id: indiv.toJSON() for indiv in self.individuals.values()} } def indiv_id_list(self): return self.individuals.keys() def contains_indiv_id(self, indiv_id): return indiv_id in self.individuals def num_individuals(self): return len(self.individuals) def get_individuals(self): return self.individuals.values() def get_individual(self, indiv_id): return self.individuals.get(indiv_id) def get_affecteds(self): return [i for i in self.get_individuals() if i.affected_status == 'affected'] def affected_status_map(self): return {indiv.indiv_id: indiv.affected_status for indiv in self.get_individuals()} class Cohort(): def __init__(self, cohort_id, individuals, **kwargs): self.cohort_id = cohort_id self.project_id = kwargs.get('project_id', '.') indiv_ids = [i.indiv_id for i in individuals] if len(indiv_ids) != len(set(indiv_ids)): raise Exception('Individual IDs are not unique') self.individuals = {indiv.indiv_id: indiv for indiv in individuals} def toJSON(self): return { 'cohort_id': self.cohort_id, 'project_id': self.project_id, 'individuals': {indiv.indiv_id: indiv.toJSON() for indiv in self.individuals.values()} } def indiv_id_list(self): return self.individuals.keys() def contains_indiv_id(self, indiv_id): return indiv_id in self.individuals def get_individual(self, indiv_id): return self.individuals.get(indiv_id) class FamilyGroup(): def __init__(self, families, **kwargs): families = [((family.project_id, family.family_id), family) for family in families] self.families = OrderedDict(sorted(families, key=lambda t: t[0])) def toJSON(self): return [family.toJSON() for family in self.get_families()] def get_families(self): return self.families.values()
macarthur-lab/xbrowse
xbrowse/core/samples.py
Python
agpl-3.0
3,504
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=redefined-builtin, invalid-name """Operators used in TIR expression.""" import tvm._ffi from tvm.runtime import convert, const from tvm.ir import Array, Op from .buffer import Buffer from .expr import Call, StringImm, Var, CommReducer from . import _ffi_api def _pack_buffer(buf): """Build intrinsics that packs the buffer.""" shape = Call("handle", "tir.tvm_stack_make_shape", buf.shape) strides = Call("handle", "tir.tvm_stack_make_shape", buf.strides) if buf.strides else 0 pack_args = [ buf.data, shape, strides, len(buf.shape), const(0, dtype=buf.dtype), buf.elem_offset, ] return Call("handle", Op.get("tir.tvm_stack_make_array"), pack_args) def call_packed(*args): """Build expression by call an external packed function. The argument to packed function can be Expr or Buffer. The argument is the corresponding POD type when Expr is presented. When the argument is Buffer, the corresponding PackedFunc will recieve an TVMArrayHandle whose content is valid during the callback period. If the PackedFunc is a python callback, then the corresponding argument is NDArray. Parameters ---------- args : list of Expr or Buffer. Positional arguments. Returns ------- call : PrimExpr The call expression. See Also -------- te.extern : Create tensor with extern function call. """ call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args] return Call("int32", Op.get("tir.tvm_call_packed"), call_args) def call_intrin(dtype, func_name, *args): """Build expression by calling an intrinsic function. Intrinsics can be overloaded with multiple data types via the intrinsic translation rule. Parameters ---------- dtype : str The data type of the result. func_name: str The intrinsic function name. args : list Positional arguments. Returns ------- call : PrimExpr The call expression. """ return Call(dtype, func_name, convert(args)) def call_pure_extern(dtype, func_name, *args): """Build expression by calling a pure extern function. Parameters ---------- dtype : str The data type of the result. func_name: str The extern function name. args : list Positional arguments. Returns ------- call : PrimExpr The call expression. """ return Call(dtype, Op.get("tir.call_pure_extern"), convert((StringImm(func_name),) + args)) def call_extern(dtype, func_name, *args): """Build expression by calling a extern function. Parameters ---------- dtype : str The data type of the result. func_name: str The extern function name. args : list Positional arguments. Returns ------- call : PrimExpr The call expression. 
""" return Call(dtype, Op.get("tir.call_extern"), convert((StringImm(func_name),) + args)) def call_llvm_intrin(dtype, name, *args): """Build expression by calling a llvm intrinsic function Parameters ---------- dtype : str The data type of the result. name : str The name of the llvm intrinsic function. args : list Poistional arguments. Returns ------- call : PrimExpr The call expression. """ # pylint: disable=import-outside-toplevel from tvm.target import codegen llvm_id = codegen.llvm_lookup_intrinsic_id(name) assert llvm_id != 0, "%s is not an LLVM intrinsic" % name return call_intrin( dtype, Op.get("tir.call_llvm_intrin"), tvm.tir.const(llvm_id, "uint32"), *args ) def call_llvm_pure_intrin(dtype, name, *args): """Build expression by calling a pure llvm intrinsic function Parameters ---------- dtype : str The data type of the result. name : str The name of the llvm intrinsic function. args : list Poistional arguments. Returns ------- call : PrimExpr The call expression. """ # pylint: disable=import-outside-toplevel from tvm.target import codegen llvm_id = codegen.llvm_lookup_intrinsic_id(name) assert llvm_id != 0, "%s is not an LLVM intrinsic" % name return call_intrin( dtype, Op.get("tir.call_llvm_pure_intrin"), tvm.tir.const(llvm_id, "uint32"), *args ) def any(*args): """Create a new experssion of the union of all conditions in the arguments Parameters ---------- args : list List of symbolic boolean expressions Returns ------- expr: Expr Expression """ if not args: raise ValueError("Any must take at least 1 argument") if len(args) == 1: return args[0] ret = _ffi_api._OpOr(args[0], args[1]) for i in range(2, len(args)): ret = _ffi_api._OpOr(ret, args[i]) return ret def all(*args): """Create a new experssion of the intersection of all conditions in the arguments Parameters ---------- args : list List of symbolic boolean expressions Returns ------- expr: Expr Expression """ if not args: raise ValueError("Any must take at least 1 argument") if len(args) == 1: return args[0] ret = _ffi_api._OpAnd(args[0], args[1]) for i in range(2, len(args)): ret = _ffi_api._OpAnd(ret, args[i]) return ret @tvm._ffi.register_func("tvm.default_trace_action") def _tvm_default_trace_action(*args): print(list(args)) def trace(args, trace_action="tvm.default_trace_action"): """Trace tensor data at the runtime. The trace function allows to trace specific tensor at the runtime. The tracing value should come as last argument. The trace action should be specified, by default tvm.default_trace_action is used. Parameters ---------- args : list of Expr or Buffers. Positional arguments. trace_action : str. The name of the trace action. Returns ------- call : PrimExpr The call expression. See Also -------- tvm.tir.call_packed : Creates packed function. """ if not isinstance(args, list): raise Exception("tvm.tir.trace consumes the args as list type") call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args] call_args.insert(0, trace_action) return tvm.tir.Call(args[-1].dtype, Op.get("tir.tvm_call_trace_packed"), call_args) def min_value(dtype): """minimum value of dtype Parameters ---------- dtype : str The data type. Returns ------- value : tvm.Expr The minimum value of dtype. """ return _ffi_api.min_value(dtype) def max_value(dtype): """maximum value of dtype Parameters ---------- dtype : str The data type. Returns ------- value : tvm.Expr The maximum value of dtype. """ return _ffi_api.max_value(dtype) def exp(x): """Take exponetial of input x. Parameters ---------- x : PrimExpr Input argument. 
Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.exp", x) def exp2(x): """Calculate 2**x Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.exp2", x) def exp10(x): """Calculate 10**x Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.exp10", x) def erf(x): """Take gauss error function of the input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.erf", x) def tanh(x): """Take hyperbolic tanh of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.tanh", x) def sigmoid(x): """Quick function to get sigmoid Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.sigmoid", x) def log(x): """Take log of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.log", x) def log2(x): """Take log2 of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.log2", x) def log10(x): """Take log10 of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.log10", x) def log1p(x): """Take log(x + 1) with respect to input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.log1p", x) def tan(x): """Take tan of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.tan", x) def cos(x): """Take cos of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.cos", x) def cosh(x): """Take cosh of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.cosh", x) def acos(x): """Take acos of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.acos", x) def acosh(x): """Take acos of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.acosh", x) def sin(x): """Take sin of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.sin", x) def sinh(x): """Take sinh of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.sinh", x) def asin(x): """Take asin of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.asin", x) def asinh(x): """Take asinh of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.asinh", x) def atan(x): """Take atan of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.atan", x) def atanh(x): """Take atanh of input x. 
Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.atanh", x) def atan2(x1, x2): """Take arctan2(x1, x2). Parameters ---------- x1 : PrimExpr Input argument. x2 : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x1.dtype, "tir.atan2", x1, x2) def sqrt(x): """Take square root of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.sqrt", x) def rsqrt(x): """Take reciprocal of square root of input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.rsqrt", x) def floor(x): """Take floor of float input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return _ffi_api.floor(x) def ceil(x): """Take ceil of float input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return _ffi_api.ceil(x) def trunc(x): """Get truncated value of the input. The truncated value of the scalar x is the nearest integer i which is closer to zero than x is. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return _ffi_api.trunc(x) def abs(x): """Get absolute value of the input element-wise. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return _ffi_api.abs(x) def round(x): """Round elements of the array to the nearest integer. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return _ffi_api.round(x) def nearbyint(x): """Round elements of the array to the nearest integer. This intrinsic uses llvm.nearbyint instead of llvm.round which is faster but will results different from te.round. Notably nearbyint rounds according to the rounding mode, whereas te.round (llvm.round) ignores that. For differences between the two see: https://en.cppreference.com/w/cpp/numeric/math/round https://en.cppreference.com/w/cpp/numeric/math/nearbyint Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return _ffi_api.nearbyint(x) def nextafter(x1, x2): """Return the next floating-point value after x1 towards x2. Parameters ---------- x1 : PrimExpr Input argument. x2 : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x1.dtype, "tir.nextafter", x1, x2) def hypot(x1, x2): """Equivalent to sqrt(x1**2 + x2**2), element-wise. Parameters ---------- x1 : PrimExpr Input argument. x2 : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x1.dtype, "tir.hypot", x1, x2) def copysign(x1, x2): """Change the sign of x1 to that of x2, element-wise. Parameters ---------- x1 : PrimExpr Input argument. x2 : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x1.dtype, "tir.copysign", x1, x2) def ldexp(x1, x2): """Returns x1 * (2 ** x2). Parameters ---------- x1 : PrimExpr Input argument. x2 : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x1.dtype, "tir.ldexp", x1, x2) def isnan(x): """Check if input value is Nan. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return _ffi_api.isnan(x) def isfinite(x): """Check if input value is finite. Parameters ---------- x : PrimExpr Input argument. 
Returns ------- y : PrimExpr The result. """ return _ffi_api.isfinite(x) def isinf(x): """Check if input value is infinite. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return _ffi_api.isinf(x) def power(x, y): """x power y Parameters ---------- x : PrimExpr Input argument. y : PrimExpr The exponent Returns ------- z : PrimExpr The result. """ return _ffi_api._OpPow(convert(x), convert(y)) def popcount(x): """Count the number of set bits in input x. Parameters ---------- x : PrimExpr Input argument. Returns ------- y : PrimExpr The result. """ return call_intrin(x.dtype, "tir.popcount", x) def q_multiply_shift(x, y, q, s): """Execute a multiplication between two Q-numbers x and y followed by a right shift s. The mathematical expression is: out = round(x*y*2^-s) More about Q-numbers here: https://en.wikipedia.org/wiki/Q_(number_format) The rounding rule is to the nearest value, rounding half up (i.e., round(x.1) = x and round (x.5) = x+1) Parameters ---------- x : PrimExpr First Q-number y : PrimExpr Second Q-number q : PrimExpr Number of fractional bits in x and y. Needs to be > 0 s : PrimExpr Integer shift Returns ------- y : PrimExpr The result. """ return call_intrin("int32", "tir.q_multiply_shift", x, y, q, s) def fmod(x, y): """Return the remainder of x divided by y with the same sign as x. Parameters ---------- x : PrimExpr Input argument. y : PrimExpr Input argument. Returns ------- z : PrimExpr The result. """ return call_intrin(x.dtype, "tir.fmod", x, y) def if_then_else(cond, t, f): """Conditional selection expression. Parameters ---------- cond : PrimExpr The condition t : PrimExpr The result expression if cond is true. f : PrimExpr The result expression if cond is false. Returns ------- result : Node The result of conditional expression. Note ---- Unlike Select, if_then_else will not execute the branch that does not satisfy the condition. You can use it to guard against out of bound access. Unlike Select, if_then_else cannot be vectorized if some lanes in the vector have different conditions. """ return _ffi_api._OpIfThenElse(convert(cond), convert(t), convert(f)) def div(a, b): """Compute a / b as in C/C++ semantics. Parameters ---------- a : PrimExpr The left hand operand, known to be non-negative. b : PrimExpr The right hand operand, known to be non-negative. Returns ------- res : PrimExpr The result expression. Note ---- When operands are integers, returns truncdiv(a, b). """ return _ffi_api._OpDiv(a, b) def indexdiv(a, b): """Compute floor(a / b) where a and b are non-negative. Parameters ---------- a : PrimExpr The left hand operand, known to be non-negative. b : PrimExpr The right hand operand, known to be non-negative. Returns ------- res : PrimExpr The result expression. Note ---- Use this function to split non-negative indices. This function may take advantage of operands' non-negativeness. """ return _ffi_api._OpIndexDiv(a, b) def indexmod(a, b): """Compute the remainder of indexdiv. a and b are non-negative. Parameters ---------- a : PrimExpr The left hand operand, known to be non-negative. b : PrimExpr The right hand operand, known to be non-negative. Returns ------- res : PrimExpr The result expression. Note ---- Use this function to split non-negative indices. This function may take advantage of operands' non-negativeness. """ return _ffi_api._OpIndexMod(a, b) def truncdiv(a, b): """Compute the truncdiv of two expressions. 
Parameters ---------- a : PrimExpr The left hand operand b : PrimExpr The right hand operand Returns ------- res : PrimExpr The result expression. Note ---- This is the default integer division behavior in C. """ return _ffi_api._OpTruncDiv(a, b) def truncmod(a, b): """Compute the truncmod of two expressions. Parameters ---------- a : PrimExpr The left hand operand b : PrimExpr The right hand operand Returns ------- res : PrimExpr The result expression. Note ---- This is the default integer division behavior in C. """ return _ffi_api._OpTruncMod(a, b) def floordiv(a, b): """Compute the floordiv of two expressions. Parameters ---------- a : PrimExpr The left hand operand b : PrimExpr The right hand operand Returns ------- res : PrimExpr The result expression. """ return _ffi_api._OpFloorDiv(a, b) def floormod(a, b): """Compute the floormod of two expressions. Parameters ---------- a : PrimExpr The left hand operand b : PrimExpr The right hand operand Returns ------- res : PrimExpr The result expression. """ return _ffi_api._OpFloorMod(a, b) def comm_reducer(fcombine, fidentity, name="reduce"): """Create a commutative reducer for reduction. Parameters ---------- fcombine : function(Expr -> Expr -> Expr) A binary function which takes two Expr as input to return a Expr. fidentity : function(str -> Expr) A function which takes a type string as input to return a const Expr. Returns ------- reducer : function A function which creates a reduce expression over axis. There are two ways to use it: 1. accept (expr, axis, where) to produce an Reduce Expr on specified axis; 2. simply use it with multiple Exprs. Example ------- .. code-block:: python n = te.var("n") m = te.var("m") mysum = te.comm_reducer(lambda x, y: x+y, lambda t: tvm.tir.const(0, dtype=t), name="mysum") A = te.placeholder((n, m), name="A") k = te.reduce_axis((0, m), name="k") B = te.compute((n,), lambda i: mysum(A[i, k], axis=k), name="B") """ def _reduce_directly(*args): num = len(args) # process `where` is None if num == 3 and args[2] is None: num = 2 res = args[0] for i in range(num - 1): res = fcombine(res, args[i + 1]) return res def _make_reduce(expr, axis, where=None, init=None): code = fcombine.__code__ assert fcombine.__code__.co_argcount == 2 expr = convert(expr) if init is not None: init = convert(init) if isinstance(expr, Array): size = len(expr) larr = [] rarr = [] dtypes = [] for i in range(size): dtype = expr[i].dtype dtypes.append(dtype) lname = code.co_varnames[0] + "_" + str(i) larr.append(Var(lname, dtype)) rname = code.co_varnames[1] + "_" + str(i) rarr.append(Var(rname, dtype)) if init is not None: init = convert(init) assert isinstance(init, Array) assert len(init) == size for init_i in range(size): init_i = convert(init_i) assert isinstance( init_i, (tvm.tir.ProducerLoad, tvm.tir.IntImm, tvm.tir.FloatImm) ) else: init = convert([]) lhs = convert(larr) rhs = convert(rarr) result = fcombine(lhs, rhs) id_elem = fidentity(*dtypes) else: assert isinstance(expr, tvm.ir.PrimExpr) size = 1 dtype = expr.dtype lvar = Var(code.co_varnames[0], dtype) rvar = Var(code.co_varnames[1], dtype) result = [fcombine(lvar, rvar)] id_elem = [fidentity(dtype)] lhs = convert([lvar]) rhs = convert([rvar]) expr = convert([expr]) if init is not None: assert isinstance(init, (tvm.tir.ProducerLoad, tvm.tir.IntImm, tvm.tir.FloatImm)) init = convert([init]) result = convert(result) id_elem = convert(id_elem) combiner = CommReducer(lhs, rhs, result, id_elem) axis = convert(axis if isinstance(axis, (list, tuple)) else [axis]) if where is None: 
where = convert(True) if init is None: outputs = tuple( tvm.tir.Reduce(combiner, expr, axis, where, i, convert([])) for i in range(size) ) else: outputs = tuple( tvm.tir.Reduce(combiner, expr, axis, where, i, init) for i in range(size) ) return outputs[0] if size == 1 else outputs # pylint: disable=keyword-arg-before-vararg def reducer(expr, axis, where=None, init=None, *args): if isinstance(axis, (tvm.tir.IterVar, list, tuple)): assert not args return _make_reduce(expr, axis, where, init) if where is None: assert not args return _reduce_directly(expr, axis) return _reduce_directly(expr, axis, where, *args) doc_str = """Create a {0} expression over axis. Parameters ---------- expr : PrimExpr The source expression. axis : IterVar The reduction IterVar axis where : optional, Expr Filtering predicate of the reduction. Returns ------- value : PrimExpr The result value. Example ------- .. code-block:: python m = te.var("m") n = te.var("n") A = te.placeholder((m, n), name="A") k = te.reduce_axis((0, n), name="k") # there are two way to use this {0} reducer: # mode 1, accept (expr, axis, where) to produce an Reduce Expr # tvm.{0} represents tvm.te.{0} or tvm.tir.{0}. B = te.compute((m,), lambda i: tvm.{0}(A[i, k], axis=k), name="B") # mode 2, simply use it with multiple Exprs: {0}_res = tvm.{0}(m, n) """ reducer.__doc__ = doc_str.format(name) return reducer # pylint: disable=unnecessary-lambda sum = comm_reducer(lambda x, y: x + y, lambda t: const(0, dtype=t), name="sum") min = comm_reducer(lambda x, y: _ffi_api._OpMin(x, y), max_value, name="min") max = comm_reducer(lambda x, y: _ffi_api._OpMax(x, y), min_value, name="max")
sxjscience/tvm
python/tvm/tir/op.py
Python
apache-2.0
28,372
"""Defined search unit tests.""" # Copyright 2015 Solinea, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from types import NoneType from mock import patch from rest_framework.status import HTTP_200_OK, HTTP_401_UNAUTHORIZED from rest_framework.test import APIRequestFactory, force_authenticate from goldstone.core.models import SavedSearch from goldstone.core.views import SavedSearchViewSet from goldstone.test_utils import Setup, create_and_login, \ AUTHORIZATION_PAYLOAD, CONTENT_BAD_TOKEN, CONTENT_NO_CREDENTIALS, \ BAD_TOKEN, BAD_UUID, TEST_USER_1, PAGE_SIZE SEARCH_URL = "/core/saved_search/" SEARCH_UUID_URL = SEARCH_URL + "%s/" SEARCH_UUID_RESULTS_URL = SEARCH_UUID_URL + "results/" class SearchSetup(Setup): """A base test class that simulates the installation fixtures for the saved_search tests. """ # load the system saved searches fixtures = ['core_initial_data.yaml'] class PermissionsTest(SearchSetup): """Test all API permissions.""" def test_not_logged_in(self): """We're not logged in.""" for method in (self.client.get, self.client.post): response = method(SEARCH_URL) self.assertContains(response, CONTENT_NO_CREDENTIALS, status_code=HTTP_401_UNAUTHORIZED) for method in (self.client.get, self.client.put, self.client.patch, self.client.delete): for url in [SEARCH_UUID_URL, SEARCH_UUID_RESULTS_URL]: response = method(url % BAD_UUID) self.assertContains(response, CONTENT_NO_CREDENTIALS, status_code=HTTP_401_UNAUTHORIZED) def test_bad_token(self): """We're logged in but present a bogus token.""" create_and_login() for method in (self.client.get, self.client.post): response = \ method(SEARCH_URL, HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN) self.assertContains(response, CONTENT_BAD_TOKEN, status_code=HTTP_401_UNAUTHORIZED) for method in (self.client.get, self.client.put, self.client.patch, self.client.delete): for url in [SEARCH_UUID_URL, SEARCH_UUID_RESULTS_URL]: response = \ method(url % BAD_UUID, HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN) self.assertContains(response, CONTENT_BAD_TOKEN, status_code=HTTP_401_UNAUTHORIZED) def test_normal(self): """We're logged in as a normal user. This test should pass. Because of how DRF works, we only need to test one call (GET) to verify that everything is hooked up correctly. """ # Create a normal user and get the authorization token. token = create_and_login() response = \ self.client.get(SEARCH_URL, HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token) # pylint: disable=E1101 self.assertEqual(response.status_code, HTTP_200_OK) def test_get_tenant_admin(self): """We're logged in as a tenant admin. This test should pass. Because of how DRF works, we only need to test one call (GET) to verify that everything is hooked up correctly. """ from django.contrib.auth import get_user_model # Create a normal user and get the authorization token. Then force the # user to be a tenant admin. 
token = create_and_login() user = get_user_model().objects.get(username=TEST_USER_1[0]) user.tenant_admin = True user.save() response = \ self.client.get(SEARCH_URL, HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token) # pylint: disable=E1101 self.assertEqual(response.status_code, HTTP_200_OK) class GetPostTests(SearchSetup): """GET and POST to /saved_search/.""" def test_get(self): """Good GET request, one page.""" # The GET's response should equal the contents of the SavedSearch # table's initial data. We verify the result count, the next and # previous keys, and each row's keys. We don't verify the contents # of each defined search. total_rows = SavedSearch.objects.filter(hidden=False).count() if total_rows > PAGE_SIZE: expected_rows = PAGE_SIZE expected_keys = ['created', 'name', 'protected', 'query', 'updated', 'uuid', 'owner', 'index_prefix', 'doc_type', 'timestamp_field', 'last_start', 'last_end', 'target_interval'] token = create_and_login() response = self.client.get( SEARCH_URL, HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token) # pylint: disable=E1101 self.assertEqual(response.status_code, HTTP_200_OK) response_content = json.loads(response.content) self.assertEqual(total_rows, response_content["count"]) self.assertIsNone(response_content["previous"]) if total_rows > PAGE_SIZE: self.assertEqual(len(response_content['results']), PAGE_SIZE) self.assertIsNotNone(response_content["next"]) else: self.assertEqual(len(response_content['results']), response_content["count"]) self.assertIsNone(response_content["next"]) for entry in response_content["results"]: for key in expected_keys: self.assertIn(key, entry) def test_get_pages(self): """Good GET request using pages.""" # We'll ask for the last page of single-entry pages. total_rows = SavedSearch.objects.filter(hidden=False).count() expected_prev = total_rows - 1 token = create_and_login() response = self.client.get( SEARCH_URL + "?page_size=1&page=%d" % total_rows, HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token) # pylint: disable=E1101 self.assertEqual(response.status_code, HTTP_200_OK) response_content = json.loads(response.content) self.assertEqual(total_rows, response_content["count"]) self.assertIsNone(response_content["next"]) self.assertEqual( response_content["previous"], 'http://testserver/core/saved_search/' '?page=%d&page_size=1' % expected_prev) self.assertEqual(1, len(response_content["results"])) def test_get_uuid(self): """Good GET request for one search.""" # Select one row from the pre-defined searches. 
row = SavedSearch.objects.filter(hidden=False)[0] token = create_and_login() factory = APIRequestFactory() view = SavedSearchViewSet.as_view({'get': 'retrieve'}) request = factory.get(SEARCH_URL, HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token) response = view(request, uuid=row.uuid) response.render() # pylint: disable=E1101 self.assertEqual(response.status_code, HTTP_200_OK) response_content = json.loads(response.content) for key in ['created', 'updated', 'last_start', 'last_end']: self.assertIsInstance(response_content[key], (basestring, NoneType)) self.assertEqual(row.name, response_content["name"]) self.assertEqual(row.protected, response_content["protected"]) self.assertEqual(row.query, response_content["query"]) self.assertEqual(row.uuid, response_content["uuid"]) self.assertEqual(row.owner, response_content["owner"]) self.assertEqual(row.index_prefix, response_content["index_prefix"]) self.assertEqual(row.doc_type, response_content["doc_type"]) self.assertEqual(row.timestamp_field, response_content["timestamp_field"]) self.assertEqual(row.target_interval, response_content["target_interval"])
slashk/goldstone-server
goldstone/core/tests_saved_search_view.py
Python
apache-2.0
8,909
# Copyright (c) 2013-2016 Molly White # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software # and associated documentation files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING # BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import logging import message from plugins.util import admin, command, humanize_list from queue import Empty @command("admincommandlist") def admincommands(m): """Provide a list of admin-only commands.""" #- !admincommands #- #- ```irc #- < GorillaWarfare> !admincommands #- < GorillaBot> My available admin commands are join, part, quit, setcommand, #- and unset. See http://molly.github.io/GorillaBot for documentation. #- ``` #- #- Say the available admin-only commands. This does not display command aliases. commands = [key for key in m.bot.admin_commands.keys() if not m.bot.admin_commands[key][1]] commands.sort() if len(commands) == 0: m.bot.private_message(m.location, "I have no available admin commands. See " "http://molly.github.io/GorillaBot for documentation.") elif len(commands) == 1: m.bot.private_message(m.location, "My available admin command is {0}. See " "http://molly.github.io/GorillaBot for " "documentation.".format(commands[0])) else: m.bot.private_message(m.location, "My available admin commands are {0}. See " "http://molly.github.io/GorillaBot for " "documentation.".format( humanize_list(commands))) @command("admins", "botops", "oplist") def adminlist(m): """Provide a list of current bot admins.""" #- !adminlist #- #- ```irc #- < GorillaWarfare> !adminlist #- < GorillaBot> My bot admin is GorillaWarfare. #- ``` #- #- Say the current bot operators. ops = list(m.bot.configuration["botops"].keys()) if ops: if len(ops) == 1: m.bot.private_message(m.location, "My bot admin is " + ops[0] + ".") else: m.bot.private_message(m.location, "My bot admins are " + humanize_list(ops)) else: nick = m.bot.configuration["nick"] m.bot.private_message(m.location, "{0} has no master. {0} is a free bot.".format(nick)) @command("pingall", "highlightall") def attention(m): """Ping everyone currently joined to the channel. Be careful to only turn this on if you trust those in the channel not to abuse it.""" #- !attention #- #- ```irc #- < GorillaWarfare> !attention #- < GorillaBot> user1, user2, user3: GorillaWarfare wants your attention #- ``` #- #- Ping all of the users in the channel. #- #- #### Settings #- `on` - Anyone can use this command. Be sure you trust everyone in the channel not to abuse #- it. #- `admin` - Only bot admins can use this command. 
logger = logging.getLogger("GorillaBot") attention_setting = m.bot.get_setting('attention', m.location) if attention_setting == 'admin': if not m.bot.is_admin(m.sender): m.bot.private_message(m.location, "Please ask a bot operator to perform this action for" " you.") return elif attention_setting != 'on': m.bot.private_message(m.location, "Command not enabled.") return # Okay, we're authorized to do this. m.bot.response_lock.acquire() ignored_messages = [] m.bot.send("NAMES {}".format(m.location)) while True: try: msg = m.bot.message_q.get(True, 120) except Empty: logger.error("No response from server when trying to get nicks. Shutting down.") m.bot.shutdown.set() return if isinstance(msg, message.Numeric): if msg.number == '353': nicks = msg.body.split() nicks = nicks[2:] nicks[0] = nicks[0][1:] sender = m.bot.parse_hostmask(m.sender)["nick"] try: nicks.remove(sender) nicks.remove(m.bot.configuration["nick"]) except ValueError: pass m.bot.private_message(m.location, "{0}: {1} wants your attention" .format(", ".join(nicks), sender)) break ignored_messages.append(msg) for msg in ignored_messages: m.bot.message_q.put(msg) m.bot.response_lock.release() @command("commandlist", "help") def commands(m): """Provide a list of commands available to all users.""" #- !commands #- #- ```irc #- < GorillaWarfare> !commands #- < GorillaBot> My available commands are admincommands, adminlist, commands, hug, #- link, spotify, and xkcd. See http://molly.github.io/GorillaBot #- for documentation. #- ``` #- #- Say the available all-user commands. This does not display command aliases. commands = [key for key in m.bot.commands.keys() if not m.bot.commands[key][1]] commands.sort() if len(commands) == 0: m.bot.private_message(m.location, "I have no available commands. See " "http://molly.github.io/GorillaBot for documentation.") elif len(commands) == 1: m.bot.private_message(m.location, "My available command is {0}. See " "http://molly.github.io/GorillaBot for " "documentation.".format(commands[0])) else: m.bot.private_message(m.location, "My available commands are {0}. See " "http://molly.github.io/GorillaBot for " "documentation.".format( humanize_list(commands)))
quanticle/GorillaBot
gorillabot/plugins/info.py
Python
mit
6,878
# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from __future__ import with_statement import os import sys from datetime import datetime # workaround on osx, disable kqueue if sys.platform == "darwin": os.environ['EVENT_NOKQUEUE'] = "1" try: import gevent except ImportError: raise RuntimeError("You need gevent installed to use this worker.") from gevent.pool import Pool from gevent.server import StreamServer from gevent import pywsgi import gunicorn from gunicorn.workers.async import AsyncWorker VERSION = "gevent/%s gunicorn/%s" % (gevent.__version__, gunicorn.__version__) BASE_WSGI_ENV = { 'GATEWAY_INTERFACE': 'CGI/1.1', 'SERVER_SOFTWARE': VERSION, 'SCRIPT_NAME': '', 'wsgi.version': (1, 0), 'wsgi.multithread': False, 'wsgi.multiprocess': False, 'wsgi.run_once': False } class GeventWorker(AsyncWorker): server_class = None wsgi_handler = None @classmethod def setup(cls): from gevent import monkey monkey.noisy = False monkey.patch_all() def timeout_ctx(self): return gevent.Timeout(self.cfg.keepalive, False) def run(self): self.socket.setblocking(1) pool = Pool(self.worker_connections) if self.server_class is not None: server = self.server_class( self.socket, application=self.wsgi, spawn=pool, log=self.log, handler_class=self.wsgi_handler) else: server = StreamServer(self.socket, handle=self.handle, spawn=pool) server.start() try: while self.alive: self.notify() if self.ppid != os.getppid(): self.log.info("Parent changed, shutting down: %s", self) break gevent.sleep(1.0) except KeyboardInterrupt: pass try: # Try to stop connections until timeout self.notify() server.stop(timeout=self.timeout) except: pass def handle_request(self, *args): try: super(GeventWorker, self).handle_request(*args) except gevent.GreenletExit: pass if hasattr(gevent.core, 'dns_shutdown'): def init_process(self): #gevent 0.13 and older doesn't reinitialize dns for us after forking #here's the workaround gevent.core.dns_shutdown(fail_requests=1) gevent.core.dns_init() super(GeventWorker, self).init_process() class GeventResponse(object): status = None headers = None response_length = None def __init__(self, status, headers, clength): self.status = status self.headers = headers self.response_length = clength class PyWSGIHandler(pywsgi.WSGIHandler): def log_request(self): start = datetime.fromtimestamp(self.time_start) finish = datetime.fromtimestamp(self.time_finish) response_time = finish - start resp = GeventResponse(self.status, self.response_headers, self.response_length) req_headers = [h.split(":", 1) for h in self.headers.headers] self.server.log.access(resp, req_headers, self.environ, response_time) def get_environ(self): env = super(PyWSGIHandler, self).get_environ() env['gunicorn.sock'] = self.socket env['RAW_URI'] = self.path return env class PyWSGIServer(pywsgi.WSGIServer): base_env = BASE_WSGI_ENV class GeventPyWSGIWorker(GeventWorker): "The Gevent StreamServer based workers." server_class = PyWSGIServer wsgi_handler = PyWSGIHandler
samabhi/pstHealth
venv/lib/python2.7/site-packages/gunicorn/workers/ggevent.py
Python
mit
3,735
x = []
n = int(raw_input())
for j in range(n):
    a = str(raw_input())
    if a == 'pwd':
        # Print the current absolute path, always wrapped in leading/trailing '/'
        if len(x) == 0:
            print '/'
        else:
            print '/' + '/'.join(x) + '/'
    else:
        # Strip the leading 'cd ' and walk the given path component by component
        a = a[3:]
        b = map(str, a.split('/'))
        if a[0] == '/':
            # Absolute path: start again from the root
            x = []
        for i in b:
            if len(i) != 0:
                if i == '..':
                    x.pop()
                else:
                    x.append(i)
Sarthak30/Codeforces
cd_and_pwd_commands.py
Python
gpl-2.0
334
#!/usr/bin/env python
from PyQt5.QtWidgets import QTextEdit, QMenu, QFileDialog, QSizePolicy
import mooseutils

class TerminalTextEdit(QTextEdit):
    """
    A readonly text edit that replaces terminal codes with appropriate html codes.
    Also uses fixed font.
    """
    def __init__(self, **kwds):
        super(TerminalTextEdit, self).__init__(**kwds)
        self.setStyleSheet("TerminalTextEdit { background: black; color: white; }")
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setReadOnly(True)

    def contextMenuEvent(self, event):
        """
        User requested a context menu.
        Input:
            event: The QEvent()
        """
        menu = QMenu()
        save_action = menu.addAction("Save")
        clear_action = menu.addAction("Clear")
        action = menu.exec_(event.globalPos())
        if action == save_action:
            self.save()
        elif action == clear_action:
            self.clear()

    def save(self):
        """
        Save the contents into a file.
        """
        fname, other = QFileDialog.getSaveFileName(self, "Choose output", "", "Output file (*.log *.txt)")
        if fname:
            try:
                with open(fname, "w") as f:
                    f.write(self.toPlainText())
                mooseutils.mooseMessage("Saved content to %s" % fname)
            except Exception as e:
                mooseutils.mooseError("Failed to save file: %s" % e, dialog=True)

    def clear(self):
        """
        Clear the output
        """
        self.setHtml("")

if __name__ == "__main__":
    from PyQt5.QtWidgets import QApplication
    import sys
    qapp = QApplication(sys.argv)
    w = TerminalTextEdit()
    w.append('<span style="color:red;">foo</span>')
    w.show()
    w.setEnabled(True)
    sys.exit(qapp.exec_())
backmari/moose
python/peacock/Execute/TerminalTextEdit.py
Python
lgpl-2.1
1,846
#! /usr/bin/python ## \file menu.py # \brief General Menu Class # \author Scott Barlow # \date 2009 # \version 1.0.3 # # This is a menu class written for pygame/Python. The menu is designed to work # with a program using a finite state machine (but it could also be easily # modified to have the 'buttons' return functions). The menu 'buttons' contain # a 'state' (a state could really be anything you want) and this 'state' is # what is returned when the user selects/presses the button. The program # controlling the menu can then act on this returned state as required. This # helps to write non-blocking code. # # The menu can have text buttons, image buttons (that get highlighted on all # sides to detect which is selected), or any combination of the two. # # The menu is flexible and can be dynamically changed. The 'buttons' will # auto-magically update themselves the next time they are drawn to the screen # (via the update method, which calls the draw method). The draw method should # not be called itself. 'Buttons' can be added or removed at any time. # # The menu can be positioned by the top left corner (a rectangle containing all # buttons is what gets moved). It can be changed to center the entire menu # (i.e. center that containing rectangle) on that same position coordinate. Or # the user can center the entire menu on the self.draw_surface. Note that if # the pygame screen is given to the menu, then the entire window will be # available to be drawn to. But if the user gives the menu another pygame # surface, then that surface itself will need to be blitted to the pygame # screen at some point. Furthermore, the user can align the buttons to align # on the left, tobe centerd, or to align themselves on the right. Also, they # can be aligned vertically on the top, center, or bottom. # # The user can dynamically change the colors of the font/highlights, the # padding between buttons (left/right and top/bottom), the thickness of the # highlight around image buttons, and the orientation of the menu (if the # 'buttons' will be stacked top to bottom ('vertical') or left to right # ('horizontal'). # # The best way to figure out the menu is to tinker around with it. Run the # example programs, change attributes, and play with the menu. # # # Copyright 2009 Scott Barlow # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA or see <http://www.gnu.org/licenses/>. 
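#
# A minimal usage sketch of the pattern described above. It is only an
# illustration: the state constants, window size, and button labels are
# made-up placeholders and are not part of this module or its example
# programs. Key events are handed to cMenu.update(), which returns the
# changed rectangles and the state attached to the selected button; the
# calling program then acts on that state.
#
#     import pygame
#     MENU_STATE, EXIT_STATE = 0, 1
#     pygame.init()
#     screen = pygame.display.set_mode((640, 480))
#     menu = cMenu(50, 50, 20, 5, 'vertical', 100, screen,
#                  [('Start Game', MENU_STATE, None),
#                   ('Exit',       EXIT_STATE, None)])
#     # Prime the menu so it draws itself on the first pass through the loop
#     pygame.event.post(pygame.event.Event(EVENT_CHANGE_STATE, key=0))
#     state = MENU_STATE
#     while state != EXIT_STATE:
#         for event in pygame.event.get():
#             if event.type in (pygame.KEYDOWN, EVENT_CHANGE_STATE):
#                 rect_list, state = menu.update(event, state)
#                 if rect_list != [None]:
#                     pygame.display.update(rect_list)
#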
# # # Changelog # V1.0.0 - Initial Release # V1.0.1 - Added get_current_image method # V1.0.2 - Fixed a bug in the set_font method (to update the rect of the # text buttons when the font is changed # V1.0.3 - Added self.refresh_whole_surface_on_load functionality # #------------------------------------------------------------------------------- #---[ Imports ]----------------------------------------------------------------- #------------------------------------------------------------------------------- import pygame #------------------------------------------------------------------------------- #---[ Defines ]----------------------------------------------------------------- #------------------------------------------------------------------------------- ## RGB color for Black BLACK = (0, 0, 0) ## RGB color for White WHITE = (255, 255, 255) ## RGB color for Red RED = (255, 0, 0) ## RGB color for Green GREEN = (0, 255, 0) ## RGB color for Blue BLUE = (0, 0, 255) ## This is a user event that should be sent whenever the game state is changed # (at the main game loop level) EVENT_CHANGE_STATE = pygame.USEREVENT + 1 #------------------------------------------------------------------------------- #---[ cMenu Class ]------------------------------------------------------------- #------------------------------------------------------------------------------- ## This class is used to display and control a menu # class cMenu: ## ---[ __init__ ]----------------------------------------------------------- # @param self The class itself, Python standard # @param x The x location to shift the buttons by when the # button is drawn to the surface in the update method # @param y The y location to shift the buttons by when the # button is drawn to the surface in the update method # @param h_pad Number the extra pixels to pad the buttons by (on the # left/right) # @param v_pad Number the extra pixels to pad the buttons by (on the # top/bottom). # @param orientation Should be 'vertical' or 'horizontal'. The buttons # will be put vertically or horizontally until 'number' # (the next argument) of buttons have been created at # which point it will start a new row or column # @param number The number of buttons to put vertically or # horizontally before starting a new row or column # @param background The background to use for the buttons (what will show # up behind the buttons). This is often the # surface that they will be blitted to via the update # method # @param buttonList This is a list of buttons to be added (though # more buttons can be added via another method). The # elements of the list should be tuples of 3 parts as # shown: ('text', state, image) where text is the text # that will be shown for the button, state is what will # be returned when the button is pressed (enter is # hit), and image is None if the button is just going # to display text, or else is an image itself if the # button will be displayed as an image instead of text. 
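   #
   #                     For example (the state values 1-3 and the pygame
   #                     Surface named logo_surface below are assumed
   #                     placeholders, not provided by this module), a mixed
   #                     menu could be described as:
   #                         [('Start Game', 1, None),
   #                          ('Options',    2, None),
   #                          ('Logo',       3, logo_surface)]
   #                     The first two entries become text buttons; the third
   #                     is drawn as an image button using logo_surface.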
# # Initialize the class # def __init__(self, x, y, h_pad, v_pad, orientation, number, background, buttonList): ## menu items self.menu_items = [] # List of menu items self.font = pygame.font.Font(None, 32) # Font to use self.x = x # Top left corner (of surface) self.y = y # relative to the screen/window self.change_number = number # See description above self.orientation = orientation # See description above self.horizontal_padding = h_pad # See description above self.vertical_padding = v_pad # See description above self.selection = 0 # The currently selected button self.u_color = WHITE # Color for unselected text self.s_color = RED # Color for selected text self.image_highlight_color = BLUE # Color for the image highlights self.image_highlight_offset = 2 # Addition padding around image # buttons only for the highlight self.background = background.copy() # The unedited background image self.draw_surface = background # Surface to draw to self.centered = False # True if the menu is centered self.centeredOnScreen = False # True if the menu is centered self.update_buttons = True # True if the positions of the # buttons need to be updated self.refresh_whole_surface_on_load = False# When the menu is first # displayed (when the event # EVENT_CHANGE_STATE is given to # the update method), the entire # self.draw_surface will be # updated # This dictionary contains the alignment orientation of the buttons # related to each other. It shifts the button within the bounds of # 'max_width' and 'max_height' in the self.position_buttons() method. self.alignment = {'vertical' :'top', 'horizontal':'left'} # Now add any buttons that were sent in self.add_buttons(buttonList) ## ---[ redraw_all ]--------------------------------------------------------- def redraw_all(self): for button in self.menu_items: button['redraw'] = True ## ---[ get_current_text ]--------------------------------------------------- def get_current_text(self): return self.menu_items[self.selection]['text'] ## ---[ get_current_image ]-------------------------------------------------- def get_current_image(self): return self.menu_items[self.selection]['b_image'] ## ---[ set_unselected_color ]----------------------------------------------- def set_unselected_color(self, new_color): self.u_color = new_color self.update_buttons = True ## ---[ set_selected_color ]------------------------------------------------- def set_selected_color(self, new_color): self.s_color = new_color self.update_buttons = True ## ---[ set_image_highlight_color ]------------------------------------------ def set_image_highlight_color(self, new_color): self.image_highlight_color = new_color self.update_buttons = True ## ---[ set_image_highlight_thickness ]-------------------------------------- def set_image_highlight_thickness(self, new_thick): old_th = self.image_highlight_offset # We need to update the width of the button images now (the images # themselves will be updated before the next refresh/re-draw). 
Note that # we only change the rect on the image buttons since we only highlight the # image buttons (not the text buttons) for button in self.menu_items: if button['b_image'] != None: button['rect'][2] = button['rect'][2] - 2 * old_th + 2 * new_thick button['rect'][3] = button['rect'][3] - 2 * old_th + 2 * new_thick self.image_highlight_offset = new_thick self.update_buttons = True ## ---[ set_padding ]-------------------------------------------------------- def set_padding(self, h_pad, v_pad): self.horizontal_padding = h_pad self.vertical_padding = v_pad self.update_buttons = True ## ---[ set_orientation ]---------------------------------------------------- def set_orientation(self, new_orientation): if new_orientation == 'vertical' or new_orientation == 'horizontal': self.orientation = new_orientation self.update_buttons = True else: print 'WARNING: cMenu.set_orientation: Invalid argument '\ 'new_orientation (value: %d)' % new_orientation ## ---[ set_change_number ]-------------------------------------------------- def set_change_number(self, new_change_number): self.change_number = new_change_number self.update_buttons = True ## ---[ set_refresh_whole_screen_on_load ]----------------------------------- def set_refresh_whole_surface_on_load(self, new_val = True): self.refresh_whole_surface_on_load = new_val # Should be True or False ## ---[ set_font ]----------------------------------------------------------- def set_font(self, font): self.font = font # We need to update the width and height of the text buttons since we # calculated their width and height based on the font for button in self.menu_items: if button['b_image'] == None: width, height = self.font.size(button['text']) button['rect'][2] = width button['rect'][3] = height self.update_buttons = True ## ---[ set_alignment ]------------------------------------------------------ # @param self The class itself, Python standard # @param v_align The way to align the text vertically within its 'cell' # @param h_align The way to align the text horizontally within its 'cell' # # This method sets the alignment of the buttons within their 'cell' (i.e. it # sets the alignment of the button (based on it's width and height) within # the max_width and max_height values calculated in the # self.position_buttons() method). The self.position_buttons() method is # also where the alignment occurs. The valid alignments are: # left # center # right # def set_alignment(self, v_align, h_align): if v_align in ['top', 'center', 'bottom']: self.alignment['vertical'] = v_align if h_align in ['left', 'center', 'right']: self.alignment['horizontal'] = h_align self.update_buttons = True ## ---[ set_position ]------------------------------------------------------- # @param self The class itself, Python standard # @param x The x (horizontal location) # @param y The y (vertical location) # # This method sets the x and y locations for the menu. By default, this # sets the position of the menu with respect to the top left corner of the # self.draw_surface. If 'centered' is true, then this is the location of # the center of the menu. # def set_position(self, x, y): self.x = x self.y = y self.update_buttons = True ## ---[ set_center ]--------------------------------------------------------- # @param self The class itself, Python standard # @param centered A boolean, centers the menu if it is True, default # value is True # @param centeredOnScreen If this is true, then the menu will be centered # on the entire self.draw_surface surface. 
# # When passed a value of True, this centers the menu at the self.x and # self.y locations. If False is passed to it, then this makes the top left # corner of the menu start at the x and y location with respect to the # self.draw_surface. If centerScreen is True, then self.centered is set to # true, regardless of the value passed in # def set_center(self, centered, centeredOnScreen): if centeredOnScreen: self.centeredOnScreen = centeredOnScreen self.centered = False else: self.centeredOnScreen = False self.centered = centered self.update_buttons = True ## ---[ add_buttons ]-------------------------------------------------------- # @param self The class itself, Python standard # @param buttonList List of menu buttons to be added # # Used to add button(s) to the menu # def add_buttons(self, buttonList): for button in buttonList: self.menu_items.append(self.create_button(button)) self.update_buttons = True ## ---[ remove_buttons ]----------------------------------------------------- # @param self The class itself, Python standard # @param indexList List of indexes to be removed # # Used to remove button(s) from the menu # def remove_buttons(self, indexList): old_contained_rect = self.contained_rect for index in indexList: if len(self.menu_items) > 1: self.menu_items.pop(index) self.update_buttons = True return old_contained_rect ## ---[ update_button_locations ]-------------------------------------------- # @param self The class itself, Python standard # # This method is just used to update the location of the buttons when the # a change is made # def update_button_locations(self): self.position_buttons() self.set_button_images() self.update_buttons = False ## ---[ create_button ]------------------------------------------------------ # @param self The class itself, Python standard # @param button_info A list with the button text, the next state to # return, and the image (if applicable) # # Create the button dictionary for a new button. Note that this button is # useless until the set_button_images method is called which is where the # button images are created and assigned. The reason it is not done here # is becuase we need to know the location of the button on the background # which is not assigned until position_buttons() is called. 
Since position # buttons depends on the width and height of each button, we just calculate # those here, then we set the location of the buttons via the # position_buttons() method, then we make the actual images via the # set_button_images() function # def create_button(self, button_info): # If this button is not an image, set the width and height based on the # text if button_info[2] == None: width, height = self.font.size(button_info[0]) button_rect = pygame.Rect((0, 0), (width, height)) # Else this button is a graphic button, so create the width and height # based on the image provided else: width, height = button_info[2].get_size() offset = (self.image_highlight_offset, self.image_highlight_offset) new_width = width + 2 * offset[0] # Make room for the highlight on new_height = height + 2 * offset[1] # all sides button_rect = pygame.Rect((0, 0), (new_width, new_height)) set_redraw = True # When the button is created, it needs to be drawn set_selected = False # When the button is created, it is not selected new_button = {'text' : button_info[0], 'state' : button_info[1], 'selected': set_selected, 'rect' : button_rect, 'offset' : (0, 0), 'redraw' : set_redraw, 'b_image' : button_info[2], # base image 's_image' : None, # image when selected and not 'u_image' : None} # selected (created in # set_button_images) return new_button ## ---[ set_button_images ]-------------------------------------------------- # @param self The class itself, Python standard # # Create the button images to be displayed - adjusted for the location of # the button over the background image # def set_button_images(self): for button in self.menu_items: # If this button is not an image, create the selected and unselected # images based on the text if button['b_image'] == None: r = self.font.render width = button['rect'][2] height = button['rect'][3] rect = pygame.Rect(button['offset'], (width, height)) # For each of the text button (selected and unselected), create a # surface of the required size (already calculated before), blit # the background image to the surface, then render the text and blit # that text onto the same surface. selected_image = pygame.Surface((width, height), -1) selected_image.blit(self.background, (0, 0), rect) text_image = r(button['text'], True, self.s_color) selected_image.blit(text_image, (0, 0)) unselected_image = pygame.Surface((width, height), -1) unselected_image.blit(self.background, (0, 0), rect) text_image = r(button['text'], True, self.u_color) unselected_image.blit(text_image, (0, 0)) # Else this button is a graphic button, so create the selected and # unselected images based on the image provided else: orig_width, orig_height = button['b_image'].get_size() new_width = button['rect'][2] new_height = button['rect'][3] offset = (self.image_highlight_offset, self.image_highlight_offset) # Selected image! -------------------------------------------------- # Create the surface, fill the surface with the highlight color, # then blit the background image to the surface (inside of the # highlight area), and then blit the actual button base image over # the background selected_image = pygame.Surface((new_width, new_height), -1) selected_image.fill(self.image_highlight_color) rect = pygame.Rect((button['offset'][0] + offset[0], button['offset'][1] + offset[1]), (orig_width, orig_height)) selected_image.blit(self.background, offset, rect) selected_image.blit(button['b_image'], offset) # Unselected image! 
------------------------------------------------ # Create the surface, blit the background image onto the surface (to # make sure effects go away when the button is no longer selected), # and then blit the actual button base image over the background unselected_image = pygame.Surface((new_width, new_height), -1) rect = pygame.Rect(button['offset'], (new_width, new_height)) unselected_image.blit(self.background, (0, 0), rect) unselected_image.blit(button['b_image'], offset) button['s_image'] = selected_image button['u_image'] = unselected_image ## ---[ position_buttons ]--------------------------------------------------- # @param self The class itself, Python standard # # Sets the positions for the buttons # def position_buttons(self): width = 0 height = 0 max_width = 0 max_height = 0 counter = 0 x_loc = self.x y_loc = self.y # Get the maximum width and height of the surfaces for button in self.menu_items: width = button['rect'][2] height = button['rect'][3] max_width = max(width, max_width) max_height = max(height, max_height) # Position the button in relation to each other for button in self.menu_items: # Find the offsets for the alignment of the buttons (left, center, or # right # Vertical Alignment if self.alignment['vertical'] == 'top': offset_height = 0 elif self.alignment['vertical'] == 'center': offset_height = (max_height - button['rect'][3])/2 elif self.alignment['vertical'] == 'bottom': offset_height = (max_height - button['rect'][3]) else: offset_height = 0 print 'WARNING: cMenu.position_buttons: Vertical Alignment '\ '(value: %s) not recognized! Left alignment will be used'\ % self.alignment['vertical'] # Horizontal Alignment if self.alignment['horizontal'] == 'left': offset_width = 0 elif self.alignment['horizontal'] == 'center': offset_width = (max_width - button['rect'][2])/2 elif self.alignment['horizontal'] == 'right': offset_width = (max_width - button['rect'][2]) else: offset_width = 0 print 'WARNING: cMenu.position_buttons: Horizontal Alignment '\ '(value: %s) not recognized! Left alignment will be used'\ % self.alignment['horizontal'] # Move the button location slightly based on the alignment offsets x_loc += offset_width y_loc += offset_height # Assign the location of the button button['offset'] = (x_loc, y_loc) # Take the alignment offsets away after the button position has been # assigned so that the new button can start fresh again x_loc -= offset_width y_loc -= offset_height # Add the width/height to the position based on the orientation of the # menu. Add in the padding. if self.orientation == 'vertical': y_loc += max_height + self.vertical_padding else: x_loc += max_width + self.horizontal_padding counter += 1 # If we have reached the self.change_number of buttons, then it is time # to start a new row or column if counter == self.change_number: counter = 0 if self.orientation == 'vertical': x_loc += max_width + self.horizontal_padding y_loc = self.y else: y_loc += max_height + self.vertical_padding x_loc = self.x # Find the smallest Rect that will contain all of the buttons self.contained_rect = pygame.rect.Rect(self.x,self.y,0,0) for button in self.menu_items: temp_rect = button['rect'].move(button['offset']) self.contained_rect.union_ip(temp_rect) # We shift the buttons around on the screen if they are supposed to be # centered (on the surface itself or at (x, y). We do it here instead of # at the beginning of this function becuase we need to know what the # self.contained_rect is to know the correct amount to shift them. 
if self.centeredOnScreen: shift_x = self.x - (self.draw_surface.get_rect()[2]/2)+(self.contained_rect.width/2) shift_y = self.y - (self.draw_surface.get_rect()[3]/2)+(self.contained_rect.height/2) elif self.centered: shift_x = (self.contained_rect[2]) / 2 shift_y = (self.contained_rect[3]) / 2 if self.centeredOnScreen or self.centered: # Move the buttons to make them centered for button in self.menu_items: button['offset'] = (button['offset'][0] - shift_x, button['offset'][1] - shift_y) # Re-find the smallest Rect that will contain all of the buttons self.contained_rect = pygame.rect.Rect(shift_x*-1,shift_y*-1,0,0) for button in self.menu_items: temp_rect = button['rect'].move(button['offset']) self.contained_rect.union_ip(temp_rect) ## ---[ update ]------------------------------------------------------------- # @param self The class itself, Python standard # @param e The last event # @param c_state The current state of the game from where this is called # @return A list of rectangles of where the screen changed # @return The new state for the game # # Update the menu surface, redraw it to the stored surface self.draw_surface # def update(self, e, c_state): redraw_full_menu = False self.selection_prev = self.selection o = self.orientation s = self.selection n = self.change_number if e.key == pygame.K_DOWN: if (o == 'vertical') and ((s + 1) % n != 0): self.selection += 1 elif o == 'horizontal': self.selection += n elif e.key == pygame.K_UP: if (o == 'vertical') and ((s) % n != 0): self.selection -= 1 elif o == 'horizontal': self.selection -= n elif e.key == pygame.K_RIGHT: if o == 'vertical': self.selection += n elif (o == 'horizontal') and ((s + 1) % n != 0): self.selection += 1 elif e.key == pygame.K_LEFT: if o == 'vertical': self.selection -= n elif (o == 'horizontal') and ((s) % n != 0): self.selection -= 1 # elif e.key == pygame.K_r: # original_contained_rect = self.remove_buttons([s]) # if self.selection -1 >= 0: # self.selection -= 1 # self.selection_prev -= 1 # redraw_full_menu = True elif e.key == pygame.K_RETURN: return [None], self.menu_items[s]['state'] if self.selection >= len(self.menu_items) or self.selection < 0: self.selection = self.selection_prev # If this is an EVENT_CHANGE_STATE, then this is the first time that we # have entered this menu, so lets set it up if e.type == EVENT_CHANGE_STATE: self.selection = 0 self.menu_items[self.selection_prev]['selected'] = False self.menu_items[self.selection]['selected'] = True self.redraw_all() rectangle_list = self.draw_buttons() if self.refresh_whole_surface_on_load: rectangle_list = pygame.Rect((0, 0), self.draw_surface.get_size()) return [rectangle_list], c_state else: return [self.contained_rect], c_state elif redraw_full_menu: self.menu_items[self.selection_prev]['selected'] = False self.menu_items[self.selection]['selected'] = True self.redraw_all() rectangle_list = self.draw_buttons(original_contained_rect) return rectangle_list, c_state elif self.selection != self.selection_prev: self.menu_items[self.selection_prev]['selected'] = False self.menu_items[self.selection]['selected'] = True rectangle_list = self.draw_buttons() return rectangle_list, c_state # If no updates were made, return defaults return [None], c_state ## ---[ draw_buttons ]------------------------------------------------------- # @param self The class itself, Python standard # @param redraw_rect If this pygame.Rect is provided, then the entire # background will be drawn to the surface in the area # of this rect before the buttons are drawn # @return A list 
of rectangles of where the screen changed # # Draw the buttons to the self.draw_surface and return a list of Rect's that # indicate where on the surface changes were made # def draw_buttons(self, redraw_rect = None): rect_list = [] # If buttons have been changed (added button(s), deleted button(s), # changed colors, etc, etc), then we need to update the button locations # and images if self.update_buttons: self.update_button_locations() # Print a warning if the buttons are partially/completely off the # surface if not self.draw_surface.get_rect().contains(self.contained_rect): print 'WARNING: cMenu.draw_buttons: Some buttons are partially '\ 'or completely off of the self.draw_surface!' # If a rect was provided, redraw the background surface to the area of the # rect before we draw the buttons if redraw_rect != None: offset = (redraw_rect[0], redraw_rect[1]) drawn_rect = self.draw_surface.blit(self.background, offset, redraw_rect) rect_list.append(drawn_rect) # Cycle through the buttons, only draw the ones that need to be redrawn for button in self.menu_items: if button['redraw']: if button['selected']: image = button['s_image'] else: image = button['u_image'] drawn_rect = self.draw_surface.blit(image, button['offset'], button['rect']) rect_list.append(drawn_rect) return rect_list #---[ END OF FILE ]-------------------------------------------------------------
dhatch/PyRunner
menu.py
Python
gpl-2.0
32,636
import pytest from model.knowledge_representation import Competency, Fact, Curriculum, get_available_facts __author__ = 'e.kolpakov' class TestCurriculum: @pytest.fixture def curriculum(self): return Curriculum() def test_empty_curriculum_all_lookups_return_none(self, curriculum): assert curriculum.find_competency("qwe") is None assert curriculum.find_fact("qwe") is None def test_register_competency_can_lookup_by_code(self, curriculum): comp1 = Competency('qwe', []) comp2 = Competency('zxc', []) curriculum.register_competency(comp1) curriculum.register_competency(comp2) assert curriculum.find_competency('qwe') == comp1 assert curriculum.find_competency('zxc') == comp2 def test_register_fact_can_lookup_fact_by_code(self, curriculum): for fact in [Fact("A"), Fact("B"), Fact("C"), Fact("D")]: curriculum.register_fact(fact) assert curriculum.find_fact("A") == Fact("A") assert curriculum.find_fact("B") == Fact("B") assert curriculum.find_fact("C") == Fact("C") assert curriculum.find_fact("D") == Fact("D") assert curriculum.find_fact("Z") is None class TestGetAvailableFacts: def test_get_available_facts_empty_facts_returns_empty(self): facts = set() known_facts = frozenset() result = get_available_facts(facts, known_facts) assert result == set() def test_get_available_facts_no_dependencies_returns_facts_as_is(self): facts = {Fact('A'), Fact('B')} known_facts = frozenset() result = get_available_facts(facts, known_facts) assert result == facts def test_get_available_facts_factc_depends_on_missing_fact(self): facts = {Fact('A'), Fact('C', ['B'])} result = get_available_facts(facts, frozenset()) assert result == {Fact('A')} def test_get_available_facts_factb_depends_on_existing_competency(self): facts = {Fact('B', ['A'])} result = get_available_facts(facts, frozenset([Fact('A')])) assert result == facts def test_get_available_facts_factb_depends_on_fact_in_same_set(self): facts = {Fact('A'), Fact('B', ['A'])} result = get_available_facts(facts, frozenset()) assert result == facts def test_get_available_facts_dependency_chain_on_same_set(self): facts = {Fact('A'), Fact('B', ['A']), Fact('C', ['B'])} result = get_available_facts(facts, frozenset()) assert result == facts def test_get_available_facts_removes_all_known_facts(self): facts = {Fact('A'), Fact('B'), Fact('C'), Fact('D')} known = frozenset([Fact('A'), Fact('B'), Fact('C')]) result = get_available_facts(facts, known) assert result == {Fact('D')}
e-kolpakov/study-model
tests/test_knowledge_representation.py
Python
mit
2,826
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dc_campaign_finance_data_processor.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
codefordc/dc-campaign-finance-data-processor
dc_campaign_finance_data_processor/manage.py
Python
mit
277
from django import forms class PutForm(forms.Form): body = forms.CharField(widget=forms.Textarea()) tube = forms.CharField(initial='default') priority = forms.IntegerField(initial=2147483648) delay = forms.IntegerField(initial=0) ttr = forms.IntegerField(initial=120)
andreisavu/django-jack
jack/beanstalk/forms.py
Python
apache-2.0
291
import os from glob import iglob for file in iglob('./**/*.md', recursive=True): print(file.replace('\\','/')[1:-3]) path = False pastFirstLine = False with open(file, "r") as f: try: lines = f.readlines() except: print("Error with " + file) continue with open(file, "w") as f: for line in lines: #make sure the path does not already exist if line.startswith("path:"): path = True #insert path at the end of the frontmatter if line.startswith("---"): if pastFirstLine: if not path: f.write("path: " + file.replace('\\','/')[1:-3] + '\n') else: pastFirstLine = True #write lines if not (line.startswith("layout:")): f.write(line)
FRCTeam2984/Website
src/markdown-pages/convert.py
Python
mit
912
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2003-2006 Donald N. Allingham # 2009 Gary Burton # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # #------------------------------------------------------------------------- # # internationalization # #------------------------------------------------------------------------- #------------------------------------------------------------------------- # # gramps modules # #------------------------------------------------------------------------- from gramps.gen.const import GRAMPS_LOCALE as glocale _ = glocale.translation.sgettext from ..views.treemodels import RepositoryModel from .baseselector import BaseSelector from gramps.gen.const import URL_MANUAL_SECT2 #------------------------------------------------------------------------- # # Constants # #------------------------------------------------------------------------- #------------------------------------------------------------------------- # # SelectRepository # #------------------------------------------------------------------------- class SelectRepository(BaseSelector): def _local_init(self): """ Perform local initialisation for this class """ self.setup_configs('interface.repo-sel', 600, 450) def get_window_title(self): return _("Select Repository") def get_model_class(self): return RepositoryModel def get_column_titles(self): return [ (_('Title'), 350, BaseSelector.TEXT, 0), (_('ID'), 75, BaseSelector.TEXT, 1), (_('Last Change'), 150, BaseSelector.TEXT, 14), ] def get_from_handle_func(self): return self.db.get_repository_from_handle WIKI_HELP_PAGE = URL_MANUAL_SECT2 WIKI_HELP_SEC = _('Select_Repository_selector', 'manual')
SNoiraud/gramps
gramps/gui/selectors/selectrepository.py
Python
gpl-2.0
2,510
# -*- coding: utf-8 -*- import csv import json from cStringIO import StringIO from datetime import datetime from django.conf import settings from django.core import mail from django.core.cache import cache import mock from pyquery import PyQuery as pq from olympia import amo from olympia.amo.tests import TestCase from olympia.amo.tests import formset, initial from olympia.access.models import Group, GroupUser from olympia.addons.models import Addon, CompatOverride, CompatOverrideRange from olympia.amo.urlresolvers import reverse from olympia.amo.tests.test_helpers import get_image_path from olympia.amo.utils import urlparams from olympia.applications.models import AppVersion from olympia.bandwagon.models import FeaturedCollection, MonthlyPick from olympia.compat.cron import compatibility_report from olympia.compat.models import CompatReport from olympia.constants.base import VALIDATOR_SKELETON_RESULTS from olympia.devhub.models import ActivityLog from olympia.files.models import File, FileUpload from olympia.stats.models import UpdateCount from olympia.users.models import UserProfile from olympia.users.utils import get_task_user from olympia.versions.models import ApplicationsVersions, Version from olympia.zadmin import forms, tasks from olympia.zadmin.forms import DevMailerForm from olympia.zadmin.models import ( EmailPreviewTopic, ValidationJob, ValidationResult) from olympia.zadmin.tasks import updated_versions from olympia.zadmin.views import find_files class TestSiteEvents(TestCase): fixtures = ['base/users', 'zadmin/tests/siteevents'] def setUp(self): super(TestSiteEvents, self).setUp() self.client.login(username='[email protected]', password='password') def test_get(self): url = reverse('zadmin.site_events') response = self.client.get(url) assert response.status_code == 200 events = response.context['events'] assert len(events) == 1 def test_add(self): url = reverse('zadmin.site_events') new_event = { 'event_type': 2, 'start': '2012-01-01', 'description': 'foo', } response = self.client.post(url, new_event, follow=True) assert response.status_code == 200 events = response.context['events'] assert len(events) == 2 def test_edit(self): url = reverse('zadmin.site_events', args=[1]) modified_event = { 'event_type': 2, 'start': '2012-01-01', 'description': 'bar', } response = self.client.post(url, modified_event, follow=True) assert response.status_code == 200 events = response.context['events'] assert events[0].description == 'bar' def test_delete(self): url = reverse('zadmin.site_events.delete', args=[1]) response = self.client.get(url, follow=True) assert response.status_code == 200 events = response.context['events'] assert len(events) == 0 class BulkValidationTest(TestCase): fixtures = ['base/addon_3615', 'base/appversion', 'base/users'] def setUp(self): super(BulkValidationTest, self).setUp() assert self.client.login(username='[email protected]', password='password') self.addon = Addon.objects.get(pk=3615) self.creator = UserProfile.objects.get(username='editor') self.version = self.addon.get_version() ApplicationsVersions.objects.filter( application=amo.FIREFOX.id, version=self.version).update( max=AppVersion.objects.get(application=1, version='3.7a1pre')) self.application_version = self.version.apps.all()[0] self.application = self.application_version.application self.min = self.application_version.min self.max = self.application_version.max self.curr_max = self.appversion('3.7a1pre') self.counter = 0 self.old_task_user = settings.TASK_USER_ID settings.TASK_USER_ID = 
self.creator.id def tearDown(self): settings.TASK_USER_ID = self.old_task_user super(BulkValidationTest, self).tearDown() def appversion(self, version, application=amo.FIREFOX.id): return AppVersion.objects.get(application=application, version=version) def create_job(self, **kwargs): kw = dict(application=amo.FIREFOX.id, curr_max_version=kwargs.pop('current', self.curr_max), target_version=kwargs.pop('target', self.appversion('3.7a3')), creator=self.creator) kw.update(kwargs) return ValidationJob.objects.create(**kw) def create_file(self, version=None, platform=amo.PLATFORM_ALL.id): if not version: version = self.version return File.objects.create(version=version, filename='file-%s' % self.counter, platform=platform, status=amo.STATUS_PUBLIC) def create_result(self, job, f, **kwargs): self.counter += 1 kw = dict(file=f, validation='{}', errors=0, warnings=0, notices=0, validation_job=job, task_error=None, valid=0, completed=datetime.now()) kw.update(kwargs) return ValidationResult.objects.create(**kw) def start_validation(self, new_max='3.7a3'): self.new_max = self.appversion(new_max) r = self.client.post(reverse('zadmin.start_validation'), {'application': amo.FIREFOX.id, 'curr_max_version': self.curr_max.id, 'target_version': self.new_max.id, 'finish_email': '[email protected]'}, follow=True) assert r.status_code == 200 class TestBulkValidation(BulkValidationTest): @mock.patch('olympia.zadmin.tasks.bulk_validate_file') def test_start(self, bulk_validate_file): new_max = self.appversion('3.7a3') r = self.client.post(reverse('zadmin.start_validation'), {'application': amo.FIREFOX.id, 'curr_max_version': self.curr_max.id, 'target_version': new_max.id, 'finish_email': '[email protected]'}, follow=True) self.assertNoFormErrors(r) self.assert3xx(r, reverse('zadmin.validation')) job = ValidationJob.objects.get() assert job.application == amo.FIREFOX.id assert job.curr_max_version.version == self.curr_max.version assert job.target_version.version == new_max.version assert job.finish_email == '[email protected]' assert job.completed is None assert job.result_set.all().count() == len(self.version.all_files) assert bulk_validate_file.delay.called @mock.patch('olympia.zadmin.tasks.bulk_validate_file') def test_ignore_user_disabled_addons(self, bulk_validate_file): self.addon.update(disabled_by_user=True) r = self.client.post(reverse('zadmin.start_validation'), {'application': amo.FIREFOX.id, 'curr_max_version': self.curr_max.id, 'target_version': self.appversion('3.7a3').id, 'finish_email': '[email protected]'}, follow=True) self.assertNoFormErrors(r) self.assert3xx(r, reverse('zadmin.validation')) assert not bulk_validate_file.delay.called @mock.patch('olympia.zadmin.tasks.bulk_validate_file') def test_ignore_non_public_addons(self, bulk_validate_file): target_ver = self.appversion('3.7a3').id for status in (amo.STATUS_DISABLED, amo.STATUS_NULL, amo.STATUS_DELETED): self.addon.update(status=status) r = self.client.post(reverse('zadmin.start_validation'), {'application': amo.FIREFOX.id, 'curr_max_version': self.curr_max.id, 'target_version': target_ver, 'finish_email': '[email protected]'}, follow=True) self.assertNoFormErrors(r) self.assert3xx(r, reverse('zadmin.validation')) assert not bulk_validate_file.delay.called, ( 'Addon with status %s should be ignored' % status) @mock.patch('olympia.zadmin.tasks.bulk_validate_file') def test_ignore_lang_packs(self, bulk_validate_file): target_ver = self.appversion('3.7a3').id self.addon.update(type=amo.ADDON_LPAPP) r = 
self.client.post(reverse('zadmin.start_validation'), {'application': amo.FIREFOX.id, 'curr_max_version': self.curr_max.id, 'target_version': target_ver, 'finish_email': '[email protected]'}, follow=True) self.assertNoFormErrors(r) self.assert3xx(r, reverse('zadmin.validation')) assert not bulk_validate_file.delay.called, ( 'Lang pack addons should be ignored') @mock.patch('olympia.zadmin.tasks.bulk_validate_file') def test_ignore_themes(self, bulk_validate_file): target_ver = self.appversion('3.7a3').id self.addon.update(type=amo.ADDON_THEME) self.client.post(reverse('zadmin.start_validation'), {'application': amo.FIREFOX.id, 'curr_max_version': self.curr_max.id, 'target_version': target_ver, 'finish_email': '[email protected]'}) assert not bulk_validate_file.delay.called, ( 'Theme addons should be ignored') @mock.patch('olympia.zadmin.tasks.bulk_validate_file') def test_validate_all_non_disabled_addons(self, bulk_validate_file): target_ver = self.appversion('3.7a3').id bulk_validate_file.delay.called = False self.addon.update(status=amo.STATUS_PUBLIC) r = self.client.post(reverse('zadmin.start_validation'), {'application': amo.FIREFOX.id, 'curr_max_version': self.curr_max.id, 'target_version': target_ver, 'finish_email': '[email protected]'}, follow=True) self.assertNoFormErrors(r) self.assert3xx(r, reverse('zadmin.validation')) assert bulk_validate_file.delay.called, ( 'Addon with status %s should be validated' % self.addon.status) def test_grid(self): job = self.create_job() for res in (dict(errors=0), dict(errors=1)): self.create_result(job, self.create_file(), **res) r = self.client.get(reverse('zadmin.validation')) assert r.status_code == 200 doc = pq(r.content) assert doc('table tr td').eq(0).text() == str(job.pk) # ID assert doc('table tr td').eq(3).text() == 'Firefox' # Application assert doc('table tr td').eq(4).text() == self.curr_max.version assert doc('table tr td').eq(5).text() == '3.7a3' assert doc('table tr td').eq(6).text() == '2' # tested assert doc('table tr td').eq(7).text() == '1' # failing assert doc('table tr td').eq(8).text()[0] == '1' # passing assert doc('table tr td').eq(9).text() == '0' # exceptions def test_application_versions_json(self): r = self.client.post(reverse('zadmin.application_versions_json'), {'application': amo.FIREFOX.id}) assert r.status_code == 200 data = json.loads(r.content) empty = True for id, ver in data['choices']: empty = False assert AppVersion.objects.get(pk=id).version == ver assert not empty, "Unexpected: %r" % data def test_job_status(self): job = self.create_job() def get_data(): self.create_result(job, self.create_file(), **{}) r = self.client.post(reverse('zadmin.job_status'), {'job_ids': json.dumps([job.pk])}) assert r.status_code == 200 data = json.loads(r.content)[str(job.pk)] return data data = get_data() assert data['completed'] == 1 assert data['total'] == 1 assert data['percent_complete'] == '100' assert data['job_id'] == job.pk assert data['completed_timestamp'] == '' job.update(completed=datetime.now()) data = get_data() assert data['completed_timestamp'] != '', ( 'Unexpected: %s' % data['completed_timestamp']) class TestBulkUpdate(BulkValidationTest): def setUp(self): super(TestBulkUpdate, self).setUp() self.job = self.create_job(completed=datetime.now()) self.update_url = reverse('zadmin.notify', args=[self.job.pk]) self.list_url = reverse('zadmin.validation') self.data = {'text': '{{ APPLICATION }} {{ VERSION }}', 'subject': '..'} self.version_one = Version.objects.create(addon=self.addon) self.version_two = 
Version.objects.create(addon=self.addon) appver = AppVersion.objects.get(application=1, version='3.7a1pre') for v in self.version_one, self.version_two: ApplicationsVersions.objects.create( application=amo.FIREFOX.id, version=v, min=appver, max=appver) def test_no_update_link(self): self.create_result(self.job, self.create_file(), **{}) r = self.client.get(self.list_url) doc = pq(r.content) assert doc('table tr td a.set-max-version').text() == ( 'Notify and set max versions') def test_update_link(self): self.create_result(self.job, self.create_file(), **{'valid': 1}) r = self.client.get(self.list_url) doc = pq(r.content) assert doc('table tr td a.set-max-version').text() == ( 'Notify and set max versions') def test_update_url(self): self.create_result(self.job, self.create_file(), **{'valid': 1}) r = self.client.get(self.list_url) doc = pq(r.content) assert doc('table tr td a.set-max-version').attr('data-job-url') == ( self.update_url) def test_update_anonymous(self): self.client.logout() r = self.client.post(self.update_url) assert r.status_code == 302 def test_version_pks(self): for version in [self.version_one, self.version_two]: for x in range(0, 3): self.create_result(self.job, self.create_file(version)) assert sorted(updated_versions(self.job)) == ( [self.version_one.pk, self.version_two.pk]) def test_update_passing_only(self): self.create_result(self.job, self.create_file(self.version_one)) self.create_result(self.job, self.create_file(self.version_two), errors=1) assert sorted(updated_versions(self.job)) == ( [self.version_one.pk]) def test_update_pks(self): self.create_result(self.job, self.create_file(self.version)) r = self.client.post(self.update_url, self.data) assert r.status_code == 302 assert self.version.apps.all()[0].max == self.job.target_version def test_update_unclean_pks(self): self.create_result(self.job, self.create_file(self.version)) self.create_result(self.job, self.create_file(self.version), errors=1) r = self.client.post(self.update_url, self.data) assert r.status_code == 302 assert self.version.apps.all()[0].max == self.job.curr_max_version def test_update_pks_logs(self): self.create_result(self.job, self.create_file(self.version)) assert ActivityLog.objects.for_addons(self.addon).count() == 0 self.client.post(self.update_url, self.data) upd = amo.LOG.MAX_APPVERSION_UPDATED.id logs = ActivityLog.objects.for_addons(self.addon).filter(action=upd) assert logs.count() == 1 assert logs[0].user == get_task_user() def test_update_wrong_version(self): self.create_result(self.job, self.create_file(self.version)) av = self.version.apps.all()[0] av.max = self.appversion('3.6') av.save() self.client.post(self.update_url, self.data) assert self.version.apps.all()[0].max == self.appversion('3.6') def test_update_all_within_range(self): self.create_result(self.job, self.create_file(self.version)) # Create an appversion in between current and target. 
av = self.version.apps.all()[0] av.max = self.appversion('3.7a2') av.save() self.client.post(self.update_url, self.data) assert self.version.apps.all()[0].max == self.appversion('3.7a3') def test_version_comparison(self): # regression test for bug 691984 job = self.create_job(completed=datetime.now(), current=self.appversion('3.0.9'), target=self.appversion('3.5')) # .* was not sorting right self.version.apps.all().update(max=self.appversion('3.0.*')) self.create_result(job, self.create_file(self.version)) self.client.post(reverse('zadmin.notify', args=[job.pk]), self.data) assert self.version.apps.all()[0].max == self.appversion('3.5') def test_update_different_app(self): self.create_result(self.job, self.create_file(self.version)) target = self.version.apps.all()[0] target.application = amo.FIREFOX.id target.save() assert self.version.apps.all()[0].max == self.curr_max def test_update_twice(self): self.create_result(self.job, self.create_file(self.version)) self.client.post(self.update_url, self.data) assert self.version.apps.all()[0].max == self.job.target_version now = self.version.modified self.client.post(self.update_url, self.data) assert self.version.modified == now def test_update_notify(self): self.create_result(self.job, self.create_file(self.version)) self.client.post(self.update_url, self.data) assert len(mail.outbox) == 1 def test_update_subject(self): data = self.data.copy() data['subject'] = '{{ PASSING_ADDONS.0.name }}' f = self.create_file(self.version) self.create_result(self.job, f) self.client.post(self.update_url, data) assert mail.outbox[0].subject == ( '%s' % self.addon.name) @mock.patch('olympia.zadmin.tasks.log') def test_bulk_email_logs_stats(self, log): log.info = mock.Mock() self.create_result(self.job, self.create_file(self.version)) self.client.post(self.update_url, self.data) assert log.info.call_args_list[-8][0][0] == ( '[1@None] bulk update stats for job %s: ' '{bumped: 1, is_dry_run: 0, processed: 1}' % self.job.pk) assert log.info.call_args_list[-2][0][0] == ( '[1@None] bulk email stats for job %s: ' '{author_emailed: 1, is_dry_run: 0, processed: 1}' % self.job.pk) def test_application_version(self): self.create_result(self.job, self.create_file(self.version)) self.client.post(self.update_url, self.data) assert mail.outbox[0].body == 'Firefox 3.7a3' def test_multiple_result_links(self): # Creates validation results for two files of the same addon: results = [ self.create_result(self.job, self.create_file(self.version)), self.create_result(self.job, self.create_file(self.version))] self.client.post(self.update_url, {'text': '{{ PASSING_ADDONS.0.links }}', 'subject': '..'}) body = mail.outbox[0].body assert all((reverse('devhub.bulk_compat_result', args=(self.addon.slug, result.pk)) in body) for result in results) def test_notify_mail_preview(self): self.create_result(self.job, self.create_file(self.version)) self.client.post(self.update_url, {'text': 'the message', 'subject': 'the subject', 'preview_only': 'on'}) assert len(mail.outbox) == 0 rs = self.job.get_notify_preview_emails() assert [e.subject for e in rs] == ['the subject'] # version should not be bumped since it's in preview mode: assert self.version.apps.all()[0].max == self.max upd = amo.LOG.MAX_APPVERSION_UPDATED.id logs = ActivityLog.objects.for_addons(self.addon).filter(action=upd) assert logs.count() == 0 class TestBulkNotify(BulkValidationTest): def setUp(self): super(TestBulkNotify, self).setUp() self.job = self.create_job(completed=datetime.now()) self.update_url = 
reverse('zadmin.notify', args=[self.job.pk]) self.syntax_url = reverse('zadmin.notify.syntax') self.list_url = reverse('zadmin.validation') self.version_one = Version.objects.create(addon=self.addon) self.version_two = Version.objects.create(addon=self.addon) def test_no_notify_link(self): self.create_result(self.job, self.create_file(), **{}) r = self.client.get(self.list_url) doc = pq(r.content) assert len(doc('table tr td a.notify')) == 0 def test_notify_link(self): self.create_result(self.job, self.create_file(), **{'errors': 1}) r = self.client.get(self.list_url) doc = pq(r.content) assert doc('table tr td a.set-max-version').text() == ( 'Notify and set max versions') def test_notify_url(self): self.create_result(self.job, self.create_file(), **{'errors': 1}) r = self.client.get(self.list_url) doc = pq(r.content) assert doc('table tr td a.set-max-version').attr('data-job-url') == ( self.update_url) def test_notify_anonymous(self): self.client.logout() r = self.client.post(self.update_url) assert r.status_code == 302 def test_notify_log(self): self.create_result(self.job, self.create_file(self.version), **{'errors': 1}) assert ActivityLog.objects.for_addons(self.addon).count() == 0 self.client.post(self.update_url, {'text': '..', 'subject': '..'}) upd = amo.LOG.BULK_VALIDATION_USER_EMAILED.id logs = (ActivityLog.objects.for_user(self.creator) .filter(action=upd)) assert logs.count() == 1 assert logs[0].user == self.creator def test_compat_bump_log(self): self.create_result(self.job, self.create_file(self.version), **{'errors': 0}) assert ActivityLog.objects.for_addons(self.addon).count() == 0 self.client.post(self.update_url, {'text': '..', 'subject': '..'}) upd = amo.LOG.MAX_APPVERSION_UPDATED.id logs = ActivityLog.objects.for_addons(self.addon).filter(action=upd) assert logs.count() == 1 assert logs[0].user == self.creator def test_notify_mail(self): self.create_result(self.job, self.create_file(self.version), **{'errors': 1}) r = self.client.post(self.update_url, {'text': '..', 'subject': '{{ FAILING_ADDONS.0.name }}'}) assert r.status_code == 302 assert len(mail.outbox) == 1 assert mail.outbox[0].body == '..' 
assert mail.outbox[0].subject == self.addon.name assert mail.outbox[0].to == [u'[email protected]'] def test_result_links(self): result = self.create_result(self.job, self.create_file(self.version), **{'errors': 1}) r = self.client.post(self.update_url, {'text': '{{ FAILING_ADDONS.0.links }}', 'subject': '...'}) assert r.status_code == 302 assert len(mail.outbox) == 1 res = reverse('devhub.bulk_compat_result', args=(self.addon.slug, result.pk)) email = mail.outbox[0].body assert res in email, ('Unexpected message: %s' % email) def test_notify_mail_partial(self): self.create_result(self.job, self.create_file(self.version), **{'errors': 1}) self.create_result(self.job, self.create_file(self.version)) r = self.client.post(self.update_url, {'text': '..', 'subject': '..'}) assert r.status_code == 302 assert len(mail.outbox) == 1 def test_notify_mail_multiple(self): self.create_result(self.job, self.create_file(self.version), **{'errors': 1}) self.create_result(self.job, self.create_file(self.version), **{'errors': 1}) r = self.client.post(self.update_url, {'text': '..', 'subject': '..'}) assert r.status_code == 302 assert len(mail.outbox) == 1 def test_notify_mail_preview(self): for i in range(2): self.create_result(self.job, self.create_file(self.version), **{'errors': 1}) r = self.client.post(self.update_url, {'text': 'the message', 'subject': 'the subject', 'preview_only': 'on'}) assert r.status_code == 302 assert len(mail.outbox) == 0 rs = self.job.get_notify_preview_emails() assert [e.subject for e in rs] == ['the subject'] def test_notify_rendering(self): self.create_result(self.job, self.create_file(self.version), **{'errors': 1}) r = self.client.post(self.update_url, {'text': '{{ FAILING_ADDONS.0.name }}' '{{ FAILING_ADDONS.0.compat_link }}', 'subject': '{{ FAILING_ADDONS.0.name }} blah'}) assert r.status_code == 302 assert len(mail.outbox) == 1 url = reverse('devhub.versions.edit', args=[self.addon.pk, self.version.pk]) assert str(self.addon.name) in mail.outbox[0].body assert url in mail.outbox[0].body assert str(self.addon.name) in mail.outbox[0].subject def test_notify_unicode(self): self.addon.name = u'འབྲུག་ཡུལ།' self.addon.save() self.create_result(self.job, self.create_file(self.version), **{'errors': 1}) r = self.client.post(self.update_url, {'text': '{{ FAILING_ADDONS.0.name }}', 'subject': '{{ FAILING_ADDONS.0.name }} blah'}) assert r.status_code == 302 assert len(mail.outbox) == 1 assert mail.outbox[0].body == self.addon.name def test_notify_template(self): for text, res in (['some sample text', True], ['{{ FAILING_ADDONS.0.name }}{% if %}', False]): assert forms.NotifyForm( {'text': text, 'subject': '...'}).is_valid() == res def test_notify_syntax(self): for text, res in (['some sample text', True], ['{{ FAILING_ADDONS.0.name }}{% if %}', False]): r = self.client.post(self.syntax_url, {'text': text, 'subject': '..'}) assert r.status_code == 200 assert json.loads(r.content)['valid'] == res class TestBulkValidationTask(BulkValidationTest): def test_validate(self): self.start_validation() res = ValidationResult.objects.get() self.assertCloseToNow(res.completed) assert not res.task_error validation = json.loads(res.validation) assert res.errors == 1 assert validation['messages'][0]['id'] == ['main', 'prepare_package', 'not_found'] assert res.valid is False assert res.warnings == 0, [mess['message'] for mess in validation['messages']] assert res.notices == 0 assert validation['errors'] == 1 self.assertCloseToNow(res.validation_job.completed) assert 
res.validation_job.stats['total'] == 1 assert res.validation_job.stats['completed'] == 1 assert res.validation_job.stats['passing'] == 0 assert res.validation_job.stats['failing'] == 1 assert res.validation_job.stats['errors'] == 0 assert len(mail.outbox) == 1 assert mail.outbox[0].subject == ( 'Behold! Validation results for Firefox %s->%s' % (self.curr_max.version, self.new_max.version)) assert mail.outbox[0].to == ['[email protected]'] @mock.patch('validator.validate.validate') def test_validator_bulk_compat_flag(self, validate): try: self.start_validation() except Exception: # We only care about the call to `validate()`, not the result. pass assert validate.call_args[1].get('compat_test') @mock.patch('olympia.zadmin.tasks.run_validator') def test_task_error(self, run_validator): run_validator.side_effect = RuntimeError('validation error') try: self.start_validation() except: # the real test is how it's handled, below... pass res = ValidationResult.objects.get() err = res.task_error.strip() assert err.endswith('RuntimeError: validation error'), ( 'Unexpected: %s' % err) self.assertCloseToNow(res.completed) assert res.validation_job.stats['total'] == 1 assert res.validation_job.stats['errors'] == 1 assert res.validation_job.stats['passing'] == 0 assert res.validation_job.stats['failing'] == 0 @mock.patch('olympia.zadmin.tasks.run_validator') def test_validate_for_appversions(self, run_validator): data = { "errors": 1, "warnings": 50, "notices": 1, "messages": [], "compatibility_summary": { "errors": 0, "warnings": 0, "notices": 0 }, "metadata": {} } run_validator.return_value = json.dumps(data) self.start_validation() assert run_validator.called assert run_validator.call_args[1]['for_appversions'] == ( {amo.FIREFOX.guid: [self.new_max.version]}) @mock.patch('olympia.zadmin.tasks.run_validator') def test_validate_all_tiers(self, run_validator): run_validator.return_value = json.dumps(VALIDATOR_SKELETON_RESULTS) res = self.create_result(self.create_job(), self.create_file(), **{}) tasks.bulk_validate_file(res.id) assert run_validator.called assert run_validator.call_args[1]['test_all_tiers'] @mock.patch('olympia.zadmin.tasks.run_validator') def test_merge_with_compat_summary(self, run_validator): data = { "errors": 1, "detected_type": "extension", "success": False, "warnings": 50, "notices": 1, "ending_tier": 5, "messages": [ {"description": "A global function was called ...", "tier": 3, "message": "Global called in dangerous manner", "uid": "de93a48831454e0b9d965642f6d6bf8f", "id": [], "compatibility_type": None, "for_appversions": None, "type": "warning"}, {"description": ("...no longer indicate the language " "of Firefox's UI..."), "tier": 5, "message": "navigator.language may not behave as expected", "uid": "f44c1930887c4d9e8bd2403d4fe0253a", "id": [], "compatibility_type": "error", "for_appversions": { "{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": ["4.2a1pre", "5.0a2", "6.0a1"]}, "type": "warning"}], "compatibility_summary": { "notices": 1, "errors": 6, "warnings": 0}, "metadata": { "version": "1.0", "name": "FastestFox", "id": "<id>"}} run_validator.return_value = json.dumps(data) res = self.create_result(self.create_job(), self.create_file(), **{}) tasks.bulk_validate_file(res.id) assert run_validator.called res = ValidationResult.objects.get(pk=res.pk) assert res.errors == ( data['errors'] + data['compatibility_summary']['errors']) assert res.warnings == ( data['warnings'] + data['compatibility_summary']['warnings']) assert res.notices == ( data['notices'] + 
data['compatibility_summary']['notices']) @mock.patch('validator.validate.validate') def test_app_version_overrides(self, validate): validate.return_value = json.dumps(VALIDATOR_SKELETON_RESULTS) self.start_validation(new_max='3.7a4') assert validate.called overrides = validate.call_args[1]['overrides'] assert overrides['targetapp_minVersion'] == {amo.FIREFOX.guid: '3.7a4'} assert overrides['targetapp_maxVersion'] == {amo.FIREFOX.guid: '3.7a4'} def create_version(self, addon, statuses, version_str=None): max = self.max if version_str: max = AppVersion.objects.filter(version=version_str)[0] version = Version.objects.create(addon=addon) ApplicationsVersions.objects.create(application=self.application, min=self.min, max=max, version=version) for status in statuses: File.objects.create(status=status, version=version) return version def find_files(self, job_kwargs=None): if not job_kwargs: job_kwargs = {} job = self.create_job(**job_kwargs) find_files(job) return list(job.result_set.values_list('file_id', flat=True)) def test_getting_disabled(self): self.addon.update(status=amo.STATUS_DISABLED) assert len(self.find_files()) == 0 def test_getting_deleted(self): self.addon.update(status=amo.STATUS_DELETED) assert len(self.find_files()) == 0 def test_getting_status(self): self.create_version(self.addon, [amo.STATUS_PUBLIC, amo.STATUS_NOMINATED]) ids = self.find_files() assert len(ids) == 2 def test_getting_latest_public(self): old_version = self.create_version(self.addon, [amo.STATUS_PUBLIC]) self.create_version(self.addon, [amo.STATUS_NULL]) ids = self.find_files() assert len(ids) == 1 assert old_version.files.all()[0].pk == ids[0] def test_getting_latest_public_order(self): self.create_version(self.addon, [amo.STATUS_PURGATORY]) new_version = self.create_version(self.addon, [amo.STATUS_PUBLIC]) ids = self.find_files() assert len(ids) == 1 assert new_version.files.all()[0].pk == ids[0] def delete_orig_version(self, fixup=True): # Because deleting versions resets the status... self.version.delete() # Don't really care what status this is, as long # as it gets past the first SQL query. 
self.addon.update(status=amo.STATUS_PUBLIC) def test_no_versions(self): self.delete_orig_version() assert len(self.find_files()) == 0 def test_no_files(self): self.version.files.all().delete() self.addon.update(status=amo.STATUS_PUBLIC) assert len(self.find_files()) == 0 def test_not_public(self): version = self.create_version(self.addon, [amo.STATUS_LITE]) self.delete_orig_version() ids = self.find_files() assert len(ids) == 1 assert version.files.all()[0].pk == ids[0] def test_not_public_and_newer(self): self.create_version(self.addon, [amo.STATUS_LITE]) new_version = self.create_version(self.addon, [amo.STATUS_LITE]) self.delete_orig_version() ids = self.find_files() assert len(ids) == 1 assert new_version.files.all()[0].pk == ids[0] def test_not_public_w_beta(self): self.create_version(self.addon, [amo.STATUS_LITE]) self.create_version(self.addon, [amo.STATUS_BETA]) self.delete_orig_version() ids = self.find_files() assert len(ids) == 2 def test_not_public_w_multiple_files(self): self.create_version(self.addon, [amo.STATUS_BETA]) new_version = self.create_version(self.addon, [amo.STATUS_LITE, amo.STATUS_BETA]) self.delete_orig_version() ids = self.find_files() assert len(ids) == 2 assert sorted([v.id for v in new_version.files.all()]) == sorted(ids) def test_not_prelim_w_multiple_files(self): self.create_version(self.addon, [amo.STATUS_BETA]) self.create_version(self.addon, [amo.STATUS_BETA, amo.STATUS_NOMINATED]) self.delete_orig_version() ids = self.find_files() assert len(ids) == 3 def test_public_partial(self): self.create_version(self.addon, [amo.STATUS_PUBLIC]) new_version = self.create_version(self.addon, [amo.STATUS_BETA, amo.STATUS_DISABLED]) ids = self.find_files() assert len(ids) == 2 assert new_version.files.all()[1].pk not in ids def test_getting_w_unreviewed(self): old_version = self.create_version(self.addon, [amo.STATUS_PUBLIC]) new_version = self.create_version(self.addon, [amo.STATUS_UNREVIEWED]) ids = self.find_files() assert len(ids) == 2 old_version_pk = old_version.files.all()[0].pk new_version_pk = new_version.files.all()[0].pk assert sorted([old_version_pk, new_version_pk]) == sorted(ids) def test_multiple_files(self): self.create_version(self.addon, [amo.STATUS_PUBLIC, amo.STATUS_PUBLIC, amo.STATUS_PUBLIC]) ids = self.find_files() assert len(ids) == 3 def test_multiple_public(self): self.create_version(self.addon, [amo.STATUS_PUBLIC]) new_version = self.create_version(self.addon, [amo.STATUS_PUBLIC]) ids = self.find_files() assert len(ids) == 1 assert new_version.files.all()[0].pk == ids[0] def test_multiple_addons(self): addon = Addon.objects.create(type=amo.ADDON_EXTENSION) self.create_version(addon, [amo.STATUS_PURGATORY]) ids = self.find_files() assert len(ids) == 1 assert self.version.files.all()[0].pk == ids[0] def test_no_app(self): version = self.create_version(self.addon, [amo.STATUS_LITE]) self.delete_orig_version() version.apps.all().delete() ids = self.find_files() assert len(ids) == 0 def test_wrong_version(self): self.create_version(self.addon, [amo.STATUS_LITE], version_str='4.0b2pre') self.delete_orig_version() ids = self.find_files() assert len(ids) == 0 def test_version_slightly_newer_than_current(self): # addon matching current app/version but with a newer public version # that is within range of the target app/version. # See bug 658739. 
self.create_version(self.addon, [amo.STATUS_PUBLIC], version_str='3.7a2') newer = self.create_version(self.addon, [amo.STATUS_PUBLIC], version_str='3.7a3') kw = dict(curr_max_version=self.appversion('3.7a2'), target_version=self.appversion('3.7a4')) ids = self.find_files(job_kwargs=kw) assert newer.files.all()[0].pk == ids[0] def test_version_compatible_with_newer_app(self): # addon with a newer public version that is already compatible with # an app/version higher than the target. # See bug 658739. self.create_version(self.addon, [amo.STATUS_PUBLIC], version_str='3.7a2') # A version that supports a newer Firefox than what we're targeting self.create_version(self.addon, [amo.STATUS_PUBLIC], version_str='3.7a4') kw = dict(curr_max_version=self.appversion('3.7a2'), target_version=self.appversion('3.7a3')) ids = self.find_files(job_kwargs=kw) assert len(ids) == 0 def test_version_compatible_with_target_app(self): self.create_version(self.addon, [amo.STATUS_PUBLIC], version_str='3.7a2') # Already has a version that supports target: self.create_version(self.addon, [amo.STATUS_PUBLIC], version_str='3.7a3') kw = dict(curr_max_version=self.appversion('3.7a2'), target_version=self.appversion('3.7a3')) ids = self.find_files(job_kwargs=kw) assert len(ids) == 0 def test_version_webextension(self): self.version.files.update(is_webextension=True) assert not self.find_files() class TestTallyValidationErrors(BulkValidationTest): def setUp(self): super(TestTallyValidationErrors, self).setUp() self.data = { "errors": 1, "warnings": 1, "notices": 0, "messages": [ {"message": "message one", "description": ["message one long"], "id": ["path", "to", "test_one"], "uid": "de93a48831454e0b9d965642f6d6bf8f", "type": "error"}, {"message": "message two", "description": "message two long", "id": ["path", "to", "test_two"], "uid": "f44c1930887c4d9e8bd2403d4fe0253a", "compatibility_type": "error", "type": "warning"}], "metadata": {}, "compatibility_summary": { "errors": 1, "warnings": 1, "notices": 0}} def csv(self, job_id): r = self.client.get(reverse('zadmin.validation_tally_csv', args=[job_id])) assert r.status_code == 200 rdr = csv.reader(StringIO(r.content)) header = rdr.next() rows = sorted((r for r in rdr), key=lambda r: r[0]) return header, rows @mock.patch('olympia.zadmin.tasks.run_validator') def test_csv(self, run_validator): run_validator.return_value = json.dumps(self.data) self.start_validation() res = ValidationResult.objects.get() assert res.task_error is None header, rows = self.csv(res.validation_job.pk) assert header == ['message_id', 'message', 'long_message', 'type', 'addons_affected'] assert rows.pop(0) == ['path.to.test_one', 'message one', 'message one long', 'error', '1'] assert rows.pop(0) == ['path.to.test_two', 'message two', 'message two long', 'error', '1'] def test_count_per_addon(self): job = self.create_job() data_str = json.dumps(self.data) for i in range(3): tasks.tally_validation_results(job.pk, data_str) header, rows = self.csv(job.pk) assert rows.pop(0) == ['path.to.test_one', 'message one', 'message one long', 'error', '3'] assert rows.pop(0) == ['path.to.test_two', 'message two', 'message two long', 'error', '3'] def test_nested_list_messages(self): job = self.create_job() self.data['messages'] = [{ "message": "message one", "description": ["message one long", ["something nested"]], "id": ["path", "to", "test_one"], "uid": "de93a48831454e0b9d965642f6d6bf8f", "type": "error", }] data_str = json.dumps(self.data) # This was raising an exception. 
bug 733845 tasks.tally_validation_results(job.pk, data_str) class TestEmailPreview(TestCase): fixtures = ['base/addon_3615', 'base/users'] def setUp(self): super(TestEmailPreview, self).setUp() assert self.client.login(username='[email protected]', password='password') addon = Addon.objects.get(pk=3615) self.topic = EmailPreviewTopic(addon) def test_csv(self): self.topic.send_mail('the subject', u'Hello Ivan Krsti\u0107', from_email='[email protected]', recipient_list=['[email protected]']) r = self.client.get(reverse('zadmin.email_preview_csv', args=[self.topic.topic])) assert r.status_code == 200 rdr = csv.reader(StringIO(r.content)) assert rdr.next() == ['from_email', 'recipient_list', 'subject', 'body'] assert rdr.next() == ['[email protected]', '[email protected]', 'the subject', 'Hello Ivan Krsti\xc4\x87'] class TestMonthlyPick(TestCase): fixtures = ['base/addon_3615', 'base/users'] def setUp(self): super(TestMonthlyPick, self).setUp() assert self.client.login(username='[email protected]', password='password') self.url = reverse('zadmin.monthly_pick') addon = Addon.objects.get(pk=3615) MonthlyPick.objects.create(addon=addon, locale='zh-CN', blurb="test data", image="http://www.google.com") self.f = self.client.get(self.url).context['form'].initial_forms[0] self.initial = self.f.initial def test_form_initial(self): assert self.initial['addon'] == 3615 assert self.initial['locale'] == 'zh-CN' assert self.initial['blurb'] == 'test data' assert self.initial['image'] == 'http://www.google.com' def test_success_insert(self): dupe = initial(self.f) del dupe['id'] dupe.update(locale='fr') data = formset(initial(self.f), dupe, initial_count=1) self.client.post(self.url, data) assert MonthlyPick.objects.count() == 2 assert MonthlyPick.objects.all()[1].locale == 'fr' def test_insert_no_image(self): dupe = initial(self.f) dupe.update(id='', image='', locale='en-US') data = formset(initial(self.f), dupe, initial_count=1) self.client.post(self.url, data) assert MonthlyPick.objects.count() == 2 assert MonthlyPick.objects.all()[1].image == '' def test_success_insert_no_locale(self): dupe = initial(self.f) del dupe['id'] del dupe['locale'] data = formset(initial(self.f), dupe, initial_count=1) self.client.post(self.url, data) assert MonthlyPick.objects.count() == 2 assert MonthlyPick.objects.all()[1].locale == '' def test_insert_long_blurb(self): dupe = initial(self.f) dupe.update(id='', blurb='x' * 201, locale='en-US') data = formset(initial(self.f), dupe, initial_count=1) r = self.client.post(self.url, data) assert r.context['form'].errors[1]['blurb'][0] == ( 'Ensure this value has at most 200 characters (it has 201).') def test_success_update(self): d = initial(self.f) d.update(locale='fr') r = self.client.post(self.url, formset(d, initial_count=1)) assert r.status_code == 302 assert MonthlyPick.objects.all()[0].locale == 'fr' def test_success_delete(self): d = initial(self.f) d.update(DELETE=True) self.client.post(self.url, formset(d, initial_count=1)) assert MonthlyPick.objects.count() == 0 def test_require_login(self): self.client.logout() r = self.client.get(self.url) assert r.status_code == 302 class TestFeatures(TestCase): fixtures = ['base/users', 'base/collections', 'base/addon_3615.json'] def setUp(self): super(TestFeatures, self).setUp() assert self.client.login(username='[email protected]', password='password') self.url = reverse('zadmin.features') FeaturedCollection.objects.create(application=amo.FIREFOX.id, locale='zh-CN', collection_id=80) self.f = 
self.client.get(self.url).context['form'].initial_forms[0] self.initial = self.f.initial def test_form_initial(self): assert self.initial['application'] == amo.FIREFOX.id assert self.initial['locale'] == 'zh-CN' assert self.initial['collection'] == 80 def test_form_attrs(self): r = self.client.get(self.url) assert r.status_code == 200 doc = pq(r.content) assert doc('#features tr').attr('data-app') == str(amo.FIREFOX.id) assert doc('#features td.app').hasClass(amo.FIREFOX.short) assert doc('#features td.collection.loading').attr( 'data-collection') == '80' assert doc('#features .collection-ac.js-hidden') assert not doc('#features .collection-ac[disabled]') def test_disabled_autocomplete_errors(self): """If any collection errors, autocomplete field should be enabled.""" d = dict(application=amo.FIREFOX.id, collection=999) data = formset(self.initial, d, initial_count=1) r = self.client.post(self.url, data) doc = pq(r.content) assert not doc('#features .collection-ac[disabled]') def test_required_app(self): d = dict(locale='zh-CN', collection=80) data = formset(self.initial, d, initial_count=1) r = self.client.post(self.url, data) assert r.status_code == 200 assert r.context['form'].errors[0]['application'] == ( ['This field is required.']) assert r.context['form'].errors[0]['collection'] == ( ['Invalid collection for this application.']) def test_bad_app(self): d = dict(application=999, collection=80) data = formset(self.initial, d, initial_count=1) r = self.client.post(self.url, data) assert r.context['form'].errors[0]['application'] == [ 'Select a valid choice. 999 is not one of the available choices.'] def test_bad_collection_for_app(self): d = dict(application=amo.THUNDERBIRD.id, collection=80) data = formset(self.initial, d, initial_count=1) r = self.client.post(self.url, data) assert r.context['form'].errors[0]['collection'] == ( ['Invalid collection for this application.']) def test_optional_locale(self): d = dict(application=amo.FIREFOX.id, collection=80) data = formset(self.initial, d, initial_count=1) r = self.client.post(self.url, data) assert r.context['form'].errors == [{}] def test_bad_locale(self): d = dict(application=amo.FIREFOX.id, locale='klingon', collection=80) data = formset(self.initial, d, initial_count=1) r = self.client.post(self.url, data) assert r.context['form'].errors[0]['locale'] == ( ['Select a valid choice. 
klingon is not one of the available ' 'choices.']) def test_required_collection(self): d = dict(application=amo.FIREFOX.id) data = formset(self.initial, d, initial_count=1) r = self.client.post(self.url, data) assert r.context['form'].errors[0]['collection'] == ( ['This field is required.']) def test_bad_collection(self): d = dict(application=amo.FIREFOX.id, collection=999) data = formset(self.initial, d, initial_count=1) r = self.client.post(self.url, data) assert r.context['form'].errors[0]['collection'] == ( ['Invalid collection for this application.']) def test_success_insert(self): dupe = initial(self.f) del dupe['id'] dupe.update(locale='fr') data = formset(initial(self.f), dupe, initial_count=1) self.client.post(self.url, data) assert FeaturedCollection.objects.count() == 2 assert FeaturedCollection.objects.all()[1].locale == 'fr' def test_success_update(self): d = initial(self.f) d.update(locale='fr') r = self.client.post(self.url, formset(d, initial_count=1)) assert r.status_code == 302 assert FeaturedCollection.objects.all()[0].locale == 'fr' def test_success_delete(self): d = initial(self.f) d.update(DELETE=True) self.client.post(self.url, formset(d, initial_count=1)) assert FeaturedCollection.objects.count() == 0 class TestLookup(TestCase): fixtures = ['base/users'] def setUp(self): super(TestLookup, self).setUp() assert self.client.login(username='[email protected]', password='password') self.user = UserProfile.objects.get(pk=999) self.url = reverse('zadmin.search', args=['users', 'userprofile']) def test_logged_out(self): self.client.logout() assert self.client.get('%s?q=admin' % self.url).status_code == 403 def check_results(self, q, expected): res = self.client.get(urlparams(self.url, q=q)) assert res.status_code == 200 content = json.loads(res.content) assert len(content) == len(expected) ids = [int(c['value']) for c in content] emails = [u'%s' % c['label'] for c in content] for d in expected: id = d['value'] email = u'%s' % d['label'] assert id in ids, ( 'Expected user ID "%s" not found' % id) assert email in emails, ( 'Expected username "%s" not found' % email) def test_lookup_wrong_model(self): self.url = reverse('zadmin.search', args=['doesnt', 'exist']) res = self.client.get(urlparams(self.url, q='')) assert res.status_code == 404 def test_lookup_empty(self): users = UserProfile.objects.values('id', 'email') self.check_results('', [dict( value=u['id'], label=u['email']) for u in users]) def test_lookup_by_id(self): self.check_results(self.user.id, [dict(value=self.user.id, label=self.user.email)]) def test_lookup_by_email(self): self.check_results(self.user.email, [dict(value=self.user.id, label=self.user.email)]) def test_lookup_by_username(self): self.check_results(self.user.username, [dict(value=self.user.id, label=self.user.email)]) class TestAddonSearch(amo.tests.ESTestCase): fixtures = ['base/users', 'base/addon_3615'] def setUp(self): super(TestAddonSearch, self).setUp() self.reindex(Addon) assert self.client.login(username='[email protected]', password='password') self.url = reverse('zadmin.addon-search') def test_lookup_addon(self): res = self.client.get(urlparams(self.url, q='delicious')) # There's only one result, so it should just forward us to that page. 
assert res.status_code == 302 class TestAddonAdmin(TestCase): fixtures = ['base/users', 'base/addon_3615'] def setUp(self): super(TestAddonAdmin, self).setUp() assert self.client.login(username='[email protected]', password='password') self.url = reverse('admin:addons_addon_changelist') def test_basic(self): res = self.client.get(self.url) doc = pq(res.content) rows = doc('#result_list tbody tr') assert rows.length == 1 assert rows.find('a').attr('href') == ( '/en-US/admin/models/addons/addon/3615/') class TestAddonManagement(TestCase): fixtures = ['base/addon_3615', 'base/users'] def setUp(self): super(TestAddonManagement, self).setUp() self.addon = Addon.objects.get(pk=3615) self.url = reverse('zadmin.addon_manage', args=[self.addon.slug]) self.client.login(username='[email protected]', password='password') def test_can_manage_unlisted_addons(self): """Unlisted addons can be managed too.""" self.addon.update(is_listed=False) assert self.client.get(self.url).status_code == 200 def _form_data(self, data=None): initial_data = { 'status': '4', 'form-0-status': '4', 'form-0-id': '67442', 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '1', } if data: initial_data.update(data) return initial_data def test_addon_status_change(self): data = self._form_data({'status': '3'}) r = self.client.post(self.url, data, follow=True) assert r.status_code == 200 addon = Addon.objects.get(pk=3615) assert addon.status == 3 def test_addon_file_status_change(self): data = self._form_data({'form-0-status': '1'}) r = self.client.post(self.url, data, follow=True) assert r.status_code == 200 file = File.objects.get(pk=67442) assert file.status == 1 def test_addon_deleted_file_status_change(self): file = File.objects.get(pk=67442) file.version.update(deleted=True) data = self._form_data({'form-0-status': '1'}) r = self.client.post(self.url, data, follow=True) # Form errors are silently suppressed. assert r.status_code == 200 # But no change. 
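        # (4 is the fixture file's original status, i.e. amo.STATUS_PUBLIC.)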
assert file.status == 4 @mock.patch.object(File, 'file_path', amo.tests.AMOPaths().file_fixture_path( 'delicious_bookmarks-2.1.106-fx.xpi')) def test_regenerate_hash(self): version = Version.objects.create(addon_id=3615) file = File.objects.create( filename='delicious_bookmarks-2.1.106-fx.xpi', version=version) r = self.client.post(reverse('zadmin.recalc_hash', args=[file.id])) assert json.loads(r.content)[u'success'] == 1 file = File.objects.get(pk=file.id) assert file.size, 'File size should not be zero' assert file.hash, 'File hash should not be empty' @mock.patch.object(File, 'file_path', amo.tests.AMOPaths().file_fixture_path( 'delicious_bookmarks-2.1.106-fx.xpi')) def test_regenerate_hash_get(self): """ Don't allow GET """ version = Version.objects.create(addon_id=3615) file = File.objects.create( filename='delicious_bookmarks-2.1.106-fx.xpi', version=version) r = self.client.get(reverse('zadmin.recalc_hash', args=[file.id])) assert r.status_code == 405 # GET out of here class TestCompat(amo.tests.ESTestCase): fixtures = ['base/users'] def setUp(self): super(TestCompat, self).setUp() self.url = reverse('zadmin.compat') self.client.login(username='[email protected]', password='password') self.app = amo.FIREFOX self.app_version = amo.COMPAT[0]['main'] self.addon = self.populate(guid='xxx') self.generate_reports(self.addon, good=0, bad=0, app=self.app, app_version=self.app_version) def update(self): compatibility_report() self.refresh() def populate(self, **kw): now = datetime.now() name = 'Addon %s' % now kw.update(guid=name) addon = amo.tests.addon_factory(name=name, **kw) UpdateCount.objects.create(addon=addon, count=10, date=now) return addon def generate_reports(self, addon, good, bad, app, app_version): defaults = dict(guid=addon.guid, app_guid=app.guid, app_version=app_version) for x in xrange(good): CompatReport.objects.create(works_properly=True, **defaults) for x in xrange(bad): CompatReport.objects.create(works_properly=False, **defaults) self.update() def get_pq(self, **kw): r = self.client.get(self.url, kw) assert r.status_code == 200 return pq(r.content)('#compat-results') def test_defaults(self): r = self.client.get(self.url) assert r.status_code == 200 assert r.context['app'] == self.app assert r.context['version'] == self.app_version table = pq(r.content)('#compat-results') assert table.length == 1 assert table.find('.no-results').length == 1 def check_row(self, tr, addon, good, bad, percentage, app, app_version): assert tr.length == 1 version = addon.current_version.version name = tr.find('.name') assert name.find('.version').text() == 'v' + version assert name.remove('.version').text() == unicode(addon.name) assert name.find('a').attr('href') == addon.get_url_path() assert tr.find('.maxver').text() == ( addon.compatible_apps[app].max.version) incompat = tr.find('.incompat') assert incompat.find('.bad').text() == str(bad) assert incompat.find('.total').text() == str(good + bad) percentage += '%' assert percentage in incompat.text(), ( 'Expected incompatibility to be %r' % percentage) assert tr.find('.version a').attr('href') == ( reverse('devhub.versions.edit', args=[addon.slug, addon.current_version.id])) assert tr.find('.reports a').attr('href') == ( reverse('compat.reporter_detail', args=[addon.guid])) form = tr.find('.overrides form') assert form.attr('action') == reverse( 'admin:addons_compatoverride_add') self.check_field(form, '_compat_ranges-TOTAL_FORMS', '1') self.check_field(form, '_compat_ranges-INITIAL_FORMS', '0') self.check_field(form, '_continue', '1') 
self.check_field(form, '_confirm', '1') self.check_field(form, 'addon', str(addon.id)) self.check_field(form, 'guid', addon.guid) compat_field = '_compat_ranges-0-%s' self.check_field(form, compat_field % 'min_version', '0') self.check_field(form, compat_field % 'max_version', version) self.check_field(form, compat_field % 'app', str(app.id)) self.check_field(form, compat_field % 'min_app_version', app_version + 'a1') self.check_field(form, compat_field % 'max_app_version', app_version + '*') def check_field(self, form, name, val): assert form.find('input[name="%s"]' % name).val() == val def test_firefox_hosted(self): addon = self.populate() self.generate_reports(addon, good=0, bad=11, app=self.app, app_version=self.app_version) tr = self.get_pq().find('tr[data-guid="%s"]' % addon.guid) self.check_row(tr, addon, good=0, bad=11, percentage='100.0', app=self.app, app_version=self.app_version) # Add an override for this current app version. compat = CompatOverride.objects.create(addon=addon, guid=addon.guid) CompatOverrideRange.objects.create( compat=compat, app=amo.FIREFOX.id, min_app_version=self.app_version + 'a1', max_app_version=self.app_version + '*') # Check that there is an override for this current app version. tr = self.get_pq().find('tr[data-guid="%s"]' % addon.guid) assert tr.find('.overrides a').attr('href') == ( reverse('admin:addons_compatoverride_change', args=[compat.id])) def test_non_default_version(self): app_version = amo.COMPAT[2]['main'] addon = self.populate() self.generate_reports(addon, good=0, bad=11, app=self.app, app_version=app_version) pq = self.get_pq() assert pq.find('tr[data-guid="%s"]' % addon.guid).length == 0 appver = '%s-%s' % (self.app.id, app_version) tr = self.get_pq(appver=appver)('tr[data-guid="%s"]' % addon.guid) self.check_row(tr, addon, good=0, bad=11, percentage='100.0', app=self.app, app_version=app_version) def test_minor_versions(self): addon = self.populate() self.generate_reports(addon, good=0, bad=1, app=self.app, app_version=self.app_version) self.generate_reports(addon, good=1, bad=2, app=self.app, app_version=self.app_version + 'a2') tr = self.get_pq(ratio=0.0, minimum=0).find('tr[data-guid="%s"]' % addon.guid) self.check_row(tr, addon, good=1, bad=3, percentage='75.0', app=self.app, app_version=self.app_version) def test_ratio(self): addon = self.populate() self.generate_reports(addon, good=11, bad=11, app=self.app, app_version=self.app_version) # Should not show up for > 80%. pq = self.get_pq() assert pq.find('tr[data-guid="%s"]' % addon.guid).length == 0 # Should not show up for > 50%. tr = self.get_pq(ratio=.5).find('tr[data-guid="%s"]' % addon.guid) assert tr.length == 0 # Should show up for > 40%. tr = self.get_pq(ratio=.4).find('tr[data-guid="%s"]' % addon.guid) assert tr.length == 1 def test_min_incompatible(self): addon = self.populate() self.generate_reports(addon, good=0, bad=11, app=self.app, app_version=self.app_version) # Should show up for >= 10. pq = self.get_pq() assert pq.find('tr[data-guid="%s"]' % addon.guid).length == 1 # Should show up for >= 0. tr = self.get_pq(minimum=0).find('tr[data-guid="%s"]' % addon.guid) assert tr.length == 1 # Should not show up for >= 20. 
tr = self.get_pq(minimum=20).find('tr[data-guid="%s"]' % addon.guid) assert tr.length == 0 class TestMemcache(TestCase): fixtures = ['base/addon_3615', 'base/users'] def setUp(self): super(TestMemcache, self).setUp() self.url = reverse('zadmin.memcache') cache.set('foo', 'bar') self.client.login(username='[email protected]', password='password') def test_login(self): self.client.logout() assert self.client.get(self.url).status_code == 302 def test_can_clear(self): self.client.post(self.url, {'yes': 'True'}) assert cache.get('foo') is None def test_cant_clear(self): self.client.post(self.url, {'yes': 'False'}) assert cache.get('foo') == 'bar' class TestElastic(amo.tests.ESTestCase): fixtures = ['base/addon_3615', 'base/users'] def setUp(self): super(TestElastic, self).setUp() self.url = reverse('zadmin.elastic') self.client.login(username='[email protected]', password='password') def test_login(self): self.client.logout() self.assert3xx( self.client.get(self.url), reverse('users.login') + '?to=/en-US/admin/elastic') class TestEmailDevs(TestCase): fixtures = ['base/addon_3615', 'base/users'] def setUp(self): super(TestEmailDevs, self).setUp() self.login('admin') self.addon = Addon.objects.get(pk=3615) def post(self, recipients='eula', subject='subject', message='msg', preview_only=False): return self.client.post(reverse('zadmin.email_devs'), dict(recipients=recipients, subject=subject, message=message, preview_only=preview_only)) def test_preview(self): res = self.post(preview_only=True) self.assertNoFormErrors(res) preview = EmailPreviewTopic(topic='email-devs') assert [e.recipient_list for e in preview.filter()] == ['[email protected]'] assert len(mail.outbox) == 0 def test_actual(self): subject = 'about eulas' message = 'message about eulas' res = self.post(subject=subject, message=message) self.assertNoFormErrors(res) self.assert3xx(res, reverse('zadmin.email_devs')) assert len(mail.outbox) == 1 assert mail.outbox[0].subject == subject assert mail.outbox[0].body == message assert mail.outbox[0].to == ['[email protected]'] assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL def test_only_eulas(self): self.addon.update(eula=None) res = self.post() self.assertNoFormErrors(res) assert len(mail.outbox) == 0 def test_sdk_devs(self): (File.objects.filter(version__addon=self.addon) .update(jetpack_version='1.5')) res = self.post(recipients='sdk') self.assertNoFormErrors(res) assert len(mail.outbox) == 1 assert mail.outbox[0].to == ['[email protected]'] def test_only_sdk_devs(self): res = self.post(recipients='sdk') self.assertNoFormErrors(res) assert len(mail.outbox) == 0 def test_only_extensions(self): self.addon.update(type=amo.ADDON_EXTENSION) res = self.post(recipients='all_extensions') self.assertNoFormErrors(res) assert len(mail.outbox) == 1 def test_ignore_deleted_always(self): self.addon.update(status=amo.STATUS_DELETED) for name, label in DevMailerForm._choices: res = self.post(recipients=name) self.assertNoFormErrors(res) assert len(mail.outbox) == 0 def test_exclude_pending_for_addons(self): self.addon.update(status=amo.STATUS_PENDING) for name, label in DevMailerForm._choices: if name in ('payments', 'desktop_apps'): continue res = self.post(recipients=name) self.assertNoFormErrors(res) assert len(mail.outbox) == 0 def test_exclude_fxa_migrated(self): user = self.addon.authors.get() user.update(fxa_id='yup') res = self.post(recipients='fxa') self.assertNoFormErrors(res) assert len(mail.outbox) == 0 def test_include_fxa_not_migrated(self): res = self.post(recipients='fxa') 
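        # The fixture author has not migrated to FxA (no fxa_id), so the
        # 'fxa' mailing should still include them.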
user = self.addon.authors.get() self.assertNoFormErrors(res) assert len(mail.outbox) == 1 user = self.addon.authors.get() user.update(fxa_id='') res = self.post(recipients='fxa') self.assertNoFormErrors(res) assert len(mail.outbox) == 2 class TestFileDownload(TestCase): fixtures = ['base/users'] def setUp(self): super(TestFileDownload, self).setUp() assert self.client.login(username='[email protected]', password='password') self.file = open(get_image_path('animated.png'), 'rb') resp = self.client.post(reverse('devhub.upload'), {'upload': self.file}) assert resp.status_code == 302 self.upload = FileUpload.objects.get() self.url = reverse('zadmin.download_file', args=[self.upload.uuid]) def test_download(self): """Test that downloading file_upload objects works.""" resp = self.client.get(self.url) assert resp.status_code == 200 assert resp.content == self.file.read() class TestPerms(TestCase): fixtures = ['base/users'] FILE_ID = '1234567890abcdef1234567890abcdef' def assert_status(self, view, status, **kw): """Check that requesting the named view returns the expected status.""" assert self.client.get(reverse(view, kwargs=kw)).status_code == status def test_admin_user(self): # Admin should see views with Django's perm decorator and our own. assert self.client.login(username='[email protected]', password='password') self.assert_status('zadmin.index', 200) self.assert_status('zadmin.settings', 200) self.assert_status('zadmin.langpacks', 200) self.assert_status('zadmin.download_file', 404, uuid=self.FILE_ID) self.assert_status('zadmin.addon-search', 200) self.assert_status('zadmin.monthly_pick', 200) self.assert_status('zadmin.features', 200) self.assert_status('discovery.module_admin', 200) def test_staff_user(self): # Staff users have some privileges. user = UserProfile.objects.get(email='[email protected]') group = Group.objects.create(name='Staff', rules='AdminTools:View') GroupUser.objects.create(group=group, user=user) assert self.client.login(username='[email protected]', password='password') self.assert_status('zadmin.index', 200) self.assert_status('zadmin.settings', 200) self.assert_status('zadmin.langpacks', 200) self.assert_status('zadmin.download_file', 404, uuid=self.FILE_ID) self.assert_status('zadmin.addon-search', 200) self.assert_status('zadmin.monthly_pick', 200) self.assert_status('zadmin.features', 200) self.assert_status('discovery.module_admin', 200) def test_sr_reviewers_user(self): # Sr Reviewers users have only a few privileges. user = UserProfile.objects.get(email='[email protected]') group = Group.objects.create(name='Sr Reviewer', rules='ReviewerAdminTools:View') GroupUser.objects.create(group=group, user=user) assert self.client.login(username='[email protected]', password='password') self.assert_status('zadmin.index', 200) self.assert_status('zadmin.langpacks', 200) self.assert_status('zadmin.download_file', 404, uuid=self.FILE_ID) self.assert_status('zadmin.addon-search', 200) self.assert_status('zadmin.settings', 403) def test_bulk_compat_user(self): # Bulk Compatibility Updaters only have access to /admin/validation/*. 
user = UserProfile.objects.get(email='[email protected]') group = Group.objects.create(name='Bulk Compatibility Updaters', rules='BulkValidationAdminTools:View') GroupUser.objects.create(group=group, user=user) assert self.client.login(username='[email protected]', password='password') self.assert_status('zadmin.index', 200) self.assert_status('zadmin.validation', 200) self.assert_status('zadmin.langpacks', 403) self.assert_status('zadmin.download_file', 403, uuid=self.FILE_ID) self.assert_status('zadmin.addon-search', 403) self.assert_status('zadmin.settings', 403) def test_unprivileged_user(self): # Unprivileged user. assert self.client.login(username='[email protected]', password='password') self.assert_status('zadmin.index', 403) self.assert_status('zadmin.settings', 403) self.assert_status('zadmin.langpacks', 403) self.assert_status('zadmin.download_file', 403, uuid=self.FILE_ID) self.assert_status('zadmin.addon-search', 403) self.assert_status('zadmin.monthly_pick', 403) self.assert_status('zadmin.features', 403) self.assert_status('discovery.module_admin', 403) # Anonymous users should also get a 403. self.client.logout() self.assert3xx(self.client.get(reverse('zadmin.index')), reverse('users.login') + '?to=/en-US/admin/')
andymckay/addons-server
src/olympia/zadmin/tests/test_views.py
Python
bsd-3-clause
77,618