code | repo_name | path | language | license | size
---|---|---|---|---|---|
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
from django.contrib.contenttypes.models import ContentType
from taiga.base.utils import json
from taiga.base.utils.db import get_typename_for_model_instance
from taiga.base.utils.db import get_typename_for_model_instance
from . import middleware as mw
from . import backends
# The complete list of content types
# of allowed models for change events
watched_types = set([
"userstories.userstory",
"issues.issue",
"tasks.task",
"wiki.wiki_page",
"milestones.milestone",
])
def emit_event(data:dict, routing_key:str, *,
sessionid:str=None, channel:str="events"):
if not sessionid:
sessionid = mw.get_current_session_id()
data = {"session_id": sessionid,
"data": data}
backend = backends.get_events_backend()
return backend.emit_event(message=json.dumps(data),
routing_key=routing_key,
channel=channel)
def emit_event_for_model(obj, *, type:str="change", channel:str="events",
content_type:str=None, sessionid:str=None):
"""
Sends a model change event.
"""
assert type in set(["create", "change", "delete"])
assert hasattr(obj, "project_id")
if not content_type:
content_type = get_typename_for_model_instance(obj)
projectid = getattr(obj, "project_id")
pk = getattr(obj, "pk", None)
app_name, model_name = content_type.split(".", 1)
routing_key = "changes.project.{0}.{1}".format(projectid, app_name)
data = {"type": type,
"matches": content_type,
"pk": pk}
return emit_event(routing_key=routing_key,
channel=channel,
sessionid=sessionid,
data=data)
def emit_event_for_ids(ids, content_type:str, projectid:int, *,
type:str="change", channel:str="events", sessionid:str=None):
assert type in set(["create", "change", "delete"])
assert isinstance(ids, collections.Iterable)
assert content_type, "content_type parameter is mandatory"
app_name, model_name = content_type.split(".", 1)
routing_key = "changes.project.{0}.{1}".format(projectid, app_name)
data = {"type": type,
"matches": content_type,
"pk": ids}
return emit_event(routing_key=routing_key,
channel=channel,
sessionid=sessionid,
data=data)
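# --- Illustrative sketch (added; not part of taiga) ---------------------------
# Shows the payload shape that emit_event() hands to the events backend: the
# caller's data wrapped together with the session id. The session id, project
# id and pks below are made-up values.
if __name__ == "__main__":
    example_message = {
        "session_id": "hypothetical-session-id",
        "data": {"type": "change", "matches": "tasks.task", "pk": [1, 2, 3]},
    }
    # emit_event_for_ids([1, 2, 3], "tasks.task", 42) would serialize this
    # payload and route it to "changes.project.42.tasks" on the "events" channel.
    print(example_message)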
| Zaneh-/bearded-tribble-back | taiga/events/events.py | Python | agpl-3.0 | 3,133 |
"""ModelLoader reads python modules as specified at runtime, loading and instantiating
objects.
"""
import importlib
import logging
import os
class ModelLoader(object):
"""Load Model from config
Examples
--------
Call :py:meth:`ModelLoader.load` to create, load and return a
:class:`Model` object.
>>> loader = ModelLoader()
>>> sector_model = loader.load(sector_model_config)
>>> conversion_model = loader.load(conversion_model_config)
"""
def __init__(self):
self.logger = logging.getLogger(__name__)
def load(self, config):
"""Loads the model class specified by the config, returns an instance of that class
using the Model.from_dict method.
Arguments
---------
config : dict
The model configuration data. Must include:
- name (name for smif internal use)
- path (absolute path to python module file)
- classname (name of Model implementation class)
- anything required by the Model.from_dict classmethod
Returns
-------
:class:`~smif.model.Model`
"""
klass = self.load_model_class(config['name'], config['path'], config['classname'])
if not hasattr(klass, 'from_dict'):
msg = "Model '{}' does not have a ``from_dict`` method and " \
"cannot be loaded from config"
raise KeyError(msg.format(config['name']))
model_instance = klass.from_dict(config)
if model_instance:
return model_instance
else:
raise ValueError("Model not initialised from configuration data")
def load_model_class(self, model_name, model_path, classname):
"""Dynamically load model class
Arguments
---------
model_name : str
The name used internally to identify the SectorModel
model_path : str
The path to the python module which contains the SectorModel
implementation
classname : str
The name of the class of the SectorModel implementation
Returns
-------
class
The SectorModel implementation
"""
if not os.path.exists(model_path):
msg = "Cannot find '{}' for the '{}' model".format(model_path, model_name)
raise FileNotFoundError(msg)
msg = "Importing model %s as class %s from module at %s"
self.logger.info(msg, model_name, classname, model_path)
spec = importlib.util.spec_from_file_location(model_name, model_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
klass = module.__dict__[classname]
return klass
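# --- Illustrative sketch (added; not part of smif) ----------------------------
# Demonstrates ModelLoader.load() end to end by writing a tiny module with a
# ``from_dict`` classmethod to a temporary file. DummyModel and the temporary
# file are invented for the example.
if __name__ == '__main__':
    import tempfile
    import textwrap

    source = textwrap.dedent("""
        class DummyModel(object):
            def __init__(self, name):
                self.name = name

            @classmethod
            def from_dict(cls, config):
                return cls(config['name'])
    """)
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as handle:
        handle.write(source)

    config = {'name': 'dummy', 'path': handle.name, 'classname': 'DummyModel'}
    model = ModelLoader().load(config)
    print(model.name)  # prints: dummy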
| nismod/smif | src/smif/data_layer/model_loader.py | Python | mit | 2,774 |
from .theExceptions import ConnectionError, CreationError, DeletionError, UpdateError
class User(object):
"""This class represents a user"""
def __init__(self, users, jsonData = None):
if jsonData is None:
jsonData = {}
self._store = {}
self.users = users
self.connection = self.users.connection
self._store = {
"username": None,
"active": True,
"extra": None,
"changePassword": None,
"password": None,
}
self.isSet = False
if len(jsonData) > 0:
self._set(jsonData)
def _set(self, jsonData):
"""Initialize all fields at once. If no password is specified, it will be set as an empty string"""
self["username"] = jsonData["user"]
self["active"] = jsonData["active"]
self["extra"] = jsonData["extra"]
try:
self["changePassword"] = jsonData["changePassword"]
except Exception as e:
pass
# self["changePassword"] = ""
try:
self["password"] = jsonData["passwd"]
except KeyError:
self["password"] = ""
self.isSet = True
def getURL(self):
return "%s/user/%s" % (self.connection.getURL(), self["username"])
def save(self):
"""Save/updates the user"""
import json
payload = {}
payload.update(self._store)
payload["user"] = payload["username"]
payload["passwd"] = payload["password"]
del(payload["username"])
del(payload["password"])
payload = json.dumps(payload, default=str)
if not self.isSet:
if "username" not in self._store or "password" not in self._store:
raise KeyError("You must define self['name'] and self['password'] to be able to create a new user")
r = self.connection.session.post(self.users.getURL(), data = payload)
data = r.json()
if r.status_code == 201:
self._set(data)
else:
raise CreationError("Unable to create new user", data)
else:
r = self.connection.session.put(self.getURL(), data = payload)
data = r.json()
if r.status_code == 200:
self._set(data)
else:
raise UpdateError("Unable to update user, status: %s" %r.status_code, data)
def setPermissions(self, dbName, access):
"""Grant revoke rights on a database, 'access' is supposed to be boolean. ArangoDB grants/revokes both read and write rights at the same time"""
import json
if not self.isSet:
raise CreationError("Please save user first", None, None)
rights = []
if access:
rights.append("rw")
rights = ''.join(rights)
if not self.connection.hasDatabase(dbName):
raise KeyError("Unknown database: %s" % dbName)
url = "%s/database/%s" % (self.getURL(), dbName)
r = self.connection.session.put(url, data = json.dumps({"grant": rights}, default=str))
if r.status_code < 200 or r.status_code > 202:
raise CreationError("Unable to grant rights", r.content)
def delete(self):
"""Permanently remove the user"""
if not self.isSet:
raise CreationError("Please save user first", None, None)
r = self.connection.session.delete(self.getURL())
if r.status_code < 200 or r.status_code > 202:
raise DeletionError("Unable to delete user, url: %s, status: %s" %(r.url, r.status_code), r.content )
self.isSet = False
def __repr__(self):
return "ArangoUser: %s" % (self._store)
def __setitem__(self, k, v):
if k not in list(self._store.keys()):
raise KeyError("The only keys available for user are: %s" % (list(self._store.keys())))
self._store[k] = v
def __getitem__(self, k):
return self._store[k]
class Users(object):
"""This one manages users."""
def __init__(self, connection):
self.connection = connection
def getURL(self):
return "%s/user" % (self.connection.getURL())
def createUser(self, username, password):
u = User(self)
u["username"] = username
u["password"] = password
return u
def fetchAllUsers(self, rawResults = False):
"""Returns all available users. if rawResults, the result will be a list of python dicts instead of User objects"""
r = self.connection.session.get(self.getURL())
if r.status_code == 200:
data = r.json()
if rawResults:
return data["result"]
else:
res = []
for resu in data["result"]:
u = User(self, resu)
res.append(u)
return res
else:
raise ConnectionError("Unable to get user list", r.url, r.status_code)
def fetchUser(self, username, rawResults = False):
"""Returns a single user. if rawResults, the result will be a list of python dicts instead of User objects"""
url = "%s/%s" % (self.getURL(), username)
r = self.connection.session.get(url)
if r.status_code == 200:
data = r.json()
if rawResults:
return data["result"]
else:
u = User(self, data)
return u
else:
raise KeyError("Unable to get user: %s" % username)
def __getitem__(self, k):
return self.fetchUser(k)
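# --- Illustrative usage sketch (added; not part of pyArango) ------------------
# Assumes a running ArangoDB server reachable through a pyArango Connection;
# the credentials and database name are made up, so the calls stay commented.
#
#   from pyArango.connection import Connection
#   conn = Connection(username="root", password="root-password")
#   users = Users(conn)
#   user = users.createUser("alice", "alice-password")
#   user.save()                                 # creates the user server-side
#   user.setPermissions("some_database", True)  # grant rw on that database
#   user.delete()                               # remove the user again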
| tariqdaouda/pyArango | pyArango/users.py | Python | apache-2.0 | 5,657 |
# allefriezen naar sqlite.py - Convert A2A dump to relational sqlite3 database
#
# Copyright (C) 2016 T.Hofkamp
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import xml.etree.ElementTree as ET
import time
import sqlite3
baseurl = "http://data.kultuer.frl/allefriezen/"
# 2016-09 dump: 2,027,218 records (akten)
def stripnamespace(tag):
# strip any {namespace url} from tag
if tag.find("}") >= 1:
namespace_uri, tag = tag.split("}", 1)
return tag
def getelement(elem,relpath):
dirs = relpath.split('|')
for tag in dirs:
if tag.startswith('@'):
return elem.get(tag[1::])
x = elem.findall(tag)
if len(x) == 0:
return None
if len(x) > 1:
return None
elem = x[0]
return elem.text
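# Example of the '|'-separated path syntax understood by getelement() above
# (illustrative, with the namespace abbreviated to {ns}):
#   getelement(rec, "{ns}Event|{ns}EventType") walks Event -> EventType and
#   returns its text, while a segment starting with '@' (e.g. "...|@pid")
#   returns that attribute of the current element instead of descending.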
idsdone = {} # dict of all record (akte) ids we have already seen
# needed because modifications happen while the dump is being written, so some
# records appear twice in the dump (a bug in Picturae's dump program)
def record(elem):
global idsdone
eventtype = getelement(elem,"{http://www.openarchives.org/OAI/2.0/}metadata|{http://Mindbus.nl/A2A}A2A|{http://Mindbus.nl/A2A}Event|{http://Mindbus.nl/A2A}EventType")
if False:
source = elem.find("{http://www.openarchives.org/OAI/2.0/}metadata").find("{http://Mindbus.nl/A2A}A2A").find("{http://Mindbus.nl/A2A}Source")
#source = getelement(elem,"{http://www.openarchives.org/OAI/2.0/}metadata|{http://Mindbus.nl/A2A}A2A|{http://Mindbus.nl/A2A}Source")
if source:
for remark in source.findall("{http://Mindbus.nl/A2A}SourceRemark"):
k = remark.get("Key")
v = getelement(remark,"{http://Mindbus.nl/A2A}Value")
if k == "AkteSoort":
#print(v)
eventtype = v
subject = getelement(elem,"{http://www.openarchives.org/OAI/2.0/}header|{http://www.openarchives.org/OAI/2.0/}identifier")
if subject in idsdone: # have we already seen this record (bug in Picturae's dump program)
# if so, throw the earlier record away
fp.execute('''DELETE from akte where recordid = ?''',(subject,))
fp.execute('''DELETE from aktepersoon where recordid = ?''',(subject,))
print('Akte',subject,'staat er meer dan 1 keer in de dump')
else:
idsdone[subject] = 1
eventplace = getelement(elem,"{http://www.openarchives.org/OAI/2.0/}metadata|{http://Mindbus.nl/A2A}A2A|{http://Mindbus.nl/A2A}Event|{http://Mindbus.nl/A2A}EventPlace|{http://Mindbus.nl/A2A}Place")
dag = getelement(elem,"{http://www.openarchives.org/OAI/2.0/}metadata|{http://Mindbus.nl/A2A}A2A|{http://Mindbus.nl/A2A}Event|{http://Mindbus.nl/A2A}EventDate|{http://Mindbus.nl/A2A}Day")
maand = getelement(elem,"{http://www.openarchives.org/OAI/2.0/}metadata|{http://Mindbus.nl/A2A}A2A|{http://Mindbus.nl/A2A}Event|{http://Mindbus.nl/A2A}EventDate|{http://Mindbus.nl/A2A}Month")
jaar = getelement(elem,"{http://www.openarchives.org/OAI/2.0/}metadata|{http://Mindbus.nl/A2A}A2A|{http://Mindbus.nl/A2A}Event|{http://Mindbus.nl/A2A}EventDate|{http://Mindbus.nl/A2A}Year")
eventdate=""
if jaar:
eventdate = str(jaar)
if maand:
eventdate += "-"+str(maand)
if dag:
eventdate += "-"+str(dag)
#print(subject)
# find the persons in this record
personen = {}
for per in elem.findall("{http://www.openarchives.org/OAI/2.0/}metadata/{http://Mindbus.nl/A2A}A2A/{http://Mindbus.nl/A2A}Person"):
personen[per.get("pid")] = Person(per)
# find the role that goes with each person
for rel in elem.findall("{http://www.openarchives.org/OAI/2.0/}metadata/{http://Mindbus.nl/A2A}A2A/{http://Mindbus.nl/A2A}RelationEP"):
#print(getelement(rel,"{http://Mindbus.nl/A2A}PersonKeyRef"))
#print(getelement(rel,"{http://Mindbus.nl/A2A}RelationType").replace(" ",""))
i = getelement(rel,"{http://Mindbus.nl/A2A}PersonKeyRef")
t = getelement(rel,"{http://Mindbus.nl/A2A}RelationType").replace(" ","")
(personen[i]).reltype = t
if t == "Overledene":
personen[i].deathdate = eventdate
#personen[i].deathplace = eventplace
#personen[getelement(rel,"{http://Mindbus.nl/A2A}PersonKeyRef")].reltype = getelement(rel,"{http://Mindbus.nl/A2A}RelationType").replace(" ","")
for rel in elem.findall("{http://www.openarchives.org/OAI/2.0/}metadata/{http://Mindbus.nl/A2A}A2A/{http://Mindbus.nl/A2A}RelationPP"):
#print(getelement(rel,"{http://Mindbus.nl/A2A}PersonKeyRef"))
#print(getelement(rel,"{http://Mindbus.nl/A2A}RelationType").replace(" ",""))
p1 = None
if rel[0].tag == "{http://Mindbus.nl/A2A}PersonKeyRef":
p1 = rel[0].text
p2 = None
if rel[1].tag == "{http://Mindbus.nl/A2A}PersonKeyRef":
p2 = rel[1].text
t = getelement(rel,"{http://Mindbus.nl/A2A}RelationType").replace(" ","")
if p1 and p2:
if personen[p2].reltype == "Overledene":
personen[p1].reltype = t
elif personen[p1].reltype == "Overledene":
personen[p2].reltype = t
else:
print("wel keyrefs maar geen overledene",subject)
else:
print("Vreemd https://www.allefriezen.nl/zoeken/deeds/"+subject)
print(personen[p1],personen[p2],personen[p1].reltype,personen[p2].reltype)
#if subject == "06394230-a058-c6cf-58e1-62c73d6e3d74":
# ET.dump(elem)
#personen[getelement(rel,"{http://Mindbus.nl/A2A}PersonKeyRef")].reltype = getelement(rel,"{http://Mindbus.nl/A2A}RelationType").replace(" ","")
persql = []
for p in personen:
#print(p,personen[p].reltype,personen[p])
persql.append((subject,personen[p].givenname,personen[p].patroniem,personen[p].prefixlastname,personen[p].lastname,
personen[p].reltype,p,personen[p].age,personen[p].birthdate,personen[p].birthplace,personen[p].deathdate,personen[p].deathplace,
personen[p].gender,personen[p].residence,personen[p].profession))
#print(persql)
fp.execute('''INSERT INTO akte(recordid,eventtype,eventdate,eventplace)
VALUES(?,?,?,?)''',(subject,eventtype,eventdate,eventplace))
fp.executemany('''INSERT INTO aktepersoon(recordid,firstname,patronym,prefixlastname,lastname,role,personid,age,birthdate,birthplace,deathdate,deathplace,gender,residence,profession)
VALUES(?,?,?,?,?,?,?,?,?,?,?, ?,?,?,?)''',persql)
return True
class Person:
def __init__(self,elem):
self.givenname = getelement(elem,"{http://Mindbus.nl/A2A}PersonName|{http://Mindbus.nl/A2A}PersonNameFirstName")
self.patroniem = getelement(elem,"{http://Mindbus.nl/A2A}PersonName|{http://Mindbus.nl/A2A}PersonNamePatronym")
self.prefixlastname = getelement(elem,"{http://Mindbus.nl/A2A}PersonName|{http://Mindbus.nl/A2A}PersonNamePrefixLastName")
self.lastname = getelement(elem,"{http://Mindbus.nl/A2A}PersonName|{http://Mindbus.nl/A2A}PersonNameLastName")
self.age = getelement(elem,"{http://Mindbus.nl/A2A}Age|{http://Mindbus.nl/A2A}PersonAgeLiteral")
self.birthplace = getelement(elem,"{http://Mindbus.nl/A2A}BirthPlace|{http://Mindbus.nl/A2A}Place")
self.deathplace = getelement(elem,"{http://Mindbus.nl/A2A}DeathPlace|{http://Mindbus.nl/A2A}Place")
self.deathdate = None
self.residence = getelement(elem,"{http://Mindbus.nl/A2A}Residence|{http://Mindbus.nl/A2A}Place")
self.profession = getelement(elem,"{http://Mindbus.nl/A2A}Profession")
self.gender = getelement(elem,"{http://Mindbus.nl/A2A}Gender")
dag = getelement(elem,"{http://Mindbus.nl/A2A}BirthDate|{http://Mindbus.nl/A2A}Day")
maand = getelement(elem,"{http://Mindbus.nl/A2A}BirthDate|{http://Mindbus.nl/A2A}Month")
jaar = getelement(elem,"{http://Mindbus.nl/A2A}BirthDate|{http://Mindbus.nl/A2A}Year")
self.birthdate=""
if jaar:
self.birthdate = str(jaar)
if maand:
self.birthdate += "-"+str(maand)
if dag:
self.birthdate += "-"+str(dag)
#self.deathdate = getelement(elem,"{http://Mindbus.nl/A2A}PersonName|{http://Mindbus.nl/A2A}Age|{http://Mindbus.nl/A2A}PersonAgeLiteral")
self.reltype = None
def __str__(self):
#if self.reltype:
# ret = self.reltype+":"
#else:
ret = ""
if self.givenname:
ret += self.givenname
if self.patroniem:
ret += " "+self.patroniem
if self.prefixlastname:
ret += " "+self.prefixlastname
if self.lastname:
ret += " "+self.lastname
if ret.startswith(" "):
return ret[1::]
return ret
### comparison function ???
def _1_eq__(self,other):
if self.givenname == other.givenname:
if self.patroniem == other.patroniem:
if self.prefixlastname == other.prefixlastname:
return self.lastname == other.lastname
return False
files = (
"frl_a2a_bs_o-201611.xml"
,"frl_a2a_bs_h-201611.xml"
,"frl_a2a_bs_g-201611.xml")
conn = sqlite3.connect('e:/BurgelijkeStand1611.db')
#conn = sqlite3.connect(':memory:')
fp = conn.cursor()
# Create table
fp.execute('''DROP TABLE IF EXISTS akte''')
fp.execute('''DROP TABLE IF EXISTS aktepersoon''')
# Create table
fp.execute('''CREATE TABLE akte
(recordid varchar(36) primary key, eventtype text, eventdate date, eventplace text)''')
fp.execute('''CREATE TABLE aktepersoon
(recordid varchar(36), firstname text, patronym text, prefixlastname text, lastname text,
role text, personid text, age text, birthdate date, birthplace text, deathdate date, deathplace text, gender text, residence text, profession text )''')
fp.execute('''PRAGMA synchronous=OFF''')
fp.execute('''PRAGMA journal_mode=OFF''')
for file in files:
#for file in {"bal2.xml"}:
print("Bezig met",file)
starttime = time.time()
recordcount = 0
context = ET.iterparse('C:/users/tjibbe/Desktop/resources/'+file)
#context = ET.iterparse('resources/'+file)
# turn it into an iterator
context = iter(context)
for event, elem in context:
#print(event,elem.tag)
tag = stripnamespace(elem.tag)
if event == 'end' and tag == "record":
recordcount += 1
ret = record(elem)
if ret:
elem.clear()
#fp.close()
endtime = time.time()
print(file,recordcount,endtime - starttime,recordcount /(endtime - starttime))
# Save (commit) the changes
conn.commit()
for row in fp.execute('SELECT count(*) FROM aktepersoon'):
print(row[0]) #5740379 in 11-2016
for row in fp.execute('SELECT count(*) FROM akte'):
print(row[0]) #2027215 in 11-2016
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
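# Example query against the resulting database (illustrative; the surname is
# made up):
#   SELECT a.eventtype, a.eventdate, a.eventplace, p.firstname, p.lastname, p.role
#   FROM akte a JOIN aktepersoon p ON p.recordid = a.recordid
#   WHERE p.lastname = 'Hofkamp'
#   ORDER BY a.eventdate;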
| tofkamp/akteconnect | allefriezen naar sqlite.py | Python | gpl-3.0 | 12,006 |
# Copyright 2017 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
@api.model_create_multi
def create(self, vals_list):
for vals in vals_list:
st_line_id = vals.get("statement_line_id")
if st_line_id:
mode, partner = self.env[
"account.bank.statement.line"
]._get_info_from_reference(vals.get("name", False))
if mode == "membership" and partner:
vals["partner_id"] = partner.id
return super().create(vals_list)
def _remove_membership(self):
memberships = self.env["membership.line"].search(
[
("move_id", "in", self.mapped("move_id").ids),
]
)
memberships.write(
{
"paid": False,
"price_paid": False,
"move_id": False,
"bank_account_id": False,
}
)
def remove_move_reconcile(self):
# when undoing the bank statement reconciliation
# (will be deleted by ondelete='cascade')
self._remove_membership()
return super().remove_move_reconcile()
def unlink(self):
self._remove_membership()
return super().unlink()
| mozaik-association/mozaik | mozaik_account/models/account_move_line.py | Python | agpl-3.0 | 1,390 |
#!/usr/bin/python
import time
import datetime
try:
f = open('ActivatedAlarmTimes.log', 'r')
lines = f.readlines()
f.close()
start = True
while(start == True):
method = raw_input("Enter which method you want to use to clean the log file. Delete line by line: 'line', or delete by date range: 'range'. \n")
if(method == 'line'):
start = False
newLines = []
print("Method line per line: \n\n")
for line in lines:
print("Next line from log: \n")
print(line)
option = raw_input("Delete (D) or keep (K) this line?")
if (option == 'D'):
print("\nDELETED LINE")
#lines.remove(line)
elif (option == 'K'):
newLines.append(line)
print("\nKEPT LINE")
else:
print("Invalid request.")
f = open('ActivatedAlarmTimes.log', 'w')
for line in newLines:
f.write(line)
f.close()
elif(method == 'range'):
start = False
newLines = []
print("method range")
startTimeStamp = time.strptime(raw_input("Start-time to delete logs.(dd-mm-yy hh:mm:ss): "), "%d-%m-%y %H:%M:%S")
endTimeStamp = time.strptime(raw_input("End-time to delete logs.(dd-mm-yy hh:mm:ss): "), "%d-%m-%y %H:%M:%S")
for line in lines:
# Parse the timestamp at the start of each log line. Use a separate variable
# (not 'time') so the time module is not shadowed, and strip the trailing
# newline before parsing.
lineTime = time.strptime(line.strip(), "%d-%m-%y %H:%M:%S")
if (lineTime < startTimeStamp) or (lineTime > endTimeStamp):
newLines.append(line)
f = open('ActivatedAlarmTimes.log', 'w')
for line in newLines:
f.write(line)
f.close()
else:
print("Invalid request. \n")
except KeyboardInterrupt:
print("Exiting program")
| skidekeersmaecker/raspi-cursus | labo5/tool.py | Python | mit | 2,255 |
# -*- coding: utf-8 -*-
from flask import Flask, url_for, render_template, request
from format_name import *
import requests
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def api_root():
if request.method == 'POST':
movie_name= format_name(request.form[u'movie_name'])
url = "http://www.omdbapi.com/?t={0}&y=&plot=short&r=json".format(movie_name)
omdb_request = requests.get(url)
omdb_result = omdb_request.json()
return render_template("index.html", movie_name=movie_name, omdb_request=omdb_result)
return render_template("index.html")
if __name__ == '__main__':
app.run(host='192.168.56.102', port=8080, debug=True)
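# Note (added): the URL built in api_root() expands to
#   http://www.omdbapi.com/?t=<movie_name>&y=&plot=short&r=json
# The OMDb API has since started requiring an apikey query parameter, so a bare
# request like this may now be rejected.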
| benoittgt/flask_omdb_api | flask_demo.py | Python | mit | 690 |
import os
import re
from nose.tools import raises
import seqpoet
class TestSequence:
def setup(self):
self.seq1 = 'ACATacacagaATAgagaCacata'
self.illegal = 'agagcatgcacthisisnotcorrect'
def test_sequence_length(self):
s = seqpoet.Sequence(self.seq1)
assert len(s) == len(self.seq1)
def test_casing(self):
s = seqpoet.Sequence(self.seq1)
assert re.match('^[acgt]+$', str(s))
def test_reverse_complement(self):
s = seqpoet.Sequence(self.seq1)
s2 = seqpoet.Sequence('acct')
assert s.revcomp() == 'tatgtgtctctattctgtgtatgt', \
'"{0}" is not "tatgtgtctctattctgtgtatgt"'.format(s.revcomp().seq)
assert s2.revcomp() == 'aggt', \
'"{0}" is not "aggt"'.format(s2.revcomp().seq)
def test_str(self):
s = seqpoet.Sequence(self.seq1)
assert str(s) == self.seq1.lower()
def test_repr(self):
s = seqpoet.Sequence(self.seq1)
assert repr(s) == '<Sequence: acata...>'
assert repr(s.revcomp()) == '<Sequence: tatgt...>'
def test_indexing(self):
s = seqpoet.Sequence(self.seq1)
assert s[4] == 'a'
assert s[:5] == 'acata'
assert s[-6:] == 'cacata'
assert s[4:8] == 'acac'
def test_equality(self):
s = seqpoet.Sequence(self.seq1)
assert s == self.seq1.lower()
assert s[:3] == seqpoet.Sequence(self.seq1[:3])
@raises(ValueError)
def test_illegal_characters(self):
s = seqpoet.Sequence(self.illegal)
| maehler/seqpoet | seqpoet/tests/test_sequence.py | Python | mit | 1,544 |
"""
This module complements the math and cmath builtin modules by providing
fast machine precision versions of some additional functions (gamma, ...)
and wrapping math/cmath functions so that they can be called with either
real or complex arguments.
"""
import operator
import math
import cmath
# Irrational (?) constants
pi = 3.1415926535897932385
e = 2.7182818284590452354
sqrt2 = 1.4142135623730950488
sqrt5 = 2.2360679774997896964
phi = 1.6180339887498948482
ln2 = 0.69314718055994530942
ln10 = 2.302585092994045684
euler = 0.57721566490153286061
catalan = 0.91596559417721901505
khinchin = 2.6854520010653064453
apery = 1.2020569031595942854
logpi = 1.1447298858494001741
def _mathfun_real(f_real, f_complex):
def f(x, **kwargs):
if type(x) is float:
return f_real(x)
if type(x) is complex:
return f_complex(x)
try:
x = float(x)
return f_real(x)
except (TypeError, ValueError):
x = complex(x)
return f_complex(x)
f.__name__ = f_real.__name__
return f
def _mathfun(f_real, f_complex):
def f(x, **kwargs):
if type(x) is complex:
return f_complex(x)
try:
return f_real(float(x))
except (TypeError, ValueError):
return f_complex(complex(x))
f.__name__ = f_real.__name__
return f
def _mathfun_n(f_real, f_complex):
def f(*args, **kwargs):
try:
return f_real(*(float(x) for x in args))
except (TypeError, ValueError):
return f_complex(*(complex(x) for x in args))
f.__name__ = f_real.__name__
return f
# Workaround for non-raising log and sqrt in Python 2.5 and 2.4
# on Unix system
try:
math.log(-2.0)
def math_log(x):
if x <= 0.0:
raise ValueError("math domain error")
return math.log(x)
def math_sqrt(x):
if x < 0.0:
raise ValueError("math domain error")
return math.sqrt(x)
except (ValueError, TypeError):
math_log = math.log
math_sqrt = math.sqrt
pow = _mathfun_n(operator.pow, lambda x, y: complex(x)**y)
log = _mathfun_n(math_log, cmath.log)
sqrt = _mathfun(math_sqrt, cmath.sqrt)
exp = _mathfun_real(math.exp, cmath.exp)
cos = _mathfun_real(math.cos, cmath.cos)
sin = _mathfun_real(math.sin, cmath.sin)
tan = _mathfun_real(math.tan, cmath.tan)
acos = _mathfun(math.acos, cmath.acos)
asin = _mathfun(math.asin, cmath.asin)
atan = _mathfun_real(math.atan, cmath.atan)
cosh = _mathfun_real(math.cosh, cmath.cosh)
sinh = _mathfun_real(math.sinh, cmath.sinh)
tanh = _mathfun_real(math.tanh, cmath.tanh)
floor = _mathfun_real(math.floor,
lambda z: complex(math.floor(z.real), math.floor(z.imag)))
ceil = _mathfun_real(math.ceil,
lambda z: complex(math.ceil(z.real), math.ceil(z.imag)))
cos_sin = _mathfun_real(lambda x: (math.cos(x), math.sin(x)),
lambda z: (cmath.cos(z), cmath.sin(z)))
cbrt = _mathfun(lambda x: x**(1./3), lambda z: z**(1./3))
def nthroot(x, n):
r = 1./n
try:
return float(x) ** r
except (ValueError, TypeError):
return complex(x) ** r
def _sinpi_real(x):
if x < 0:
return -_sinpi_real(-x)
n, r = divmod(x, 0.5)
r *= pi
n %= 4
if n == 0: return math.sin(r)
if n == 1: return math.cos(r)
if n == 2: return -math.sin(r)
if n == 3: return -math.cos(r)
def _cospi_real(x):
if x < 0:
x = -x
n, r = divmod(x, 0.5)
r *= pi
n %= 4
if n == 0: return math.cos(r)
if n == 1: return -math.sin(r)
if n == 2: return -math.cos(r)
if n == 3: return math.sin(r)
def _sinpi_complex(z):
if z.real < 0:
return -_sinpi_complex(-z)
n, r = divmod(z.real, 0.5)
z = pi*complex(r, z.imag)
n %= 4
if n == 0: return cmath.sin(z)
if n == 1: return cmath.cos(z)
if n == 2: return -cmath.sin(z)
if n == 3: return -cmath.cos(z)
def _cospi_complex(z):
if z.real < 0:
z = -z
n, r = divmod(z.real, 0.5)
z = pi*complex(r, z.imag)
n %= 4
if n == 0: return cmath.cos(z)
if n == 1: return -cmath.sin(z)
if n == 2: return -cmath.cos(z)
if n == 3: return cmath.sin(z)
cospi = _mathfun_real(_cospi_real, _cospi_complex)
sinpi = _mathfun_real(_sinpi_real, _sinpi_complex)
def tanpi(x):
try:
return sinpi(x) / cospi(x)
except OverflowError:
if complex(x).imag > 10:
return 1j
if complex(x).imag < 10:
return -1j
raise
def cotpi(x):
try:
return cospi(x) / sinpi(x)
except OverflowError:
if complex(x).imag > 10:
return -1j
if complex(x).imag < 10:
return 1j
raise
INF = 1e300*1e300
NINF = -INF
NAN = INF-INF
EPS = 2.2204460492503131e-16
_exact_gamma = (INF, 1.0, 1.0, 2.0, 6.0, 24.0, 120.0, 720.0, 5040.0, 40320.0,
362880.0, 3628800.0, 39916800.0, 479001600.0, 6227020800.0, 87178291200.0,
1307674368000.0, 20922789888000.0, 355687428096000.0, 6402373705728000.0,
121645100408832000.0, 2432902008176640000.0)
_max_exact_gamma = len(_exact_gamma)-1
# Lanczos coefficients used by the GNU Scientific Library
_lanczos_g = 7
_lanczos_p = (0.99999999999980993, 676.5203681218851, -1259.1392167224028,
771.32342877765313, -176.61502916214059, 12.507343278686905,
-0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7)
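# The Lanczos approximation used by _gamma_real/_gamma_complex below:
#   gamma(x+1) ~= sqrt(2*pi) * t**(x+0.5) * exp(-t) * A_g(x),  t = x + g + 0.5
# where A_g(x) = p[0] + p[1]/(x+1) + ... + p[g+1]/(x+g+1) with the coefficients
# above (2.506628274631... is sqrt(2*pi)).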
def _gamma_real(x):
_intx = int(x)
if _intx == x:
if _intx <= 0:
#return (-1)**_intx * INF
raise ZeroDivisionError("gamma function pole")
if _intx <= _max_exact_gamma:
return _exact_gamma[_intx]
if x < 0.5:
# TODO: sinpi
return pi / (_sinpi_real(x)*_gamma_real(1-x))
else:
x -= 1.0
r = _lanczos_p[0]
for i in range(1, _lanczos_g+2):
r += _lanczos_p[i]/(x+i)
t = x + _lanczos_g + 0.5
return 2.506628274631000502417 * t**(x+0.5) * math.exp(-t) * r
def _gamma_complex(x):
if not x.imag:
return complex(_gamma_real(x.real))
if x.real < 0.5:
# TODO: sinpi
return pi / (_sinpi_complex(x)*_gamma_complex(1-x))
else:
x -= 1.0
r = _lanczos_p[0]
for i in range(1, _lanczos_g+2):
r += _lanczos_p[i]/(x+i)
t = x + _lanczos_g + 0.5
return 2.506628274631000502417 * t**(x+0.5) * cmath.exp(-t) * r
gamma = _mathfun_real(_gamma_real, _gamma_complex)
def rgamma(x):
try:
return 1./gamma(x)
except ZeroDivisionError:
return x*0.0
def factorial(x):
return gamma(x+1.0)
def arg(x):
if type(x) is float:
return math.atan2(0.0,x)
return math.atan2(x.imag,x.real)
# XXX: broken for negatives
def loggamma(x):
if type(x) not in (float, complex):
try:
x = float(x)
except (ValueError, TypeError):
x = complex(x)
try:
xreal = x.real
ximag = x.imag
except AttributeError: # py2.5
xreal = x
ximag = 0.0
# Reflection formula
# http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0003/
if xreal < 0.0:
if abs(x) < 0.5:
v = log(gamma(x))
if ximag == 0:
v = v.conjugate()
return v
z = 1-x
try:
re = z.real
im = z.imag
except AttributeError: # py2.5
re = z
im = 0.0
refloor = floor(re)
if im == 0.0:
imsign = 0
elif im < 0.0:
imsign = -1
else:
imsign = 1
return (-pi*1j)*abs(refloor)*(1-abs(imsign)) + logpi - \
log(sinpi(z-refloor)) - loggamma(z) + 1j*pi*refloor*imsign
if x == 1.0 or x == 2.0:
return x*0
p = 0.
while abs(x) < 11:
p -= log(x)
x += 1.0
s = 0.918938533204672742 + (x-0.5)*log(x) - x
r = 1./x
r2 = r*r
s += 0.083333333333333333333*r; r *= r2
s += -0.0027777777777777777778*r; r *= r2
s += 0.00079365079365079365079*r; r *= r2
s += -0.0005952380952380952381*r; r *= r2
s += 0.00084175084175084175084*r; r *= r2
s += -0.0019175269175269175269*r; r *= r2
s += 0.0064102564102564102564*r; r *= r2
s += -0.02955065359477124183*r
return s + p
_psi_coeff = [
0.083333333333333333333,
-0.0083333333333333333333,
0.003968253968253968254,
-0.0041666666666666666667,
0.0075757575757575757576,
-0.021092796092796092796,
0.083333333333333333333,
-0.44325980392156862745,
3.0539543302701197438,
-26.456212121212121212]
def _digamma_real(x):
_intx = int(x)
if _intx == x:
if _intx <= 0:
raise ZeroDivisionError("polygamma pole")
if x < 0.5:
x = 1.0-x
s = pi*cotpi(x)
else:
s = 0.0
while x < 10.0:
s -= 1.0/x
x += 1.0
x2 = x**-2
t = x2
for c in _psi_coeff:
s -= c*t
if t < 1e-20:
break
t *= x2
return s + math_log(x) - 0.5/x
def _digamma_complex(x):
if not x.imag:
return complex(_digamma_real(x.real))
if x.real < 0.5:
x = 1.0-x
s = pi*cotpi(x)
else:
s = 0.0
while abs(x) < 10.0:
s -= 1.0/x
x += 1.0
x2 = x**-2
t = x2
for c in _psi_coeff:
s -= c*t
if abs(t) < 1e-20:
break
t *= x2
return s + cmath.log(x) - 0.5/x
digamma = _mathfun_real(_digamma_real, _digamma_complex)
# TODO: could implement complex erf and erfc here. Need
# to find an accurate method (avoiding cancellation)
# for approx. 1 < abs(x) < 9.
_erfc_coeff_P = [
1.0000000161203922312,
2.1275306946297962644,
2.2280433377390253297,
1.4695509105618423961,
0.66275911699770787537,
0.20924776504163751585,
0.045459713768411264339,
0.0063065951710717791934,
0.00044560259661560421715][::-1]
_erfc_coeff_Q = [
1.0000000000000000000,
3.2559100272784894318,
4.9019435608903239131,
4.4971472894498014205,
2.7845640601891186528,
1.2146026030046904138,
0.37647108453729465912,
0.080970149639040548613,
0.011178148899483545902,
0.00078981003831980423513][::-1]
def _polyval(coeffs, x):
p = coeffs[0]
for c in coeffs[1:]:
p = c + x*p
return p
def _erf_taylor(x):
# Taylor series assuming 0 <= x <= 1
x2 = x*x
s = t = x
n = 1
while abs(t) > 1e-17:
t *= x2/n
s -= t/(n+n+1)
n += 1
t *= x2/n
s += t/(n+n+1)
n += 1
return 1.1283791670955125739*s
def _erfc_mid(x):
# Rational approximation assuming 0 <= x <= 9
return exp(-x*x)*_polyval(_erfc_coeff_P,x)/_polyval(_erfc_coeff_Q,x)
def _erfc_asymp(x):
# Asymptotic expansion assuming x >= 9
x2 = x*x
v = exp(-x2)/x*0.56418958354775628695
r = t = 0.5 / x2
s = 1.0
for n in range(1,22,4):
s -= t
t *= r * (n+2)
s += t
t *= r * (n+4)
if abs(t) < 1e-17:
break
return s * v
def erf(x):
"""
erf of a real number.
"""
x = float(x)
if x != x:
return x
if x < 0.0:
return -erf(-x)
if x >= 1.0:
if x >= 6.0:
return 1.0
return 1.0 - _erfc_mid(x)
return _erf_taylor(x)
def erfc(x):
"""
erfc of a real number.
"""
x = float(x)
if x != x:
return x
if x < 0.0:
if x < -6.0:
return 2.0
return 2.0-erfc(-x)
if x > 9.0:
return _erfc_asymp(x)
if x >= 1.0:
return _erfc_mid(x)
return 1.0 - _erf_taylor(x)
gauss42 = [\
(0.99839961899006235, 0.0041059986046490839),
(-0.99839961899006235, 0.0041059986046490839),
(0.9915772883408609, 0.009536220301748501),
(-0.9915772883408609,0.009536220301748501),
(0.97934250806374812, 0.014922443697357493),
(-0.97934250806374812, 0.014922443697357493),
(0.96175936533820439,0.020227869569052644),
(-0.96175936533820439, 0.020227869569052644),
(0.93892355735498811, 0.025422959526113047),
(-0.93892355735498811,0.025422959526113047),
(0.91095972490412735, 0.030479240699603467),
(-0.91095972490412735, 0.030479240699603467),
(0.87802056981217269,0.03536907109759211),
(-0.87802056981217269, 0.03536907109759211),
(0.8402859832618168, 0.040065735180692258),
(-0.8402859832618168,0.040065735180692258),
(0.7979620532554873, 0.044543577771965874),
(-0.7979620532554873, 0.044543577771965874),
(0.75127993568948048,0.048778140792803244),
(-0.75127993568948048, 0.048778140792803244),
(0.70049459055617114, 0.052746295699174064),
(-0.70049459055617114,0.052746295699174064),
(0.64588338886924779, 0.056426369358018376),
(-0.64588338886924779, 0.056426369358018376),
(0.58774459748510932, 0.059798262227586649),
(-0.58774459748510932, 0.059798262227586649),
(0.5263957499311922, 0.062843558045002565),
(-0.5263957499311922, 0.062843558045002565),
(0.46217191207042191, 0.065545624364908975),
(-0.46217191207042191, 0.065545624364908975),
(0.39542385204297503, 0.067889703376521934),
(-0.39542385204297503, 0.067889703376521934),
(0.32651612446541151, 0.069862992492594159),
(-0.32651612446541151, 0.069862992492594159),
(0.25582507934287907, 0.071454714265170971),
(-0.25582507934287907, 0.071454714265170971),
(0.18373680656485453, 0.072656175243804091),
(-0.18373680656485453, 0.072656175243804091),
(0.11064502720851986, 0.073460813453467527),
(-0.11064502720851986, 0.073460813453467527),
(0.036948943165351772, 0.073864234232172879),
(-0.036948943165351772, 0.073864234232172879)]
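# gauss42 above holds the 42 nodes and weights of Gauss-Legendre quadrature on
# [-1, 1]; ei() below maps them onto the segment between a reference point and
# z to integrate exp(t)/t numerically.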
EI_ASYMP_CONVERGENCE_RADIUS = 40.0
def ei_asymp(z, _e1=False):
r = 1./z
s = t = 1.0
k = 1
while 1:
t *= k*r
s += t
if abs(t) < 1e-16:
break
k += 1
v = s*exp(z)/z
if _e1:
if type(z) is complex:
zreal = z.real
zimag = z.imag
else:
zreal = z
zimag = 0.0
if zimag == 0.0 and zreal > 0.0:
v += pi*1j
else:
if type(z) is complex:
if z.imag > 0:
v += pi*1j
if z.imag < 0:
v -= pi*1j
return v
def ei_taylor(z, _e1=False):
s = t = z
k = 2
while 1:
t = t*z/k
term = t/k
if abs(term) < 1e-17:
break
s += term
k += 1
s += euler
if _e1:
s += log(-z)
else:
if type(z) is float or z.imag == 0.0:
s += math_log(abs(z))
else:
s += cmath.log(z)
return s
def ei(z, _e1=False):
typez = type(z)
if typez not in (float, complex):
try:
z = float(z)
typez = float
except (TypeError, ValueError):
z = complex(z)
typez = complex
if not z:
return -INF
absz = abs(z)
if absz > EI_ASYMP_CONVERGENCE_RADIUS:
return ei_asymp(z, _e1)
elif absz <= 2.0 or (typez is float and z > 0.0):
return ei_taylor(z, _e1)
# Integrate, starting from whichever is smaller of a Taylor
# series value or an asymptotic series value
if typez is complex and z.real > 0.0:
zref = z / absz
ref = ei_taylor(zref, _e1)
else:
zref = EI_ASYMP_CONVERGENCE_RADIUS * z / absz
ref = ei_asymp(zref, _e1)
C = (zref-z)*0.5
D = (zref+z)*0.5
s = 0.0
if type(z) is complex:
_exp = cmath.exp
else:
_exp = math.exp
for x,w in gauss42:
t = C*x+D
s += w*_exp(t)/t
ref -= C*s
return ref
def e1(z):
# hack to get consistent signs if the imaginary part is 0
# and signed
typez = type(z)
if type(z) not in (float, complex):
try:
z = float(z)
typez = float
except (TypeError, ValueError):
z = complex(z)
typez = complex
if typez is complex and not z.imag:
z = complex(z.real, 0.0)
# end hack
return -ei(-z, _e1=True)
_zeta_int = [\
-0.5,
0.0,
1.6449340668482264365,1.2020569031595942854,1.0823232337111381915,
1.0369277551433699263,1.0173430619844491397,1.0083492773819228268,
1.0040773561979443394,1.0020083928260822144,1.0009945751278180853,
1.0004941886041194646,1.0002460865533080483,1.0001227133475784891,
1.0000612481350587048,1.0000305882363070205,1.0000152822594086519,
1.0000076371976378998,1.0000038172932649998,1.0000019082127165539,
1.0000009539620338728,1.0000004769329867878,1.0000002384505027277,
1.0000001192199259653,1.0000000596081890513,1.0000000298035035147,
1.0000000149015548284]
_zeta_P = [-3.50000000087575873, -0.701274355654678147,
-0.0672313458590012612, -0.00398731457954257841,
-0.000160948723019303141, -4.67633010038383371e-6,
-1.02078104417700585e-7, -1.68030037095896287e-9,
-1.85231868742346722e-11][::-1]
_zeta_Q = [1.00000000000000000, -0.936552848762465319,
-0.0588835413263763741, -0.00441498861482948666,
-0.000143416758067432622, -5.10691659585090782e-6,
-9.58813053268913799e-8, -1.72963791443181972e-9,
-1.83527919681474132e-11][::-1]
_zeta_1 = [3.03768838606128127e-10, -1.21924525236601262e-8,
2.01201845887608893e-7, -1.53917240683468381e-6,
-5.09890411005967954e-7, 0.000122464707271619326,
-0.000905721539353130232, -0.00239315326074843037,
0.084239750013159168, 0.418938517907442414, 0.500000001921884009]
_zeta_0 = [-3.46092485016748794e-10, -6.42610089468292485e-9,
1.76409071536679773e-7, -1.47141263991560698e-6, -6.38880222546167613e-7,
0.000122641099800668209, -0.000905894913516772796, -0.00239303348507992713,
0.0842396947501199816, 0.418938533204660256, 0.500000000000000052]
def zeta(s):
"""
Riemann zeta function, real argument
"""
if not isinstance(s, (float, int)):
try:
s = float(s)
except (ValueError, TypeError):
try:
s = complex(s)
if not s.imag:
return complex(zeta(s.real))
except (ValueError, TypeError):
pass
raise NotImplementedError
if s == 1:
raise ValueError("zeta(1) pole")
if s >= 27:
return 1.0 + 2.0**(-s) + 3.0**(-s)
n = int(s)
if n == s:
if n >= 0:
return _zeta_int[n]
if not (n % 2):
return 0.0
if s <= 0.0:
return 2.**s*pi**(s-1)*_sinpi_real(0.5*s)*_gamma_real(1-s)*zeta(1-s)
if s <= 2.0:
if s <= 1.0:
return _polyval(_zeta_0,s)/(s-1)
return _polyval(_zeta_1,s)/(s-1)
z = _polyval(_zeta_P,s) / _polyval(_zeta_Q,s)
return 1.0 + 2.0**(-s) + 3.0**(-s) + 4.0**(-s)*z
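# --- Minimal self-check sketch (added; not part of mpmath) --------------------
# Exercises a few of the functions above against values whose exact results are
# known, illustrating the real/complex dispatch of the wrappers.
if __name__ == "__main__":
    assert sqrt(-4.0) == 2j                            # real input, complex result
    assert abs(gamma(5.0) - 24.0) < 1e-12              # gamma(5) = 4!
    assert abs(zeta(2.0) - pi**2/6) < 1e-12            # Basel problem
    assert abs(erf(0.5) - 0.5204998778130465) < 1e-12  # Taylor-series branch
    print("math2 sanity checks passed")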
| nsalomonis/AltAnalyze | stats_scripts/mpmath/math2.py | Python | apache-2.0 | 18,562 |
import unittest, string
from test import test_support, string_tests
from UserList import UserList
class StringTest(
string_tests.CommonTest,
string_tests.MixinStrStringUserStringTest
):
type2test = str
def checkequal(self, result, object, methodname, *args):
realresult = getattr(string, methodname)(object, *args)
self.assertEqual(
result,
realresult
)
def checkraises(self, exc, object, methodname, *args):
self.assertRaises(
exc,
getattr(string, methodname),
object,
*args
)
def checkcall(self, object, methodname, *args):
getattr(string, methodname)(object, *args)
def test_join(self):
# These are the same checks as in string_test.ObjectTest.test_join
# but the argument order is different
self.checkequal('a b c d', ['a', 'b', 'c', 'd'], 'join', ' ')
self.checkequal('abcd', ('a', 'b', 'c', 'd'), 'join', '')
self.checkequal('w x y z', string_tests.Sequence(), 'join', ' ')
self.checkequal('abc', ('abc',), 'join', 'a')
self.checkequal('z', UserList(['z']), 'join', 'a')
if test_support.have_unicode:
self.checkequal(unicode('a.b.c'), ['a', 'b', 'c'], 'join', unicode('.'))
self.checkequal(unicode('a.b.c'), [unicode('a'), 'b', 'c'], 'join', '.')
self.checkequal(unicode('a.b.c'), ['a', unicode('b'), 'c'], 'join', '.')
self.checkequal(unicode('a.b.c'), ['a', 'b', unicode('c')], 'join', '.')
self.checkraises(TypeError, ['a', unicode('b'), 3], 'join', '.')
for i in [5, 25, 125]:
self.checkequal(
((('a' * i) + '-') * i)[:-1],
['a' * i] * i, 'join', '-')
self.checkequal(
((('a' * i) + '-') * i)[:-1],
('a' * i,) * i, 'join', '-')
self.checkraises(TypeError, string_tests.BadSeq1(), 'join', ' ')
self.checkequal('a b c', string_tests.BadSeq2(), 'join', ' ')
try:
def f():
yield 4 + ""
self.fixtype(' ').join(f())
except TypeError, e:
if '+' not in str(e):
self.fail('join() ate exception message')
else:
self.fail('exception not raised')
class ModuleTest(unittest.TestCase):
def test_attrs(self):
string.whitespace
string.lowercase
string.uppercase
string.letters
string.digits
string.hexdigits
string.octdigits
string.punctuation
string.printable
def test_atoi(self):
self.assertEqual(string.atoi(" 1 "), 1)
self.assertRaises(ValueError, string.atoi, " 1x")
self.assertRaises(ValueError, string.atoi, " x1 ")
def test_atol(self):
self.assertEqual(string.atol(" 1 "), 1L)
self.assertRaises(ValueError, string.atol, " 1x ")
self.assertRaises(ValueError, string.atol, " x1 ")
def test_atof(self):
self.assertAlmostEqual(string.atof(" 1 "), 1.0)
self.assertRaises(ValueError, string.atof, " 1x ")
self.assertRaises(ValueError, string.atof, " x1 ")
def test_maketrans(self):
transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
self.assertEqual(string.maketrans('abc', 'xyz'), transtable)
self.assertRaises(ValueError, string.maketrans, 'abc', 'xyzq')
def test_capwords(self):
self.assertEqual(string.capwords('abc def ghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('abc\tdef\nghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('abc\t def \nghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('ABC DEF GHI'), 'Abc Def Ghi')
self.assertEqual(string.capwords('ABC-DEF-GHI', '-'), 'Abc-Def-Ghi')
self.assertEqual(string.capwords('ABC-def DEF-ghi GHI'), 'Abc-def Def-ghi Ghi')
self.assertEqual(string.capwords(' aBc DeF '), 'Abc Def')
self.assertEqual(string.capwords('\taBc\tDeF\t'), 'Abc Def')
self.assertEqual(string.capwords('\taBc\tDeF\t', '\t'), '\tAbc\tDef\t')
def test_formatter(self):
fmt = string.Formatter()
self.assertEqual(fmt.format("foo"), "foo")
self.assertEqual(fmt.format("foo{0}", "bar"), "foobar")
self.assertEqual(fmt.format("foo{1}{0}-{1}", "bar", 6), "foo6bar-6")
self.assertEqual(fmt.format("-{arg!r}-", arg='test'), "-'test'-")
# override get_value ############################################
class NamespaceFormatter(string.Formatter):
def __init__(self, namespace={}):
string.Formatter.__init__(self)
self.namespace = namespace
def get_value(self, key, args, kwds):
if isinstance(key, str):
try:
# Check explicitly passed arguments first
return kwds[key]
except KeyError:
return self.namespace[key]
else:
string.Formatter.get_value(key, args, kwds)
fmt = NamespaceFormatter({'greeting':'hello'})
self.assertEqual(fmt.format("{greeting}, world!"), 'hello, world!')
# override format_field #########################################
class CallFormatter(string.Formatter):
def format_field(self, value, format_spec):
return format(value(), format_spec)
fmt = CallFormatter()
self.assertEqual(fmt.format('*{0}*', lambda : 'result'), '*result*')
# override convert_field ########################################
class XFormatter(string.Formatter):
def convert_field(self, value, conversion):
if conversion == 'x':
return None
return super(XFormatter, self).convert_field(value, conversion)
fmt = XFormatter()
self.assertEqual(fmt.format("{0!r}:{0!x}", 'foo', 'foo'), "'foo':None")
# override parse ################################################
class BarFormatter(string.Formatter):
# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
def parse(self, format_string):
for field in format_string.split('|'):
if field[0] == '+':
# it's markup
field_name, _, format_spec = field[1:].partition(':')
yield '', field_name, format_spec, None
else:
yield field, None, None, None
fmt = BarFormatter()
self.assertEqual(fmt.format('*|+0:^10s|*', 'foo'), '* foo *')
# test all parameters used
class CheckAllUsedFormatter(string.Formatter):
def check_unused_args(self, used_args, args, kwargs):
# Track which arguments actually got used
unused_args = set(kwargs.keys())
unused_args.update(range(0, len(args)))
for arg in used_args:
unused_args.remove(arg)
if unused_args:
raise ValueError("unused arguments")
fmt = CheckAllUsedFormatter()
self.assertEqual(fmt.format("{0}", 10), "10")
self.assertEqual(fmt.format("{0}{i}", 10, i=100), "10100")
self.assertEqual(fmt.format("{0}{i}{1}", 10, 20, i=100), "1010020")
self.assertRaises(ValueError, fmt.format, "{0}{i}{1}", 10, 20, i=100, j=0)
self.assertRaises(ValueError, fmt.format, "{0}", 10, 20)
self.assertRaises(ValueError, fmt.format, "{0}", 10, 20, i=100)
self.assertRaises(ValueError, fmt.format, "{i}", 10, 20, i=100)
# Alternate formatting is not supported
self.assertRaises(ValueError, format, '', '#')
self.assertRaises(ValueError, format, '', '#20')
class BytesAliasTest(unittest.TestCase):
def test_builtin(self):
self.assertTrue(str is bytes)
def test_syntax(self):
self.assertEqual(b"spam", "spam")
self.assertEqual(br"egg\foo", "egg\\foo")
self.assertTrue(type(b""), str)
self.assertTrue(type(br""), str)
def test_main():
test_support.run_unittest(StringTest, ModuleTest, BytesAliasTest)
if __name__ == "__main__":
test_main()
| antb/TPT----My-old-mod | src/python/stdlib/test/test_string.py | Python | gpl-2.0 | 9,160 |
import sys
import unittest
from sympy import symbols, sin, cos, Rational, expand, collect, simplify, Symbol
from galgebra.printer import Format, Eprint, Get_Program, latex, GaPrinter, ZERO_STR
from galgebra.ga import Ga, one, zero
from galgebra.mv import Mv, Nga
# for backward compatibility
from galgebra.mv import ONE, ZERO, HALF
from galgebra import ga
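# F(x) below is the conformal representation of the Euclidean point x:
# F(x) = ((x.x)*n + 2*x - nbar)/2, with n and nbar the null vectors defined in
# the conformal-model tests further down.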
def F(x):
global n, nbar
Fx = HALF * ((x * x) * n + 2 * x - nbar)
return(Fx)
def make_vector(a, n=3, ga=None):
if isinstance(a, str):
v = zero
for i in range(n):
a_i = Symbol(a+str(i+1))
v += a_i*ga.basis[i]
v = ga.mv(v)
return(F(v))
else:
return(F(a))
class TestTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_basic_multivector_operations(self):
g3d = Ga('e*x|y|z')
(ex,ey,ez) = g3d.mv()
A = g3d.mv('A','mv')
assert str(A) == 'A + A__x*e_x + A__y*e_y + A__z*e_z + A__xy*e_x^e_y + A__xz*e_x^e_z + A__yz*e_y^e_z + A__xyz*e_x^e_y^e_z'
assert str(A) == 'A + A__x*e_x + A__y*e_y + A__z*e_z + A__xy*e_x^e_y + A__xz*e_x^e_z + A__yz*e_y^e_z + A__xyz*e_x^e_y^e_z'
assert str(A) == 'A + A__x*e_x + A__y*e_y + A__z*e_z + A__xy*e_x^e_y + A__xz*e_x^e_z + A__yz*e_y^e_z + A__xyz*e_x^e_y^e_z'
X = g3d.mv('X','vector')
Y = g3d.mv('Y','vector')
assert str(X) == 'X__x*e_x + X__y*e_y + X__z*e_z'
assert str(Y) == 'Y__x*e_x + Y__y*e_y + Y__z*e_z'
assert str((X*Y)) == '(e_x.e_x)*X__x*Y__x + (e_x.e_y)*X__x*Y__y + (e_x.e_y)*X__y*Y__x + (e_x.e_z)*X__x*Y__z + (e_x.e_z)*X__z*Y__x + (e_y.e_y)*X__y*Y__y + (e_y.e_z)*X__y*Y__z + (e_y.e_z)*X__z*Y__y + (e_z.e_z)*X__z*Y__z + (X__x*Y__y - X__y*Y__x)*e_x^e_y + (X__x*Y__z - X__z*Y__x)*e_x^e_z + (X__y*Y__z - X__z*Y__y)*e_y^e_z'
assert str((X^Y)) == '(X__x*Y__y - X__y*Y__x)*e_x^e_y + (X__x*Y__z - X__z*Y__x)*e_x^e_z + (X__y*Y__z - X__z*Y__y)*e_y^e_z'
assert str((X|Y)) == '(e_x.e_x)*X__x*Y__x + (e_x.e_y)*X__x*Y__y + (e_x.e_y)*X__y*Y__x + (e_x.e_z)*X__x*Y__z + (e_x.e_z)*X__z*Y__x + (e_y.e_y)*X__y*Y__y + (e_y.e_z)*X__y*Y__z + (e_y.e_z)*X__z*Y__y + (e_z.e_z)*X__z*Y__z'
g2d = Ga('e*x|y')
(ex,ey) = g2d.mv()
X = g2d.mv('X','vector')
A = g2d.mv('A','spinor')
assert str(X) == 'X__x*e_x + X__y*e_y'
assert str(A) == 'A + A__xy*e_x^e_y'
assert str((X|A)) == '-A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y)*e_x + A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y)*e_y'
assert str((X<A)) == '-A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y)*e_x + A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y)*e_y'
assert str((A>X)) == 'A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y)*e_x - A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y)*e_y'
o2d = Ga('e*x|y', g=[1, 1])
(ex, ey) = o2d.mv()
X = o2d.mv('X', 'vector')
A = o2d.mv('A', 'spinor')
assert str(X) == 'X__x*e_x + X__y*e_y'
assert str(A) == 'A + A__xy*e_x^e_y'
assert str((X*A)) == '(A*X__x - A__xy*X__y)*e_x + (A*X__y + A__xy*X__x)*e_y'
assert str((X|A)) == '-A__xy*X__y*e_x + A__xy*X__x*e_y'
assert str((X<A)) == '-A__xy*X__y*e_x + A__xy*X__x*e_y'
assert str((X>A)) == 'A*X__x*e_x + A*X__y*e_y'
assert str((A*X)) == '(A*X__x + A__xy*X__y)*e_x + (A*X__y - A__xy*X__x)*e_y'
assert str((A|X)) == 'A__xy*X__y*e_x - A__xy*X__x*e_y'
assert str((A<X)) == 'A*X__x*e_x + A*X__y*e_y'
assert str((A>X)) == 'A__xy*X__y*e_x - A__xy*X__x*e_y'
def test_check_generalized_BAC_CAB_formulas(self):
(a,b,c,d,e) = Ga('a b c d e').mv()
assert str(a|(b*c)) == '-(a.c)*b + (a.b)*c'
assert str(a|(b^c)) == '-(a.c)*b + (a.b)*c'
assert str(a|(b^c^d)) == '(a.d)*b^c - (a.c)*b^d + (a.b)*c^d'
expr = (a|(b^c))+(c|(a^b))+(b|(c^a)) # = (a.b)*c - (b.c)*a - ((a.b)*c - (b.c)*a)
assert str(expr.simplify()) == ZERO_STR
assert str(a*(b^c)-b*(a^c)+c*(a^b)) == '3*a^b^c'
assert str(a*(b^c^d)-b*(a^c^d)+c*(a^b^d)-d*(a^b^c)) == '4*a^b^c^d'
assert str((a^b)|(c^d)) == '-(a.c)*(b.d) + (a.d)*(b.c)'
assert str(((a^b)|c)|d) == '-(a.c)*(b.d) + (a.d)*(b.c)'
assert str(Ga.com(a^b,c^d)) == '-(b.d)*a^c + (b.c)*a^d + (a.d)*b^c - (a.c)*b^d'
assert str((a|(b^c))|(d^e)) == '(-(a.b)*(c.e) + (a.c)*(b.e))*d + ((a.b)*(c.d) - (a.c)*(b.d))*e'
def test_derivatives_in_rectangular_coordinates(self):
X = (x, y, z) = symbols('x y z')
o3d = Ga('e_x e_y e_z', g=[1, 1, 1], coords=X)
(ex, ey, ez) = o3d.mv()
grad = o3d.grad
f = o3d.mv('f', 'scalar', f=True)
A = o3d.mv('A', 'vector', f=True)
B = o3d.mv('B', 'bivector', f=True)
C = o3d.mv('C', 'mv', f=True)
assert str(f) == 'f'
assert str(A) == 'A__x*e_x + A__y*e_y + A__z*e_z'
assert str(B) == 'B__xy*e_x^e_y + B__xz*e_x^e_z + B__yz*e_y^e_z'
assert str(C) == 'C + C__x*e_x + C__y*e_y + C__z*e_z + C__xy*e_x^e_y + C__xz*e_x^e_z + C__yz*e_y^e_z + C__xyz*e_x^e_y^e_z'
assert str(grad*f) == 'D{x}f*e_x + D{y}f*e_y + D{z}f*e_z'
assert str(grad|A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
assert str(grad*A) == 'D{x}A__x + D{y}A__y + D{z}A__z + (-D{y}A__x + D{x}A__y)*e_x^e_y + (-D{z}A__x + D{x}A__z)*e_x^e_z + (-D{z}A__y + D{y}A__z)*e_y^e_z'
assert str(-o3d.I()*(grad^A)) == '(-D{z}A__y + D{y}A__z)*e_x + (D{z}A__x - D{x}A__z)*e_y + (-D{y}A__x + D{x}A__y)*e_z'
assert str(grad*B) == '(-D{y}B__xy - D{z}B__xz)*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z + (D{z}B__xy - D{y}B__xz + D{x}B__yz)*e_x^e_y^e_z'
assert str(grad^B) == '(D{z}B__xy - D{y}B__xz + D{x}B__yz)*e_x^e_y^e_z'
assert str(grad|B) == '(-D{y}B__xy - D{z}B__xz)*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z'
assert str(grad<A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
assert str(grad>A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
assert str(grad<B) == '(-D{y}B__xy - D{z}B__xz)*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z'
assert str(grad>B) == ZERO_STR
assert str(grad<C) == 'D{x}C__x + D{y}C__y + D{z}C__z + (-D{y}C__xy - D{z}C__xz)*e_x + (D{x}C__xy - D{z}C__yz)*e_y + (D{x}C__xz + D{y}C__yz)*e_z + D{z}C__xyz*e_x^e_y - D{y}C__xyz*e_x^e_z + D{x}C__xyz*e_y^e_z'
assert str(grad>C) == 'D{x}C__x + D{y}C__y + D{z}C__z + D{x}C*e_x + D{y}C*e_y + D{z}C*e_z'
def test_derivatives_in_spherical_coordinates(self):
X = (r, th, phi) = symbols('r theta phi')
s3d = Ga('e_r e_theta e_phi', g=[1, r ** 2, r ** 2 * sin(th) ** 2], coords=X, norm=True)
(er, eth, ephi) = s3d.mv()
grad = s3d.grad
f = s3d.mv('f', 'scalar', f=True)
A = s3d.mv('A', 'vector', f=True)
B = s3d.mv('B', 'bivector', f=True)
assert str(f) == 'f'
assert str(A) == 'A__r*e_r + A__theta*e_theta + A__phi*e_phi'
assert str(B) == 'B__rtheta*e_r^e_theta + B__rphi*e_r^e_phi + B__thetaphi*e_theta^e_phi'
assert str(grad*f) == 'D{r}f*e_r + D{theta}f*e_theta/r + D{phi}f*e_phi/(r*sin(theta))'
assert str((grad|A).simplify()) == '(r*D{r}A__r + 2*A__r + A__theta/tan(theta) + D{theta}A__theta + D{phi}A__phi/sin(theta))/r'
assert str(-s3d.I()*(grad^A)) == '(A__phi/tan(theta) + D{theta}A__phi - D{phi}A__theta/sin(theta))*e_r/r + (-r*D{r}A__phi - A__phi + D{phi}A__r/sin(theta))*e_theta/r + (r*D{r}A__theta + A__theta - D{theta}A__r)*e_phi/r'
assert latex(grad) == r'\boldsymbol{e}_{r} \frac{\partial}{\partial r} + \boldsymbol{e}_{\theta } \frac{1}{r} \frac{\partial}{\partial \theta } + \boldsymbol{e}_{\phi } \frac{1}{r \sin{\left (\theta \right )}} \frac{\partial}{\partial \phi }'
assert latex(B|(eth^ephi)) == r'- B^{\theta \phi } {\left (r,\theta ,\phi \right )}'
assert str(grad^B) == '(r*D{r}B__thetaphi - B__rphi/tan(theta) + 2*B__thetaphi - D{theta}B__rphi + D{phi}B__rtheta/sin(theta))*e_r^e_theta^e_phi/r'
def test_rounding_numerical_components(self):
o3d = Ga('e_x e_y e_z', g=[1, 1, 1])
(ex, ey, ez) = o3d.mv()
X = 1.2*ex+2.34*ey+0.555*ez
Y = 0.333*ex+4*ey+5.3*ez
assert str(X) == '1.2*e_x + 2.34*e_y + 0.555*e_z'
assert str(Nga(X,2)) == '1.2*e_x + 2.3*e_y + 0.55*e_z'
assert str(X*Y) == '12.7011000000000 + 4.02078*e_x^e_y + 6.175185*e_x^e_z + 10.182*e_y^e_z'
assert str(Nga(X*Y,2)) == '13. + 4.0*e_x^e_y + 6.2*e_x^e_z + 10.0*e_y^e_z'
def test_noneuclidian_distance_calculation(self):
from sympy import solve,sqrt
g = '0 # #,# 0 #,# # 1'
necl = Ga('X Y e',g=g)
(X,Y,e) = necl.mv()
assert str((X^Y)*(X^Y)) == '(X.Y)**2'
L = X^Y^e
B = (L*e).expand().blade_rep() # D&L 10.152
assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'
Bsq = B*B
assert str(Bsq) == '(X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e)'
Bsq = Bsq.scalar()
assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'
BeBr = B*e*B.rev()
assert str(BeBr) == '(X.Y)*(-(X.Y) + 2*(X.e)*(Y.e))*e'
assert str(B*B) == '(X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e)'
assert str(L*L) == '(X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e)' # D&L 10.153
(s,c,Binv,M,S,C,alpha) = symbols('s c (1/B) M S C alpha')
XdotY = necl.g[0,1]
Xdote = necl.g[0,2]
Ydote = necl.g[1,2]
Bhat = Binv*B # D&L 10.154
R = c+s*Bhat # Rotor R = exp(alpha*Bhat/2)
assert str(R) == 'c + (1/B)*s*X^Y - (1/B)*(Y.e)*s*X^e + (1/B)*(X.e)*s*Y^e'
Z = R*X*R.rev() # D&L 10.155
Z.obj = expand(Z.obj)
Z.obj = Z.obj.collect([Binv,s,c,XdotY])
assert str(Z) == '((1/B)**2*(X.Y)**2*s**2 - 2*(1/B)**2*(X.Y)*(X.e)*(Y.e)*s**2 + 2*(1/B)*(X.Y)*c*s - 2*(1/B)*(X.e)*(Y.e)*c*s + c**2)*X + 2*(1/B)*(X.e)**2*c*s*Y + 2*(1/B)*(X.Y)*(X.e)*s*(-(1/B)*(X.Y)*s + 2*(1/B)*(X.e)*(Y.e)*s - c)*e'
W = Z|Y
# From this point forward all calculations are with sympy scalars
W = W.scalar()
assert str(W) == '(1/B)**2*(X.Y)**3*s**2 - 4*(1/B)**2*(X.Y)**2*(X.e)*(Y.e)*s**2 + 4*(1/B)**2*(X.Y)*(X.e)**2*(Y.e)**2*s**2 + 2*(1/B)*(X.Y)**2*c*s - 4*(1/B)*(X.Y)*(X.e)*(Y.e)*c*s + (X.Y)*c**2'
W = expand(W)
W = simplify(W)
W = W.collect([s*Binv])
M = 1/Bsq
W = W.subs(Binv**2,M)
W = simplify(W)
Bmag = sqrt(XdotY**2-2*XdotY*Xdote*Ydote)
W = W.collect([Binv*c*s,XdotY])
#Double angle substitutions
W = W.subs(2*XdotY**2-4*XdotY*Xdote*Ydote,2/(Binv**2))
W = W.subs(2*c*s,S)
W = W.subs(c**2,(C+1)/2)
W = W.subs(s**2,(C-1)/2)
W = simplify(W)
W = W.subs(Binv,1/Bmag)
W = expand(W)
assert str(W.simplify()) == '(X.Y)*C - (X.e)*(Y.e)*C + (X.e)*(Y.e) + S*sqrt((X.Y)*((X.Y) - 2*(X.e)*(Y.e)))'
Wd = collect(W,[C,S],exact=True,evaluate=False)
Wd_1 = Wd[one]
Wd_C = Wd[C]
Wd_S = Wd[S]
assert str(Wd_1) == '(X.e)*(Y.e)'
assert str(Wd_C) == '(X.Y) - (X.e)*(Y.e)'
assert str(Wd_S) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
assert str(Bmag) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
Wd_1 = Wd_1.subs(Binv,1/Bmag)
Wd_C = Wd_C.subs(Binv,1/Bmag)
Wd_S = Wd_S.subs(Binv,1/Bmag)
lhs = Wd_1+Wd_C*C
rhs = -Wd_S*S
lhs = lhs**2
rhs = rhs**2
W = expand(lhs-rhs)
W = expand(W.subs(1/Binv**2,Bmag**2))
W = expand(W.subs(S**2,C**2-1))
W = W.collect([C,C**2],evaluate=False)
a = simplify(W[C**2])
b = simplify(W[C])
c = simplify(W[one])
assert str(a) == '(X.e)**2*(Y.e)**2'
assert str(b) == '2*(X.e)*(Y.e)*((X.Y) - (X.e)*(Y.e))'
assert str(c) == '(X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e) + (X.e)**2*(Y.e)**2'
x = Symbol('x')
C = solve(a*x**2+b*x+c,x)[0]
assert str(expand(simplify(expand(C)))) == '-(X.Y)/((X.e)*(Y.e)) + 1'
def test_conformal_representations_of_circles_lines_spheres_and_planes(self):
global n,nbar
g = '1 0 0 0 0,0 1 0 0 0,0 0 1 0 0,0 0 0 0 2,0 0 0 2 0'
cnfml3d = Ga('e_1 e_2 e_3 n nbar',g=g)
(e1,e2,e3,n,nbar) = cnfml3d.mv()
e = n+nbar
#conformal representation of points
A = make_vector(e1,ga=cnfml3d) # point a = (1,0,0) A = F(a)
B = make_vector(e2,ga=cnfml3d) # point b = (0,1,0) B = F(b)
C = make_vector(-e1,ga=cnfml3d) # point c = (-1,0,0) C = F(c)
D = make_vector(e3,ga=cnfml3d) # point d = (0,0,1) D = F(d)
X = make_vector('x',3,ga=cnfml3d)
assert str(A) == 'e_1 + n/2 - nbar/2'
assert str(B) == 'e_2 + n/2 - nbar/2'
assert str(C) == '-e_1 + n/2 - nbar/2'
assert str(D) == 'e_3 + n/2 - nbar/2'
assert str(X) == 'x1*e_1 + x2*e_2 + x3*e_3 + (x1**2/2 + x2**2/2 + x3**2/2)*n - nbar/2'
assert str((A^B^C^X)) == '-x3*e_1^e_2^e_3^n + x3*e_1^e_2^e_3^nbar + (x1**2/2 + x2**2/2 + x3**2/2 - 1/2)*e_1^e_2^n^nbar'
assert str((A^B^n^X)) == '-x3*e_1^e_2^e_3^n + (x1/2 + x2/2 - 1/2)*e_1^e_2^n^nbar + x3*e_1^e_3^n^nbar/2 - x3*e_2^e_3^n^nbar/2'
assert str((((A^B)^C)^D)^X) == '(-x1**2/2 - x2**2/2 - x3**2/2 + 1/2)*e_1^e_2^e_3^n^nbar'
assert str((A^B^n^D^X)) == '(-x1/2 - x2/2 - x3/2 + 1/2)*e_1^e_2^e_3^n^nbar'
L = (A^B^e)^X
assert str(L) == '-x3*e_1^e_2^e_3^n - x3*e_1^e_2^e_3^nbar + (-x1**2/2 + x1 - x2**2/2 + x2 - x3**2/2 - 1/2)*e_1^e_2^n^nbar + x3*e_1^e_3^n^nbar - x3*e_2^e_3^n^nbar'
def test_properties_of_geometric_objects(self):
global n, nbar
g = '# # # 0 0,'+ \
'# # # 0 0,'+ \
'# # # 0 0,'+ \
'0 0 0 0 2,'+ \
'0 0 0 2 0'
c3d = Ga('p1 p2 p3 n nbar',g=g)
(p1,p2,p3,n,nbar) = c3d.mv()
P1 = F(p1)
P2 = F(p2)
P3 = F(p3)
L = P1^P2^n
delta = (L|n)|nbar
assert str(delta) == '2*p1 - 2*p2'
C = P1^P2^P3
delta = ((C^n)|n)|nbar
assert str(delta) == '2*p1^p2 - 2*p1^p3 + 2*p2^p3'
assert str((p2-p1)^(p3-p1)) == 'p1^p2 - p1^p3 + p2^p3'
def test_extracting_vectors_from_conformal_2_blade(self):
g = '0 -1 #,'+ \
'-1 0 #,'+ \
'# # #'
e2b = Ga('P1 P2 a',g=g)
(P1,P2,a) = e2b.mv()
B = P1^P2
Bsq = B*B
assert str(Bsq) == '1'
ap = a-(a^B)*B
assert str(ap) == '-(P2.a)*P1 - (P1.a)*P2'
Ap = ap+ap*B
Am = ap-ap*B
assert str(Ap) == '-2*(P2.a)*P1'
assert str(Am) == '-2*(P1.a)*P2'
assert str(Ap*Ap) == ZERO_STR
assert str(Am*Am) == ZERO_STR
aB = a|B
assert str(aB) == '-(P2.a)*P1 + (P1.a)*P2'
def test_reciprocal_frame_test(self):
g = '1 # #,'+ \
'# 1 #,'+ \
'# # 1'
g3dn = Ga('e1 e2 e3',g=g)
(e1,e2,e3) = g3dn.mv()
E = e1^e2^e3
Esq = (E*E).scalar()
assert str(E) == 'e1^e2^e3'
assert str(Esq) == '(e1.e2)**2 - 2*(e1.e2)*(e1.e3)*(e2.e3) + (e1.e3)**2 + (e2.e3)**2 - 1'
Esq_inv = 1/Esq
E1 = (e2^e3)*E
E2 = (-1)*(e1^e3)*E
E3 = (e1^e2)*E
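        # Up to division by Esq, E1, E2 and E3 form the reciprocal frame: the
        # asserts below check that (Ei|ej) vanishes for i != j and that
        # (Ei|ei)/Esq simplifies to 1.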
assert str(E1) == '((e2.e3)**2 - 1)*e1 + ((e1.e2) - (e1.e3)*(e2.e3))*e2 + (-(e1.e2)*(e2.e3) + (e1.e3))*e3'
assert str(E2) == '((e1.e2) - (e1.e3)*(e2.e3))*e1 + ((e1.e3)**2 - 1)*e2 + (-(e1.e2)*(e1.e3) + (e2.e3))*e3'
assert str(E3) == '(-(e1.e2)*(e2.e3) + (e1.e3))*e1 + (-(e1.e2)*(e1.e3) + (e2.e3))*e2 + ((e1.e2)**2 - 1)*e3'
w = (E1|e2)
w = w.expand()
assert str(w) == ZERO_STR
w = (E1|e3)
w = w.expand()
assert str(w) == ZERO_STR
w = (E2|e1)
w = w.expand()
assert str(w) == ZERO_STR
w = (E2|e3)
w = w.expand()
assert str(w) == ZERO_STR
w = (E3|e1)
w = w.expand()
assert str(w) == ZERO_STR
w = (E3|e2)
w = w.expand()
assert str(w) == ZERO_STR
w = (E1|e1)
w = (w.expand()).scalar()
Esq = expand(Esq)
assert str(simplify(w/Esq)) == '1'
w = (E2|e2)
w = (w.expand()).scalar()
assert str(simplify(w/Esq)) == '1'
w = (E3|e3)
w = (w.expand()).scalar()
assert str(simplify(w/Esq)) == '1'
| arsenovic/galgebra | test/test_test.py | Python | bsd-3-clause | 16,441 |
# -*- coding: utf-8 -*-
"""
This module contains ExtensionLoader class that works with "model"
folder and is used to initialize the pipeline with all available image
processing categories and their respective algorithms. It uses config.json
settings to initialize image processing categories accordingly.
ExtensionLoader creates a collection of categories and algorithms ready to
be loaded into the pipeline object.
"""
import re
import os
import xml.etree.ElementTree as et
import sys
from collections import OrderedDict as od
from importlib.machinery import SourceFileLoader
__authors__ = {"Pavel Shkadzko": "[email protected]"}
class ExtensionLoader:
def __init__(self):
"""
public Attributes:
| *cats_container*: a dict with Category names and instances
private Attributes:
| *_category_dir*: a directory path for categories
| *_config_path*: a path to config.xml
| *_found_cats*: a list of category paths
Returns:
instance of the ExtensionLoader object
"""
_category_dir = os.path.join(os.path.dirname(__file__), 'categories')
_found_cats = self._scan_model(_category_dir)
self.cats_container = self._instantiate_cats(_found_cats)
@staticmethod
def _scan_model(cat_dir):
"""
Check for any available category files in cat_dir.
Return found file names.
Raise an error if no file was found.
Args:
| *cat_dir* (str): category dir provided by the ext_loader
Vars:
| *found_cats* (list): a filtered list of category file names
| *category_files* (list): a list of algorithm file names
| *ignored*: a regex object, used to filter unnecessary files
Returns:
| *found_cats_paths* (list): a list of found category paths
"""
category_files = os.listdir(cat_dir)
ign = re.compile(r'.*.pyc|__init__|_category.py|_alg.py|_utility.py')
found_cats = list(filter(lambda x: not ign.match(x), category_files))
if not found_cats:
raise FileNotFoundError("No image processing categories "
"found in ./model/categories")
# add abs paths
found_cats_paths = [os.path.join(os.path.dirname(__file__), 'categories', cat)
for cat in found_cats]
return found_cats_paths
@staticmethod
def _instantiate_cats(found_cats):
"""
Instantiate imported categories and return a list of instantiated
categories.
Create a list with methods that represent a pipeline with selected
algorithms and predefined settings.
<When the Category object is instantiated it automatically imports and
creates a list of algorithms that belong to it>
Args:
| *found_cats*: a list of found category file names
Vars:
            | *cats_inst*: a list of found and instantiated categories
            | *cats*: an OrderedDict of {Category name: Category instance}
        Returns:
            | *cats*: an OrderedDict with Category instances
"""
cats_inst = []
for cat_path in found_cats:
cat_name = os.path.basename(cat_path).split('.')[0]
cat = SourceFileLoader(cat_name, cat_path).load_module()
inst = cat.CatBody()
cats_inst.append(inst)
# sort the cats
order = ['Preprocessing', 'Segmentation', 'Graph Detection',
'Graph Filtering']
# in case we have custom cats, add them
for cat in cats_inst:
if cat.name not in order:
order.append(cat.name)
cats_inst.sort(key=lambda x: order.index(x.name))
# create a dict of {Category name: Category instance}
cats = od()
for category in cats_inst:
cats[category.name] = category
return cats
if __name__ == '__main__':
pass
| LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/bin/nefi2/model/ext_loader.py | Python | bsd-2-clause | 4,135 |
#!/usr/bin/env python
import subprocess
import re
import os
import collections
class Platform(object):
pass
sdk_re = re.compile(r'.*-sdk ([a-zA-Z0-9.]*)')
def sdkinfo(sdkname):
ret = {}
for line in subprocess.Popen(['xcodebuild', '-sdk', sdkname, '-version'], stdout=subprocess.PIPE).stdout:
kv = line.strip().split(': ', 1)
if len(kv) == 2:
k, v = kv
ret[k] = v
return ret
desktop_sdk_info = sdkinfo('macosx')
def latest_sdks():
latest_desktop = None
for line in subprocess.Popen(['xcodebuild', '-showsdks'], stdout=subprocess.PIPE).stdout:
match = sdk_re.match(line)
if match:
if 'OS X' in line:
latest_desktop = match.group(1)
return latest_desktop
desktop_sdk = latest_sdks()
class desktop_platform_32(Platform):
sdk = 'macosx'
arch = 'i386'
name = 'mac32'
triple = 'i386-apple-darwin10'
sdkroot = desktop_sdk_info['Path']
prefix = "#if defined(__i386__) && !defined(__x86_64__)\n\n"
suffix = "\n\n#endif"
class desktop_platform_64(Platform):
sdk = 'macosx'
arch = 'x86_64'
name = 'mac'
triple = 'x86_64-apple-darwin10'
sdkroot = desktop_sdk_info['Path']
prefix = "#if !defined(__i386__) && defined(__x86_64__)\n\n"
suffix = "\n\n#endif"
def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
out_filename = filename
if file_suffix:
split_name = os.path.splitext(filename)
out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])
with open(os.path.join(src_dir, filename)) as in_file:
with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
if prefix:
out_file.write(prefix)
out_file.write(in_file.read())
if suffix:
out_file.write(suffix)
headers_seen = collections.defaultdict(set)
def move_source_tree(src_dir, dest_dir, dest_include_dir, arch=None, prefix=None, suffix=None):
for root, dirs, files in os.walk(src_dir, followlinks=True):
relroot = os.path.relpath(root, src_dir)
def move_dir(arch, prefix='', suffix='', files=[]):
for file in files:
if file.endswith('.h'):
if dest_include_dir:
if arch:
headers_seen[file].add(arch)
move_file(root, dest_include_dir, file, arch, prefix=prefix, suffix=suffix)
elif dest_dir:
outroot = os.path.join(dest_dir, relroot)
move_file(root, outroot, file, prefix=prefix, suffix=suffix)
if relroot == '.':
move_dir(arch=arch,
files=files,
prefix=prefix,
suffix=suffix)
elif relroot == 'x86':
move_dir(arch='i386',
prefix="#if defined(__i386__) && !defined(__x86_64__)\n\n",
suffix="\n\n#endif",
files=['darwin.S', 'ffi.c'])
move_dir(arch='x86_64',
prefix="#if !defined(__i386__) && defined(__x86_64__)\n\n",
suffix="\n\n#endif",
files=['darwin64.S', 'ffi64.c'])
def build_target(platform):
def xcrun_cmd(cmd):
return subprocess.check_output(['xcrun', '-sdk', platform.sdkroot, '-find', cmd]).strip()
build_dir = 'build_' + platform.name
if not os.path.exists(build_dir):
os.makedirs(build_dir)
env = dict(CC=xcrun_cmd('clang'),
LD=xcrun_cmd('ld'),
CFLAGS='-arch %s -isysroot %s -mmacosx-version-min=10.6' % (platform.arch, platform.sdkroot))
working_dir = os.getcwd()
try:
os.chdir(build_dir)
subprocess.check_call(['../configure', '-host', platform.triple], env=env)
move_source_tree('.', None, '../osx/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
move_source_tree('./include', None, '../osx/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
finally:
os.chdir(working_dir)
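    # Note: the loop below only computes basename/suffix and then discards them;
    # the combined per-arch headers are actually written out in main() below.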
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
def main():
move_source_tree('src', 'osx/src', 'osx/include')
move_source_tree('include', None, 'osx/include')
build_target(desktop_platform_32)
build_target(desktop_platform_64)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
with open(os.path.join('osx/include', header_name), 'w') as header:
for arch in archs:
header.write('#include <%s_%s%s>\n' % (basename, arch, suffix))
if __name__ == '__main__':
main()
| zwaldowski/libffi-iOS | generate-osx-source-and-headers.py | Python | mit | 5,134 |
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import Response
from rapid.lib.version import Version
from rapid.lib import api_key_required
from rapid.lib.utils import UpgradeUtil
class UpgradeController(object):
def __init__(self, flask_app):
self.flask_app = flask_app
def configure_routing(self):
self.flask_app.add_url_rule('/api/upgrade/<path:version>', 'upgrade_master', api_key_required(self.upgrade_master), methods=['POST'])
def upgrade_master(self, version):
worked = UpgradeUtil.upgrade_version(version, self.flask_app.rapid_config)
return Response("It worked!" if worked else "It didn't work, version {} restored!".format(Version.get_version()), status=200 if worked else 505)
| BambooHR/rapid | rapid/master/controllers/api/upgrade_controller.py | Python | apache-2.0 | 1,295 |
# coding: utf-8
import time
import sublime
from sublime_plugin import WindowCommand
from .util import noop
from .cmd import GitCmd
from .helpers import GitStashHelper, GitStatusHelper, GitErrorHelper
class GitStashWindowCmd(GitCmd, GitStashHelper, GitErrorHelper):
def pop_or_apply_from_panel(self, action):
repo = self.get_repo()
if not repo:
return
stashes = self.get_stashes(repo)
if not stashes:
return sublime.error_message('No stashes. Use the Git: Stash command to stash changes')
callback = self.pop_or_apply_callback(repo, action, stashes)
panel = []
for name, title in stashes:
panel.append([title, "stash@{%s}" % name])
self.window.show_quick_panel(panel, callback)
def pop_or_apply_callback(self, repo, action, stashes):
def inner(choice):
if choice != -1:
name, _ = stashes[choice]
exit_code, stdout, stderr = self.git(['stash', action, '-q', 'stash@{%s}' % name], cwd=repo)
if exit_code != 0:
sublime.error_message(self.format_error_message(stderr))
window = sublime.active_window()
if window:
window.run_command('git_status', {'refresh_only': True})
return inner
class GitStashCommand(WindowCommand, GitCmd, GitStatusHelper):
"""
    Stash local changes, prompting the user for a stash title. With
    untracked=True, untracked files are included in the stash.
"""
def run(self, untracked=False):
repo = self.get_repo()
if not repo:
return
def on_done(title):
title = title.strip()
self.git(['stash', 'save', '--include-untracked' if untracked else None, '--', title], cwd=repo)
self.window.run_command('git_status', {'refresh_only': True})
# update the index
self.git_exit_code(['update-index', '--refresh'], cwd=repo)
# get files status
untracked_files, unstaged_files, _ = self.get_files_status(repo)
# check for if there's something to stash
if not unstaged_files:
if (untracked and not untracked_files) or (not untracked):
return sublime.error_message("No local changes to save")
self.window.show_input_panel('Stash title:', '', on_done, noop, noop)
class GitSnapshotCommand(WindowCommand, GitStashWindowCmd):
"""
    Save a timestamped snapshot stash and immediately re-apply it,
    leaving the working tree unchanged.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
snapshot = time.strftime("Snapshot at %Y-%m-%d %H:%M:%S")
self.git(['stash', 'save', '--', snapshot], cwd=repo)
self.git(['stash', 'apply', '-q', 'stash@{0}'], cwd=repo)
self.window.run_command('git_status', {'refresh_only': True})
class GitStashPopCommand(WindowCommand, GitStashWindowCmd):
"""
    Pop a stash chosen from the stash quick panel.
"""
def run(self):
self.pop_or_apply_from_panel('pop')
class GitStashApplyCommand(WindowCommand, GitStashWindowCmd):
"""
    Apply a stash chosen from the stash quick panel.
"""
def run(self):
self.pop_or_apply_from_panel('apply')
| SublimeGit/SublimeGit | sgit/stash.py | Python | mit | 3,127 |
import os.path
import re
from anki.template import Template
class View(object):
# Path where this view's template(s) live
template_path = '.'
# Extension for templates
template_extension = 'mustache'
# The name of this template. If none is given the View will try
# to infer it based on the class name.
template_name = None
# Absolute path to the template itself. Pystache will try to guess
# if it's not provided.
template_file = None
# Contents of the template.
template = None
# Character encoding of the template file. If None, Pystache will not
# do any decoding of the template.
template_encoding = None
def __init__(self, template=None, context=None, **kwargs):
self.template = template
self.context = context or {}
# If the context we're handed is a View, we want to inherit
# its settings.
if isinstance(context, View):
self.inherit_settings(context)
if kwargs:
self.context.update(kwargs)
def inherit_settings(self, view):
"""Given another View, copies its settings."""
if view.template_path:
self.template_path = view.template_path
if view.template_name:
self.template_name = view.template_name
def load_template(self):
if self.template:
return self.template
if self.template_file:
return self._load_template()
name = self.get_template_name() + '.' + self.template_extension
if isinstance(self.template_path, str):
self.template_file = os.path.join(self.template_path, name)
return self._load_template()
for path in self.template_path:
self.template_file = os.path.join(path, name)
if os.path.exists(self.template_file):
return self._load_template()
raise IOError('"%s" not found in "%s"' % (name, ':'.join(self.template_path),))
def _load_template(self):
f = open(self.template_file, 'r')
try:
template = f.read()
if self.template_encoding:
template = str(template, self.template_encoding)
finally:
f.close()
return template
def get_template_name(self, name=None):
"""TemplatePartial => template_partial
Takes a string but defaults to using the current class' name or
the `template_name` attribute
"""
if self.template_name:
return self.template_name
if not name:
name = self.__class__.__name__
def repl(match):
return '_' + match.group(0).lower()
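        # e.g. a class named TemplatePartial becomes "_template_partial" after
        # re.sub, and "template_partial" once the leading underscore is sliced off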
return re.sub('[A-Z]', repl, name)[1:]
def __contains__(self, needle):
return needle in self.context or hasattr(self, needle)
def __getitem__(self, attr):
val = self.get(attr, None)
if not val:
raise KeyError("No such key.")
return val
def get(self, attr, default):
attr = self.context.get(attr, getattr(self, attr, default))
if hasattr(attr, '__call__'):
return attr()
else:
return attr
def render(self, encoding=None):
template = self.load_template()
return Template(template, self).render(encoding=encoding)
def __str__(self):
return self.render()
| victos/busuu_anki | anki/template/view.py | Python | agpl-3.0 | 3,397 |
"""
BrowserPlus
-----------
Advanced Mechanize browser.
"""
from setuptools import setup, find_packages
from browserplus import __version__
setup(
name='browserplus',
version=__version__,
url='https://github.com/vinceau/browserplus',
license='MIT',
author='Vince Au',
author_email='[email protected]',
description='Advanced Mechanize browser.',
long_description=__doc__,
data_files=[('browserplus', ['README.rst'])],
packages=find_packages(),
install_requires=['mechanize', 'lxml', 'cssselect'],
include_package_data=True,
zip_safe=False,
platforms='any',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| xujun10110/browserplus | setup.py | Python | mit | 1,053 |
# -*- coding: utf-8 -*-
# © 2014 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <[email protected]>
# © 2016 ACSONE SA/NV (<http://acsone.eu>)
{
'name': 'URL attachment',
'version': '9.0.2.0.1',
'category': 'Tools',
'author': "Tecnativa,"
"Odoo Community Association (OCA)",
'website': 'https://www.tecnativa.com',
'license': 'AGPL-3',
'depends': [
'document',
],
'data': [
'view/document_url_view.xml',
],
'qweb': [
'static/src/xml/url.xml',
],
'installable': True,
}
| ClearCorp/knowledge | document_url/__openerp__.py | Python | agpl-3.0 | 622 |
#! /usr/bin/python
# Joe Deller 2014
# A simple colour version of the jsw.py code
# Level : Advanced
# Uses : Libraries, variables, operators, loops, lists
import mcpi.minecraft as minecraft
import mcpi.block as block
import time
# In this version there are only three frames of animation
def drawFrame(x, y, z, frame_data):
width = 12
# We draw the character from the top downwards, one line at a time
# Quick hack from the V1 of this code.
# We will allow a coloured wool block, from 2-9
# A "0" still means an empty AIR block
# a 1 still means a STONE block
# Python doesn't have a switch / case keyword, which is a pity
# Could use a dictionary
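    # For example, the row "000333300000" keeps AIR on either side of four
    # wool blocks drawn in colour 3.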
for line in frame_data:
for i in range(0, width):
colour = int(line[i])
if (colour < 2):
# We cheat a little, as zero is the same as block.AIR
# and a "1" is a stone block, we can just use colour
# as the block number. This is generally not a good idea
# and can make code fragile. However, sometimes it
# just makes sense to do things like this
# if (colour == 0):
mc.setBlock(x + i, y, z, colour)
#else:
# mc.setBlock(x + i, y, z, block.STONE.id)
else:
mc.setBlock(x + i, y, z, block.WOOL.id,colour)
y = y - 1
# Adjust the sleep or take it out depending on how fast or slow
# your Pi is
# time.sleep(.1)
def moveMe(x, y, z, direction):
# we need to choose which set of data to use to animate our character
# if we set "left" to 1, then we will use the left facing frames
# otherwise we will use the ones facing right
# Our character is 16 rows high, so to draw the figure
# so the feet are on the ground we add 15 to our current position
# If we wanted to, we could keep the frames in a list and use that rather than
# call drawFrame several times.
# For now, we will keep it simple
# Modified from the version 1 of this code
# We only actually have 3 frames of animation
# we reuse the "starting left" frame
height = y + 15
if (direction == LEFT):
x = x + direction
drawFrame(x, height, z, standing_left)
drawFrame(x, height, z, starting_left)
x = x + direction
drawFrame(x, height, z, moving_left)
x = x + (direction * 2)
drawFrame(x, height, z, starting_left)
# x = x + (direction * 1)
x = x + direction
else:
direction = RIGHT
x = x + direction
drawFrame(x, height, z, standing_right)
#x = x + direction
drawFrame(x, height, z, starting_right)
x = x + direction
drawFrame(x, height, z, moving_right)
x = x + ( direction * 2 )
drawFrame(x, height, z, starting_right)
# The data needed to draw the character is stored in 4 lists,
# it was easier to do this when I was drawing the character in my spreadsheet
# There are other ways you could use
# Note that I have deliberately put the information on single lines
# as a way of visually checking all the frames had the same amount
# of data and each piece was the same length. You don't have to do this.
# A more advanced program might read this information from a file
# so that it could be modified more easily and our program could use
# different characters
# If you look carefully, you might be able to see the shape of the character
moving_left = [
"000333300000",
"000333300000",
"002222220000",
"000404400000",
"004444400000",
"000444400000",
"000044000000",
"000111100000",
"001111110000",
"011111111000",
"111111111100",
"410111101400",
"005555500000",
"905505550000",
"999000099000",
"099000999000"
]
standing_left = [
"000003333000",
"000003333000",
"000022222200",
"000004044000",
"000044444000",
"000004444000",
"000000440000",
"000001111000",
"000011101100",
"000011101100",
"000011101100",
"000041001400",
"000005555000",
"000000550000",
"000000990000",
"000009990000"]
starting_left = [
"000033330000",
"000033330000",
"000222222000",
"000040440000",
"000444440000",
"000044440000",
"000004400000",
"000011110000",
"000111111000",
"000111111000",
"001110111100",
"004101111400",
"000055550000",
"000550555000",
"000999099000",
"009990999000"]
# We can save ourselves some typing as the character moving right
# is the mirror image of moving left, so if we reverse our data
# We can use that instead. To reverse a string in python,
# we can use something called "Extended Slices"
# which takes the form [start:finish:step]
# If you miss out the start and finish part, python assumes you mean start at the first character and stop at the last
# a step of -1 means go backwards, so effectively [::-1] means take what you give me and reverse it
# Some languages have more friendly functions to do this, called "reverse"
# Creating a mirror image now will save us time later on, we only need to do this the once as
# the information does not change
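# For example, "1234"[::-1] gives "4321", so each 12-character row is mirrored.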
# Create an empty list
standing_right = []
# for every row of block data, reverse the information and add it to our list
for line in standing_left:
standing_right.append(line[::-1])
# Now do the same for the remaining frames
starting_right = []
for line in starting_left:
starting_right.append(line[::-1])
moving_right = []
for line in moving_left:
moving_right.append(line[::-1])
# Connect to Minecraft and find out where we are
mc = minecraft.Minecraft.create()
playerPos = mc.player.getTilePos()
x, y, z = playerPos.x, playerPos.y, playerPos.z
# We will draw a black wool screen to help show the animation
# but set it back 20 blocks so we can see it
z = z -20
mc.setBlocks(x - 30, y, z - 22, x + 40, y + 22, z + 20, block.AIR.id)
mc.setBlocks(x - 30, y - 1, z - 20, x + 30, y - 1, z + 20, block.GRASS.id)
mc.setBlocks(x - 25, y, z, x + 25, y + 16, z, block.WOOL.id, 15)
# To help make our code more readable, we sometimes use words to help make clear what a number means
# In other programming languages there are special variables called constants that are used for this
# Unfortunately python doesn't have a direct equivalent, but we can still
# do something similar
RIGHT = 1
LEFT = -1
# Each time we draw our character, four frames are drawn, each moving 1 block left or right at a time
# Once the four frames have been drawn, the next animation needs to start 5 blocks to the left or right
# from where we started
#
# moving left means moving negatively along the X axis, from a small negative number to a bigger negative number
# moving right means moving the other way
# Our character will be one block forward from the wool screen
# We will move a total of 40 blocks left, then back 40 to the right
# These values mean we can see most of the animation
for i in range(15, -25, -5):
moveMe(playerPos.x + i, y, z + 1, LEFT)
time.sleep(.1)
time.sleep(1)
for i in range(-25, 15, 5):
moveMe(playerPos.x + i, y, z + 1, RIGHT)
time.sleep(.05)
| joedeller/pymine | jswcolour.py | Python | mit | 7,176 |
# comments.views
# PYTHON
import json
# DJANGO
from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.urlresolvers import reverse
from django.db.models import Max
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.views.generic.base import TemplateView
from django.views.generic import (
ArchiveIndexView,
CreateView,
UpdateView,
DetailView
)
# CAPTCHA
from captcha.models import CaptchaStore
from captcha.helpers import captcha_image_url
# DJANGO-BRACES
from braces.views import LoginRequiredMixin, StaffuserRequiredMixin
# COMMENTS
from .forms import CommentForm
from .models import Comment
############
# COMMENTS ARCHIVE
############
class CommentArchiveView(ArchiveIndexView):
extra_context = None
def get_context_data(self, **kwargs):
context = super(CommentArchiveView, self).get_context_data(**kwargs)
if self.extra_context:
context.update(self.extra_context)
return context
def comment_archive_view(request, **kwargs):
form = CommentForm()
return CommentArchiveView.as_view(
queryset=Comment.objects.filter(display=True),
paginate_by=10,
allow_empty=True,
date_field="created",
template_name='comments/index.html',
extra_context={'form':form})(request, **kwargs)
@login_required
@user_passes_test(lambda u:u.is_staff, login_url='/login/')
def delete_comment_view(request, pk):
try:
Comment.objects.get(pk=pk).delete()
except Comment.DoesNotExist:
raise Http404
return HttpResponseRedirect(reverse('current_comments'))
############
# COMMENTS
############
class CommentActionMixin(object):
@property
def action(self):
msg = "{0} is missing action.".format(self.__class__)
raise NotImplementedError(msg)
def form_valid(self, form):
msg = 'Comment {0}.'.format(self.action)
messages.info(self.request, msg)
return super(CommentActionMixin, self).form_valid(form)
class CommentCreateView(CreateView):
"""View for creating a single comment."""
template_name = 'comments/comment_edit.html'
model = Comment
action = 'created'
form_class = CommentForm
def form_invalid(self, form):
if self.request.is_ajax():
json_pkt = dict()
json_pkt['status'] = 0
json_pkt['form_errors'] = form.errors
json_pkt['new_cptch_key'] = CaptchaStore.generate_key()
json_pkt['new_cptch_image'] = captcha_image_url(
json_pkt['new_cptch_key'])
return HttpResponse(
json.dumps(json_pkt), content_type='application/json')
else:
return super(CommentCreateView, self).form_invalid(form)
def form_valid(self, form):
form.save()
if self.request.is_ajax():
json_pkt = dict()
json_pkt['status'] = 1
json_pkt['new_cptch_key'] = CaptchaStore.generate_key()
json_pkt['new_cptch_image'] = captcha_image_url(
json_pkt['new_cptch_key'])
return HttpResponse(
json.dumps(json_pkt), content_type='application/json')
else:
return super(CommentCreateView, self).form_valid(form)
class CommentUpdateView(UpdateView):
"""View for editing a single comment."""
template_name = 'comments/comment_edit.html'
model = Comment
action = 'updated'
form_class = CommentForm
class CommentDetailView(DetailView):
"""View for displaying a single comment."""
template_name = 'comments/comment.html'
model = Comment
| valuesandvalue/valuesandvalue | vavs_project/comments/views.py | Python | mit | 3,904 |
class PlanningFailedError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
| jonbinney/python-planning | python_task_planning/src/python_task_planning/exceptions.py | Python | mit | 138 |
# Copyright (C) 2006, Giovanni Bajo
# Based on previous work under copyright (c) 2001, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# PIL's SpiderImagePlugin features a tkPhotoImage() method which imports
# ImageTk (and thus brings the whole Tcl/Tk library in).
# We cheat a little and remove the ImageTk import: I assume that if people
# are really using ImageTk in their application, they will also import it
# directly.
def hook(mod):
for i in range(len(mod.imports)):
if mod.imports[i][0] == "ImageTk":
del mod.imports[i]
break
return mod
| pdubroy/kurt | build/MacOS/PyInstaller/pyinstaller-svn-r812/hooks/shared_PIL_SpiderImagePlugin.py | Python | gpl-2.0 | 1,278 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# stats.py - part of the FDroid server tools
# Copyright (C) 2010-13, Ciaran Gultnieks, [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import re
import time
import traceback
import glob
import json
from optparse import OptionParser
import paramiko
import socket
import logging
import common
import metadata
import subprocess
from collections import Counter
def carbon_send(key, value):
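    # One metric in Carbon's plain-text line protocol: "<key> <value> <unix timestamp>\n"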
s = socket.socket()
s.connect((config['carbon_host'], config['carbon_port']))
msg = '%s %d %d\n' % (key, value, int(time.time()))
s.sendall(msg)
s.close()
options = None
config = None
def main():
global options, config
# Parse command line...
parser = OptionParser()
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Spew out even more information than normal")
parser.add_option("-q", "--quiet", action="store_true", default=False,
help="Restrict output to warnings and errors")
parser.add_option("-d", "--download", action="store_true", default=False,
help="Download logs we don't have")
parser.add_option("--recalc", action="store_true", default=False,
help="Recalculate aggregate stats - use when changes "
"have been made that would invalidate old cached data.")
parser.add_option("--nologs", action="store_true", default=False,
help="Don't do anything logs-related")
(options, args) = parser.parse_args()
config = common.read_config(options)
if not config['update_stats']:
logging.info("Stats are disabled - check your configuration")
sys.exit(1)
# Get all metadata-defined apps...
metaapps = metadata.read_metadata()
statsdir = 'stats'
logsdir = os.path.join(statsdir, 'logs')
datadir = os.path.join(statsdir, 'data')
if not os.path.exists(statsdir):
os.mkdir(statsdir)
if not os.path.exists(logsdir):
os.mkdir(logsdir)
if not os.path.exists(datadir):
os.mkdir(datadir)
if options.download:
# Get any access logs we don't have...
ssh = None
ftp = None
try:
logging.info('Retrieving logs')
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.connect('f-droid.org', username='fdroid', timeout=10,
key_filename=config['webserver_keyfile'])
ftp = ssh.open_sftp()
ftp.get_channel().settimeout(60)
logging.info("...connected")
ftp.chdir('logs')
files = ftp.listdir()
for f in files:
if f.startswith('access-') and f.endswith('.log.gz'):
destpath = os.path.join(logsdir, f)
destsize = ftp.stat(f).st_size
if (not os.path.exists(destpath) or
os.path.getsize(destpath) != destsize):
logging.debug("...retrieving " + f)
ftp.get(f, destpath)
except Exception:
traceback.print_exc()
sys.exit(1)
finally:
# Disconnect
if ftp is not None:
ftp.close()
if ssh is not None:
ssh.close()
knownapks = common.KnownApks()
unknownapks = []
if not options.nologs:
# Process logs
logging.info('Processing logs...')
appscount = Counter()
appsvercount = Counter()
logexpr = '(?P<ip>[.:0-9a-fA-F]+) - - \[(?P<time>.*?)\] ' + \
'"GET (?P<uri>.*?) HTTP/1.\d" (?P<statuscode>\d+) ' + \
'\d+ "(?P<referral>.*?)" "(?P<useragent>.*?)"'
logsearch = re.compile(logexpr).search
for logfile in glob.glob(os.path.join(logsdir, 'access-*.log.gz')):
logging.debug('...' + logfile)
# Get the date for this log - e.g. 2012-02-28
thisdate = os.path.basename(logfile)[7:-7]
agg_path = os.path.join(datadir, thisdate + '.json')
if not options.recalc and os.path.exists(agg_path):
# Use previously calculated aggregate data
with open(agg_path, 'r') as f:
today = json.load(f)
else:
# Calculate from logs...
today = {
'apps': Counter(),
'appsver': Counter(),
'unknown': []
}
p = subprocess.Popen(["zcat", logfile], stdout=subprocess.PIPE)
matches = (logsearch(line) for line in p.stdout)
for match in matches:
if match and match.group('statuscode') == '200':
uri = match.group('uri')
if uri.endswith('.apk'):
_, apkname = os.path.split(uri)
app = knownapks.getapp(apkname)
if app:
appid, _ = app
today['apps'][appid] += 1
# Strip the '.apk' from apkname
appver = apkname[:-4]
today['appsver'][appver] += 1
else:
if apkname not in today['unknown']:
today['unknown'].append(apkname)
# Save calculated aggregate data for today to cache
with open(agg_path, 'w') as f:
json.dump(today, f)
# Add today's stats (whether cached or recalculated) to the total
for appid in today['apps']:
appscount[appid] += today['apps'][appid]
for appid in today['appsver']:
appsvercount[appid] += today['appsver'][appid]
for uk in today['unknown']:
if uk not in unknownapks:
unknownapks.append(uk)
# Calculate and write stats for total downloads...
lst = []
alldownloads = 0
for appid in appscount:
count = appscount[appid]
lst.append(appid + " " + str(count))
if config['stats_to_carbon']:
carbon_send('fdroid.download.' + appid.replace('.', '_'),
count)
alldownloads += count
lst.append("ALL " + str(alldownloads))
f = open('stats/total_downloads_app.txt', 'w')
f.write('# Total downloads by application, since October 2011\n')
for line in sorted(lst):
f.write(line + '\n')
f.close()
f = open('stats/total_downloads_app_version.txt', 'w')
f.write('# Total downloads by application and version, '
'since October 2011\n')
lst = []
for appver in appsvercount:
count = appsvercount[appver]
lst.append(appver + " " + str(count))
for line in sorted(lst):
f.write(line + "\n")
f.close()
# Calculate and write stats for repo types...
logging.info("Processing repo types...")
repotypes = Counter()
for app in metaapps:
if len(app['Repo Type']) == 0:
rtype = 'none'
else:
if app['Repo Type'] == 'srclib':
rtype = common.getsrclibvcs(app['Repo'])
else:
rtype = app['Repo Type']
repotypes[rtype] += 1
f = open('stats/repotypes.txt', 'w')
for rtype in repotypes:
count = repotypes[rtype]
f.write(rtype + ' ' + str(count) + '\n')
f.close()
# Calculate and write stats for update check modes...
logging.info("Processing update check modes...")
ucms = Counter()
for app in metaapps:
checkmode = app['Update Check Mode']
if checkmode.startswith('RepoManifest/'):
checkmode = checkmode[:12]
if checkmode.startswith('Tags '):
checkmode = checkmode[:4]
ucms[checkmode] += 1
f = open('stats/update_check_modes.txt', 'w')
for checkmode in ucms:
count = ucms[checkmode]
f.write(checkmode + ' ' + str(count) + '\n')
f.close()
logging.info("Processing categories...")
ctgs = Counter()
for app in metaapps:
for category in app['Categories']:
ctgs[category] += 1
f = open('stats/categories.txt', 'w')
for category in ctgs:
count = ctgs[category]
f.write(category + ' ' + str(count) + '\n')
f.close()
logging.info("Processing antifeatures...")
afs = Counter()
for app in metaapps:
if app['AntiFeatures'] is None:
continue
antifeatures = [a.strip() for a in app['AntiFeatures'].split(',')]
for antifeature in antifeatures:
afs[antifeature] += 1
f = open('stats/antifeatures.txt', 'w')
for antifeature in afs:
count = afs[antifeature]
f.write(antifeature + ' ' + str(count) + '\n')
f.close()
# Calculate and write stats for licenses...
logging.info("Processing licenses...")
licenses = Counter()
for app in metaapps:
license = app['License']
licenses[license] += 1
f = open('stats/licenses.txt', 'w')
for license in licenses:
count = licenses[license]
f.write(license + ' ' + str(count) + '\n')
f.close()
# Write list of latest apps added to the repo...
logging.info("Processing latest apps...")
latest = knownapks.getlatest(10)
f = open('stats/latestapps.txt', 'w')
for app in latest:
f.write(app + '\n')
f.close()
if unknownapks:
logging.info('\nUnknown apks:')
for apk in unknownapks:
logging.info(apk)
logging.info("Finished.")
if __name__ == "__main__":
main()
| fantastico/fdroidserver | fdroidserver/stats.py | Python | agpl-3.0 | 10,581 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
from webkitpy.layout_tests.port import mac
from webkitpy.layout_tests.port import port_testcase
class MacPortTest(port_testcase.PortTestCase):
os_name = 'mac'
os_version = 'mac10.11'
port_name = 'mac'
full_port_name = 'mac-mac10.11'
port_maker = mac.MacPort
def assert_name(self, port_name, os_version_string, expected):
port = self.make_port(os_version=os_version_string, port_name=port_name)
self.assertEqual(expected, port.name())
def test_operating_system(self):
self.assertEqual('mac', self.make_port().operating_system())
def test_build_path(self):
# Test that optional paths are used regardless of whether they exist.
options = optparse.Values({'configuration': 'Release', 'build_directory': '/foo'})
self.assert_build_path(options, ['/mock-checkout/out/Release'], '/foo/Release')
# Test that optional relative paths are returned unmodified.
options = optparse.Values({'configuration': 'Release', 'build_directory': 'foo'})
self.assert_build_path(options, ['/mock-checkout/out/Release'], 'foo/Release')
# Test that we prefer the legacy dir over the new dir.
options = optparse.Values({'configuration': 'Release', 'build_directory': None})
self.assert_build_path(options, ['/mock-checkout/xcodebuild/Release',
'/mock-checkout/out/Release'], '/mock-checkout/xcodebuild/Release')
def test_build_path_timestamps(self):
options = optparse.Values({'configuration': 'Release', 'build_directory': None})
port = self.make_port(options=options)
port.host.filesystem.maybe_make_directory('/mock-checkout/out/Release')
port.host.filesystem.maybe_make_directory('/mock-checkout/xcodebuild/Release')
# Check with 'out' being newer.
port.host.filesystem.mtime = lambda f: 5 if '/out/' in f else 4
self.assertEqual(port._build_path(), '/mock-checkout/out/Release')
# Check with 'xcodebuild' being newer.
port.host.filesystem.mtime = lambda f: 5 if '/xcodebuild/' in f else 4
self.assertEqual(port._build_path(), '/mock-checkout/xcodebuild/Release')
def test_driver_name_option(self):
self.assertTrue(self.make_port()._path_to_driver().endswith('Content Shell'))
self.assertTrue(self.make_port(options=optparse.Values(dict(driver_name='OtherDriver')))._path_to_driver().endswith('OtherDriver'))
def test_path_to_image_diff(self):
self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/image_diff')
def test_expectation_files(self):
# FIXME: crbug.com/589709 - Delete this test override once the 10.11 failures have been rebaselined or triaged.
pass
| danakj/chromium | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py | Python | bsd-3-clause | 4,329 |
import signal
import time
import os
import copy
import json
import tempfile
import pickle
import itertools
import sys
import multiprocessing
import pyfastaq
import minimap_ariba
from ariba import cluster, common, histogram, mlst_reporter, read_store, report, report_filter, reference_data, tb
class Error (Exception): pass
# passing shared objects (remaining_clusters) through here and thus making them
# explicit arguments to Pool.startmap when running this function. That seems to be
# a recommended safe transfer mechanism as opposed making them attributes of a
# pre-constructed 'obj' variable (although the docs are a bit hazy on that)
def _run_cluster(obj, verbose, clean, fails_dir, remaining_clusters, remaining_clusters_lock):
failed_clusters = os.listdir(fails_dir)
if len(failed_clusters) > 0:
print('Other clusters failed. Will not start cluster', obj.name, file=sys.stderr)
return obj
if verbose:
print('Start running cluster', obj.name, 'in directory', obj.root_dir, flush=True)
try:
obj.run(remaining_clusters=remaining_clusters,remaining_clusters_lock=remaining_clusters_lock)
except:
print('Failed cluster:', obj.name, file=sys.stderr)
with open(os.path.join(fails_dir, obj.name), 'w'):
pass
if verbose:
print('Finished running cluster', obj.name, 'in directory', obj.root_dir, flush=True)
if clean:
if verbose:
print('Deleting cluster dir', obj.root_dir, flush=True)
if os.path.exists(obj.root_dir):
try:
common.rmtree(obj.root_dir)
except:
pass
return obj
class Clusters:
def __init__(self,
refdata_dir,
reads_1,
reads_2,
outdir,
extern_progs,
version_report_lines=None,
assembly_kmer=21,
assembly_coverage=100,
threads=1,
verbose=False,
assembler='fermilite',
spades_mode='rna',
spades_options=None,
max_insert=1000,
min_scaff_depth=10,
nucmer_min_id=90,
nucmer_min_len=20,
nucmer_breaklen=200,
assembled_threshold=0.95,
unique_threshold=0.03,
max_gene_nt_extend=30,
clean=True,
tmp_dir=None,
):
self.refdata_dir = os.path.abspath(refdata_dir)
self.refdata, self.cluster_ids = self._load_reference_data_from_dir(refdata_dir)
self.reads_1 = os.path.abspath(reads_1)
self.reads_2 = os.path.abspath(reads_2)
self.outdir = os.path.abspath(outdir)
self.extern_progs = extern_progs
self.clusters_tsv = os.path.abspath(os.path.join(refdata_dir, '02.cdhit.clusters.tsv'))
self.all_ref_seqs_fasta = os.path.abspath(os.path.join(refdata_dir, '02.cdhit.all.fa'))
if version_report_lines is None:
self.version_report_lines = []
else:
self.version_report_lines = version_report_lines
self.clean = clean
self.logs_dir = os.path.join(self.outdir, 'Logs')
self.assembler = assembler
self.assembly_kmer = assembly_kmer
self.assembly_coverage = assembly_coverage
self.spades_mode = spades_mode
self.spades_options = spades_options
self.cdhit_files_prefix = os.path.join(self.refdata_dir, 'cdhit')
self.cdhit_cluster_representatives_fa = self.cdhit_files_prefix + '.cluster_representatives.fa'
self.bam_prefix = os.path.join(self.outdir, 'map_reads_to_cluster_reps')
self.bam = self.bam_prefix + '.bam'
self.report_file_all_tsv = os.path.join(self.outdir, 'debug.report.tsv')
self.report_file_filtered = os.path.join(self.outdir, 'report.tsv')
self.mlst_reports_prefix = os.path.join(self.outdir, 'mlst_report')
self.mlst_profile_file = os.path.join(self.refdata_dir, 'pubmlst.profile.txt')
self.tb_resistance_calls_file = os.path.join(self.outdir, 'tb.resistance.json')
self.catted_assembled_seqs_fasta = os.path.join(self.outdir, 'assembled_seqs.fa.gz')
self.catted_genes_matching_refs_fasta = os.path.join(self.outdir, 'assembled_genes.fa.gz')
self.catted_assemblies_fasta = os.path.join(self.outdir, 'assemblies.fa.gz')
self.threads = threads
self.verbose = verbose
self.max_insert = max_insert
self.insert_hist_bin = 10
self.insert_hist = histogram.Histogram(self.insert_hist_bin)
self.insert_size = None
self.insert_sspace_sd = None
self.insert_proper_pair_max = None
self.min_scaff_depth = min_scaff_depth
self.nucmer_min_id = nucmer_min_id
self.nucmer_min_len = nucmer_min_len
self.nucmer_breaklen = nucmer_breaklen
self.assembled_threshold = assembled_threshold
self.unique_threshold = unique_threshold
self.max_gene_nt_extend = max_gene_nt_extend
self.cluster_to_dir = {} # gene name -> abs path of cluster directory
self.clusters = {} # gene name -> Cluster object
self.cluster_read_counts = {} # gene name -> number of reads
self.cluster_base_counts = {} # gene name -> number of bases
self.pool = None
self.fails_dir = os.path.join(self.outdir ,'.fails')
self.clusters_all_ran_ok = True
for d in [self.outdir, self.logs_dir, self.fails_dir]:
try:
os.mkdir(d)
except:
raise Error('Error mkdir ' + d)
if tmp_dir is None:
if 'ARIBA_TMPDIR' in os.environ:
tmp_dir = os.path.abspath(os.environ['ARIBA_TMPDIR'])
elif 'TMPDIR' in os.environ:
tmp_dir = os.path.abspath(os.environ['TMPDIR'])
else:
tmp_dir = self.outdir
if not os.path.exists(tmp_dir):
raise Error('Temporary directory ' + tmp_dir + ' not found. Cannot continue')
if self.clean:
self.tmp_dir_obj = tempfile.TemporaryDirectory(prefix='ariba.tmp.', dir=os.path.abspath(tmp_dir))
self.tmp_dir = self.tmp_dir_obj.name
else:
self.tmp_dir_obj = None
self.tmp_dir = os.path.join(self.outdir, 'clusters')
try:
os.mkdir(self.tmp_dir)
except:
raise Error('Error making directory ' + self.tmp_dir)
if self.verbose:
print('Temporary directory:', self.tmp_dir)
for i in [x for x in dir(signal) if x.startswith("SIG") and x not in {'SIGCHLD', 'SIGCLD', 'SIGPIPE', 'SIGTSTP', 'SIGCONT'}]:
try:
signum = getattr(signal, i)
signal.signal(signum, self._receive_signal)
except:
pass
def _stop_pool(self):
if self.pool is None:
return
self.pool.close()
self.pool.terminate()
while len(multiprocessing.active_children()) > 0:
time.sleep(1)
def _emergency_stop(self):
self._stop_pool()
if self.clean:
try:
self.tmp_dir_obj.cleanup()
except:
pass
def _receive_signal(self, signum, stack):
print('Stopping! Signal received:', signum, file=sys.stderr, flush=True)
self._emergency_stop()
sys.exit(1)
@classmethod
def _load_reference_data_info_file(cls, filename):
data = {
'genetic_code': None
}
with open(filename) as f:
for line in f:
key, val = line.rstrip().split('\t')
if key in data:
data[key] = val
if None in data.values():
missing_values = [x for x in data if data[x] is None]
raise Error('Error reading reference info file ' + filename + '. These values not found: ' + ','.join(missing_values))
data['genetic_code'] = int(data['genetic_code'])
return data
@staticmethod
def _load_reference_data_from_dir(indir):
if not os.path.exists(indir):
raise Error('Error loading reference data. Input directory ' + indir + ' not found. Cannot continue')
fasta_file = os.path.join(indir, '02.cdhit.all.fa')
metadata_file = os.path.join(indir, '01.filter.check_metadata.tsv')
info_file = os.path.join(indir, '00.info.txt')
parameters_file = os.path.join(indir, '00.params.json')
clusters_pickle_file = os.path.join(indir, '02.cdhit.clusters.pickle')
params = Clusters._load_reference_data_info_file(info_file)
refdata = reference_data.ReferenceData(
[fasta_file],
[metadata_file],
genetic_code=params['genetic_code'],
parameters_file=parameters_file,
)
with open(clusters_pickle_file, 'rb') as f:
cluster_ids = pickle.load(f)
return refdata, cluster_ids
def _map_and_cluster_reads(self):
if self.verbose:
print('{:_^79}'.format(' Mapping reads to clustered genes '), flush=True)
minimap_prefix = 'minimap'
self._minimap_reads_to_all_ref_seqs(
self.clusters_tsv,
self.all_ref_seqs_fasta,
self.reads_1,
self.reads_2,
minimap_prefix,
verbose=self.verbose
)
if self.verbose:
print('Finished mapping\n')
print('{:_^79}'.format(' Generating clusters '), flush=True)
self.cluster_to_rep, self.cluster_read_counts, self.cluster_base_counts, self.insert_hist, self.proper_pairs = self._load_minimap_files(minimap_prefix, self.insert_hist_bin)
self.cluster_to_dir = {x: os.path.join(self.tmp_dir, x) for x in self.cluster_to_rep}
reads_file_for_read_store = minimap_prefix + '.reads'
if len(self.cluster_read_counts):
if self.verbose:
filehandle = sys.stdout
else:
filehandle = None
self.read_store = read_store.ReadStore(
reads_file_for_read_store,
os.path.join(self.outdir, 'read_store'),
log_fh=filehandle
)
os.unlink(reads_file_for_read_store)
if self.clean:
for suffix in ['cluster2representative', 'clusterCounts', 'insertHistogram', 'properPairs']:
filename = minimap_prefix + '.' + suffix
try:
os.unlink(filename)
except:
pass
if self.verbose:
print('Found', self.proper_pairs, 'proper read pairs from minimap')
print('Total clusters to perform local assemblies:', len(self.cluster_to_dir), flush=True)
@staticmethod
def _minimap_reads_to_all_ref_seqs(clusters_tsv, ref_fasta, reads_1, reads_2, outprefix, verbose=False):
got = minimap_ariba.minimap_ariba(clusters_tsv, ref_fasta, reads_1, reads_2, outprefix)
if (got != 0):
raise Error('Error running minimap. Cannot continue')
@classmethod
def _load_minimap_out_cluster2representative(cls, infile):
cluster2rep = {}
with open(infile) as f:
for line in f:
cluster, rep = line.rstrip().split('\t')
cluster2rep[cluster] = rep
return cluster2rep
@classmethod
def _load_minimap_out_cluster_counts(cls, infile):
reads = {}
bases = {}
with open(infile) as f:
for line in f:
cluster, read, base = line.rstrip().split('\t')
reads[cluster] = int(read)
bases[cluster] = int(base)
return reads, bases
@classmethod
def _load_minimap_insert_histogram(cls, infile, bin_size):
hist = histogram.Histogram(bin_size)
with open(infile) as f:
for line in f:
value, count = line.rstrip().split('\t')
hist.add(int(value), count=int(count))
return hist
@classmethod
def _load_minimap_proper_pairs(cls, infile):
with open(infile) as f:
for line in f:
pairs = int(line.rstrip())
break
return pairs
@staticmethod
def _load_minimap_files(inprefix, hist_bin_size):
cluster2rep = Clusters._load_minimap_out_cluster2representative(inprefix + '.cluster2representative')
cluster_read_count, cluster_base_count = Clusters._load_minimap_out_cluster_counts(inprefix + '.clusterCounts')
insert_hist = Clusters._load_minimap_insert_histogram(inprefix + '.insertHistogram', hist_bin_size)
proper_pairs = Clusters._load_minimap_proper_pairs(inprefix + '.properPairs')
return cluster2rep, cluster_read_count, cluster_base_count, insert_hist, proper_pairs
def _set_insert_size_data(self):
if len(self.insert_hist) == 0:
return False
else:
(x, self.insert_size, pc95, self.insert_sspace_sd) = self.insert_hist.stats()
self.insert_sspace_sd = min(1, self.insert_sspace_sd)
self.insert_proper_pair_max = 1.1 * pc95
if self.verbose:
print('\nInsert size information from reads mapped to reference genes:')
print('Insert size:', self.insert_size, sep='\t')
print('Insert sspace sd:', self.insert_sspace_sd, sep='\t')
print('Max insert:', self.insert_proper_pair_max, sep='\t')
print()
return True
def _init_and_run_clusters(self):
if len(self.cluster_to_dir) == 0:
raise Error('Did not get any reads mapped to genes. Cannot continue')
counter = 0
cluster_list = []
self.log_files = []
        # How the thread count within each Cluster.run is managed:
# We want to handle those cases where there are more total threads allocated to the application than there are clusters
# remaining to run (for example,
# there are only two references, and eight threads). If we keep the default thread value of 1 in cluster. Cluster,
# then we will be wasting the allocated threads. The most simple approach would be to divide all threads equally between clusters
# before calling Pool.map. Multithreaded external programs like Spades and Bowtie2 are then called with multiple threads. That should
# never be slower than keeping just one thread in cluster.Cluster, except maybe in the extreme cases when (if)
# a multi-threaded run of the external program takes longer wall-clock time than a single-threaded one.
# However, this solution would always keep
# Cluster.threads=1 if the initial number of clusters > number of total threads. This can result in inefficiency at the
# tail of the Pool.map execution flow - when the clusters are getting finished overall, we are waiting for the completion of
# fewer and fewer remaining
# single-threaded cluster tasks while more and more total threads are staying idle. We mitigate this through the following approach:
# - Create a shared Value object that holds the number of remaining clusters (remaining_clusters).
# - Each Cluster.run decrements the remaining_clusters when it completes
# - Cluster.run sets its own thread count to max(1,threads_total//remaining_clusters). This can be done as many times
# as needed at various points within Cluster.run (e.g. once before Spades is called, and again before Bowtie2 is called),
# in order to catch more idle threads.
# This is a simple and conservative approach to adaptively use all threads at the tail of the map flow. It
# never over-subscribes the threads, and it does not require any extra blocking within Cluster.run in order to
        # wait for threads to become available.
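        # Illustrative sketch of that recomputation (an assumption -- the real logic
        # lives inside cluster.Cluster.run, not in this method):
        #   with remaining_clusters_lock:
        #       n_left = max(1, remaining_clusters.value)
        #   self.threads = max(1, self.threads_total // n_left)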
for cluster_name in sorted(self.cluster_to_dir):
counter += 1
if self.cluster_read_counts[cluster_name] <= 2:
if self.verbose:
print('Not constructing cluster ', cluster_name, ' because it only has ', self.cluster_read_counts[cluster_name], ' reads (', counter, ' of ', len(self.cluster_to_dir), ')', sep='')
continue
if self.verbose:
print('Constructing cluster ', cluster_name, ' (', counter, ' of ', len(self.cluster_to_dir), ')', sep='')
new_dir = self.cluster_to_dir[cluster_name]
self.log_files.append(os.path.join(self.logs_dir, cluster_name + '.log'))
cluster_list.append(cluster.Cluster(
new_dir,
cluster_name,
self.refdata,
all_ref_seqs_fasta=self.all_ref_seqs_fasta,
fail_file=os.path.join(self.fails_dir, cluster_name),
read_store=self.read_store,
reference_names=self.cluster_ids[cluster_name],
logfile=self.log_files[-1],
assembly_coverage=self.assembly_coverage,
assembly_kmer=self.assembly_kmer,
assembler=self.assembler,
max_insert=self.insert_proper_pair_max,
min_scaff_depth=self.min_scaff_depth,
nucmer_min_id=self.nucmer_min_id,
nucmer_min_len=self.nucmer_min_len,
nucmer_breaklen=self.nucmer_breaklen,
reads_insert=self.insert_size,
sspace_k=self.min_scaff_depth,
sspace_sd=self.insert_sspace_sd,
threads=1, # initially set to 1, then will adaptively self-modify while running
assembled_threshold=self.assembled_threshold,
unique_threshold=self.unique_threshold,
max_gene_nt_extend=self.max_gene_nt_extend,
spades_mode=self.spades_mode,
spades_options=self.spades_options,
clean=self.clean,
extern_progs=self.extern_progs,
threads_total=self.threads
))
# Here is why we use proxy objects from a Manager process below
# instead of simple shared multiprocessing.Value counter:
# Shared memory objects in multiprocessing use tempfile module to
# create temporary directory, then create temporary file inside it,
# memmap the file and unlink it. If TMPDIR envar points to a NFS
# mount, the final cleanup handler from multiprocessing will often
        # raise an exception due to a stale NFS file (.nfsxxxx) from a shutil.rmtree
# call. See help on tempfile.gettempdir() for how the default location of
# temporary files is selected. The exception is caught in except clause
# inside multiprocessing cleanup, and only a harmless traceback is printed,
# but it looks very spooky to the user and causes confusion. We use
# instead shared proxies from the Manager. Those do not rely on shared
        # memory, and thus bypass the NFS issues. The counter is accessed infrequently
# relative to computations, so the performance does not suffer.
# default authkey in the manager will be some generated random-looking string
manager = multiprocessing.Manager()
remaining_clusters = manager.Value('l',len(cluster_list))
# manager.Value does not provide access to the internal RLock that we need for
# implementing atomic -=, so we need to carry around a separate RLock object.
remaining_clusters_lock = manager.RLock()
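        # Illustrative sketch of the atomic decrement each worker performs when it
        # finishes (an assumption -- the actual code is in _run_cluster / cluster.Cluster):
        #   with remaining_clusters_lock:
        #       remaining_clusters.value -= 1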
try:
if self.threads > 1:
self.pool = multiprocessing.Pool(self.threads)
cluster_list = self.pool.starmap(_run_cluster, zip(cluster_list, itertools.repeat(self.verbose), itertools.repeat(self.clean), itertools.repeat(self.fails_dir),
itertools.repeat(remaining_clusters),itertools.repeat(remaining_clusters_lock)))
# harvest the pool as soon as we no longer need it
self.pool.close()
self.pool.join()
else:
for c in cluster_list:
_run_cluster(c, self.verbose, self.clean, self.fails_dir, remaining_clusters, remaining_clusters_lock)
except:
self.clusters_all_ran_ok = False
if self.verbose:
print('Final value of remaining_clusters counter:', remaining_clusters)
remaining_clusters = None
remaining_clusters_lock = None
manager.shutdown()
if len(os.listdir(self.fails_dir)) > 0:
self.clusters_all_ran_ok = False
self.clusters = {c.name: c for c in cluster_list}
@staticmethod
def _write_report(clusters_in, tsv_out):
columns = copy.copy(report.columns)
columns[0] = '#' + columns[0]
f = pyfastaq.utils.open_file_write(tsv_out)
print('\t'.join(columns), file=f)
columns[0] = columns[0][1:]
for seq_name in sorted(clusters_in):
if clusters_in[seq_name].report_lines is None:
continue
for line in clusters_in[seq_name].report_lines:
print(line, file=f)
pyfastaq.utils.close(f)
def _write_catted_assemblies_fasta(self, outfile):
f = pyfastaq.utils.open_file_write(outfile)
for gene in sorted(self.clusters):
try:
seq_dict = self.clusters[gene].assembly.sequences
except:
continue
for seq_name in sorted(seq_dict):
print(seq_dict[seq_name], file=f)
pyfastaq.utils.close(f)
def _write_catted_assembled_seqs_fasta(self, outfile):
f = pyfastaq.utils.open_file_write(outfile)
for gene in sorted(self.clusters):
try:
seq_dict = self.clusters[gene].assembly_compare.assembled_reference_sequences
except:
continue
for seq_name in sorted(seq_dict):
print(seq_dict[seq_name], file=f)
pyfastaq.utils.close(f)
def _write_catted_genes_matching_refs_fasta(self, outfile):
f = pyfastaq.utils.open_file_write(outfile)
for gene in sorted(self.clusters):
if self.clusters[gene].assembly_compare is not None and self.clusters[gene].assembly_compare.gene_matching_ref is not None:
seq = copy.copy(self.clusters[gene].assembly_compare.gene_matching_ref)
seq.id += '.' + '.'.join([
self.clusters[gene].assembly_compare.gene_matching_ref_type,
str(self.clusters[gene].assembly_compare.gene_start_bases_added),
str(self.clusters[gene].assembly_compare.gene_end_bases_added)
])
print(seq, file=f)
pyfastaq.utils.close(f)
def _clean(self):
if self.clean:
common.rmtree(self.fails_dir)
try:
self.tmp_dir_obj.cleanup()
except:
pass
if self.verbose:
print('Deleting Logs directory', self.logs_dir)
common.rmtree(self.logs_dir)
try:
if self.verbose:
print('Deleting reads store files', self.read_store.outfile + '[.tbi]')
self.read_store.clean()
except:
pass
else:
if self.verbose:
print('Not deleting anything because --noclean used')
@classmethod
def _write_mlst_reports(cls, mlst_profile_file, ariba_report_tsv, outprefix, verbose=False):
if os.path.exists(mlst_profile_file):
if verbose:
print('\nMaking MLST reports', flush=True)
reporter = mlst_reporter.MlstReporter(ariba_report_tsv, mlst_profile_file, outprefix)
reporter.run()
@classmethod
def _write_tb_resistance_calls_json(cls, ariba_report_tsv, outfile):
calls = tb.report_to_resistance_dict(ariba_report_tsv)
with open(outfile, 'w') as f:
json.dump(calls, f, sort_keys=True, indent=4)
def write_versions_file(self, original_dir):
with open('version_info.txt', 'w') as f:
print('ARIBA run with this command:', file=f)
print(' '.join([sys.argv[0]] + sys.argv[1:]), file=f)
print('from this directory:', original_dir, file=f)
print(file=f)
print(*self.version_report_lines, sep='\n', file=f)
def run(self):
try:
self._run()
except Error as err:
self._emergency_stop()
raise Error('Something went wrong during ariba run. Cannot continue. Error was:\n' + str(err))
def _run(self):
cwd = os.getcwd()
try:
os.chdir(self.outdir)
self.write_versions_file(cwd)
self._map_and_cluster_reads()
self.log_files = None
if len(self.cluster_to_dir) > 0:
got_insert_data_ok = self._set_insert_size_data()
if not got_insert_data_ok:
print('WARNING: not enough proper read pairs (found ' + str(self.proper_pairs) + ') to determine insert size.', file=sys.stderr)
print('This probably means that very few reads were mapped at all. No local assemblies will be run', file=sys.stderr)
if self.verbose:
print('Not enough proper read pairs mapped to determine insert size. Skipping all assemblies.', flush=True)
else:
if self.verbose:
print('{:_^79}'.format(' Assembling each cluster '))
print('Will run', self.threads, 'cluster(s) in parallel', flush=True)
self._init_and_run_clusters()
if self.verbose:
print('Finished assembling clusters\n')
else:
if self.verbose:
print('No reads mapped. Skipping all assemblies', flush=True)
print('WARNING: no reads mapped to reference genes. Therefore no local assemblies will be run', file=sys.stderr)
if not self.clusters_all_ran_ok:
raise Error('At least one cluster failed! Stopping...')
if self.verbose:
print('{:_^79}'.format(' Writing reports '), flush=True)
print('Making', self.report_file_all_tsv)
self._write_report(self.clusters, self.report_file_all_tsv)
if self.verbose:
print('Making', self.report_file_filtered)
rf = report_filter.ReportFilter(infile=self.report_file_all_tsv)
rf.run(self.report_file_filtered)
if self.verbose:
print()
print('{:_^79}'.format(' Writing fasta of assembled sequences '), flush=True)
print(self.catted_assembled_seqs_fasta, 'and', self.catted_genes_matching_refs_fasta, flush=True)
self._write_catted_assembled_seqs_fasta(self.catted_assembled_seqs_fasta)
self._write_catted_genes_matching_refs_fasta(self.catted_genes_matching_refs_fasta)
self._write_catted_assemblies_fasta(self.catted_assemblies_fasta)
if self.log_files is not None:
clusters_log_file = os.path.join(self.outdir, 'log.clusters.gz')
if self.verbose:
print()
print('{:_^79}'.format(' Catting cluster log files '), flush=True)
print('Writing file', clusters_log_file, flush=True)
common.cat_files(self.log_files, clusters_log_file)
if self.verbose:
print()
print('{:_^79}'.format(' Cleaning files '), flush=True)
self._clean()
Clusters._write_mlst_reports(self.mlst_profile_file, self.report_file_filtered, self.mlst_reports_prefix, verbose=self.verbose)
if 'tb' in self.refdata.extra_parameters and self.refdata.extra_parameters['tb']:
Clusters._write_tb_resistance_calls_json(self.report_file_filtered, self.tb_resistance_calls_file)
if self.clusters_all_ran_ok and self.verbose:
print('\nAll done!\n')
finally:
os.chdir(cwd)
| martinghunt/ariba | ariba/clusters.py | Python | gpl-3.0 | 28,393 |
#!/usr/bin/env python
import os,os.path,shutil
bin_path = os.path.abspath(__file__)
d_path = os.path.abspath(os.path.join(bin_path, os.pardir))
m_path = os.path.abspath(os.path.join(d_path, os.pardir))
y_path = os.path.abspath(os.path.join(m_path, os.pardir))
category_path = os.path.abspath(os.path.join(y_path, os.pardir))
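# Assumed directory layout, inferred from the parent-directory walk above (illustrative):
#   <category>/<YYYY>/<MM>/<DD>/<this script and the note files it sorts>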
print("ERROR: use this script by removing exit only if you are really really sure you know what you're doing.")
exit(1)
print("day_path: "+d_path)
#print(m_path)
print("category_path: " + category_path)
print("")
if (len(os.path.basename(y_path))!=4) or (len(os.path.basename(m_path))!=2) or (len(os.path.basename(d_path))!=2):
print("either this is >= the year 10000 you must run this script from a day folder")
folder_path = d_path # not ".", so as to avoid self via compare below
for sub_name in os.listdir(folder_path):
sub_path = os.path.join(folder_path, sub_name)
if sub_name[:1]!="." and os.path.isfile(sub_path):
if sub_path != bin_path:
name_no_ext = os.path.splitext(sub_name)[0]
print(sub_name)
change_enable = False
with open(sub_path) as f:
content = f.readlines()
stated_date = None
actual_date = None
stated_date_i = None
time = None
for line_i in range(0,len(content)):
content[line_i] = content[line_i].rstrip() # or newlines via rstrip("\n\r")
line = content[line_i]
#print(line)
op_i = line.find(":")
if op_i >= 0:
v_name = line[:op_i].strip()
v_val = line[op_i+1:].strip()
if (len(v_val)>=2) and (v_val[:1]=="'") and (v_val[-1:]=="'"):
v_val = v_val[1:-1]
if v_name == "stated_date":
stated_date = v_val
stated_date_i = line_i
elif v_name == "ctime":
actual_date = v_val[:10] # get 2018-01-01 from time
date = None
if stated_date is not None:
date = stated_date
print(" stated_date: "+stated_date)
if stated_date==actual_date:
print(" info: removing same stated date")
content[stated_date_i] = None
change_enable = True
elif actual_date is not None:
date = actual_date
#print(" date: "+date)
else:
#print(" date_missing_from: "+sub_name)
pass
target_item_path = None
if date is not None:
if (date[4:5]=="-") and (date[7:8]=="-"):
target_y_s = date[0:4]
target_m_s = date[5:7]
target_d_s = date[8:10]
target_y_path = os.path.join(category_path, target_y_s)
target_m_path = os.path.join(target_y_path, target_m_s)
target_d_path = os.path.join(target_m_path, target_d_s)
if (target_d_path != d_path):
try:
target_item_path = os.path.join(target_d_path, sub_name)
shutil.move(sub_path, target_item_path)
print(" - moved to "+target_d_path)
except:
print(" - could not finish moving to to "+target_d_path)
else:
print(" - bad date format")
if change_enable:
outs = None
save_path = None
if target_item_path is not None:
save_path = target_item_path
else:
save_path = sub_path
outs = open(save_path, 'w')
print(" - resaving")
for line_i in range(0,len(content)):
try:
if content[line_i] is not None:
outs.write(content[line_i] + "\n")
else:
print(" - removed line number "+str(line_i+1))
except:
print(" - could not finish writing line "+str(line_i+1))
outs.close()
#else:
# print("<the current script>")
| expertmm/integratoredu | etc/moveit.py | Python | gpl-3.0 | 4,480 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Tests for the `spack.verify` module"""
import os
import shutil
import llnl.util.filesystem as fs
import spack.spec
import spack.store
import spack.util.spack_json as sjson
import spack.verify
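# Rough shape of a manifest entry as exercised by the tests below (illustrative;
# the authoritative structure comes from spack.verify.create_manifest_entry):
#   {'type': 'file', 'size': 14, 'hash': '...', 'time': 1234.0,
#    'mode': 0o644, 'owner': 1000, 'group': 1000}
# Symlink entries additionally carry a 'dest' key.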
def test_link_manifest_entry(tmpdir):
# Test that symlinks are properly checked against the manifest.
# Test that the appropriate errors are generated when the check fails.
file = str(tmpdir.join('file'))
open(file, 'a').close()
link = str(tmpdir.join('link'))
os.symlink(file, link)
data = spack.verify.create_manifest_entry(link)
assert data['type'] == 'link'
assert data['dest'] == file
assert all(x in data for x in ('mode', 'owner', 'group'))
results = spack.verify.check_entry(link, data)
assert not results.has_errors()
data['type'] = 'garbage'
results = spack.verify.check_entry(link, data)
assert results.has_errors()
assert link in results.errors
assert results.errors[link] == ['type']
data['type'] = 'link'
file2 = str(tmpdir.join('file2'))
open(file2, 'a').close()
os.remove(link)
os.symlink(file2, link)
results = spack.verify.check_entry(link, data)
assert results.has_errors()
assert link in results.errors
assert results.errors[link] == ['link']
def test_dir_manifest_entry(tmpdir):
# Test that directories are properly checked against the manifest.
# Test that the appropriate errors are generated when the check fails.
dirent = str(tmpdir.join('dir'))
fs.mkdirp(dirent)
data = spack.verify.create_manifest_entry(dirent)
assert data['type'] == 'dir'
assert all(x in data for x in ('mode', 'owner', 'group'))
results = spack.verify.check_entry(dirent, data)
assert not results.has_errors()
data['type'] = 'garbage'
results = spack.verify.check_entry(dirent, data)
assert results.has_errors()
assert dirent in results.errors
assert results.errors[dirent] == ['type']
def test_file_manifest_entry(tmpdir):
# Test that files are properly checked against the manifest.
# Test that the appropriate errors are generated when the check fails.
orig_str = 'This is a file'
new_str = 'The file has changed'
file = str(tmpdir.join('dir'))
with open(file, 'w') as f:
f.write(orig_str)
data = spack.verify.create_manifest_entry(file)
assert data['type'] == 'file'
assert data['size'] == len(orig_str)
assert all(x in data for x in ('mode', 'owner', 'group'))
results = spack.verify.check_entry(file, data)
assert not results.has_errors()
data['type'] = 'garbage'
results = spack.verify.check_entry(file, data)
assert results.has_errors()
assert file in results.errors
assert results.errors[file] == ['type']
data['type'] = 'file'
with open(file, 'w') as f:
f.write(new_str)
results = spack.verify.check_entry(file, data)
expected = ['size', 'hash']
mtime = os.stat(file).st_mtime
if mtime != data['time']:
expected.append('mtime')
assert results.has_errors()
assert file in results.errors
assert sorted(results.errors[file]) == sorted(expected)
def test_check_chmod_manifest_entry(tmpdir):
# Check that the verification properly identifies errors for files whose
# permissions have been modified.
file = str(tmpdir.join('dir'))
with open(file, 'w') as f:
f.write('This is a file')
data = spack.verify.create_manifest_entry(file)
os.chmod(file, data['mode'] - 1)
results = spack.verify.check_entry(file, data)
assert results.has_errors()
assert file in results.errors
assert results.errors[file] == ['mode']
def test_check_prefix_manifest(tmpdir):
# Test the verification of an entire prefix and its contents
prefix_path = tmpdir.join('prefix')
prefix = str(prefix_path)
spec = spack.spec.Spec('libelf')
spec._mark_concrete()
spec.prefix = prefix
results = spack.verify.check_spec_manifest(spec)
assert results.has_errors()
assert prefix in results.errors
assert results.errors[prefix] == ['manifest missing']
metadata_dir = str(prefix_path.join('.spack'))
bin_dir = str(prefix_path.join('bin'))
other_dir = str(prefix_path.join('other'))
for d in (metadata_dir, bin_dir, other_dir):
fs.mkdirp(d)
file = os.path.join(other_dir, 'file')
with open(file, 'w') as f:
f.write("I'm a little file short and stout")
link = os.path.join(bin_dir, 'run')
os.symlink(file, link)
spack.verify.write_manifest(spec)
results = spack.verify.check_spec_manifest(spec)
assert not results.has_errors()
os.remove(link)
malware = os.path.join(metadata_dir, 'hiddenmalware')
with open(malware, 'w') as f:
f.write("Foul evil deeds")
results = spack.verify.check_spec_manifest(spec)
assert results.has_errors()
assert all(x in results.errors for x in (malware, link))
assert len(results.errors) == 2
assert results.errors[link] == ['deleted']
assert results.errors[malware] == ['added']
manifest_file = os.path.join(spec.prefix,
spack.store.layout.metadata_dir,
spack.store.layout.manifest_file_name)
with open(manifest_file, 'w') as f:
f.write("{This) string is not proper json")
results = spack.verify.check_spec_manifest(spec)
assert results.has_errors()
assert results.errors[spec.prefix] == ['manifest corrupted']
def test_single_file_verification(tmpdir):
# Test the API to verify a single file, including finding the package
# to which it belongs
filedir = os.path.join(str(tmpdir), 'a', 'b', 'c', 'd')
filepath = os.path.join(filedir, 'file')
metadir = os.path.join(str(tmpdir), spack.store.layout.metadata_dir)
fs.mkdirp(filedir)
fs.mkdirp(metadir)
with open(filepath, 'w') as f:
f.write("I'm a file")
data = spack.verify.create_manifest_entry(filepath)
manifest_file = os.path.join(metadir,
spack.store.layout.manifest_file_name)
with open(manifest_file, 'w') as f:
sjson.dump({filepath: data}, f)
results = spack.verify.check_file_manifest(filepath)
assert not results.has_errors()
os.utime(filepath, (0, 0))
with open(filepath, 'w') as f:
f.write("I changed.")
results = spack.verify.check_file_manifest(filepath)
expected = ['hash']
mtime = os.stat(filepath).st_mtime
if mtime != data['time']:
expected.append('mtime')
assert results.has_errors()
assert filepath in results.errors
assert sorted(results.errors[filepath]) == sorted(expected)
shutil.rmtree(metadir)
results = spack.verify.check_file_manifest(filepath)
assert results.has_errors()
assert results.errors[filepath] == ['not owned by any package']
| LLNL/spack | lib/spack/spack/test/verification.py | Python | lgpl-2.1 | 7,097 |
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.pslist as pslist
import volatility.plugins.mac.common as common
class mac_tasks(pslist.mac_pslist):
""" List Active Tasks """
def __init__(self, config, *args, **kwargs):
pslist.mac_pslist.__init__(self, config, *args, **kwargs)
def allprocs(self):
common.set_plugin_members(self)
tasksaddr = self.addr_space.profile.get_symbol("_tasks")
queue_entry = obj.Object("queue_entry", offset = tasksaddr, vm = self.addr_space)
seen = [tasksaddr]
for task in queue_entry.walk_list(list_head = tasksaddr):
if (task.bsd_info and task.obj_offset not in seen):
proc = task.bsd_info.dereference_as("proc")
yield proc
seen.append(task.obj_offset)
| wroersma/volatility | volatility/plugins/mac/pstasks.py | Python | gpl-2.0 | 1,687 |
from __future__ import print_function
import sys
import rlp
from utils import int_to_big_endian, big_endian_to_int, safe_ord
import db
def _encode_optimized(item):
"""RLP encode (a nested sequence of) bytes"""
if isinstance(item, bytes):
if len(item) == 1 and ord(item) < 128:
return item
prefix = length_prefix(len(item), 128)
else:
item = b''.join([_encode_optimized(x) for x in item])
prefix = length_prefix(len(item), 192)
return prefix + item
def length_prefix(length, offset):
"""Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list
"""
if length < 56:
return chr(offset + length)
else:
length_string = int_to_big_endian(length)
return chr(offset + 56 - 1 + len(length_string)) + length_string
def _decode_optimized(rlp):
o = []
pos = 0
_typ, _len, pos = consume_length_prefix(rlp, pos)
if _typ != list:
return rlp[pos: pos + _len]
while pos < len(rlp):
_, _l, _p = consume_length_prefix(rlp, pos)
o.append(_decode_optimized(rlp[pos: _l + _p]))
pos = _l + _p
return o
def consume_length_prefix(rlp, start):
"""Read a length prefix from an RLP string.
:param rlp: the rlp string to read from
:param start: the position at which to start reading
:returns: a tuple ``(type, length, end)``, where ``type`` is either ``str``
or ``list`` depending on the type of the following payload,
``length`` is the length of the payload in bytes, and ``end`` is
the position of the first payload byte in the rlp string
"""
b0 = safe_ord(rlp[start])
if b0 < 128: # single byte
return (str, 1, start)
elif b0 < 128 + 56: # short string
return (str, b0 - 128, start + 1)
elif b0 < 192: # long string
ll = b0 - 128 - 56 + 1
l = big_endian_to_int(rlp[start + 1:start + 1 + ll])
return (str, l, start + 1 + ll)
elif b0 < 192 + 56: # short list
return (list, b0 - 192, start + 1)
else: # long list
ll = b0 - 192 - 56 + 1
l = big_endian_to_int(rlp[start + 1:start + 1 + ll])
return (list, l, start + 1 + ll)
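# Worked example (illustrative): the RLP encoding of the 3-byte string b'dog' is
# b'\x83dog', so consume_length_prefix(b'\x83dog', 0) returns (str, 3, 1), i.e. a
# string payload of length 3 starting at offset 1.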
#
if sys.version_info.major == 2:
encode_optimized = _encode_optimized
decode_optimized = _decode_optimized
else:
encode_optimized = rlp.codec.encode_raw
decode_optimized = rlp.codec.decode_raw
def main():
import trie
import time
def run():
st = time.time()
x = trie.Trie(db.EphemDB())
for i in range(10000):
x.update(str(i), str(i**3))
        print('elapsed', time.time() - st)
return x.root_hash
trie.rlp_encode = _encode_optimized
    print('trie.rlp_encode = encode_optimized')
r3 = run()
trie.rlp_encode = rlp.codec.encode_raw
    print('trie.rlp_encode = rlp.codec.encode_raw')
r2 = run()
assert r2 == r3
trie.rlp_encode = rlp.encode
    print('trie.rlp_encode = rlp.encode')
r = run()
assert r == r2
if __name__ == '__main__':
main()
| pipermerriam/pyethereum | ethereum/fast_rlp.py | Python | mit | 3,196 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness tests for tf.keras CNN models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.keras.distribute import keras_correctness_test_base
from tensorflow.python.keras.optimizer_v2 import gradient_descent
class DistributionStrategyCnnCorrectnessTest(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self,
initial_weights=None,
distribution=None,
experimental_run_tf_function=None,
input_shapes=None):
del input_shapes
with keras_correctness_test_base.MaybeDistributionScope(distribution):
image = keras.layers.Input(shape=(28, 28, 3), name='image')
c1 = keras.layers.Conv2D(
name='conv1',
filters=16,
kernel_size=(3, 3),
strides=(4, 4),
kernel_regularizer=keras.regularizers.l2(1e-4))(
image)
if self.with_batch_norm == 'regular':
c1 = keras.layers.BatchNormalization(name='bn1')(c1)
elif self.with_batch_norm == 'sync':
c1 = keras.layers.SyncBatchNormalization(name='bn1')(c1)
c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1)
logits = keras.layers.Dense(
10, activation='softmax', name='pred')(
keras.layers.Flatten()(c1))
model = keras.Model(inputs=[image], outputs=[logits])
if initial_weights:
model.set_weights(initial_weights)
model.compile(
optimizer=gradient_descent.SGD(learning_rate=0.1),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'],
experimental_run_tf_function=experimental_run_tf_function)
return model
def _get_data(self, count, shape=(28, 28, 3), num_classes=10):
centers = np.random.randn(num_classes, *shape)
features = []
labels = []
for _ in range(count):
label = np.random.randint(0, num_classes, size=1)[0]
offset = np.random.normal(loc=0, scale=0.1, size=np.prod(shape))
offset = offset.reshape(shape)
labels.append(label)
features.append(centers[label] + offset)
x = np.asarray(features, dtype=np.float32)
y = np.asarray(labels, dtype=np.float32).reshape((count, 1))
return x, y
def get_data(self):
x_train, y_train = self._get_data(
count=keras_correctness_test_base._GLOBAL_BATCH_SIZE *
keras_correctness_test_base._EVAL_STEPS)
x_predict = x_train
return x_train, y_train, x_predict
def get_data_with_partial_last_batch_eval(self):
x_train, y_train = self._get_data(count=1280)
x_eval, y_eval = self._get_data(count=1000)
return x_train, y_train, x_eval, y_eval, x_eval
@combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations())
def test_cnn_correctness(self, distribution, use_numpy, use_validation_data,
experimental_run_tf_function):
self.run_correctness_test(distribution, use_numpy, use_validation_data,
experimental_run_tf_function)
@combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations())
def test_cnn_with_batch_norm_correctness(self, distribution, use_numpy,
use_validation_data,
experimental_run_tf_function):
self.skipTest('Flakily times out, b/134670856')
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
with_batch_norm='regular',
experimental_run_tf_function=experimental_run_tf_function)
@combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations())
def test_cnn_with_sync_batch_norm_correctness(self, distribution, use_numpy,
use_validation_data,
experimental_run_tf_function):
if not context.executing_eagerly() or not experimental_run_tf_function:
self.skipTest('SyncBatchNorm is not enabled in graph mode.')
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
with_batch_norm='sync',
experimental_run_tf_function=experimental_run_tf_function)
@combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies() +
keras_correctness_test_base
.strategy_minus_tpu_and_input_config_combinations_eager())
def test_cnn_correctness_with_partial_last_batch_eval(self, distribution,
use_numpy,
use_validation_data):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
partial_last_batch=True,
training_epochs=1)
@combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies() +
keras_correctness_test_base
.strategy_minus_tpu_and_input_config_combinations_eager())
def test_cnn_with_batch_norm_correctness_and_partial_last_batch_eval(
self, distribution, use_numpy, use_validation_data):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
with_batch_norm='regular',
partial_last_batch=True)
if __name__ == '__main__':
test.main()
| jhseu/tensorflow | tensorflow/python/keras/distribute/keras_image_model_correctness_test.py | Python | apache-2.0 | 6,440 |
"""Support to keep track of user controlled booleans for within automation."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_EDITABLE,
CONF_ICON,
CONF_ID,
CONF_NAME,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import collection
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.service
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType, ServiceCallType
from homeassistant.loader import bind_hass
DOMAIN = "input_boolean"
_LOGGER = logging.getLogger(__name__)
CONF_INITIAL = "initial"
CREATE_FIELDS = {
vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)),
vol.Optional(CONF_INITIAL): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
}
UPDATE_FIELDS = {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_INITIAL): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
}
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.schema_with_slug_keys(vol.Any(UPDATE_FIELDS, None))},
extra=vol.ALLOW_EXTRA,
)
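# Example configuration.yaml entry accepted by CONFIG_SCHEMA (illustrative only):
#
# input_boolean:
#   notify_home:
#     name: Notify when someone arrives home
#     initial: false
#     icon: mdi:car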
RELOAD_SERVICE_SCHEMA = vol.Schema({})
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
class InputBooleanStorageCollection(collection.StorageCollection):
"""Input boolean collection stored in storage."""
CREATE_SCHEMA = vol.Schema(CREATE_FIELDS)
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
async def _process_create_data(self, data: dict) -> dict:
"""Validate the config is valid."""
return self.CREATE_SCHEMA(data)
@callback
def _get_suggested_id(self, info: dict) -> str:
"""Suggest an ID based on the config."""
return info[CONF_NAME]
async def _update_data(self, data: dict, update_data: dict) -> dict:
"""Return a new updated data object."""
update_data = self.UPDATE_SCHEMA(update_data)
return {**data, **update_data}
@bind_hass
def is_on(hass, entity_id):
"""Test if input_boolean is True."""
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up an input boolean."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
id_manager = collection.IDManager()
yaml_collection = collection.YamlCollection(
logging.getLogger(f"{__name__}.yaml_collection"), id_manager
)
collection.sync_entity_lifecycle(
hass, DOMAIN, DOMAIN, component, yaml_collection, InputBoolean.from_yaml
)
storage_collection = InputBooleanStorageCollection(
Store(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
id_manager,
)
collection.sync_entity_lifecycle(
hass, DOMAIN, DOMAIN, component, storage_collection, InputBoolean
)
await yaml_collection.async_load(
[{CONF_ID: id_, **(conf or {})} for id_, conf in config.get(DOMAIN, {}).items()]
)
await storage_collection.async_load()
collection.StorageCollectionWebsocket(
storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
).async_setup(hass)
async def reload_service_handler(service_call: ServiceCallType) -> None:
"""Remove all input booleans and load new ones from config."""
conf = await component.async_prepare_reload(skip_reset=True)
if conf is None:
return
await yaml_collection.async_load(
[
{CONF_ID: id_, **(conf or {})}
for id_, conf in conf.get(DOMAIN, {}).items()
]
)
homeassistant.helpers.service.async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD,
reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA,
)
component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on")
component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")
return True
class InputBoolean(ToggleEntity, RestoreEntity):
"""Representation of a boolean input."""
def __init__(self, config: dict | None):
"""Initialize a boolean input."""
self._config = config
self.editable = True
self._state = config.get(CONF_INITIAL)
@classmethod
def from_yaml(cls, config: dict) -> InputBoolean:
"""Return entity instance initialized from yaml storage."""
input_bool = cls(config)
input_bool.entity_id = f"{DOMAIN}.{config[CONF_ID]}"
input_bool.editable = False
return input_bool
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return name of the boolean input."""
return self._config.get(CONF_NAME)
@property
def extra_state_attributes(self):
"""Return the state attributes of the entity."""
return {ATTR_EDITABLE: self.editable}
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._config.get(CONF_ICON)
@property
def is_on(self):
"""Return true if entity is on."""
return self._state
@property
def unique_id(self):
"""Return a unique ID for the person."""
return self._config[CONF_ID]
async def async_added_to_hass(self):
"""Call when entity about to be added to hass."""
# If not None, we got an initial value.
await super().async_added_to_hass()
if self._state is not None:
return
state = await self.async_get_last_state()
self._state = state and state.state == STATE_ON
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
self._state = False
self.async_write_ha_state()
async def async_update_config(self, config: dict) -> None:
"""Handle when the config is updated."""
self._config = config
self.async_write_ha_state()
| w1ll1am23/home-assistant | homeassistant/components/input_boolean/__init__.py | Python | apache-2.0 | 6,512 |
# -*- coding: UTF-8 -*-
"""
Lastship Add-on (C) 2019
Credits to Placenta and Covenant; our thanks go to their creators
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Addon Name: Lastship
# Addon id: plugin.video.lastship
# Addon Provider: LastShip
import re, os, urllib, urlparse, json, binascii
from resources.lib.modules import client
def google(url):
try:
if any(x in url for x in ['youtube.', 'docid=']): url = 'https://drive.google.com/file/d/%s/view' % re.compile('docid=([\w-]+)').findall(url)[0]
netloc = urlparse.urlparse(url.strip().lower()).netloc
netloc = netloc.split('.google')[0]
if netloc == 'docs' or netloc == 'drive':
url = url.split('/preview', 1)[0]
url = url.replace('drive.google.com', 'docs.google.com')
headers = {'User-Agent': client.agent()}
result = client.request(url, output='extended', headers=headers)
try:
headers['Cookie'] = result[2]['Set-Cookie']
except:
pass
result = result[0]
if netloc == 'docs' or netloc == 'drive':
result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
result = json.loads(result)
result = [i.split('|')[-1] for i in result.split(',')]
result = sum([googletag(i, append_height=True) for i in result], [])
elif netloc == 'photos':
result = result.replace('\r', '').replace('\n', '').replace('\t', '')
result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]
result = result.replace('\\u003d', '=').replace('\\u0026', '&')
result = re.compile('url=(.+?)&').findall(result)
result = [urllib.unquote(i) for i in result]
result = sum([googletag(i, append_height=True) for i in result], [])
elif netloc == 'picasaweb':
id = re.compile('#(\d*)').findall(url)[0]
result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
result = json.loads(result)['feed']['entry']
if len(result) > 1:
result = [i for i in result if str(id) in i['link'][0]['href']][0]
elif len(result) == 1:
result = result[0]
result = result['media']['content']
result = [i['url'] for i in result if 'video' in i['type']]
result = sum([googletag(i, append_height=True) for i in result], [])
elif netloc == 'plus':
id = (urlparse.urlparse(url).path).split('/')[-1]
result = result.replace('\r', '').replace('\n', '').replace('\t', '')
result = result.split('"%s"' % id)[-1].split(']]')[0]
result = result.replace('\\u003d', '=').replace('\\u0026', '&')
result = re.compile('url=(.+?)&').findall(result)
result = [urllib.unquote(i) for i in result]
result = sum([googletag(i, append_height=True) for i in result], [])
result = sorted(result, key=lambda i: i.get('height', 0), reverse=True)
url = []
for q in ['4K', '1440p', '1080p', 'HD', 'SD']:
try:
url += [[i for i in result if i.get('quality') == q][0]]
except:
pass
for i in url:
i.pop('height', None)
i.update({'url': i['url'] + '|%s' % urllib.urlencode(headers)})
if not url: return
return url
except:
return
def googletag(url, append_height=False):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try:
quality = quality[0]
except:
return []
itag_map = {'151': {'quality': 'SD', 'height': 72}, '212': {'quality': 'SD', 'height': 480}, '313': {'quality': '4K', 'height': 2160},
'242': {'quality': 'SD', 'height': 240}, '315': {'quality': '4K', 'height': 2160}, '219': {'quality': 'SD', 'height': 480},
'133': {'quality': 'SD', 'height': 240}, '271': {'quality': '1440p', 'height': 1440}, '272': {'quality': '4K', 'height': 2160},
'137': {'quality': '1080p', 'height': 1080}, '136': {'quality': 'HD', 'height': 720}, '135': {'quality': 'SD', 'height': 480},
'134': {'quality': 'SD', 'height': 360}, '82': {'quality': 'SD', 'height': 360}, '83': {'quality': 'SD', 'height': 480},
'218': {'quality': 'SD', 'height': 480}, '93': {'quality': 'SD', 'height': 360}, '84': {'quality': 'HD', 'height': 720},
'170': {'quality': '1080p', 'height': 1080}, '167': {'quality': 'SD', 'height': 360}, '22': {'quality': 'HD', 'height': 720},
'46': {'quality': '1080p', 'height': 1080}, '160': {'quality': 'SD', 'height': 144}, '44': {'quality': 'SD', 'height': 480},
'45': {'quality': 'HD', 'height': 720}, '43': {'quality': 'SD', 'height': 360}, '94': {'quality': 'SD', 'height': 480},
'5': {'quality': 'SD', 'height': 240}, '6': {'quality': 'SD', 'height': 270}, '92': {'quality': 'SD', 'height': 240},
'85': {'quality': '1080p', 'height': 1080}, '308': {'quality': '1440p', 'height': 1440}, '278': {'quality': 'SD', 'height': 144},
'78': {'quality': 'SD', 'height': 480}, '302': {'quality': 'HD', 'height': 720}, '303': {'quality': '1080p', 'height': 1080},
'245': {'quality': 'SD', 'height': 480}, '244': {'quality': 'SD', 'height': 480}, '247': {'quality': 'HD', 'height': 720},
'246': {'quality': 'SD', 'height': 480}, '168': {'quality': 'SD', 'height': 480}, '266': {'quality': '4K', 'height': 2160},
'243': {'quality': 'SD', 'height': 360}, '264': {'quality': '1440p', 'height': 1440}, '102': {'quality': 'HD', 'height': 720},
'100': {'quality': 'SD', 'height': 360}, '101': {'quality': 'SD', 'height': 480}, '95': {'quality': 'HD', 'height': 720},
'248': {'quality': '1080p', 'height': 1080}, '96': {'quality': '1080p', 'height': 1080}, '91': {'quality': 'SD', 'height': 144},
'38': {'quality': '4K', 'height': 3072}, '59': {'quality': 'SD', 'height': 480}, '17': {'quality': 'SD', 'height': 144},
'132': {'quality': 'SD', 'height': 240}, '18': {'quality': 'SD', 'height': 360}, '37': {'quality': '1080p', 'height': 1080},
'35': {'quality': 'SD', 'height': 480}, '34': {'quality': 'SD', 'height': 360}, '298': {'quality': 'HD', 'height': 720},
'299': {'quality': '1080p', 'height': 1080}, '169': {'quality': 'HD', 'height': 720}}
if quality in itag_map:
quality = itag_map[quality]
if append_height:
return [{'quality': quality['quality'], 'height': quality['height'], 'url': url}]
else:
return [{'quality': quality['quality'], 'url': url}]
else:
return []
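# Example (illustrative): a URL containing 'itag=22' resolves through the table above
# to [{'quality': 'HD', 'url': url}]; unknown itags yield an empty list.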
def googlepass(url):
try:
try:
headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
except:
headers = None
url = url.split('|')[0].replace('\\', '')
url = client.request(url, headers=headers, output='geturl')
if 'requiressl=yes' in url:
url = url.replace('http://', 'https://')
else:
url = url.replace('https://', 'http://')
if headers: url += '|%s' % urllib.urlencode(headers)
return url
except:
return
def vk(url):
try:
query = urlparse.parse_qs(urlparse.urlparse(url).query)
try:
oid, video_id = query['oid'][0], query['id'][0]
except:
oid, video_id = re.findall('\/video(.*)_(.*)', url)[0]
sources_url = 'http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (oid, video_id)
html = client.request(sources_url)
html = re.sub(r'[^\x00-\x7F]+', ' ', html)
sources = re.findall('(\d+)x\d+.+?(http.+?\.m3u8.+?)n', html)
if not sources:
sources = re.findall('"url(\d+)"\s*:\s*"(.+?)"', html)
sources = [(i[0], i[1].replace('\\', '')) for i in sources]
sources = dict(sources)
url = []
try:
url += [{'quality': 'HD', 'url': sources['720']}]
except:
pass
try:
url += [{'quality': 'SD', 'url': sources['540']}]
except:
pass
try:
url += [{'quality': 'SD', 'url': sources['480']}]
except:
pass
if not url == []: return url
try:
url += [{'quality': 'SD', 'url': sources['360']}]
except:
pass
if not url == []: return url
try:
url += [{'quality': 'SD', 'url': sources['240']}]
except:
pass
if not url == []: return url
except:
return
def odnoklassniki(url):
try:
media_id = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]
result = client.request('http://ok.ru/dk', post={'cmd': 'videoPlayerMetadata', 'mid': media_id})
result = re.sub(r'[^\x00-\x7F]+', ' ', result)
result = json.loads(result).get('videos', [])
hd = []
for name, quali in {'ultra': '4K', 'quad': '1440p', 'full': '1080p', 'hd': 'HD'}.items():
hd += [{'quality': quali, 'url': i.get('url')} for i in result if i.get('name').lower() == name]
sd = []
for name, quali in {'sd': 'SD', 'low': 'SD', 'lowest': 'SD', 'mobile': 'SD'}.items():
sd += [{'quality': quali, 'url': i.get('url')} for i in result if i.get('name').lower() == name]
url = hd + sd[:1]
if not url == []: return url
except:
return
def cldmailru(url):
try:
v = url.split('public')[-1]
r = client.request(url)
r = re.sub(r'[^\x00-\x7F]+', ' ', r)
tok = re.findall('"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)', r)[0]
url = re.findall('"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)', r)[0]
url = '%s%s?key=%s' % (url, v, tok)
return url
except:
return
def yandex(url):
try:
cookie = client.request(url, output='cookie')
r = client.request(url, cookie=cookie)
r = re.sub(r'[^\x00-\x7F]+', ' ', r)
sk = re.findall('"sk"\s*:\s*"([^"]+)', r)[0]
idstring = re.findall('"id"\s*:\s*"([^"]+)', r)[0]
idclient = binascii.b2a_hex(os.urandom(16))
post = {'idClient': idclient, 'version': '3.9.2', 'sk': sk, '_model.0': 'do-get-resource-url', 'id.0': idstring}
post = urllib.urlencode(post)
r = client.request('https://yadi.sk/models/?_m=do-get-resource-url', post=post, cookie=cookie)
r = json.loads(r)
url = r['models'][0]['data']['file']
return url
except:
return
| lastship/plugin.video.lastship | resources/lib/modules/directstream.py | Python | gpl-3.0 | 11,429 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PurpleRobotDeviceNote'
db.create_table(u'purple_robot_app_purplerobotdevicenote', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('device', self.gf('django.db.models.fields.related.ForeignKey')(related_name='notes', to=orm['purple_robot_app.PurpleRobotDevice'])),
('note', self.gf('django.db.models.fields.TextField')(max_length=1024)),
('added', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'purple_robot_app', ['PurpleRobotDeviceNote'])
def backwards(self, orm):
# Deleting model 'PurpleRobotDeviceNote'
db.delete_table(u'purple_robot_app_purplerobotdevicenote')
models = {
u'purple_robot_app.purplerobotalert': {
'Meta': {'object_name': 'PurpleRobotAlert'},
'action_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'dismissed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'generated': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manually_dismissed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'probe': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'severity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
u'purple_robot_app.purplerobotconfiguration': {
'Meta': {'object_name': 'PurpleRobotConfiguration'},
'added': ('django.db.models.fields.DateTimeField', [], {}),
'contents': ('django.db.models.fields.TextField', [], {'max_length': '1048576'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '1024'})
},
u'purple_robot_app.purplerobotdevice': {
'Meta': {'object_name': 'PurpleRobotDevice'},
'config_last_fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'config_last_user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'configuration': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'devices'", 'null': 'True', 'to': u"orm['purple_robot_app.PurpleRobotConfiguration']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1048576', 'null': 'True', 'blank': 'True'}),
'device_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'devices'", 'null': 'True', 'to': u"orm['purple_robot_app.PurpleRobotDeviceGroup']"}),
'device_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256', 'db_index': 'True'}),
'hash_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'performance_metadata': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'max_length': '1048576'})
},
u'purple_robot_app.purplerobotdevicegroup': {
'Meta': {'object_name': 'PurpleRobotDeviceGroup'},
'configuration': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'groups'", 'null': 'True', 'to': u"orm['purple_robot_app.PurpleRobotConfiguration']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1048576', 'null': 'True', 'blank': 'True'}),
'group_id': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'purple_robot_app.purplerobotdevicenote': {
'Meta': {'object_name': 'PurpleRobotDeviceNote'},
'added': ('django.db.models.fields.DateTimeField', [], {}),
'device': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': u"orm['purple_robot_app.PurpleRobotDevice']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '1024'})
},
u'purple_robot_app.purplerobotevent': {
'Meta': {'object_name': 'PurpleRobotEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logged': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'payload': ('django.db.models.fields.TextField', [], {'max_length': '8388608', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'})
},
u'purple_robot_app.purplerobotexportjob': {
'Meta': {'object_name': 'PurpleRobotExportJob'},
'destination': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'export_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'probes': ('django.db.models.fields.TextField', [], {'max_length': '8196', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '512'}),
'users': ('django.db.models.fields.TextField', [], {'max_length': '8196', 'null': 'True', 'blank': 'True'})
},
u'purple_robot_app.purplerobotpayload': {
'Meta': {'object_name': 'PurpleRobotPayload'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'errors': ('django.db.models.fields.TextField', [], {'max_length': '65536', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payload': ('django.db.models.fields.TextField', [], {'max_length': '8388608'}),
'process_tags': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'})
},
u'purple_robot_app.purplerobotreading': {
'Meta': {'object_name': 'PurpleRobotReading', 'index_together': "[['probe', 'user_id'], ['logged', 'user_id'], ['probe', 'logged', 'user_id']]"},
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logged': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'payload': ('django.db.models.fields.TextField', [], {'max_length': '8388608'}),
'probe': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'})
},
u'purple_robot_app.purplerobotreport': {
'Meta': {'object_name': 'PurpleRobotReport'},
'generated': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'probe': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'report_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'purple_robot_app.purplerobottest': {
'Meta': {'object_name': 'PurpleRobotTest'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'frequency': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {}),
'probe': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'report': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
}
}
complete_apps = ['purple_robot_app'] | cbitstech/Purple-Robot-Django | migrations/0034_auto__add_purplerobotdevicenote.py | Python | gpl-3.0 | 10,647 |
# -*- coding: utf-8 -*-
"""
This module provides all functionality for tracking events during a VSG process.
The module defines the VSGLogger class, which wraps Python's native logging machinery. Since Python logger names form period-separated hierarchies (e.g. "VSG.Module.ModuleFeature"), where loggers further down the hierarchy are children of loggers higher up, the VSG Logger manages the hierarchy of loggers in the VSG namespace (i.e. VSG.*).
For consistency, all logging activity in the VSG package must go through the VSG Logger.
"""
import os
import sys
import logging
class VSGLogger(object):
"""
    The VSG Logger manages messages associated with various priority levels.
Optional, it can redirect the messages to any output channel (usually a file).
"""
BASENAME = "VSG"
class LevelFilter(logging.Filter):
"""
The LevelFilter class implements a Filter Object specific to the VSGLogger
"""
def __init__(self, levels=None):
"""
Constructor
"""
self._level = levels or [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]
def filter(self, record):
"""
            Returns nonzero if the record should be logged; zero otherwise.
"""
return record.levelno in self._level
def __init__(self, filepath=None, threshold=logging.INFO):
"""
Creates a logger with the given name (the name prefixes each log line).
:param filepath: The optional output path for the logger messages.
:param threshold: The threshold for messages; logging messages which are less severe than 'threshold' will be ignored.
"""
# Create the Logger
self._logger = self.getLogger(None)
self._logger.setLevel(threshold)
# Handler management
self._fileHandlers = []
self._handlers = []
# Register the Standard Output handler
stdoutHandler = logging.StreamHandler(sys.stdout)
stdoutHandler.setFormatter(logging.Formatter("%(name)-15s : %(levelname)-8s %(message)s"))
stdoutHandler.addFilter(VSGLogger.LevelFilter([logging.DEBUG, logging.INFO, logging.WARNING]))
self._registerHandler(stdoutHandler)
# Register the Standard Error handler
stderrHandler = logging.StreamHandler(sys.stderr)
        stderrHandler.setFormatter(logging.Formatter("%(name)-15s : %(levelname)-8s %(message)s"))
stderrHandler.addFilter(VSGLogger.LevelFilter([logging.ERROR, logging.CRITICAL]))
self._registerHandler(stderrHandler)
# Register a File Handler
if filepath:
fileHandler = logging.FileHandler(filepath, 'a')
            fileHandler.setFormatter(logging.Formatter("%(asctime)s | %(name)-15s %(levelname)-8s %(message)s", "%b %d %H:%M:%S"))
self._registerHandler(fileHandler)
def __del__(self):
"""
Destructor.
"""
self.close()
def _registerHandler(self, handler):
"""
Registers a handler.
:param handler: A handler object.
"""
self._logger.addHandler(handler)
self._handlers.append(handler)
def _unregisterHandler(self, handler, shutdown=True):
"""
Unregisters the logging handler.
        :param handler: A handler previously registered with this logger.
:param shutdown: Flag to shutdown the handler.
"""
if handler in self._handlers:
self._handlers.remove(handler)
self._logger.removeHandler(handler)
if shutdown:
try:
handler.close()
except KeyError:
# Depending on the Python version, it's possible for this call
# to fail most likely because some logging module objects get
# garbage collected before the VSGLogger object is.
pass
@classmethod
def getLogger(cls, name=None):
"""
Retrieves the Python native logger
:param name: The name of the logger instance in the VSG namespace (VSG.<name>); a None value will use the VSG root.
        :return: The instance of the Python logger object.
"""
return logging.getLogger("{0}.{1}".format(cls.BASENAME, name) if name else cls.BASENAME)
@classmethod
def debug(cls, name, message, *args):
"""
Convenience function to log a message at the DEBUG level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
        :param args: The arguments that are merged into msg using the string formatting operator.
:..note: The native logger's `kwargs` are not used in this function.
"""
cls.getLogger(name).debug(message, *args)
@classmethod
def info(cls, name, message, *args):
"""
Convenience function to log a message at the INFO level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
        :param args: The arguments that are merged into msg using the string formatting operator.
:..note: The native logger's `kwargs` are not used in this function.
"""
cls.getLogger(name).info(message, *args)
@classmethod
def warning(cls, name, message, *args):
"""
Convenience function to log a message at the WARNING level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
        :param args: The arguments that are merged into msg using the string formatting operator.
:..note: The native logger's `kwargs` are not used in this function.
"""
cls.getLogger(name).warning(message, *args)
@classmethod
def error(cls, name, message, *args):
"""
Convenience function to log a message at the ERROR level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
        :param args: The arguments that are merged into msg using the string formatting operator.
:..note: The native logger's `kwargs` are not used in this function.
"""
cls.getLogger(name).error(message, *args)
@classmethod
def critical(cls, name, message, *args):
"""
Convenience function to log a message at the CRITICAL level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
        :param args: The arguments that are merged into msg using the string formatting operator.
:..note: The native logger's `kwargs` are not used in this function.
"""
cls.getLogger(name).critical(message, *args)
@classmethod
def exception(cls, name, message, *args):
"""
        Convenience function to log a message at the ERROR level with additional exception information.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
        :param args: The arguments that are merged into msg using the string formatting operator.
:..note: This method should only be called from an exception handler.
"""
cls.getLogger(name).exception(message, *args)
def close(self):
"""
Closes and unregisters all logging handlers.
"""
while self._handlers:
self._unregisterHandler(self._handlers[0])
if __name__ == "__main__":
logfile = os.path.join(os.path.dirname(__file__), 'log.txt')
logger = VSGLogger(logfile, logging.DEBUG)
VSGLogger.debug("DebugLogger", "Debug = %d", int(logging.DEBUG))
try:
raise NotImplementedError("This has not been implemented")
except NotImplementedError:
VSGLogger.exception(__name__, "Something bad happened.")
VSGLogger.info(__name__, "This is a multiline logger message:\n %s \n %s \n %s", '****************', 'Message!', '****************')
VSGLogger.info(__name__, "Info = %d", int(logging.INFO))
VSGLogger.error("MyName", "Error = %d", int(logging.ERROR))
VSGLogger.critical("YourName", "Critical = %d", int(logging.CRITICAL))
VSGLogger.warning(__name__, "Warning = %d", int(logging.WARNING))
import webbrowser
webbrowser.open(logfile)
| dbarsam/python-vsgen | vsgen/util/logger.py | Python | mit | 8,737 |
"""A library of helper functions for the CherryPy test suite."""
import datetime
import io
import logging
import os
import re
import subprocess
import sys
import time
import unittest
import warnings
import contextlib
import portend
import pytest
from cheroot.test import webtest
import cherrypy
from cherrypy._cpcompat import text_or_bytes, HTTPSConnection, ntob
from cherrypy.lib import httputil
from cherrypy.lib import gctools
log = logging.getLogger(__name__)
thisdir = os.path.abspath(os.path.dirname(__file__))
serverpem = os.path.join(os.getcwd(), thisdir, 'test.pem')
class Supervisor(object):
"""Base class for modeling and controlling servers during testing."""
def __init__(self, **kwargs):
for k, v in kwargs.items():
            if k == 'port':
                setattr(self, k, int(v))
                continue
            setattr(self, k, v)
def log_to_stderr(msg, level):
return sys.stderr.write(msg + os.linesep)
class LocalSupervisor(Supervisor):
"""Base class for modeling/controlling servers which run in the same
process.
When the server side runs in a different process, start/stop can dump all
state between each test module easily. When the server side runs in the
same process as the client, however, we have to do a bit more work to
ensure config and mounted apps are reset between tests.
"""
using_apache = False
using_wsgi = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
cherrypy.server.httpserver = self.httpserver_class
# This is perhaps the wrong place for this call but this is the only
        # place that I've found so far that I KNOW is early enough to set this.
cherrypy.config.update({'log.screen': False})
engine = cherrypy.engine
if hasattr(engine, 'signal_handler'):
engine.signal_handler.subscribe()
if hasattr(engine, 'console_control_handler'):
engine.console_control_handler.subscribe()
def start(self, modulename=None):
"""Load and start the HTTP server."""
if modulename:
# Unhook httpserver so cherrypy.server.start() creates a new
# one (with config from setup_server, if declared).
cherrypy.server.httpserver = None
cherrypy.engine.start()
self.sync_apps()
def sync_apps(self):
"""Tell the server about any apps which the setup functions mounted."""
pass
def stop(self):
td = getattr(self, 'teardown', None)
if td:
td()
cherrypy.engine.exit()
for name, server in getattr(cherrypy, 'servers', {}).copy().items():
server.unsubscribe()
del cherrypy.servers[name]
class NativeServerSupervisor(LocalSupervisor):
"""Server supervisor for the builtin HTTP server."""
httpserver_class = 'cherrypy._cpnative_server.CPHTTPServer'
using_apache = False
using_wsgi = False
def __str__(self):
return 'Builtin HTTP Server on %s:%s' % (self.host, self.port)
class LocalWSGISupervisor(LocalSupervisor):
"""Server supervisor for the builtin WSGI server."""
httpserver_class = 'cherrypy._cpwsgi_server.CPWSGIServer'
using_apache = False
using_wsgi = True
def __str__(self):
return 'Builtin WSGI Server on %s:%s' % (self.host, self.port)
def sync_apps(self):
"""Hook a new WSGI app into the origin server."""
cherrypy.server.httpserver.wsgi_app = self.get_app()
def get_app(self, app=None):
"""Obtain a new (decorated) WSGI app to hook into the origin server."""
if app is None:
app = cherrypy.tree
if self.validate:
try:
from wsgiref import validate
except ImportError:
warnings.warn(
'Error importing wsgiref. The validator will not run.')
else:
# wraps the app in the validator
app = validate.validator(app)
return app
def get_cpmodpy_supervisor(**options):
from cherrypy.test import modpy
sup = modpy.ModPythonSupervisor(**options)
sup.template = modpy.conf_cpmodpy
return sup
def get_modpygw_supervisor(**options):
from cherrypy.test import modpy
sup = modpy.ModPythonSupervisor(**options)
sup.template = modpy.conf_modpython_gateway
sup.using_wsgi = True
return sup
def get_modwsgi_supervisor(**options):
from cherrypy.test import modwsgi
return modwsgi.ModWSGISupervisor(**options)
def get_modfcgid_supervisor(**options):
from cherrypy.test import modfcgid
return modfcgid.ModFCGISupervisor(**options)
def get_modfastcgi_supervisor(**options):
from cherrypy.test import modfastcgi
return modfastcgi.ModFCGISupervisor(**options)
def get_wsgi_u_supervisor(**options):
cherrypy.server.wsgi_version = ('u', 0)
return LocalWSGISupervisor(**options)
class CPWebCase(webtest.WebCase):
script_name = ''
scheme = 'http'
available_servers = {'wsgi': LocalWSGISupervisor,
'wsgi_u': get_wsgi_u_supervisor,
'native': NativeServerSupervisor,
'cpmodpy': get_cpmodpy_supervisor,
'modpygw': get_modpygw_supervisor,
'modwsgi': get_modwsgi_supervisor,
'modfcgid': get_modfcgid_supervisor,
'modfastcgi': get_modfastcgi_supervisor,
}
default_server = 'wsgi'
@classmethod
def _setup_server(cls, supervisor, conf):
v = sys.version.split()[0]
log.info('Python version used to run this test script: %s' % v)
log.info('CherryPy version: %s' % cherrypy.__version__)
if supervisor.scheme == 'https':
ssl = ' (ssl)'
else:
ssl = ''
log.info('HTTP server version: %s%s' % (supervisor.protocol, ssl))
log.info('PID: %s' % os.getpid())
cherrypy.server.using_apache = supervisor.using_apache
cherrypy.server.using_wsgi = supervisor.using_wsgi
if sys.platform[:4] == 'java':
cherrypy.config.update({'server.nodelay': False})
if isinstance(conf, text_or_bytes):
parser = cherrypy.lib.reprconf.Parser()
conf = parser.dict_from_file(conf).get('global', {})
else:
conf = conf or {}
baseconf = conf.copy()
baseconf.update({'server.socket_host': supervisor.host,
'server.socket_port': supervisor.port,
'server.protocol_version': supervisor.protocol,
'environment': 'test_suite',
})
if supervisor.scheme == 'https':
# baseconf['server.ssl_module'] = 'builtin'
baseconf['server.ssl_certificate'] = serverpem
baseconf['server.ssl_private_key'] = serverpem
# helper must be imported lazily so the coverage tool
# can run against module-level statements within cherrypy.
# Also, we have to do "from cherrypy.test import helper",
# exactly like each test module does, because a relative import
# would stick a second instance of webtest in sys.modules,
# and we wouldn't be able to globally override the port anymore.
if supervisor.scheme == 'https':
webtest.WebCase.HTTP_CONN = HTTPSConnection
return baseconf
@classmethod
def setup_class(cls):
''
# Creates a server
conf = {
'scheme': 'http',
'protocol': 'HTTP/1.1',
'port': 54583,
'host': '127.0.0.1',
'validate': False,
'server': 'wsgi',
}
supervisor_factory = cls.available_servers.get(
conf.get('server', 'wsgi'))
if supervisor_factory is None:
raise RuntimeError('Unknown server in config: %s' % conf['server'])
supervisor = supervisor_factory(**conf)
# Copied from "run_test_suite"
cherrypy.config.reset()
baseconf = cls._setup_server(supervisor, conf)
cherrypy.config.update(baseconf)
setup_client()
if hasattr(cls, 'setup_server'):
# Clear the cherrypy tree and clear the wsgi server so that
# it can be updated with the new root
cherrypy.tree = cherrypy._cptree.Tree()
cherrypy.server.httpserver = None
cls.setup_server()
# Add a resource for verifying there are no refleaks
# to *every* test class.
cherrypy.tree.mount(gctools.GCRoot(), '/gc')
cls.do_gc_test = True
supervisor.start(cls.__module__)
cls.supervisor = supervisor
@classmethod
def teardown_class(cls):
''
if hasattr(cls, 'setup_server'):
cls.supervisor.stop()
do_gc_test = False
def test_gc(self):
if not self.do_gc_test:
return
self.getPage('/gc/stats')
try:
self.assertBody('Statistics:')
except Exception:
'Failures occur intermittently. See #1420'
def prefix(self):
return self.script_name.rstrip('/')
def base(self):
if ((self.scheme == 'http' and self.PORT == 80) or
(self.scheme == 'https' and self.PORT == 443)):
port = ''
else:
port = ':%s' % self.PORT
return '%s://%s%s%s' % (self.scheme, self.HOST, port,
self.script_name.rstrip('/'))
def exit(self):
sys.exit()
def getPage(self, url, *args, **kwargs):
"""Open the url.
"""
if self.script_name:
url = httputil.urljoin(self.script_name, url)
return webtest.WebCase.getPage(self, url, *args, **kwargs)
def skip(self, msg='skipped '):
pytest.skip(msg)
def assertErrorPage(self, status, message=None, pattern=''):
"""Compare the response body with a built in error page.
The function will optionally look for the regexp pattern,
within the exception embedded in the error page."""
# This will never contain a traceback
page = cherrypy._cperror.get_error_page(status, message=message)
# First, test the response body without checking the traceback.
# Stick a match-all group (.*) in to grab the traceback.
def esc(text):
return re.escape(ntob(text))
epage = re.escape(page)
epage = epage.replace(
esc('<pre id="traceback"></pre>'),
esc('<pre id="traceback">') + b'(.*)' + esc('</pre>'))
m = re.match(epage, self.body, re.DOTALL)
if not m:
self._handlewebError(
'Error page does not match; expected:\n' + page)
return
# Now test the pattern against the traceback
if pattern is None:
# Special-case None to mean that there should be *no* traceback.
if m and m.group(1):
self._handlewebError('Error page contains traceback')
else:
if (m is None) or (
not re.search(ntob(re.escape(pattern), self.encoding),
m.group(1))):
msg = 'Error page does not contain %s in traceback'
self._handlewebError(msg % repr(pattern))
date_tolerance = 2
def assertEqualDates(self, dt1, dt2, seconds=None):
"""Assert abs(dt1 - dt2) is within Y seconds."""
if seconds is None:
seconds = self.date_tolerance
if dt1 > dt2:
diff = dt1 - dt2
else:
diff = dt2 - dt1
if not diff < datetime.timedelta(seconds=seconds):
raise AssertionError('%r and %r are not within %r seconds.' %
(dt1, dt2, seconds))
def _test_method_sorter(_, x, y):
"""Monkeypatch the test sorter to always run test_gc last in each suite."""
if x == 'test_gc':
return 1
if y == 'test_gc':
return -1
if x > y:
return 1
if x < y:
return -1
return 0
unittest.TestLoader.sortTestMethodsUsing = _test_method_sorter
def setup_client():
"""Set up the WebCase classes to match the server's socket settings."""
webtest.WebCase.PORT = cherrypy.server.socket_port
webtest.WebCase.HOST = cherrypy.server.socket_host
if cherrypy.server.ssl_certificate:
CPWebCase.scheme = 'https'
# --------------------------- Spawning helpers --------------------------- #
class CPProcess(object):
pid_file = os.path.join(thisdir, 'test.pid')
config_file = os.path.join(thisdir, 'test.conf')
config_template = """[global]
server.socket_host: '%(host)s'
server.socket_port: %(port)s
checker.on: False
log.screen: False
log.error_file: r'%(error_log)s'
log.access_file: r'%(access_log)s'
%(ssl)s
%(extra)s
"""
error_log = os.path.join(thisdir, 'test.error.log')
access_log = os.path.join(thisdir, 'test.access.log')
def __init__(self, wait=False, daemonize=False, ssl=False,
socket_host=None, socket_port=None):
self.wait = wait
self.daemonize = daemonize
self.ssl = ssl
self.host = socket_host or cherrypy.server.socket_host
self.port = socket_port or cherrypy.server.socket_port
def write_conf(self, extra=''):
if self.ssl:
serverpem = os.path.join(thisdir, 'test.pem')
ssl = """
server.ssl_certificate: r'%s'
server.ssl_private_key: r'%s'
""" % (serverpem, serverpem)
else:
ssl = ''
conf = self.config_template % {
'host': self.host,
'port': self.port,
'error_log': self.error_log,
'access_log': self.access_log,
'ssl': ssl,
'extra': extra,
}
with io.open(self.config_file, 'w', encoding='utf-8') as f:
f.write(str(conf))
def start(self, imports=None):
"""Start cherryd in a subprocess."""
portend.free(self.host, self.port, timeout=1)
args = [
'-m',
'cherrypy',
'-c', self.config_file,
'-p', self.pid_file,
]
r"""
Command for running cherryd server with autoreload enabled
Using
```
['-c',
"__requires__ = 'CherryPy'; \
import pkg_resources, re, sys; \
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]); \
sys.exit(\
pkg_resources.load_entry_point(\
'CherryPy', 'console_scripts', 'cherryd')())"]
```
doesn't work as it's impossible to reconstruct the `-c`'s contents.
Ref: https://github.com/cherrypy/cherrypy/issues/1545
"""
if not isinstance(imports, (list, tuple)):
imports = [imports]
for i in imports:
if i:
args.append('-i')
args.append(i)
if self.daemonize:
args.append('-d')
env = os.environ.copy()
# Make sure we import the cherrypy package in which this module is
# defined.
grandparentdir = os.path.abspath(os.path.join(thisdir, '..', '..'))
if env.get('PYTHONPATH', ''):
env['PYTHONPATH'] = os.pathsep.join(
(grandparentdir, env['PYTHONPATH']))
else:
env['PYTHONPATH'] = grandparentdir
self._proc = subprocess.Popen([sys.executable] + args, env=env)
if self.wait:
self.exit_code = self._proc.wait()
else:
portend.occupied(self.host, self.port, timeout=5)
# Give the engine a wee bit more time to finish STARTING
if self.daemonize:
time.sleep(2)
else:
time.sleep(1)
def get_pid(self):
if self.daemonize:
return int(open(self.pid_file, 'rb').read())
return self._proc.pid
def join(self):
"""Wait for the process to exit."""
if self.daemonize:
return self._join_daemon()
self._proc.wait()
def _join_daemon(self):
with contextlib.suppress(IOError):
os.waitpid(self.get_pid(), 0)
| JonnyWong16/plexpy | lib/cherrypy/test/helper.py | Python | gpl-3.0 | 16,369 |
# vim:fileencoding=utf-8
from calibre.constants import plugins
monotonicp, err = plugins['monotonic']
if err:
raise RuntimeError('Failed to load the monotonic module with error: ' + err)
monotonic = monotonicp.monotonic
del monotonicp, err
| jelly/calibre | src/calibre/utils/monotonic.py | Python | gpl-3.0 | 246 |
import numpy as np
from .core import LayerBase, FFBase, NoParamMixin
from ..ops import Sigmoid
from ..util import white, rtm
sigmoid = Sigmoid()
class HighwayLayer(FFBase):
"""
Neural Highway Layer
Based on Srivastava et al., 2015
A carry gate is applied to the raw input.
A transform gate is applied to the output activation.
y = y_ * g_t + x * g_c
Output shape equals the input shape.
"""
def __init__(self, activation="tanh", **kw):
FFBase.__init__(self, 1, activation, **kw)
self.gates = None
def connect(self, to, inshape):
self.neurons = int(np.prod(inshape))
self.weights = white(self.neurons, self.neurons*3)
self.biases = np.zeros((self.neurons*3,))
FFBase.connect(self, to, inshape)
def feedforward(self, stimuli) -> np.ndarray:
self.inputs = rtm(stimuli)
self.gates = self.inputs.dot(self.weights) + self.biases
self.gates[:, :self.neurons] = self.activation(self.gates[:, :self.neurons])
self.gates[:, self.neurons:] = sigmoid(self.gates[:, self.neurons:])
h, t, c = np.split(self.gates, 3, axis=1)
self.output = h * t + self.inputs * c
return self.output.reshape(stimuli.shape)
def backpropagate(self, error) -> np.ndarray:
shape = error.shape
error = rtm(error)
h, t, c = np.split(self.gates, 3, axis=1)
dh = self.activation.derivative(h) * t * error
dt = sigmoid.derivative(t) * h * error
dc = sigmoid.derivative(c) * self.inputs * error
dx = c * error
dgates = np.concatenate((dh, dt, dc), axis=1)
self.nabla_w = self.inputs.T.dot(dgates)
self.nabla_b = dgates.sum(axis=0)
return (dgates.dot(self.weights.T) + dx).reshape(shape)
def capsule(self):
return FFBase.capsule(self) + [self.activation, self.get_weights(unfold=False)]
@classmethod
def from_capsule(cls, capsule):
return cls(activation=capsule[-2])
@property
def outshape(self):
return self.inshape
def __str__(self):
return "Highway-{}".format(str(self.activation))
class DropOut(LayerBase, NoParamMixin):
def __init__(self, dropchance):
LayerBase.__init__(self, activation="linear", trainable=False)
self.dropchance = 1. - dropchance
self.mask = None
self.neurons = None
self.training = True
def connect(self, to, inshape):
self.neurons = inshape
LayerBase.connect(self, to, inshape)
def feedforward(self, stimuli: np.ndarray) -> np.ndarray:
self.inputs = stimuli
self.mask = np.random.uniform(0, 1, self.neurons) < self.dropchance
self.output = stimuli * (self.mask if self.brain.learning else self.dropchance)
return self.output
def backpropagate(self, error: np.ndarray) -> np.ndarray:
output = error * self.mask
self.mask = np.ones_like(self.mask) * self.dropchance
return output
@property
def outshape(self):
return self.neurons
def capsule(self):
return LayerBase.capsule(self) + [self.dropchance]
@classmethod
def from_capsule(cls, capsule):
return cls(dropchance=capsule[-1])
def __str__(self):
return "DropOut({})".format(self.dropchance)
class Experimental:
class AboLayer(LayerBase):
def __init__(self, brain, position, activation):
LayerBase.__init__(self, brain, position, activation)
self.brain = brain
self.fanin = brain.layers[-1].fanout
self.neurons = []
@classmethod
def from_capsule(cls, capsule):
pass
def add_minion(self, empty_network):
minion = empty_network
minion.add_fc(10)
minion.finalize_architecture()
self.neurons.append(minion)
def feedforward(self, inputs):
"""this ain't so simple after all O.O"""
pass
def receive_error(self, error_vector: np.ndarray) -> None:
pass
def shuffle(self) -> None:
pass
def backpropagate(self, error) -> np.ndarray:
pass
def weight_update(self) -> None:
pass
def predict(self, stimuli: np.ndarray) -> np.ndarray:
pass
def outshape(self):
return ...
def __str__(self):
pass
| csxeba/ReSkiv | brainforge/layers/fancy.py | Python | gpl-3.0 | 4,450 |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import posixpath
import requests
import urllib
from django.utils.encoding import iri_to_uri, smart_str
from requests import exceptions
from requests.auth import HTTPBasicAuth
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
__docformat__ = "epytext"
LOG = logging.getLogger(__name__)
class RestException(Exception):
"""
Any error result from the Rest API is converted into this exception type.
"""
def __init__(self, error):
Exception.__init__(self, error)
self._error = error
self._code = None
self._message = str(error)
self._headers = {}
# Get more information if urllib2.HTTPError.
try:
self._code = error.response.status_code
self._headers = error.response.headers
self._message = self._error.response.text
except AttributeError:
pass
def __str__(self):
res = self._message or ""
if self._code is not None:
res += " (error %s)" % self._code
return res
def get_parent_ex(self):
if isinstance(self._error, Exception):
return self._error
return None
@property
def code(self):
return self._code
@property
def message(self):
return self._message
class HttpClient(object):
"""
Basic HTTP client tailored for rest APIs.
"""
def __init__(self, base_url, exc_class=None, logger=None):
"""
@param base_url: The base url to the API.
@param exc_class: An exception class to handle non-200 results.
"""
self._base_url = base_url.rstrip('/')
self._exc_class = exc_class or RestException
self._logger = logger or LOG
self._session = requests.Session()
def set_kerberos_auth(self):
"""Set up kerberos auth for the client, based on the current ticket."""
self._session.auth = HTTPKerberosAuth(mutual_authentication=OPTIONAL)
return self
def set_basic_auth(self, username, password):
self._session.auth = HTTPBasicAuth(username, password)
return self
def set_headers(self, headers):
"""
Add headers to the request
@param headers: A dictionary with the key value pairs for the headers
@return: The current object
"""
self._session.headers.update(headers)
return self
@property
def base_url(self):
return self._base_url
@property
def logger(self):
return self._logger
def set_verify(self, verify=True):
self._session.verify = verify
return self
def _get_headers(self, headers):
if headers:
self._session.headers.update(headers)
return self._session.headers.copy()
def execute(self, http_method, path, params=None, data=None, headers=None, allow_redirects=False, urlencode=True, files=None):
"""
Submit an HTTP request.
@param http_method: GET, POST, PUT, DELETE
@param path: The path of the resource. Unsafe characters will be quoted.
@param params: Key-value parameter data.
@param data: The data to attach to the body of the request.
@param headers: The headers to set for this request.
@param allow_redirects: requests should automatically resolve redirects.
@param urlencode: percent encode paths.
@param files: for posting Multipart-Encoded files
    @return: The response object returned by the underlying requests session
"""
# Prepare URL and params
if urlencode:
path = urllib.quote(smart_str(path))
url = self._make_url(path, params)
if http_method in ("GET", "DELETE"):
if data is not None:
self.logger.warn("GET and DELETE methods do not pass any data. Path '%s'" % path)
data = None
request_kwargs = {'allow_redirects': allow_redirects}
if headers:
request_kwargs['headers'] = headers
if data:
request_kwargs['data'] = data
if files:
request_kwargs['files'] = files
try:
resp = getattr(self._session, http_method.lower())(url, **request_kwargs)
if resp.status_code >= 300:
resp.raise_for_status()
raise exceptions.HTTPError(response=resp)
return resp
except (exceptions.ConnectionError,
exceptions.HTTPError,
exceptions.RequestException,
exceptions.URLRequired,
exceptions.TooManyRedirects), ex:
raise self._exc_class(ex)
def _make_url(self, path, params):
res = self._base_url
if path:
res += posixpath.normpath('/' + path.lstrip('/'))
if params:
param_str = urllib.urlencode(params)
res += '?' + param_str
return iri_to_uri(res)
| MobinRanjbar/hue | desktop/core/src/desktop/lib/rest/http_client.py | Python | apache-2.0 | 5,229 |
"""Optimization index for accelerated response of api status.
Revision ID: 27a3d7e96d1f
Revises: 1f13d13948dd
Create Date: 2021-08-13 09:40:24.612261
"""
# revision identifiers, used by Alembic.
revision = "27a3d7e96d1f"
down_revision = "1f13d13948dd"
from alembic import op
import sqlalchemy as sa
def upgrade():
# CREATE INDEX CONCURRENTLY cannot run inside a transaction block, COMMIT transaction before create index
# see https://stackoverflow.com/questions/20091380/concurrent-db-table-indexing-through-alembic-script
op.execute("COMMIT")
try:
op.create_index(
"realtime_update_contributor_id_and_created_at_and_status",
"real_time_update",
["contributor_id", "status", sa.text("created_at DESC")],
unique=False,
postgresql_concurrently=True,
)
op.create_index(
"realtime_update_created_at_tmp_up",
"real_time_update",
[sa.text("created_at DESC")],
unique=False,
postgresql_concurrently=True,
)
except Exception as _:
op.execute("DROP INDEX CONCURRENTLY IF EXISTS realtime_update_contributor_id_and_created_at_and_status;")
op.execute("DROP INDEX CONCURRENTLY IF EXISTS realtime_update_created_at_tmp_up;")
raise
    # At this point the new indexes exist, so we can drop the old ones
op.execute("DROP INDEX CONCURRENTLY IF EXISTS realtime_update_contributor_id_and_created_at;")
op.execute("DROP INDEX CONCURRENTLY IF EXISTS realtime_update_created_at;")
op.execute("ALTER INDEX realtime_update_created_at_tmp_up RENAME TO realtime_update_created_at;")
op.execute("DROP INDEX CONCURRENTLY IF EXISTS status_idx;")
def downgrade():
# CREATE INDEX CONCURRENTLY cannot run inside a transaction block, COMMIT transaction before create index
# see https://stackoverflow.com/questions/20091380/concurrent-db-table-indexing-through-alembic-script
op.execute("COMMIT")
try:
op.create_index("status_idx", "real_time_update", ["status"], unique=False, postgresql_concurrently=True)
op.create_index(
"realtime_update_created_at_tmp_down",
"real_time_update",
["created_at"],
unique=False,
postgresql_concurrently=True,
)
op.create_index(
"realtime_update_contributor_id_and_created_at",
"real_time_update",
["created_at", "contributor_id"],
unique=False,
postgresql_concurrently=True,
)
except Exception as _:
op.execute("DROP INDEX CONCURRENTLY IF EXISTS status_idx;")
op.execute("DROP INDEX CONCURRENTLY IF EXISTS realtime_update_created_at_tmp_down;")
op.execute("DROP INDEX CONCURRENTLY IF EXISTS realtime_update_contributor_id_and_created_at;")
raise
    # At this point the new indexes exist, so we can drop the old ones
op.execute("DROP INDEX CONCURRENTLY IF EXISTS realtime_update_created_at;")
op.execute("ALTER INDEX realtime_update_created_at_tmp_down RENAME TO realtime_update_created_at;")
op.execute("DROP INDEX CONCURRENTLY IF EXISTS realtime_update_contributor_id_and_created_at_and_status;")
| CanalTP/kirin | migrations/versions/27a3d7e96d1f_del_index_useless_optimize_index_real_time_update.py | Python | agpl-3.0 | 3,250 |
import serial
import numpy as np
import json
from datetime import datetime
class ElectronicNose:
def __init__(self, devAdd='/dev/ttyUSB0', baudrate=115200/3, \
tmax = 1000, outputFile = '', numSensors = 8):
## Creating the serial object
self.Sensor = serial.Serial(devAdd, baudrate)
self.memory = np.empty((0, numSensors + 2 + 1))
## File to store samples
if outputFile != '':
self.outfile = open(outputFile, 'a')
else:
self.outfile = []
## Writing the parameters
Vparam = '54'
if False: self.Sensor.write('P000' + 8*Vparam )
return
def save(self, filename):
np.save(filename, self.memory)
return
def closeConnection(self):
self.Sensor.close()
return
def forget(self):
self.memory = np.empty( (0, self.memory.shape[1] ) )
return
def refresh(self, nmax):
self.t[:self.tMax - nmax] = self.t[nmax:]
self.S[:self.tMax - nmax,:] = self.S[nmax:,:]
return
def sniff(self, nsamples=5):
# Flushing to ensure time precision
self.Sensor.flush()
# Possibly getting partial line -- this will be discarded
self.Sensor.readline()
avg = np.zeros( (1,11) )
nsamples_ = 0
for j in range(nsamples):
r = self.Sensor.readline()
if len(r) == 44:
nsamples_ += 1
avg[0,1:] += self.convert( r.split('\rV')[1].split('\n')[0][8:39] )
if nsamples_ > 0:
avg = avg/float(nsamples_)
now = datetime.now()
avg[0,0] = now.hour*3600 + now.minute*60 + now.second + now.microsecond/1.e6
self.memory = np.concatenate( (self.memory, np.reshape(avg, (1,11)) ), axis=0 )
return
def convert(self, string):
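        # Layout note (editor's addition, inferred from the slicing below rather
        # than documented upstream): chars 0-23 hold eight 3-hex-digit sensor
        # readings, chars 24-27 the temperature and chars 28-30 the humidity.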
s = np.zeros(10)
# Converting 8 sensors
for j in range(8):
s[j] = int( string[j*3:j*3+3] , 16 )
# Converting temperature and humidity
s[8] = int( string[24:28] , 16)
s[9] = int( string[28:31] , 16)
return s
if __name__ == "__main__":
# Instantiating the class
EN = ElectronicNose()
# Acquiring some data
EN.sniff(1000)
# Closing connection
EN.closeConnection()
| VandroiyLabs/FaroresWind | faroreswind/collector/ElectronicNose.py | Python | gpl-3.0 | 2,352 |
from pygame.mixer import music as _music
from .loaders import ResourceLoader
from . import constants
__all__ = [
'rewind', 'stop', 'fadeout', 'set_volume', 'get_volume', 'get_pos',
'set_pos', 'play', 'queue', 'pause', 'unpause',
]
_music.set_endevent(constants.MUSIC_END)
class _MusicLoader(ResourceLoader):
"""Pygame's music API acts as a singleton with one 'current' track.
No objects are returned that represent different tracks, so this loader
can't return anything useful. But it can perform all the path name
validations and return the validated path, so that's what we do.
This loader should not be exposed to the user.
"""
EXTNS = ['mp3', 'ogg', 'oga']
TYPE = 'music'
def _load(self, path):
return path
_loader = _MusicLoader('music')
# State of whether we are paused or not
_paused = False
def _play(name, loop):
global _paused
path = _loader.load(name)
_music.load(path)
_music.play(loop)
_paused = False
def play(name):
"""Play a music file from the music/ directory.
The music will loop when it finishes playing.
"""
_play(name, -1)
def play_once(name):
"""Play a music file from the music/ directory."""
_play(name, 0)
def queue(name):
"""Queue a music file to follow the current track.
This will load a music file and queue it. A queued music file will begin as
soon as the current music naturally ends. If the current music is ever
stopped or changed, the queued song will be lost.
"""
path = _loader.load(name)
_music.queue(path)
def is_playing(name):
"""Return True if the music is playing and not paused."""
return _music.get_busy() and not _paused
def pause():
"""Temporarily stop playback of the music stream.
Call `unpause()` to resume.
"""
global _paused
_music.pause()
_paused = True
def unpause():
"""Resume playback of the music stream after it has been paused."""
global _paused
_music.unpause()
_paused = False
def fadeout(seconds):
"""Fade out and eventually stop the music playback.
:param seconds: The duration in seconds over which the sound will be faded
out. For example, to fade out over half a second, call
``music.fadeout(0.5)``.
"""
_music.fadeout(int(seconds * 1000))
rewind = _music.rewind
stop = _music.stop
get_volume = _music.get_volume
set_volume = _music.set_volume
get_pos = _music.get_pos
set_pos = _music.set_pos
| yrobla/pyjuegos | pgzero/music.py | Python | lgpl-3.0 | 2,526 |
# -*- test-case-name: twisted.web2.test.test_server -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This is a web-server which integrates with the twisted.internet
infrastructure.
"""
# System Imports
import cgi, time, urlparse
from urllib import quote, unquote
from urlparse import urlsplit
import weakref
from zope.interface import implements
# Twisted Imports
from twisted.internet import defer
from twisted.python import log, failure
# Sibling Imports
from twisted.web2 import http, iweb, fileupload, responsecode
from twisted.web2 import http_headers
from twisted.web2.filter.range import rangefilter
from twisted.web2 import error
from twisted.web2 import version as web2_version
from twisted import __version__ as twisted_version
VERSION = "Twisted/%s TwistedWeb/%s" % (twisted_version, web2_version)
_errorMarker = object()
def defaultHeadersFilter(request, response):
if not response.headers.hasHeader('server'):
response.headers.setHeader('server', VERSION)
if not response.headers.hasHeader('date'):
response.headers.setHeader('date', time.time())
return response
defaultHeadersFilter.handleErrors = True
def preconditionfilter(request, response):
if request.method in ("GET", "HEAD"):
http.checkPreconditions(request, response)
return response
def doTrace(request):
request = iweb.IRequest(request)
txt = "%s %s HTTP/%d.%d\r\n" % (request.method, request.uri,
request.clientproto[0], request.clientproto[1])
l=[]
for name, valuelist in request.headers.getAllRawHeaders():
for value in valuelist:
l.append("%s: %s\r\n" % (name, value))
txt += ''.join(l)
return http.Response(
responsecode.OK,
{'content-type': http_headers.MimeType('message', 'http')},
txt)
def parsePOSTData(request, maxMem=100*1024, maxFields=1024,
maxSize=10*1024*1024):
"""
Parse data of a POST request.
@param request: the request to parse.
@type request: L{twisted.web2.http.Request}.
@param maxMem: maximum memory used during the parsing of the data.
@type maxMem: C{int}
@param maxFields: maximum number of form fields allowed.
@type maxFields: C{int}
@param maxSize: maximum size of file upload allowed.
@type maxSize: C{int}
@return: a deferred that will fire when the parsing is done. The deferred
itself doesn't hold a return value, the request is modified directly.
@rtype: C{defer.Deferred}
"""
if request.stream.length == 0:
return defer.succeed(None)
parser = None
ctype = request.headers.getHeader('content-type')
if ctype is None:
return defer.succeed(None)
def updateArgs(data):
args = data
request.args.update(args)
def updateArgsAndFiles(data):
args, files = data
request.args.update(args)
request.files.update(files)
def error(f):
f.trap(fileupload.MimeFormatError)
raise http.HTTPError(
http.StatusResponse(responsecode.BAD_REQUEST, str(f.value)))
if (ctype.mediaType == 'application'
and ctype.mediaSubtype == 'x-www-form-urlencoded'):
d = fileupload.parse_urlencoded(request.stream)
d.addCallbacks(updateArgs, error)
return d
elif (ctype.mediaType == 'multipart'
and ctype.mediaSubtype == 'form-data'):
boundary = ctype.params.get('boundary')
if boundary is None:
return defer.fail(http.HTTPError(
http.StatusResponse(
responsecode.BAD_REQUEST,
"Boundary not specified in Content-Type.")))
d = fileupload.parseMultipartFormData(request.stream, boundary,
maxMem, maxFields, maxSize)
d.addCallbacks(updateArgsAndFiles, error)
return d
else:
return defer.fail(http.HTTPError(
http.StatusResponse(
responsecode.BAD_REQUEST,
"Invalid content-type: %s/%s" % (
ctype.mediaType, ctype.mediaSubtype))))
class StopTraversal(object):
"""
Indicates to Request._handleSegment that it should stop handling
path segments.
"""
pass
class Request(http.Request):
"""
vars:
site
remoteAddr
scheme
host
port
path
params
querystring
args
files
prepath
postpath
@ivar path: The path only (arguments not included).
@ivar args: All of the arguments, including URL and POST arguments.
@type args: A mapping of strings (the argument names) to lists of values.
i.e., ?foo=bar&foo=baz&quux=spam results in
{'foo': ['bar', 'baz'], 'quux': ['spam']}.
"""
implements(iweb.IRequest)
site = None
_initialprepath = None
responseFilters = [rangefilter, preconditionfilter,
error.defaultErrorHandler, defaultHeadersFilter]
def __init__(self, *args, **kw):
if kw.has_key('site'):
self.site = kw['site']
del kw['site']
if kw.has_key('prepathuri'):
self._initialprepath = kw['prepathuri']
del kw['prepathuri']
# Copy response filters from the class
self.responseFilters = self.responseFilters[:]
self.files = {}
self.resources = []
http.Request.__init__(self, *args, **kw)
def addResponseFilter(self, f, atEnd=False):
if atEnd:
self.responseFilters.append(f)
else:
self.responseFilters.insert(0, f)
def unparseURL(self, scheme=None, host=None, port=None,
path=None, params=None, querystring=None, fragment=None):
"""Turn the request path into a url string. For any pieces of
the url that are not specified, use the value from the
request. The arguments have the same meaning as the same named
attributes of Request."""
if scheme is None: scheme = self.scheme
if host is None: host = self.host
if port is None: port = self.port
if path is None: path = self.path
if params is None: params = self.params
        if querystring is None: querystring = self.querystring
if fragment is None: fragment = ''
if port == http.defaultPortForScheme.get(scheme, 0):
hostport = host
else:
hostport = host + ':' + str(port)
return urlparse.urlunparse((
scheme, hostport, path,
params, querystring, fragment))
def _parseURL(self):
if self.uri[0] == '/':
# Can't use urlparse for request_uri because urlparse
# wants to be given an absolute or relative URI, not just
# an abs_path, and thus gets '//foo' wrong.
self.scheme = self.host = self.path = self.params = self.querystring = ''
if '?' in self.uri:
self.path, self.querystring = self.uri.split('?', 1)
else:
self.path = self.uri
if ';' in self.path:
self.path, self.params = self.path.split(';', 1)
else:
# It is an absolute uri, use standard urlparse
(self.scheme, self.host, self.path,
self.params, self.querystring, fragment) = urlparse.urlparse(self.uri)
if self.querystring:
self.args = cgi.parse_qs(self.querystring, True)
else:
self.args = {}
path = map(unquote, self.path[1:].split('/'))
if self._initialprepath:
# We were given an initial prepath -- this is for supporting
# CGI-ish applications where part of the path has already
# been processed
prepath = map(unquote, self._initialprepath[1:].split('/'))
if path[:len(prepath)] == prepath:
self.prepath = prepath
self.postpath = path[len(prepath):]
else:
self.prepath = []
self.postpath = path
else:
self.prepath = []
self.postpath = path
#print "_parseURL", self.uri, (self.uri, self.scheme, self.host, self.path, self.params, self.querystring)
def _fixupURLParts(self):
hostaddr, secure = self.chanRequest.getHostInfo()
if not self.scheme:
self.scheme = ('http', 'https')[secure]
if self.host:
self.host, self.port = http.splitHostPort(self.scheme, self.host)
else:
# If GET line wasn't an absolute URL
host = self.headers.getHeader('host')
if host:
self.host, self.port = http.splitHostPort(self.scheme, host)
else:
# When no hostname specified anywhere, either raise an
# error, or use the interface hostname, depending on
# protocol version
if self.clientproto >= (1,1):
raise http.HTTPError(responsecode.BAD_REQUEST)
self.host = hostaddr.host
self.port = hostaddr.port
def process(self):
"Process a request."
try:
self.checkExpect()
resp = self.preprocessRequest()
if resp is not None:
self._cbFinishRender(resp).addErrback(self._processingFailed)
return
self._parseURL()
self._fixupURLParts()
self.remoteAddr = self.chanRequest.getRemoteHost()
except:
failedDeferred = self._processingFailed(failure.Failure())
return
d = defer.Deferred()
d.addCallback(self._getChild, self.site.resource, self.postpath)
d.addCallback(lambda res, req: res.renderHTTP(req), self)
d.addCallback(self._cbFinishRender)
d.addErrback(self._processingFailed)
d.callback(None)
def preprocessRequest(self):
"""Do any request processing that doesn't follow the normal
resource lookup procedure. "OPTIONS *" is handled here, for
example. This would also be the place to do any CONNECT
processing."""
if self.method == "OPTIONS" and self.uri == "*":
response = http.Response(responsecode.OK)
response.headers.setHeader('allow', ('GET', 'HEAD', 'OPTIONS', 'TRACE'))
return response
# This is where CONNECT would go if we wanted it
return None
def _getChild(self, _, res, path, updatepaths=True):
"""Call res.locateChild, and pass the result on to _handleSegment."""
self.resources.append(res)
if not path:
return res
result = res.locateChild(self, path)
if isinstance(result, defer.Deferred):
return result.addCallback(self._handleSegment, res, path, updatepaths)
else:
return self._handleSegment(result, res, path, updatepaths)
def _handleSegment(self, result, res, path, updatepaths):
"""Handle the result of a locateChild call done in _getChild."""
newres, newpath = result
        # If the child resource is None then display an error page
if newres is None:
raise http.HTTPError(responsecode.NOT_FOUND)
# If we got a deferred then we need to call back later, once the
# child is actually available.
if isinstance(newres, defer.Deferred):
return newres.addCallback(
lambda actualRes: self._handleSegment(
(actualRes, newpath), res, path, updatepaths)
)
if path:
url = quote("/" + "/".join(path))
else:
url = "/"
if newpath is StopTraversal:
# We need to rethink how to do this.
#if newres is res:
self._rememberResource(res, url)
return res
#else:
# raise ValueError("locateChild must not return StopTraversal with a resource other than self.")
newres = iweb.IResource(newres)
if newres is res:
assert not newpath is path, "URL traversal cycle detected when attempting to locateChild %r from resource %r." % (path, res)
assert len(newpath) < len(path), "Infinite loop impending..."
if updatepaths:
# We found a Resource... update the request.prepath and postpath
for x in xrange(len(path) - len(newpath)):
self.prepath.append(self.postpath.pop(0))
child = self._getChild(None, newres, newpath, updatepaths=updatepaths)
self._rememberResource(child, url)
return child
_urlsByResource = weakref.WeakKeyDictionary()
def _rememberResource(self, resource, url):
"""
Remember the URL of a visited resource.
"""
self._urlsByResource[resource] = url
return resource
def urlForResource(self, resource):
"""
Looks up the URL of the given resource if this resource was found while
processing this request. Specifically, this includes the requested
resource, and resources looked up via L{locateResource}.
Note that a resource may be found at multiple URIs; if the same resource
is visited at more than one location while processing this request,
this method will return one of those URLs, but which one is not defined,
nor whether the same URL is returned in subsequent calls.
@param resource: the resource to find a URI for. This resource must
have been obtained from the request (ie. via its C{uri} attribute, or
through its C{locateResource} or C{locateChildResource} methods).
@return: a valid URL for C{resource} in this request.
@raise NoURLForResourceError: if C{resource} has no URL in this request
(because it was not obtained from the request).
"""
        url = self._urlsByResource.get(resource, None)
        if url is None:
            raise NoURLForResourceError(resource)
        return url
def locateResource(self, url):
"""
Looks up the resource with the given URL.
        @param url: The URL of the desired resource.
@return: a L{Deferred} resulting in the L{IResource} at the
given URL or C{None} if no such resource can be located.
@raise HTTPError: If C{url} is not a URL on the site that this
request is being applied to. The contained response will
have a status code of L{responsecode.BAD_GATEWAY}.
@raise HTTPError: If C{url} contains a query or fragment.
The contained response will have a status code of
L{responsecode.BAD_REQUEST}.
"""
if url is None: return None
#
# Parse the URL
#
(scheme, host, path, query, fragment) = urlsplit(url)
if query or fragment:
raise http.HTTPError(http.StatusResponse(
responsecode.BAD_REQUEST,
"URL may not contain a query or fragment: %s" % (url,)
))
# The caller shouldn't be asking a request on one server to lookup a
# resource on some other server.
if (scheme and scheme != self.scheme) or (host and host != self.headers.getHeader("host")):
raise http.HTTPError(http.StatusResponse(
responsecode.BAD_GATEWAY,
"URL is not on this site (%s://%s/): %s" % (scheme, self.headers.getHeader("host"), url)
))
segments = path.split("/")
assert segments[0] == "", "URL path didn't begin with '/': %s" % (path,)
segments = map(unquote, segments[1:])
def notFound(f):
f.trap(http.HTTPError)
if f.value.response.code != responsecode.NOT_FOUND:
return f
return None
d = defer.maybeDeferred(self._getChild, None, self.site.resource, segments, updatepaths=False)
d.addCallback(self._rememberResource, path)
d.addErrback(notFound)
return d
def locateChildResource(self, parent, childName):
"""
Looks up the child resource with the given name given the parent
resource. This is similar to locateResource(), but doesn't have to
start the lookup from the root resource, so it is potentially faster.
@param parent: the parent of the resource being looked up. This resource
must have been obtained from the request (ie. via its C{uri} attribute,
or through its C{locateResource} or C{locateChildResource} methods).
        @param childName: the name of the child of C{parent} to be looked up, relative to C{parent}.
@return: a L{Deferred} resulting in the L{IResource} at the
given URL or C{None} if no such resource can be located.
@raise NoURLForResourceError: if C{resource} was not obtained from the
request.
"""
if parent is None or childName is None:
return None
assert "/" not in childName, "Child name may not contain '/': %s" % (childName,)
parentURL = self.urlForResource(parent)
if not parentURL.endswith("/"):
parentURL += "/"
url = parentURL + quote(childName)
segment = childName
def notFound(f):
f.trap(http.HTTPError)
if f.value.response.code != responsecode.NOT_FOUND:
return f
return None
d = defer.maybeDeferred(self._getChild, None, parent, [segment], updatepaths=False)
d.addCallback(self._rememberResource, url)
d.addErrback(notFound)
return d
def _processingFailed(self, reason):
if reason.check(http.HTTPError) is not None:
# If the exception was an HTTPError, leave it alone
d = defer.succeed(reason.value.response)
else:
# Otherwise, it was a random exception, so give a
# ICanHandleException implementer a chance to render the page.
def _processingFailed_inner(reason):
handler = iweb.ICanHandleException(self, self)
return handler.renderHTTP_exception(self, reason)
d = defer.maybeDeferred(_processingFailed_inner, reason)
d.addCallback(self._cbFinishRender)
d.addErrback(self._processingReallyFailed, reason)
return d
def _processingReallyFailed(self, reason, origReason):
log.msg("Exception rendering error page:", isErr=1)
log.err(reason)
log.msg("Original exception:", isErr=1)
log.err(origReason)
body = ("<html><head><title>Internal Server Error</title></head>"
"<body><h1>Internal Server Error</h1>An error occurred rendering the requested page. Additionally, an error occured rendering the error page.</body></html>")
response = http.Response(
responsecode.INTERNAL_SERVER_ERROR,
{'content-type': http_headers.MimeType('text','html')},
body)
self.writeResponse(response)
def _cbFinishRender(self, result):
def filterit(response, f):
if (hasattr(f, 'handleErrors') or
(response.code >= 200 and response.code < 300)):
return f(self, response)
else:
return response
response = iweb.IResponse(result, None)
if response:
d = defer.Deferred()
for f in self.responseFilters:
d.addCallback(filterit, f)
d.addCallback(self.writeResponse)
d.callback(response)
return d
resource = iweb.IResource(result, None)
if resource:
self.resources.append(resource)
d = defer.maybeDeferred(resource.renderHTTP, self)
d.addCallback(self._cbFinishRender)
return d
raise TypeError("html is not a resource or a response")
def renderHTTP_exception(self, req, reason):
log.msg("Exception rendering:", isErr=1)
log.err(reason)
body = ("<html><head><title>Internal Server Error</title></head>"
"<body><h1>Internal Server Error</h1>An error occurred rendering the requested page. More information is available in the server log.</body></html>")
return http.Response(
responsecode.INTERNAL_SERVER_ERROR,
{'content-type': http_headers.MimeType('text','html')},
body)
class Site(object):
def __init__(self, resource):
"""Initialize.
"""
self.resource = iweb.IResource(resource)
def __call__(self, *args, **kwargs):
return Request(site=self, *args, **kwargs)
class NoURLForResourceError(RuntimeError):
def __init__(self, resource):
RuntimeError.__init__(self, "Resource %r has no URL in this request." % (resource,))
self.resource = resource
__all__ = ['Request', 'Site', 'StopTraversal', 'VERSION', 'defaultHeadersFilter', 'doTrace', 'parsePOSTData', 'preconditionfilter', 'NoURLForResourceError']
| Donkyhotay/MoonPy | twisted/web2/server.py | Python | gpl-3.0 | 21,209 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
import json
import httpretty
from botocore.stub import ANY, Stubber
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text
from weblate.checks.tests.test_checks import MockUnit
from weblate.machinery.apertium import ApertiumAPYTranslation
from weblate.machinery.aws import AWSTranslation
from weblate.machinery.baidu import BAIDU_API, BaiduTranslation
from weblate.machinery.base import MachineTranslationError
from weblate.machinery.deepl import DeepLTranslation
from weblate.machinery.dummy import DummyTranslation
from weblate.machinery.glosbe import GlosbeTranslation
from weblate.machinery.google import GOOGLE_API_ROOT, GoogleTranslation
from weblate.machinery.microsoft import MicrosoftCognitiveTranslation
from weblate.machinery.microsoftterminology import (
MST_API_URL,
MicrosoftTerminologyService,
)
from weblate.machinery.mymemory import MyMemoryTranslation
from weblate.machinery.netease import NETEASE_API_ROOT, NeteaseSightTranslation
from weblate.machinery.saptranslationhub import SAPTranslationHub
from weblate.machinery.tmserver import AMAGAMA_LIVE, AmagamaTranslation
from weblate.machinery.weblatetm import WeblateTranslation
from weblate.machinery.yandex import YandexTranslation
from weblate.machinery.youdao import YoudaoTranslation
from weblate.trans.models.unit import Unit
from weblate.trans.search import update_fulltext
from weblate.trans.tests.test_views import FixtureTestCase
from weblate.trans.tests.utils import get_test_file
from weblate.utils.state import STATE_TRANSLATED
GLOSBE_JSON = '''
{
"result":"ok",
"authors":{
"1":{"U":"http://en.wiktionary.org","id":1,"N":"en.wiktionary.org"}
},
"dest":"ces",
"phrase":"world",
"tuc":[
{
"authors":[1],
"meaningId":-311020347498476098,
"meanings":[
{
"text":"geographic terms (above country level)",
"language":"eng"
}
],
"phrase":{"text":"svět","language":"ces"}}],
"from":"eng"
}
'''.encode(
'utf-8'
)
MYMEMORY_JSON = '''
\r\n
{"responseData":{"translatedText":"svět"},"responseDetails":"",
"responseStatus":200,
"matches":[
{"id":"428492143","segment":"world","translation":"svět","quality":"",
"reference":"http://aims.fao.org/standards/agrovoc",
"usage-count":15,"subject":"Agriculture_and_Farming",
"created-by":"MyMemoryLoader",
"last-updated-by":"MyMemoryLoader","create-date":"2013-06-12 17:02:07",
"last-update-date":"2013-06-12 17:02:07","match":1},
{"id":"424273685","segment":"World view","translation":"Světový názor",
"quality":"80",
"reference":"//cs.wikipedia.org/wiki/Sv%C4%9Btov%C3%BD_n%C3%A1zor",
"usage-count":1,"subject":"All","created-by":"","last-updated-by":"Wikipedia",
"create-date":"2012-02-22 13:23:31","last-update-date":"2012-02-22 13:23:31",
"match":0.85},
{"id":"428493395","segment":"World Bank","translation":"IBRD","quality":"",
"reference":"http://aims.fao.org/standards/agrovoc",
"usage-count":1,"subject":"Agriculture_and_Farming",
"created-by":"MyMemoryLoader","last-updated-by":"MyMemoryLoader",
"create-date":"2013-06-12 17:02:07",
"last-update-date":"2013-06-12 17:02:07","match":0.84}
]}
'''.encode(
'utf-8'
)
AMAGAMA_JSON = '''
[{"source": "World", "quality": 80.0, "target": "Svět", "rank": 100.0}]
'''.encode(
'utf-8'
)
SAPTRANSLATIONHUB_JSON = '''
{
"units": [
{
"textType": "XFLD",
"domain": "BC",
"key": "LOGIN_USERNAME_FIELD",
"value": "User Name",
"translations": [
{
"language": "es",
"value": "Usuario",
"translationProvider": 0,
"qualityIndex": 100
}
]
}
]
}
'''.encode(
'utf-8'
)
TERMINOLOGY_LANGUAGES = '''
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
<s:Body>
<GetLanguagesResponse xmlns="http://api.terminology.microsoft.com/terminology">
<GetLanguagesResult xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
<Language>
<Code>af-za</Code>
</Language>
<Language>
<Code>am-et</Code>
</Language>
<Language>
<Code>ar-dz</Code>
</Language>
<Language>
<Code>ar-eg</Code>
</Language>
<Language>
<Code>ar-sa</Code>
</Language>
<Language>
<Code>as-in</Code>
</Language>
<Language>
<Code>az-latn-az</Code>
</Language>
<Language>
<Code>be-by</Code>
</Language>
<Language>
<Code>bg-bg</Code>
</Language>
<Language>
<Code>bn-bd</Code>
</Language>
<Language>
<Code>bn-in</Code>
</Language>
<Language>
<Code>bs-cyrl-ba</Code>
</Language>
<Language>
<Code>bs-latn-ba</Code>
</Language>
<Language>
<Code>ca-es</Code>
</Language>
<Language>
<Code>ca-es-valencia</Code>
</Language>
<Language>
<Code>chr-cher-us</Code>
</Language>
<Language>
<Code>cs-cz</Code>
</Language>
<Language>
<Code>en-us</Code>
</Language>
</GetLanguagesResult>
</GetLanguagesResponse>
</s:Body>
</s:Envelope>
'''.encode(
'utf-8'
)
TERMINOLOGY_TRANSLATE = '''
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
<s:Body>
<GetTranslationsResponse xmlns="http://api.terminology.microsoft.com/terminology">
<GetTranslationsResult xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
<Match>
<ConfidenceLevel>100</ConfidenceLevel>
<Count>8</Count>
<Definition i:nil="true"/>
<OriginalText>Hello World</OriginalText>
<Product i:nil="true"/>
<ProductVersion i:nil="true"/>
<Source i:nil="true"/>
<Translations>
<Translation>
<Language>cs-cz</Language>
<TranslatedText>Hello World</TranslatedText>
</Translation>
</Translations>
</Match>
<Match>
<ConfidenceLevel>100</ConfidenceLevel>
<Count>1</Count>
<Definition i:nil="true"/>
<OriginalText>Hello world.</OriginalText>
<Product i:nil="true"/>
<ProductVersion i:nil="true"/>
<Source i:nil="true"/>
<Translations>
<Translation>
<Language>cs-cz</Language>
<TranslatedText>Ahoj světe.</TranslatedText>
</Translation>
</Translations>
</Match>
</GetTranslationsResult>
</GetTranslationsResponse>
</s:Body>
</s:Envelope>
'''.encode(
'utf-8'
)
TERMINOLOGY_WDSL = get_test_file('microsoftterminology.wsdl')
DEEPL_RESPONSE = b'''{
"translations": [
{ "detected_source_language": "EN", "text": "Hallo" }
]
}'''
class MachineTranslationTest(TestCase):
"""Testing of machine translation core."""
def get_machine(self, cls, cache=False):
machine = cls()
machine.delete_cache()
machine.cache_translations = cache
return machine
def test_support(self):
machine_translation = self.get_machine(DummyTranslation)
machine_translation.get_supported_languages()
self.assertTrue(machine_translation.is_supported('en', 'cs'))
self.assertFalse(machine_translation.is_supported('en', 'de'))
def test_translate(self):
machine_translation = self.get_machine(DummyTranslation)
self.assertEqual(
machine_translation.translate('cs', 'Hello', MockUnit(), None), []
)
self.assertEqual(
len(machine_translation.translate('cs', 'Hello, world!', MockUnit(), None)),
2,
)
def test_translate_fallback(self):
machine_translation = self.get_machine(DummyTranslation)
self.assertEqual(
len(
machine_translation.translate(
'cs_CZ', 'Hello, world!', MockUnit(), None
)
),
2,
)
def test_translate_fallback_missing(self):
machine_translation = self.get_machine(DummyTranslation)
self.assertEqual(
machine_translation.translate('de_CZ', 'Hello, world!', MockUnit(), None),
[],
)
def assert_translate(self, machine, lang='cs', word='world', empty=False):
translation = machine.translate(lang, word, MockUnit(), None)
self.assertIsInstance(translation, list)
if not empty:
self.assertTrue(translation)
@httpretty.activate
def test_glosbe(self):
machine = self.get_machine(GlosbeTranslation)
httpretty.register_uri(
httpretty.GET, 'https://glosbe.com/gapi/translate', body=GLOSBE_JSON
)
self.assert_translate(machine)
self.assert_translate(machine, word='Zkouška')
@httpretty.activate
def test_glosbe_ratelimit(self):
machine = self.get_machine(GlosbeTranslation)
httpretty.register_uri(
httpretty.GET,
'https://glosbe.com/gapi/translate',
body=GLOSBE_JSON,
status=429,
)
with self.assertRaises(MachineTranslationError):
self.assert_translate(machine, empty=True)
self.assert_translate(machine, empty=True)
@httpretty.activate
def test_glosbe_ratelimit_set(self):
machine = self.get_machine(GlosbeTranslation)
machine.set_rate_limit()
httpretty.register_uri(
httpretty.GET, 'https://glosbe.com/gapi/translate', body=GLOSBE_JSON
)
self.assert_translate(machine, empty=True)
@override_settings(MT_MYMEMORY_EMAIL='[email protected]')
@httpretty.activate
def test_mymemory(self):
machine = self.get_machine(MyMemoryTranslation)
httpretty.register_uri(
httpretty.GET, 'https://mymemory.translated.net/api/get', body=MYMEMORY_JSON
)
self.assert_translate(machine)
self.assert_translate(machine, word='Zkouška')
def register_apertium_urls(self):
httpretty.register_uri(
httpretty.GET,
'http://apertium.example.com/listPairs',
body='{"responseStatus": 200, "responseData":'
'[{"sourceLanguage": "eng","targetLanguage": "spa"}]}',
)
httpretty.register_uri(
httpretty.GET,
'http://apertium.example.com/translate',
body='{"responseData":{"translatedText":"Mundial"},'
'"responseDetails":null,"responseStatus":200}',
)
@override_settings(MT_APERTIUM_APY='http://apertium.example.com/')
@httpretty.activate
def test_apertium_apy(self):
machine = self.get_machine(ApertiumAPYTranslation)
self.register_apertium_urls()
self.assert_translate(machine, 'es')
self.assert_translate(machine, 'es', word='Zkouška')
@override_settings(MT_MICROSOFT_COGNITIVE_KEY='KEY')
@httpretty.activate
def test_microsoft_cognitive(self):
machine = self.get_machine(MicrosoftCognitiveTranslation)
httpretty.register_uri(
httpretty.POST,
'https://api.cognitive.microsoft.com/sts/v1.0/issueToken'
'?Subscription-Key=KEY',
body='TOKEN',
)
httpretty.register_uri(
httpretty.GET,
'https://api.microsofttranslator.com/V2/Ajax.svc/'
'GetLanguagesForTranslate',
body='["en","cs"]',
)
httpretty.register_uri(
httpretty.GET,
'https://api.microsofttranslator.com/V2/Ajax.svc/Translate',
body='"svět"'.encode('utf-8'),
)
self.assert_translate(machine)
self.assert_translate(machine, word='Zkouška')
def register_microsoft_terminology(self, fail=False):
def request_callback_get(request, uri, headers):
if request.path == '/Terminology.svc?wsdl':
with open(TERMINOLOGY_WDSL, 'rb') as handle:
return (200, headers, handle.read())
if request.path.startswith('/Terminology.svc?wsdl='):
suffix = request.path[22:]
with open(TERMINOLOGY_WDSL + '.' + suffix, 'rb') as handle:
return (200, headers, handle.read())
if request.path.startswith('/Terminology.svc?xsd='):
suffix = request.path[21:]
with open(TERMINOLOGY_WDSL + '.' + suffix, 'rb') as handle:
return (200, headers, handle.read())
return (500, headers, '')
def request_callback_post(request, uri, headers):
if fail:
return (500, headers, '')
if b'GetLanguages' in request.body:
return (200, headers, TERMINOLOGY_LANGUAGES)
return (200, headers, TERMINOLOGY_TRANSLATE)
httpretty.register_uri(
httpretty.GET,
MST_API_URL,
body=request_callback_get,
content_type='text/xml',
)
httpretty.register_uri(
httpretty.POST,
MST_API_URL,
body=request_callback_post,
content_type='text/xml',
)
@httpretty.activate
def test_microsoft_terminology(self):
self.register_microsoft_terminology()
machine = self.get_machine(MicrosoftTerminologyService)
self.assert_translate(machine)
self.assert_translate(machine, lang='cs_CZ')
@httpretty.activate
def test_microsoft_terminology_error(self):
self.register_microsoft_terminology(True)
machine = self.get_machine(MicrosoftTerminologyService)
machine.get_supported_languages()
self.assertEqual(machine.supported_languages, [])
with self.assertRaises(MachineTranslationError):
self.assert_translate(machine, empty=True)
@override_settings(MT_GOOGLE_KEY='KEY')
@httpretty.activate
def test_google(self):
machine = self.get_machine(GoogleTranslation)
httpretty.register_uri(
httpretty.GET,
GOOGLE_API_ROOT + 'languages',
body=json.dumps(
{
'data': {
'languages': [
{'language': 'en'},
{'language': 'iw'},
{'language': 'cs'},
]
}
}
),
)
httpretty.register_uri(
httpretty.GET,
GOOGLE_API_ROOT,
body=b'{"data":{"translations":[{"translatedText":"svet"}]}}',
)
self.assert_translate(machine)
self.assert_translate(machine, lang='he')
self.assert_translate(machine, word='Zkouška')
@override_settings(MT_GOOGLE_KEY='KEY')
@httpretty.activate
def test_google_invalid(self):
"""Test handling of server failure."""
machine = self.get_machine(GoogleTranslation)
httpretty.register_uri(
httpretty.GET, GOOGLE_API_ROOT + 'languages', body='', status=500
)
httpretty.register_uri(httpretty.GET, GOOGLE_API_ROOT, body='', status=500)
machine.get_supported_languages()
self.assertEqual(machine.supported_languages, [])
with self.assertRaises(MachineTranslationError):
self.assert_translate(machine, empty=True)
@httpretty.activate
def test_amagama_nolang(self):
machine = self.get_machine(AmagamaTranslation)
httpretty.register_uri(
httpretty.GET, AMAGAMA_LIVE + '/languages/', body='', status=404
)
httpretty.register_uri(
httpretty.GET, AMAGAMA_LIVE + '/en/cs/unit/world', body=AMAGAMA_JSON
)
httpretty.register_uri(
httpretty.GET, AMAGAMA_LIVE + '/en/cs/unit/Zkou%C5%A1ka', body=AMAGAMA_JSON
)
self.assert_translate(machine)
self.assert_translate(machine, word='Zkouška')
@override_settings(DEBUG=True)
def test_amagama_nolang_debug(self):
self.test_amagama_nolang()
@httpretty.activate
def test_amagama(self):
machine = self.get_machine(AmagamaTranslation)
httpretty.register_uri(
httpretty.GET,
AMAGAMA_LIVE + '/languages/',
body='{"sourceLanguages": ["en"], "targetLanguages": ["cs"]}',
)
httpretty.register_uri(
httpretty.GET, AMAGAMA_LIVE + '/en/cs/unit/world', body=AMAGAMA_JSON
)
httpretty.register_uri(
httpretty.GET, AMAGAMA_LIVE + '/en/cs/unit/Zkou%C5%A1ka', body=AMAGAMA_JSON
)
self.assert_translate(machine)
self.assert_translate(machine, word='Zkouška')
@override_settings(MT_YANDEX_KEY='KEY')
@httpretty.activate
def test_yandex(self):
machine = self.get_machine(YandexTranslation)
httpretty.register_uri(
httpretty.GET,
'https://translate.yandex.net/api/v1.5/tr.json/getLangs',
body=b'{"dirs": ["en-cs"]}',
)
httpretty.register_uri(
httpretty.GET,
'https://translate.yandex.net/api/v1.5/tr.json/translate',
body=b'{"code": 200, "lang": "en-cs", "text": ["svet"]}',
)
self.assert_translate(machine)
self.assert_translate(machine, word='Zkouška')
@override_settings(MT_YANDEX_KEY='KEY')
@httpretty.activate
def test_yandex_error(self):
machine = self.get_machine(YandexTranslation)
httpretty.register_uri(
httpretty.GET,
'https://translate.yandex.net/api/v1.5/tr.json/getLangs',
body=b'{"code": 401}',
)
httpretty.register_uri(
httpretty.GET,
'https://translate.yandex.net/api/v1.5/tr.json/translate',
body=b'{"code": 401, "message": "Invalid request"}',
)
machine.get_supported_languages()
self.assertEqual(machine.supported_languages, [])
with self.assertRaises(MachineTranslationError):
self.assert_translate(machine, empty=True)
@override_settings(MT_YOUDAO_ID='id', MT_YOUDAO_SECRET='secret')
@httpretty.activate
def test_youdao(self):
machine = self.get_machine(YoudaoTranslation)
httpretty.register_uri(
httpretty.GET,
'https://openapi.youdao.com/api',
body=b'{"errorCode": 0, "translation": ["hello"]}',
)
self.assert_translate(machine, lang='ja')
self.assert_translate(machine, lang='ja', word='Zkouška')
@override_settings(MT_YOUDAO_ID='id', MT_YOUDAO_SECRET='secret')
@httpretty.activate
def test_youdao_error(self):
machine = self.get_machine(YoudaoTranslation)
httpretty.register_uri(
httpretty.GET, 'https://openapi.youdao.com/api', body=b'{"errorCode": 1}'
)
with self.assertRaises(MachineTranslationError):
self.assert_translate(machine, lang='ja', empty=True)
@override_settings(MT_NETEASE_KEY='key', MT_NETEASE_SECRET='secret')
@httpretty.activate
def test_netease(self):
machine = self.get_machine(NeteaseSightTranslation)
httpretty.register_uri(
httpretty.POST,
NETEASE_API_ROOT,
body=b'''
{
"success": "true",
"relatedObject": {
"content": [
{
"transContent": "hello"
}
]
}
}''',
)
self.assert_translate(machine, lang='zh')
self.assert_translate(machine, lang='zh', word='Zkouška')
@override_settings(MT_NETEASE_KEY='key', MT_NETEASE_SECRET='secret')
@httpretty.activate
def test_netease_error(self):
machine = self.get_machine(NeteaseSightTranslation)
httpretty.register_uri(
httpretty.POST, NETEASE_API_ROOT, body=b'{"success": "false"}'
)
with self.assertRaises(MachineTranslationError):
self.assert_translate(machine, lang='zh', empty=True)
@override_settings(MT_BAIDU_ID='id', MT_BAIDU_SECRET='secret')
@httpretty.activate
def test_baidu(self):
machine = self.get_machine(BaiduTranslation)
httpretty.register_uri(
httpretty.GET,
BAIDU_API,
body=b'{"trans_result": [{"src": "hello", "dst": "hallo"}]}',
)
self.assert_translate(machine, lang='ja')
self.assert_translate(machine, lang='ja', word='Zkouška')
@override_settings(MT_BAIDU_ID='id', MT_BAIDU_SECRET='secret')
@httpretty.activate
def test_baidu_error(self):
machine = self.get_machine(BaiduTranslation)
httpretty.register_uri(
httpretty.GET, BAIDU_API, body=b'{"error_code": 1, "error_msg": "Error"}'
)
with self.assertRaises(MachineTranslationError):
self.assert_translate(machine, lang='ja', empty=True)
@override_settings(MT_SAP_BASE_URL='http://sth.example.com/')
@override_settings(MT_SAP_SANDBOX_APIKEY='http://sandbox.example.com')
@override_settings(MT_SAP_USERNAME='username')
@override_settings(MT_SAP_PASSWORD='password')
@httpretty.activate
def test_saptranslationhub(self):
machine = self.get_machine(SAPTranslationHub)
httpretty.register_uri(
httpretty.GET,
'http://sth.example.com/languages',
body=json.dumps(
{
'languages': [
{'id': 'en', 'name': 'English', 'bcp-47-code': 'en'},
{'id': 'cs', 'name': 'Czech', 'bcp-47-code': 'cs'},
]
}
),
status=200,
)
httpretty.register_uri(
httpretty.POST,
'http://sth.example.com/translate',
body=SAPTRANSLATIONHUB_JSON,
status=200,
content_type='text/json',
)
self.assert_translate(machine)
self.assert_translate(machine, word='Zkouška')
@override_settings(MT_SAP_BASE_URL='http://sth.example.com/')
@httpretty.activate
def test_saptranslationhub_invalid(self):
machine = self.get_machine(SAPTranslationHub)
httpretty.register_uri(
httpretty.GET, 'http://sth.example.com/languages', body='', status=500
)
httpretty.register_uri(
httpretty.POST, 'http://sth.example.com/translate', body='', status=500
)
machine.get_supported_languages()
self.assertEqual(machine.supported_languages, [])
with self.assertRaises(MachineTranslationError):
self.assert_translate(machine, empty=True)
@override_settings(MT_DEEPL_KEY='KEY')
@httpretty.activate
def test_deepl(self):
machine = self.get_machine(DeepLTranslation)
httpretty.register_uri(
httpretty.POST, 'https://api.deepl.com/v1/translate', body=DEEPL_RESPONSE
)
self.assert_translate(machine, lang='de', word='Hello')
@override_settings(MT_DEEPL_KEY='KEY')
@httpretty.activate
def test_cache(self):
machine = self.get_machine(DeepLTranslation, True)
httpretty.register_uri(
httpretty.POST, 'https://api.deepl.com/v1/translate', body=DEEPL_RESPONSE
)
# Fetch from service
self.assert_translate(machine, lang='de', word='Hello')
self.assertTrue(httpretty.has_request())
httpretty.reset()
# Fetch from cache
self.assert_translate(machine, lang='de', word='Hello')
self.assertFalse(httpretty.has_request())
@override_settings(MT_AWS_REGION='us-west-2')
def test_aws(self):
machine = self.get_machine(AWSTranslation)
with Stubber(machine.client) as stubber:
stubber.add_response(
'translate_text',
{
'TranslatedText': 'Hallo',
'SourceLanguageCode': 'en',
'TargetLanguageCode': 'de',
},
{'SourceLanguageCode': ANY, 'TargetLanguageCode': ANY, 'Text': ANY},
)
self.assert_translate(machine, lang='de', word='Hello')
@override_settings(MT_APERTIUM_APY='http://apertium.example.com/')
@httpretty.activate
def test_languages_cache(self):
machine = self.get_machine(ApertiumAPYTranslation, True)
self.register_apertium_urls()
self.assert_translate(machine, 'es')
self.assert_translate(machine, 'es', word='Zkouška')
self.assertTrue(httpretty.has_request())
httpretty.reset()
# New instance should use cached languages
machine = ApertiumAPYTranslation()
self.assert_translate(machine, 'es')
self.assertFalse(httpretty.has_request())
class WeblateTranslationTest(FixtureTestCase):
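    """Testing of the Weblate internal translation machinery."""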
def test_empty(self):
machine = WeblateTranslation()
unit = Unit.objects.all()[0]
results = machine.translate(
unit.translation.language.code,
unit.get_source_plurals()[0],
unit,
self.user,
)
self.assertEqual(results, [])
def test_exists(self):
unit = Unit.objects.all()[0]
# Create fake fulltext entry
other = unit.translation.unit_set.exclude(pk=unit.pk)[0]
other.source = unit.source
other.target = 'Preklad'
other.state = STATE_TRANSLATED
other.save()
update_fulltext(
None,
pk=other.pk,
source=force_text(unit.source),
context=force_text(unit.context),
location=force_text(unit.location),
target=force_text(other.target),
comment='',
language=force_text(unit.translation.language.code),
)
# Perform lookup
machine = WeblateTranslation()
results = machine.translate(
unit.translation.language.code,
unit.get_source_plurals()[0],
unit,
self.user,
)
self.assertNotEqual(results, [])
| dontnod/weblate | weblate/machinery/tests.py | Python | gpl-3.0 | 27,388 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.api.volume import base
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
LOG = logging.getLogger(__name__)
class VolumeRetypeWithMigrationTest(base.BaseVolumeAdminTest):
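    """Test volume retype with migration between two storage backends."""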
@classmethod
def skip_checks(cls):
super(VolumeRetypeWithMigrationTest, cls).skip_checks()
if not CONF.volume_feature_enabled.multi_backend:
raise cls.skipException("Cinder multi-backend feature disabled.")
if len(set(CONF.volume.backend_names)) < 2:
raise cls.skipException("Requires at least two different "
"backend names")
@classmethod
def resource_setup(cls):
super(VolumeRetypeWithMigrationTest, cls).resource_setup()
        # Read the source and destination backend names from the configured list.
backend_src = CONF.volume.backend_names[0]
backend_dst = CONF.volume.backend_names[1]
extra_specs_src = {"volume_backend_name": backend_src}
extra_specs_dst = {"volume_backend_name": backend_dst}
src_vol_type = cls.create_volume_type(extra_specs=extra_specs_src)
cls.dst_vol_type = cls.create_volume_type(extra_specs=extra_specs_dst)
cls.src_vol = cls.create_volume(volume_type=src_vol_type['name'])
@classmethod
def resource_cleanup(cls):
# When retyping a volume, Cinder creates an internal volume in the
# target backend. The volume in the source backend is deleted after
        # the migration, so we need to wait for Cinder to delete this volume
# before deleting the types we've created.
# This list should return 2 volumes until the copy and cleanup
# process is finished.
fetched_list = cls.admin_volume_client.list_volumes(
params={'all_tenants': True,
'display_name': cls.src_vol['name']})['volumes']
for fetched_vol in fetched_list:
if fetched_vol['id'] != cls.src_vol['id']:
# This is the Cinder internal volume
LOG.debug('Waiting for internal volume %s deletion',
fetched_vol['id'])
cls.admin_volume_client.wait_for_resource_deletion(
fetched_vol['id'])
break
super(VolumeRetypeWithMigrationTest, cls).resource_cleanup()
@decorators.idempotent_id('a1a41f3f-9dad-493e-9f09-3ff197d477cd')
def test_available_volume_retype_with_migration(self):
keys_with_no_change = ('id', 'size', 'description', 'name', 'user_id',
'os-vol-tenant-attr:tenant_id')
keys_with_change = ('volume_type', 'os-vol-host-attr:host')
volume_source = self.admin_volume_client.show_volume(
self.src_vol['id'])['volume']
self.volumes_client.retype_volume(
self.src_vol['id'],
new_type=self.dst_vol_type['name'],
migration_policy='on-demand')
waiters.wait_for_volume_retype(self.volumes_client, self.src_vol['id'],
self.dst_vol_type['name'])
volume_dest = self.admin_volume_client.show_volume(
self.src_vol['id'])['volume']
# Check the volume information after the migration.
self.assertEqual('success',
volume_dest['os-vol-mig-status-attr:migstat'])
self.assertEqual('success', volume_dest['migration_status'])
for key in keys_with_no_change:
self.assertEqual(volume_source[key], volume_dest[key])
for key in keys_with_change:
self.assertNotEqual(volume_source[key], volume_dest[key])
| Juniper/tempest | tempest/api/volume/admin/test_volume_retype_with_migration.py | Python | apache-2.0 | 4,277 |
n = int(input())  # number of teams; the round-robin below has n*(n-1)/2 results
score = [list(map(lambda x: int(x) - 1 , input().split())) for _ in range(int(n*(n-1)/2))]
points = [0 for _ in range(n)]
for a,b,c,d in score:
if c > d:
points[a] += 3
elif c < d:
points[b] += 3
else:
points[a] += 1
points[b] += 1
rank = sorted(points)
rank.reverse()
for p in points:
print(rank.index(p) + 1)
| knuu/competitive-programming | aoj/2/AOJ0566.py | Python | mit | 373 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8590A import *
class agilent8593A(agilentBase8590A):
"Agilent 8593A IVI spectrum analyzer driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'HP8593A')
super(agilent8593A, self).__init__(*args, **kwargs)
self._input_impedance = 50
self._frequency_low = 9e3
self._frequency_high = 22e9
| python-ivi/python-ivi | ivi/agilent/agilent8593A.py | Python | mit | 1,518 |
# Generated by Django 2.0.6 on 2018-06-29 11:04
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('library', '0009_product_summary'),
]
operations = [
migrations.AddField(
model_name='achievement',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created at'),
preserve_default=False,
),
migrations.AddField(
model_name='achievement',
name='updated_at',
field=models.DateTimeField(auto_now=True, verbose_name='updated at'),
),
migrations.AddField(
model_name='product',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created at'),
preserve_default=False,
),
migrations.AddField(
model_name='product',
name='updated_at',
field=models.DateTimeField(auto_now=True, verbose_name='updated at'),
),
]
| xavierdutreilh/robots.midgar.fr | services/backbone/backbone/library/migrations/0010_auto_20180629_1304.py | Python | mit | 1,165 |
#!/usr/bin/env pmpython
#
# Copyright (C) 2020 Red Hat.
# Copyright (C) 2017 Alperen Karaoglu.
# Copyright (C) 2016 Sitaram Shelke.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# pylint: disable=bad-whitespace,too-many-arguments,too-many-lines
# pylint: disable=redefined-outer-name,unnecessary-lambda
#
import sys
import re
import time
from pcp import pmcc
from pcp import pmapi
process_state_info = {}
# Metric list to be fetched
PIDSTAT_METRICS = ['kernel.uname.nodename', 'kernel.uname.release', 'kernel.uname.sysname',
'kernel.uname.machine','hinv.ncpu','proc.psinfo.pid','proc.nprocs','proc.psinfo.utime',
'proc.psinfo.stime','proc.psinfo.guest_time','proc.psinfo.processor',
'proc.id.uid','proc.psinfo.cmd','kernel.all.cpu.user','kernel.all.cpu.vuser',
'kernel.all.cpu.sys','kernel.all.cpu.guest','kernel.all.cpu.nice','kernel.all.cpu.idle',
'proc.id.uid_nm', 'proc.psinfo.rt_priority', 'proc.psinfo.policy', 'proc.psinfo.minflt',
'proc.psinfo.maj_flt', 'proc.psinfo.vsize', 'proc.psinfo.rss', 'mem.physmem',
'proc.memory.vmstack']
PIDSTAT_METRICS_B = ['kernel.uname.nodename', 'kernel.uname.release', 'kernel.uname.sysname',
'kernel.uname.machine','hinv.ncpu','proc.psinfo.pid','proc.nprocs','proc.psinfo.utime',
'proc.psinfo.stime','proc.psinfo.guest_time','proc.psinfo.processor',
'proc.id.uid','proc.psinfo.cmd','kernel.all.cpu.user','kernel.all.cpu.vuser',
'kernel.all.cpu.sys','kernel.all.cpu.guest','kernel.all.cpu.nice','kernel.all.cpu.idle',
'proc.id.uid_nm', 'proc.psinfo.rt_priority', 'proc.psinfo.policy', 'proc.psinfo.minflt',
'proc.psinfo.maj_flt', 'proc.psinfo.vsize', 'proc.psinfo.rss', 'mem.physmem',
'proc.memory.vmstack','proc.psinfo.sname','proc.psinfo.start_time','proc.psinfo.wchan_s']
PIDSTAT_METRICS_L = ['kernel.uname.nodename', 'kernel.uname.release', 'kernel.uname.sysname',
'kernel.uname.machine','hinv.ncpu','proc.psinfo.pid','proc.nprocs','proc.psinfo.utime',
'proc.psinfo.stime','proc.psinfo.guest_time','proc.psinfo.processor',
'proc.id.uid','proc.psinfo.cmd','kernel.all.cpu.user','kernel.all.cpu.vuser',
'kernel.all.cpu.sys','kernel.all.cpu.guest','kernel.all.cpu.nice','kernel.all.cpu.idle',
'proc.id.uid_nm', 'proc.psinfo.rt_priority', 'proc.psinfo.policy', 'proc.psinfo.minflt',
'proc.psinfo.maj_flt', 'proc.psinfo.vsize', 'proc.psinfo.rss', 'mem.physmem',
'proc.memory.vmstack','proc.psinfo.sname','proc.psinfo.start_time','proc.psinfo.wchan_s',
'proc.psinfo.psargs']
# Separate metric lists (PIDSTAT_METRICS_B and PIDSTAT_METRICS_L) are defined so that
# the extra metrics they add are not flagged as missing in existing archives that were
# recorded with only the base PIDSTAT_METRICS set.
SCHED_POLICY = ['NORMAL','FIFO','RR','BATCH','','IDLE','DEADLINE']
class StdoutPrinter:
def Print(self, args):
print(args)
# After fetching non singular metric values, create a mapping of instance id
# to instance value rather than instance name to instance value.
# The reason is, in PCP, instance names require a separate pmGetIndom() request
# and some of the names may not be available.
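# For example, a single netValues entry for a per-process metric is a tuple of
# (instance, name, value); the lambdas below keep only instance.inst as the key,
# yielding a dict such as {1234: 42, 5678: 17} for pid-indexed metrics.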
class ReportingMetricRepository:
def __init__(self,group):
self.group = group
self.current_cached_values = {}
self.previous_cached_values = {}
def __fetch_current_values(self,metric,instance):
if instance:
return dict(map(lambda x: (x[0].inst, x[2]), self.group[metric].netValues))
else:
return self.group[metric].netValues[0][2]
def __fetch_previous_values(self,metric,instance):
if instance:
return dict(map(lambda x: (x[0].inst, x[2]), self.group[metric].netPrevValues))
else:
return self.group[metric].netPrevValues[0][2]
def current_value(self, metric, instance):
if not metric in self.group:
return None
if instance:
if self.current_cached_values.get(metric, None) is None:
lst = self.__fetch_current_values(metric,instance)
self.current_cached_values[metric] = lst
return self.current_cached_values[metric].get(instance,None)
else:
if self.current_cached_values.get(metric, None) is None:
self.current_cached_values[metric] = self.__fetch_current_values(metric,instance)
return self.current_cached_values.get(metric, None)
def previous_value(self, metric, instance):
if not metric in self.group:
return None
if instance:
if self.previous_cached_values.get(metric, None) is None:
lst = self.__fetch_previous_values(metric,instance)
self.previous_cached_values[metric] = lst
return self.previous_cached_values[metric].get(instance,None)
else:
if self.previous_cached_values.get(metric, None) is None:
self.previous_cached_values[metric] = self.__fetch_previous_values(metric,instance)
return self.previous_cached_values.get(metric, None)
def current_values(self, metric_name):
if self.group.get(metric_name, None) is None:
return None
if self.current_cached_values.get(metric_name, None) is None:
self.current_cached_values[metric_name] = self.__fetch_current_values(metric_name,True)
return self.current_cached_values.get(metric_name, None)
def previous_values(self, metric_name):
if self.group.get(metric_name, None) is None:
return None
if self.previous_cached_values.get(metric_name, None) is None:
self.previous_cached_values[metric_name] = self.__fetch_previous_values(metric_name,True)
return self.previous_cached_values.get(metric_name, None)
class ProcessCpuUsage:
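    """Per-process CPU usage (usr/system/guest percentages) over one sample interval."""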
def __init__(self, instance, delta_time, metrics_repository):
self.instance = instance
self.__delta_time = delta_time
self.__metric_repository = metrics_repository
def user_percent(self):
c_usertime = self.__metric_repository.current_value('proc.psinfo.utime', self.instance)
p_usertime = self.__metric_repository.previous_value('proc.psinfo.utime', self.instance)
if c_usertime is not None and p_usertime is not None:
percent_of_time = 100 * float(c_usertime - p_usertime) / float(1000 * self.__delta_time)
return float("%.2f"%percent_of_time)
else:
return None
def guest_percent(self):
c_guesttime = self.__metric_repository.current_value('proc.psinfo.guest_time', self.instance)
p_guesttime = self.__metric_repository.previous_value('proc.psinfo.guest_time', self.instance)
if c_guesttime is not None and p_guesttime is not None:
percent_of_time = 100 * float(c_guesttime - p_guesttime) / float(1000 * self.__delta_time)
return float("%.2f"%percent_of_time)
else:
return None
def system_percent(self):
c_systemtime = self.__metric_repository.current_value('proc.psinfo.stime', self.instance)
p_systemtime = self.__metric_repository.previous_value('proc.psinfo.stime', self.instance)
if c_systemtime is not None and p_systemtime is not None:
percent_of_time = 100 * float(c_systemtime - p_systemtime) / float(1000 * self.__delta_time)
return float("%.2f"%percent_of_time)
else:
return None
def total_percent(self):
if self.user_percent() is not None and self.guest_percent() is not None and self.system_percent() is not None:
return float("%.2f"%(self.user_percent()+self.guest_percent()+self.system_percent()))
else:
return None
def pid(self):
return self.__metric_repository.current_value('proc.psinfo.pid', self.instance)
def process_name(self):
return self.__metric_repository.current_value('proc.psinfo.cmd', self.instance)
def process_name_with_args(self):
return self.__metric_repository.current_value('proc.psinfo.psargs', self.instance)
def cpu_number(self):
return self.__metric_repository.current_value('proc.psinfo.processor', self.instance)
def user_id(self):
return self.__metric_repository.current_value('proc.id.uid', self.instance)
def user_name(self):
return self.__metric_repository.current_value('proc.id.uid_nm', self.instance)
class CpuUsage:
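    """Build a ProcessCpuUsage object for every pid in the current sample."""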
def __init__(self, metric_repository):
self.__metric_repository = metric_repository
def get_processes(self, delta_time):
return map(lambda pid: (ProcessCpuUsage(pid,delta_time,self.__metric_repository)), self.__pids())
def __pids(self):
pid_dict = self.__metric_repository.current_values('proc.psinfo.pid')
return sorted(pid_dict.values())
class ProcessPriority:
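    """Per-process realtime priority and scheduling policy information."""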
def __init__(self, instance, metrics_repository):
self.instance = instance
self.__metric_repository = metrics_repository
def pid(self):
return self.__metric_repository.current_value('proc.psinfo.pid', self.instance)
def user_id(self):
return self.__metric_repository.current_value('proc.id.uid', self.instance)
def process_name(self):
return self.__metric_repository.current_value('proc.psinfo.cmd', self.instance)
def process_name_with_args(self):
return self.__metric_repository.current_value('proc.psinfo.psargs', self.instance)
def priority(self):
return self.__metric_repository.current_value('proc.psinfo.rt_priority', self.instance)
def policy_int(self):
return self.__metric_repository.current_value('proc.psinfo.policy', self.instance)
def policy(self):
policy_int = self.__metric_repository.current_value('proc.psinfo.policy', self.instance)
if policy_int is not None:
return SCHED_POLICY[policy_int]
return None
def user_name(self):
return self.__metric_repository.current_value('proc.id.uid_nm', self.instance)
class CpuProcessPriorities:
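    """Build a ProcessPriority object for every pid in the current sample."""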
def __init__(self, metric_repository):
self.__metric_repository = metric_repository
def get_processes(self):
return map((lambda pid: (ProcessPriority(pid,self.__metric_repository))), self.__pids())
def __pids(self):
pid_dict = self.__metric_repository.current_values('proc.psinfo.pid')
return sorted(pid_dict.values())
class ProcessMemoryUtil:
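    """Per-process page fault rates and memory utilization over one sample interval."""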
def __init__(self, instance, delta_time, metric_repository):
self.instance = instance
self.__metric_repository = metric_repository
self.delta_time = delta_time
def pid(self):
return self.__metric_repository.current_value('proc.psinfo.pid', self.instance)
def user_id(self):
return self.__metric_repository.current_value('proc.id.uid', self.instance)
def process_name(self):
return self.__metric_repository.current_value('proc.psinfo.cmd', self.instance)
def process_name_with_args(self):
return self.__metric_repository.current_value('proc.psinfo.psargs', self.instance)
def minflt(self):
c_min_flt = self.__metric_repository.current_value('proc.psinfo.minflt', self.instance)
p_min_flt = self.__metric_repository.previous_value('proc.psinfo.minflt', self.instance)
if c_min_flt is not None and p_min_flt is not None:
return float("%.2f" % ((c_min_flt - p_min_flt)/self.delta_time))
else:
return None
def majflt(self):
c_maj_flt = self.__metric_repository.current_value('proc.psinfo.maj_flt', self.instance)
p_maj_flt = self.__metric_repository.previous_value('proc.psinfo.maj_flt', self.instance)
if c_maj_flt is not None and p_maj_flt is not None:
return float("%.2f" % ((c_maj_flt - p_maj_flt)/self.delta_time))
else:
return None
def vsize(self):
return self.__metric_repository.current_value('proc.psinfo.vsize', self.instance)
def rss(self):
return self.__metric_repository.current_value('proc.psinfo.rss', self.instance)
def mem(self):
total_mem = self.__metric_repository.current_value('mem.physmem', None)
rss = self.__metric_repository.current_value('proc.psinfo.rss', self.instance)
if total_mem is not None and rss is not None:
return float("%.2f" % (100*float(rss)/total_mem))
else:
return None
def user_name(self):
return self.__metric_repository.current_value('proc.id.uid_nm', self.instance)
class CpuProcessMemoryUtil:
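    """Build a ProcessMemoryUtil object for every pid in the current sample."""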
def __init__(self, metric_repository):
self.__metric_repository = metric_repository
def get_processes(self, delta_time):
return map((lambda pid: (ProcessMemoryUtil(pid, delta_time, self.__metric_repository))), self.__pids())
def __pids(self):
pid_dict = self.__metric_repository.current_values('proc.psinfo.pid')
return sorted(pid_dict.values())
class ProcessStackUtil:
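    """Per-process stack size, from proc.memory.vmstack."""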
def __init__(self, instance, metric_repository):
self.instance = instance
self.__metric_repository = metric_repository
def pid(self):
return self.__metric_repository.current_value('proc.psinfo.pid', self.instance)
def user_id(self):
return self.__metric_repository.current_value('proc.id.uid', self.instance)
def process_name(self):
return self.__metric_repository.current_value('proc.psinfo.cmd', self.instance)
def process_name_with_args(self):
return self.__metric_repository.current_value('proc.psinfo.psargs', self.instance)
def stack_size(self):
return self.__metric_repository.current_value('proc.memory.vmstack', self.instance)
def user_name(self):
return self.__metric_repository.current_value('proc.id.uid_nm', self.instance)
class CpuProcessStackUtil:
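    """Build a ProcessStackUtil object for every pid in the current sample."""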
def __init__(self, metric_repository):
self.__metric_repository = metric_repository
def get_processes(self):
return map((lambda pid: (ProcessStackUtil(pid, self.__metric_repository))), self.__pids())
def __pids(self):
pid_dict = self.__metric_repository.current_values('proc.psinfo.pid')
return sorted(pid_dict.values())
# ==============================================================================
# process state reporting
class ProcessState:
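    """Per-process state, wait channel and per-interval CPU time deltas."""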
# def __init__(self, instance, metric_repository):
def __init__(self, instance, delta_time, metric_repository):
self.instance = instance
self.__metric_repository = metric_repository
def pid(self):
return self.__metric_repository.current_value('proc.psinfo.pid', self.instance)
def user_id(self):
return self.__metric_repository.current_value('proc.id.uid', self.instance)
def process_name(self):
return self.__metric_repository.current_value('proc.psinfo.cmd', self.instance)
def process_name_with_args(self):
return self.__metric_repository.current_value('proc.psinfo.psargs', self.instance)
def s_name(self):
return self.__metric_repository.current_value('proc.psinfo.sname', self.instance)
def start_time(self):
return self.__metric_repository.current_value('proc.psinfo.start_time', self.instance)
def wchan_s(self):
return self.__metric_repository.current_value('proc.psinfo.wchan_s', self.instance)
def user_name(self):
return self.__metric_repository.current_value('proc.id.uid_nm', self.instance)
#def process_blocked(self):
# return self.__metric_repository.current_value('proc.psinfo.blocked', self.instance)
def utime(self):
#return self.__metric_repository.current_value('proc.psinfo.utime', self.instance)
c_usertime = self.__metric_repository.current_value('proc.psinfo.utime', self.instance)
p_usertime = self.__metric_repository.previous_value('proc.psinfo.utime', self.instance)
# sometimes the previous_value seems to be Nonetype, not sure why
if p_usertime is None: # print a '?' here
#return c_usertime
return '?'
else:
return c_usertime - p_usertime
def stime(self):
c_systime = self.__metric_repository.current_value('proc.psinfo.stime', self.instance)
p_systime = self.__metric_repository.previous_value('proc.psinfo.stime', self.instance)
# sometimes the previous_value seems to be Nonetype, not sure why
if p_systime is None: # print a '?' here
return '?'
else:
return c_systime - p_systime
class CpuProcessState:
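    """Build a ProcessState object for every pid in the current sample."""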
def __init__(self, metric_repository):
self.__metric_repository = metric_repository
# def get_processes(self):
# return map((lambda pid: (ProcessState(pid, self.__metric_repository))), self.__pids())
def get_processes(self, delta_time):
# return map(lambda pid: (ProcessState(pid,delta_time,self.__metric_repository)), self.__pids())
return map((lambda pid: (ProcessState(pid, delta_time, self.__metric_repository))), self.__pids())
def __pids(self):
pid_dict = self.__metric_repository.current_values('proc.psinfo.pid')
return sorted(pid_dict.values())
class CpuProcessStateReporter:
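    """Print the -B process state report, accumulating time spent in each state per process."""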
def __init__(self, process_state, process_filter, delta_time, printer, pidstat_options):
self.process_state = process_state
self.process_filter = process_filter
self.printer = printer
self.pidstat_options = pidstat_options
self.delta_time = delta_time
#TODO: SORTING
# for sorting this report, we need to put every process info in a table
# without printing individual processes as we run through the for loop below,
# then sort as required based on which field we need to sort on, and then
# display that entire table for each time print_report is called.
def print_report(self, timestamp, header_indentation, value_indentation):
#if not "detail" == self.pidstat_options.filterstate:
# self.printer ("\nTimestamp" + "\tPID\tState\tUtime\tStime\tTotal Time\tFunction\t\t\tCommand")
#else:
# self.printer ("\nTimestamp" + "\tPID\t\tR\t\tS\t\tZ\t\tT\t\tD\t\tCommand")
# Print out the header only if there are entries for that particular iteration
# So this print statement is moved inside the loop protected by a flag print_once
print_once = 0
# processes = self.process_filter.filter_processes(self.process_state.get_processes())
processes = self.process_filter.filter_processes(self.process_state.get_processes(self.delta_time))
#print self.pidstat_options.filterstate
for process in processes:
current_process_sname = process.s_name()
if self.pidstat_options.filterstate != "detail":
if not "all" in self.pidstat_options.filterstate:
if not current_process_sname in self.pidstat_options.filterstate:
# if not detailed report (which shows all states),
# and we dont find "all" after -B, filter out the processes
# in states we dont want
continue
current_process_pid = process.pid()
# tuple key to map to 1-d dictionary of {(pid,state):total_time}
key = (current_process_sname,current_process_pid)
if key in process_state_info:
process_state_info[key]=process_state_info[key] + self.delta_time
else:
process_state_info[key]=self.delta_time
# ------------------------------------------------------------------------------
# TODO : need to add the ability to filter by username
#if self.pidstat_options.show_process_user:
# self.printer("%s%s%s\t%s\t%s\t%s" %
# (timestamp, value_indentation, process.user_name(),
# process.pid(), process.process_name(),
# process.s_name(), process.start_time(),
# process.wchan_s(), process.process_blocked()))
#else:
# ------------------------------------------------------------------------------
if self.pidstat_options.filterstate != "detail":
if print_once == 0:
                    print_once = 1  # don't print the header again for this loop iteration
self.printer ("\nTimestamp" + "\tPID\tState\tUtime\tStime\tTotal Time\tFunction\t\t\tCommand")
if process.s_name() == 'R':
func = "N/A" # if a process is running, there will be no function it's waiting on
                elif process.s_name() is None:
func = "?"
else:
func = process.wchan_s()
if process.s_name() is not None and (len(process.wchan_s()) < 8 or func == "N/A"):
if self.pidstat_options.process_name_with_args:
self.printer("%s\t%s\t%s\t%s\t%s\t%.2f\t\t%s\t\t\t\t%s" %
(timestamp, process.pid(), process.s_name(),
process.utime(), process.stime(),
process_state_info[key], func,
process.process_name_with_args()))
else:
self.printer("%s\t%s\t%s\t%s\t%s\t%.2f\t\t%s\t\t\t\t%s" %
(timestamp, process.pid(), process.s_name(),
process.utime(), process.stime(),
process_state_info[key], func,
process.process_name()))
elif process.s_name() is not None and (len(process.wchan_s()) < 16 or func == "N/A"):
if self.pidstat_options.process_name_with_args:
self.printer("%s\t%s\t%s\t%s\t%s\t%.2f\t\t%s\t\t\t%s" %
(timestamp, process.pid(), process.s_name(),
process.utime(), process.stime(),
process_state_info[key], func,
process.process_name_with_args()))
else:
self.printer("%s\t%s\t%s\t%s\t%s\t%.2f\t\t%s\t\t\t%s" %
(timestamp, process.pid(), process.s_name(),
process.utime(), process.stime(),
process_state_info[key], func,
process.process_name()))
elif process.s_name() is not None and len(process.wchan_s()) < 24:
if self.pidstat_options.process_name_with_args:
self.printer("%s\t%s\t%s\t%s\t%s\t%.2f\t\t%s\t\t%s" %
(timestamp, process.pid(), process.s_name(),
process.utime(), process.stime(),
process_state_info[key], func,
process.process_name_with_args()))
else:
self.printer("%s\t%s\t%s\t%s\t%s\t%.2f\t\t%s\t\t%s" %
(timestamp, process.pid(), process.s_name(),
process.utime(), process.stime(),
process_state_info[key], func,
process.process_name()))
else:
if process.s_name() is not None:
self.printer("%s\t%s\t%s\t%s\t%s\t%.2f\t\t%s\t%s" %
(timestamp, process.pid(), process.s_name(),
process.utime(), process.stime(),
process_state_info[key], func,
process.process_name()))
#continue
else:
if print_once == 0:
                    print_once = 1  # don't print the header again for this loop iteration
self.printer("\nTimestamp" + "\tPID\t\tR\t\tS\t\tZ\t\tT\t\tD\t\tCommand")
# show detailed report of processes with accumulated timings in each state
key1 = ("R", current_process_pid)
key2 = ("S", current_process_pid)
key3 = ("Z", current_process_pid)
key4 = ("T", current_process_pid)
key5 = ("D", current_process_pid)
R = process_state_info.get(key1, 0)
S = process_state_info.get(key2, 0)
Z = process_state_info.get(key3, 0)
T = process_state_info.get(key4, 0)
D = process_state_info.get(key5, 0)
if self.pidstat_options.process_name_with_args:
self.printer("%s\t%s\t\t%.2f\t\t%.2f\t\t%.2f\t\t%.2f\t\t%.2f\t\t%s" %
(timestamp, process.pid(), R, S, Z, T, D,
process.process_name_with_args()))
else:
self.printer("%s\t%s\t\t%.2f\t\t%.2f\t\t%.2f\t\t%.2f\t\t%.2f\t\t%s" %
(timestamp, process.pid(), R, S, Z, T, D,
process.process_name()))
#===============================================================================
class ProcessFilter:
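    """Filter the reported processes according to the command line options
    (user name, pid list, process name regex, and per-report value checks)."""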
def __init__(self,options):
self.options = options
def filter_processes(self, processes):
return filter(lambda p: self.__predicate(p), processes)
def __predicate(self, process):
return bool(self.__matches_process_username(process)
and self.__matches_process_pid(process)
and self.__matches_process_name(process)
and self.__matches_process_priority(process)
and self.__matches_process_memory_util(process)
and self.__matches_process_stack_size(process))
def __matches_process_username(self, process):
if self.options.filtered_process_user is not None:
return self.options.filtered_process_user == process.user_name()
return True
def __matches_process_pid(self, process):
if self.options.pid_filter is not None:
pid = process.pid()
return bool(pid in self.options.pid_list)
return True
def __matches_process_name(self, process):
name = process.process_name()
if name is None:
return False # no match
if self.options.process_name is not None:
return re.search(self.options.process_name, name)
return True # all match
def __matches_process_priority(self, process):
if self.options.show_process_priority and process.priority() is not None:
return process.priority() > 0
return True
def __matches_process_memory_util(self, process):
if self.options.show_process_memory_util and process.vsize() is not None:
return process.vsize() > 0
return True
def __matches_process_stack_size(self, process):
if self.options.show_process_stack_util and process.stack_size() is not None:
return process.stack_size() > 0
return True
class CpuUsageReporter:
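    """Print the default per-process CPU usage report (usr/system/guest/%CPU)."""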
def __init__(self, cpu_usage, process_filter, delta_time, printer, pidstat_options):
self.cpu_usage = cpu_usage
self.process_filter = process_filter
self.printer = printer
self.delta_time = delta_time
self.pidstat_options = pidstat_options
def print_report(self, timestamp, ncpu, header_indentation, value_indentation):
if self.pidstat_options.show_process_user:
self.printer("Timestamp" + header_indentation +
"UName\tPID\tusr\tsystem\tguest\t%CPU\tCPU\tCommand")
else:
self.printer("Timestamp" + header_indentation +
"UID\tPID\tusr\tsystem\tguest\t%CPU\tCPU\tCommand")
processes = self.process_filter.filter_processes(self.cpu_usage.get_processes(self.delta_time))
for process in processes:
user_percent = process.user_percent()
guest_percent = process.guest_percent()
system_percent = process.system_percent()
total_percent = process.total_percent()
if self.pidstat_options.per_processor_usage and total_percent is not None:
total_percent = float("%.2f"%(total_percent/ncpu))
if self.pidstat_options.show_process_user:
if self.pidstat_options.process_name_with_args:
self.printer("%s%s%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_name(),
process.pid(), user_percent, system_percent,
guest_percent, total_percent, process.cpu_number(),
process.process_name_with_args()))
else:
self.printer("%s%s%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_name(),
process.pid(), user_percent, system_percent,
guest_percent, total_percent, process.cpu_number(),
process.process_name()))
else:
if self.pidstat_options.process_name_with_args:
self.printer("%s%s%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_id(),
process.pid(), user_percent, system_percent,
guest_percent, total_percent, process.cpu_number(),
process.process_name_with_args()))
else:
self.printer("%s%s%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_id(),
process.pid(), user_percent, system_percent,
guest_percent, total_percent, process.cpu_number(),
process.process_name()))
class CpuProcessPrioritiesReporter:
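    """Print the -R realtime priority and scheduling policy report."""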
def __init__(self, process_priority, process_filter, printer, pidstat_options):
self.process_priority = process_priority
self.process_filter = process_filter
self.printer = printer
self.pidstat_options = pidstat_options
def print_report(self, timestamp, header_indentation, value_indentation):
self.printer("Timestamp" + header_indentation +
"UID\tPID\tprio\tpolicy\tCommand")
processes = self.process_filter.filter_processes(self.process_priority.get_processes())
for process in processes:
if self.pidstat_options.show_process_user:
if self.pidstat_options.process_name_with_args:
self.printer("%s%s%s\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_name(),
process.pid(), process.priority(), process.policy(),
process.process_name_with_args()))
else:
self.printer("%s%s%s\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_name(),
process.pid(), process.priority(), process.policy(),
process.process_name()))
else:
if self.pidstat_options.process_name_with_args:
self.printer("%s%s%s\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_id(),
process.pid(), process.priority(), process.policy(),
process.process_name_with_args()))
else:
self.printer("%s%s%s\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_id(),
process.pid(), process.priority(), process.policy(),
process.process_name()))
class CpuProcessMemoryUtilReporter:
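    """Print the -r page fault and memory utilization report."""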
def __init__(self, process_memory_util, process_filter, delta_time, printer, pidstat_options):
self.process_memory_util = process_memory_util
self.process_filter = process_filter
self.printer = printer
self.delta_time = delta_time
self.pidstat_options = pidstat_options
def print_report(self, timestamp, header_indentation, value_indentation):
self.printer("Timestamp" + header_indentation +
"UID\tPID\tMinFlt/s\tMajFlt/s\tVSize\tRSS\t%Mem\tCommand")
processes = self.process_filter.filter_processes(self.process_memory_util.get_processes(self.delta_time))
for process in processes:
maj_flt = process.majflt()
min_flt = process.minflt()
if self.pidstat_options.show_process_user:
if self.pidstat_options.process_name_with_args:
self.printer("%s%s%s\t%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_name(),
process.pid(), min_flt, maj_flt, process.vsize(),
process.rss(), process.mem(),
process.process_name_with_args()))
else:
self.printer("%s%s%s\t%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_name(),
process.pid(), min_flt, maj_flt, process.vsize(),
process.rss(), process.mem(), process.process_name()))
else:
if self.pidstat_options.process_name_with_args:
self.printer("%s%s%s\t%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_id(),
process.pid(), min_flt, maj_flt, process.vsize(),
process.rss(), process.mem(),
process.process_name_with_args()))
else:
self.printer("%s%s%s\t%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s" %
(timestamp, value_indentation,process.user_id(),
process.pid(), min_flt, maj_flt, process.vsize(),
process.rss(), process.mem(), process.process_name()))
class CpuProcessStackUtilReporter:
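    """Print the -k stack utilization report."""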
def __init__(self, process_stack_util, process_filter, printer, pidstat_options):
self.process_stack_util = process_stack_util
self.process_filter = process_filter
self.printer = printer
self.pidstat_options = pidstat_options
def print_report(self, timestamp, header_indentation, value_indentation):
self.printer ("Timestamp" + header_indentation + "UID\tPID\tStkSize\tCommand")
processes = self.process_filter.filter_processes(self.process_stack_util.get_processes())
for process in processes:
if self.pidstat_options.show_process_user:
if self.pidstat_options.process_name_with_args:
self.printer("%s%s%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_name(),
process.pid(), process.stack_size(),
process.process_name_with_args()))
else :
self.printer("%s%s%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_name(),
process.pid(), process.stack_size(),
process.process_name()))
else:
if self.pidstat_options.process_name_with_args:
self.printer("%s%s%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_id(),
process.pid(), process.stack_size(),
process.process_name_with_args()))
else :
self.printer("%s%s%s\t%s\t%s\t%s" %
(timestamp, value_indentation, process.user_id(),
process.pid(), process.stack_size(),
process.process_name()))
class NoneHandlingPrinterDecorator:
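    """Printer wrapper that replaces 'None' with '?' in report lines."""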
def __init__(self, printer):
self.printer = printer
def Print(self, args):
new_args = args.replace('None','?')
self.printer.Print(new_args)
class PidstatOptions(pmapi.pmOptions):
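    """Command line option handling for pcp-pidstat, extending pmapi.pmOptions."""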
process_name = None
process_name_with_args = False
ps_args_flag=False
show_process_memory_util = False
show_process_priority = False
show_process_stack_util = False
per_processor_usage = False
show_process_user = False
show_process_state = False
flag_error = False
filtered_process_user = None
state = ""
filterstate = []
pid_filter = None
pid_list = []
timefmt = "%H:%M:%S"
def checkOptions(self):
if self.show_process_priority and self.show_process_memory_util:
print("Error: -R is incompatible with -r")
return False
elif self.show_process_priority and self.show_process_stack_util:
print("Error: -R is incompatible with -k")
return False
elif self.show_process_memory_util and self.show_process_stack_util:
print("Error: -r is incompatible with -k")
return False
elif (self.show_process_memory_util or self.show_process_stack_util or
self.show_process_priority) and self.show_process_state:
print("Error: Incompatible flags provided")
return False
elif self.flag_error:
print("Error: Incorrect usage of the B flag")
return False
elif self.ps_args_flag:
print("Error: Incorrect usage of the -l flag")
return False
else:
return True
def extraOptions(self, opt,optarg, index):
if opt == 'k':
PidstatOptions.show_process_stack_util = True
elif opt == 'r':
PidstatOptions.show_process_memory_util = True
elif opt == 'R':
PidstatOptions.show_process_priority = True
#process state
elif opt == 'B':
if PidstatOptions.show_process_state:
#print("Error: Cannot use -B multiple times")
PidstatOptions.flag_error = True
PidstatOptions.show_process_state = True
if optarg in ["All", "all"]:
PidstatOptions.filterstate = "all"
elif optarg in ["detail", "Detail"]:
PidstatOptions.filterstate = "detail"
else:
# tried to handle the error usage like pcp-pidstat.py -B all R,S
# or pcp-pidstat.py -B detail all
# or pcp-pidstat.py -B R all, etc but seems like the first optarg is all we have and
# we ignore the following ones. So pcp-pidstat.py -B detail all will treat it as
                # pcp-pidstat.py -B detail
#if not PidstatOptions.flag_error:
# if (PidstatOptions.filterstate == "all" or PidstatOptions.filterstate == "detail"):
# print("Error: Use either all/detail or specific filters for states")
# PidstatOptions.flag_error = True
# else:
# need to put checks for correct states in this string like UN,TT
                # shouldn't be accepted because TT isn't a valid state
# TODO: make sure only R,S,T,D,Z are part of this optarg so if
# anything other than these exists in
# PidstatOptions.filterstate, we might want to flag error of usage ?
PidstatOptions.filterstate += optarg.replace(',', ' ').split(' ')
elif opt == 'G':
PidstatOptions.process_name = optarg
elif opt == 'I':
PidstatOptions.per_processor_usage = True
elif opt == 'U':
PidstatOptions.show_process_user = True
PidstatOptions.filtered_process_user = optarg
elif opt == 'p':
if optarg in ["ALL", "SELF"]:
PidstatOptions.pid_filter = optarg
else:
PidstatOptions.pid_filter = "ALL"
try:
PidstatOptions.pid_list = list(map(lambda x:int(x),optarg.split(',')))
except ValueError:
print("Invalid Process Id List: use comma separated pids without whitespaces")
sys.exit(1)
elif opt == 'f':
PidstatOptions.timefmt = optarg
elif opt == 'l':
if PidstatOptions.process_name_with_args:
PidstatOptions.ps_args_flag=True
PidstatOptions.process_name_with_args = True
def override(self, opt):
""" Override standard PCP options to match pidstat(1) """
return bool(opt == 'p')
    # After reading in the provided command line options,
    # initialize them by passing them in.
def __init__(self):
pmapi.pmOptions.__init__(self,"a:s:t:G:IU::p:RrkVZ:z?:f:B:l")
self.pmSetOptionCallback(self.extraOptions)
self.pmSetOverrideCallback(self.override)
self.pmSetLongOptionHeader("General options")
self.pmSetLongOptionArchive()
self.pmSetLongOptionSamples()
self.pmSetLongOptionInterval()
self.pmSetLongOption("process-name", 1, "G", "NAME",
"Select process names using regular expression.")
self.pmSetLongOption("", 0, "I", "", "Show CPU usage per processor.")
self.pmSetLongOption("user-name", 2, "U","[USERNAME]",
"Show real user name of the tasks and optionally filter by user name.")
self.pmSetLongOption("pid-list", 1, "p", "PID1,PID2.. ",
"Show stats for specified pids; " +
"use SELF for current process and ALL for all processes.")
self.pmSetLongOption("", 0, "R", "",
"Report realtime priority and scheduling policy information.")
self.pmSetLongOption("", 0,"r","","Report page faults and memory utilization.")
self.pmSetLongOption("", 0,"k","","Report stack utilization.")
self.pmSetLongOption("", 0,"f","","Format the timestamp output")
self.pmSetLongOption("", 0, "B", "state1,state2,..",
"Report process state information. " +
"Use -B [all] or -B [comma separated states]. " +
"Use -B detail for showing time spent in every state per process")
self.pmSetLongOptionVersion()
self.pmSetLongOptionTimeZone()
self.pmSetLongOptionHostZone()
self.pmSetLongOption("", 0, "l", "", "Display the process command name and all its arguments.")
self.pmSetLongOptionHelp()
class PidstatReport(pmcc.MetricGroupPrinter):
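    """MetricGroupPrinter that formats and prints one pidstat report per fetched sample."""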
Machine_info_count = 0
def timeStampDelta(self, group):
s = group.timestamp.tv_sec - group.prevTimestamp.tv_sec
u = group.timestamp.tv_usec - group.prevTimestamp.tv_usec
return s + u / 1000000.0
def print_machine_info(self,group, context):
timestamp = context.pmLocaltime(group.timestamp.tv_sec)
# Please check strftime(3) for different formatting options.
# Also check TZ and LC_TIME environment variables for more
# information on how to override the default formatting of
# the date display in the header
time_string = time.strftime("%x", timestamp.struct_time())
header_string = ''
header_string += group['kernel.uname.sysname'].netValues[0][2] + ' '
header_string += group['kernel.uname.release'].netValues[0][2] + ' '
header_string += '(' + group['kernel.uname.nodename'].netValues[0][2] + ') '
header_string += time_string + ' '
header_string += group['kernel.uname.machine'].netValues[0][2] + ' '
print("%s (%s CPU)" % (header_string, self.get_ncpu(group)))
def get_ncpu(self,group):
return group['hinv.ncpu'].netValues[0][2]
def report(self,manager):
group = manager['pidstat']
if group['proc.psinfo.utime'].netPrevValues is None:
# need two fetches to report rate converted counter metrics
return
if not group['hinv.ncpu'].netValues or not group['kernel.uname.sysname'].netValues:
return
try:
ncpu = self.get_ncpu(group)
if not self.Machine_info_count:
self.print_machine_info(group, manager)
self.Machine_info_count = 1
except IndexError:
# missing some metrics
return
ts = group.contextCache.pmLocaltime(int(group.timestamp))
timestamp = time.strftime(PidstatOptions.timefmt, ts.struct_time())
interval_in_seconds = self.timeStampDelta(group)
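        # Align the column headers and value rows with the (possibly custom-formatted) timestamp width.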
header_indentation = " " if len(timestamp)<9 else (len(timestamp)-7)*" "
value_indentation = ((len(header_indentation)+9)-len(timestamp))*" "
metric_repository = ReportingMetricRepository(group)
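        # Dispatch to the reporter for the requested report type (-k stack, -r memory,
        # -R priority, -B state); the default is the CPU usage report.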
if PidstatOptions.show_process_stack_util:
process_stack_util = CpuProcessStackUtil(metric_repository)
process_filter = ProcessFilter(PidstatOptions)
stdout = StdoutPrinter()
printdecorator = NoneHandlingPrinterDecorator(stdout)
report = CpuProcessStackUtilReporter(process_stack_util, process_filter,
printdecorator.Print, PidstatOptions)
report.print_report(timestamp, header_indentation, value_indentation)
elif PidstatOptions.show_process_memory_util:
process_memory_util = CpuProcessMemoryUtil(metric_repository)
process_filter = ProcessFilter(PidstatOptions)
stdout = StdoutPrinter()
printdecorator = NoneHandlingPrinterDecorator(stdout)
report = CpuProcessMemoryUtilReporter(process_memory_util, process_filter,
interval_in_seconds,
printdecorator.Print, PidstatOptions)
report.print_report(timestamp, header_indentation, value_indentation)
elif PidstatOptions.show_process_priority:
process_priority = CpuProcessPriorities(metric_repository)
process_filter = ProcessFilter(PidstatOptions)
stdout = StdoutPrinter()
printdecorator = NoneHandlingPrinterDecorator(stdout)
report = CpuProcessPrioritiesReporter(process_priority, process_filter,
printdecorator.Print, PidstatOptions)
report.print_report(timestamp, header_indentation, value_indentation)
#===========================================================================================================
elif PidstatOptions.show_process_state:
process_state = CpuProcessState(metric_repository)
process_filter = ProcessFilter(PidstatOptions)
stdout = StdoutPrinter()
printdecorator = NoneHandlingPrinterDecorator(stdout)
report = CpuProcessStateReporter(process_state, process_filter,
interval_in_seconds,
printdecorator.Print, PidstatOptions)
report.print_report(timestamp, header_indentation, value_indentation)
#===========================================================================================================
else:
cpu_usage = CpuUsage(metric_repository)
process_filter = ProcessFilter(PidstatOptions)
stdout = StdoutPrinter()
printdecorator = NoneHandlingPrinterDecorator(stdout)
report = CpuUsageReporter(cpu_usage, process_filter, interval_in_seconds,
printdecorator.Print, PidstatOptions)
report.print_report(timestamp, ncpu, header_indentation, value_indentation)
if __name__ == "__main__":
try:
opts = PidstatOptions()
manager = pmcc.MetricGroupManager.builder(opts,sys.argv)
if not opts.checkOptions():
raise pmapi.pmUsageErr
if opts.show_process_state:
missing = manager.checkMissingMetrics(PIDSTAT_METRICS_B)
elif opts.process_name_with_args:
missing = manager.checkMissingMetrics(PIDSTAT_METRICS_L)
else:
missing = manager.checkMissingMetrics(PIDSTAT_METRICS)
if missing is not None:
sys.stderr.write('Error: not all required metrics are available\nMissing %s\n' % (missing))
sys.exit(1)
if opts.process_name_with_args:
manager['pidstat'] = PIDSTAT_METRICS_L
elif opts.show_process_state:
manager['pidstat'] = PIDSTAT_METRICS_B
else:
manager['pidstat'] = PIDSTAT_METRICS
manager.printer = PidstatReport()
sts = manager.run()
sys.exit(sts)
except pmapi.pmErr as pmerror:
sys.stderr.write('%s: %s\n' % (pmerror.progname,pmerror.message()))
except pmapi.pmUsageErr as usage:
usage.message()
sys.exit(1)
except KeyboardInterrupt:
pass
| adfernandes/pcp | src/pcp/pidstat/pcp-pidstat.py | Python | lgpl-2.1 | 50,840 |
#!/usr/bin/python
#
# This program converts a series of raw files into Blender Voxel format
#
#
import math;
import sys;
import array;
import glob
from optparse import OptionParser;
usage = "Append files to a single voxel file.\n syntax: voxel_join.py inputfile outputfile [options]";
parser = OptionParser(usage=usage);
parser.add_option("-V","--verbose",action="store_true",
dest="Verbose",default=False,
help="Switch on verbose mode.");
parser.add_option("-x","--Xresolution",type="int",action="store", dest="xres",default=32,
help="Resolution in X dimension");
parser.add_option("-y","--Yresolution",type="int",action="store", dest="yres",default=32,
help="Resolution in X dimension");
parser.add_option("-z","--Zresolution",type="int",action="store", dest="zres",default=32,
help="Resolution in X dimension");
parser.add_option("-m","--minimum",type="float",action="store", dest="minval",default=0.0,
help="Minimum value (converted to 0.0)");
parser.add_option("-M","--maximum",type="float",action="store", dest="maxval",default=1.0,
help="Maximum value (converted to 1.0)");
parser.add_option("-t","--times",type="int",action="store", dest="times",default=100000,
help="Maximum number of frames to join");
parser.add_option("-n","--normaliza",action="store_true", dest="LocalNormalize",default=False,
help="Normalize each frame to the local Max/Min");
# Parse arguments
(options, args) = parser.parse_args();
xres = options.xres
yres = options.yres
zres = options.zres
Maximum = options.maxval
Minimum = options.minval
Maxtimes = options.times
Verbose = options.Verbose
LocalNormalize = options.LocalNormalize
if LocalNormalize:
Maximum=-1.0E40
Minimum=0.0
filenamein = args[0];
filenameout = args[1];
fileinlist = sorted(glob.glob(filenamein+"*"))
fileout = open(filenameout, "wb")
times = min(Maxtimes,len(fileinlist))
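# File header: four 32-bit unsigned ints (grid resolution in z, x, y order, then the frame count).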
headerout = array.array('I')
headerout.append(zres)
headerout.append(xres)
headerout.append(yres)
headerout.append(times)
headerout.tofile(fileout)
if Verbose:
print("Out as "+filenameout);
print("x as "+str(xres));
print("y as "+str(yres));
print("z as "+str(zres));
print("T as "+str(times));
print("Max is "+str(Maximum));
print("Min is "+str(Minimum));
def normalize(num):
return min(1.0,max(0.0,(abs(num)-Minimum)/(Maximum-Minimum)))
for time in range(times):
filename=fileinlist[time]
    with open(filename,"rb") as file:
if LocalNormalize: Maximum=-1.0E40
datain=array.array('f')
if Verbose: print "Reading..."
for y in range(yres*xres):
datain.fromfile(file,zres)
if Verbose: print "Finding maximum..."
Maximum=max(Maximum,max(map(abs,datain)))
if Verbose: print "Max found is "+str(Maximum)
if (Maximum <= Minimum): Maximum=Minimum+1
if Verbose: print "Normalizing..."
dataout=array.array('f',map(normalize, datain))
if Verbose:
print "Writing..."+str(len(dataout))+" numbers"
dataout.tofile(fileout)
fileout.flush();
if Verbose:
print("Did file "+str(time+1)+" out of "+str(times))
fileout.close();
| fercook/SciViz | Voxels/Blender_voxels/voxel_join.py | Python | gpl-2.0 | 3,262 |
from client import exceptions as ex
from client.sources.common import core
import mock
import unittest
###############
# Field Tests #
###############
class MockField(core.Field):
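    # A stub Field that accepts only VALID_INT; it lets the tests below exercise
    # default/optional handling without depending on a concrete field type.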
VALID_INT = 42
OK_INT = 3
INVALID_INT = 2
def is_valid(self, value):
return value == self.VALID_INT
def to_json(self, value):
value = super().to_json(value)
return self.VALID_INT
class FieldTest(unittest.TestCase):
def testNoArguments(self):
field = MockField()
self.assertEqual(field.optional, False)
self.assertEqual(field.default, core.NoValue)
def testDefaultArgument_validDefault(self):
field = MockField(default=MockField.VALID_INT)
self.assertEqual(field.optional, True)
self.assertEqual(field.default, MockField.VALID_INT)
def testDefaultArgument_invalidDefault(self):
self.assertRaises(ex.SerializeException, MockField,
default=MockField.INVALID_INT)
def testDefaultArgument_optionalFalse(self):
field = MockField(optional=False, default=MockField.VALID_INT)
# Setting a default always sets optional to True
self.assertEqual(field.optional, True)
self.assertEqual(field.default, MockField.VALID_INT)
def testOptional(self):
field = MockField(optional=True)
self.assertEqual(field.optional, True)
self.assertEqual(field.default, core.NoValue)
def testToJson_validValue(self):
field = MockField()
self.assertEqual(MockField.VALID_INT, field.to_json(MockField.VALID_INT))
def testToJson_invalidValue(self):
field = MockField()
self.assertRaises(ex.SerializeException, field.to_json,
MockField.INVALID_INT)
class ListFieldTest(unittest.TestCase):
TEST_INT = 42
def testConstructor_heterogeneous(self):
field = core.List()
self.assertTrue(field.is_valid([1, 'hi', 6]))
def testConstructor_homogeneous(self):
field = core.List(type=int)
self.assertFalse(field.is_valid([1, 'hi', 6]))
self.assertTrue(field.is_valid([1, 2, 3, 4]))
def testConstructor_homogeneousSubclass(self):
class IntSubclass(int):
def __init__(self):
pass
field = core.List(type=int)
self.assertTrue(field.is_valid([1, IntSubclass()]))
def testConstructor_heterogeneousEmptyList(self):
field = core.List()
self.assertTrue(field.is_valid([]))
def testConstructor_homogeneousEmptyList(self):
field = core.List(type=str)
self.assertTrue(field.is_valid([]))
def assertCoerce_pass(self, expect, value, **fields):
field = core.List(**fields)
self.assertEqual(expect, field.coerce(value))
def assertCoerce_errors(self, value, **fields):
field = core.List(**fields)
self.assertRaises(ex.SerializeException, field.coerce, value)
def testCoerce_heterogeneousList(self):
lst = [1, 'hi', 3, True]
self.assertCoerce_pass(lst, lst)
def testCoerce_heterogeneousValidNonList(self):
value = (1, 'hi', 3, True)
expect = list(value)
self.assertCoerce_pass(expect, value)
def testCoerce_heterogeneousInvalidNonList(self):
self.assertCoerce_errors(4)
def testCoerce_homogeneousValidList(self):
value = [1, 2, 3, 4]
self.assertCoerce_pass(value, value, type=int)
def testCoerce_homogeneousInvalidList(self):
# TODO(albert): should make primitive list elements perform
# strict coercion, to avoid unintended conversions.
# value = [1, 2, 3, 4]
# self.assertCoerce_errors(value, type=str)
pass
def testCoerce_homogeneousValidNonList(self):
value = (1, 2, 3, 4)
expect = list(value)
self.assertCoerce_pass(expect, value, type=int)
def testCoerce_homogeneousInvalidNonList_notIterable(self):
self.assertCoerce_errors(4, type=int)
def testCoerce_homogeneousInvalidNonList_wrongType(self):
# TODO(albert): should make primitive list elements perform
# strict coercion, to avoid unintended conversions.
# value = [1, 2, 3]
# self.assertCoerce_errors(value, type=str)
pass
def testToJson_shallow(self):
field = core.List()
expect = [1, 'hi', True]
self.assertEqual(expect, field.to_json(expect))
def testToJson_recursive(self):
field = core.List()
class Recursive(object):
def to_json(self):
return ListFieldTest.TEST_INT
expect = [1, self.TEST_INT, True]
arg = [1, Recursive(), True]
self.assertEqual(expect, field.to_json(arg))
class DictFieldTest(unittest.TestCase):
TEST_INT = 42
def testConstructor_heterogeneous(self):
field = core.Dict()
self.assertTrue(field.is_valid({'hi': 4, True: 'boo'}))
def testConstructor_homogeneousKey(self):
field = core.Dict(keys=int)
self.assertFalse(field.is_valid({'hi': 4}))
self.assertTrue(field.is_valid({4: 'hi', 2: 1}))
def testConstructor_homogeneousValue(self):
field = core.Dict(values=str)
self.assertFalse(field.is_valid({'hi': 4, 'f': 'bye'}))
self.assertTrue(field.is_valid({4: 'hi', 'f': 'bye'}))
def testConstructor_homogeneousSubclass(self):
class IntSubclass(int):
def __init__(self):
pass
field = core.Dict(keys=int, values=int)
self.assertTrue(field.is_valid({IntSubclass(): IntSubclass()}))
def testConstructor_heterogeneousEmptyDict(self):
field = core.Dict()
self.assertTrue(field.is_valid({}))
def testConstructor_homogeneousEmptyDict(self):
field = core.Dict(keys=str, values=int)
self.assertTrue(field.is_valid({}))
def assertCoerce_pass(self, expect, value, **fields):
field = core.Dict(**fields)
self.assertEqual(expect, field.coerce(value))
def assertCoerce_errors(self, value, **fields):
field = core.Dict(**fields)
self.assertRaises(ex.SerializeException, field.coerce, value)
def testCoerce_heterogeneousDict(self):
d = {'a': 1, 2: False}
self.assertCoerce_pass(d, d)
def testCoerce_heterogeneousValidNonDict(self):
value = (('a', 1), (2, False))
expect = dict(value)
self.assertCoerce_pass(expect, value)
def testCoerce_heterogeneousInvalidNonDict(self):
self.assertCoerce_errors([1, 2, 3])
def testCoerce_homogeneousValidDict(self):
value = {'a': 1, 'b': 2}
self.assertCoerce_pass(value, value, keys=str, values=int)
def testCoerce_homogeneousInvalidDict(self):
# TODO(albert): should make primitive dict elements perform
# strict coercion, to avoid unintended conversions.
# value = {'a': True, 'b': False}
# self.assertCoerce_errors(value, keys=str, values=int)
pass
def testCoerce_homogeneousValidNonDict(self):
value = (('a', 1), ('b', 2))
expect = dict(value)
self.assertCoerce_pass(expect, value, keys=str, values=int)
def testCoerce_homogeneousInvalidNonDict_notDictLike(self):
self.assertCoerce_errors([1, 2, 3], keys=int)
def testCoerce_homogeneousInvalidNonDict_wrongType(self):
# TODO(albert): should make primitive dict elements perform
# strict coercion, to avoid unintended conversions.
# value = (('a', True), ('b', False))
# self.assertCoerce_errors(value, keys=str, values=int)
pass
def testToJson_shallow(self):
field = core.Dict()
expect = {'hi': 4, True: 3}
self.assertEqual(expect, field.to_json(expect))
def testToJson_recursiveKey(self):
field = core.Dict()
class Recursive(object):
def to_json(self):
return DictFieldTest.TEST_INT
expect = {self.TEST_INT: 4, True: 3}
arg = {Recursive(): 4, True: 3}
self.assertEqual(expect, field.to_json(arg))
def testToJson_recursiveValue(self):
field = core.Dict()
class Recursive(object):
def to_json(self):
return DictFieldTest.TEST_INT
expect = {4: self.TEST_INT, True: 3}
arg = {4: Recursive(), True: 3}
self.assertEqual(expect, field.to_json(arg))
######################
# Serializable Tests #
######################
class MockSerializable(core.Serializable):
TEST_INT = 2
var1 = core.Boolean()
var2 = core.Int(default=TEST_INT)
var3 = core.String(optional=True)
var4 = core.List(optional=True)
class MockSerializable2(MockSerializable):
TEST_INT = 1
var2 = core.Int(default=TEST_INT)
var5 = core.String(optional=True)
class SerializableTest(unittest.TestCase):
TEST_INT = 42
TEST_BOOL = True
TEST_STR = 'hi'
def testConstructor_missingRequiredFields(self):
self.assertRaises(ex.SerializeException, MockSerializable)
def testConstructor_incorrectRequiredFields(self):
self.assertRaises(ex.SerializeException, MockSerializable, var1=self.TEST_INT)
def testConstructor_incorrectOptionalFields(self):
self.assertRaises(ex.SerializeException, MockSerializable, var1=self.TEST_BOOL,
var2=self.TEST_BOOL)
def testConstructor_unexpectedFields(self):
self.assertRaises(ex.SerializeException, MockSerializable, var1=self.TEST_BOOL,
var2=self.TEST_INT, foo=self.TEST_INT)
def testConstructor_validArguments(self):
try:
MockSerializable(var1=self.TEST_BOOL, var3=self.TEST_STR)
except ex.SerializeException:
self.fail("Should not have failed")
def testConstructor_overrideSuperclassFields(self):
try:
obj = MockSerializable2(var1=self.TEST_BOOL)
except ex.SerializeException:
self.fail("Should not have failed")
self.assertEqual(MockSerializable2.TEST_INT, obj.var2)
def testSetAttr_validType(self):
obj = MockSerializable(var1=self.TEST_BOOL)
value = (1, 2, 3)
obj.var4 = value
self.assertEqual(list(value), obj.var4)
def testSetAttr_coercibleType(self):
obj = MockSerializable(var1=self.TEST_BOOL, var3=self.TEST_STR)
obj.var1 = not self.TEST_BOOL
self.assertEqual(not self.TEST_BOOL, obj.var1)
def testSetAttr_invalidType(self):
obj = MockSerializable(var1=self.TEST_BOOL, var3=self.TEST_STR)
try:
obj.var1 = self.TEST_INT
except ex.SerializeException:
pass
else:
self.fail("Should have raised a SerializeException")
def testToJson_noOptional(self):
obj = MockSerializable(var1=self.TEST_BOOL)
expect = {'var1': self.TEST_BOOL, 'var2': MockSerializable.TEST_INT}
self.assertEqual(expect, obj.to_json())
def testToJson_withOptional(self):
obj = MockSerializable(var1=self.TEST_BOOL, var3=self.TEST_STR)
expect = {'var1': self.TEST_BOOL, 'var2': MockSerializable.TEST_INT,
'var3': self.TEST_STR}
self.assertEqual(expect, obj.to_json())
| jackzhao-mj/ok-client | tests/sources/common/core_test.py | Python | apache-2.0 | 11,269 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import tokenize
from pants.contrib.python.checks.checker.common import CheckstylePlugin
# TODO(wickman) Update this to sanitize line continuation styling as we have
# disabled it from pycodestyle.py due to mismatched indentation styles.
class Indentation(CheckstylePlugin):
"""Enforce proper indentation."""
@classmethod
def name(cls):
return 'indentation'
INDENT_LEVEL = 2 # the one true way
def nits(self):
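    # Walk the token stream keeping a stack of indent strings; every INDENT must
    # deepen the previous level by exactly INDENT_LEVEL spaces.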
indents = []
for token in self.python_file.tokens:
token_type, token_text, token_start = token[0:3]
if token_type is tokenize.INDENT:
last_indent = len(indents[-1]) if indents else 0
current_indent = len(token_text)
if current_indent - last_indent != self.INDENT_LEVEL:
yield self.error('T100',
'Indentation of {} instead of {}'.format(
current_indent - last_indent, self.INDENT_LEVEL),
token_start[0])
indents.append(token_text)
elif token_type is tokenize.DEDENT:
indents.pop()
| twitter/pants | contrib/python/src/python/pants/contrib/python/checks/checker/indentation.py | Python | apache-2.0 | 1,262 |
import logging
import re
from collections import defaultdict
from datetime import datetime
from urllib.parse import urlparse
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage
from django.db import transaction
from django.db.models import Q
from django.http import (
Http404,
HttpResponse,
HttpResponseForbidden,
JsonResponse,
StreamingHttpResponse,
)
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import condition, require_POST
from django.views.generic.edit import FormView
from notifications.signals import notify
from pontoon.actionlog.models import ActionLog
from pontoon.actionlog.utils import log_action
from pontoon.base import forms
from pontoon.base import utils
from pontoon.base.models import (
Entity,
Locale,
Project,
ProjectLocale,
TranslationMemoryEntry,
TranslatedResource,
Translation,
Comment,
)
from pontoon.base.templatetags.helpers import provider_login_url
from pontoon.checks.libraries import run_checks
from pontoon.checks.utils import are_blocking_checks
log = logging.getLogger(__name__)
# TRANSLATE VIEWs
def translate_locale_agnostic(request, slug, part):
"""Locale Agnostic Translate view."""
user = request.user
query = urlparse(request.get_full_path()).query
query = "?%s" % query if query else ""
if slug.lower() == "all-projects":
project_locales = Locale.objects.available()
else:
project = get_object_or_404(
Project.objects.visible_for(request.user).available(), slug=slug
)
project_locales = project.locales
if user.is_authenticated:
locale = user.profile.custom_homepage
if locale and project_locales.filter(code=locale).exists():
path = reverse(
"pontoon.translate",
kwargs=dict(project=slug, locale=locale, resource=part),
)
return redirect(f"{path}{query}")
locale = utils.get_project_locale_from_request(request, project_locales)
path = (
reverse(
"pontoon.translate", kwargs=dict(project=slug, locale=locale, resource=part)
)
if locale
else reverse("pontoon.projects.project", kwargs=dict(slug=slug))
)
return redirect(f"{path}{query}")
@utils.require_AJAX
def locale_projects(request, locale):
"""Get active projects for locale."""
locale = get_object_or_404(Locale, code=locale)
return JsonResponse(locale.available_projects_list(request.user), safe=False)
@utils.require_AJAX
def locale_stats(request, locale):
"""Get locale stats used in All Resources part."""
locale = get_object_or_404(Locale, code=locale)
return JsonResponse(locale.stats(), safe=False)
@utils.require_AJAX
def locale_project_parts(request, locale, slug):
"""Get locale-project pages/paths with stats."""
try:
locale = Locale.objects.get(code=locale)
except Locale.DoesNotExist as e:
return JsonResponse(
{"status": False, "message": f"Not Found: {e}"},
status=404,
)
try:
project = Project.objects.visible_for(request.user).get(slug=slug)
except Project.DoesNotExist as e:
return JsonResponse(
{"status": False, "message": f"Not Found: {e}"},
status=404,
)
try:
return JsonResponse(locale.parts_stats(project), safe=False)
except ProjectLocale.DoesNotExist:
return JsonResponse(
{"status": False, "message": "Locale not enabled for selected project."},
status=400,
)
@utils.require_AJAX
def authors_and_time_range(request, locale, slug, part):
locale = get_object_or_404(Locale, code=locale)
project = get_object_or_404(
Project.objects.visible_for(request.user).available(), slug=slug
)
paths = [part] if part != "all-resources" else None
translations = Translation.for_locale_project_paths(locale, project, paths)
return JsonResponse(
{
"authors": translations.authors(),
"counts_per_minute": translations.counts_per_minute(),
},
safe=False,
)
def _get_entities_list(locale, preferred_source_locale, project, form):
"""Return a specific list of entities, as defined by the `entity_ids` field of the form.
This is used for batch editing.
"""
entities = (
Entity.objects.filter(pk__in=form.cleaned_data["entity_ids"])
.distinct()
.order_by("order")
)
return JsonResponse(
{
"entities": Entity.map_entities(locale, preferred_source_locale, entities),
"stats": TranslatedResource.objects.stats(
project, form.cleaned_data["paths"], locale
),
},
safe=False,
)
def _get_paginated_entities(locale, preferred_source_locale, project, form, entities):
"""Return a paginated list of entities.
This is used by the regular mode of the Translate page.
"""
paginator = Paginator(entities, form.cleaned_data["limit"])
try:
entities_page = paginator.page(1)
except EmptyPage:
return JsonResponse({"has_next": False, "stats": {}})
has_next = entities_page.has_next()
entities_to_map = entities_page.object_list
# If requested entity not on the first page
if form.cleaned_data["entity"]:
entity_pk = form.cleaned_data["entity"]
entities_to_map_pks = [e.pk for e in entities_to_map]
# TODO: entities_to_map.values_list() doesn't return entities from selected page
if entity_pk not in entities_to_map_pks:
if entity_pk in entities.values_list("pk", flat=True):
entities_to_map_pks.append(entity_pk)
entities_to_map = entities.filter(pk__in=entities_to_map_pks)
return JsonResponse(
{
"entities": Entity.map_entities(
locale, preferred_source_locale, entities_to_map, []
),
"has_next": has_next,
"stats": TranslatedResource.objects.stats(
project, form.cleaned_data["paths"], locale
),
},
safe=False,
)
@csrf_exempt
@require_POST
@utils.require_AJAX
def entities(request):
"""Get entities for the specified project, locale and paths."""
form = forms.GetEntitiesForm(request.POST)
if not form.is_valid():
return JsonResponse(
{
"status": False,
"message": "{error}".format(
error=form.errors.as_json(escape_html=True)
),
},
status=400,
)
locale = get_object_or_404(Locale, code=form.cleaned_data["locale"])
preferred_source_locale = ""
if request.user.is_authenticated:
preferred_source_locale = request.user.profile.preferred_source_locale
project_slug = form.cleaned_data["project"]
if project_slug == "all-projects":
project = Project(slug=project_slug)
else:
project = get_object_or_404(Project, slug=project_slug)
# Only return entities with provided IDs (batch editing)
if form.cleaned_data["entity_ids"]:
return _get_entities_list(locale, preferred_source_locale, project, form)
# `Entity.for_project_locale` only requires a subset of the fields the form contains. We thus
# make a new dict with only the keys we want to pass to that function.
restrict_to_keys = (
"paths",
"status",
"search",
"exclude_entities",
"extra",
"time",
"author",
"tag",
)
form_data = {
k: form.cleaned_data[k] for k in restrict_to_keys if k in form.cleaned_data
}
try:
entities = Entity.for_project_locale(request.user, project, locale, **form_data)
except ValueError as error:
return JsonResponse({"status": False, "message": f"{error}"}, status=500)
# Only return a list of entity PKs (batch editing: select all)
if form.cleaned_data["pk_only"]:
return JsonResponse({"entity_pks": list(entities.values_list("pk", flat=True))})
# Out-of-context view: paginate entities
return _get_paginated_entities(
locale, preferred_source_locale, project, form, entities
)
def _serialize_translation_values(translation, preferred_values):
serialized = {
"locale": {
"pk": translation["locale__pk"],
"code": translation["locale__code"],
"name": translation["locale__name"],
"direction": translation["locale__direction"],
"script": translation["locale__script"],
},
"translation": translation["string"],
}
if translation["locale__code"] in preferred_values:
serialized["is_preferred"] = True
return serialized
@utils.require_AJAX
def get_translations_from_other_locales(request):
"""Get entity translations for all but specified locale."""
try:
entity = int(request.GET["entity"])
locale = request.GET["locale"]
except (MultiValueDictKeyError, ValueError) as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"},
status=400,
)
entity = get_object_or_404(Entity, pk=entity)
locale = get_object_or_404(Locale, code=locale)
plural_form = None if entity.string_plural == "" else 0
translations = (
Translation.objects.filter(
entity=entity,
plural_form=plural_form,
approved=True,
)
.exclude(locale=locale)
.order_by("locale__name")
).values(
"locale__pk",
"locale__code",
"locale__name",
"locale__direction",
"locale__script",
"string",
)
preferred_locales = []
if request.user.is_authenticated:
preferred_locales = request.user.profile.preferred_locales.values_list(
"code", flat=True
)
payload = [
_serialize_translation_values(translation, preferred_locales)
for translation in translations
]
return JsonResponse(payload, safe=False)
@utils.require_AJAX
def get_sibling_entities(request):
"""Get entities preceding and succeeding the current entity"""
try:
entity = int(request.GET["entity"])
locale = request.GET["locale"]
except (MultiValueDictKeyError, ValueError) as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"},
status=400,
)
entity = get_object_or_404(Entity, pk=entity)
locale = get_object_or_404(Locale, code=locale)
preferred_source_locale = ""
if request.user.is_authenticated:
preferred_source_locale = request.user.profile.preferred_source_locale
entities = Entity.objects.filter(resource=entity.resource, obsolete=False).order_by(
"order"
)
succeeding_entities = entities.filter(order__gt=entity.order)[:2]
preceding_entities = entities.filter(order__lt=entity.order).order_by("-order")[:2]
return JsonResponse(
{
"succeeding": Entity.map_entities(
locale,
preferred_source_locale,
succeeding_entities,
[],
True,
),
"preceding": Entity.map_entities(
locale,
preferred_source_locale,
preceding_entities,
[],
True,
),
},
safe=False,
)
@utils.require_AJAX
def get_translation_history(request):
"""Get history of translations of given entity to given locale."""
try:
entity = int(request.GET["entity"])
locale = request.GET["locale"]
plural_form = int(request.GET["plural_form"])
except (MultiValueDictKeyError, ValueError) as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"},
status=400,
)
entity = get_object_or_404(Entity, pk=entity)
locale = get_object_or_404(Locale, code=locale)
translations = Translation.objects.filter(
entity=entity,
locale=locale,
).prefetch_related("comments")
if plural_form != -1:
translations = translations.filter(plural_form=plural_form)
translations = translations.order_by("-active", "rejected", "-date")
payload = []
for t in translations:
u = t.user or User(username="Imported", first_name="Imported", email="imported")
translation_dict = t.serialize()
translation_dict.update(
{
"user": u.name_or_email,
"uid": u.id,
"username": u.username,
"user_gravatar_url_small": u.gravatar_url(88),
"date": t.date.strftime("%b %d, %Y %H:%M"),
"date_iso": t.date.isoformat(),
"approved_user": User.display_name_or_blank(t.approved_user),
"unapproved_user": User.display_name_or_blank(t.unapproved_user),
"comments": [c.serialize() for c in t.comments.order_by("timestamp")],
"machinery_sources": t.machinery_sources_values,
}
)
payload.append(translation_dict)
return JsonResponse(payload, safe=False)
@utils.require_AJAX
def get_team_comments(request):
"""Get team comments for given locale."""
try:
entity = int(request.GET["entity"])
locale = request.GET["locale"]
except (MultiValueDictKeyError, ValueError) as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"},
status=400,
)
entity = get_object_or_404(Entity, pk=entity)
locale = get_object_or_404(Locale, code=locale)
comments = (
Comment.objects.filter(entity=entity)
.filter(Q(locale=locale) | Q(pinned=True))
.order_by("timestamp")
)
payload = [c.serialize() for c in comments]
return JsonResponse(payload, safe=False)
def _send_add_comment_notifications(user, comment, entity, locale, translation):
# On translation comment, notify:
# - authors of other translation comments in the thread
# - translation author
# - translation reviewers
if translation:
recipients = set(translation.comments.values_list("author__pk", flat=True))
if translation.user:
recipients.add(translation.user.pk)
if translation.approved_user:
recipients.add(translation.approved_user.pk)
if translation.unapproved_user:
recipients.add(translation.unapproved_user.pk)
if translation.rejected_user:
recipients.add(translation.rejected_user.pk)
if translation.unrejected_user:
recipients.add(translation.unrejected_user.pk)
# On team comment, notify:
# - project-locale translators or locale translators
# - locale managers
# - authors of other team comments in the thread
# - authors of translation comments
# - translation authors
# - translation reviewers
else:
recipients = set()
project_locale = ProjectLocale.objects.get(
project=entity.resource.project,
locale=locale,
)
translations = Translation.objects.filter(entity=entity, locale=locale)
translators = []
# Some projects (e.g. system projects) don't have translators group
if project_locale.translators_group:
# Only notify translators of the project if defined
translators = project_locale.translators_group.user_set.values_list(
"pk", flat=True
)
if not translators:
translators = locale.translators_group.user_set.values_list("pk", flat=True)
recipients = recipients.union(translators)
recipients = recipients.union(
locale.managers_group.user_set.values_list("pk", flat=True)
)
recipients = recipients.union(
Comment.objects.filter(entity=entity, locale=locale).values_list(
"author__pk", flat=True
)
)
recipients = recipients.union(
Comment.objects.filter(translation__in=translations).values_list(
"author__pk", flat=True
)
)
recipients = recipients.union(translations.values_list("user__pk", flat=True))
recipients = recipients.union(
translations.values_list("approved_user__pk", flat=True)
)
recipients = recipients.union(
translations.values_list("unapproved_user__pk", flat=True)
)
recipients = recipients.union(
translations.values_list("rejected_user__pk", flat=True)
)
recipients = recipients.union(
translations.values_list("unrejected_user__pk", flat=True)
)
# Notify users, mentioned in a comment
usernames = re.findall(r"<a href=\"\/contributors/([\w.@+-]+)/\">.+</a>", comment)
recipients = recipients.union(
User.objects.filter(username__in=usernames).values_list("pk", flat=True)
)
for recipient in User.objects.filter(
pk__in=recipients,
profile__comment_notifications=True,
).exclude(pk=user.pk):
notify.send(
user,
recipient=recipient,
verb="has added a comment in",
action_object=locale,
target=entity,
description=comment,
)
def _send_pin_comment_notifications(user, comment):
# When pinning a comment, notify:
# - authors of existing translations across all locales
# - reviewers of existing translations across all locales
recipient_data = defaultdict(list)
entity = comment.entity
translations = Translation.objects.filter(entity=entity)
for t in translations:
for u in (
t.user,
t.approved_user,
t.unapproved_user,
t.rejected_user,
t.unrejected_user,
):
if u:
recipient_data[u.pk].append(t.locale.pk)
for recipient in User.objects.filter(pk__in=recipient_data.keys()).exclude(
pk=user.pk
):
# Send separate notification for each locale (which results in links to corresponding translate views)
for locale in Locale.objects.filter(pk__in=recipient_data[recipient.pk]):
notify.send(
user,
recipient=recipient,
verb="has pinned a comment in",
action_object=locale,
target=entity,
description=comment.content,
)
@require_POST
@utils.require_AJAX
@login_required(redirect_field_name="", login_url="/403")
@transaction.atomic
def add_comment(request):
"""Add a comment."""
form = forms.AddCommentForm(request.POST)
if not form.is_valid():
return JsonResponse(
{
"status": False,
"message": "{error}".format(
error=form.errors.as_json(escape_html=True)
),
},
status=400,
)
user = request.user
comment = form.cleaned_data["comment"]
translationId = form.cleaned_data["translation"]
entity = get_object_or_404(Entity, pk=form.cleaned_data["entity"])
locale = get_object_or_404(Locale, code=form.cleaned_data["locale"])
if translationId:
translation = get_object_or_404(Translation, pk=translationId)
else:
translation = None
# Translation comment
if translation:
c = Comment(author=user, translation=translation, content=comment)
log_action(ActionLog.ActionType.COMMENT_ADDED, user, translation=translation)
# Team comment
else:
c = Comment(author=user, entity=entity, locale=locale, content=comment)
log_action(
ActionLog.ActionType.COMMENT_ADDED, user, entity=entity, locale=locale
)
c.save()
_send_add_comment_notifications(user, comment, entity, locale, translation)
return JsonResponse({"status": True})
@login_required(redirect_field_name="", login_url="/403")
@require_POST
@transaction.atomic
def pin_comment(request):
"""Update a comment as pinned"""
comment_id = request.POST.get("comment_id", None)
if not comment_id:
return JsonResponse({"status": False, "message": "Bad Request"}, status=400)
comment = get_object_or_404(Comment, id=comment_id)
comment.pinned = True
comment.save()
_send_pin_comment_notifications(request.user, comment)
return JsonResponse({"status": True})
@login_required(redirect_field_name="", login_url="/403")
@require_POST
@transaction.atomic
def unpin_comment(request):
"""Update a comment as unpinned"""
comment_id = request.POST.get("comment_id", None)
if not comment_id:
return JsonResponse({"status": False, "message": "Bad Request"}, status=400)
comment = get_object_or_404(Comment, id=comment_id)
comment.pinned = False
comment.save()
return JsonResponse({"status": True})
@utils.require_AJAX
@login_required(redirect_field_name="", login_url="/403")
def get_users(request):
"""Get all users."""
users = (
User.objects
# Exclude system users
.exclude(email__regex=r"^pontoon-(\w+)@example.com$")
# Exclude deleted users
.exclude(email__regex=r"^deleted-user-(\w+)@example.com$")
)
payload = []
for u in users:
payload.append(
{
"gravatar": u.gravatar_url(44),
"name": u.name_or_email,
"url": u.profile_url,
}
)
return JsonResponse(payload, safe=False)
@utils.require_AJAX
def perform_checks(request):
"""Perform quality checks and return a list of any failed ones."""
try:
entity = request.POST["entity"]
locale_code = request.POST["locale_code"]
original = request.POST["original"]
string = request.POST["string"]
ignore_warnings = request.POST.get("ignore_warnings", "false") == "true"
except MultiValueDictKeyError as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"},
status=400,
)
try:
entity = Entity.objects.get(pk=entity)
except Entity.DoesNotExist as e:
return JsonResponse(
{"status": False, "message": f"Bad Request: {e}"},
status=400,
)
failed_checks = run_checks(
entity,
locale_code,
original,
string,
request.user.profile.quality_checks,
)
if are_blocking_checks(failed_checks, ignore_warnings):
return JsonResponse({"failedChecks": failed_checks})
else:
return JsonResponse({"status": True})
@transaction.atomic
def download_translations(request):
"""Download translated resource."""
try:
slug = request.GET["slug"]
code = request.GET["code"]
part = request.GET["part"]
except MultiValueDictKeyError:
raise Http404
content, filename = utils.get_download_content(slug, code, part)
if content is None:
raise Http404
response = HttpResponse()
response.content = content
if filename.endswith(".zip"):
response["Content-Type"] = "application/zip"
else:
response["Content-Type"] = "text/plain"
response["Content-Disposition"] = "attachment; filename=" + filename
return response
@login_required(redirect_field_name="", login_url="/403")
@require_POST
@transaction.atomic
def upload(request):
"""Upload translated resource."""
try:
slug = request.POST["slug"]
code = request.POST["code"]
part = request.POST["part"]
except MultiValueDictKeyError:
raise Http404
locale = get_object_or_404(Locale, code=code)
project = get_object_or_404(Project.objects.visible_for(request.user), slug=slug)
if not request.user.can_translate(
project=project, locale=locale
) or utils.readonly_exists(project, locale):
return HttpResponseForbidden("You don't have permission to upload files.")
form = forms.UploadFileForm(request.POST, request.FILES)
if form.is_valid():
f = request.FILES["uploadfile"]
utils.handle_upload_content(slug, code, part, f, request.user)
messages.success(request, "Translations updated from uploaded file.")
else:
for field, errors in form.errors.items():
for error in errors:
messages.error(request, error)
response = HttpResponse(content="", status=303)
response["Location"] = reverse(
"pontoon.translate",
kwargs={"locale": code, "project": slug, "resource": part},
)
return response
@condition(etag_func=None)
def download_translation_memory(request, locale, slug):
locale = get_object_or_404(Locale, code=locale)
if slug.lower() == "all-projects":
project_filter = Q()
else:
project = get_object_or_404(
Project.objects.visible_for(request.user).available(), slug=slug
)
project_filter = Q(project=project)
tm_entries = (
TranslationMemoryEntry.objects.filter(project_filter)
.filter(locale=locale, translation__isnull=False)
.exclude(Q(source="") | Q(target=""))
.exclude(translation__approved=False, translation__fuzzy=False)
)
filename = f"{locale.code}.{slug}.tmx"
response = StreamingHttpResponse(
utils.build_translation_memory_file(
datetime.now(),
locale.code,
tm_entries.values_list(
"entity__resource__path",
"entity__key",
"source",
"target",
"project__slug",
).order_by("project__slug", "source"),
),
content_type="text/xml",
)
response["Content-Disposition"] = 'attachment; filename="{filename}"'.format(
filename=filename
)
return response
@utils.require_AJAX
def user_data(request):
user = request.user
if not user.is_authenticated:
if settings.AUTHENTICATION_METHOD == "django":
login_url = reverse("standalone_login")
else:
login_url = provider_login_url(request)
return JsonResponse({"is_authenticated": False, "login_url": login_url})
if settings.AUTHENTICATION_METHOD == "django":
logout_url = reverse("standalone_logout")
else:
logout_url = reverse("account_logout")
return JsonResponse(
{
"is_authenticated": True,
"is_admin": user.has_perm("base.can_manage_project"),
"id": user.id,
"email": user.email,
"display_name": user.display_name,
"name_or_email": user.name_or_email,
"username": user.username,
"manager_for_locales": list(
user.managed_locales.values_list("code", flat=True)
),
"translator_for_locales": list(
user.translated_locales.values_list("code", flat=True)
),
"translator_for_projects": user.translated_projects,
"settings": {
"quality_checks": user.profile.quality_checks,
"force_suggestions": user.profile.force_suggestions,
},
"tour_status": user.profile.tour_status,
"has_dismissed_addon_promotion": user.profile.has_dismissed_addon_promotion,
"logout_url": logout_url,
"gravatar_url_small": user.gravatar_url(88),
"gravatar_url_big": user.gravatar_url(176),
"notifications": user.serialized_notifications,
}
)
class AjaxFormView(FormView):
"""A form view that when the form is submitted, it will return a json
response containing either an ``errors`` object with a bad response status
if the form fails, or a ``result`` object supplied by the form's save
method
"""
@method_decorator(utils.require_AJAX)
def get(self, *args, **kwargs):
return super().get(*args, **kwargs)
@method_decorator(utils.require_AJAX)
def post(self, *args, **kwargs):
return super().post(*args, **kwargs)
def form_invalid(self, form):
return JsonResponse(dict(errors=form.errors), status=400)
def form_valid(self, form):
return JsonResponse(dict(data=form.save()))
class AjaxFormPostView(AjaxFormView):
"""An Ajax form view that only allows POST requests"""
def get(self, *args, **kwargs):
raise Http404
| mozilla/pontoon | pontoon/base/views.py | Python | bsd-3-clause | 29,190 |
#!/usr/bin/env python
import os
import json
from jinja2 import Environment, FileSystemLoader
PATH = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_ENVIRONMENT = Environment(
autoescape=False,
loader=FileSystemLoader(os.path.join(PATH, 'templates')),
trim_blocks=False)
def path_to_dict(path):
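    # Recursively describe `path` as nested dicts: {'name': ..., 'type': 'file'|'directory', 'children': [...]}.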
d = {'name': os.path.basename(path)}
if os.path.isdir(path):
d['type'] = 'directory'
d['children'] = [path_to_dict(os.path.join(path, x)) for x in os.listdir(path)]
else:
d['type'] = "file"
return d
def render_template(template_filename, context):
return TEMPLATE_ENVIRONMENT.get_template(template_filename).render(context)
def create_index_html():
modules = []
filetreeJSON = path_to_dict('./modules')
for t in filetreeJSON['children']:
if t['type'] == "directory":
newDict = {'name': t['name'], 'files' : []}
for f in t["children"]:
if f['type'] == "file":
newDict['files'].append(f['name'])
modules.append(newDict)
    print(modules)
fname = "index.html"
context = {
'modules': modules
}
#
with open(fname, 'w') as f:
html = render_template('index.html', context)
f.write(html)
def main():
create_index_html()
########################################
if __name__ == "__main__":
main()
| jurvis/mugger | generate.py | Python | mit | 1,268 |
"""Support for Fritzbox binary sensors."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import Final
from pyfritzhome.fritzhomedevice import FritzhomeDevice
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import FritzBoxEntity
from .const import CONF_COORDINATOR, DOMAIN as FRITZBOX_DOMAIN
from .coordinator import FritzboxDataUpdateCoordinator
from .model import FritzEntityDescriptionMixinBase
@dataclass
class FritzEntityDescriptionMixinBinarySensor(FritzEntityDescriptionMixinBase):
"""BinarySensor description mixin for Fritz!Smarthome entities."""
is_on: Callable[[FritzhomeDevice], bool | None]
@dataclass
class FritzBinarySensorEntityDescription(
BinarySensorEntityDescription, FritzEntityDescriptionMixinBinarySensor
):
"""Description for Fritz!Smarthome binary sensor entities."""
BINARY_SENSOR_TYPES: Final[tuple[FritzBinarySensorEntityDescription, ...]] = (
FritzBinarySensorEntityDescription(
key="alarm",
name="Alarm",
device_class=BinarySensorDeviceClass.WINDOW,
suitable=lambda device: device.has_alarm, # type: ignore[no-any-return]
is_on=lambda device: device.alert_state, # type: ignore[no-any-return]
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the FRITZ!SmartHome binary sensor from ConfigEntry."""
coordinator = hass.data[FRITZBOX_DOMAIN][entry.entry_id][CONF_COORDINATOR]
async_add_entities(
[
FritzboxBinarySensor(coordinator, ain, description)
for ain, device in coordinator.data.items()
for description in BINARY_SENSOR_TYPES
if description.suitable(device)
]
)
class FritzboxBinarySensor(FritzBoxEntity, BinarySensorEntity):
"""Representation of a binary FRITZ!SmartHome device."""
entity_description: FritzBinarySensorEntityDescription
def __init__(
self,
coordinator: FritzboxDataUpdateCoordinator,
ain: str,
entity_description: FritzBinarySensorEntityDescription,
) -> None:
"""Initialize the FritzBox entity."""
super().__init__(coordinator, ain, entity_description)
self._attr_name = self.device.name
self._attr_unique_id = ain
@property
def is_on(self) -> bool | None:
"""Return true if sensor is on."""
return self.entity_description.is_on(self.device)
| mezz64/home-assistant | homeassistant/components/fritzbox/binary_sensor.py | Python | apache-2.0 | 2,795 |
import logging
import threading
import time
import datetime
#import gdrivefs.report
import gdrivefs.state
from gdrivefs.conf import Conf
from gdrivefs.cache.cache_registry import CacheRegistry, CacheFault
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
class CacheAgent(object):
"""A particular namespace within the cache."""
registry = None
resource_name = None
max_age = None
fault_handler = None
cleanup_pretrigger = None
report = None
report_source_name = None
def __init__(self, resource_name, max_age, fault_handler=None,
cleanup_pretrigger=None):
_logger.debug("CacheAgent(%s,%s,%s,%s)" % (resource_name, max_age,
type(fault_handler),
cleanup_pretrigger))
self.registry = CacheRegistry.get_instance(resource_name)
self.resource_name = resource_name
self.max_age = max_age
self.fault_handler = fault_handler
self.cleanup_pretrigger = cleanup_pretrigger
# self.report = Report.get_instance()
# self.report_source_name = ("cache-%s" % (self.resource_name))
self.__t = None
self.__t_quit_ev = threading.Event()
self.__start_cleanup()
def __del__(self):
self.__stop_cleanup()
# TODO(dustin): Currently disabled. The system doesn't rely on it, and it's
# just another thread that unnecessarily runs, and trips up our
# ability to test individual components in simple isolation. It
# needs to be refactored.
#
# We'd like to either refactor into a multiprocessing worker, or
# just send to statsd (which would be kindof cool).
# self.__post_status()
# def __del__(self):
#
# if self.report.is_source(self.report_source_name):
# self.report.remove_all_values(self.report_source_name)
# pass
# def __post_status(self):
# """Send the current status to our reporting tool."""
#
# num_values = self.registry.count(self.resource_name)
#
# self.report.set_values(self.report_source_name, 'count',
# num_values)
#
# status_post_interval_s = Conf.get('cache_status_post_frequency_s')
# status_timer = Timer(status_post_interval_s, self.__post_status)
#
# Timers.get_instance().register_timer('status', status_timer)
def __cleanup(self):
"""Scan the current cache and determine items old-enough to be
removed.
"""
cleanup_interval_s = Conf.get('cache_cleanup_check_frequency_s')
_logger.info("Cache-cleanup thread running: %s", self)
while self.__t_quit_ev.is_set() is False and \
gdrivefs.state.GLOBAL_EXIT_EVENT.is_set() is False:
_logger.debug("Doing clean-up for cache resource with name [%s]." %
(self.resource_name))
cache_dict = self.registry.list_raw(self.resource_name)
total_keys = [ (key, value_tuple[1]) for key, value_tuple \
in cache_dict.iteritems() ]
cleanup_keys = [ key for key, value_tuple \
in cache_dict.iteritems() \
                             if (datetime.datetime.now() - value_tuple[1]).total_seconds() > \
self.max_age ]
_logger.debug("Found (%d) entries to clean-up from entry-cache." %
(len(cleanup_keys)))
if cleanup_keys:
for key in cleanup_keys:
_logger.debug("Cache entry [%s] under resource-name [%s] "
"will be cleaned-up." %
(key, self.resource_name))
                    if not self.exists(key, no_fault_check=True):
_logger.debug("Entry with ID [%s] has already been "
"cleaned-up." % (key))
else:
self.remove(key)
else:
_logger.debug("No cache-cleanup required.")
time.sleep(cleanup_interval_s)
_logger.info("Cache-cleanup thread terminating: %s", self)
def __start_cleanup(self):
_logger.info("Starting cache-cleanup thread: %s", self)
self.__t = threading.Thread(target=self.__cleanup)
self.__t.start()
def __stop_cleanup(self):
_logger.info("Stopping cache-cleanup thread: %s", self)
self.__t_quit_ev.set()
self.__t.join()
def set(self, key, value):
_logger.debug("CacheAgent.set(%s,%s)" % (key, value))
return self.registry.set(self.resource_name, key, value)
def remove(self, key):
_logger.debug("CacheAgent.remove(%s)" % (key))
return self.registry.remove(self.resource_name,
key,
cleanup_pretrigger=self.cleanup_pretrigger)
    def get(self, key, handle_fault=None):
        if handle_fault is None:
            handle_fault = True
_logger.debug("CacheAgent.get(%s)" % (key))
try:
result = self.registry.get(self.resource_name,
key,
max_age=self.max_age,
cleanup_pretrigger=self.cleanup_pretrigger)
except CacheFault:
_logger.debug("There was a cache-miss while requesting item with "
"ID (key).")
            if self.fault_handler is None or not handle_fault:
raise
result = self.fault_handler(self.resource_name, key)
if result is None:
raise
return result
def exists(self, key, no_fault_check=False):
_logger.debug("CacheAgent.exists(%s)" % (key))
return self.registry.exists(self.resource_name, key,
max_age=self.max_age,
cleanup_pretrigger=self.cleanup_pretrigger,
no_fault_check=no_fault_check)
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
return self.set(key, value)
def __delitem__(self, key):
return self.remove(key)
| tvierling/GDriveFS | gdrivefs/cache/cache_agent.py | Python | gpl-2.0 | 6,509 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic assertions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import reference_test_base
import tensorflow.compat.v1 as tf
def simple_assertion(x):
assert x > 0
return x
class ReferenceTest(reference_test_base.TestCase):
def test_basic(self):
self.assertNativeMatchesCompiled(simple_assertion, 1)
with self.assertRaises(tf.errors.InvalidArgumentError):
self.try_execute_compiled(simple_assertion, 0)
if __name__ == '__main__':
tf.test.main()
| tensorflow/autograph | reference_tests/assertion_test.py | Python | apache-2.0 | 1,233 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Prabhat Kumar'
| 4979/machines | code/github/recipes/setup.py | Python | apache-2.0 | 76 |
# -*- coding: utf-8 -*-
from app import app
from cache.public import cache_url
from models import WechatMenu
__author__ = 'fengguanhua'
import time
import json
from flask import request, jsonify
from flask_weixin import FlaskWeixin
from wexin.util import *
from app import logger
from flask import Blueprint
weixin_module = Blueprint('weixin', __name__, static_folder='static')
weixin = FlaskWeixin(app)
app.add_url_rule('/weixin', view_func=weixin.view_func)
def get_menus_json():
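    # Build the payload for WeChat's custom-menu API: a {"button": [...]} dict
    # assembled from the root WechatMenu rows (each serialized via to_dict()).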
try:
menulist = []
menus = db.session.query(WechatMenu).filter(WechatMenu.parent_id == None).order_by(WechatMenu.seqid.asc()).all()
for menu in menus:
menulist.append(menu.to_dict())
dicts = {"button": menulist}
return dicts
    except Exception as e:
        logger.error('get_menus_json failed: %s' % e)
        return None
@weixin('*')
def reply_all(**kwargs):
return weixin.view_func()
@weixin_module.route('/api/menu/register', methods=['POST'])
def createMenu():
menu = get_menus_json()
logger.info('[WEIXIN] menu=%s' % menu)
try:
result = weixin.weixin_helper.create_menu(menu)
result = jsonify(result)
return result
except Exception as e:
logger.error(e.message)
return jsonify({'result': 255, 'errmsg': e.message})
@weixin_module.route('/weixin_push', methods=['GET', 'POST'])
def weixin_push():
cache_url(request.host_url)
if request.data:
data = request.values
tag = data.get('tag')
newsid = data.get('newsid')
user = data.get('user')
else:
tag = request.args['tag']
newsid = request.args['newsid']
user = request.args['user']
users = [user]
if tag.find("news") >= 0:
ret = weixin.weixin_reply.push_news_reply(weixin.weixin_helper, newsid, users)
else:
ret = weixin.weixin_reply.push_text_reply(weixin.weixin_helper, newsid, users)
return str(ret)
@weixin_module.route('/update_customer_info')
def batch_update_customer_info():
sucsess_count = 0
error_count = 0
total_count = 0
openid = request.args.get('openid')
customers = []
if openid:
customer = Customer.query.filter_by(openid=openid).first()
if customer:
customers.append(customer)
else:
customers = Customer.query.filter(Customer.nickname == None, Customer.head_image == None,
Customer.active == True).all()
for customer in customers:
total_count += 1
result = update_customer_info(customer.openid)
if result:
sucsess_count += 1
else:
error_count += 1
time.sleep(0.5)
return '{"err":"%d", "updated":%d, "all":%d}' % (error_count, sucsess_count, len(customers))
| davidvon/pipa-pay-server | admin/wexin/views.py | Python | apache-2.0 | 2,750 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models, api, _
import odoo.addons.decimal_precision as dp
from odoo.exceptions import UserError
class AccountVoucher(models.Model):
_name = 'account.voucher'
_description = 'Accounting Voucher'
_inherit = ['mail.thread']
_order = "date desc, id desc"
@api.model
def _default_journal(self):
voucher_type = self._context.get('voucher_type', 'sale')
company_id = self._context.get('company_id', self.env.user.company_id.id)
domain = [
('type', '=', voucher_type),
('company_id', '=', company_id),
]
return self.env['account.journal'].search(domain, limit=1)
voucher_type = fields.Selection([
('sale', 'Sale'),
('purchase', 'Purchase')
], string='Type', readonly=True, states={'draft': [('readonly', False)]}, oldname="type")
name = fields.Char('Payment Reference',
readonly=True, states={'draft': [('readonly', False)]}, default='')
date = fields.Date("Bill Date", readonly=True,
select=True, states={'draft': [('readonly', False)]},
copy=False, default=fields.Date.context_today)
account_date = fields.Date("Accounting Date",
readonly=True, select=True, states={'draft': [('readonly', False)]},
help="Effective date for accounting entries", copy=False, default=fields.Date.context_today)
journal_id = fields.Many2one('account.journal', 'Journal',
required=True, readonly=True, states={'draft': [('readonly', False)]}, default=_default_journal)
account_id = fields.Many2one('account.account', 'Account',
required=True, readonly=True, states={'draft': [('readonly', False)]},
domain="[('deprecated', '=', False), ('internal_type','=', (pay_now == 'pay_now' and 'liquidity' or voucher_type == 'purchase' and 'payable' or 'receivable'))]")
line_ids = fields.One2many('account.voucher.line', 'voucher_id', 'Voucher Lines',
readonly=True, copy=True,
states={'draft': [('readonly', False)]})
narration = fields.Text('Notes', readonly=True, states={'draft': [('readonly', False)]})
currency_id = fields.Many2one('res.currency', compute='_get_journal_currency',
string='Currency', readonly=True, required=True, default=lambda self: self._get_currency())
company_id = fields.Many2one('res.company', 'Company',
required=True, readonly=True, states={'draft': [('readonly', False)]},
related='journal_id.company_id', default=lambda self: self._get_company())
state = fields.Selection([
('draft', 'Draft'),
('cancel', 'Cancelled'),
('proforma', 'Pro-forma'),
('posted', 'Posted')
], 'Status', readonly=True, track_visibility='onchange', copy=False, default='draft',
help=" * The 'Draft' status is used when a user is encoding a new and unconfirmed Voucher.\n"
" * The 'Pro-forma' status is used when the voucher does not have a voucher number.\n"
" * The 'Posted' status is used when user create voucher,a voucher number is generated and voucher entries are created in account.\n"
" * The 'Cancelled' status is used when user cancel voucher.")
reference = fields.Char('Bill Reference', readonly=True, states={'draft': [('readonly', False)]},
help="The partner reference of this document.", copy=False)
amount = fields.Monetary(string='Total', store=True, readonly=True, compute='_compute_total')
tax_amount = fields.Monetary(readonly=True, store=True, compute='_compute_total')
tax_correction = fields.Monetary(readonly=True, states={'draft': [('readonly', False)]},
help='In case we have a rounding problem in the tax, use this field to correct it')
number = fields.Char(readonly=True, copy=False)
move_id = fields.Many2one('account.move', 'Journal Entry', copy=False)
partner_id = fields.Many2one('res.partner', 'Partner', change_default=1, readonly=True, states={'draft': [('readonly', False)]})
paid = fields.Boolean(compute='_check_paid', help="The Voucher has been totally paid.")
pay_now = fields.Selection([
('pay_now', 'Pay Directly'),
('pay_later', 'Pay Later'),
], 'Payment', select=True, readonly=True, states={'draft': [('readonly', False)]}, default='pay_later')
date_due = fields.Date('Due Date', readonly=True, select=True, states={'draft': [('readonly', False)]})
@api.one
@api.depends('move_id.line_ids.reconciled', 'move_id.line_ids.account_id.internal_type')
def _check_paid(self):
        self.paid = any([line.account_id.internal_type in ('receivable', 'payable') and line.reconciled for line in self.move_id.line_ids])
@api.model
def _get_currency(self):
journal = self.env['account.journal'].browse(self._context.get('journal_id', False))
if journal.currency_id:
return journal.currency_id.id
return self.env.user.company_id.currency_id.id
@api.model
def _get_company(self):
return self._context.get('company_id', self.env.user.company_id.id)
@api.multi
@api.depends('name', 'number')
def name_get(self):
return [(r.id, (r.number or _('Voucher'))) for r in self]
@api.one
@api.depends('journal_id', 'company_id')
def _get_journal_currency(self):
self.currency_id = self.journal_id.currency_id.id or self.company_id.currency_id.id
@api.multi
@api.depends('tax_correction', 'line_ids.price_subtotal')
def _compute_total(self):
for voucher in self:
total = 0
tax_amount = 0
for line in voucher.line_ids:
tax_info = line.tax_ids.compute_all(line.price_unit, voucher.currency_id, line.quantity, line.product_id, voucher.partner_id)
total += tax_info.get('total_included', 0.0)
                tax_amount += sum([t.get('amount', 0.0) for t in tax_info.get('taxes', [])])
voucher.amount = total + voucher.tax_correction
voucher.tax_amount = tax_amount
@api.one
@api.depends('account_pay_now_id', 'account_pay_later_id', 'pay_now')
def _get_account(self):
self.account_id = self.account_pay_now_id if self.pay_now == 'pay_now' else self.account_pay_later_id
@api.onchange('date')
def onchange_date(self):
self.account_date = self.date
@api.onchange('partner_id', 'pay_now')
def onchange_partner_id(self):
if self.pay_now == 'pay_now':
liq_journal = self.env['account.journal'].search([('type', 'in', ('bank', 'cash'))], limit=1)
self.account_id = liq_journal.default_debit_account_id \
if self.voucher_type == 'sale' else liq_journal.default_credit_account_id
else:
if self.partner_id:
self.account_id = self.partner_id.property_account_receivable_id \
if self.voucher_type == 'sale' else self.partner_id.property_account_payable_id
else:
self.account_id = self.journal_id.default_debit_account_id \
if self.voucher_type == 'sale' else self.journal_id.default_credit_account_id
@api.multi
def button_proforma_voucher(self):
self.signal_workflow('proforma_voucher')
return {'type': 'ir.actions.act_window_close'}
@api.multi
def proforma_voucher(self):
self.action_move_line_create()
@api.multi
def action_cancel_draft(self):
self.create_workflow()
self.write({'state': 'draft'})
@api.multi
def cancel_voucher(self):
for voucher in self:
voucher.move_id.button_cancel()
voucher.move_id.unlink()
self.write({'state': 'cancel', 'move_id': False})
@api.multi
def unlink(self):
for voucher in self:
if voucher.state not in ('draft', 'cancel'):
raise UserError(_('Cannot delete voucher(s) which are already opened or paid.'))
return super(AccountVoucher, self).unlink()
@api.multi
def first_move_line_get(self, move_id, company_currency, current_currency):
debit = credit = 0.0
if self.voucher_type == 'purchase':
credit = self._convert_amount(self.amount)
elif self.voucher_type == 'sale':
debit = self._convert_amount(self.amount)
if debit < 0.0: debit = 0.0
if credit < 0.0: credit = 0.0
sign = debit - credit < 0 and -1 or 1
#set the first line of the voucher
move_line = {
'name': self.name or '/',
'debit': debit,
'credit': credit,
'account_id': self.account_id.id,
'move_id': move_id,
'journal_id': self.journal_id.id,
'partner_id': self.partner_id.id,
'currency_id': company_currency != current_currency and current_currency or False,
'amount_currency': (sign * abs(self.amount) # amount < 0 for refunds
if company_currency != current_currency else 0.0),
'date': self.account_date,
'date_maturity': self.date_due
}
return move_line
@api.multi
def account_move_get(self):
if self.number:
name = self.number
elif self.journal_id.sequence_id:
if not self.journal_id.sequence_id.active:
                raise UserError(_('Please activate the sequence of the selected journal!'))
name = self.journal_id.sequence_id.with_context(ir_sequence_date=self.date).next_by_id()
else:
raise UserError(_('Please define a sequence on the journal.'))
move = {
'name': name,
'journal_id': self.journal_id.id,
'narration': self.narration,
'date': self.account_date,
'ref': self.reference,
}
return move
@api.multi
def _convert_amount(self, amount):
'''
        This function converts the given amount into the company currency. It uses either the rate on the voucher (if
        payment_rate_currency_id is relevant) or the rate encoded in the system.
:param amount: float. The amount to convert
:param voucher: id of the voucher on which we want the conversion
        :param context: the context to use for the conversion. It may contain the key 'date' set to the voucher date
        field in order to select the right rate to use.
:return: the amount in the currency of the voucher's company
:rtype: float
'''
for voucher in self:
return voucher.currency_id.compute(amount, voucher.company_id.currency_id)
@api.multi
def voucher_move_line_create(self, line_total, move_id, company_currency, current_currency):
'''
Create one account move line, on the given account move, per voucher line where amount is not 0.0.
        It returns a tuple where tot_line is the total difference between debit and credit, together with
        a list of lists of ids to be reconciled, in the format (total_deb_cred, list_of_lists).
        :param voucher_id: id of the voucher we are working with
        :param line_total: Amount of the first line, which corresponds to the amount we should split among all voucher lines.
        :param move_id: Account move where those lines will be joined.
        :param company_currency: id of the currency of the company to which the voucher belongs
        :param current_currency: id of the currency of the voucher
        :return: Tuple built as (remaining amount not allocated on voucher lines, list of account_move_line created in this method)
:rtype: tuple(float, list of int)
'''
for line in self.line_ids:
#create one move line per voucher line where amount is not 0.0
if not line.price_subtotal:
continue
# convert the amount set on the voucher line into the currency of the voucher's company
# this calls res_curreny.compute() with the right context,
# so that it will take either the rate on the voucher if it is relevant or will use the default behaviour
amount = self._convert_amount(line.price_unit*line.quantity)
move_line = {
'journal_id': self.journal_id.id,
'name': line.name or '/',
'account_id': line.account_id.id,
'move_id': move_id,
'partner_id': self.partner_id.id,
'analytic_account_id': line.account_analytic_id and line.account_analytic_id.id or False,
'quantity': 1,
'credit': abs(amount) if self.voucher_type == 'sale' else 0.0,
'debit': abs(amount) if self.voucher_type == 'purchase' else 0.0,
'date': self.account_date,
'tax_ids': [(4,t.id) for t in line.tax_ids],
'amount_currency': line.price_subtotal if current_currency != company_currency else 0.0,
}
self.env['account.move.line'].create(move_line)
return line_total
@api.multi
def action_move_line_create(self):
'''
Confirm the vouchers given in ids and create the journal entries for each of them
'''
for voucher in self:
local_context = dict(self._context, force_company=voucher.journal_id.company_id.id)
if voucher.move_id:
continue
company_currency = voucher.journal_id.company_id.currency_id.id
current_currency = voucher.currency_id.id or company_currency
# we select the context to use accordingly if it's a multicurrency case or not
# But for the operations made by _convert_amount, we always need to give the date in the context
ctx = local_context.copy()
ctx['date'] = voucher.account_date
ctx['check_move_validity'] = False
# Create the account move record.
move = self.env['account.move'].create(voucher.account_move_get())
# Get the name of the account_move just created
# Create the first line of the voucher
move_line = self.env['account.move.line'].with_context(ctx).create(voucher.first_move_line_get(move.id, company_currency, current_currency))
line_total = move_line.debit - move_line.credit
if voucher.voucher_type == 'sale':
line_total = line_total - voucher._convert_amount(voucher.tax_amount)
elif voucher.voucher_type == 'purchase':
line_total = line_total + voucher._convert_amount(voucher.tax_amount)
# Create one move line per voucher line where amount is not 0.0
line_total = voucher.with_context(ctx).voucher_move_line_create(line_total, move.id, company_currency, current_currency)
# Add tax correction to move line if any tax correction specified
if voucher.tax_correction != 0.0:
tax_move_line = self.env['account.move.line'].search([('move_id', '=', move.id), ('tax_line_id', '!=', False)], limit=1)
if len(tax_move_line):
tax_move_line.write({'debit': tax_move_line.debit + voucher.tax_correction if tax_move_line.debit > 0 else 0,
'credit': tax_move_line.credit + voucher.tax_correction if tax_move_line.credit > 0 else 0})
# We post the voucher.
voucher.write({
'move_id': move.id,
'state': 'posted',
'number': move.name
})
move.post()
return True
@api.multi
def _track_subtype(self, init_values):
if 'state' in init_values:
return 'account_voucher.mt_voucher_state_change'
return super(AccountVoucher, self)._track_subtype(init_values)
class AccountVoucherLine(models.Model):
_name = 'account.voucher.line'
_description = 'Voucher Lines'
name = fields.Text(string='Description', required=True)
sequence = fields.Integer(default=10,
help="Gives the sequence of this line when displaying the voucher.")
voucher_id = fields.Many2one('account.voucher', 'Voucher', required=1, ondelete='cascade')
product_id = fields.Many2one('product.product', string='Product',
ondelete='set null', index=True)
account_id = fields.Many2one('account.account', string='Account',
required=True, domain=[('deprecated', '=', False)],
help="The income or expense account related to the selected product.")
price_unit = fields.Float(string='Unit Price', required=True, digits=dp.get_precision('Product Price'), oldname='amount')
price_subtotal = fields.Monetary(string='Amount',
store=True, readonly=True, compute='_compute_subtotal')
quantity = fields.Float(digits=dp.get_precision('Product Unit of Measure'),
required=True, default=1)
account_analytic_id = fields.Many2one('account.analytic.account', 'Analytic Account')
company_id = fields.Many2one('res.company', related='voucher_id.company_id', string='Company', store=True, readonly=True)
tax_ids = fields.Many2many('account.tax', string='Tax', help="Only for tax excluded from price")
currency_id = fields.Many2one('res.currency', related='voucher_id.currency_id')
@api.one
@api.depends('price_unit', 'tax_ids', 'quantity', 'product_id', 'voucher_id.currency_id')
def _compute_subtotal(self):
self.price_subtotal = self.quantity * self.price_unit
if self.tax_ids:
taxes = self.tax_ids.compute_all(self.price_unit, self.voucher_id.currency_id, self.quantity, product=self.product_id, partner=self.voucher_id.partner_id)
self.price_subtotal = taxes['total_excluded']
def _get_account(self, product, fpos, type):
accounts = product.product_tmpl_id.get_product_accounts(fpos)
if type == 'sale':
return accounts['income']
return accounts['expense']
@api.multi
def product_id_change(self, product_id, partner_id=False, price_unit=False, company_id=None, currency_id=None, type=None):
context = self._context
company_id = company_id if company_id is not None else context.get('company_id', False)
company = self.env['res.company'].browse(company_id)
currency = self.env['res.currency'].browse(currency_id)
if not partner_id:
raise UserError(_("You must first select a partner!"))
part = self.env['res.partner'].browse(partner_id)
if part.lang:
self = self.with_context(lang=part.lang)
product = self.env['product.product'].browse(product_id)
fpos = part.property_account_position_id
account = self._get_account(product, fpos, type)
values = {
'name': product.partner_ref,
'account_id': account.id,
}
if type == 'purchase':
values['price_unit'] = price_unit or product.standard_price
taxes = product.supplier_taxes_id or account.tax_ids
if product.description_purchase:
values['name'] += '\n' + product.description_purchase
else:
values['price_unit'] = product.lst_price
taxes = product.taxes_id or account.tax_ids
if product.description_sale:
values['name'] += '\n' + product.description_sale
values['tax_ids'] = taxes.ids
if company and currency:
if company.currency_id != currency:
if type == 'purchase':
values['price_unit'] = product.standard_price
values['price_unit'] = values['price_unit'] * currency.rate
return {'value': values, 'domain': {}}
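# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how the voucher flow documented above might be driven from server-side code:
# create a purchase voucher with a single line, then confirm it so that
# action_move_line_create() builds and posts the corresponding journal entry.
# The helper name and all field values below are placeholders, not Odoo conventions.
def _example_create_and_post_voucher(env, partner, journal, payable_account, expense_account):
    voucher = env['account.voucher'].create({
        'voucher_type': 'purchase',
        'partner_id': partner.id,
        'journal_id': journal.id,
        'account_id': payable_account.id,
        'line_ids': [(0, 0, {
            'name': 'Example expense line',
            'account_id': expense_account.id,
            'price_unit': 100.0,
            'quantity': 1.0,
        })],
    })
    # proforma_voucher() calls action_move_line_create(), which creates the
    # account.move, posts it and sets the voucher state to 'posted'.
    voucher.proforma_voucher()
    return voucher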
| ayepezv/GAD_ERP | addons/account_voucher/models/account_voucher.py | Python | gpl-3.0 | 19,955 |
from django.contrib import admin
from .models import ConfigurationVariable
class ConfigurationVariableAdmin(admin.ModelAdmin):
list_display = ('name', 'value', 'description', 'editable')
fields = ('name', 'value', 'description', 'editable')
def get_readonly_fields(self, request, obj=None):
if obj:
if not obj.editable:
self.readonly_fields = ('value',)
else:
self.readonly_fields = ()
return ('name', 'editable', 'description',) + self.readonly_fields
return self.readonly_fields
def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request):
return False
admin.site.register(ConfigurationVariable, ConfigurationVariableAdmin)
| maoaiz/django-admin-conf-vars | django_admin_conf_vars/admin.py | Python | bsd-3-clause | 796 |
##
# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Extended account-specific logging.
Allows different sub-systems to log data on a per-principal basis.
"""
__all__ = [
"accountingEnabled",
"emitAccounting",
]
import datetime
import os
from twext.python.log import Logger
from twistedcaldav.config import config
log = Logger()
def accountingEnabled(category, record):
"""
Determine if accounting is enabled for the given category and record.
"""
return (
accountingEnabledForCategory(category) and
accountingEnabledForRecord(record)
)
def accountingEnabledForCategory(category):
"""
Determine if accounting is enabled for the given category.
"""
AccountingCategories = getattr(config, "AccountingCategories", None)
if AccountingCategories is None:
return False
return AccountingCategories.get(category, False)
def accountingEnabledForRecord(record):
"""
Determine if accounting is enabled for the given record.
"""
enabledRecordGUIDs = config.AccountingPrincipals
if "*" in enabledRecordGUIDs:
return True
return record.uid in enabledRecordGUIDs
def emitAccounting(category, record, data, tag=None, filename=None):
"""
Write the supplied data to the appropriate location for the given
category and record.
@param record: the record for whom a log entry is to be created.
@type record: L{DirectoryRecord}
@param category: accounting category
@type category: C{tuple}
@param data: data to write.
@type data: C{str}
"""
if isinstance(record, str):
principalLogPath = record
elif accountingEnabled(category, record):
principalLogPath = os.path.join(
record.uid[0:2],
record.uid[2:4],
record.uid
)
else:
return None
try:
#
# Obtain the accounting log file name
#
logRoot = config.AccountingLogRoot
logDirectory = category
if principalLogPath:
logDirectory = os.path.join(
logDirectory,
principalLogPath,
)
logFilename = os.path.join(
logDirectory,
datetime.datetime.now().isoformat() if filename is None else filename
)
if not os.path.isdir(os.path.join(logRoot, logDirectory)):
os.makedirs(os.path.join(logRoot, logDirectory))
if filename is None:
logFilename = "%s-01" % (logFilename,)
if tag:
logFilename += " (%s)" % (tag,)
logFilename += ".txt"
else:
if filename is None:
index = 1
while True:
path = "%s-%02d" % (logFilename, index)
if tag:
path += " (%s)" % (tag,)
path += ".txt"
if not os.path.isfile(os.path.join(logRoot, path)):
logFilename = path
break
if index == 1000:
log.error("Too many %s accounting files for %s" % (category, record))
return None
index += 1
#
# Now write out the data to the log file
#
logFile = open(os.path.join(logRoot, logFilename), "a")
try:
logFile.write(data)
finally:
logFile.close()
return logFilename
except OSError, e:
# No failures in accounting should propagate out
log.error("Failed to write accounting data due to: %s" % (str(e),))
return None
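# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how a caller might emit per-principal accounting data with the functions
# above: check accountingEnabled() first, then write the payload via emitAccounting().
# The "iTIP" category name and the payload string are illustrative assumptions only.
def _exampleEmitAccounting(record):
    if accountingEnabled("iTIP", record):
        return emitAccounting("iTIP", record, "example accounting payload\n", tag="example")
    return None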
| trevor/calendarserver | twistedcaldav/accounting.py | Python | apache-2.0 | 4,247 |
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from MaKaC.fossils.contribution import IContributionWithSpeakersFossil
from MaKaC.fossils.subcontribution import ISubContributionWithSpeakersFossil
from MaKaC.common.fossilize import addFossil
from MaKaC.conference import Contribution
from MaKaC.plugins.Collaboration.fossils import ICSErrorBaseFossil
from MaKaC.plugins.Collaboration.base import CollaborationTools
class IContributionRRFossil(IContributionWithSpeakersFossil):
""" This fossil is ready for when we add subcontribution granularity to contributions
and to provide an example for a plugin-specific fossil
"""
def getSubContributionList(self):
pass
getSubContributionList.result = ISubContributionWithSpeakersFossil
def getRecordingCapable(self):
pass
getRecordingCapable.produce = lambda self: CollaborationTools.isAbleToBeWebcastOrRecorded(self, "RecordingRequest")
# We cannot include this fossil in the Contribution class directly because it belongs to a plugin
addFossil(Contribution, IContributionRRFossil)
class IRecordingRequestErrorFossil(ICSErrorBaseFossil):
def getOperation(self):
pass
def getInner(self):
pass
| pferreir/indico-backup | indico/MaKaC/plugins/Collaboration/RecordingRequest/fossils.py | Python | gpl-3.0 | 1,938 |
#!/usr/bin/env python3
import argparse
import sys
import random
from contextlib import contextmanager
@contextmanager
def redirect_stdout(new_target):
old_target, sys.stdout = sys.stdout, new_target
try:
yield new_target
finally:
sys.stdout = old_target
def random_expression(depth = 3, maxparam = 0):
def recursion():
return random_expression(depth = depth-1, maxparam = maxparam)
if depth == 0:
if maxparam != 0 and random.randint(0, 1) != 0:
return 'p%02d' % random.randint(0, maxparam-1)
return random.choice([ '%e', '%f', '%g' ]) % random.uniform(-2, +2)
if random.randint(0, 4) == 0:
return recursion() + random.choice([ ' < ', ' <= ', ' == ', ' != ', ' >= ', ' > ' ]) + recursion() + ' ? ' + recursion() + ' : ' + recursion()
op_prefix = [ '+(', '-(' ]
op_infix = [ ' + ', ' - ', ' * ', ' / ' ]
op_func1 = [ '$ln', '$log10', '$exp', '$sqrt', '$floor', '$ceil', '$sin', '$cos', '$tan', '$asin', '$acos', '$atan', '$sinh', '$cosh', '$tanh', '$asinh', '$acosh', '$atanh' ]
op_func2 = [ '$pow', '$atan2', '$hypot' ]
op = random.choice(op_prefix + op_infix + op_func1 + op_func2)
if op in op_prefix:
return op + recursion() + ')'
if op in op_infix:
return '(' + recursion() + op + recursion() + ')'
if op in op_func1:
return op + '(' + recursion() + ')'
if op in op_func2:
return op + '(' + recursion() + ', ' + recursion() + ')'
raise
parser = argparse.ArgumentParser(formatter_class = argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-S', '--seed', type = int, help = 'seed for PRNG')
parser.add_argument('-c', '--count', type = int, default = 100, help = 'number of test cases to generate')
args = parser.parse_args()
if args.seed is not None:
print("PRNG seed: %d" % args.seed)
random.seed(args.seed)
for idx in range(args.count):
with open('temp/uut_%05d.v' % idx, 'w') as f:
with redirect_stdout(f):
print('module uut_%05d(output [63:0] %s);\n' % (idx, ', '.join(['y%02d' % i for i in range(100)])))
for i in range(30):
if idx < 10:
print('localparam p%02d = %s;' % (i, random_expression()))
else:
print('localparam%s p%02d = %s;' % (random.choice(['', ' real', ' integer']), i, random_expression()))
for i in range(30, 60):
if idx < 10:
print('localparam p%02d = %s;' % (i, random_expression(maxparam = 30)))
else:
print('localparam%s p%02d = %s;' % (random.choice(['', ' real', ' integer']), i, random_expression(maxparam = 30)))
for i in range(100):
print('assign y%02d = 65536 * (%s);' % (i, random_expression(maxparam = 60)))
print('endmodule')
with open('temp/uut_%05d.ys' % idx, 'w') as f:
with redirect_stdout(f):
print('read_verilog uut_%05d.v' % idx)
print('rename uut_%05d uut_%05d_syn' % (idx, idx))
print('write_verilog uut_%05d_syn.v' % idx)
with open('temp/uut_%05d_tb.v' % idx, 'w') as f:
with redirect_stdout(f):
print('module uut_%05d_tb;\n' % idx)
print('wire [63:0] %s;' % (', '.join(['r%02d' % i for i in range(100)])))
print('wire [63:0] %s;' % (', '.join(['s%02d' % i for i in range(100)])))
print('uut_%05d ref(%s);' % (idx, ', '.join(['r%02d' % i for i in range(100)])))
print('uut_%05d_syn syn(%s);' % (idx, ', '.join(['s%02d' % i for i in range(100)])))
print('task compare_ref_syn;')
print(' input [7:0] i;')
print(' input [63:0] r, s;')
print(' reg [64*8-1:0] buffer;')
print(' integer j;')
print(' begin')
print(' if (-1 <= $signed(r-s) && $signed(r-s) <= +1) begin')
print(' // $display("%d: %b %b", i, r, s);')
print(' end else if (r === s) begin ')
print(' // $display("%d: %b %b", i, r, s);')
print(' end else begin ')
print(' for (j = 0; j < 64; j = j+1)')
print(' buffer[j*8 +: 8] = r[j] !== s[j] ? "^" : " ";')
print(' $display("\\n%3d: %b %b", i, r, s);')
print(' $display(" %s %s", buffer, buffer);')
print(' end')
print(' end')
print('endtask')
print('initial begin #1;')
for i in range(100):
print(' compare_ref_syn(%2d, r%02d, s%02d);' % (i, i, i))
print('end')
print('endmodule')
| azonenberg/yosys | tests/realmath/generate.py | Python | isc | 4,702 |
import urllib2
from lxml import html
# To address paging in Google
PAGE = 0
# url and query string from PART I
# this is a custom range from Jan 1, 2000 to Jan 1, 2001
URL = 'https://www.google.com/search?q=new+york+times&tbs=cdr%3A1%2Ccd_min%3A1%2F1%2F2000%2Ccd_max%3A1%2F1%2F2001&start=' + str(PAGE*10)
# here we setup the necessary agent to download a google html page
opener = urllib2.build_opener()
opener.addheaders = [('User-agent',
'Mozilla/5.0 (Windows NT 6.3; WOW64) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/39.0.2171.95 Safari/537.36 \
OPR/26.0.1656.60')]
# let's download
google_html = opener.open(URL)
# parse the html
google_parsed = html.parse(google_html)
# Here's a smarter way to see what exactly it is you've downloaded/parsed with lxml:
html.open_in_browser(google_parsed)
#file://c:/users/rodrigo/appdata/local/temp/tmp1xllau.html
# Here comes the 'selecting'!
google_results = google_parsed.xpath('//*[@id="rso"]/div[2]')
print len(google_results)
#1
# the xpath in this line basically selects all children, which in our
# case are the 10 'li' elements
print len(google_results[0].xpath('./*'))
#10
# print out hyperlinks
# Note: after using devtool's magnifying glass and 'copy xpath', I got:
# //*[@id="rso"]/div[2]/li[1]/div/h3/a
google_list_items = google_results[0].xpath('.//h3/a/@href')
for elem in google_list_items:
print elem
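# --- Hedged extension (added for illustration; the XPath below is an assumption
# about Google's result markup at the time and may not match the live page) ---
# The same parsed tree can also yield the visible result titles, not just the hrefs:
google_titles = google_results[0].xpath('.//h3/a//text()')
for title in google_titles:
    print title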
| rodricios/crawl-to-the-future | crawlers/Crawling-Google/simpleselect.py | Python | gpl-2.0 | 1,471 |
# coding: utf-8
from __future__ import division, unicode_literals
"""
This module contains an algorithm to solve the Linear Assignment Problem
"""
__author__ = "Will Richards"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Will Richards"
__email__ = "[email protected]"
__date__ = "Jan 28, 2013"
import numpy as np
from six.moves import range
class LinearAssignment(object):
"""
This class finds the solution to the Linear Assignment Problem.
It finds a minimum cost matching between two sets, given a cost
matrix.
This class is an implementation of the LAPJV algorithm described in:
R. Jonker, A. Volgenant. A Shortest Augmenting Path Algorithm for
Dense and Sparse Linear Assignment Problems. Computing 38, 325-340
(1987)
Args:
costs: The cost matrix of the problem. cost[i,j] should be the
cost of matching x[i] to y[j]. The cost matrix may be
rectangular
epsilon: Tolerance for determining if solution vector is < 0
.. attribute: min_cost:
The minimum cost of the matching
.. attribute: solution:
The matching of the rows to columns. i.e solution = [1, 2, 0]
would match row 0 to column 1, row 1 to column 2 and row 2
to column 0. Total cost would be c[0, 1] + c[1, 2] + c[2, 0]
"""
def __init__(self, costs, epsilon=1e-6):
self.orig_c = np.array(costs, dtype=np.float64)
self.nx, self.ny = self.orig_c.shape
self.n = self.ny
self._inds = np.arange(self.n)
self.epsilon = abs(epsilon)
#check that cost matrix is square
if self.nx > self.ny:
raise ValueError("cost matrix must have at least as many columns as rows")
if self.nx == self.ny:
self.c = self.orig_c
else:
# Can run into precision issues if np.max is used as the fill value (since a
# value of this size doesn't necessarily end up in the solution). A value
# at least as large as the maximin is, however, guaranteed to appear so it
# is a safer choice. The fill value is not zero to avoid choosing the extra
# rows in the initial column reduction step
self.c = np.full((self.n, self.n), np.max(np.min(self.orig_c, axis=1)))
self.c[:self.nx] = self.orig_c
#initialize solution vectors
self._x = np.zeros(self.n, dtype=np.int) - 1
self._y = self._x.copy()
#if column reduction doesn't find a solution, augment with shortest
#paths until one is found
if self._column_reduction():
self._augmenting_row_reduction()
#initialize the reduced costs
self._update_cred()
while -1 in self._x:
self._augment()
self.solution = self._x[:self.nx]
self._min_cost = None
@property
def min_cost(self):
"""
Returns the cost of the best assignment
"""
if self._min_cost:
return self._min_cost
self._min_cost = np.sum(self.c[np.arange(self.nx), self.solution])
return self._min_cost
def _column_reduction(self):
"""
Column reduction and reduction transfer steps from LAPJV algorithm
"""
#assign each column to its lowest cost row, ensuring that only row
#or column is assigned once
i1, j = np.unique(np.argmin(self.c, axis=0), return_index=True)
self._x[i1] = j
#if problem is solved, return
if len(i1) == self.n:
return False
self._y[j] = i1
#reduction_transfer
#tempc is array with previously assigned matchings masked
self._v = np.min(self.c, axis=0)
tempc = self.c.copy()
tempc[i1, j] = np.inf
mu = np.min(tempc[i1, :] - self._v[None, :], axis=1)
self._v[j] -= mu
return True
def _augmenting_row_reduction(self):
"""
Augmenting row reduction step from LAPJV algorithm
"""
unassigned = np.where(self._x == -1)[0]
for i in unassigned:
for _ in range(self.c.size):
# Time in this loop can be proportional to 1/epsilon
# This step is not strictly necessary, so cutoff early
# to avoid near-infinite loops
# find smallest 2 values and indices
temp = self.c[i] - self._v
j1 = np.argmin(temp)
u1 = temp[j1]
temp[j1] = np.inf
j2 = np.argmin(temp)
u2 = temp[j2]
if u1 < u2:
self._v[j1] -= u2 - u1
elif self._y[j1] != -1:
j1 = j2
k = self._y[j1]
if k != -1:
self._x[k] = -1
self._x[i] = j1
self._y[j1] = i
i = k
if k == -1 or abs(u1 - u2) < self.epsilon:
break
def _update_cred(self):
"""
Updates the reduced costs with the values from the
dual solution
"""
ui = self.c[self._inds, self._x] - self._v[self._x]
self.cred = self.c - ui[:, None] - self._v[None, :]
def _augment(self):
"""
Finds a minimum cost path and adds it to the matching
"""
#build a minimum cost tree
_pred, _ready, istar, j, mu = self._build_tree()
#update prices
self._v[_ready] += self._d[_ready] - mu
#augment the solution with the minimum cost path from the
#tree. Follows an alternating path along matched, unmatched
#edges from X to Y
while True:
i = _pred[j]
self._y[j] = i
k = j
j = self._x[i]
self._x[i] = k
if i == istar:
break
self._update_cred()
def _build_tree(self):
"""
Builds the tree finding an augmenting path. Alternates along
matched and unmatched edges between X and Y. The paths are
stored in _pred (new predecessor of nodes in Y), and
self._x and self._y
"""
#find unassigned i*
istar = np.argmin(self._x)
#compute distances
self._d = self.c[istar] - self._v
_pred = np.zeros(self.n, dtype=np.int) + istar
#initialize sets
#READY: set of nodes visited and in the path (whose price gets
#updated in augment)
#SCAN: set of nodes at the bottom of the tree, which we need to
#look at
        #TODO: unvisited nodes
_ready = np.zeros(self.n, dtype=np.bool)
_scan = np.zeros(self.n, dtype=np.bool)
_todo = np.zeros(self.n, dtype=np.bool) + True
while True:
#populate scan with minimum reduced distances
if True not in _scan:
mu = np.min(self._d[_todo])
_scan[self._d == mu] = True
_todo[_scan] = False
j = np.argmin(self._y * _scan)
if self._y[j] == -1 and _scan[j]:
return _pred, _ready, istar, j, mu
#pick jstar from scan (scan always has at least 1)
_jstar = np.argmax(_scan)
#pick i associated with jstar
i = self._y[_jstar]
_scan[_jstar] = False
_ready[_jstar] = True
#find shorter distances
newdists = mu + self.cred[i, :]
shorter = np.logical_and(newdists < self._d, _todo)
#update distances
self._d[shorter] = newdists[shorter]
#update predecessors
_pred[shorter] = i
for j in np.nonzero(np.logical_and(self._d == mu, _todo))[0]:
if self._y[j] == -1:
return _pred, _ready, istar, j, mu
_scan[j] = True
_todo[j] = False
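# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Demonstrates the API described in the LinearAssignment docstring: pass a cost
# matrix, then read the row-to-column matching from .solution and its total cost
# from .min_cost. The 3x3 costs below are made up; for them the optimal matching
# is rows (0, 1, 2) -> columns (1, 0, 2) with a minimum cost of 5.0.
def _example_linear_assignment():
    demo_costs = np.array([[4.0, 1.0, 3.0],
                           [2.0, 0.0, 5.0],
                           [3.0, 2.0, 2.0]])
    la = LinearAssignment(demo_costs)
    return la.solution, la.min_cost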
| rousseab/pymatgen | pymatgen/optimization/linear_assignment.py | Python | mit | 8,010 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class PinboardLinkItem(scrapy.Item):
# fields that we get from pinboard
id = scrapy.Field()
link_url = scrapy.Field()
link_url_slug = scrapy.Field()
title = scrapy.Field()
description = scrapy.Field()
created_at = scrapy.Field()
saved_by_others = scrapy.Field()
tags = scrapy.Field() #array of tags
private = scrapy.Field()
to_read = scrapy.Field()
author = scrapy.Field()
# fields that we get when fetching the link
html_content = scrapy.Field()
html_code = scrapy.Field()
html_content_size = scrapy.Field()
html_fetch_date = scrapy.Field()
# JSON that's on pinboard results page
#{
#"id":"95383442",
#"url":"http:\/\/lovelycharts.com\/",
#"url_id":"112815",
#"author":"lfcipriani",
#"created":"2010-10-09 01:31:25",
#"description":"",
#"title":"Lovely Charts | Free online diagram software - Flowchart & process diagram, Network diagram, BPMN diagrams, Sitemap, Organisation chart, Wireframe, business drawing software",
#"slug":"2c72bdd86db1",
#"toread":"0",
#"cached":null,
#"code":null,
#"private":"0",
#"user_id":"60410",
#"snapshot_id":null,
#"updated":"2011-02-14 17:52:29",
#"in_collection":null,
#"sertags":",application,graph,graphics,chart,design,diagram,diagramming,flowchart,tool,visualization,",
#"source":"7",
#"tags":[
#"application",
#"graph",
#"graphics",
#"chart",
#"design",
#"diagram",
#"diagramming",
#"flowchart",
#"tool",
#"visualization"
#],
#"author_id":"60410",
#"url_slug":"c9f75b6d4b90340713effa1ddac4f876778c4f1b",
#"url_count":"145"
#};
| spare-time/pinboogle | crawlers/items.py | Python | apache-2.0 | 1,937 |
import _plotly_utils.basevalidators
class ComputedValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="computed", parent_name="layout", **kwargs):
super(ComputedValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/layout/_computed.py | Python | mit | 395 |
from comics.aggregator.crawler import ComicsKingdomCrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Hägar the Horrible"
language = "en"
url = "https://www.comicskingdom.com/hagar-the-horrible"
rights = "Chris Browne"
class Crawler(ComicsKingdomCrawlerBase):
history_capable_days = 6
schedule = "Mo,Tu,We,Th,Fr,Sa,Su"
time_zone = "US/Eastern"
def crawl(self, pub_date):
return self.crawl_helper("hagar-the-horrible", pub_date)
| jodal/comics | comics/comics/hagarthehorrible.py | Python | agpl-3.0 | 524 |
#
# @file TestSpeciesConcentrationRule.py
# @brief SpeciesConcentrationRule unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSpeciesConcentrationRule.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestSpeciesConcentrationRule(unittest.TestCase):
global SCR
SCR = None
def setUp(self):
self.SCR = libsbml.AssignmentRule(1,2)
self.SCR.setL1TypeCode(libsbml.SBML_SPECIES_CONCENTRATION_RULE)
if (self.SCR == None):
pass
pass
def tearDown(self):
_dummyList = [ self.SCR ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesConcentrationRule_create(self):
self.assert_( self.SCR.getTypeCode() == libsbml.SBML_ASSIGNMENT_RULE )
self.assert_( self.SCR.getL1TypeCode() == libsbml.SBML_SPECIES_CONCENTRATION_RULE )
self.assert_( self.SCR.getNotes() == None )
self.assert_( self.SCR.getAnnotation() == None )
self.assert_( self.SCR.getFormula() == "" )
self.assert_( self.SCR.getType() == libsbml.RULE_TYPE_SCALAR )
self.assert_( self.SCR.getVariable() == "" )
self.assertEqual( False, self.SCR.isSetVariable() )
pass
def test_SpeciesConcentrationRule_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesConcentrationRule_setSpecies(self):
species = "s2";
self.SCR.setVariable(species)
self.assert_(( species == self.SCR.getVariable() ))
self.assertEqual( True, self.SCR.isSetVariable() )
if (self.SCR.getVariable() == species):
pass
s = self.SCR.getVariable()
self.SCR.setVariable(s)
self.assert_(( species == self.SCR.getVariable() ))
self.SCR.setVariable("")
self.assertEqual( False, self.SCR.isSetVariable() )
if (self.SCR.getVariable() != None):
pass
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSpeciesConcentrationRule))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| alexholehouse/SBMLIntegrator | libsbml-5.0.0/src/bindings/python/test/sbml/TestSpeciesConcentrationRule.py | Python | gpl-3.0 | 3,332 |
__author__ = 'Brandon C. Kelly'
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve
from scipy.optimize import minimize
import samplers
import multiprocessing
import _carmcmc as carmcmcLib
class CarmaModel(object):
"""
Class for performing statistical inference assuming a CARMA(p,q) model.
"""
def __init__(self, time, y, ysig, p=1, q=0):
"""
Constructor for the CarmaModel class.
:param time: The observation times.
:param y: The measured time series.
:param ysig: The standard deviation in the measurements errors on the time series.
:param p: The order of the autoregressive (AR) polynomial. Default is p = 1.
:param q: The order of the moving average (MA) polynomial. Default is q = 0. Note that p > q.
"""
        if p <= q:
            raise ValueError("Order of AR polynomial, p, must be larger than order of MA polynomial, q.")
        # check that time values are unique and in ascending order
s_idx = np.argsort(time)
t_unique, u_idx = np.unique(time[s_idx], return_index=True)
u_idx = s_idx[u_idx]
# convert input to std::vector<double> extension class
self._time = carmcmcLib.vecD()
self._time.extend(time[u_idx])
self._y = carmcmcLib.vecD()
self._y.extend(y[u_idx])
self._ysig = carmcmcLib.vecD()
self._ysig.extend(ysig[u_idx])
# save parameters
self.time = time[u_idx]
self.y = y[u_idx]
self.ysig = ysig[u_idx]
self.p = p
self.q = q
self.mcmc_sample = None
def run_mcmc(self, nsamples, nburnin=None, ntemperatures=None, nthin=1, init=None):
"""
Run the MCMC sampler. This is actually a wrapper that calls the C++ code that runs the MCMC sampler.
:param nsamples: The number of samples from the posterior to generate.
:param ntemperatures: Number of parallel MCMC chains to run in the parallel tempering algorithm. Default is 1
(no tempering) for p = 1 and max(10, p+q) for p > 1.
:param nburnin: Number of burnin iterations to run. The default is nsamples / 2.
:param nthin: Thinning interval for the MCMC sampler. Default is 1 (no thinning).
:return: Either a CarmaSample or Car1Sample object, depending on the values of self.p. The CarmaSample object
will also be stored as a data member of the CarmaModel object.
"""
if ntemperatures is None:
ntemperatures = max(10, self.p + self.q)
if nburnin is None:
nburnin = nsamples / 2
if init is None:
init = carmcmcLib.vecD()
if self.p == 1:
# Treat the CAR(1) case separately
cppSample = carmcmcLib.run_mcmc_car1(nsamples, nburnin, self._time, self._y, self._ysig,
nthin, init)
# run_mcmc_car1 returns a wrapper around the C++ CAR1 class, convert to python object
sample = Car1Sample(self.time, self.y, self.ysig, cppSample)
else:
cppSample = carmcmcLib.run_mcmc_carma(nsamples, nburnin, self._time, self._y, self._ysig,
self.p, self.q, ntemperatures, False, nthin, init)
# run_mcmc_car returns a wrapper around the C++ CARMA class, convert to a python object
sample = CarmaSample(self.time, self.y, self.ysig, cppSample, q=self.q)
self.mcmc_sample = sample
return sample
def get_mle(self, p, q, ntrials=100, njobs=1):
"""
Return the maximum likelihood estimate (MLE) of the CARMA model parameters. This is done by using the
L-BFGS-B algorithm from scipy.optimize on ntrials randomly distributed starting values of the parameters. This
        may return NaN for more complex CARMA models, especially if the data are not well-described by a CARMA model.
In addition, the likelihood space can be highly multi-modal, and there is no guarantee that the global MLE will
be found using this procedure.
@param p: The order of the AR polynomial.
@param q: The order of the MA polynomial. Must be q < p.
@param ntrials: The number of random starting values for the optimizer. Default is 100.
@param njobs: The number of processors to use. If njobs = -1, then all of them are used. Default is njobs = 1.
@return: The scipy.optimize.Result object corresponding to the MLE.
"""
if njobs == -1:
njobs = multiprocessing.cpu_count()
args = [(p, q, self.time, self.y, self.ysig)] * ntrials
if njobs == 1:
MLEs = map(_get_mle_single, args)
else:
# use multiple processors
pool = multiprocessing.Pool(njobs)
# warm up the pool
pool.map(int, range(multiprocessing.cpu_count()))
MLEs = pool.map(_get_mle_single, args)
pool.terminate()
best_MLE = MLEs[0]
for MLE in MLEs:
if MLE.fun < best_MLE.fun: # note that MLE.fun is -loglik since we use scipy.optimize.minimize
# new MLE found, save this value
best_MLE = MLE
print best_MLE.message
return best_MLE
def choose_order(self, pmax, qmax=None, pqlist=None, njobs=1, ntrials=100):
"""
Choose the order of the CARMA model by minimizing the AICc(p,q). This first computes the maximum likelihood
estimate on a grid of (p,q) values using self.get_mle, and then choosing the value of (p,q) that minimizes
the AICc. These values of p and q are stored as self.p and self.q.
@param pmax: The maximum order of the AR(p) polynomial to search over.
@param qmax: The maximum order of the MA(q) polynomial to search over. If none, search over all possible values
of q < p.
@param pqlist: A list of (p,q) tuples. If supplied, the (p,q) pairs are used instead of being generated from the
values of pmax and qmax.
@param njobs: The number of processors to use for calculating the MLE. A value of njobs = -1 will use all
available processors.
@param ntrials: The number of random starts to use in the MLE, the default is 100.
@return: A tuple of (MLE, pqlist, AICc). MLE is a scipy.optimize.Result object containing the maximum-likelihood
estimate. pqlist contains the values of (p,q) used in the search, and AICc contains the values of AICc for
each (p,q) pair in pqlist.
"""
        if pmax < 1:
            raise ValueError("Order of AR polynomial must be at least 1.")
if qmax is None:
qmax = pmax - 1
        if pmax <= qmax:
            raise ValueError("Order of AR polynomial, p, must be larger than order of MA polynomial, q.")
if pqlist is None:
pqlist = []
for p in xrange(1, pmax+1):
for q in xrange(p):
pqlist.append((p, q))
MLEs = []
for pq in pqlist:
MLE = self.get_mle(pq[0], pq[1], ntrials=ntrials, njobs=njobs)
MLEs.append(MLE)
best_AICc = 1e300
AICc = []
best_MLE = MLEs[0]
print 'p, q, AICc:'
for MLE, pq in zip(MLEs, pqlist):
nparams = 2 + pq[0] + pq[1]
deviance = 2.0 * MLE.fun
this_AICc = 2.0 * nparams + deviance + 2.0 * nparams * (nparams + 1.0) / (self.time.size - nparams - 1.0)
print pq[0], pq[1], this_AICc
AICc.append(this_AICc)
if this_AICc < best_AICc:
# new optimum found, save values
best_MLE = MLE
best_AICc = this_AICc
self.p = pq[0]
self.q = pq[1]
print 'Model with best AICc has p =', self.p, ' and q = ', self.q
return best_MLE, pqlist, AICc
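# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows the workflow described in the docstrings above: build a CarmaModel from
# (time, y, ysig), optionally choose (p, q) by minimizing AICc, then run the MCMC
# sampler to obtain a CarmaSample/Car1Sample. The synthetic inputs are placeholders.
def _example_carma_workflow():
    t = np.sort(np.random.uniform(0.0, 100.0, 200))
    y = np.sin(2.0 * np.pi * t / 25.0) + np.random.normal(0.0, 0.1, t.size)
    ysig = 0.1 * np.ones(t.size)
    model = CarmaModel(t, y, ysig, p=2, q=1)
    # model.choose_order(pmax=3) would instead search over (p, q) and set model.p, model.q
    sample = model.run_mcmc(10000)
    return model, sample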
def _get_mle_single(args):
p, q, time, y, ysig = args
nsamples = 1
nburnin = 25
nwalkers = 10
# get a CARMA process object by running the MCMC sampler for a very short period. This will provide the initial
# guess and the function to compute the log-posterior
tvec = arrayToVec(time) # convert to std::vector<double> object for input into C++ wrapper
yvec = arrayToVec(y)
ysig_vec = arrayToVec(ysig)
if p == 1:
# Treat the CAR(1) case separately
CarmaProcess = carmcmcLib.run_mcmc_car1(nsamples, nburnin, tvec, yvec, ysig_vec, 1)
else:
CarmaProcess = carmcmcLib.run_mcmc_carma(nsamples, nburnin, tvec, yvec, ysig_vec,
p, q, nwalkers, False, 1)
initial_theta = CarmaProcess.getSamples()
initial_theta = np.array(initial_theta[0])
initial_theta[1] = 1.0 # initial guess for measurement error scale parameter
# set bounds on parameters
ysigma = y.std()
dt = time[1:] - time[:-1]
max_freq = 1.0 / dt.min()
max_freq = 0.9 * max_freq
min_freq = 1.0 / (time.max() - time.min())
theta_bnds = [(ysigma / 10.0, 10.0 * ysigma)]
theta_bnds.append((0.9, 1.1))
theta_bnds.append((None, None))
if p == 1:
theta_bnds.append((np.log(min_freq), np.log(max_freq)))
else:
# monte carlo estimates of bounds on quadratic term parameterization of AR(p) roots
qterm_lbound = min(min_freq ** 2, 2.0 * min_freq)
qterm_lbound = np.log(qterm_lbound)
qterm_ubound = max(max_freq ** 2, 2.0 * max_freq)
qterm_ubound = np.log(qterm_ubound)
theta_bnds.extend([(qterm_lbound, qterm_ubound)] * p)
# no bounds on MA coefficients
if q > 0:
theta_bnds.extend([(None, None)] * q)
CarmaProcess.SetMLE(True) # ignore the prior bounds when calculating CarmaProcess.getLogDensity in C++ code
# make sure initial guess of theta does not violate bounds
for j in xrange(len(initial_theta)):
if theta_bnds[j][0] is not None:
if (initial_theta[j] < theta_bnds[j][0]) or (initial_theta[j] > theta_bnds[j][1]):
initial_theta[j] = np.random.uniform(theta_bnds[j][0], theta_bnds[j][1])
thisMLE = minimize(_carma_loglik, initial_theta, args=(CarmaProcess,), method="L-BFGS-B", bounds=theta_bnds)
return thisMLE
def _carma_loglik(theta, args):
CppCarma = args
theta_vec = carmcmcLib.vecD()
theta_vec.extend(theta)
logdens = CppCarma.getLogDensity(theta_vec)
return -logdens
class CarmaSample(samplers.MCMCSample):
"""
Class for storing and analyzing the MCMC samples of a CARMA(p,q) model.
"""
def __init__(self, time, y, ysig, sampler, q=0, filename=None, MLE=None):
"""
Constructor for the CarmaSample class. In general a CarmaSample object should never be constructed directly,
but should be constructed by calling CarmaModel.run_mcmc().
@param time: The array of time values for the time series.
@param y: The array of measured values for the time series.
@param ysig: The array of measurement noise standard deviations for the time series.
        @param sampler: A C++ object returned by _carmcmc.run_mcmc_carma(). In general this should not be obtained
directly, but a CarmaSample object should be obtained by running CarmaModel.run_mcmc().
@param q: The order of the MA polynomial.
@param filename: A string of the name of the file containing the MCMC samples generated by the C++ carpack.
@param MLE: The maximum-likelihood estimate, obtained as a scipy.optimize.Result object.
"""
self.time = time # The time values of the time series
self.y = y # The measured values of the time series
self.ysig = ysig # The standard deviation of the measurement errors of the time series
self.q = q # order of moving average polynomial
logpost = np.array(sampler.GetLogLikes())
trace = np.array(sampler.getSamples())
super(CarmaSample, self).__init__(filename=filename, logpost=logpost, trace=trace)
# now calculate the AR(p) characteristic polynomial roots, coefficients, MA coefficients, and amplitude of
# driving noise and add them to the MCMC samples
print "Calculating PSD Lorentzian parameters..."
self._ar_roots()
print "Calculating coefficients of AR polynomial..."
self._ar_coefs()
if self.q > 0:
print "Calculating coefficients of MA polynomial..."
self._ma_coefs(trace)
print "Calculating sigma..."
self._sigma_noise()
# add the log-likelihoods
print "Calculating log-likelihoods..."
loglik = np.empty(logpost.size)
sampler.SetMLE(True)
for i in xrange(logpost.size):
std_theta = carmcmcLib.vecD()
std_theta.extend(trace[i, :])
# loglik[i] = logpost[i] - sampler.getLogPrior(std_theta)
loglik[i] = sampler.getLogDensity(std_theta)
self._samples['loglik'] = loglik
# make the parameter names (i.e., the keys) public so the user knows how to get them
self.parameters = self._samples.keys()
self.newaxis()
self.mle = {}
if MLE is not None:
# add maximum a posteriori estimate
self.add_mle(MLE)
def add_mle(self, MLE):
"""
Add the maximum-likelihood estimate to the CarmaSample object. This will convert the MLE to a dictionary, and
add it as a data member of the CarmaSample object. The values can be accessed as self.mle['parameter']. For
example, the MLE of the CARMA process variance is accessed as self.mle['var'].
@param MLE: The maximum-likelihood estimate, returned by CarmaModel.get_mle() or CarmaModel.choose_order().
"""
self.mle = {'loglik': -MLE.fun, 'var': MLE.x[0] ** 2, 'measerr_scale': MLE.x[1], 'mu': MLE.x[2]}
# add AR polynomial roots and PSD lorentzian parameters
quad_coefs = np.exp(MLE.x[3:self.p + 3])
ar_roots = np.zeros(self.p, dtype=complex)
psd_width = np.zeros(self.p)
psd_cent = np.zeros(self.p)
for i in xrange(self.p / 2):
quad1 = quad_coefs[2 * i]
quad2 = quad_coefs[2 * i + 1]
discriminant = quad2 ** 2 - 4.0 * quad1
if discriminant > 0:
sqrt_disc = np.sqrt(discriminant)
else:
sqrt_disc = 1j * np.sqrt(np.abs(discriminant))
ar_roots[2 * i] = -0.5 * (quad2 + sqrt_disc)
ar_roots[2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
psd_width[2 * i] = -np.real(ar_roots[2 * i]) / (2.0 * np.pi)
psd_cent[2 * i] = np.abs(np.imag(ar_roots[2 * i])) / (2.0 * np.pi)
psd_width[2 * i + 1] = -np.real(ar_roots[2 * i + 1]) / (2.0 * np.pi)
psd_cent[2 * i + 1] = np.abs(np.imag(ar_roots[2 * i + 1])) / (2.0 * np.pi)
if self.p % 2 == 1:
# p is odd, so add in root from linear term
ar_roots[-1] = -quad_coefs[-1]
psd_cent[-1] = 0.0
psd_width[-1] = quad_coefs[-1] / (2.0 * np.pi)
self.mle['ar_roots'] = ar_roots
self.mle['psd_width'] = psd_width
self.mle['psd_cent'] = psd_cent
self.mle['ar_coefs'] = np.poly(ar_roots).real
# now calculate the moving average coefficients
if self.q == 0:
self.mle['ma_coefs'] = 1.0
else:
quad_coefs = np.exp(MLE.x[3 + self.p:])
ma_roots = np.empty(quad_coefs.size, dtype=complex)
for i in xrange(self.q / 2):
quad1 = quad_coefs[2 * i]
quad2 = quad_coefs[2 * i + 1]
discriminant = quad2 ** 2 - 4.0 * quad1
if discriminant > 0:
sqrt_disc = np.sqrt(discriminant)
else:
sqrt_disc = 1j * np.sqrt(np.abs(discriminant))
ma_roots[2 * i] = -0.5 * (quad2 + sqrt_disc)
ma_roots[2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
if self.q % 2 == 1:
# q is odd, so add in root from linear term
ma_roots[-1] = -quad_coefs[-1]
ma_coefs = np.poly(ma_roots)
# normalize so constant in polynomial is unity, and reverse order to be consistent with MA
# representation
self.mle['ma_coefs'] = np.real(ma_coefs / ma_coefs[self.q])[::-1]
# finally, calculate sigma, the standard deviation in the driving white noise
unit_var = carma_variance(1.0, self.mle['ar_roots'], np.atleast_1d(self.mle['ma_coefs']))
self.mle['sigma'] = np.sqrt(self.mle['var'] / unit_var.real)
def set_logpost(self, logpost):
"""
Add the input log-posterior MCMC values to the CarmaSample parameter dictionary.
@param logpost: The values of the log-posterior obtained from the MCMC sampler.
"""
self._samples['logpost'] = logpost # log-posterior of the CAR(p) model
def generate_from_trace(self, trace):
"""
Generate the dictionary of MCMC samples for the CARMA process parameters from the input array.
@param trace: An array containing the MCMC samples.
"""
# Figure out how many AR terms we have
self.p = trace.shape[1] - 3 - self.q
names = ['var', 'measerr_scale', 'mu', 'quad_coefs']
if names != self._samples.keys():
idx = 0
# Parameters are not already in the dictionary, add them.
self._samples['var'] = (trace[:, 0] ** 2) # Variance of the CAR(p) process
self._samples['measerr_scale'] = trace[:, 1] # Measurement errors are scaled by this much.
self._samples['mu'] = trace[:, 2] # model mean of time series
# AR(p) polynomial is factored as a product of quadratic terms:
# alpha(s) = (quad_coefs[0] + quad_coefs[1] * s + s ** 2) * ...
self._samples['quad_coefs'] = np.exp(trace[:, 3:self.p + 3])
def generate_from_file(self, filename):
"""
Build the dictionary of parameter samples from an ascii file of MCMC samples from carpack.
:param filename: The name of the file containing the MCMC samples generated by carpack.
"""
# TODO: put in exceptions to make sure files are ready correctly
# Grab the MCMC output
trace = np.genfromtxt(filename[0], skip_header=1)
self.generate_from_trace(trace[:, 0:-1])
self.set_logpost(trace[:, -1])
def _ar_roots(self):
"""
Calculate the roots of the CARMA(p,q) characteristic polynomial and add them to the MCMC samples.
"""
var = self._samples['var']
quad_coefs = self._samples['quad_coefs']
self._samples['ar_roots'] = np.empty((var.size, self.p), dtype=complex)
self._samples['psd_centroid'] = np.empty((var.size, self.p))
self._samples['psd_width'] = np.empty((var.size, self.p))
for i in xrange(self.p / 2):
quad1 = quad_coefs[:, 2 * i]
quad2 = quad_coefs[:, 2 * i + 1]
discriminant = quad2 ** 2 - 4.0 * quad1
sqrt_disc = np.where(discriminant > 0, np.sqrt(discriminant), 1j * np.sqrt(np.abs(discriminant)))
self._samples['ar_roots'][:, 2 * i] = -0.5 * (quad2 + sqrt_disc)
self._samples['ar_roots'][:, 2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
self._samples['psd_width'][:, 2 * i] = -np.real(self._samples['ar_roots'][:, 2 * i]) / (2.0 * np.pi)
self._samples['psd_centroid'][:, 2 * i] = np.abs(np.imag(self._samples['ar_roots'][:, 2 * i])) / \
(2.0 * np.pi)
self._samples['psd_width'][:, 2 * i + 1] = -np.real(self._samples['ar_roots'][:, 2 * i + 1]) / (2.0 * np.pi)
self._samples['psd_centroid'][:, 2 * i + 1] = np.abs(np.imag(self._samples['ar_roots'][:, 2 * i + 1])) / \
(2.0 * np.pi)
if self.p % 2 == 1:
# p is odd, so add in root from linear term
self._samples['ar_roots'][:, -1] = -quad_coefs[:, -1]
self._samples['psd_centroid'][:, -1] = 0.0
self._samples['psd_width'][:, -1] = quad_coefs[:, -1] / (2.0 * np.pi)
def _ma_coefs(self, trace):
"""
Calculate the CARMA(p,q) moving average coefficients and add them to the MCMC samples.
"""
nsamples = trace.shape[0]
if self.q == 0:
self._samples['ma_coefs'] = np.ones((nsamples, 1))
else:
quad_coefs = np.exp(trace[:, 3 + self.p:])
roots = np.empty(quad_coefs.shape, dtype=complex)
for i in xrange(self.q / 2):
quad1 = quad_coefs[:, 2 * i]
quad2 = quad_coefs[:, 2 * i + 1]
discriminant = quad2 ** 2 - 4.0 * quad1
sqrt_disc = np.where(discriminant > 0, np.sqrt(discriminant), 1j * np.sqrt(np.abs(discriminant)))
roots[:, 2 * i] = -0.5 * (quad2 + sqrt_disc)
roots[:, 2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
if self.q % 2 == 1:
# q is odd, so add in root from linear term
roots[:, -1] = -quad_coefs[:, -1]
coefs = np.empty((nsamples, self.q + 1), dtype=complex)
for i in xrange(nsamples):
coefs_i = np.poly(roots[i, :])
# normalize so constant in polynomial is unity, and reverse order to be consistent with MA
# representation
coefs[i, :] = (coefs_i / coefs_i[self.q])[::-1]
self._samples['ma_coefs'] = coefs.real
def _ar_coefs(self):
"""
Calculate the CARMA(p,q) autoregressive coefficients and add them to the MCMC samples.
"""
roots = self._samples['ar_roots']
coefs = np.empty((roots.shape[0], self.p + 1), dtype=complex)
for i in xrange(roots.shape[0]):
coefs[i, :] = np.poly(roots[i, :])
self._samples['ar_coefs'] = coefs.real
def _sigma_noise(self):
"""
Calculate the MCMC samples of the standard deviation of the white noise driving process and add them to the
MCMC samples.
"""
# get the CARMA(p,q) model variance of the time series
var = self._samples['var']
# get the roots of the AR(p) characteristic polynomial
ar_roots = self._samples['ar_roots']
# get the moving average coefficients
ma_coefs = self._samples['ma_coefs']
# calculate the variance of a CAR(p) process, assuming sigma = 1.0
sigma1_variance = np.zeros_like(var) + 0j
for k in xrange(self.p):
denom = -2.0 * ar_roots[:, k].real + 0j
for l in xrange(self.p):
if l != k:
denom *= (ar_roots[:, l] - ar_roots[:, k]) * (np.conjugate(ar_roots[:, l]) + ar_roots[:, k])
ma_sum1 = np.zeros_like(ar_roots[:, 0])
ma_sum2 = ma_sum1.copy()
for l in xrange(ma_coefs.shape[1]):
ma_sum1 += ma_coefs[:, l] * ar_roots[:, k] ** l
ma_sum2 += ma_coefs[:, l] * (-1.0 * ar_roots[:, k]) ** l
numer = ma_sum1 * ma_sum2
sigma1_variance += numer / denom
sigsqr = var / sigma1_variance.real
# add the white noise sigmas to the MCMC samples
self._samples['sigma'] = np.sqrt(sigsqr)
def plot_power_spectrum(self, figname="", percentile=68.0, nsamples=None, plot_log=True, color="b", alpha=0.5, sp=None, doShow=True, dpi=None):
"""
Plot the posterior median and the credibility interval corresponding to percentile of the CARMA(p,q) PSD. This
function returns a tuple containing the lower and upper PSD credibility intervals as a function of frequency,
the median PSD as a function of frequency, and the frequencies.
:rtype : A tuple of numpy arrays, (lower PSD, upper PSD, median PSD, frequencies). If no subplot axes object
is supplied (i.e., if sp = None), then the subplot axes object used will also be returned as the last
element of the tuple.
:param percentile: The percentile of the PSD credibility interval to plot.
:param nsamples: The number of MCMC samples to use to estimate the credibility interval. The default is all
            of them. Use fewer samples for increased speed.
:param plot_log: A boolean. If true, then a logarithmic plot is made.
:param color: The color of the shaded credibility region.
:param alpha: The transparency level.
:param sp: A matplotlib subplot axes object to use.
:param doShow: If true, call plt.show()
"""
sigmas = self._samples['sigma']
ar_coefs = self._samples['ar_coefs']
ma_coefs = self._samples['ma_coefs']
if nsamples is None:
# Use all of the MCMC samples
nsamples = sigmas.shape[0]
else:
            if nsamples > sigmas.shape[0]:
                raise ValueError("nsamples must be less than the total number of MCMC samples.")
nsamples0 = sigmas.shape[0]
index = np.arange(nsamples) * (nsamples0 / nsamples)
sigmas = sigmas[index]
ar_coefs = ar_coefs[index]
ma_coefs = ma_coefs[index]
nfreq = 1000
dt_min = self.time[1:] - self.time[0:self.time.size - 1]
dt_min = dt_min.min()
dt_max = self.time.max() - self.time.min()
# Only plot frequencies corresponding to time scales a factor of 2 shorter and longer than the minimum and
# maximum time scales probed by the time series.
freq_max = 0.5 / dt_min
freq_min = 1.0 / dt_max
frequencies = np.linspace(np.log(freq_min), np.log(freq_max), num=nfreq)
frequencies = np.exp(frequencies)
psd_credint = np.empty((nfreq, 3))
lower = (100.0 - percentile) / 2.0 # lower and upper intervals for credible region
upper = 100.0 - lower
# Compute the PSDs from the MCMC samples
omega = 2.0 * np.pi * 1j * frequencies
ar_poly = np.zeros((nfreq, nsamples), dtype=complex)
ma_poly = np.zeros_like(ar_poly)
for k in xrange(self.p):
# Here we compute:
# alpha(omega) = ar_coefs[0] * omega^p + ar_coefs[1] * omega^(p-1) + ... + ar_coefs[p]
# Note that ar_coefs[0] = 1.0.
argrid, omgrid = np.meshgrid(ar_coefs[:, k], omega)
ar_poly += argrid * (omgrid ** (self.p - k))
ar_poly += ar_coefs[:, self.p]
for k in xrange(ma_coefs.shape[1]):
# Here we compute:
# delta(omega) = ma_coefs[0] + ma_coefs[1] * omega + ... + ma_coefs[q] * omega^q
magrid, omgrid = np.meshgrid(ma_coefs[:, k], omega)
ma_poly += magrid * (omgrid ** k)
psd_samples = np.squeeze(sigmas) ** 2 * np.abs(ma_poly) ** 2 / np.abs(ar_poly) ** 2
# Now compute credibility interval for power spectrum
psd_credint[:, 0] = np.percentile(psd_samples, lower, axis=1)
psd_credint[:, 2] = np.percentile(psd_samples, upper, axis=1)
psd_credint[:, 1] = np.median(psd_samples, axis=1)
# Plot the power spectra
if sp == None:
fig = plt.figure()
sp = fig.add_subplot(111)
if plot_log:
# plot the posterior median first
sp.loglog(frequencies, psd_credint[:, 1], color=color)
else:
sp.plot(frequencies, psd_credint[:, 1], color=color)
sp.fill_between(frequencies, psd_credint[:, 2], psd_credint[:, 0], facecolor=color, alpha=alpha)
sp.set_xlim(frequencies.min(), frequencies.max())
sp.set_xlabel('Frequency')
sp.set_ylabel('Power Spectrum')
        # Personal addition 02/06/2016 (+ added "figname" to the inputs)
if(figname!=""):
plt.savefig(figname,dpi=dpi)
if doShow:
plt.show()
else:
plt.close()
elif doShow:
plt.show()
        # End of personal addition
if sp == None:
return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies, fig)
else:
return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies)
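    # A hypothetical usage sketch for plot_power_spectrum (the "sample" object and argument values
    # are assumptions for illustration; a CarmaSample would normally come from CarmaModel.run_mcmc()):
    #
    # >>> psd_lo, psd_hi, psd_med, freq, fig = sample.plot_power_spectrum(percentile=95.0, doShow=False)
    # >>> fig.savefig('psd.png')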
def makeKalmanFilter(self, bestfit):
if bestfit == 'map':
# use maximum a posteriori estimate
max_index = self._samples['logpost'].argmax()
sigsqr = (self._samples['sigma'][max_index] ** 2)[0]
mu = self._samples['mu'][max_index][0]
ar_roots = self._samples['ar_roots'][max_index]
ma_coefs = self._samples['ma_coefs'][max_index]
elif bestfit == 'median':
# use posterior median estimate
sigsqr = np.median(self._samples['sigma']) ** 2
mu = np.median(self._samples['mu'])
ar_roots = np.median(self._samples['ar_roots'], axis=0)
ma_coefs = np.median(self._samples['ma_coefs'], axis=0)
elif bestfit == 'mean':
# use posterior mean as the best-fit
sigsqr = np.mean(self._samples['sigma'] ** 2)
mu = np.mean(self._samples['mu'])
ar_roots = np.mean(self._samples['ar_roots'], axis=0)
ma_coefs = np.mean(self._samples['ma_coefs'], axis=0)
else:
# use a random draw from the posterior
random_index = np.random.random_integers(0, self._samples.values()[0].shape[0] - 1)
sigsqr = (self._samples['sigma'][random_index] ** 2)[0]
mu = self._samples['mu'][random_index][0]
ar_roots = self._samples['ar_roots'][random_index]
ma_coefs = self._samples['ma_coefs'][random_index]
# expose C++ Kalman filter class to python
kfilter = carmcmcLib.KalmanFilterp(arrayToVec(self.time),
arrayToVec(self.y - mu),
arrayToVec(self.ysig),
sigsqr,
arrayToVec(ar_roots, carmcmcLib.vecC),
arrayToVec(ma_coefs))
return kfilter, mu
def assess_fit(self, t_axis_label, mydata_axis_label, t_units, mydata_units, bestfit="map", nplot=256, doShow=True, tstart=0.0, figname="", nbins=10, maxlag=50, dpi=None, reverse_xaxis=False):
"""
Display plots and provide useful information for assessing the quality of the CARMA(p,q) model fit.
:param bestfit: A string specifying how to define 'best-fit'. Can be the maximum a posteriori value (MAP),
the posterior mean ("mean"), or the posterior median ("median").
:param nplot: The number of interpolated time series values to plot.
:param doShow: If true, call pyplot.show(). Else if false, return the matplotlib figure object.
"""
bestfit = bestfit.lower()
        if bestfit not in ['map', 'median', 'mean']:
            raise ValueError("bestfit must be one of 'map', 'median', or 'mean'")
# compute the marginal mean and variance of the predicted values
time_predict = np.linspace(1.001 * self.time.min(), self.time.max(), nplot)
predicted_mean, predicted_var = self.predict(time_predict, bestfit=bestfit)
predicted_low = predicted_mean - np.sqrt(predicted_var)
predicted_high = predicted_mean + np.sqrt(predicted_var)
# plot the time series and the marginal 1-sigma error bands
        # Personal substitution 22/05/2016 (+ added "tstart" to the inputs)
        #plt.fill_between(time_predict, predicted_low, predicted_high, color='cyan')
        #plt.plot(time_predict, predicted_mean, '-b', label='Interpolation')
        #plt.plot(self.time, self.y, 'k.', label='Data')
        #plt.xlabel('Time')
        #plt.xlim(self.time.min(), self.time.max())
        # Personal modification 02/08/16
fig1 = plt.figure()
plt.fill_between(time_predict+tstart, predicted_low, predicted_high, color='cyan',label='$1\sigma$ error band')
plt.plot(time_predict+tstart, predicted_mean, '-b', label='Interpolation')
plt.plot(self.time+tstart, self.y, 'k.', label='Detrended Data')
        # End of personal modification 02/08/16
        # Personal addition 02/08/16
if t_units is None:
t_label=""
else:
t_label=" ("+t_units+")"
if mydata_units is None:
mydata_label=""
else:
mydata_label=" ("+mydata_units+")"
        # End of personal addition
        # Personal modification 02/08/2016 + added "t_axis_label" and "t_units" to the inputs
#plt.xlabel('Time')
plt.xlabel(t_axis_label+t_label)
        # End of personal modification 02/08/2016
        # Personal addition 02/08/2016 + added "mydata_axis_label" and "mydata_units" to the inputs
plt.ylabel(mydata_axis_label+mydata_label)
        # End of personal addition 02/08/2016
        plt.xlim(self.time.min()+tstart, self.time.max()+tstart)
        # End of personal substitution
plt.legend(fancybox=True,fontsize='xx-small',bbox_to_anchor=(1.1, 1.05))
        # Personal addition 03/01/2017
        if reverse_xaxis is True:
            plt.gca().invert_xaxis()
        # End of personal addition
if(figname!=""):
myind=figname.rfind(".")
plt.savefig(figname[:myind]+"1"+figname[myind:],dpi=dpi)
if doShow:
plt.show()
else:
plt.close()
elif doShow:
plt.show()
# plot the standardized residuals and compare with the standard normal
fig2 = plt.figure()
kfilter, mu = self.makeKalmanFilter(bestfit)
kfilter.Filter()
kmean = np.asarray(kfilter.GetMean())
kvar = np.asarray(kfilter.GetVar())
standardized_residuals = (self.y - mu - kmean) / np.sqrt(kvar)
        # Personal modification 02/08/2016 + added "t_axis_label" and "t_units" to the inputs
        #plt.xlabel('Time')
        plt.xlabel(t_axis_label+t_label)
        # End of personal modification 02/08/2016
plt.ylabel('Standardized Residuals')
        # Personal substitution 22/05/2016 (+ added "tstart" to the inputs)
        #plt.xlim(self.time.min(), self.time.max())
        plt.xlim(self.time.min()+tstart, self.time.max()+tstart)
        # End of personal substitution
# Now add the histogram of values to the standardized residuals plot
        # Personal substitution 02/08/2016 + added nbins to the inputs
        #pdf, bin_edges = np.histogram(standardized_residuals, bins=10)
        pdf, bin_edges = np.histogram(standardized_residuals, bins=nbins)
        # End of personal substitution
bin_edges = bin_edges[0:pdf.size]
# Stretch the PDF so that it is readable on the residual plot when plotted horizontally
pdf = pdf / float(pdf.max()) * 0.4 * self.time.max()
# Add the histogram to the plot
        # Personal substitution 22/05/2016 (+ added "tstart" to the inputs)
        #plt.barh(bin_edges, pdf, height=bin_edges[1] - bin_edges[0])
        # plt.barh(bin_edges, pdf, height=bin_edges[1] - bin_edges[0],left=tstart)
        # End of personal substitution
# now overplot the expected standard normal distribution
# expected_pdf = np.exp(-0.5 * bin_edges ** 2)
        # # Personal substitution 22/05/2016 (+ added "tstart" to the inputs)
        # # expected_pdf = expected_pdf / expected_pdf.max() * 0.4 * self.time.max()
        # expected_pdf = expected_pdf / expected_pdf.max() * 0.4 * (self.time.max()-tstart)
        # # End of personal substitution
        # # Personal substitution 22/05/2016 (+ added "tstart" to the inputs)
# #plt.plot(expected_pdf, bin_edges, 'DarkOrange', lw=2)
# #plt.plot(self.time, standardized_residuals, '.k')
# plt.plot(expected_pdf+tstart, bin_edges, 'DarkOrange', lw=2)
plt.plot(self.time+tstart, standardized_residuals, '.k',zorder=1)
        # Personal addition 02/08/16: alpha=0.5
        plt.barh(bin_edges, pdf, height=bin_edges[1] - bin_edges[0],left=tstart,alpha=0.7,zorder=2)
        # End of personal substitution
        # Personal addition 03/01/2017
if reverse_xaxis is True:
plt.gca().invert_xaxis()
        # End of personal addition
if(figname!=""):
myind=figname.rfind(".")
plt.savefig(figname[:myind]+"2"+figname[myind:],dpi=dpi)
if doShow:
plt.show()
else:
plt.close()
elif doShow:
plt.show()
# plot the autocorrelation function of the residuals and compare with the 95% confidence intervals for white
# noise
fig3 = plt.figure()
wnoise_upper = 1.96 / np.sqrt(self.time.size)
wnoise_lower = -1.96 / np.sqrt(self.time.size)
plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
lags, acf, not_needed1, not_needed2 = plt.acorr(standardized_residuals, maxlags=maxlag, lw=2)
plt.xlim(0, maxlag)
        # Personal modification 02/08/2016
        #plt.xlabel('Time Lag')
        plt.xlabel('Lag')
        # End of personal modification
plt.ylabel('ACF of Residuals')
if(figname!=""):
myind=figname.rfind(".")
plt.savefig(figname[:myind]+"3"+figname[myind:],dpi=dpi)
if doShow:
plt.show()
else:
plt.close()
elif doShow:
plt.show()
# plot the autocorrelation function of the residuals and compare with the 95% confidence intervals for white noise
fig4 = plt.figure()
squared_residuals = standardized_residuals ** 2
wnoise_upper = 1.96 / np.sqrt(self.time.size)
wnoise_lower = -1.96 / np.sqrt(self.time.size)
plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
lags, acf, not_needed1, not_needed2 = plt.acorr(squared_residuals - squared_residuals.mean(), maxlags=maxlag,lw=2)
plt.xlim(0, maxlag)
plt.xlabel('Lag')
plt.ylabel('ACF of Sqrd. Resid.')
plt.tight_layout()
if(figname!=""):
myind=figname.rfind(".")
plt.savefig(figname[:myind]+"4"+figname[myind:],dpi=dpi)
if doShow:
plt.show()
else:
plt.close()
elif doShow:
plt.show()
if not doShow:
return fig1, fig2, fig3, fig4
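    # A hypothetical call illustrating the signature above (axis labels, units and keyword values
    # are assumptions, not values from the original source):
    #
    # >>> figs = sample.assess_fit('Time', 'Flux', 'days', 'mJy', bestfit='map', doShow=False)
    # >>> fig1, fig2, fig3, fig4 = figs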
def predict(self, time, bestfit='map'):
"""
        Return the predicted value of the time series and its variance at the input time(s) given the best-fit
value of the CARMA(p,q) model and the measured time series.
:param time: A scalar or numpy array containing the time values to predict the time series at.
:param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior (MAP), the posterior
mean ("mean"), the posterior median ("median"), or a random sample from the MCMC sampler ("random").
:rtype : A tuple of numpy arrays containing the expected value and variance of the time series at the input
time values.
"""
bestfit = bestfit.lower()
        if bestfit not in ['map', 'median', 'mean', 'random']:
            raise ValueError("bestfit must be one of 'map', 'median', 'mean', or 'random'")
# note that KalmanFilter class assumes the time series has zero mean
kfilter, mu = self.makeKalmanFilter(bestfit)
kfilter.Filter()
if np.isscalar(time):
pred = kfilter.Predict(time)
yhat = pred.first
yhat_var = pred.second
else:
yhat = np.empty(time.size)
yhat_var = np.empty(time.size)
for i in xrange(time.size):
pred = kfilter.Predict(time[i])
yhat[i] = pred.first
yhat_var[i] = pred.second
yhat += mu # add mean back into time series
return yhat, yhat_var
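    # Example of the intended use of predict (variable names are illustrative assumptions):
    #
    # >>> t_grid = np.linspace(sample.time.min(), sample.time.max(), 500)
    # >>> yhat, yhat_var = sample.predict(t_grid, bestfit='median')
    # >>> upper_band = yhat + np.sqrt(yhat_var)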
def simulate(self, time, bestfit='map'):
"""
Simulate a time series at the input time(s) given the best-fit value of the CARMA(p,q) model and the measured
time series.
:param time: A scalar or numpy array containing the time values to simulate the time series at.
:param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior (MAP), the posterior
mean ("mean"), the posterior median ("median"), or a random sample from the MCMC sampler ("random").
:rtype : The time series values simulated at the input values of time.
"""
bestfit = bestfit.lower()
        if bestfit not in ['map', 'median', 'mean', 'random']:
            raise ValueError("bestfit must be one of 'map', 'median', 'mean', or 'random'")
# note that KalmanFilter class assumes the time series has zero mean
kfilter, mu = self.makeKalmanFilter(bestfit)
kfilter.Filter()
vtime = carmcmcLib.vecD()
if np.isscalar(time):
vtime.append(time)
else:
vtime.extend(time)
ysim = np.asarray(kfilter.Simulate(vtime))
ysim += mu # add mean back into time series
return ysim
def DIC(self):
"""
Calculate the Deviance Information Criterion for the model.
The deviance is -2 * log-likelihood, and the DIC is:
DIC = mean(deviance) + 0.5 * variance(deviance)
"""
deviance = -2.0 * self._samples['loglik']
mean_deviance = np.mean(deviance, axis=0)
effect_npar = 0.5 * np.var(deviance, axis=0)
dic = mean_deviance + effect_npar
return dic
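    # A tiny worked example of the DIC formula above (numbers are illustrative only): log-likelihood
    # samples of -10 and -12 give deviances of 20 and 24, so mean(deviance) = 22, var(deviance) = 4,
    # and DIC = 22 + 0.5 * 4 = 24.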
def arrayToVec(array, arrType=carmcmcLib.vecD):
"""
Convert the input numpy array to a python wrapper of a C++ std::vector<double> object.
"""
vec = arrType()
vec.extend(array)
return vec
class Car1Sample(CarmaSample):
def __init__(self, time, y, ysig, sampler, filename=None):
"""
Constructor for a CAR(1) sample. This is a special case of the CarmaSample class for p = 1. As with the
CarmaSample class, this class should never be constructed directly. Instead, one should obtain a Car1Sample
class by calling CarmaModel.run_mcmc().
@param time: The array of time values for the time series.
@param y: The array of measured time series values.
@param ysig: The standard deviation in the measurement noise for the time series.
@param sampler: A wrapper for an instantiated C++ Car1 object.
@param filename: The name of an ascii file containing the MCMC samples.
"""
self.time = time # The time values of the time series
self.y = y # The measured values of the time series
self.ysig = ysig # The standard deviation of the measurement errors of the time series
self.p = 1 # How many AR terms
self.q = 0 # How many MA terms
logpost = np.array(sampler.GetLogLikes())
trace = np.array(sampler.getSamples())
super(CarmaSample, self).__init__(filename=filename, logpost=logpost, trace=trace)
print "Calculating sigma..."
self._sigma_noise()
# add the log-likelihoods
print "Calculating log-likelihoods..."
loglik = np.empty(logpost.size)
for i in xrange(logpost.size):
std_theta = carmcmcLib.vecD()
std_theta.extend(trace[i, :])
loglik[i] = logpost[i] - sampler.getLogPrior(std_theta)
self._samples['loglik'] = loglik
        # make the parameter names (i.e., the keys) public so the user knows how to get them
self.parameters = self._samples.keys()
self.newaxis()
def generate_from_trace(self, trace):
names = ['sigma', 'measerr_scale', 'mu', 'log_omega']
if names != self._samples.keys():
self._samples['var'] = trace[:, 0] ** 2
self._samples['measerr_scale'] = trace[:, 1]
self._samples['mu'] = trace[:, 2]
self._samples['log_omega'] = trace[:, 3]
def _ar_roots(self):
print "_ar_roots not supported for CAR1"
return
def _ar_coefs(self):
print "_ar_coefs not supported for CAR1"
return
def _sigma_noise(self):
self._samples['sigma'] = np.sqrt(2.0 * self._samples['var'] * np.exp(self._samples['log_omega']))
def makeKalmanFilter(self, bestfit):
if bestfit == 'map':
# use maximum a posteriori estimate
max_index = self._samples['logpost'].argmax()
sigsqr = (self._samples['sigma'][max_index] ** 2)[0]
mu = self._samples['mu'][max_index][0]
log_omega = self._samples['log_omega'][max_index][0]
elif bestfit == 'median':
# use posterior median estimate
sigsqr = np.median(self._samples['sigma']) ** 2
mu = np.median(self._samples['mu'])
log_omega = np.median(self._samples['log_omega'])
else:
# use posterior mean as the best-fit
sigsqr = np.mean(self._samples['sigma'] ** 2)
mu = np.mean(self._samples['mu'])
log_omega = np.mean(self._samples['log_omega'])
kfilter = carmcmcLib.KalmanFilter1(arrayToVec(self.time),
arrayToVec(self.y - mu),
arrayToVec(self.ysig),
sigsqr,
np.exp(log_omega))
return kfilter, mu
def plot_power_spectrum(self, figname="", percentile=68.0, nsamples=None, plot_log=True, color="b", alpha=0.5, sp=None, doShow=True, dpi=None):
"""
Plot the posterior median and the credibility interval corresponding to percentile of the CAR(1) PSD. This
function returns a tuple containing the lower and upper PSD credibility intervals as a function of
frequency, the median PSD as a function of frequency, and the frequencies.
:rtype : A tuple of numpy arrays, (lower PSD, upper PSD, median PSD, frequencies). If no subplot axes object
is supplied (i.e., if sp = None), then the subplot axes object used will also be returned as the last
element of the tuple.
:param percentile: The percentile of the PSD credibility interval to plot.
:param nsamples: The number of MCMC samples to use to estimate the credibility interval. The default is all
            of them. Use fewer samples for increased speed.
:param plot_log: A boolean. If true, then a logarithmic plot is made.
:param color: The color of the shaded credibility region.
:param alpha: The transparency level.
:param sp: A matplotlib subplot axes object to use.
:param doShow: If true, call plt.show()
"""
sigmas = self._samples['sigma']
log_omegas = self._samples['log_omega']
if nsamples is None:
# Use all of the MCMC samples
nsamples = sigmas.shape[0]
else:
            if nsamples > sigmas.shape[0]:
                raise ValueError("nsamples must be less than the total number of MCMC samples.")
nsamples0 = sigmas.shape[0]
index = np.arange(nsamples) * (nsamples0 / nsamples)
sigmas = sigmas[index]
log_omegas = log_omegas[index]
nfreq = 1000
dt_min = self.time[1:] - self.time[0:self.time.size - 1]
dt_min = dt_min.min()
dt_max = self.time.max() - self.time.min()
# Only plot frequencies corresponding to time scales a factor of 2 shorter and longer than the minimum and
# maximum time scales probed by the time series.
freq_max = 0.5 / dt_min
freq_min = 1.0 / dt_max
frequencies = np.linspace(np.log(freq_min), np.log(freq_max), num=nfreq)
frequencies = np.exp(frequencies)
psd_credint = np.empty((nfreq, 3))
lower = (100.0 - percentile) / 2.0 # lower and upper intervals for credible region
upper = 100.0 - lower
numer = sigmas ** 2
omegasq = np.exp(log_omegas) ** 2
for i in xrange(nfreq):
denom = omegasq + (2. * np.pi * frequencies[i]) ** 2
psd_samples = numer / denom
# Now compute credibility interval for power spectrum
psd_credint[i, 0] = np.percentile(psd_samples, lower, axis=0)
psd_credint[i, 2] = np.percentile(psd_samples, upper, axis=0)
psd_credint[i, 1] = np.median(psd_samples, axis=0)
# Plot the power spectra
if sp == None:
fig = plt.figure()
sp = fig.add_subplot(111)
if plot_log:
# plot the posterior median first
sp.loglog(frequencies, psd_credint[:, 1], color=color)
else:
sp.plot(frequencies, psd_credint[:, 1], color=color)
sp.fill_between(frequencies, psd_credint[:, 2], psd_credint[:, 0], facecolor=color, alpha=alpha)
sp.set_xlim(frequencies.min(), frequencies.max())
sp.set_xlabel('Frequency')
sp.set_ylabel('Power Spectrum')
        # Personal addition 02/06/2016 (+ added "figname" to the inputs)
if(figname!=""):
plt.savefig(figname,dpi=dpi)
if doShow:
plt.show()
else:
plt.close()
elif doShow:
plt.show()
        # End of personal addition
if sp == None:
return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies, fig)
else:
return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies)
def get_ar_roots(qpo_width, qpo_centroid):
"""
Return the roots of the characteristic AR(p) polynomial of the CARMA(p,q) process, given the lorentzian widths and
centroids.
:rtype : The roots of the autoregressive polynomial, a numpy array.
:param qpo_width: The widths of the lorentzian functions defining the PSD.
:param qpo_centroid: The centroids of the lorentzian functions defining the PSD. For all values of qpo_centroid
that are greater than zero, the complex conjugate of the root will also be added.
"""
ar_roots = []
for i in xrange(len(qpo_centroid)):
ar_roots.append(qpo_width[i] + 1j * qpo_centroid[i])
if qpo_centroid[i] > 1e-10:
# lorentzian is centered at a frequency > 0, so add complex conjugate of this root
ar_roots.append(np.conjugate(ar_roots[-1]))
if len(qpo_width) - len(qpo_centroid) == 1:
# odd number of lorentzian functions, so add in low-frequency component
ar_roots.append(qpo_width[-1] + 1j * 0.0)
ar_roots = np.array(ar_roots)
return -2.0 * np.pi * ar_roots
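# A brief usage sketch for get_ar_roots (the Lorentzian widths and centroids are illustrative
# assumptions): one quasi-periodic Lorentzian of width 0.05 centered at 0.2 plus one
# zero-frequency Lorentzian of width 0.01 yields p = 3 roots (the complex pair plus a real root).
#
# >>> ar_roots = get_ar_roots([0.05, 0.01], [0.2])
# >>> ar_roots.size
# 3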
def power_spectrum(freq, sigma, ar_coef, ma_coefs=[1.0]):
"""
Return the power spectrum for a CARMA(p,q) process calculated at the input frequencies.
:param freq: The frequencies at which to calculate the PSD.
:param sigma: The standard deviation driving white noise.
:param ar_coef: The CARMA model autoregressive coefficients.
:param ma_coefs: Coefficients of the moving average polynomial
:rtype : The power spectrum at the input frequencies, a numpy array.
"""
    if len(ma_coefs) > len(ar_coef):
        raise ValueError("Size of ma_coefs must be less than or equal to size of ar_coef.")
ma_poly = np.polyval(ma_coefs[::-1], 2.0 * np.pi * 1j * freq) # Evaluate the polynomial in the PSD numerator
ar_poly = np.polyval(ar_coef, 2.0 * np.pi * 1j * freq) # Evaluate the polynomial in the PSD denominator
pspec = sigma ** 2 * np.abs(ma_poly) ** 2 / np.abs(ar_poly) ** 2
return pspec
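# A minimal sketch of calling power_spectrum (the frequency grid and parameter values are
# assumptions for illustration):
#
# >>> freq = np.logspace(-3.0, 1.0, num=200)
# >>> ar_coef = np.poly(get_ar_roots([0.05, 0.01], [0.2]))  # highest-order coefficient first
# >>> pspec = power_spectrum(freq, sigma=1.0, ar_coef=ar_coef, ma_coefs=[1.0, 0.5])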
def carma_variance(sigsqr, ar_roots, ma_coefs=[1.0], lag=0.0):
"""
Return the autocovariance function of a CARMA(p,q) process.
:param sigsqr: The variance in the driving white noise.
:param ar_roots: The roots of the AR characteristic polynomial.
:param ma_coefs: The moving average coefficients.
:param lag: The lag at which to calculate the autocovariance function.
"""
    if len(ma_coefs) > len(ar_roots):
        raise ValueError("Size of ma_coefs must be less than or equal to size of ar_roots.")
if len(ma_coefs) < len(ar_roots):
# add extra zeros to end of ma_coefs
nmore = len(ar_roots) - len(ma_coefs)
ma_coefs = np.append(ma_coefs, np.zeros(nmore))
sigma1_variance = 0.0 + 0j
p = ar_roots.size
for k in xrange(p):
denom_product = 1.0 + 0j
for l in xrange(p):
if l != k:
denom_product *= (ar_roots[l] - ar_roots[k]) * (np.conjugate(ar_roots[l]) + ar_roots[k])
denom = -2.0 * denom_product * ar_roots[k].real
ma_sum1 = 0.0 + 0j
ma_sum2 = 0.0 + 0j
for l in xrange(p):
ma_sum1 += ma_coefs[l] * ar_roots[k] ** l
ma_sum2 += ma_coefs[l] * (-1.0 * ar_roots[k]) ** l
numer = ma_sum1 * ma_sum2 * np.exp(ar_roots[k] * abs(lag))
sigma1_variance += numer / denom
return sigsqr * sigma1_variance.real
def car1_process(time, sigsqr, tau):
"""
Generate a CAR(1) process.
:param time: The time values at which to generate the CAR(1) process at.
:param sigsqr: The variance in the driving white noise term.
:param tau: The e-folding (mean-reversion) time scale of the CAR(1) process. Note that tau = -1.0 / ar_root.
:rtype : A numpy array containing the simulated CAR(1) process values at time.
"""
marginal_var = sigsqr * tau / 2.0
y = np.zeros(len(time))
y[0] = np.sqrt(marginal_var) * np.random.standard_normal()
for i in range(1, len(time)):
dt = time[i] - time[i-1]
rho = np.exp(-dt / tau)
conditional_var = marginal_var * (1.0 - rho ** 2)
y[i] = rho * y[i-1] + np.sqrt(conditional_var) * np.random.standard_normal()
return y
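# A quick sketch of simulating a CAR(1) process with this helper (the time grid, sigsqr and tau
# values are illustrative assumptions):
#
# >>> t = np.sort(np.random.uniform(0.0, 100.0, size=500))
# >>> y = car1_process(t, sigsqr=1.0, tau=10.0)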
def carma_process(time, sigsqr, ar_roots, ma_coefs=[1.0]):
"""
Generate a CARMA(p,q) process.
:param time: The time values at which to generate the CARMA(p,q) process at.
:param sigsqr: The variance in the driving white noise term.
:param ar_roots: The roots of the autoregressive characteristic polynomial.
:param ma_coefs: The moving average coefficients.
:rtype : A numpy array containing the simulated CARMA(p,q) process values at time.
"""
    if len(ma_coefs) > len(ar_roots):
        raise ValueError("Size of ma_coefs must be less than or equal to size of ar_roots.")
p = len(ar_roots)
if p == 1:
# generate a CAR(1) process
return car1_process(time, sigsqr, -1.0 / np.asscalar(ar_roots))
if len(ma_coefs) < p:
# add extra zeros to end of ma_coefs
q = len(ma_coefs)
ma_coefs = np.resize(np.array(ma_coefs), len(ar_roots))
ma_coefs[q:] = 0.0
time.sort()
# make sure process is stationary
    if np.any(ar_roots.real >= 0):
        raise ValueError("Process is not stationary, real part of roots must be negative.")
# make sure the roots are unique
tol = 1e-8
roots_grid = np.meshgrid(ar_roots, ar_roots)
roots_grid1 = roots_grid[0].ravel()
roots_grid2 = roots_grid[1].ravel()
diff_roots = np.abs(roots_grid1 - roots_grid2) / np.abs(roots_grid1 + roots_grid2)
    # ignore the diagonal (each root compared with itself) when testing for duplicated roots
    if np.any(diff_roots + np.eye(p).ravel() < tol):
        raise ValueError("Roots are not unique.")
# Setup the matrix of Eigenvectors for the Kalman Filter transition matrix. This allows us to transform
# quantities into the rotated state basis, which makes the computations for the Kalman filter easier and faster.
EigenMat = np.ones((p, p), dtype=complex)
EigenMat[1, :] = ar_roots
for k in xrange(2, p):
EigenMat[k, :] = ar_roots ** k
# Input vector under the original state space representation
Rvector = np.zeros(p, dtype=complex)
Rvector[-1] = 1.0
# Input vector under rotated state space representation
Jvector = solve(EigenMat, Rvector) # J = inv(E) * R
# Compute the vector of moving average coefficients in the rotated state.
rotated_MA_coefs = ma_coefs.dot(EigenMat)
# Calculate the stationary covariance matrix of the state vector
StateVar = np.empty((p, p), dtype=complex)
for j in xrange(p):
StateVar[:, j] = -sigsqr * Jvector * np.conjugate(Jvector[j]) / (ar_roots + np.conjugate(ar_roots[j]))
# Initialize variance in one-step prediction error and the state vector
PredictionVar = StateVar.copy()
StateVector = np.zeros(p, dtype=complex)
# Convert the current state to matrices for convenience, since we'll be doing some Linear algebra.
StateVector = np.matrix(StateVector).T
StateVar = np.matrix(StateVar)
PredictionVar = np.matrix(PredictionVar)
rotated_MA_coefs = np.matrix(rotated_MA_coefs) # this is a row vector, so no transpose
StateTransition = np.zeros_like(StateVector)
KalmanGain = np.zeros_like(StateVector)
# Initialize the Kalman mean and variance. These are the forecasted values and their variances.
kalman_mean = 0.0
kalman_var = np.real(np.asscalar(rotated_MA_coefs * PredictionVar * rotated_MA_coefs.H))
# simulate the first time series value
y = np.empty_like(time)
y[0] = np.random.normal(kalman_mean, np.sqrt(kalman_var))
# Initialize the innovations, i.e., the KF residuals
innovation = y[0]
for i in xrange(1, time.size):
# First compute the Kalman gain
KalmanGain = PredictionVar * rotated_MA_coefs.H / kalman_var
# update the state vector
StateVector += innovation * KalmanGain
# update the state one-step prediction error variance
PredictionVar -= kalman_var * (KalmanGain * KalmanGain.H)
# predict the next state, do element-wise multiplication
dt = time[i] - time[i - 1]
StateTransition = np.matrix(np.exp(ar_roots * dt)).T
StateVector = np.multiply(StateVector, StateTransition)
# update the predicted state covariance matrix
PredictionVar = np.multiply(StateTransition * StateTransition.H, PredictionVar - StateVar) + StateVar
# now predict the observation and its variance
kalman_mean = np.real(np.asscalar(rotated_MA_coefs * StateVector))
kalman_var = np.real(np.asscalar(rotated_MA_coefs * PredictionVar * rotated_MA_coefs.H))
# simulate the next time series value
y[i] = np.random.normal(kalman_mean, np.sqrt(kalman_var))
# finally, update the innovation
innovation = y[i] - kalman_mean
return y
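# A minimal end-to-end sketch combining the helpers above (all numbers are illustrative
# assumptions, not values from the original source):
#
# >>> t = np.linspace(0.0, 200.0, num=1000)
# >>> ar_roots = get_ar_roots([0.05, 0.01], [0.2])
# >>> y = carma_process(t, sigsqr=1.0, ar_roots=ar_roots, ma_coefs=[1.0, 0.5])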
##################
# Deprecated
class KalmanFilterDeprecated(object):
def __init__(self, time, y, yvar, sigsqr, ar_roots, ma_coefs=[1.0]):
"""
Constructor for Kalman Filter class.
:param time: The time values of the time series.
:param y: The mean-subtracted time series.
:param yvar: The variance in the measurement errors on the time series.
:param sigsqr: The variance of the driving white noise term in the CAR(p) process.
:param ar_roots: The roots of the autoregressive characteristic polynomial.
"""
        if len(ma_coefs) > ar_roots.size:
            raise ValueError("Order of MA polynomial cannot be larger than order of AR polynomial.")
self.time = time
self.y = y
self.yvar = yvar
self.sigsqr = sigsqr
self.ar_roots = ar_roots
self.p = ar_roots.size # order of the CARMA(p,q) process
self.q = len(ma_coefs)
self.ma_coefs = np.append(ma_coefs, np.zeros(self.p - self.q))
def reset(self):
"""
Reset the Kalman Filter to its initial state.
"""
# Setup the matrix of Eigenvectors for the Kalman Filter transition matrix. This allows us to transform
# quantities into the rotated state basis, which makes the computations for the Kalman filter easier and faster.
EigenMat = np.ones((self.p, self.p), dtype=complex)
EigenMat[1, :] = self.ar_roots
for k in xrange(2, self.p):
EigenMat[k, :] = self.ar_roots ** k
# Input vector under the original state space representation
Rvector = np.zeros(self.p, dtype=complex)
Rvector[-1] = 1.0
# Input vector under rotated state space representation
Jvector = solve(EigenMat, Rvector) # J = inv(E) * R
# Compute the vector of moving average coefficients in the rotated state.
rotated_MA_coefs = self.ma_coefs.dot(EigenMat)
# Calculate the stationary covariance matrix of the state vector
StateVar = np.empty((self.p, self.p), dtype=complex)
for j in xrange(self.p):
StateVar[:, j] = -self.sigsqr * Jvector * np.conjugate(Jvector[j]) / \
(self.ar_roots + np.conjugate(self.ar_roots[j]))
# Initialize variance in one-step prediction error and the state vector
PredictionVar = StateVar.copy()
StateVector = np.zeros(self.p, dtype=complex)
# Convert the current state to matrices for convenience, since we'll be doing some Linear algebra.
self._StateVector = np.matrix(StateVector).T
self._StateVar = np.matrix(StateVar)
self._PredictionVar = np.matrix(PredictionVar)
self._rotated_MA_coefs = np.matrix(rotated_MA_coefs) # this is a row vector, so no transpose
self._StateTransition = np.zeros_like(self._StateVector)
self._KalmanGain = np.zeros_like(self._StateVector)
# Initialize the Kalman mean and variance. These are the forecasted values and their variances.
self.kalman_mean = np.empty_like(self.time)
self.kalman_var = np.empty_like(self.time)
self.kalman_mean[0] = 0.0
self.kalman_var[0] = np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) \
+ self.yvar[0]
# Initialize the innovations, i.e., the KF residuals
self._innovation = self.y[0]
self._current_index = 1
def update(self):
"""
Perform one iteration (update) of the Kalman Filter.
"""
# First compute the Kalman gain
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[self._current_index - 1]
# update the state vector
self._StateVector += self._innovation * self._KalmanGain
# update the state one-step prediction error variance
self._PredictionVar -= self.kalman_var[self._current_index - 1] * (self._KalmanGain * self._KalmanGain.H)
# predict the next state, do element-wise multiplication
dt = self.time[self._current_index] - self.time[self._current_index - 1]
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
self._StateVector = np.multiply(self._StateVector, self._StateTransition)
# update the predicted state covariance matrix
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
# now predict the observation and its variance
self.kalman_mean[self._current_index] = np.real(np.asscalar(self._rotated_MA_coefs * self._StateVector))
self.kalman_var[self._current_index] = \
np.real(np.asscalar(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H))
self.kalman_var[self._current_index] += self.yvar[self._current_index]
# finally, update the innovation
self._innovation = self.y[self._current_index] - self.kalman_mean[self._current_index]
self._current_index += 1
def filter(self):
"""
Perform the Kalman Filter on all points of the time series. The kalman mean and variance are returned upon
completion, and are stored in the instantiated KalmanFilter object.
"""
self.reset()
for i in xrange(self.time.size - 1):
self.update()
return self.kalman_mean, self.kalman_var
def predict(self, time_predict):
"""
Return the predicted value of a time series and its standard deviation at the input time given the input
values of the CARMA(p,q) model parameters and a measured time series.
:rtype : A tuple containing the predicted value and its variance.
:param time_predict: The time at which to predict the time series.
"""
        if not time_predict > self.time.min():
            raise ValueError("backcasting currently not supported: time_predict must be greater than self.time.min()")
self.reset()
# find the index where time[ipredict-1] < time_predict < time[ipredict]
ipredict = np.max(np.where(self.time < time_predict)) + 1
for i in xrange(ipredict - 1):
# run the kalman filter for time < time_predict
self.update()
# predict the value of y[time_predict]
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[ipredict - 1]
self._StateVector += self._innovation * self._KalmanGain
self._PredictionVar -= self.kalman_var[ipredict - 1] * (self._KalmanGain * self._KalmanGain.H)
dt = time_predict - self.time[ipredict - 1]
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
self._StateVector = np.multiply(self._StateVector, self._StateTransition)
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
ypredict_mean = np.asscalar(np.real(self._rotated_MA_coefs * self._StateVector))
ypredict_var = np.asscalar(np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H))
# start the running statistics for the conditional mean and precision of the predicted time series value, given
# the measured time series
cprecision = 1.0 / ypredict_var
cmean = cprecision * ypredict_mean
if ipredict >= self.time.size:
# we are forecasting (extrapolating) the value, so no need to run interpolation steps below
return ypredict_mean, ypredict_var
# for time > time_predict we need to compute the coefficients for the linear filter, i.e., at time[j]:
# E(y[j]|{y[i]; j<i}) = alpha[j] + beta[j] * ypredict. we do this using recursions similar to the kalman
# filter.
# first set the initial values.
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / ypredict_var
# initialize the coefficients for predicting the state vector at coefs(time_predict|time_predict)
const_state = self._StateVector - self._KalmanGain * ypredict_mean
slope_state = self._KalmanGain
# update the state one-step prediction error variance
self._PredictionVar -= ypredict_var * (self._KalmanGain * self._KalmanGain.H)
# do coefs(time_predict|time_predict) --> coefs(time[i+1]|time_predict)
dt = self.time[ipredict] - time_predict
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
const_state = np.multiply(const_state, self._StateTransition)
slope_state = np.multiply(slope_state, self._StateTransition)
# update the predicted state covariance matrix
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
# compute the coefficients for the linear filter at time[ipredict], and compute the variance in the predicted
# y[ipredict]
const = np.asscalar(np.real(self._rotated_MA_coefs * const_state))
slope = np.asscalar(np.real(self._rotated_MA_coefs * slope_state))
self.kalman_var[ipredict] = \
np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) + \
self.yvar[ipredict]
# update the running conditional mean and variance of the predicted time series value
cprecision += slope ** 2 / self.kalman_var[ipredict]
cmean += slope * (self.y[ipredict] - const) / self.kalman_var[ipredict]
self.const = np.zeros(self.time.size)
self.slope = np.zeros(self.time.size)
self.const[ipredict] = const
self.slope[ipredict] = slope
# now repeat for time > time_predict
for i in xrange(ipredict + 1, self.time.size):
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[i - 1]
# update the state prediction coefficients: coefs(i|i-1) --> coefs(i|i)
const_state += self._KalmanGain * (self.y[i - 1] - const)
slope_state -= self._KalmanGain * slope
# update the state one-step prediction error variance
self._PredictionVar -= self.kalman_var[i - 1] * (self._KalmanGain * self._KalmanGain.H)
# compute the one-step state prediction coefficients: coefs(i|i) --> coefs(i+1|i)
dt = self.time[i] - self.time[i - 1]
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
const_state = np.multiply(const_state, self._StateTransition)
slope_state = np.multiply(slope_state, self._StateTransition)
# compute the state one-step prediction error variance
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
# compute the coefficients for predicting y[i]|y[j],j<i as a function of ypredict
const = np.asscalar(np.real(self._rotated_MA_coefs * const_state))
slope = np.asscalar(np.real(self._rotated_MA_coefs * slope_state))
# compute the variance in predicting y[i]|y[j],j<i
self.kalman_var[i] = \
np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) + \
self.yvar[i]
# finally, update the running conditional mean and variance of the predicted time series value
cprecision += slope ** 2 / self.kalman_var[i]
cmean += slope * (self.y[i] - const) / self.kalman_var[i]
self.const[i] = const
self.slope[i] = slope
cvar = 1.0 / cprecision
cmean *= cvar
return cmean, cvar
def simulate(self, time_simulate):
"""
Simulate a time series at the input time values of time_simulate, given the measured time series and input
CARMA(p,q) parameters.
:rtype : A scalar or numpy array, depending on type of time_simulate.
:param time_simulate: The time(s) at which to simulate a random draw of the time series conditional on the
measured time series and the input parameters.
"""
if np.isscalar(time_simulate):
cmean, cvar = self.predict(time_simulate)
ysimulated = np.random.normal(cmean, np.sqrt(cvar))
return ysimulated
else:
# input is array-like, need to simulate values sequentially, adding each value to the measured time series
# as they are simulated
time0 = self.time # save original values
y0 = self.y
yvar0 = self.yvar
ysimulated = np.empty(time_simulate.size)
time_simulate.sort()
for i in xrange(time_simulate.size):
cmean, cvar = self.predict(time_simulate[i])
ysimulated[i] = np.random.normal(cmean, np.sqrt(cvar)) # simulate the time series value
# find the index where time[isimulate-1] < time_simulate < time[isimulate]
isimulate = np.max(np.where(self.time < time_simulate[i])) + 1
# insert the simulated value into the time series array
self.time = np.insert(self.time, isimulate, time_simulate[i])
self.y = np.insert(self.y, isimulate, ysimulated[i])
self.yvar = np.insert(self.yvar, isimulate, 0.0)
# reset measured time series to original values
self.y = y0
self.time = time0
self.yvar = yvar0
return ysimulated
| guillaumelenoir/WAVEPAL | carmcmc/carma_pack.py | Python | mit | 72,802 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.tests import opus_unittest
from numpy import array, arange
from opus_core.tests.utils.variable_tester import VariableTester
class Tests(opus_unittest.OpusTestCase):
def test_my_inputs(self):
tester = VariableTester(
__file__,
package_order=['urbansim_parcel', 'urbansim'],
test_data={
"parcel":{"parcel_id":array([1,2,3,4,5]),
"grid_id":array([1, 1, 3, 2, 3])
},
"gridcell":{
"grid_id":array([1, 2, 3]),
"travel_time_to_cbd":array([100, 1000, 1500]),
}
}
)
should_be = array([100.0, 100.0, 1500.0, 1000.0, 1500.0])
tester.test_is_close_for_variable_defined_by_this_module(self, should_be)
if __name__=='__main__':
opus_unittest.main() | christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim_parcel/parcel/tests/travel_time_to_cbd.py | Python | gpl-2.0 | 1,051 |
from test_helper import ApiTestCase, is_auth, is_recursor
class Servers(ApiTestCase):
def test_flush(self):
r = self.session.put(self.url("/api/v1/servers/localhost/cache/flush?domain=example.org."))
self.assert_success_json(r)
data = r.json()
self.assertIn('count', data)
def test_flush_root(self):
r = self.session.put(self.url("/api/v1/servers/localhost/cache/flush?domain=."))
self.assert_success_json(r)
data = r.json()
self.assertIn('count', data)
self.assertEqual(data['result'], 'Flushed cache.')
def test_flush_no_domain(self):
r = self.session.put(
self.url("/api/v1/servers/localhost/cache/flush"))
self.assertEquals(r.status_code, 422)
def test_flush_unqualified(self):
r = self.session.put(
self.url("/api/v1/servers/localhost/cache/flush?domain=bar"))
self.assertEquals(r.status_code, 422)
| DrRemorse/pdns | regression-tests.api/test_Cache.py | Python | gpl-2.0 | 954 |
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid as stdlib_uuid
from oslo_utils import timeutils
import webob
from nova.api.openstack.compute import consoles as consoles_v2
from nova.api.openstack.compute.plugins.v3 import consoles as consoles_v21
from nova.compute import vm_states
from nova import console
from nova import db
from nova import exception
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class FakeInstanceDB(object):
def __init__(self):
self.instances_by_id = {}
self.ids_by_uuid = {}
self.max_id = 0
def return_server_by_id(self, context, id):
if id not in self.instances_by_id:
self._add_server(id=id)
return dict(self.instances_by_id[id])
def return_server_by_uuid(self, context, uuid):
if uuid not in self.ids_by_uuid:
self._add_server(uuid=uuid)
return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
def _add_server(self, id=None, uuid=None):
if id is None:
id = self.max_id + 1
if uuid is None:
uuid = str(stdlib_uuid.uuid4())
instance = stub_instance(id, uuid=uuid)
self.instances_by_id[id] = instance
self.ids_by_uuid[uuid] = id
if id > self.max_id:
self.max_id = id
def stub_instance(id, user_id='fake', project_id='fake', host=None,
vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0):
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"admin_pass": "",
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
"key_name": key_name,
"key_data": key_data,
"vm_state": vm_state or vm_states.BUILDING,
"task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"root_gb": 0,
"hostname": "",
"host": host,
"instance_type": {},
"user_data": "",
"reservation_id": reservation_id,
"mac_address": "",
"launched_at": timeutils.utcnow(),
"terminated_at": timeutils.utcnow(),
"availability_zone": "",
"display_name": server_name,
"display_description": "",
"locked": False,
"metadata": [],
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress}
return instance
class ConsolesControllerTestV21(test.NoDBTestCase):
def setUp(self):
super(ConsolesControllerTestV21, self).setUp()
self.flags(verbose=True)
self.instance_db = FakeInstanceDB()
self.stubs.Set(db, 'instance_get',
self.instance_db.return_server_by_id)
self.stubs.Set(db, 'instance_get_by_uuid',
self.instance_db.return_server_by_uuid)
self.uuid = str(stdlib_uuid.uuid4())
self.url = '/v2/fake/servers/%s/consoles' % self.uuid
self._set_up_controller()
def _set_up_controller(self):
self.controller = consoles_v21.ConsolesController()
def test_create_console(self):
def fake_create_console(cons_self, context, instance_id):
self.assertEqual(instance_id, self.uuid)
return {}
self.stubs.Set(console.api.API, 'create_console', fake_create_console)
req = fakes.HTTPRequest.blank(self.url)
self.controller.create(req, self.uuid, None)
def test_create_console_unknown_instance(self):
def fake_create_console(cons_self, context, instance_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(console.api.API, 'create_console', fake_create_console)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, self.uuid, None)
def test_show_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
pool = dict(console_type='fake_type',
public_hostname='fake_hostname')
return dict(id=console_id, password='fake_password',
port='fake_port', pool=pool, instance_name='inst-0001')
expected = {'console': {'id': 20,
'port': 'fake_port',
'host': 'fake_hostname',
'password': 'fake_password',
'instance_name': 'inst-0001',
'console_type': 'fake_type'}}
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
res_dict = self.controller.show(req, self.uuid, '20')
self.assertThat(res_dict, matchers.DictMatches(expected))
def test_show_console_unknown_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, self.uuid, '20')
def test_show_console_unknown_instance(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFoundForInstance(
instance_uuid=instance_id)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, self.uuid, '20')
def test_list_consoles(self):
def fake_get_consoles(cons_self, context, instance_id):
self.assertEqual(instance_id, self.uuid)
pool1 = dict(console_type='fake_type',
public_hostname='fake_hostname')
cons1 = dict(id=10, password='fake_password',
port='fake_port', pool=pool1)
pool2 = dict(console_type='fake_type2',
public_hostname='fake_hostname2')
cons2 = dict(id=11, password='fake_password2',
port='fake_port2', pool=pool2)
return [cons1, cons2]
expected = {'consoles':
[{'console': {'id': 10, 'console_type': 'fake_type'}},
{'console': {'id': 11, 'console_type': 'fake_type2'}}]}
self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.uuid)
self.assertThat(res_dict, matchers.DictMatches(expected))
def test_delete_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
pool = dict(console_type='fake_type',
public_hostname='fake_hostname')
return dict(id=console_id, password='fake_password',
port='fake_port', pool=pool)
def fake_delete_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.controller.delete(req, self.uuid, '20')
def test_delete_console_unknown_console(self):
def fake_delete_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.uuid, '20')
def test_delete_console_unknown_instance(self):
def fake_delete_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFoundForInstance(
instance_uuid=instance_id)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.uuid, '20')
def _test_fail_policy(self, rule, action, data=None):
rules = {
rule: common_policy.parse_rule("!"),
}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(self.url + '/20')
if data is not None:
self.assertRaises(exception.PolicyNotAuthorized, action,
req, self.uuid, data)
else:
self.assertRaises(exception.PolicyNotAuthorized, action,
req, self.uuid)
def test_delete_console_fail_policy(self):
self._test_fail_policy("os_compute_api:os-consoles:delete",
self.controller.delete, data='20')
def test_create_console_fail_policy(self):
self._test_fail_policy("os_compute_api:os-consoles:create",
self.controller.create, data='20')
def test_index_console_fail_policy(self):
self._test_fail_policy("os_compute_api:os-consoles:index",
self.controller.index)
def test_show_console_fail_policy(self):
self._test_fail_policy("os_compute_api:os-consoles:show",
self.controller.show, data='20')
class ConsolesControllerTestV2(ConsolesControllerTestV21):
def _set_up_controller(self):
self.controller = consoles_v2.Controller()
def _test_fail_policy(self, rule, action, data=None):
# V2 API don't have policy
pass
| akash1808/nova_test_latest | nova/tests/unit/api/openstack/compute/test_consoles.py | Python | apache-2.0 | 11,651 |
#!/usr/bin/env python3
"""
This script has two arguments: a sitemap url and an api profile.
It will download the entire profile from the NPO Front end api, and it will also download the entire sitemap.
Then, it compares the URLs found in both. They should represent the same set.
If there are URLs in the API which are not in the sitemap, and which indeed do not exist (they give 404s), then the
script supposes this is an error and can delete the object from the API (if --delete is specified)
If objects are in the API but not in the sitemap, but they do still exist, then we suppose the sitemap is outdated.
If objects are in the sitemap but not in the API then there are two possibilities
- The object is in the API, but not in the profile
- The object does not exist in the API at all
In both cases the object needs to be reindexed from the CMS.
"""
import datetime
import io
import os
import pickle
import re
import urllib
import xml.etree.ElementTree
import requests
from npoapi import Pages
from npoapi import PagesBackend
class CheckWithSitemap:
def __init__(self):
self.api = Pages().command_line_client()
self.backend = PagesBackend(env=self.api.actualenv).configured_login()
self.add_arguments()
self.args = self.api.parse_args()
args = self.args
self.profile = args.profile
self.sitemap_url = args.sitemap[0]
self.clean = args.clean
self.get_check = not args.no_get_check
self.delete_from_api = args.delete
self.show_docs_from_api = args.show
self.https_to_http = args.https_to_http
self.http_to_https = args.http_to_https
self.use_database = args.use_database
self.api_as_now = args.api_as_now
self.log = self.api.logger
if self.use_database and self.clean:
raise Exception("Can't use both use_database and clean")
if self.https_to_http and self.http_to_https:
raise Exception("Can't set both https_to_http and http_to_https")
if args.target_directory:
self.target_directory = args.target_directory
self.log.info("Target directory: %s" % self.target_directory)
if not os.path.exists(self.target_directory):
self.log.info("Created")
os.makedirs(self.target_directory)
else:
self.target_directory = ""
if self.clean:
self.log.info("Cleaning")
self.log.info("API: %s, profile: %s" % (self.api.url, self.profile))
def add_arguments(self):
api = self.api
api.add_argument('sitemap', type=str, nargs=1, help='URL to the sitemap')
api.add_argument('profile', type=str, nargs='?', help='NPO pages profile')
api.add_argument('-C', '--clean', action='store_true', default=False, help='clean build')
api.add_argument('-D', '--delete', action='store_true', default=False, help='remove from api')
api.add_argument('--no_get_check', action='store_true', default=False, help='when removing from api, dont check http status code first (only 404s will be deleted)')
api.add_argument('-S', '--show', action='store_true', default=False, help='show from api')
api.add_argument('--use_database', action='store_true', default=False, help='explicitly use the local database (inverse of clean)')
api.add_argument('--https_to_http', action='store_true', default=False, help='Replace all https with http')
api.add_argument('--http_to_https', action='store_true', default=False, help='Replace all http with https')
api.add_argument('--api_as_now', action='store_true', default=False, help='Normally api object created after this morning are ignored. After repairing you could use this argument to check results')
api.add_argument('--post_process_sitemap', type=str, default=None, help='A piec')
api.add_argument('--post_process_api', type=str, default=None, help='')
api.add_argument('--post_process', type=str, default=None, help='')
api.add_argument('--target_directory', type=str, default=None, help='')
def file_in_target(self, file: str) -> str:
return os.path.join(self.target_directory, file)
def get_urls_from_api_iterate(self, until = None) -> set:
"""
Gets all urls as they are in the pages api
        :param datetime.datetime until: a constraint on creationDate. Defaults to 6 o'clock today
"""
new_urls = set()
from npoapi.data import api as API
form = API.PagesForm()
form.sortFields = []
form.sortFields.append(API.PageSortTypeEnum.CREATION_DATE)
form.searches = API.PagesSearchType()
form.searches.creationDates = API.DateRangeMatcherListType()
if not until:
now = datetime.datetime.now()
until = now.replace(hour=6, minute=0, second=0, microsecond=0)
pages = self.api.iterate(profile=self.profile, form=form)
for page in pages:
if 'creationDate' in page:
creationDate = datetime.datetime.fromtimestamp(page['creationDate'] / 1000)
else:
creationDate = datetime.datetime.fromtimestamp(0)
url = page['url']
if creationDate < until:
new_urls.add(url)
else:
self.log.info("Ignoring %s since it is newer (%s) than sitemap itself" % (url, str(creationDate)))
if len(new_urls) % 100 == 0:
self.log.info("API: Found %d urls for profile %s" % (len(new_urls), self.profile))
return new_urls
def get_urls(self, until=None) -> list:
if self.profile is None:
raise Exception("No profile")
url_file = self.file_in_target("data." + self.profile + ".api.p")
if self.use_database or (os.path.exists(url_file) and not self.clean):
new_urls = pickle.load(open(url_file, "rb"))
else:
if os.path.exists(url_file):
if self.clean:
self.log.info("Ignoring %s because of clean parameter" % url_file)
else:
self.log.info("No %s found, creating it now" % url_file)
# new_urls = get_urls_from_api_search()
new_urls = sorted(self.get_urls_from_api_iterate(until=until))
#new_urls = sorted(get_urls_from_api_iterate(datetime.datetime.now()))
pickle.dump(new_urls, open(url_file, "wb"))
self.write_urls_to_file(new_urls, "data." + self.profile + ".api.txt")
return list(new_urls)
def get_sitemap_from_xml(self) -> list:
self.log.debug("Opening %s", self.sitemap_url)
response = urllib.request.urlopen(self.sitemap_url)
locs = set()
for ev, el in xml.etree.ElementTree.iterparse(response):
if el.tag == "{http://www.sitemaps.org/schemas/sitemap/0.9}loc":
locs.add(el.text)
response.close()
new_urls = set()
for loc in locs:
#print(loc)
response = urllib.request.urlopen(loc)
for ev, el in xml.etree.ElementTree.iterparse(response):
if el.tag == "{http://www.sitemaps.org/schemas/sitemap/0.9}loc":
url = el.text
new_urls.add(url)
if len(new_urls) % 1000 == 0:
self.log.info("Sitemap: %s urls", len(new_urls))
response.close()
return list(new_urls)
def write_urls_to_file(self, urls: list, file_name : str):
dest_file = self.file_in_target(file_name)
with io.open(dest_file, 'w', encoding="utf-8") as f:
f.write('\n'.join(urls))
f.write('\n')
self.log.info("Wrote %s (%d entries)", dest_file, len(urls))
def get_sitemap(self) -> list:
sitemap_file = self.file_in_target("data." + self.profile + ".sitemap.p")
if self.use_database or (os.path.exists(sitemap_file) and not self.clean):
new_urls = pickle.load(open(sitemap_file, "rb"))
else:
new_urls = sorted(self.get_sitemap_from_xml())
pickle.dump(new_urls, open(sitemap_file, "wb"))
self.write_urls_to_file(new_urls, "data." + self.profile + ".sitemap.txt")
return new_urls
def http_status(self, url):
try:
resp = requests.head(url, allow_redirects=False)
return resp.status_code
except Exception as e:
self.log.info("%s" % str(e))
return 404
def unmap(self, mapped_urls: list, urls: list, url: str):
try:
i = mapped_urls.index(url)
return urls[i]
except ValueError:
            self.log.error("Could not map %s" % url)
return ""
def clean_from_api(self,
mapped_api_urls: list,
api_urls: list,
mapped_sitemap_urls: list):
"""Explores what needs to be cleaned from the API, and (optionally) also tries to do that."""
dest_file_name = "report." + self.profile + ".in_api_but_not_in_sitemap.txt"
dest_file = self.file_in_target(dest_file_name)
if not os.path.exists(dest_file) or self.clean:
self.log.info("Calculating what needs to be removed from api")
mapped_not_in_sitemap = set(mapped_api_urls) - set(mapped_sitemap_urls)
# translate to actual urls
not_in_sitemap = sorted(list(set(map(lambda url: self.unmap(mapped_api_urls, api_urls, url), mapped_not_in_sitemap))))
self.write_urls_to_file(sorted(list(not_in_sitemap)), dest_file_name)
else:
with io.open(dest_file, 'r', encoding='utf-8') as f:
not_in_sitemap = f.read().splitlines()
self.log.info("Read from %s" % f.name)
self.log.info("In api but not in sitemap: %s" % len(not_in_sitemap))
if self.delete_from_api:
clean_from_es = self.file_in_target("todo." + self.profile + ".should_be_removed_from_es.txt")
remove_from_api = self.file_in_target("done." + self.profile + ".removed_from_api.txt")
self.log.info("Deleting from api")
todo_delete_from_es = 0
with io.open(clean_from_es, 'w', encoding='utf-8') as f_clean_from_es, \
io.open(remove_from_api, 'w', encoding='utf-8') as f_removed_from_api:
for idx, url in enumerate(not_in_sitemap):
if self.get_check:
status = self.http_status(url)
else:
status = None
if status is None or status == 404 or status == 301:
self.log.info("(%d/%d) Deleting %s (http status: %s)", idx, len(not_in_sitemap), url, str(status))
response = self.backend.delete(url)
if self.backend.code == 404:
self.log.info("Backend gave 404 for delete call: %s", url)
f_clean_from_es.write(url + '\n')
todo_delete_from_es += 1
elif self.backend.code == 400:
self.log.info("Backend gave 400 for delete call: %s", url)
else:
self.log.info("%s" % response)
f_removed_from_api.write(url + '\n')
else:
result = self.backend.get(url)
if not result is None:
page = self.backend.to_object(result)
self.log.info("(%d/%d) In api, not in sitemap, but not giving 404 (but %s) url %s: %s", idx, len(not_in_sitemap), str(status), url, str(page.lastPublished))
else:
self.log.info("(%d/%d) In api, not giving 404 (but %s), but not found in publisher %s", idx, len(not_in_sitemap), str(status), url)
if todo_delete_from_es > 0:
self.log.info("""
Some things could not be removed from api (gave 404). Wrote to %s. You may want to run
clean_from_es.sh %s
""" % (clean_from_es, clean_from_es))
else:
self.log.info("No actual deletes requested")
def perform_add_to_api(self, not_in_api: list):
"""Actually add to api"""
self.log.info("Not implemented")
def add_to_api(
self,
mapped_api_urls: list,
mapped_sitemap_urls: list,
sitemap_urls:list):
"""Explores what needs to be added to the API"""
dest_file = self.file_in_target("report." + self.profile + ".in_sitemap_but_not_in_api.txt")
if not os.path.exists(dest_file) or self.clean:
self.log.info("Calculating what needs to be added to the api")
mapped_not_in_api = set(mapped_sitemap_urls) - set(mapped_api_urls)
not_in_api = sorted(list(set(map(lambda url: self.unmap(mapped_sitemap_urls, sitemap_urls, url), mapped_not_in_api))))
with io.open(dest_file, 'w', encoding='utf-8') as f:
f.write('\n'.join(not_in_api))
print("Wrote to %s" % f.name)
else:
with io.open(dest_file, 'r', encoding='utf-8') as f:
not_in_api = f.read().splitlines()
self.log.info("Read from %s" % f.name)
self.log.info("In sitemap but not in api: %s urls. E.g.:" % len(not_in_api))
for url in not_in_api[:10]:
print(url)
self.perform_add_to_api(not_in_api)
def main(self):
# list of all urls as they are present in the page api
api_urls = self.get_urls(until= datetime.datetime.now() if self.api_as_now else None)
# list of all urls as there are present in the sitemap
sitemap_urls = self.get_sitemap()
post_process = lambda url: url
if self.args.post_process:
post_process = eval(self.args.post_process)
post_process_sitemap = lambda url: url
post_process_api = lambda url: url
if self.args.post_process_sitemap:
post_process_sitemap = eval(self.args.post_process_sitemap)
if self.args.post_process_api:
post_process_api = eval(self.args.post_process_api)
schema_mapper = lambda url: url
if self.https_to_http:
schema_mapper = lambda url: re.sub(r'^https://(.*)', r'http://\1', url)
if self.http_to_https:
schema_mapper = lambda url: re.sub(r'^http://(.*)', r'https://\1', url)
self.log.info("Post processing")
# list of all urls as they are present in the page api, but post processed. Should be used for comparing, not for operations
mapped_api_urls = list(filter(None.__ne__, list(map(lambda url: post_process(post_process_api(schema_mapper(url))), api_urls))))
mapped_file = self.file_in_target("mappeddata." + self.profile + ".api.txt")
with io.open(mapped_file, 'w', encoding="utf-8") as f:
f.write('\n'.join(sorted(mapped_api_urls)))
# list of all urls as they are present in the sitemap, but post processed. Should be used for comparing, not for operations
mapped_sitemap_urls = list(filter(None.__ne__, list(map(lambda url: post_process(post_process_sitemap(schema_mapper(url))), sitemap_urls))))
mapped_file = self.file_in_target("mappeddata." + self.profile + ".sitemap.txt")
with io.open(mapped_file, 'w', encoding="utf-8") as f:
f.write('\n'.join(sorted(mapped_sitemap_urls)))
self.log.info(".")
self.clean_from_api(
mapped_api_urls,
api_urls,
mapped_sitemap_urls)
self.add_to_api(
mapped_api_urls,
mapped_sitemap_urls,
sitemap_urls)
self.log.info("Ready.")
if __name__ == "__main__":
CheckWithSitemap().main()
| npo-poms/scripts | python/check_with_sitemap.py | Python | gpl-2.0 | 15,955 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from importlib import import_module
import logging
import os
import pkgutil
from horizon.utils import file_discovery
from openstack_dashboard import theme_settings
def import_submodules(module):
"""Import all submodules and make them available in a dict."""
submodules = {}
for loader, name, ispkg in pkgutil.iter_modules(module.__path__,
module.__name__ + '.'):
try:
submodule = import_module(name)
except ImportError as e:
# FIXME: Make the errors non-fatal (do we want that?).
logging.warning("Error importing %s", name)
logging.exception(e)
else:
parent, child = name.rsplit('.', 1)
submodules[child] = submodule
return submodules
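# Illustrative usage sketch (not part of the original module): for a
# hypothetical plugin package ``myplugin`` containing ``_10_admin.py`` and
# ``_20_project.py``, the call returns the imported submodules keyed by their
# short names, silently skipping any submodule that fails to import:
#
#     import myplugin                        # hypothetical package
#     mods = import_submodules(myplugin)     # {'_10_admin': <module>, '_20_project': <module>}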
def import_dashboard_config(modules):
"""Imports configuration from all the modules and merges it."""
config = collections.defaultdict(dict)
for module in modules:
for submodule in import_submodules(module).values():
if hasattr(submodule, 'DASHBOARD'):
dashboard = submodule.DASHBOARD
config[dashboard].update(submodule.__dict__)
elif (hasattr(submodule, 'PANEL')
or hasattr(submodule, 'PANEL_GROUP')
or hasattr(submodule, 'FEATURE')):
config[submodule.__name__] = submodule.__dict__
else:
logging.warning("Skipping %s because it doesn't have DASHBOARD"
", PANEL, PANEL_GROUP, or FEATURE defined.",
submodule.__name__)
return sorted(config.items(),
key=lambda c: c[1]['__name__'].rsplit('.', 1)[1])
def update_dashboards(modules, horizon_config, installed_apps):
"""Imports dashboard and panel configuration from modules and applies it.
The submodules from specified modules are imported, and the configuration
for the specific dashboards is merged, with the later modules overriding
settings from the former. Then the configuration is applied to
horizon_config and installed_apps, in alphabetical order of files from
which the configurations were imported.
For example, given this setup:
| foo/__init__.py
| foo/_10_baz.py
| foo/_20_qux.py
| bar/__init__.py
| bar/_30_baz_.py
and being called with ``modules=[foo, bar]``, we will first have the
configuration from ``_10_baz`` and ``_30_baz`` merged, then the
configurations will be applied in order ``qux``, ``baz`` (``baz`` is
second, because the most recent file which contributed to it, ``_30_baz``,
comes after ``_20_qux``).
Panel specific configurations are stored in horizon_config. Dashboards
from both plugin-based and openstack_dashboard must be registered before
the panel configuration can be applied. Making changes to the panel is
deferred until the horizon autodiscover is completed, configurations are
applied in alphabetical order of files where it was imported.
"""
config_dashboards = horizon_config.get('dashboards', [])
if config_dashboards or horizon_config.get('default_dashboard'):
logging.warning(
'"dashboards" and "default_dashboard" in (local_)settings is '
'DEPRECATED now and may be unsupported in some future release. '
'The preferred way to specify the order of dashboards and the '
'default dashboard is the pluggable dashboard mechanism (in %s).',
', '.join([os.path.abspath(module.__path__[0])
for module in modules])
)
enabled_dashboards = []
disabled_dashboards = []
exceptions = horizon_config.get('exceptions', {})
apps = []
angular_modules = []
js_files = []
js_spec_files = []
scss_files = []
panel_customization = []
update_horizon_config = {}
for key, config in import_dashboard_config(modules):
if config.get('DISABLED', False):
if config.get('DASHBOARD'):
disabled_dashboards.append(config.get('DASHBOARD'))
continue
_apps = config.get('ADD_INSTALLED_APPS', [])
apps.extend(_apps)
if config.get('AUTO_DISCOVER_STATIC_FILES', False):
for _app in _apps:
module = import_module(_app)
base_path = os.path.join(module.__path__[0], 'static/')
file_discovery.populate_horizon_config(horizon_config,
base_path)
add_exceptions = config.get('ADD_EXCEPTIONS', {}).items()
for category, exc_list in add_exceptions:
exceptions[category] = tuple(set(exceptions.get(category, ())
+ exc_list))
angular_modules.extend(config.get('ADD_ANGULAR_MODULES', []))
# avoid pulling in dashboard javascript dependencies multiple times
existing = set(js_files)
js_files.extend([f for f in config.get('ADD_JS_FILES', [])
if f not in existing])
js_spec_files.extend(config.get('ADD_JS_SPEC_FILES', []))
scss_files.extend(config.get('ADD_SCSS_FILES', []))
update_horizon_config.update(
config.get('UPDATE_HORIZON_CONFIG', {}))
if config.get('DASHBOARD'):
dashboard = key
enabled_dashboards.append(dashboard)
if config.get('DEFAULT', False):
horizon_config['default_dashboard'] = dashboard
elif config.get('PANEL') or config.get('PANEL_GROUP'):
config.pop("__builtins__", None)
panel_customization.append(config)
# Preserve the dashboard order specified in settings
dashboards = ([d for d in config_dashboards
if d not in disabled_dashboards] +
[d for d in enabled_dashboards
if d not in config_dashboards])
horizon_config['panel_customization'] = panel_customization
horizon_config['dashboards'] = tuple(dashboards)
horizon_config.setdefault('exceptions', {}).update(exceptions)
horizon_config.update(update_horizon_config)
horizon_config.setdefault('angular_modules', []).extend(angular_modules)
horizon_config.setdefault('js_files', []).extend(js_files)
horizon_config.setdefault('js_spec_files', []).extend(js_spec_files)
horizon_config.setdefault('scss_files', []).extend(scss_files)
# apps contains reference to applications declared in the enabled folder
# basically a list of applications that are internal and external plugins
# installed_apps contains reference to applications declared in settings
    # such as django.contrib.*, django_pyscss, compressor, horizon, etc...
# for translation, we are only interested in the list of external plugins
# so we save the reference to it before we append to installed_apps
horizon_config.setdefault('plugins', []).extend(apps)
installed_apps[0:0] = apps
# Order matters, list the xstatic module name and the entry point file(s) for
# that module (this is often defined as the "main" in bower.json, and
# as the xstatic module MAIN variable in the very few compliant xstatic
# modules). If the xstatic module does define a MAIN then set the files
# list to None.
# This list is to be used as the base list which is potentially added to in
# local_settings.py before being passed to get_xstatic_dirs()
BASE_XSTATIC_MODULES = [
('xstatic.pkg.jquery', ['jquery.js']),
('xstatic.pkg.jquery_migrate', ['jquery-migrate.js']),
('xstatic.pkg.angular', [
'angular.js',
'angular-cookies.js',
'angular-sanitize.js',
'angular-route.js'
]),
('xstatic.pkg.angular_bootstrap', ['angular-bootstrap.js']),
('xstatic.pkg.angular_gettext', None),
('xstatic.pkg.angular_lrdragndrop', None),
('xstatic.pkg.angular_smart_table', None),
('xstatic.pkg.angular_fileupload', ['ng-file-upload-all.js']),
('xstatic.pkg.d3', ['d3.js']),
('xstatic.pkg.jquery_quicksearch', ['jquery.quicksearch.js']),
('xstatic.pkg.jquery_tablesorter', ['jquery.tablesorter.js']),
('xstatic.pkg.spin', ['spin.js', 'spin.jquery.js']),
('xstatic.pkg.jquery_ui', ['jquery-ui.js']),
('xstatic.pkg.bootstrap_scss', ['js/bootstrap.js']),
('xstatic.pkg.bootstrap_datepicker', ['bootstrap-datepicker.js']),
('xstatic.pkg.hogan', ['hogan.js']),
('xstatic.pkg.rickshaw', ['rickshaw.js']),
('xstatic.pkg.jsencrypt', None),
('xstatic.pkg.objectpath', ['ObjectPath.js']),
('xstatic.pkg.tv4', ['tv4.js']),
('xstatic.pkg.angular_schema_form', ['schema-form.js']),
    # @imported in scss files directly
('xstatic.pkg.font_awesome', []),
('xstatic.pkg.bootswatch', []),
('xstatic.pkg.roboto_fontface', []),
('xstatic.pkg.mdi', []),
# testing only, not included in application
('xstatic.pkg.jasmine', []),
('xstatic.pkg.termjs', []),
]
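# Illustrative sketch (an assumption, following the comment above): a
# deployment's local_settings.py could extend this base list before handing it
# to get_xstatic_dirs(); the extra package name below is hypothetical.
#
#     XSTATIC_MODULES = BASE_XSTATIC_MODULES + [
#         ('xstatic.pkg.my_extra_lib', ['my-extra-lib.js']),
#     ]
#     STATICFILES_DIRS = get_xstatic_dirs(XSTATIC_MODULES, HORIZON_CONFIG)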
def get_xstatic_dirs(XSTATIC_MODULES, HORIZON_CONFIG):
"""Discover static file configuration of the xstatic modules.
For each entry in the XSTATIC_MODULES list we determine the entry
point files (which may come from the xstatic MAIN var) and then
determine where in the Django static tree the xstatic package's contents
should be placed.
    For jquery.bootstrap.wizard.js the module name is None; the static file is
    actually a 3rd-party file, but it resides in the Horizon source tree and
    not in an xstatic package.
The xstatic.pkg.jquery_ui package had its contents moved by packagers so
it must be handled as a special case.
"""
STATICFILES_DIRS = []
HORIZON_CONFIG['xstatic_lib_files'] = []
for module_name, files in XSTATIC_MODULES:
module = import_module(module_name)
if module_name == 'xstatic.pkg.jquery_ui':
# determine the correct path for jquery-ui which packagers moved
if module.VERSION.startswith('1.10.'):
# The 1.10.x versions already contain 'ui' directory.
files = ['ui/' + files[0]]
STATICFILES_DIRS.append(
('horizon/lib/' + module.NAME, module.BASE_DIR)
)
# pull the file entry points from the xstatic package MAIN if possible
if hasattr(module, 'MAIN'):
files = module.MAIN
if not isinstance(files, list):
files = [files]
# just the Javascript files, please (don't <script> css, etc
# which is explicitly included in style/themes as appropriate)
files = [file for file in files if file.endswith('.js')]
# add to the list of files to link in the HTML
for file in files:
file = 'horizon/lib/' + module.NAME + '/' + file
HORIZON_CONFIG['xstatic_lib_files'].append(file)
return STATICFILES_DIRS
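# Illustrative shape of the result (paths below are hypothetical): each entry
# maps a 'horizon/lib/<NAME>' static prefix to the xstatic package's BASE_DIR,
# while HORIZON_CONFIG['xstatic_lib_files'] collects the JS entry points to
# link in the HTML, e.g.
#
#     [('horizon/lib/jquery', '/.../xstatic/pkg/jquery/data'), ...]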
def find_static_files(
HORIZON_CONFIG,
AVAILABLE_THEMES,
THEME_COLLECTION_DIR,
ROOT_PATH):
import horizon
import openstack_dashboard
os_dashboard_home_dir = openstack_dashboard.__path__[0]
horizon_home_dir = horizon.__path__[0]
# note the path must end in a '/' or the resultant file paths will have a
# leading "/"
file_discovery.populate_horizon_config(
HORIZON_CONFIG,
os.path.join(horizon_home_dir, 'static/')
)
# filter out non-angular javascript code and lib
HORIZON_CONFIG['js_files'] = ([f for f in HORIZON_CONFIG['js_files']
if not f.startswith('horizon/')])
# note the path must end in a '/' or the resultant file paths will have a
# leading "/"
file_discovery.populate_horizon_config(
HORIZON_CONFIG,
os.path.join(os_dashboard_home_dir, 'static/'),
sub_path='app/'
)
# Discover theme static resources, and in particular any
# static HTML (client-side) that the theme overrides
theme_static_files = {}
theme_info = theme_settings.get_theme_static_dirs(
AVAILABLE_THEMES,
THEME_COLLECTION_DIR,
ROOT_PATH)
for url, path in theme_info:
discovered_files = {}
# discover static files provided by the theme
file_discovery.populate_horizon_config(
discovered_files,
path
)
# Get the theme name from the theme url
theme_name = url.split('/')[-1]
# build a dictionary of this theme's static HTML templates.
# For each overridden template, strip off the '/templates/' part of the
# theme filename then use that name as the key, and the location in the
# theme directory as the value. This allows the quick lookup of
# theme path for any file overridden by a theme template
template_overrides = {}
for theme_file in discovered_files['external_templates']:
# Example:
# external_templates_dict[
# 'framework/widgets/help-panel/help-panel.html'
# ] = 'themes/material/templates/framework/widgets/\
# help-panel/help-panel.html'
(templates_part, override_path) = theme_file.split('/templates/')
template_overrides[override_path] = 'themes/' + \
theme_name + theme_file
discovered_files['template_overrides'] = template_overrides
# Save all of the discovered file info for this theme in our
# 'theme_files' object using the theme name as the key
theme_static_files[theme_name] = discovered_files
# Add the theme file info to the horizon config for use by template tags
HORIZON_CONFIG['theme_static_files'] = theme_static_files
| kogotko/carburetor | openstack_dashboard/utils/settings.py | Python | apache-2.0 | 14,387 |
# Natural Language Toolkit: RTE Classifier
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Ewan Klein <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Simple classifier for RTE corpus.
It calculates the overlap in words and named entities between text and
hypothesis, and also whether there are words / named entities in the
hypothesis which fail to occur in the text, since this is an indicator that
the hypothesis is more informative than (i.e. not entailed by) the text.
TO DO: better Named Entity classification
TO DO: add lemmatization
"""
import nltk
from util import accuracy
def ne(token):
"""
This just assumes that words in all caps or titles are
named entities.
@type token: C{str}
"""
if token.istitle() or \
token.isupper():
return True
return False
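# Doctest-style illustration of the heuristic above (illustrative only):
#
#     >>> ne('Obama'), ne('NATO'), ne('city')
#     (True, True, False)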
def lemmatize(word):
"""
Use morphy from WordNet to find the base form of verbs.
"""
lemma = nltk.corpus.wordnet.morphy(word, pos='verb')
if lemma is not None:
return lemma
return word
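# Doctest-style illustration (assumes the WordNet corpus is available; morphy
# only maps verb forms here, other words come back unchanged):
#
#     >>> lemmatize('walked')
#     'walk'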
class RTEFeatureExtractor(object):
"""
This builds a bag of words for both the text and the hypothesis after
throwing away some stopwords, then calculates overlap and difference.
"""
def __init__(self, rtepair, stop=True, lemmatize=False):
"""
@param rtepair: a L{RTEPair} from which features should be extracted
@param stop: if C{True}, stopwords are thrown away.
@type stop: C{bool}
"""
self.stop = stop
self.stopwords = set(['a', 'the', 'it', 'they', 'of', 'in', 'to',
'have', 'is', 'are', 'were', 'and', 'very', '.',','])
        self.negwords = set(['no', 'not', 'never', 'failed', 'rejected', 'denied'])
        # Try to tokenize so that abbreviations like U.S. and monetary amounts
        # like "$23.00" are kept as tokens.
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer('([A-Z]\.)+|\w+|\$[\d\.]+')
#Get the set of word types for text and hypothesis
self.text_tokens = tokenizer.tokenize(rtepair.text)
self.hyp_tokens = tokenizer.tokenize(rtepair.hyp)
self.text_words = set(self.text_tokens)
self.hyp_words = set(self.hyp_tokens)
if lemmatize:
self.text_words = set([lemmatize(token) for token in self.text_tokens])
self.hyp_words = set([lemmatize(token) for token in self.hyp_tokens])
if self.stop:
self.text_words = self.text_words - self.stopwords
self.hyp_words = self.hyp_words - self.stopwords
self._overlap = self.hyp_words & self.text_words
self._hyp_extra = self.hyp_words - self.text_words
self._txt_extra = self.text_words - self.hyp_words
def overlap(self, toktype, debug=False):
"""
Compute the overlap between text and hypothesis.
@param toktype: distinguish Named Entities from ordinary words
@type toktype: 'ne' or 'word'
"""
ne_overlap = set([token for token in self._overlap if ne(token)])
if toktype == 'ne':
if debug: print "ne overlap", ne_overlap
return ne_overlap
elif toktype == 'word':
if debug: print "word overlap", self._overlap - ne_overlap
return self._overlap - ne_overlap
else:
raise ValueError("Type not recognized:'%s'" % toktype)
def hyp_extra(self, toktype, debug=True):
"""
Compute the extraneous material in the hypothesis.
@param toktype: distinguish Named Entities from ordinary words
@type toktype: 'ne' or 'word'
"""
ne_extra = set([token for token in self._hyp_extra if ne(token)])
if toktype == 'ne':
return ne_extra
elif toktype == 'word':
return self._hyp_extra - ne_extra
else:
raise ValueError("Type not recognized: '%s'" % toktype)
def rte_features(rtepair):
extractor = RTEFeatureExtractor(rtepair)
features = {}
features['alwayson'] = True
features['word_overlap'] = len(extractor.overlap('word'))
features['word_hyp_extra'] = len(extractor.hyp_extra('word'))
features['ne_overlap'] = len(extractor.overlap('ne'))
features['ne_hyp_extra'] = len(extractor.hyp_extra('ne'))
features['neg_txt'] = len(extractor.negwords & extractor.text_words)
features['neg_hyp'] = len(extractor.negwords & extractor.hyp_words)
return features
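# Sketch of the resulting feature dictionary for a single RTEPair (the counts
# below are illustrative, not taken from the corpus):
#
#     {'alwayson': True, 'word_overlap': 5, 'word_hyp_extra': 2,
#      'ne_overlap': 1, 'ne_hyp_extra': 0, 'neg_txt': 0, 'neg_hyp': 0}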
def rte_classifier(trainer, features=rte_features):
"""
Classify RTEPairs
"""
train = [(pair, pair.value) for pair in nltk.corpus.rte.pairs(['rte1_dev.xml', 'rte2_dev.xml', 'rte3_dev.xml'])]
test = [(pair, pair.value) for pair in nltk.corpus.rte.pairs(['rte1_test.xml', 'rte2_test.xml', 'rte3_test.xml'])]
# Train up a classifier.
print 'Training classifier...'
classifier = trainer( [(features(pair), label) for (pair,label) in train] )
# Run the classifier on the test data.
print 'Testing classifier...'
acc = accuracy(classifier, [(features(pair), label) for (pair,label) in test])
print 'Accuracy: %6.4f' % acc
# Return the classifier
return classifier
def demo_features():
pairs = nltk.corpus.rte.pairs(['rte1_dev.xml'])[:6]
for pair in pairs:
print
for key in sorted(rte_features(pair)):
print "%-15s => %s" % (key, rte_features(pair)[key])
def demo_feature_extractor():
rtepair = nltk.corpus.rte.pairs(['rte3_dev.xml'])[33]
extractor = RTEFeatureExtractor(rtepair)
print extractor.hyp_words
print extractor.overlap('word')
print extractor.overlap('ne')
print extractor.hyp_extra('word')
def demo():
import nltk
try:
nltk.config_megam('/usr/local/bin/megam')
trainer = lambda x: nltk.MaxentClassifier.train(x, 'megam')
except ValueError:
try:
trainer = lambda x: nltk.MaxentClassifier.train(x, 'BFGS')
except ValueError:
trainer = nltk.MaxentClassifier.train
nltk.classify.rte_classifier(trainer)
if __name__ == '__main__':
demo_features()
demo_feature_extractor()
demo()
| tadgh/ArgoRevisit | third_party/nltk/classify/rte_classify.py | Python | apache-2.0 | 6,409 |
# -*- coding: utf-8 -*-
"""attenuation.py - A collection of simple functions implementing various
proposed attenuation curves.
"""
import numpy as np
import warnings, sys
# --------------------
# ATTENUATION CURVES
# --------------------
__all__ = ["calzetti", "chevallard", "conroy", "noll",
"powerlaw", "drude", "broken_powerlaw",
"cardelli", "smc", "lmc"]
def powerlaw(wave, tau_v=1, alpha=1.0, **kwargs):
"""Simple power-law attenuation, normalized to 5500\AA.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
return tau_v * (wave / 5500)**(-alpha)
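# Quick sanity check (illustrative): with alpha=1 the optical depth doubles at
# half the normalization wavelength, since (2750 / 5500)**(-1) == 2.
#
#     >>> powerlaw(np.array([2750., 5500.]), tau_v=1.0, alpha=1.0)  # -> array([2., 1.])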
def calzetti(wave, tau_v=1, R_v=4.05, **kwargs):
"""Calzetti et al. 2000 starburst attenuation curve, with extrapolations to
the FUV and NIR.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the attenuation curve.
:param R_v: (default: 4.05)
The ratio of total selective extinction, parameterizing the slope of
the attenuation curve. A_v = R_v * E(B-V)
:returns tau:
The optical depth at each wavelength.
"""
# optical/NIR
k1 = lambda x: 2.659 * (-1.857 + 1.040 * x)
# UV
k2 = lambda x: 2.659 * (-2.156 + 1.509 * x - 0.198 * x**2. + 0.011 * x**3.)
# get slopes at edges and k(5500)
uv = np.array([0.12, 0.13]) * 1e4
kuv = k2(1e4 / uv) + R_v
uv_slope = np.diff(kuv) / np.diff(uv)
ir = np.array([2.19, 2.20]) * 1e4
kir = k1(1e4 / ir) + R_v
ir_slope = np.diff(kir) / np.diff(ir)
k_v = k2(1e4 / 5500.) + R_v
# define segments
uinds = (wave >= 1200.) & (wave < 6300) # uv
oinds = (wave >= 6300.) & (wave <= 22000) # optical
xinds = (wave < 1200.) # xuv
iinds = (wave > 22000.) # ir
# do it
x = 1e4 / wave
ktot = oinds * (k1(x) + R_v)
ktot += uinds * (k2(x) + R_v)
ktot += xinds * (kuv[0] + (wave - uv[0]) * uv_slope)
ktot += iinds * (kir[1] + (wave - ir[1]) * ir_slope)
ktot[ktot < 0] = 0
tau_lambda = tau_v * (ktot / k_v)
return tau_lambda
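# Illustrative normalization check: at 5500AA the curve returns tau_v itself,
# because ktot / k_v == 1 there.
#
#     >>> calzetti(np.array([5500.]), tau_v=2.0)  # -> array([2.])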
def drude(x, x0=4.59, gamma=0.90, **extras):
"""Drude profile for the 2175AA bump.
:param x:
Inverse wavelength (inverse microns) at which values for the drude
profile are requested.
:param gamma:
Width of the Drude profile (inverse microns).
:param x0:
Center of the Drude profile (inverse microns).
:returns k_lambda:
The value of the Drude profile at x, normalized such that the peak is 1.
"""
#return (w * gamma)**2 / ((w**2 - w0**2)**2 + (w * gamma)**2)
return (x*gamma)**2 / ((x**2 - x0**2)**2 + (x * gamma)**2)
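# Illustrative check: the profile peaks at exactly 1 when x == x0, because the
# (x**2 - x0**2) term vanishes there.
#
#     >>> drude(4.59)  # -> 1.0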
def noll(wave, tau_v=1, delta=0.0, c_r=0.0, Ebump=0.0, **kwargs):
"""Noll 2009 attenuation curve. This is based on the Calzetti curve, with
added variable bump (as Drude) and overall slope change. Any extra
keywords are passed to the Drude (e.g. x0, gamma, both in inverse microns).
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the attenuation curve.
:param Ebump: (default: 0.0)
        Strength of the 2175\AA bump. Normalizes the Drude profile.
:param delta: (default 0.)
Slope of the power-law that modifies the Calzetti curve.
:param c_r:
Constant used to alter R_v=A_V/E(B-V) of the curve. To maintain the
Calzetti R_v=4.05, use c_r = -delta. Note that even with c_r = -delta
the calzetti curve will not be recovered unless delta=0
:returns tau:
The optical depth at each wavelength.
"""
kcalz = calzetti(wave, tau_v=1.0, R_v=4.05) - 1
k = kcalz + Ebump / 4.05 * drude(1e4 / wave, **kwargs)
a = (k * (1 - 1.12 * c_r) + 1) * (wave / 5500.)**delta
return a * tau_v
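# Note (follows directly from the code above, no extra assumptions): with
# Ebump=0, delta=0 and c_r=0 the Noll curve reduces to the plain Calzetti
# form, since a = (kcalz + 1) * (wave / 5500.)**0 = calzetti(wave, tau_v=1).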
def chevallard(wave, tau_v=1, **kwargs):
""" \tau_v dependent attenuation curves matched to disk RT models,
as in Chevallard et al. 2013. No UV bump (or indeed tests in the
UV at all).
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
# missing a UV bump
alpha_v = 2.8 / (1 + np.sqrt(tau_v)) # +/- 25%
bb = 0.3 - 0.05 * tau_v # +/- 10%
alpha = alpha_v + bb * (wave * 1e-4 - 0.55)
tau_lambda = tau_v * (wave / 5500.0)**(-alpha)
return tau_lambda
def conroy(wave, tau_v=1, R_v=3.1, f_bump=0.6, **kwargs):
""" Conroy & Schiminovich 2010 dust attenuation curves including a
decreased UV bump.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:param R_v: (default: 3.1)
The ratio of total selective extinction, parameterizing the
slope of the attenuation curve. A_v = R_v * E(B-V)
:param f_bump: (default: 0.6)
The strength of the 2175\AA UV bump, as a fraction of the bump
strength in Cardelli et al. extinction curve.
:returns tau:
The optical depth at each wavelength.
"""
x = 1e4 / wave
nx = x.shape[0]
a = np.zeros_like(x)
b = np.zeros_like(x)
# IR 0.909 - 3.3 micron
ir = (x >= 0.3) & (x < 1.1)
a[ir] = 0.574 * x[ir]**1.61
b[ir] = -0.527 * x[ir]**1.61
# optical 0.303 - 0.909 micron
opt = (x >= 1.1) & (x < 3.3)
y = x[opt]-1.82
a[opt] = (1 + 0.177 * y - 0.504 * y**2 - 0.0243 * y**3 +
0.721 * y**4 + 0.0198 * y**5 - 0.7750 * y**6 +
0.330 * y**7)
b[opt] = (1.413 * y + 2.283 * y**2 + 1.072 * y**3 -
5.384 * y**4 - 0.622 * y**5 + 5.303 * y**6 -
2.090 * y**7)
# NUV 0.17 to 0.303 micron
nuv = (x >= 3.3) & (x < 5.9)
tmp = (-0.0370 + 0.0469 * f_bump - 0.601 * f_bump / R_v + 0.542 / R_v)
fa = (3.3 / x[nuv])**6. * tmp
tmp = 0.104 * f_bump / ((x[nuv] - 4.67)**2 + 0.341)
a[nuv] = 1.752 - 0.316 * x[nuv] - tmp + fa
tmp = 1.206 * f_bump / ((x[nuv] - 4.62)**2 + 0.263)
b[nuv] = -3.09 + 1.825 * x[nuv] + tmp
# FUV 0.125 - 0.17 micron
fuv = (x >= 5.9) & (x < 8.0)
fa = -0.0447 * (x[fuv] - 5.9)**2.0 - 0.00978 * (x[fuv] - 5.9)**3
fb = 0.213 * (x[fuv] - 5.9)**2. + 0.121 * (x[fuv] - 5.9)**3
tmp = 0.104 * f_bump / ((x[fuv] - 4.67)**2 + 0.341)
a[fuv] = 1.752 - 0.316 * x[fuv] - tmp + fa
tmp = 1.206 * f_bump / ((x[fuv] - 4.62)**2 + 0.263)
b[fuv] = -3.09 + 1.825 * x[fuv] + tmp + fb
alam = (a + b / R_v)
# XUV below 1250AA
xuv = x >= 8.0
x8 = 8.0
fa = -0.0447 * (x8 - 5.9)**2 - 0.00978 * (x8 - 5.9)**3
fb = 0.213 * (x8 - 5.9)**2. + 0.121 * (x8 - 5.9)**3
tmp = 0.104 * f_bump / ((x8 - 4.67)**2 + 0.341)
af = 1.752 - 0.316 * x8 - tmp + fa
tmp = 1.206 * f_bump / ((x8 - 4.62)**2 + 0.263)
bf = -3.09 + 1.825 * x8 + tmp + fb
a8 = (af + bf / R_v)
alam[xuv] = (x8 / x[xuv])**(-1.3) * a8
return tau_v * alam
def broken_powerlaw(wave, tau_v=1, alpha=[0.7, 0.7, 0.7],
breaks=[0, 3000, 10000, 4e4], **kwargs):
""" Attenuation curve as in V. Wild et al. 2011, i.e. power-law
    slope can change between regions. Superseded by Chevallard 2013
for optical/NIR.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
    if len(breaks) != len(alpha) + 1:
        print("make sure of your power law breaks")
    tau = np.zeros(len(wave))
    for i in range(len(alpha)):
        inds = (wave > breaks[i]) & (wave <= breaks[i+1])
        tau[inds] = tau_v * (wave[inds] / 5500)**alpha[i]
return tau
def wg00(wave, tau_v=1, geometry='SHELL', composition='MW',
local='homogenous', **kwargs):
""" Witt+Gordon 2000 DIRTY radiative transfer results, for
idealized geometries.
"""
raise NotImplementedError
# ------------------
# EXTINCTION CURVES
# ------------------
def cardelli(wave, tau_v=1, R_v=3.1, **kwargs):
""" Cardelli, Clayton, and Mathis 1998 Milky Way extinction curve,
with an update in the near-UV from O'Donnell 1994
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:param R_v: (default: 3.1)
The ratio of total selective extinction, parameterizing the
slope of the attenuation curve. A_v = R_v * E(B-V)
:returns tau:
The optical depth at each wavelength.
"""
# if (wave < 1e3).any() :
# warnings.warn('Cardelli: extinction not defined (set to zero) below 1000AA')
mic = wave*1e-4
x_sup, x_inf = 10.0, 0.3
x = 1 / mic
a = np.zeros_like(x)
b = np.zeros_like(x)
w1 = (x >= 1.1) & (x <= 3.3) # Optical 0.303 to 0.909 micron
w2 = (x >= x_inf) & (x < 1.1) # NIR 0.909 to 3.3 micron
w3 = (x > 3.3) & (x <= 8) # UV 0.125 - 0.303 micron
w4 = (x > 8.0) & (x <= x_sup) # XUV, 1000 -1250AA
wsh = x > x_sup
wlg = x < x_inf
y = x[w1] - 1.82
a[w1] = (1 + 0.17699 * y - 0.50447 * y**2. - 0.02427 * y**3. +
0.72085 * y**4. + 0.01979 * y**5. - 0.77530 * y**6. +
0.32999 * y**7.0)
b[w1] = (1.41338 * y + 2.28305 * y**2. + 1.07233 * y**3. -
5.38434 * y**4. - 0.62251 * y**5. + 5.30260 * y**6. -
2.09002 * y**7.)
y = x[w2]**1.61
a[w2] = 0.574 * y
b[w2] = -0.527 * y
fa = x[w3] * 0.
fb = x[w3] * 0.
ou = (x[w3] > 5.9)
# print(type(ou),ou[0], type(w3))
if ou.any():
y = x[w3][ou] - 5.9
fa[ou] = -0.04473 * y**2. - 0.009779 * y**3.
fb[ou] = 0.2130 * y**2. + 0.1207 * y**3.
a[w3] = 1.752 - 0.316 * x[w3] - 0.104 / ((x[w3] - 4.67)**2. + 0.341) + fa
b[w3] = -3.090 + 1.825 * x[w3] + 1.206 / ((x[w3] - 4.62)**2. + 0.263) + fb
y = x[w4] - 8.
a[w4] = -1.073 - 0.628 * y + 0.137 * y**2. - 0.070 * y**3.
b[w4] = 13.670 + 4.257 * y - 0.420 * y**2. + 0.374 * y**3.
tau = a + b / R_v
return tau_v * tau
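# Illustrative normalization check: at 5500AA, x = 1e4 / 5500 ~ 1.82, so y ~ 0
# in the optical branch, giving a ~ 1 and b ~ 0 and hence tau ~ tau_v for any
# reasonable R_v.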
def smc(wave, tau_v=1, **kwargs):
"""Pei 1992 SMC extinction curve.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
if (wave < 1e3).any():
warnings.warn('SMC: extinction extrapolation below 1000AA is poor')
mic = wave * 1e-4
aa = [185., 27., 0.005, 0.010, 0.012, 0.030]
ll = [0.042, 0.08, 0.22, 9.7, 18., 25.]
bb = [90., 5.50, -1.95, -1.95, -1.80, 0.00]
nn = [2.0, 4.0, 2.0, 2.0, 2.0, 2.0]
abs_ab = np.zeros_like(mic)
norm_v = 0 # hack to go from tau_b to tau_v
mic_5500 = 5500 * 1e-4
for i, a in enumerate(aa):
norm_v += aa[i] / ((mic_5500 / ll[i])**nn[i] +
(ll[i] / mic_5500)**nn[i] + bb[i])
abs_ab += aa[i] / ((mic / ll[i])**nn[i] + (ll[i] / mic)**nn[i] + bb[i])
return tau_v * (abs_ab / norm_v)
def lmc(wave, tau_v=1, **kwargs):
""" Pei 1992 LMC extinction curve.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
if (wave < 1e3).any():
warnings.warn('LMC: extinction extrapolation below 1000AA is poor')
mic = wave * 1e-4
aa = [175., 19., 0.023, 0.005, 0.006, 0.020]
ll = [0.046, 0.08, 0.22, 9.7, 18., 25.]
bb = [90., 5.50, -1.95, -1.95, -1.80, 0.00]
nn = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0]
abs_ab = mic * 0.
norm_v = 0 # hack to go from tau_b to tau_v
mic_5500 = 5500 * 1e-4
for i, a in enumerate(aa):
norm_v += aa[i] / ((mic_5500 / ll[i])**nn[i] +
(ll[i] / mic_5500)**nn[i] + bb[i])
abs_ab += aa[i] / ((mic / ll[i])**nn[i] + (ll[i] / mic)**nn[i] + bb[i])
return tau_v * (abs_ab / norm_v)
| bd-j/sedpy | sedpy/attenuation.py | Python | mit | 12,814 |
from __future__ import print_function
import os
from setuptools import setup
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# Setup
# 1. zip_safe needs to be False since we need access to templates
setup(
name="TimeVis",
version="0.2",
author="Ce Gao",
author_email="[email protected]",
description=("TimeVis: An interactive tool to query and visualize "
"time series gene expression data"),
license="MIT",
install_requires=[
"flask",
"Flask-RESTful",
"SQLAlchemy",
"pandas",
"scikits.bootstrap",
],
packages=['timevis'],
package_dir={"timevis": "timevis"},
package_data={
"timevis": [
"db/*.db",
"static/images/*",
"static/js/*.js",
"static/js/lib/*.js",
"static/css/*.css",
"static/css/lib/*.css",
"static/css/lib/images/*",
"templates/*.html",
]
},
long_description=read('README.md'),
entry_points={'console_scripts': ['timevis = timevis.run:main']},
zip_safe=False,
)
| gaoce/TimeVis | setup.py | Python | mit | 1,191 |
# -*- encoding: utf-8 -*-
#
# Copyright © 2016-2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
import tenacity
from gnocchi import carbonara
from gnocchi.common import s3
from gnocchi import storage
from gnocchi import utils
boto3 = s3.boto3
botocore = s3.botocore
OPTS = [
cfg.StrOpt('s3_endpoint_url',
help='S3 endpoint URL'),
cfg.StrOpt('s3_region_name',
default=os.getenv("AWS_DEFAULT_REGION"),
help='S3 region name'),
cfg.StrOpt('s3_access_key_id',
secret=True,
default=os.getenv("AWS_ACCESS_KEY_ID"),
help='S3 access key id'),
cfg.StrOpt('s3_secret_access_key',
secret=True,
default=os.getenv("AWS_SECRET_ACCESS_KEY"),
help='S3 secret access key'),
cfg.StrOpt('s3_bucket_prefix',
# Max bucket length is 63 and we use "-" as separator
# 63 - 1 - len(uuid) = 26
max_length=26,
default='gnocchi',
help='Prefix to namespace metric bucket.'),
cfg.FloatOpt('s3_check_consistency_timeout',
min=0,
default=60,
help="Maximum time to wait checking data consistency when "
"writing to S3. Set to 0 to disable data consistency "
"validation."),
cfg.IntOpt('s3_max_pool_connections',
min=1,
default=50,
help="The maximum number of connections to keep in a "
"connection pool."),
]
def retry_if_operationaborted(exception):
return (isinstance(exception, botocore.exceptions.ClientError)
and exception.response['Error'].get('Code') == "OperationAborted")
class S3Storage(storage.StorageDriver):
WRITE_FULL = True
_consistency_wait = tenacity.wait_exponential(multiplier=0.1)
def __init__(self, conf):
super(S3Storage, self).__init__(conf)
self.s3, self._region_name, self._bucket_prefix = (
s3.get_connection(conf)
)
self._bucket_name = '%s-aggregates' % self._bucket_prefix
if conf.s3_check_consistency_timeout > 0:
self._consistency_stop = tenacity.stop_after_delay(
conf.s3_check_consistency_timeout)
else:
self._consistency_stop = None
def __str__(self):
return "%s: %s" % (self.__class__.__name__, self._bucket_name)
def upgrade(self):
super(S3Storage, self).upgrade()
try:
s3.create_bucket(self.s3, self._bucket_name, self._region_name)
except botocore.exceptions.ClientError as e:
if e.response['Error'].get('Code') != "BucketAlreadyExists":
raise
@staticmethod
def _object_name(split_key, aggregation, version=3):
name = '%s_%s_%s' % (
aggregation,
utils.timespan_total_seconds(split_key.sampling),
split_key,
)
return name + '_v%s' % version if version else name
@staticmethod
def _prefix(metric):
return str(metric.id) + '/'
def _put_object_safe(self, Bucket, Key, Body):
put = self.s3.put_object(Bucket=Bucket, Key=Key, Body=Body)
if self._consistency_stop:
def _head():
return self.s3.head_object(Bucket=Bucket,
Key=Key, IfMatch=put['ETag'])
tenacity.Retrying(
retry=tenacity.retry_if_result(
lambda r: r['ETag'] != put['ETag']),
wait=self._consistency_wait,
stop=self._consistency_stop)(_head)
def _store_metric_splits_unbatched(self, metric, key, aggregation, data,
offset, version):
self._put_object_safe(
Bucket=self._bucket_name,
Key=self._prefix(metric) + self._object_name(
key, aggregation.method, version),
Body=data)
def _delete_metric_splits_unbatched(self, metric, key, aggregation,
version=3):
self.s3.delete_object(
Bucket=self._bucket_name,
Key=self._prefix(metric) + self._object_name(
key, aggregation.method, version))
def _delete_metric(self, metric):
bucket = self._bucket_name
response = {}
while response.get('IsTruncated', True):
if 'NextContinuationToken' in response:
kwargs = {
'ContinuationToken': response['NextContinuationToken']
}
else:
kwargs = {}
try:
response = self.s3.list_objects_v2(
Bucket=bucket, Prefix=self._prefix(metric), **kwargs)
except botocore.exceptions.ClientError as e:
if e.response['Error'].get('Code') == "NoSuchKey":
# Maybe it never has been created (no measure)
return
raise
s3.bulk_delete(self.s3, bucket,
[c['Key'] for c in response.get('Contents', ())])
def _get_splits_unbatched(self, metric, key, aggregation, version=3):
try:
response = self.s3.get_object(
Bucket=self._bucket_name,
Key=self._prefix(metric) + self._object_name(
key, aggregation.method, version))
except botocore.exceptions.ClientError as e:
if e.response['Error'].get('Code') == 'NoSuchKey':
return
raise
return response['Body'].read()
def _metric_exists_p(self, metric, version):
unaggkey = self._build_unaggregated_timeserie_path(metric, version)
try:
self.s3.head_object(Bucket=self._bucket_name, Key=unaggkey)
except botocore.exceptions.ClientError as e:
if e.response['Error'].get('Code') == "404":
return False
raise
return True
def _list_split_keys_unbatched(self, metric, aggregations, version=3):
bucket = self._bucket_name
keys = {}
for aggregation in aggregations:
keys[aggregation] = set()
response = {}
while response.get('IsTruncated', True):
if 'NextContinuationToken' in response:
kwargs = {
'ContinuationToken': response['NextContinuationToken']
}
else:
kwargs = {}
response = self.s3.list_objects_v2(
Bucket=bucket,
Prefix=self._prefix(metric) + '%s_%s' % (
aggregation.method,
utils.timespan_total_seconds(
aggregation.granularity),
),
**kwargs)
# If response is empty then check that the metric exists
contents = response.get('Contents', ())
if not contents and not self._metric_exists_p(metric, version):
raise storage.MetricDoesNotExist(metric)
for f in contents:
try:
if (self._version_check(f['Key'], version)):
meta = f['Key'].split('_')
keys[aggregation].add(carbonara.SplitKey(
utils.to_timestamp(meta[2]),
sampling=aggregation.granularity))
except (ValueError, IndexError):
# Might be "none", or any other file. Be resilient.
continue
return keys
@staticmethod
def _build_unaggregated_timeserie_path(metric, version):
return S3Storage._prefix(metric) + 'none' + ("_v%s" % version
if version else "")
def _get_or_create_unaggregated_timeseries_unbatched(
self, metric, version=3):
key = self._build_unaggregated_timeserie_path(metric, version)
try:
response = self.s3.get_object(
Bucket=self._bucket_name, Key=key)
except botocore.exceptions.ClientError as e:
if e.response['Error'].get('Code') == "NoSuchKey":
# Create the metric with empty data
self._put_object_safe(
Bucket=self._bucket_name, Key=key, Body="")
else:
raise
else:
return response['Body'].read() or None
def _store_unaggregated_timeseries_unbatched(
self, metric, data, version=3):
self._put_object_safe(
Bucket=self._bucket_name,
Key=self._build_unaggregated_timeserie_path(metric, version),
Body=data)
| gnocchixyz/gnocchi | gnocchi/storage/s3.py | Python | apache-2.0 | 9,452 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'TabGeneral.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_TabGeneral(object):
def setupUi(self, TabGeneral):
TabGeneral.setObjectName("TabGeneral")
TabGeneral.resize(963, 704)
self.gridLayout = QtWidgets.QGridLayout(TabGeneral)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.scrollArea = QtWidgets.QScrollArea(TabGeneral)
self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, -289, 947, 1075))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_6 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_6.setContentsMargins(6, 6, 6, 6)
self.gridLayout_6.setHorizontalSpacing(0)
self.gridLayout_6.setVerticalSpacing(10)
self.gridLayout_6.setObjectName("gridLayout_6")
self.frame = CollapsablePanel(self.scrollAreaWidgetContents)
self.frame.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame.setObjectName("frame")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_13 = CollapsablePanelHeader(self.frame)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_13.setFont(font)
self.label_13.setObjectName("label_13")
self.verticalLayout_4.addWidget(self.label_13)
self.line_2 = QtWidgets.QFrame(self.frame)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout_4.addWidget(self.line_2)
self.frame_3 = CollapsablePanelContent(self.frame)
self.frame_3.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_3.setObjectName("frame_3")
self.gridLayout_2 = QtWidgets.QGridLayout(self.frame_3)
self.gridLayout_2.setContentsMargins(6, 6, 0, 0)
self.gridLayout_2.setHorizontalSpacing(6)
self.gridLayout_2.setVerticalSpacing(3)
self.gridLayout_2.setObjectName("gridLayout_2")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setContentsMargins(3, -1, -1, -1)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_10 = QtWidgets.QLabel(self.frame_3)
self.label_10.setObjectName("label_10")
self.horizontalLayout_7.addWidget(self.label_10)
self.maximumDimension = QtWidgets.QLabel(self.frame_3)
self.maximumDimension.setObjectName("maximumDimension")
self.horizontalLayout_7.addWidget(self.maximumDimension)
self.gridLayout_2.addLayout(self.horizontalLayout_7, 0, 2, 1, 1)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setContentsMargins(3, -1, -1, -1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_9 = QtWidgets.QLabel(self.frame_3)
self.label_9.setObjectName("label_9")
self.horizontalLayout_6.addWidget(self.label_9)
self.maximumIndex = QtWidgets.QLabel(self.frame_3)
self.maximumIndex.setObjectName("maximumIndex")
self.horizontalLayout_6.addWidget(self.maximumIndex)
self.gridLayout_2.addLayout(self.horizontalLayout_6, 1, 2, 1, 1)
self.label_14 = QtWidgets.QLabel(self.frame_3)
self.label_14.setObjectName("label_14")
self.gridLayout_2.addWidget(self.label_14, 0, 0, 1, 1)
self.general_dimension = QtWidgets.QSpinBox(self.frame_3)
self.general_dimension.setObjectName("general_dimension")
self.gridLayout_2.addWidget(self.general_dimension, 0, 1, 1, 1)
self.label_16 = QtWidgets.QLabel(self.frame_3)
self.label_16.setObjectName("label_16")
self.gridLayout_2.addWidget(self.label_16, 1, 0, 1, 1)
self.general_slice_index = QtWidgets.QSpinBox(self.frame_3)
self.general_slice_index.setObjectName("general_slice_index")
self.gridLayout_2.addWidget(self.general_slice_index, 1, 1, 1, 1)
self.label_15 = QtWidgets.QLabel(self.frame_3)
self.label_15.setObjectName("label_15")
self.gridLayout_2.addWidget(self.label_15, 2, 0, 1, 1)
self.general_volume_index = QtWidgets.QSpinBox(self.frame_3)
self.general_volume_index.setObjectName("general_volume_index")
self.gridLayout_2.addWidget(self.general_volume_index, 2, 1, 1, 1)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setContentsMargins(3, -1, -1, -1)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.label_25 = QtWidgets.QLabel(self.frame_3)
self.label_25.setObjectName("label_25")
self.horizontalLayout_10.addWidget(self.label_25)
self.maximumVolume = QtWidgets.QLabel(self.frame_3)
self.maximumVolume.setObjectName("maximumVolume")
self.horizontalLayout_10.addWidget(self.maximumVolume)
self.gridLayout_2.addLayout(self.horizontalLayout_10, 2, 2, 1, 1)
self.gridLayout_2.setColumnStretch(1, 1)
self.verticalLayout_4.addWidget(self.frame_3)
self.gridLayout_6.addWidget(self.frame, 0, 0, 1, 1)
self.general_Miscellaneous = CollapsablePanel(self.scrollAreaWidgetContents)
self.general_Miscellaneous.setFrameShape(QtWidgets.QFrame.NoFrame)
self.general_Miscellaneous.setFrameShadow(QtWidgets.QFrame.Plain)
self.general_Miscellaneous.setObjectName("general_Miscellaneous")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.general_Miscellaneous)
self.verticalLayout_8.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_8.setSpacing(0)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.label_23 = CollapsablePanelHeader(self.general_Miscellaneous)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_23.setFont(font)
self.label_23.setObjectName("label_23")
self.verticalLayout_8.addWidget(self.label_23)
self.line_6 = QtWidgets.QFrame(self.general_Miscellaneous)
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.verticalLayout_8.addWidget(self.line_6)
self.frame_10 = CollapsablePanelContent(self.general_Miscellaneous)
self.frame_10.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_10.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_10.setObjectName("frame_10")
self.formLayout = QtWidgets.QFormLayout(self.frame_10)
self.formLayout.setContentsMargins(6, 6, 0, 0)
self.formLayout.setHorizontalSpacing(6)
self.formLayout.setVerticalSpacing(3)
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(self.frame_10)
self.label.setObjectName("label")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label)
self.general_colormap = QtWidgets.QComboBox(self.frame_10)
self.general_colormap.setObjectName("general_colormap")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.general_colormap)
self.label_2 = QtWidgets.QLabel(self.frame_10)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.general_rotate = QtWidgets.QComboBox(self.frame_10)
self.general_rotate.setObjectName("general_rotate")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.general_rotate)
self.label_4 = QtWidgets.QLabel(self.frame_10)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.general_show_axis = QtWidgets.QCheckBox(self.frame_10)
self.general_show_axis.setText("")
self.general_show_axis.setObjectName("general_show_axis")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.general_show_axis)
self.label_11 = QtWidgets.QLabel(self.frame_10)
self.label_11.setObjectName("label_11")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_11)
self.general_flipud = QtWidgets.QCheckBox(self.frame_10)
self.general_flipud.setText("")
self.general_flipud.setObjectName("general_flipud")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.general_flipud)
self.label_27 = QtWidgets.QLabel(self.frame_10)
self.label_27.setObjectName("label_27")
self.formLayout.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.label_27)
self.general_interpolation = QtWidgets.QComboBox(self.frame_10)
self.general_interpolation.setObjectName("general_interpolation")
self.formLayout.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.general_interpolation)
self.label_28 = QtWidgets.QLabel(self.frame_10)
self.label_28.setObjectName("label_28")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_28)
self.plot_title = QtWidgets.QLineEdit(self.frame_10)
self.plot_title.setObjectName("plot_title")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.plot_title)
self.label_29 = QtWidgets.QLabel(self.frame_10)
self.label_29.setObjectName("label_29")
self.formLayout.setWidget(8, QtWidgets.QFormLayout.LabelRole, self.label_29)
self.mask_name = QtWidgets.QComboBox(self.frame_10)
self.mask_name.setObjectName("mask_name")
self.formLayout.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.mask_name)
self.label_33 = QtWidgets.QLabel(self.frame_10)
self.label_33.setObjectName("label_33")
self.formLayout.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.label_33)
self.general_show_plot_titles = QtWidgets.QCheckBox(self.frame_10)
self.general_show_plot_titles.setText("")
self.general_show_plot_titles.setObjectName("general_show_plot_titles")
self.formLayout.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.general_show_plot_titles)
self.verticalLayout_8.addWidget(self.frame_10)
self.gridLayout_6.addWidget(self.general_Miscellaneous, 8, 0, 1, 1)
self.general_DisplayOrder = CollapsablePanel(self.scrollAreaWidgetContents)
self.general_DisplayOrder.setFrameShape(QtWidgets.QFrame.NoFrame)
self.general_DisplayOrder.setFrameShadow(QtWidgets.QFrame.Plain)
self.general_DisplayOrder.setObjectName("general_DisplayOrder")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.general_DisplayOrder)
self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_7.setSpacing(0)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.label_22 = CollapsablePanelHeader(self.general_DisplayOrder)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_22.setFont(font)
self.label_22.setObjectName("label_22")
self.verticalLayout_7.addWidget(self.label_22)
self.line_5 = QtWidgets.QFrame(self.general_DisplayOrder)
self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
self.verticalLayout_7.addWidget(self.line_5)
self.frame_9 = CollapsablePanelContent(self.general_DisplayOrder)
self.frame_9.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_9.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_9.setObjectName("frame_9")
self.gridLayout_11 = QtWidgets.QGridLayout(self.frame_9)
self.gridLayout_11.setContentsMargins(6, 6, 0, 0)
self.gridLayout_11.setHorizontalSpacing(6)
self.gridLayout_11.setVerticalSpacing(3)
self.gridLayout_11.setObjectName("gridLayout_11")
self.general_display_order = MapsReorderer(self.frame_9)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.general_display_order.sizePolicy().hasHeightForWidth())
self.general_display_order.setSizePolicy(sizePolicy)
self.general_display_order.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
self.general_display_order.setObjectName("general_display_order")
self.gridLayout_11.addWidget(self.general_display_order, 0, 0, 1, 1)
self.verticalLayout_7.addWidget(self.frame_9)
self.gridLayout_6.addWidget(self.general_DisplayOrder, 4, 0, 1, 1)
self.frame_6 = CollapsablePanel(self.scrollAreaWidgetContents)
self.frame_6.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_6.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_6.setObjectName("frame_6")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.frame_6)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setSpacing(0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.label_21 = CollapsablePanelHeader(self.frame_6)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_21.setFont(font)
self.label_21.setObjectName("label_21")
self.verticalLayout_6.addWidget(self.label_21)
self.line_4 = QtWidgets.QFrame(self.frame_6)
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.verticalLayout_6.addWidget(self.line_4)
self.frame_7 = CollapsablePanelContent(self.frame_6)
self.frame_7.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_7.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_7.setObjectName("frame_7")
self.gridLayout_5 = QtWidgets.QGridLayout(self.frame_7)
self.gridLayout_5.setContentsMargins(6, 6, 0, 0)
self.gridLayout_5.setHorizontalSpacing(6)
self.gridLayout_5.setVerticalSpacing(3)
self.gridLayout_5.setObjectName("gridLayout_5")
self.general_map_selection = QtWidgets.QListWidget(self.frame_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.general_map_selection.sizePolicy().hasHeightForWidth())
self.general_map_selection.setSizePolicy(sizePolicy)
self.general_map_selection.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
self.general_map_selection.setObjectName("general_map_selection")
self.gridLayout_5.addWidget(self.general_map_selection, 0, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setContentsMargins(-1, -1, -1, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.general_deselect_all_maps = QtWidgets.QPushButton(self.frame_7)
self.general_deselect_all_maps.setObjectName("general_deselect_all_maps")
self.horizontalLayout.addWidget(self.general_deselect_all_maps)
self.general_invert_map_selection = QtWidgets.QPushButton(self.frame_7)
self.general_invert_map_selection.setObjectName("general_invert_map_selection")
self.horizontalLayout.addWidget(self.general_invert_map_selection)
self.gridLayout_5.addLayout(self.horizontalLayout, 1, 0, 1, 1)
self.verticalLayout_6.addWidget(self.frame_7)
self.gridLayout_6.addWidget(self.frame_6, 2, 0, 1, 1)
self.general_Info = CollapsablePanel(self.scrollAreaWidgetContents)
self.general_Info.setFrameShape(QtWidgets.QFrame.NoFrame)
self.general_Info.setFrameShadow(QtWidgets.QFrame.Plain)
self.general_Info.setObjectName("general_Info")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.general_Info)
self.verticalLayout_9.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_9.setSpacing(0)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.label_24 = CollapsablePanelHeader(self.general_Info)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_24.setFont(font)
self.label_24.setObjectName("label_24")
self.verticalLayout_9.addWidget(self.label_24)
self.line_7 = QtWidgets.QFrame(self.general_Info)
self.line_7.setFrameShape(QtWidgets.QFrame.HLine)
self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_7.setObjectName("line_7")
self.verticalLayout_9.addWidget(self.line_7)
self.frame_8 = CollapsablePanelContent(self.general_Info)
self.frame_8.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_8.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_8.setObjectName("frame_8")
self.gridLayout_12 = QtWidgets.QGridLayout(self.frame_8)
self.gridLayout_12.setContentsMargins(6, 6, 0, 0)
self.gridLayout_12.setHorizontalSpacing(6)
self.gridLayout_12.setVerticalSpacing(3)
self.gridLayout_12.setObjectName("gridLayout_12")
self.label_7 = QtWidgets.QLabel(self.frame_8)
self.label_7.setObjectName("label_7")
self.gridLayout_12.addWidget(self.label_7, 1, 0, 1, 1)
self.general_info_nmr_maps = QtWidgets.QLabel(self.frame_8)
self.general_info_nmr_maps.setObjectName("general_info_nmr_maps")
self.gridLayout_12.addWidget(self.general_info_nmr_maps, 1, 1, 1, 1)
self.verticalLayout_9.addWidget(self.frame_8)
self.gridLayout_6.addWidget(self.general_Info, 10, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_6.addItem(spacerItem, 11, 0, 1, 1)
self.general_Font = CollapsablePanel(self.scrollAreaWidgetContents)
self.general_Font.setFrameShape(QtWidgets.QFrame.NoFrame)
self.general_Font.setFrameShadow(QtWidgets.QFrame.Plain)
self.general_Font.setObjectName("general_Font")
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.general_Font)
self.verticalLayout_10.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_10.setSpacing(0)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.label_26 = CollapsablePanelHeader(self.general_Font)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_26.setFont(font)
self.label_26.setObjectName("label_26")
self.verticalLayout_10.addWidget(self.label_26)
self.line_8 = QtWidgets.QFrame(self.general_Font)
self.line_8.setFrameShape(QtWidgets.QFrame.HLine)
self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_8.setObjectName("line_8")
self.verticalLayout_10.addWidget(self.line_8)
self.frame_11 = CollapsablePanelContent(self.general_Font)
self.frame_11.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_11.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_11.setObjectName("frame_11")
self.formLayout_2 = QtWidgets.QFormLayout(self.frame_11)
self.formLayout_2.setContentsMargins(6, 6, 0, 0)
self.formLayout_2.setHorizontalSpacing(6)
self.formLayout_2.setVerticalSpacing(3)
self.formLayout_2.setObjectName("formLayout_2")
self.label_6 = QtWidgets.QLabel(self.frame_11)
self.label_6.setObjectName("label_6")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_6)
self.general_font_family = QtWidgets.QComboBox(self.frame_11)
self.general_font_family.setObjectName("general_font_family")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.general_font_family)
self.label_8 = QtWidgets.QLabel(self.frame_11)
self.label_8.setObjectName("label_8")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_8)
self.general_font_size = QtWidgets.QSpinBox(self.frame_11)
self.general_font_size.setMinimum(1)
self.general_font_size.setObjectName("general_font_size")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.general_font_size)
self.verticalLayout_10.addWidget(self.frame_11)
self.gridLayout_6.addWidget(self.general_Font, 6, 0, 1, 1)
self.general_Zoom = CollapsablePanel(self.scrollAreaWidgetContents)
self.general_Zoom.setFrameShape(QtWidgets.QFrame.NoFrame)
self.general_Zoom.setFrameShadow(QtWidgets.QFrame.Plain)
self.general_Zoom.setObjectName("general_Zoom")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.general_Zoom)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setSpacing(0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.label_17 = CollapsablePanelHeader(self.general_Zoom)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_17.setFont(font)
self.label_17.setObjectName("label_17")
self.verticalLayout_5.addWidget(self.label_17)
self.line_3 = QtWidgets.QFrame(self.general_Zoom)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.verticalLayout_5.addWidget(self.line_3)
self.frame_5 = CollapsablePanelContent(self.general_Zoom)
self.frame_5.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_5.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_5.setObjectName("frame_5")
self.gridLayout_10 = QtWidgets.QGridLayout(self.frame_5)
self.gridLayout_10.setContentsMargins(6, 6, 0, 0)
self.gridLayout_10.setHorizontalSpacing(6)
self.gridLayout_10.setVerticalSpacing(3)
self.gridLayout_10.setObjectName("gridLayout_10")
self.general_zoom_x_1 = QtWidgets.QSpinBox(self.frame_5)
self.general_zoom_x_1.setObjectName("general_zoom_x_1")
self.gridLayout_10.addWidget(self.general_zoom_x_1, 1, 3, 1, 1)
self.label_19 = QtWidgets.QLabel(self.frame_5)
self.label_19.setObjectName("label_19")
self.gridLayout_10.addWidget(self.label_19, 7, 0, 1, 1)
self.label_20 = QtWidgets.QLabel(self.frame_5)
self.label_20.setObjectName("label_20")
self.gridLayout_10.addWidget(self.label_20, 1, 0, 1, 1)
self.general_zoom_x_0 = QtWidgets.QSpinBox(self.frame_5)
self.general_zoom_x_0.setObjectName("general_zoom_x_0")
self.gridLayout_10.addWidget(self.general_zoom_x_0, 1, 1, 1, 1)
self.general_zoom_y_1 = QtWidgets.QSpinBox(self.frame_5)
self.general_zoom_y_1.setObjectName("general_zoom_y_1")
self.gridLayout_10.addWidget(self.general_zoom_y_1, 7, 3, 1, 1)
self.label_18 = QtWidgets.QLabel(self.frame_5)
self.label_18.setObjectName("label_18")
self.gridLayout_10.addWidget(self.label_18, 7, 2, 1, 1)
self.general_zoom_y_0 = QtWidgets.QSpinBox(self.frame_5)
self.general_zoom_y_0.setObjectName("general_zoom_y_0")
self.gridLayout_10.addWidget(self.general_zoom_y_0, 7, 1, 1, 1)
self.label_12 = QtWidgets.QLabel(self.frame_5)
self.label_12.setObjectName("label_12")
self.gridLayout_10.addWidget(self.label_12, 1, 2, 1, 1)
self.general_zoom_fit = QtWidgets.QPushButton(self.frame_5)
self.general_zoom_fit.setObjectName("general_zoom_fit")
self.gridLayout_10.addWidget(self.general_zoom_fit, 8, 1, 1, 1)
self.general_zoom_reset = QtWidgets.QPushButton(self.frame_5)
self.general_zoom_reset.setObjectName("general_zoom_reset")
self.gridLayout_10.addWidget(self.general_zoom_reset, 8, 3, 1, 1)
self.gridLayout_10.setColumnStretch(1, 1)
self.gridLayout_10.setColumnStretch(3, 1)
self.verticalLayout_5.addWidget(self.frame_5)
self.gridLayout_6.addWidget(self.general_Zoom, 5, 0, 1, 1)
self.general_Colorbar = CollapsablePanel(self.scrollAreaWidgetContents)
self.general_Colorbar.setFrameShape(QtWidgets.QFrame.NoFrame)
self.general_Colorbar.setFrameShadow(QtWidgets.QFrame.Plain)
self.general_Colorbar.setObjectName("general_Colorbar")
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.general_Colorbar)
self.verticalLayout_11.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_11.setSpacing(0)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.label_30 = CollapsablePanelHeader(self.general_Colorbar)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_30.setFont(font)
self.label_30.setObjectName("label_30")
self.verticalLayout_11.addWidget(self.label_30)
self.line_9 = QtWidgets.QFrame(self.general_Colorbar)
self.line_9.setFrameShape(QtWidgets.QFrame.HLine)
self.line_9.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_9.setObjectName("line_9")
self.verticalLayout_11.addWidget(self.line_9)
self.frame_12 = CollapsablePanelContent(self.general_Colorbar)
self.frame_12.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_12.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_12.setObjectName("frame_12")
self.formLayout_3 = QtWidgets.QFormLayout(self.frame_12)
self.formLayout_3.setContentsMargins(6, 6, 0, 0)
self.formLayout_3.setHorizontalSpacing(6)
self.formLayout_3.setVerticalSpacing(3)
self.formLayout_3.setObjectName("formLayout_3")
self.label_5 = QtWidgets.QLabel(self.frame_12)
self.label_5.setObjectName("label_5")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.general_colorbar_nmr_ticks = QtWidgets.QSpinBox(self.frame_12)
self.general_colorbar_nmr_ticks.setMinimum(2)
self.general_colorbar_nmr_ticks.setProperty("value", 10)
self.general_colorbar_nmr_ticks.setObjectName("general_colorbar_nmr_ticks")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.general_colorbar_nmr_ticks)
self.label_31 = QtWidgets.QLabel(self.frame_12)
self.label_31.setObjectName("label_31")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_31)
self.general_show_colorbar = QtWidgets.QCheckBox(self.frame_12)
self.general_show_colorbar.setText("")
self.general_show_colorbar.setObjectName("general_show_colorbar")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.general_show_colorbar)
self.label_32 = QtWidgets.QLabel(self.frame_12)
self.label_32.setObjectName("label_32")
self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_32)
self.general_colorbar_location = QtWidgets.QComboBox(self.frame_12)
self.general_colorbar_location.setObjectName("general_colorbar_location")
self.general_colorbar_location.addItem("")
self.general_colorbar_location.addItem("")
self.general_colorbar_location.addItem("")
self.general_colorbar_location.addItem("")
self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.general_colorbar_location)
self.verticalLayout_11.addWidget(self.frame_12)
self.gridLayout_6.addWidget(self.general_Colorbar, 7, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1)
self.retranslateUi(TabGeneral)
QtCore.QMetaObject.connectSlotsByName(TabGeneral)
TabGeneral.setTabOrder(self.scrollArea, self.general_dimension)
TabGeneral.setTabOrder(self.general_dimension, self.general_slice_index)
TabGeneral.setTabOrder(self.general_slice_index, self.general_volume_index)
TabGeneral.setTabOrder(self.general_volume_index, self.general_zoom_x_0)
TabGeneral.setTabOrder(self.general_zoom_x_0, self.general_zoom_y_0)
TabGeneral.setTabOrder(self.general_zoom_y_0, self.general_zoom_x_1)
TabGeneral.setTabOrder(self.general_zoom_x_1, self.general_zoom_y_1)
TabGeneral.setTabOrder(self.general_zoom_y_1, self.general_map_selection)
TabGeneral.setTabOrder(self.general_map_selection, self.general_display_order)
TabGeneral.setTabOrder(self.general_display_order, self.general_colormap)
TabGeneral.setTabOrder(self.general_colormap, self.general_rotate)
TabGeneral.setTabOrder(self.general_rotate, self.general_show_axis)
def retranslateUi(self, TabGeneral):
_translate = QtCore.QCoreApplication.translate
TabGeneral.setWindowTitle(_translate("TabGeneral", "Form"))
self.label_13.setText(_translate("TabGeneral", "Index"))
self.label_10.setText(_translate("TabGeneral", "/ "))
self.maximumDimension.setText(_translate("TabGeneral", "x"))
self.label_9.setText(_translate("TabGeneral", "/ "))
self.maximumIndex.setText(_translate("TabGeneral", "x"))
self.label_14.setText(_translate("TabGeneral", "Dimension:"))
self.label_16.setText(_translate("TabGeneral", "Slice index:"))
self.label_15.setText(_translate("TabGeneral", "Volume:"))
self.label_25.setText(_translate("TabGeneral", "/ "))
self.maximumVolume.setText(_translate("TabGeneral", "x"))
self.label_23.setText(_translate("TabGeneral", "Miscellaneous"))
self.label.setText(_translate("TabGeneral", "Colormap:"))
self.label_2.setText(_translate("TabGeneral", "Rotate:"))
self.label_4.setText(_translate("TabGeneral", "Show axis:"))
self.label_11.setText(_translate("TabGeneral", "Flip up/down:"))
self.label_27.setText(_translate("TabGeneral", "Interpolation:"))
self.label_28.setText(_translate("TabGeneral", "Plot title:"))
self.label_29.setText(_translate("TabGeneral", "Mask:"))
self.label_33.setText(_translate("TabGeneral", "Show plot titles:"))
self.label_22.setText(_translate("TabGeneral", "Display order"))
self.label_21.setText(_translate("TabGeneral", "Map selection"))
self.general_deselect_all_maps.setText(_translate("TabGeneral", "Deselect all"))
self.general_invert_map_selection.setText(_translate("TabGeneral", "Invert selection"))
self.label_24.setText(_translate("TabGeneral", "Info"))
self.label_7.setText(_translate("TabGeneral", "Map count:"))
self.general_info_nmr_maps.setText(_translate("TabGeneral", "-"))
self.label_26.setText(_translate("TabGeneral", "Font"))
self.label_6.setText(_translate("TabGeneral", "Family:"))
self.label_8.setText(_translate("TabGeneral", "Size:"))
self.label_17.setText(_translate("TabGeneral", "Zoom"))
self.label_19.setText(_translate("TabGeneral", "y<sub>0</sub>:"))
self.label_20.setText(_translate("TabGeneral", "<html><head/><body><p>x<span style=\" vertical-align:sub;\">0</span>:</p></body></html>"))
self.label_18.setText(_translate("TabGeneral", "y<sub>1</sub>:"))
self.label_12.setText(_translate("TabGeneral", "x<sub>1</sub>:"))
self.general_zoom_fit.setText(_translate("TabGeneral", "Zoom fit"))
self.general_zoom_reset.setText(_translate("TabGeneral", "Reset"))
self.label_30.setText(_translate("TabGeneral", "Colorbar"))
self.label_5.setText(_translate("TabGeneral", "Number of ticks:"))
self.label_31.setText(_translate("TabGeneral", "Show colorbar:"))
self.label_32.setText(_translate("TabGeneral", "Location:"))
self.general_colorbar_location.setItemText(0, _translate("TabGeneral", "Right"))
self.general_colorbar_location.setItemText(1, _translate("TabGeneral", "Bottom"))
self.general_colorbar_location.setItemText(2, _translate("TabGeneral", "Left"))
self.general_colorbar_location.setItemText(3, _translate("TabGeneral", "Top"))
from ..widgets import CollapsablePanel, CollapsablePanelContent, CollapsablePanelHeader, MapsReorderer
| cbclab/MDT | mdt/gui/maps_visualizer/design/ui_TabGeneral.py | Python | lgpl-3.0 | 33,361 |
from django.apps import AppConfig
class MovieBookingConfig(AppConfig):
name = 'movie_booking'
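    # Typically referenced from INSTALLED_APPS as 'movie_booking' or as
    # 'movie_booking.apps.MovieBookingConfig' (assumed; not shown in this snippet).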
| Mtudy/movie-booking-demo | app/movie_booking/apps.py | Python | mit | 100 |
'''
Produce the minimum, maximum and their difference for each list of numbers
Status: Accepted
'''
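# Assumed I/O, inferred from the parsing below: each input line starts with a
# count followed by that many integers, e.g. the line "6 1 2 3 4 5 6" prints
# "Case 1: 1 6 5" (minimum, maximum, and their difference).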
###############################################################################
def read_line_of_integers():
"""Read one line of numbers or detect EOF"""
try:
text = input()
return [int(i) for i in text.split()][1:]
except EOFError:
pass
return None
###############################################################################
def main():
"""Read input and print output statistics about list of numbers"""
test_case = 0
while True:
numbers = read_line_of_integers()
if numbers:
test_case += 1
mini = min(numbers)
maxi = max(numbers)
print('Case {0}: {1} {2} {3}'.format(test_case, mini, maxi, maxi - mini))
else:
break
###############################################################################
if __name__ == '__main__':
main()
| ivanlyon/exercises | kattis/k_statistics.py | Python | mit | 970 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mongokit.helpers import DotCollapsedDict
from mongokit.mongo_exceptions import UpdateQueryError
from mongokit.mongo_exceptions import OperationFailure
class DocumentMigration(object):
def __init__(self, doc_class):
self.doc_class = doc_class
self.target = None
self.update = None
self.doc = None
self.collection = None
self.status = None
def clean(self):
self.target = None
self.update = None
self.doc = None
self.collection = None
self.status = False
def validate_update(self, update_query):
structure = DotCollapsedDict(self.doc_class.structure)
for op, fields in update_query.iteritems():
for field in fields:
if op != '$unset' and op != '$rename':
if field not in structure:
raise UpdateQueryError("'%s' not found in %s's structure" % (
field, self.doc_class.__name__))
def migrate(self, doc, safe=True):
"""migrate the doc through all migration process"""
method_names = sorted([i for i in dir(self) if i.startswith('migration')])
for method_name in method_names:
self.clean()
self.doc = doc
getattr(self, method_name)()
if self.target and self.update:
if '_id' in doc:
self.target['_id'] = doc['_id']
doc.collection.update(self.target, self.update, multi=False, safe=safe)
# reload
try:
doc.update(doc.collection.get_from_id(doc['_id']))
except:
raise OperationFailure('Can not reload an unsaved document. '
'%s is not found in the database' % doc['_id'])
# self.reload()
def migrate_all(self, collection, safe=True):
method_names = sorted([i for i in dir(self) if i.startswith('allmigration')])
for method_name in method_names:
self.clean()
self.collection = collection
getattr(self, method_name)()
if self.target and self.update:
self.validate_update(self.update)
collection.update(self.target, self.update, multi=True, safe=safe)
status = collection.database.last_status()
if not status.get('updatedExisting', 1):
print "%s : %s >>> deprecated" % (self.__class__.__name__, method_name)
def get_deprecated(self, collection):
method_names = sorted([i for i in dir(self) if i.startswith('migration') or i.startswith('allmigration')])
deprecated = []
active = []
for method_name in method_names:
self.clean()
self.status = True
getattr(self, method_name)()
if not collection.find(self.target).count():
deprecated.append(method_name)
else:
active.append(method_name)
return {'deprecated': deprecated, 'active': active}
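# Hedged usage sketch (not part of mongokit itself): a concrete migration
# subclass defines sorted 'migration*' / 'allmigration*' methods that fill in
# self.target (the match query) and self.update (the update document), e.g.:
#
#     class BlogPostMigration(DocumentMigration):
#         def allmigration01__add_tags(self):
#             self.target = {'tags': {'$exists': False}}
#             self.update = {'$set': {'tags': []}}
#
#     BlogPostMigration(BlogPost).migrate_all(collection=db.blogposts)
#
# BlogPost, db and the field names above are illustrative assumptions.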
| wshcdr/mongokit | mongokit/migration.py | Python | bsd-3-clause | 4,737 |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from ctypes import *
import unicodedata
import warnings
from pyglet import compat_platform
if compat_platform not in ('cygwin', 'win32'):
raise ImportError('Not a win32 platform.')
import pyglet
from pyglet.window import BaseWindow, \
WindowException, MouseCursor, DefaultMouseCursor, _PlatformEventHandler, \
_ViewEventHandler
from pyglet.event import EventDispatcher
from pyglet.window import key
from pyglet.window import mouse
from pyglet.canvas.win32 import Win32Canvas
from pyglet.libs.win32 import _user32, _kernel32, _gdi32
from pyglet.libs.win32.constants import *
from pyglet.libs.win32.winkey import *
from pyglet.libs.win32.types import *
# symbol,ctrl -> motion mapping
_motion_map = {
(key.UP, False): key.MOTION_UP,
(key.RIGHT, False): key.MOTION_RIGHT,
(key.DOWN, False): key.MOTION_DOWN,
(key.LEFT, False): key.MOTION_LEFT,
(key.RIGHT, True): key.MOTION_NEXT_WORD,
(key.LEFT, True): key.MOTION_PREVIOUS_WORD,
(key.HOME, False): key.MOTION_BEGINNING_OF_LINE,
(key.END, False): key.MOTION_END_OF_LINE,
(key.PAGEUP, False): key.MOTION_PREVIOUS_PAGE,
(key.PAGEDOWN, False): key.MOTION_NEXT_PAGE,
(key.HOME, True): key.MOTION_BEGINNING_OF_FILE,
(key.END, True): key.MOTION_END_OF_FILE,
(key.BACKSPACE, False): key.MOTION_BACKSPACE,
(key.DELETE, False): key.MOTION_DELETE,
}
class Win32MouseCursor(MouseCursor):
drawable = False
def __init__(self, cursor):
self.cursor = cursor
# This is global state; we have to be careful not to set the same state twice,
# which would throw off the ShowCursor counter.
_win32_cursor_visible = True
Win32EventHandler = _PlatformEventHandler
ViewEventHandler = _ViewEventHandler
class Win32Window(BaseWindow):
_window_class = None
_hwnd = None
_dc = None
_wgl_context = None
_tracking = False
_hidden = False
_has_focus = False
_exclusive_keyboard = False
_exclusive_keyboard_focus = True
_exclusive_mouse = False
_exclusive_mouse_focus = True
_exclusive_mouse_screen = None
_exclusive_mouse_client = None
_mouse_platform_visible = True
_ws_style = 0
_ex_ws_style = 0
_minimum_size = None
_maximum_size = None
def __init__(self, *args, **kwargs):
# Bind event handlers
self._event_handlers = {}
self._view_event_handlers = {}
for func_name in self._platform_event_names:
if not hasattr(self, func_name):
continue
func = getattr(self, func_name)
for message in func._platform_event_data:
if hasattr(func, '_view'):
self._view_event_handlers[message] = func
else:
self._event_handlers[message] = func
super(Win32Window, self).__init__(*args, **kwargs)
def _recreate(self, changes):
if 'context' in changes:
self._wgl_context = None
self._create()
def _create(self):
# Ensure style is set before determining width/height.
if self._fullscreen:
self._ws_style = WS_POPUP
self._ex_ws_style = 0 # WS_EX_TOPMOST
else:
styles = {
self.WINDOW_STYLE_DEFAULT: (WS_OVERLAPPEDWINDOW, 0),
self.WINDOW_STYLE_DIALOG: (WS_OVERLAPPED|WS_CAPTION|WS_SYSMENU,
WS_EX_DLGMODALFRAME),
self.WINDOW_STYLE_TOOL: (WS_OVERLAPPED|WS_CAPTION|WS_SYSMENU,
WS_EX_TOOLWINDOW),
self.WINDOW_STYLE_BORDERLESS: (WS_POPUP, 0),
}
self._ws_style, self._ex_ws_style = styles[self._style]
if self._resizable and not self._fullscreen:
self._ws_style |= WS_THICKFRAME
else:
self._ws_style &= ~(WS_THICKFRAME|WS_MAXIMIZEBOX)
if self._fullscreen:
width = self.screen.width
height = self.screen.height
else:
width, height = \
self._client_to_window_size(self._width, self._height)
if not self._window_class:
module = _kernel32.GetModuleHandleW(None)
white = _gdi32.GetStockObject(WHITE_BRUSH)
black = _gdi32.GetStockObject(BLACK_BRUSH)
self._window_class = WNDCLASS()
self._window_class.lpszClassName = u'GenericAppClass%d' % id(self)
self._window_class.lpfnWndProc = WNDPROC(self._wnd_proc)
self._window_class.style = CS_VREDRAW | CS_HREDRAW
self._window_class.hInstance = 0
self._window_class.hIcon = _user32.LoadIconW(module, MAKEINTRESOURCE(1))
self._window_class.hbrBackground = black
self._window_class.lpszMenuName = None
self._window_class.cbClsExtra = 0
self._window_class.cbWndExtra = 0
_user32.RegisterClassW(byref(self._window_class))
self._view_window_class = WNDCLASS()
self._view_window_class.lpszClassName = \
u'GenericViewClass%d' % id(self)
self._view_window_class.lpfnWndProc = WNDPROC(self._wnd_proc_view)
self._view_window_class.style = 0
self._view_window_class.hInstance = 0
self._view_window_class.hIcon = 0
self._view_window_class.hbrBackground = white
self._view_window_class.lpszMenuName = None
self._view_window_class.cbClsExtra = 0
self._view_window_class.cbWndExtra = 0
_user32.RegisterClassW(byref(self._view_window_class))
if not self._hwnd:
self._hwnd = _user32.CreateWindowExW(
self._ex_ws_style,
self._window_class.lpszClassName,
u'',
self._ws_style,
CW_USEDEFAULT,
CW_USEDEFAULT,
width,
height,
0,
0,
self._window_class.hInstance,
0)
self._view_hwnd = _user32.CreateWindowExW(
0,
self._view_window_class.lpszClassName,
u'',
WS_CHILD | WS_VISIBLE,
0, 0, 0, 0,
self._hwnd,
0,
self._view_window_class.hInstance,
0)
self._dc = _user32.GetDC(self._view_hwnd)
else:
# Window already exists, update it with new style
            # We need to hide the window here; otherwise Windows forgets
            # to redraw the whole screen after leaving fullscreen.
_user32.ShowWindow(self._hwnd, SW_HIDE)
_user32.SetWindowLongW(self._hwnd,
GWL_STYLE,
self._ws_style)
_user32.SetWindowLongW(self._hwnd,
GWL_EXSTYLE,
self._ex_ws_style)
if self._fullscreen:
hwnd_after = HWND_TOPMOST
else:
hwnd_after = HWND_NOTOPMOST
# Position and size window
if self._fullscreen:
_user32.SetWindowPos(self._hwnd, hwnd_after,
self._screen.x, self._screen.y, width, height, SWP_FRAMECHANGED)
elif False: # TODO location not in pyglet API
x, y = self._client_to_window_pos(*factory.get_location())
_user32.SetWindowPos(self._hwnd, hwnd_after,
x, y, width, height, SWP_FRAMECHANGED)
else:
_user32.SetWindowPos(self._hwnd, hwnd_after,
0, 0, width, height, SWP_NOMOVE | SWP_FRAMECHANGED)
self._update_view_location(self._width, self._height)
# Context must be created after window is created.
if not self._wgl_context:
self.canvas = Win32Canvas(self.display, self._view_hwnd, self._dc)
self.context.attach(self.canvas)
self._wgl_context = self.context._context
self.set_caption(self._caption)
self.switch_to()
self.set_vsync(self._vsync)
if self._visible:
self.set_visible()
self.dispatch_event('on_expose')
# Might need resize event if going from fullscreen to fullscreen
self.dispatch_event('on_resize', self._width, self._height)
def _update_view_location(self, width, height):
if self._fullscreen:
x = (self.screen.width - width) // 2
y = (self.screen.height - height) // 2
else:
x = y = 0
_user32.SetWindowPos(self._view_hwnd, 0,
x, y, width, height, SWP_NOZORDER | SWP_NOOWNERZORDER)
def close(self):
super(Win32Window, self).close()
if not self._hwnd:
return
_user32.DestroyWindow(self._hwnd)
_user32.UnregisterClassW(self._window_class.lpszClassName, 0)
self.set_mouse_platform_visible(True)
self._hwnd = None
self._dc = None
self._wgl_context = None
def _get_vsync(self):
return self.context.get_vsync()
vsync = property(_get_vsync) # overrides BaseWindow property
def set_vsync(self, vsync):
if pyglet.options['vsync'] is not None:
vsync = pyglet.options['vsync']
self.context.set_vsync(vsync)
def switch_to(self):
self.context.set_current()
def flip(self):
self.draw_mouse_cursor()
self.context.flip()
def set_location(self, x, y):
x, y = self._client_to_window_pos(x, y)
_user32.SetWindowPos(self._hwnd, 0, x, y, 0, 0,
(SWP_NOZORDER |
SWP_NOSIZE |
SWP_NOOWNERZORDER))
def get_location(self):
rect = RECT()
_user32.GetClientRect(self._hwnd, byref(rect))
point = POINT()
point.x = rect.left
point.y = rect.top
_user32.ClientToScreen(self._hwnd, byref(point))
return point.x, point.y
def set_size(self, width, height):
if self._fullscreen:
raise WindowException('Cannot set size of fullscreen window.')
width, height = self._client_to_window_size(width, height)
_user32.SetWindowPos(self._hwnd, 0, 0, 0, width, height,
(SWP_NOZORDER |
SWP_NOMOVE |
SWP_NOOWNERZORDER))
def get_size(self):
#rect = RECT()
#_user32.GetClientRect(self._hwnd, byref(rect))
#return rect.right - rect.left, rect.bottom - rect.top
return self._width, self._height
def set_minimum_size(self, width, height):
self._minimum_size = width, height
def set_maximum_size(self, width, height):
self._maximum_size = width, height
def activate(self):
_user32.SetForegroundWindow(self._hwnd)
def set_visible(self, visible=True):
if visible:
insertAfter = HWND_TOPMOST if self._fullscreen else HWND_TOP
_user32.SetWindowPos(self._hwnd, insertAfter, 0, 0, 0, 0,
SWP_NOMOVE | SWP_NOSIZE | SWP_SHOWWINDOW)
self.dispatch_event('on_show')
self.activate()
self.dispatch_event('on_resize', self._width, self._height)
else:
_user32.ShowWindow(self._hwnd, SW_HIDE)
self.dispatch_event('on_hide')
self._visible = visible
self.set_mouse_platform_visible()
def minimize(self):
_user32.ShowWindow(self._hwnd, SW_MINIMIZE)
def maximize(self):
_user32.ShowWindow(self._hwnd, SW_MAXIMIZE)
def set_caption(self, caption):
self._caption = caption
_user32.SetWindowTextW(self._hwnd, c_wchar_p(caption))
def set_mouse_platform_visible(self, platform_visible=None):
if platform_visible is None:
platform_visible = (self._mouse_visible and
not self._exclusive_mouse and
not self._mouse_cursor.drawable) or \
(not self._mouse_in_window or
not self._has_focus)
if platform_visible and not self._mouse_cursor.drawable:
if isinstance(self._mouse_cursor, Win32MouseCursor):
cursor = self._mouse_cursor.cursor
else:
cursor = _user32.LoadCursorW(None, MAKEINTRESOURCE(IDC_ARROW))
_user32.SetClassLongW(self._view_hwnd, GCL_HCURSOR, cursor)
_user32.SetCursor(cursor)
if platform_visible == self._mouse_platform_visible:
return
# Avoid calling ShowCursor with the current visibility (which would
# push the counter too far away from zero).
global _win32_cursor_visible
if _win32_cursor_visible != platform_visible:
_user32.ShowCursor(platform_visible)
_win32_cursor_visible = platform_visible
self._mouse_platform_visible = platform_visible
def _reset_exclusive_mouse_screen(self):
'''Recalculate screen coords of mouse warp point for exclusive
mouse.'''
p = POINT()
rect = RECT()
_user32.GetClientRect(self._view_hwnd, byref(rect))
_user32.MapWindowPoints(self._view_hwnd, HWND_DESKTOP, byref(rect), 2)
p.x = (rect.left + rect.right) // 2
p.y = (rect.top + rect.bottom) // 2
# This is the point the mouse will be kept at while in exclusive
# mode.
self._exclusive_mouse_screen = p.x, p.y
self._exclusive_mouse_client = p.x - rect.left, p.y - rect.top
def set_exclusive_mouse(self, exclusive=True):
if self._exclusive_mouse == exclusive and \
self._exclusive_mouse_focus == self._has_focus:
return
if exclusive and self._has_focus:
# Move mouse to the center of the window.
self._reset_exclusive_mouse_screen()
x, y = self._exclusive_mouse_screen
self.set_mouse_position(x, y, absolute=True)
# Clip to client area, to prevent large mouse movements taking
# it outside the client area.
rect = RECT()
_user32.GetClientRect(self._view_hwnd, byref(rect))
_user32.MapWindowPoints(self._view_hwnd, HWND_DESKTOP,
byref(rect), 2)
_user32.ClipCursor(byref(rect))
else:
# Release clip
_user32.ClipCursor(None)
self._exclusive_mouse = exclusive
self._exclusive_mouse_focus = self._has_focus
self.set_mouse_platform_visible()
def set_mouse_position(self, x, y, absolute=False):
if not absolute:
rect = RECT()
_user32.GetClientRect(self._view_hwnd, byref(rect))
_user32.MapWindowPoints(self._view_hwnd, HWND_DESKTOP, byref(rect), 2)
x = x + rect.left
y = rect.top + (rect.bottom - rect.top) - y
_user32.SetCursorPos(x, y)
def set_exclusive_keyboard(self, exclusive=True):
if self._exclusive_keyboard == exclusive and \
self._exclusive_keyboard_focus == self._has_focus:
return
if exclusive and self._has_focus:
_user32.RegisterHotKey(self._hwnd, 0, WIN32_MOD_ALT, VK_TAB)
else:
_user32.UnregisterHotKey(self._hwnd, 0)
self._exclusive_keyboard = exclusive
self._exclusive_keyboard_focus = self._has_focus
def get_system_mouse_cursor(self, name):
if name == self.CURSOR_DEFAULT:
return DefaultMouseCursor()
names = {
self.CURSOR_CROSSHAIR: IDC_CROSS,
self.CURSOR_HAND: IDC_HAND,
self.CURSOR_HELP: IDC_HELP,
self.CURSOR_NO: IDC_NO,
self.CURSOR_SIZE: IDC_SIZEALL,
self.CURSOR_SIZE_UP: IDC_SIZENS,
self.CURSOR_SIZE_UP_RIGHT: IDC_SIZENESW,
self.CURSOR_SIZE_RIGHT: IDC_SIZEWE,
self.CURSOR_SIZE_DOWN_RIGHT: IDC_SIZENWSE,
self.CURSOR_SIZE_DOWN: IDC_SIZENS,
self.CURSOR_SIZE_DOWN_LEFT: IDC_SIZENESW,
self.CURSOR_SIZE_LEFT: IDC_SIZEWE,
self.CURSOR_SIZE_UP_LEFT: IDC_SIZENWSE,
self.CURSOR_SIZE_UP_DOWN: IDC_SIZENS,
self.CURSOR_SIZE_LEFT_RIGHT: IDC_SIZEWE,
self.CURSOR_TEXT: IDC_IBEAM,
self.CURSOR_WAIT: IDC_WAIT,
self.CURSOR_WAIT_ARROW: IDC_APPSTARTING,
}
if name not in names:
raise RuntimeError('Unknown cursor name "%s"' % name)
cursor = _user32.LoadCursorW(None, MAKEINTRESOURCE(names[name]))
return Win32MouseCursor(cursor)
def set_icon(self, *images):
# XXX Undocumented AFAICT, but XP seems happy to resize an image
# of any size, so no scaling necessary.
def best_image(width, height):
# A heuristic for finding closest sized image to required size.
image = images[0]
for img in images:
if img.width == width and img.height == height:
# Exact match always used
return img
elif img.width >= width and \
img.width * img.height > image.width * image.height:
# At least wide enough, and largest area
image = img
return image
def get_icon(image):
# Alpha-blended icon: see http://support.microsoft.com/kb/318876
format = 'BGRA'
pitch = len(format) * image.width
header = BITMAPV5HEADER()
header.bV5Size = sizeof(header)
header.bV5Width = image.width
header.bV5Height = image.height
header.bV5Planes = 1
header.bV5BitCount = 32
header.bV5Compression = BI_BITFIELDS
header.bV5RedMask = 0x00ff0000
header.bV5GreenMask = 0x0000ff00
header.bV5BlueMask = 0x000000ff
header.bV5AlphaMask = 0xff000000
hdc = _user32.GetDC(None)
dataptr = c_void_p()
bitmap = _gdi32.CreateDIBSection(hdc, byref(header), DIB_RGB_COLORS,
byref(dataptr), None, 0)
_user32.ReleaseDC(None, hdc)
data = image.get_data(format, pitch)
memmove(dataptr, data, len(data))
mask = _gdi32.CreateBitmap(image.width, image.height, 1, 1, None)
iconinfo = ICONINFO()
iconinfo.fIcon = True
iconinfo.hbmMask = mask
iconinfo.hbmColor = bitmap
icon = _user32.CreateIconIndirect(byref(iconinfo))
_gdi32.DeleteObject(mask)
_gdi32.DeleteObject(bitmap)
return icon
# Set large icon
image = best_image(_user32.GetSystemMetrics(SM_CXICON),
_user32.GetSystemMetrics(SM_CYICON))
icon = get_icon(image)
_user32.SetClassLongW(self._hwnd, GCL_HICON, icon)
# Set small icon
image = best_image(_user32.GetSystemMetrics(SM_CXSMICON),
_user32.GetSystemMetrics(SM_CYSMICON))
icon = get_icon(image)
_user32.SetClassLongW(self._hwnd, GCL_HICONSM, icon)
# Private util
def _client_to_window_size(self, width, height):
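        # AdjustWindowRectEx below expands the client-area rect to the full
        # outer window rect implied by the window styles (borders, caption),
        # so the returned width/height are what CreateWindowEx/SetWindowPos need.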
rect = RECT()
rect.left = 0
rect.top = 0
rect.right = width
rect.bottom = height
_user32.AdjustWindowRectEx(byref(rect),
self._ws_style, False, self._ex_ws_style)
return rect.right - rect.left, rect.bottom - rect.top
def _client_to_window_pos(self, x, y):
rect = RECT()
rect.left = x
rect.top = y
_user32.AdjustWindowRectEx(byref(rect),
self._ws_style, False, self._ex_ws_style)
return rect.left, rect.top
# Event dispatching
def dispatch_events(self):
from pyglet import app
app.platform_event_loop.start()
self._allow_dispatch_event = True
self.dispatch_pending_events()
msg = MSG()
while _user32.PeekMessageW(byref(msg), 0, 0, 0, PM_REMOVE):
_user32.TranslateMessage(byref(msg))
_user32.DispatchMessageW(byref(msg))
self._allow_dispatch_event = False
def dispatch_pending_events(self):
while self._event_queue:
event = self._event_queue.pop(0)
if type(event[0]) is str:
# pyglet event
EventDispatcher.dispatch_event(self, *event)
else:
# win32 event
event[0](*event[1:])
def _wnd_proc(self, hwnd, msg, wParam, lParam):
event_handler = self._event_handlers.get(msg, None)
result = 0
if event_handler:
if self._allow_dispatch_event or not self._enable_event_queue:
result = event_handler(msg, wParam, lParam)
else:
self._event_queue.append((event_handler, msg, wParam, lParam))
result = 0
if not result and msg != WM_CLOSE:
result = _user32.DefWindowProcW(hwnd, msg, wParam, lParam)
return result
def _wnd_proc_view(self, hwnd, msg, wParam, lParam):
event_handler = self._view_event_handlers.get(msg, None)
result = 0
if event_handler:
if self._allow_dispatch_event or not self._enable_event_queue:
result = event_handler(msg, wParam, lParam)
else:
self._event_queue.append((event_handler, msg, wParam, lParam))
result = 0
if not result and msg != WM_CLOSE:
result = _user32.DefWindowProcW(hwnd, msg, wParam, lParam)
return result
# Event handlers
def _get_modifiers(self, key_lParam=0):
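        # GetKeyState: the high-order bit is set while the key is held down;
        # the low-order bit gives the toggle state (caps/num/scroll lock).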
modifiers = 0
if _user32.GetKeyState(VK_SHIFT) & 0xff00:
modifiers |= key.MOD_SHIFT
if _user32.GetKeyState(VK_CONTROL) & 0xff00:
modifiers |= key.MOD_CTRL
if _user32.GetKeyState(VK_LWIN) & 0xff00:
modifiers |= key.MOD_WINDOWS
if _user32.GetKeyState(VK_CAPITAL) & 0x00ff: # toggle
modifiers |= key.MOD_CAPSLOCK
if _user32.GetKeyState(VK_NUMLOCK) & 0x00ff: # toggle
modifiers |= key.MOD_NUMLOCK
if _user32.GetKeyState(VK_SCROLL) & 0x00ff: # toggle
modifiers |= key.MOD_SCROLLLOCK
if key_lParam:
if key_lParam & (1 << 29):
modifiers |= key.MOD_ALT
elif _user32.GetKeyState(VK_MENU) < 0:
modifiers |= key.MOD_ALT
return modifiers
@staticmethod
def _get_location(lParam):
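        # lParam packs signed 16-bit client coordinates: x in the low word,
        # y in the high word, e.g. lParam == 0x00500020 gives x == 32, y == 80.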
x = c_int16(lParam & 0xffff).value
y = c_int16(lParam >> 16).value
return x, y
@Win32EventHandler(WM_KEYDOWN)
@Win32EventHandler(WM_KEYUP)
@Win32EventHandler(WM_SYSKEYDOWN)
@Win32EventHandler(WM_SYSKEYUP)
def _event_key(self, msg, wParam, lParam):
repeat = False
if lParam & (1 << 30):
if msg not in (WM_KEYUP, WM_SYSKEYUP):
repeat = True
ev = 'on_key_release'
else:
ev = 'on_key_press'
symbol = keymap.get(wParam, None)
if symbol is None:
ch = _user32.MapVirtualKeyW(wParam, MAPVK_VK_TO_CHAR)
symbol = chmap.get(ch)
if symbol is None:
symbol = key.user_key(wParam)
elif symbol == key.LCTRL and lParam & (1 << 24):
symbol = key.RCTRL
elif symbol == key.LALT and lParam & (1 << 24):
symbol = key.RALT
elif symbol == key.LSHIFT:
pass # TODO: some magic with getstate to find out if it's the
# right or left shift key.
modifiers = self._get_modifiers(lParam)
if not repeat:
self.dispatch_event(ev, symbol, modifiers)
ctrl = modifiers & key.MOD_CTRL != 0
if (symbol, ctrl) in _motion_map and msg not in (WM_KEYUP, WM_SYSKEYUP):
motion = _motion_map[symbol, ctrl]
if modifiers & key.MOD_SHIFT:
self.dispatch_event('on_text_motion_select', motion)
else:
self.dispatch_event('on_text_motion', motion)
# Send on to DefWindowProc if not exclusive.
if self._exclusive_keyboard:
return 0
else:
return None
@Win32EventHandler(WM_CHAR)
def _event_char(self, msg, wParam, lParam):
text = unichr(wParam)
if unicodedata.category(text) != 'Cc' or text == '\r':
self.dispatch_event('on_text', text)
return 0
@ViewEventHandler
@Win32EventHandler(WM_MOUSEMOVE)
def _event_mousemove(self, msg, wParam, lParam):
x, y = self._get_location(lParam)
if (x, y) == self._exclusive_mouse_client:
# Ignore the event caused by SetCursorPos
self._mouse_x = x
self._mouse_y = y
return 0
y = self._height - y
if self._exclusive_mouse and self._has_focus:
# Reset mouse position (so we don't hit the edge of the screen).
_x, _y = self._exclusive_mouse_screen
self.set_mouse_position(_x, _y, absolute=True)
dx = x - self._mouse_x
dy = y - self._mouse_y
if not self._tracking:
# There is no WM_MOUSEENTER message (!), so fake it from the
# first WM_MOUSEMOVE event after leaving. Use self._tracking
# to determine when to recreate the tracking structure after
# re-entering (to track the next WM_MOUSELEAVE).
self._mouse_in_window = True
self.set_mouse_platform_visible()
self.dispatch_event('on_mouse_enter', x, y)
self._tracking = True
track = TRACKMOUSEEVENT()
track.cbSize = sizeof(track)
track.dwFlags = TME_LEAVE
track.hwndTrack = self._view_hwnd
_user32.TrackMouseEvent(byref(track))
# Don't generate motion/drag events when mouse hasn't moved. (Issue
# 305)
if self._mouse_x == x and self._mouse_y == y:
return 0
self._mouse_x = x
self._mouse_y = y
buttons = 0
if wParam & MK_LBUTTON:
buttons |= mouse.LEFT
if wParam & MK_MBUTTON:
buttons |= mouse.MIDDLE
if wParam & MK_RBUTTON:
buttons |= mouse.RIGHT
if buttons:
# Drag event
modifiers = self._get_modifiers()
self.dispatch_event('on_mouse_drag',
x, y, dx, dy, buttons, modifiers)
else:
# Motion event
self.dispatch_event('on_mouse_motion', x, y, dx, dy)
return 0
@ViewEventHandler
@Win32EventHandler(WM_MOUSELEAVE)
def _event_mouseleave(self, msg, wParam, lParam):
point = POINT()
_user32.GetCursorPos(byref(point))
_user32.ScreenToClient(self._view_hwnd, byref(point))
x = point.x
y = self._height - point.y
self._tracking = False
self._mouse_in_window = False
self.set_mouse_platform_visible()
self.dispatch_event('on_mouse_leave', x, y)
return 0
def _event_mousebutton(self, ev, button, lParam):
if ev == 'on_mouse_press':
_user32.SetCapture(self._view_hwnd)
else:
_user32.ReleaseCapture()
x, y = self._get_location(lParam)
y = self._height - y
self.dispatch_event(ev, x, y, button, self._get_modifiers())
return 0
@ViewEventHandler
@Win32EventHandler(WM_LBUTTONDOWN)
def _event_lbuttondown(self, msg, wParam, lParam):
return self._event_mousebutton(
'on_mouse_press', mouse.LEFT, lParam)
@ViewEventHandler
@Win32EventHandler(WM_LBUTTONUP)
def _event_lbuttonup(self, msg, wParam, lParam):
return self._event_mousebutton(
'on_mouse_release', mouse.LEFT, lParam)
@ViewEventHandler
@Win32EventHandler(WM_MBUTTONDOWN)
def _event_mbuttondown(self, msg, wParam, lParam):
return self._event_mousebutton(
'on_mouse_press', mouse.MIDDLE, lParam)
@ViewEventHandler
@Win32EventHandler(WM_MBUTTONUP)
def _event_mbuttonup(self, msg, wParam, lParam):
return self._event_mousebutton(
'on_mouse_release', mouse.MIDDLE, lParam)
@ViewEventHandler
@Win32EventHandler(WM_RBUTTONDOWN)
def _event_rbuttondown(self, msg, wParam, lParam):
return self._event_mousebutton(
'on_mouse_press', mouse.RIGHT, lParam)
@ViewEventHandler
@Win32EventHandler(WM_RBUTTONUP)
def _event_rbuttonup(self, msg, wParam, lParam):
return self._event_mousebutton(
'on_mouse_release', mouse.RIGHT, lParam)
@Win32EventHandler(WM_MOUSEWHEEL)
def _event_mousewheel(self, msg, wParam, lParam):
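        # The high word of wParam holds the wheel delta in multiples of
        # WHEEL_DELTA (120), so the dispatched scroll value is in whole "clicks".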
delta = c_short(wParam >> 16).value
self.dispatch_event('on_mouse_scroll',
self._mouse_x, self._mouse_y, 0, delta / float(WHEEL_DELTA))
return 0
@Win32EventHandler(WM_CLOSE)
def _event_close(self, msg, wParam, lParam):
self.dispatch_event('on_close')
return 0
@ViewEventHandler
@Win32EventHandler(WM_PAINT)
def _event_paint(self, msg, wParam, lParam):
self.dispatch_event('on_expose')
# Validating the window using ValidateRect or ValidateRgn
# doesn't clear the paint message when more than one window
# is open [why?]; defer to DefWindowProc instead.
return None
@Win32EventHandler(WM_SIZING)
def _event_sizing(self, msg, wParam, lParam):
#rect = cast(lParam, POINTER(RECT)).contents
#width, height = self.get_size()
from pyglet import app
if app.event_loop is not None:
app.event_loop.enter_blocking()
return 1
@Win32EventHandler(WM_SIZE)
def _event_size(self, msg, wParam, lParam):
if not self._dc:
# Ignore window creation size event (appears for fullscreen
# only) -- we haven't got DC or HWND yet.
return None
if wParam == SIZE_MINIMIZED:
# Minimized, not resized.
self._hidden = True
self.dispatch_event('on_hide')
return 0
if self._hidden:
# Restored
self._hidden = False
self.dispatch_event('on_show')
w, h = self._get_location(lParam)
if not self._fullscreen:
self._width, self._height = w, h
self._update_view_location(self._width, self._height)
self._reset_exclusive_mouse_screen()
self.switch_to()
self.dispatch_event('on_resize', self._width, self._height)
return 0
@Win32EventHandler(WM_SYSCOMMAND)
def _event_syscommand(self, msg, wParam, lParam):
if wParam & 0xfff0 in (SC_MOVE, SC_SIZE):
# Should be in WM_ENTERSIZEMOVE, but we never get that message.
from pyglet import app
if app.event_loop is not None:
app.event_loop.enter_blocking()
return 0
@Win32EventHandler(WM_MOVE)
def _event_move(self, msg, wParam, lParam):
x, y = self._get_location(lParam)
self._reset_exclusive_mouse_screen()
self.dispatch_event('on_move', x, y)
return 0
@Win32EventHandler(WM_EXITSIZEMOVE)
def _event_entersizemove(self, msg, wParam, lParam):
from pyglet import app
if app.event_loop is not None:
app.event_loop.exit_blocking()
return 0
'''
# Alternative to using WM_SETFOCUS and WM_KILLFOCUS. Which
# is better?
@Win32EventHandler(WM_ACTIVATE)
def _event_activate(self, msg, wParam, lParam):
if wParam & 0xffff == WA_INACTIVE:
self.dispatch_event('on_deactivate')
else:
self.dispatch_event('on_activate')
_user32.SetFocus(self._hwnd)
return 0
'''
@Win32EventHandler(WM_SETFOCUS)
def _event_setfocus(self, msg, wParam, lParam):
self.dispatch_event('on_activate')
self._has_focus = True
self.set_exclusive_keyboard(self._exclusive_keyboard)
self.set_exclusive_mouse(self._exclusive_mouse)
return 0
@Win32EventHandler(WM_KILLFOCUS)
def _event_killfocus(self, msg, wParam, lParam):
self.dispatch_event('on_deactivate')
self._has_focus = False
self.set_exclusive_keyboard(self._exclusive_keyboard)
self.set_exclusive_mouse(self._exclusive_mouse)
return 0
@Win32EventHandler(WM_GETMINMAXINFO)
def _event_getminmaxinfo(self, msg, wParam, lParam):
info = MINMAXINFO.from_address(lParam)
if self._minimum_size:
info.ptMinTrackSize.x, info.ptMinTrackSize.y = \
self._client_to_window_size(*self._minimum_size)
if self._maximum_size:
info.ptMaxTrackSize.x, info.ptMaxTrackSize.y = \
self._client_to_window_size(*self._maximum_size)
return 0
@Win32EventHandler(WM_ERASEBKGND)
def _event_erasebkgnd(self, msg, wParam, lParam):
# Prevent flicker during resize; but erase bkgnd if we're fullscreen.
if self._fullscreen:
return 0
else:
return 1
@ViewEventHandler
@Win32EventHandler(WM_ERASEBKGND)
def _event_erasebkgnd_view(self, msg, wParam, lParam):
# Prevent flicker during resize.
return 1
| NiclasEriksen/py-towerwars | src/pyglet/window/win32/__init__.py | Python | cc0-1.0 | 35,484 |
#!/usr/bin/env python
"""
Script to fully automate the release process. Requires Python 2.6+
with sphinx installed and the 'hg' command on the path.
"""
from __future__ import print_function
import subprocess
import shutil
import os
import sys
import urllib2
import getpass
import collections
try:
import keyring
except Exception:
pass
VERSION = '0.6.35'
def get_next_version():
digits = map(int, VERSION.split('.'))
digits[-1] += 1
return '.'.join(map(str, digits))
NEXT_VERSION = get_next_version()
files_with_versions = ('docs/conf.py', 'setup.py', 'release.py',
'README.txt', 'distribute_setup.py')
def get_repo_name():
"""
Get the repo name from the hgrc default path.
"""
default = subprocess.check_output('hg paths default').strip()
parts = default.split('/')
if parts[-1] == '':
parts.pop()
return '/'.join(parts[-2:])
def get_mercurial_creds(system='https://bitbucket.org', username=None):
"""
Return named tuple of username,password in much the same way that
Mercurial would (from the keyring).
"""
# todo: consider getting this from .hgrc
username = username or getpass.getuser()
keyring_username = '@@'.join((username, system))
system = '@'.join((keyring_username, 'Mercurial'))
password = (
keyring.get_password(system, keyring_username)
if 'keyring' in globals()
else None
)
if not password:
password = getpass.getpass()
Credential = collections.namedtuple('Credential', 'username password')
return Credential(username, password)
def add_milestone_and_version(version=NEXT_VERSION):
auth = 'Basic ' + ':'.join(get_mercurial_creds()).encode('base64').strip()
headers = {
'Authorization': auth,
}
base = 'https://api.bitbucket.org'
for type in 'milestones', 'versions':
url = (base + '/1.0/repositories/{repo}/issues/{type}'
.format(repo = get_repo_name(), type=type))
req = urllib2.Request(url = url, headers = headers,
data='name='+version)
try:
urllib2.urlopen(req)
except urllib2.HTTPError as e:
print(e.fp.read())
def bump_versions():
list(map(bump_version, files_with_versions))
def bump_version(filename):
with open(filename, 'rb') as f:
lines = [line.replace(VERSION, NEXT_VERSION) for line in f]
with open(filename, 'wb') as f:
f.writelines(lines)
def do_release():
assert all(map(os.path.exists, files_with_versions)), (
"Expected file(s) missing")
assert has_sphinx(), "You must have Sphinx installed to release"
res = raw_input('Have you read through the SCM changelog and '
'confirmed the changelog is current for releasing {VERSION}? '
.format(**globals()))
if not res.lower().startswith('y'):
print("Please do that")
raise SystemExit(1)
print("Travis-CI tests: http://travis-ci.org/#!/jaraco/distribute")
res = raw_input('Have you or has someone verified that the tests '
'pass on this revision? ')
if not res.lower().startswith('y'):
print("Please do that")
raise SystemExit(2)
subprocess.check_call(['hg', 'tag', VERSION])
subprocess.check_call(['hg', 'update', VERSION])
has_docs = build_docs()
if os.path.isdir('./dist'):
shutil.rmtree('./dist')
cmd = [sys.executable, 'setup.py', '-q', 'egg_info', '-RD', '-b', '',
'sdist', 'register', 'upload']
if has_docs:
cmd.append('upload_docs')
subprocess.check_call(cmd)
upload_bootstrap_script()
# update to the tip for the next operation
subprocess.check_call(['hg', 'update'])
# we just tagged the current version, bump for the next release.
bump_versions()
subprocess.check_call(['hg', 'ci', '-m',
'Bumped to {NEXT_VERSION} in preparation for next '
'release.'.format(**globals())])
# push the changes
subprocess.check_call(['hg', 'push'])
add_milestone_and_version()
def has_sphinx():
try:
devnull = open(os.path.devnull, 'wb')
subprocess.Popen(['sphinx-build', '--version'], stdout=devnull,
stderr=subprocess.STDOUT).wait()
except Exception:
return False
return True
def build_docs():
if not os.path.isdir('docs'):
return
if os.path.isdir('docs/build'):
shutil.rmtree('docs/build')
subprocess.check_call([
'sphinx-build',
'-b', 'html',
'-d', 'build/doctrees',
'.',
'build/html',
],
cwd='docs')
return True
def upload_bootstrap_script():
scp_command = 'pscp' if sys.platform.startswith('win') else 'scp'
try:
subprocess.check_call([scp_command, 'distribute_setup.py',
'[email protected]:python-distribute.org/'])
except:
print("Unable to upload bootstrap script. Ask Tarek to do it.")
if __name__ == '__main__':
do_release()
| jsaveta/Lance | src/rescal/release.py | Python | apache-2.0 | 4,493 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SWIG-wrapped events writer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.core.framework import summary_pb2
from tensorflow.core.util import event_pb2
# pylint: disable=invalid-import-order, g-bad-import-order, wildcard-import, unused-import, undefined-variable
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import _pywrap_events_writer
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
class PywrapeventsWriterTest(test_util.TensorFlowTestCase):
def testWriteEvents(self):
file_prefix = os.path.join(self.get_temp_dir(), "events")
writer = _pywrap_events_writer.EventsWriter(compat.as_bytes(file_prefix))
filename = compat.as_text(writer.FileName())
event_written = event_pb2.Event(
wall_time=123.45,
step=67,
summary=summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag="foo", simple_value=89.0)]))
writer.WriteEvent(event_written)
writer.Flush()
writer.Close()
with self.assertRaises(errors.NotFoundError):
for r in tf_record.tf_record_iterator(filename + "DOES_NOT_EXIST"):
self.assertTrue(False)
reader = tf_record.tf_record_iterator(filename)
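    # The first record in the file is the file_version header event that
    # EventsWriter emits on creation; the test event comes second.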
event_read = event_pb2.Event()
event_read.ParseFromString(next(reader))
self.assertTrue(event_read.HasField("file_version"))
event_read.ParseFromString(next(reader))
# Second event
self.assertProtoEquals("""
wall_time: 123.45 step: 67
summary { value { tag: 'foo' simple_value: 89.0 } }
""", event_read)
with self.assertRaises(StopIteration):
next(reader)
def testWriteEventInvalidType(self):
class _Invalid(object):
def __str__(self):
return "Invalid"
with self.assertRaisesRegex(TypeError, "Invalid"):
_pywrap_events_writer.EventsWriter(b"foo").WriteEvent(_Invalid())
if __name__ == "__main__":
googletest.main()
| sarvex/tensorflow | tensorflow/python/client/events_writer_test.py | Python | apache-2.0 | 2,890 |
"""Tests for the preset and the history of queries."""
import json
import os
from qgis.core import QgsCoordinateReferenceSystem, QgsRectangle
from qgis.testing import unittest
from QuickOSM.core.utilities.json_encoder import as_enum
from QuickOSM.core.utilities.query_saved import QueryManagement
from QuickOSM.core.utilities.tools import query_preset
from QuickOSM.definitions.format import Format
from QuickOSM.definitions.gui import Panels
from QuickOSM.ui.dialog import Dialog
from QuickOSM.ui.edit_preset import EditPreset
__copyright__ = 'Copyright 2021, 3Liz'
__license__ = 'GPL version 3'
__email__ = '[email protected]'
class TestBookmarkQuery(unittest.TestCase):
"""Tests for the preset and the history of queries."""
def setUp(self):
"""Set up the tests"""
self.maxDiff = None
self.preset_folder = query_preset()
self.dialog = Dialog()
index = self.dialog.table_keys_values_qq.cellWidget(0, 1).findText('amenity')
self.dialog.table_keys_values_qq.cellWidget(0, 1).setCurrentIndex(index)
index = self.dialog.table_keys_values_qq.cellWidget(0, 2).findText('bench')
self.dialog.table_keys_values_qq.cellWidget(0, 2).setCurrentIndex(index)
self.dialog.places_edits[Panels.QuickQuery].setText('foo')
self.dialog.button_save_query.click()
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
def set_up_preset_data_text(self) -> dict:
"""Load the data save in the json file linked to the preset."""
preset_file = os.path.join(
self.preset_folder, self.name_preset, self.name_preset + '.json')
with open(preset_file, encoding='utf8') as json_file:
data_preset = json.load(json_file)
return data_preset
def set_up_preset_data(self) -> dict:
"""Load the data save in the json file linked to the preset."""
preset_folder = query_preset()
preset_file = os.path.join(
preset_folder, self.name_preset, self.name_preset + '.json')
with open(preset_file, encoding='utf8') as json_file:
data_preset = json.load(json_file, object_hook=as_enum)
return data_preset
def tearDown(self):
"""End of the tests"""
self.dialog.external_panels[Panels.MapPreset].remove_preset(self.preset, self.name_preset)
def test_save_in_preset(self):
"""Test if the file is save in preset."""
nb_preset = self.dialog.list_personal_preset_mp.count()
self.assertEqual(nb_preset, 1)
self.assertEqual(self.name_preset, 'amenity_bench_foo')
def test_preset_format(self):
"""Test if the file in preset is as expected."""
data_preset = self.set_up_preset_data_text()
expected_json = {
"query":
[
"[out:xml] [timeout:25];\n {{geocodeArea:foo}} -> .area_0;\n(\n"
" node[\"amenity\"=\"bench\"](area.area_0);\n "
"way[\"amenity\"=\"bench\"](area.area_0);\n "
"relation[\"amenity\"=\"bench\"](area.area_0);\n);\n"
"(._;>;);\nout body;"
],
"description":
["All OSM objects with the key 'amenity'='bench' in foo are going to be downloaded."],
"advanced": False,
"file_name": "amenity_bench_foo",
"query_layer_name": ["amenity_bench_foo"],
"query_name": ["Query1"],
"type_multi_request": [[]],
"keys": [["amenity"]],
"values": [["bench"]],
"area": ["foo"],
"bbox": [""],
"output_geom_type":
[
[
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"},
{"__enum__": "LayerType.Multilinestrings"},
{"__enum__": "LayerType.Multipolygons"}
]
],
"white_list_column":
[{"multilinestrings": None, "points": None, "lines": None, "multipolygons": None}],
"output_directory": [""],
"output_format": [{"__enum__": "Format.GeoPackage"}]
}
self.assertDictEqual(expected_json, data_preset)
def test_view_bookmark(self):
"""Test if we can display a preset."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
self.assertEqual(data_preset['file_name'], edit_dialog.preset_name.text())
self.assertEqual(
data_preset['description'], edit_dialog.description.toPlainText().split('\\n')
)
self.assertEqual(data_preset['query_layer_name'][0], edit_dialog.layer_name.text())
self.assertEqual(data_preset['query'][0], edit_dialog.query.toPlainText())
self.assertEqual(data_preset['area'][0], edit_dialog.area.text())
self.assertFalse(edit_dialog.bbox.outputExtent().xMinimum())
self.assertFalse(edit_dialog.bbox.outputExtent().yMinimum())
self.assertTrue(edit_dialog.checkbox_points.isChecked())
self.assertTrue(edit_dialog.checkbox_lines.isChecked())
self.assertTrue(edit_dialog.checkbox_multilinestrings.isChecked())
self.assertTrue(edit_dialog.checkbox_multipolygons.isChecked())
self.assertFalse(edit_dialog.white_points.text())
self.assertFalse(edit_dialog.white_lines.text())
self.assertFalse(edit_dialog.white_multilinestrings.text())
self.assertFalse(edit_dialog.white_multipolygons.text())
self.assertEqual(edit_dialog.combo_output_format.currentData(), Format.GeoPackage)
self.assertEqual(
data_preset['output_directory'][0], edit_dialog.output_directory.filePath()
)
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 1)
edit_dialog.preset_name.setText('Test a new name')
edit_dialog.button_cancel.click()
self.dialog.external_panels[Panels.MapPreset].update_personal_preset_view()
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
self.assertNotEqual(self.name_preset, 'Test_a_new_name')
def test_edit_rename_bookmark(self):
"""Test if we can edit and rename a preset."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
edit_dialog.preset_name.setText('Test a new name')
edit_dialog.button_validate.click()
self.dialog.external_panels[Panels.MapPreset].update_personal_preset_view()
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
self.assertEqual(self.name_preset, 'Test_a_new_name')
def test_edited_bookmark_file(self):
"""Test if we can edit a preset and check the edited json file."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
edit_dialog.description.setPlainText('Be or not to be...\\nShakespear')
edit_dialog.layer_name.setText('Misery')
edit_dialog.query.setPlainText('I would like two pencils please.')
edit_dialog.checkbox_points.setChecked(True)
edit_dialog.checkbox_lines.setChecked(True)
edit_dialog.checkbox_multilinestrings.setChecked(False)
edit_dialog.checkbox_multipolygons.setChecked(False)
edit_dialog.white_points.setText('name')
index = edit_dialog.combo_output_format.findData(Format.Kml)
edit_dialog.combo_output_format.setCurrentIndex(index)
edit_dialog.button_validate.click()
self.preset = self.dialog.list_personal_preset_mp.item(0)
new_data = self.set_up_preset_data_text()
expected_json = {
"query":
[
"I would like two pencils please."
],
"description":
["Be or not to be...", "Shakespear"],
"advanced": False,
"file_name": "amenity_bench_foo",
"query_layer_name": ["Misery"],
"query_name": ["Query1"],
"type_multi_request": [[]],
"keys": [["amenity"]],
"values": [["bench"]],
"area": ["foo"],
"bbox": [{'__extent__': '0.0 0.0 0.0 0.0'}],
"output_geom_type":
[
[
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"}
]
],
"white_list_column":
[{"multilinestrings": None, "points": 'name', "lines": None, "multipolygons": None}],
"output_directory": [""],
"output_format": [{"__enum__": "Format.Kml"}]
}
self.assertDictEqual(expected_json, new_data)
def test_advanced_view(self):
"""Test if the view match the preset type."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
current = edit_dialog.stacked_parameters_preset.currentWidget()
self.assertEqual(current, edit_dialog.basic_parameters)
edit_dialog.radio_advanced.setChecked(True)
current = edit_dialog.stacked_parameters_preset.currentWidget()
self.assertEqual(current, edit_dialog.advanced_parameters)
def test_bookmark_several_query(self):
"""Test if we can manage (add and remove) several queries in a preset."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
self.assertEqual(edit_dialog.current_query, 0)
edit_dialog.button_add.click()
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 2)
self.assertEqual(edit_dialog.current_query, 1)
self.assertEqual(edit_dialog.layer_name.text(), '')
edit_dialog.layer_name.setText('Query2')
index = edit_dialog.table_keys_values_eb.cellWidget(0, 1).findText('type')
edit_dialog.table_keys_values_eb.cellWidget(0, 1).setCurrentIndex(index)
edit_dialog.table_keys_values_eb.cellWidget(0, 3).click()
index = edit_dialog.table_keys_values_eb.cellWidget(1, 1).findText('route')
edit_dialog.table_keys_values_eb.cellWidget(1, 1).setCurrentIndex(index)
edit_dialog.key_edited(1)
index = edit_dialog.table_keys_values_eb.cellWidget(1, 2).findText('bicycle')
edit_dialog.table_keys_values_eb.cellWidget(1, 2).setCurrentIndex(index)
index = edit_dialog.table_keys_values_eb.cellWidget(0, 2).findText('route')
edit_dialog.table_keys_values_eb.cellWidget(0, 2).setCurrentIndex(index)
edit_dialog.button_validate.click()
self.preset = self.dialog.list_personal_preset_mp.item(0)
new_data = self.set_up_preset_data_text()
expected_json = {
"query":
[
"[out:xml] [timeout:25];\n {{geocodeArea:foo}} -> .area_0;\n(\n"
" node[\"amenity\"=\"bench\"](area.area_0);\n "
"way[\"amenity\"=\"bench\"](area.area_0);\n "
"relation[\"amenity\"=\"bench\"](area.area_0);\n);\n"
"(._;>;);\nout body;",
""
],
"description":
["All OSM objects with the key 'amenity'='bench' in foo are going to be downloaded."],
"advanced": False,
"file_name": "amenity_bench_foo",
"query_layer_name": ["amenity_bench_foo", "Query2"],
"query_name": ["Query1", "Query2"],
"type_multi_request": [[], [{"__enum__": "MultiType.AND"}]],
"keys": [["amenity"], ["type", "route"]],
"values": [["bench"], ["route", "bicycle"]],
"area": ["foo", ""],
"bbox": [{'__extent__': '0.0 0.0 0.0 0.0'}, {'__extent__': '0.0 0.0 0.0 0.0'}],
"output_geom_type":
[
[
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"},
{"__enum__": "LayerType.Multilinestrings"},
{"__enum__": "LayerType.Multipolygons"}
], [
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"},
{"__enum__": "LayerType.Multilinestrings"},
{"__enum__": "LayerType.Multipolygons"}
]
],
"white_list_column":
[
{"multilinestrings": None, "points": None, "lines": None, "multipolygons": None},
{"multilinestrings": None, "points": None, "lines": None, "multipolygons": None}
],
"output_directory": ["", ""],
"output_format": [{"__enum__": "Format.GeoPackage"}, None]
}
self.assertDictEqual(expected_json, new_data)
edit_dialog.list_queries.setCurrentRow(0)
self.assertEqual(edit_dialog.current_query, 0)
self.assertEqual(edit_dialog.layer_name.text(), 'amenity_bench_foo')
edit_dialog.delete_query(0)
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 1)
self.assertEqual(edit_dialog.layer_name.text(), 'Query2')
crs = QgsCoordinateReferenceSystem('EPSG:4326')
x_min = 2.71828
x_max = 3.1415926
y_min = 0.0
y_max = 1.6180339
rect = QgsRectangle(x_min, y_min, x_max, y_max)
edit_dialog.bbox.setOutputExtentFromUser(rect, crs)
self.assertEqual(
edit_dialog.stacked_parameters_preset.currentWidget(), edit_dialog.basic_parameters)
edit_dialog.radio_advanced.setChecked(True)
self.assertEqual(
edit_dialog.stacked_parameters_preset.currentWidget(), edit_dialog.advanced_parameters)
edit_dialog.button_validate.click()
self.preset = self.dialog.list_personal_preset_mp.item(0)
new_data = self.set_up_preset_data_text()
expected_json = {
"query":
[
""
],
"description":
["All OSM objects with the key 'amenity'='bench' in foo are going to be downloaded."],
"advanced": True,
"file_name": "amenity_bench_foo",
"query_layer_name": ["Query2"],
"query_name": ["Query1"],
"type_multi_request": [[{"__enum__": "MultiType.AND"}]],
"keys": [["type", "route"]],
"values": [["route", "bicycle"]],
"area": [""],
"bbox": [{'__extent__': '2.71828 0.0 3.1415926 1.6180339'}],
"output_geom_type":
[
[
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"},
{"__enum__": "LayerType.Multilinestrings"},
{"__enum__": "LayerType.Multipolygons"}
]
],
"white_list_column":
[{"multilinestrings": None, "points": None, "lines": None, "multipolygons": None}],
"output_directory": [""],
"output_format": [None]
}
self.assertDictEqual(expected_json, new_data)
def test_add_in_preset(self):
"""Test if we can add a query in a preset from the Quick Query panel."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 1)
nb_preset = self.dialog.list_personal_preset_mp.count()
self.assertEqual(nb_preset, 1)
q_manage = QueryManagement(
query='',
name='aeroway_control_tower_foo',
description='',
advanced=False,
keys='aeroway',
values='control_tower',
area='foo'
)
q_manage.add_query_in_preset('amenity_bench_foo')
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 2)
nb_preset = self.dialog.list_personal_preset_mp.count()
self.assertEqual(nb_preset, 1)
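# --- Illustrative sketch, not part of the original module -------------------
# The expected JSON above stores enum members as {"__enum__": "Format.Kml"};
# the as_enum object_hook imported at the top presumably reverses that marker
# when a preset is reloaded. The helper below is only a generic, hypothetical
# version of such a hook: its name and the `enum_classes` parameter are made
# up for illustration and do not mirror QuickOSM's actual implementation.
def _example_as_enum(obj, enum_classes):
    """Turn {"__enum__": "ClassName.MEMBER"} back into the enum member."""
    if isinstance(obj, dict) and '__enum__' in obj:
        class_name, member = obj['__enum__'].split('.')
        return getattr(enum_classes[class_name], member)
    return obj
# Usage sketch: json.loads(text, object_hook=lambda o: _example_as_enum(o, {'Format': Format}))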
| Gustry/QuickOSM | QuickOSM/test/test_saved_query.py | Python | gpl-2.0 | 17,258 |
# Copyright (C) 2007, One Laptop Per Child
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gettext import gettext as _
import logging
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Wnck
from sugar3.graphics import style
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.objectchooser import FILTER_TYPE_MIME_BY_ACTIVITY
from jarabe.journal.listview import BaseListView
from jarabe.journal.listmodel import ListModel
from jarabe.journal.journaltoolbox import MainToolbox
from jarabe.journal.volumestoolbar import VolumesToolbar
from jarabe.model import bundleregistry
from jarabe.journal.iconview import IconView
class ObjectChooser(Gtk.Window):
__gtype_name__ = 'ObjectChooser'
__gsignals__ = {
'response': (GObject.SignalFlags.RUN_FIRST, None, ([int])),
}
def __init__(self, parent=None, what_filter='', filter_type=None,
show_preview=False):
Gtk.Window.__init__(self)
self.set_type_hint(Gdk.WindowTypeHint.DIALOG)
self.set_decorated(False)
self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
self.set_border_width(style.LINE_WIDTH)
self.set_has_resize_grip(False)
self._selected_object_id = None
self._show_preview = show_preview
self.add_events(Gdk.EventMask.VISIBILITY_NOTIFY_MASK)
self.connect('visibility-notify-event',
self.__visibility_notify_event_cb)
self.connect('delete-event', self.__delete_event_cb)
self.connect('key-press-event', self.__key_press_event_cb)
if parent is None:
logging.warning('ObjectChooser: No parent window specified')
else:
self.connect('realize', self.__realize_cb, parent)
screen = Wnck.Screen.get_default()
screen.connect('window-closed', self.__window_closed_cb, parent)
vbox = Gtk.VBox()
self.add(vbox)
vbox.show()
title_box = TitleBox(what_filter, filter_type)
title_box.connect('volume-changed', self.__volume_changed_cb)
title_box.close_button.connect('clicked',
self.__close_button_clicked_cb)
title_box.set_size_request(-1, style.GRID_CELL_SIZE)
vbox.pack_start(title_box, False, True, 0)
title_box.show()
separator = Gtk.HSeparator()
vbox.pack_start(separator, False, True, 0)
separator.show()
self._toolbar = MainToolbox(default_what_filter=what_filter,
default_filter_type=filter_type)
self._toolbar.connect('query-changed', self.__query_changed_cb)
self._toolbar.set_size_request(-1, style.GRID_CELL_SIZE)
vbox.pack_start(self._toolbar, False, True, 0)
self._toolbar.show()
if not self._show_preview:
self._list_view = ChooserListView(self._toolbar)
self._list_view.connect('entry-activated',
self.__entry_activated_cb)
self._list_view.connect('clear-clicked', self.__clear_clicked_cb)
vbox.pack_start(self._list_view, True, True, 0)
self._list_view.show()
else:
self._icon_view = IconView(self._toolbar)
self._icon_view.connect('entry-activated',
self.__entry_activated_cb)
self._icon_view.connect('clear-clicked', self.__clear_clicked_cb)
vbox.pack_start(self._icon_view, True, True, 0)
self._icon_view.show()
width = Gdk.Screen.width() - style.GRID_CELL_SIZE * 2
height = Gdk.Screen.height() - style.GRID_CELL_SIZE * 2
self.set_size_request(width, height)
self._toolbar.update_filters('/', what_filter, filter_type)
def __realize_cb(self, chooser, parent):
self.get_window().set_transient_for(parent)
# TODO: Should we disconnect the signal here?
def __window_closed_cb(self, screen, window, parent):
if window.get_xid() == parent.get_xid():
self.destroy()
def __entry_activated_cb(self, list_view, uid):
self._selected_object_id = uid
self.emit('response', Gtk.ResponseType.ACCEPT)
def __delete_event_cb(self, chooser, event):
self.emit('response', Gtk.ResponseType.DELETE_EVENT)
def __key_press_event_cb(self, widget, event):
keyname = Gdk.keyval_name(event.keyval)
if keyname == 'Escape':
self.emit('response', Gtk.ResponseType.DELETE_EVENT)
def __close_button_clicked_cb(self, button):
self.emit('response', Gtk.ResponseType.DELETE_EVENT)
def get_selected_object_id(self):
return self._selected_object_id
def __query_changed_cb(self, toolbar, query):
if not self._show_preview:
self._list_view.update_with_query(query)
else:
self._icon_view.update_with_query(query)
def __volume_changed_cb(self, volume_toolbar, mount_point):
logging.debug('Selected volume: %r.', mount_point)
self._toolbar.set_mount_point(mount_point)
def __visibility_notify_event_cb(self, window, event):
logging.debug('visibility_notify_event_cb %r', self)
        # The view is visible as long as the window is not fully obscured.
        visible = event.get_state() != Gdk.VisibilityState.FULLY_OBSCURED
if not self._show_preview:
self._list_view.set_is_visible(visible)
else:
self._icon_view.set_is_visible(visible)
def __clear_clicked_cb(self, list_view):
self._toolbar.clear_query()
class TitleBox(VolumesToolbar):
__gtype_name__ = 'TitleBox'
def __init__(self, what_filter='', filter_type=None):
VolumesToolbar.__init__(self)
label = Gtk.Label()
title = _('Choose an object')
if filter_type == FILTER_TYPE_MIME_BY_ACTIVITY:
registry = bundleregistry.get_registry()
bundle = registry.get_bundle(what_filter)
if bundle is not None:
title = _('Choose an object to open with %s activity') % \
bundle.get_name()
label.set_markup('<b>%s</b>' % title)
label.set_alignment(0, 0.5)
self._add_widget(label, expand=True)
self.close_button = ToolButton(icon_name='dialog-cancel')
self.close_button.set_tooltip(_('Close'))
self.insert(self.close_button, -1)
self.close_button.show()
def _add_widget(self, widget, expand=False):
tool_item = Gtk.ToolItem()
tool_item.set_expand(expand)
tool_item.add(widget)
widget.show()
self.insert(tool_item, -1)
tool_item.show()
class ChooserListView(BaseListView):
__gtype_name__ = 'ChooserListView'
__gsignals__ = {
'entry-activated': (GObject.SignalFlags.RUN_FIRST,
None,
([str])),
}
def __init__(self, toolbar):
BaseListView.__init__(self, None)
self._toolbar = toolbar
self.tree_view.props.hover_selection = True
self.tree_view.connect('button-release-event',
self.__button_release_event_cb)
def _can_clear_query(self):
return self._toolbar.is_filter_changed()
def __entry_activated_cb(self, entry):
self.emit('entry-activated', entry)
def _favorite_clicked_cb(self, cell, path):
pass
def create_palette(self, x, y):
# We don't want show the palette in the object chooser
pass
def __button_release_event_cb(self, tree_view, event):
if event.window != tree_view.get_bin_window():
return False
pos = tree_view.get_path_at_pos(int(event.x), int(event.y))
if pos is None:
return False
path, column_, x_, y_ = pos
uid = tree_view.get_model()[path][ListModel.COLUMN_UID]
self.emit('entry-activated', uid)
return False
| AbrahmAB/sugar | src/jarabe/journal/objectchooser.py | Python | gpl-3.0 | 8,575 |
"""
Crawl job for Wikinews English
https://en.wikinews.org
"""
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from psicrawler.spiders.default_spider import DefaultSpider
class WikinewsEnSpider(DefaultSpider):
name = "wikinews_en"
source = "wikinews-en"
allowed_domains = ["en.wikinews.org"]
start_urls = [
"https://en.wikinews.org/wiki/Wikinews:Archives/Date/All"
]
rules = (
        Rule(LinkExtractor(allow=(r'Wikinews\:[0-9][0-9][0-9][0-9]\/[A-Za-z]+\/[0-9]+'), deny=(r'\/w\/index.php'))),
        Rule(LinkExtractor(allow=(r'wiki\/*'), deny=(
            r'Wikinews\:*',
            r'Special\:*',
            r'Category\:*',
            r'Wikinoticias\:*',
            r'User\:*',
            r'User_talk\:*',
            r'\/w\/index\.php*'
        )), callback='parse_article'),
)
source = "wikinews-en"
mapped_topics = {
'Politics and conflicts': 'Politics',
'Disasters and accidents': 'Disasters',
'Economy and business': 'Economics',
'Culture and entertainment': 'Entertainment',
'Science and technology': 'Science',
'Weather': 'Environment',
'Internet': 'Science',
'Crime and law': 'Law',
}
# allowed_topics = None
def extract_title(self, title):
t = super().extract_title(title)
return t.replace(' - Wikinews, the free news source', '')
def extract_text(self, response):
a = super().extract_text(response)
a = a.replace('From Wikinews, the free news source you can write!','')
a = a.replace('This page is archived, and is no longer publicly editable. Articles presented on Wikinews reflect the specific time at which they were written and published, and do not attempt to encompass events or knowledge which occur or become known after their publication. Got a correction? Add the template {{editprotected}} to the talk page along with your corrections, and it will be brought to the attention of the administrators. Please note that due to our archival policy, we will not alter or update the content of articles that are archived, but will only accept requests to make grammatical and formatting corrections. Note that some listed sources or external links may no longer be available online due to age.','')
return a
def extract_topics(self, response):
topics = response.xpath('//div[@id="mw-normal-catlinks"]/ul/li/a/text()').extract()
return self.filter_topics(topics)
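# --- Illustrative sketch, not part of the original spider -------------------
# `mapped_topics` is presumably consulted by DefaultSpider.filter_topics()
# (defined elsewhere) to normalise the raw Wikinews category names collected
# in extract_topics(). The helper below is only a hypothetical illustration
# of that kind of mapping step, not the real filter_topics() implementation.
def _example_map_topics(raw_topics, mapping):
    """Return the mapped topic for every raw category that has a mapping."""
    return [mapping[topic] for topic in raw_topics if topic in mapping]
# e.g. _example_map_topics(['Internet', 'Obituaries'], WikinewsEnSpider.mapped_topics)
# yields ['Science'] and silently drops the unmapped 'Obituaries' category.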
| psiopic2/psicrawler | psicrawler/spiders/wikinews_en_spider.py | Python | mit | 2,522 |
from django.apps import AppConfig
class EstudiosSocioeconomicosConfig(AppConfig):
name = 'estudios_socioeconomicos'
| erikiado/jp2_online | estudios_socioeconomicos/apps.py | Python | mit | 122 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import pytest
import json
import itertools
from unittest import SkipTest
from numpy.testing import assert_equal, assert_almost_equal
from .. import fd_app
from ...utils import _silent, dict2type, sdict_keys
from ...ingestion import DocumentIndex
from ...exceptions import OptionalDependencyMissing
from ...tests.run_suite import check_cache
from .base import (parse_res, V01, app, app_notest, get_features, get_features_lsi,
email_data_dir)
#=============================================================================#
#
# Email Parsing
#
#=============================================================================#
def parse_emails(app):
method = V01 + "/email-parser/"
pars = dict(data_dir=email_data_dir)
res = app.post(method, json=pars)
assert res.status_code == 200, method
data = parse_res(res)
assert sorted(data.keys()) == ['filenames', 'id']
dsid = data['id']
return dsid, pars
def test_parse_emails(app):
dsid, pars = parse_emails(app)
method = V01 + "/email-parser/{}".format(dsid)
res = app.get(method)
assert res.status_code == 200, method
data = parse_res(res)
for key, val in pars.items():
if key in ['data_dir']:
continue
assert val == data[key]
def test_delete_parsed_emails(app):
dsid, _ = parse_emails(app)
method = V01 + "/email-parser/{}".format(dsid)
res = app.delete(method)
assert res.status_code == 200
def test_get_email_parser_all(app):
method = V01 + "/email-parser/"
res = app.get(method)
assert res.status_code == 200
data = parse_res(res)
for row in data:
assert sorted(row.keys()) == sorted([ 'data_dir', 'id', 'encoding', 'n_samples'])
def test_get_email_parser(app):
dsid, _ = parse_emails(app)
method = V01 + "/email-parser/{}".format(dsid)
res = app.get(method)
assert res.status_code == 200
data = parse_res(res)
assert sorted(data.keys()) == \
sorted(['data_dir', 'filenames', 'encoding', 'n_samples', 'type'])
def test_get_search_emails_by_filename(app):
dsid, _ = parse_emails(app)
method = V01 + "/email-parser/{}/index".format(dsid)
for pars, indices in [
({ 'filenames': ['1', '2']}, [0, 1]),
({ 'filenames': ['5']}, [4])]:
res = app.post(method, json=pars)
assert res.status_code == 200
data = parse_res(res)
assert sorted(data.keys()) == sorted(['index'])
assert_equal(data['index'], indices)
#=============================================================================#
#
# Email Threading
#
#=============================================================================#
def test_api_thread_emails(app):
dsid, _ = parse_emails(app)
method = V01 + "/email-parser/{}".format(dsid)
res = app.get(method)
assert res.status_code == 200
data = parse_res(res) # TODO unused variable
url = V01 + "/email-threading"
pars = { 'parent_id': dsid }
res = app.post(url, json=pars)
assert res.status_code == 200
data = parse_res(res)
assert sorted(data.keys()) == sorted(['data', 'id'])
mid = data['id']
tree_ref = [ {'id': 0, 'parent': None, 'children': [
{'id': 1, 'children': [], 'parent': 0},
{'id': 2, 'parent': 0, 'children': [
{'id': 3, 'children': [], 'parent': 2},
{'id': 4, 'children': [], 'parent': 2}],
}]
}]
def remove_subject_field(d):
del d['subject']
for el in d['children']:
remove_subject_field(el)
tree_res = data['data']
for el in tree_res:
remove_subject_field(el)
assert data['data'] == tree_ref
url += '/{}'.format(mid)
res = app.get(url)
assert res.status_code == 200
data = parse_res(res)
assert sorted(data.keys()) == sorted(['group_by_subject'])
res = app.delete(method)
assert res.status_code == 200
| kcompher/FreeDiscovUI | freediscovery/server/tests/test_email_threading.py | Python | bsd-3-clause | 4,268 |
import os
import sys
import json
if __name__ == '__main__':
cfg_file_name = 'config_def.json'
    if len(sys.argv) > 1:  # config file name supplied on the command line
        cfg_file_name = sys.argv[1]
    if not os.path.isfile(cfg_file_name):
        print('Config file not found ({})'.format(cfg_file_name))
        sys.exit(1)
    data_url = None  # placeholder; the data source URL is expected to come from the config
with open(cfg_file_name) as cfg_fd:
cfg_obj = json.load(cfg_fd)
print(cfg_obj)
| mvwicky/acled | src/python/get_data.py | Python | mit | 395 |
# -*- coding: utf-8 -*-
"""API test module."""
#
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import datetime
import types
import pywikibot.data.api as api
import pywikibot.family
import pywikibot.login
import pywikibot.page
import pywikibot.site
from pywikibot.tools import (
MediaWikiVersion,
PY2,
UnicodeType,
)
from tests.aspects import (
unittest,
TestCase,
DefaultSiteTestCase,
DefaultDrySiteTestCase,
)
from tests.utils import allowed_failure, FakeLoginManager, PatchedHttp
if not PY2:
from urllib.parse import unquote_to_bytes
else:
from urllib import unquote_plus as unquote_to_bytes
class TestAPIMWException(DefaultSiteTestCase):
"""Test raising an APIMWException."""
data = {'error': {'code': 'internal_api_error_fake',
'info': 'Fake error message'},
'servedby': 'unittest',
}
def _dummy_request(self, **kwargs):
self.assertIn('body', kwargs)
self.assertIn('uri', kwargs)
self.assertIn('site', kwargs)
if kwargs['body'] is None:
# use uri and remove script path
parameters = kwargs['uri']
prefix = kwargs['site'].scriptpath() + '/api.php?'
self.assertEqual(prefix, parameters[:len(prefix)])
parameters = parameters[len(prefix):]
else:
parameters = kwargs['body']
parameters = parameters.encode('ascii') # it should be bytes anyway
# Extract parameter data from the body, it's ugly but allows us
# to verify that we actually test the right request
parameters = [p.split(b'=', 1) for p in parameters.split(b'&')]
keys = [p[0].decode('ascii') for p in parameters]
values = [unquote_to_bytes(p[1]) for p in parameters]
values = [v.decode(kwargs['site'].encoding()) for v in values]
values = [v.replace('+', ' ') for v in values]
values = [set(v.split('|')) for v in values]
parameters = dict(zip(keys, values))
if 'fake' not in parameters:
return False # do an actual request
if self.assert_parameters:
for param, value in self.assert_parameters.items():
self.assertIn(param, parameters)
if value is not None:
if isinstance(value, UnicodeType):
value = value.split('|')
self.assertLessEqual(set(value), parameters[param])
return self.data
def test_API_error(self):
"""Test a static request."""
req = api.Request(site=self.site, parameters={'action': 'query',
'fake': True})
with PatchedHttp(api, self.data):
self.assertRaises(api.APIMWException, req.submit)
def test_API_error_encoding_ASCII(self):
"""Test a Page instance as parameter using ASCII chars."""
page = pywikibot.page.Page(self.site, 'ASCII')
req = api.Request(site=self.site, parameters={'action': 'query',
'fake': True,
'titles': page})
self.assert_parameters = {'fake': ''}
with PatchedHttp(api, self._dummy_request):
self.assertRaises(api.APIMWException, req.submit)
def test_API_error_encoding_Unicode(self):
"""Test a Page instance as parameter using non-ASCII chars."""
page = pywikibot.page.Page(self.site, 'Ümlä üt')
req = api.Request(site=self.site, parameters={'action': 'query',
'fake': True,
'titles': page})
self.assert_parameters = {'fake': ''}
with PatchedHttp(api, self._dummy_request):
self.assertRaises(api.APIMWException, req.submit)
class TestApiFunctions(DefaultSiteTestCase):
"""API Request object test class."""
def testObjectCreation(self):
"""Test api.Request() constructor with implicit site creation."""
req = api.Request(action="test", foo="", bar="test")
self.assertTrue(req)
self.assertEqual(req.site, self.get_site())
class TestDryApiFunctions(DefaultDrySiteTestCase):
"""API Request object test class."""
def testObjectCreation(self):
"""Test api.Request() constructor."""
mysite = self.get_site()
req = api.Request(site=mysite, action="test", foo="", bar="test")
self.assertTrue(req)
self.assertEqual(req.site, mysite)
self.assertIn("foo", req._params)
self.assertEqual(req["bar"], ["test"])
# test item assignment
req["one"] = "1"
self.assertEqual(req._params['one'], ["1"])
# test compliance with dict interface
# req.keys() should contain "action", "foo", "bar", "one"
self.assertEqual(len(req.keys()), 4)
self.assertIn("test", req._encoded_items().values())
for item in req.items():
self.assertEqual(len(item), 2, item)
def test_mixed_mode(self):
"""Test if parameters is used with kwargs."""
req1 = api.Request(site=self.site, action='test', parameters='foo')
self.assertIn('parameters', req1._params)
req2 = api.Request(site=self.site, parameters={'action': 'test',
'parameters': 'foo'})
self.assertEqual(req2['parameters'], ['foo'])
self.assertEqual(req1._params, req2._params)
class TestParamInfo(DefaultSiteTestCase):
"""Test ParamInfo."""
def test_init(self):
"""Test common initialization."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertEqual(len(pi), 0)
pi._init()
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
self.assertEqual(len(pi),
len(pi.preloaded_modules))
self.assertIn('info', pi.query_modules)
self.assertIn('login', pi._action_modules)
def test_init_query_first(self):
"""Test init where it first adds query and then main."""
def patched_generate_submodules(modules):
# Change the query such that query is handled before main
modules = set(modules)
if 'main' in modules:
assert 'query' in modules
modules.discard('main')
modules = list(modules) + ['main']
else:
assert 'query' not in modules
original_generate_submodules(modules)
pi = api.ParamInfo(self.site, set(['query', 'main']))
self.assertEqual(len(pi), 0)
original_generate_submodules = pi._generate_submodules
pi._generate_submodules = patched_generate_submodules
pi._init()
self.assertIn('main', pi._paraminfo)
self.assertIn('query', pi._paraminfo)
def test_init_pageset(self):
"""Test initializing with only the pageset."""
site = self.get_site()
self.assertNotIn('query', api.ParamInfo.init_modules)
pi = api.ParamInfo(site, set(['pageset']))
self.assertNotIn('query', api.ParamInfo.init_modules)
self.assertEqual(len(pi), 0)
pi._init()
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
self.assertIn('pageset', pi._paraminfo)
if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
return
if 'query' in pi.preloaded_modules:
self.assertIn('query', pi._paraminfo)
self.assertEqual(len(pi), 4)
else:
self.assertNotIn('query', pi._paraminfo)
self.assertEqual(len(pi), 3)
self.assertEqual(len(pi),
len(pi.preloaded_modules))
if MediaWikiVersion(site.version()) >= MediaWikiVersion("1.21"):
# 'generator' was added to 'pageset' in 1.21
generators_param = pi.parameter('pageset', 'generator')
self.assertGreater(len(generators_param['type']), 1)
def test_generators(self):
"""Test requesting the generator parameter."""
site = self.get_site()
pi = api.ParamInfo(site, set(['pageset', 'query']))
self.assertEqual(len(pi), 0)
pi._init()
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
self.assertIn('pageset', pi._paraminfo)
self.assertIn('query', pi._paraminfo)
if MediaWikiVersion(site.version()) >= MediaWikiVersion("1.21"):
# 'generator' was added to 'pageset' in 1.21
pageset_generators_param = pi.parameter('pageset', 'generator')
query_generators_param = pi.parameter('query', 'generator')
self.assertEqual(pageset_generators_param, query_generators_param)
def test_with_module_info(self):
"""Test requesting the module info."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertEqual(len(pi), 0)
pi.fetch(['info'])
self.assertIn('query+info', pi._paraminfo)
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
self.assertEqual(len(pi),
1 + len(pi.preloaded_modules))
self.assertEqual(pi['info']['prefix'], 'in')
param = pi.parameter('info', 'prop')
self.assertIsInstance(param, dict)
self.assertEqual(param['name'], 'prop')
self.assertNotIn('deprecated', param)
self.assertIsInstance(param['type'], list)
if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
return
self.assertIn('protection', param['type'])
def test_with_module_revisions(self):
"""Test requesting the module revisions."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertEqual(len(pi), 0)
pi.fetch(['revisions'])
self.assertIn('query+revisions', pi._paraminfo)
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
self.assertEqual(len(pi),
1 + len(pi.preloaded_modules))
self.assertEqual(pi['revisions']['prefix'], 'rv')
param = pi.parameter('revisions', 'prop')
self.assertIsInstance(param, dict)
self.assertEqual(param['name'], 'prop')
self.assertNotIn('deprecated', param)
self.assertIsInstance(param['type'], list)
if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
return
self.assertIn('user', param['type'])
def test_multiple_modules(self):
"""Test requesting multiple modules in one fetch."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertEqual(len(pi), 0)
pi.fetch(['info', 'revisions'])
self.assertIn('query+info', pi._paraminfo)
self.assertIn('query+revisions', pi._paraminfo)
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
return
self.assertEqual(len(pi),
2 + len(pi.preloaded_modules))
def test_with_invalid_module(self):
"""Test requesting different kind of invalid modules."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertEqual(len(pi), 0)
pi.fetch('foobar')
self.assertNotIn('foobar', pi._paraminfo)
self.assertRaises(KeyError, pi.__getitem__, 'foobar')
self.assertRaises(KeyError, pi.__getitem__, 'foobar+foobar')
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
return
self.assertEqual(len(pi),
len(pi.preloaded_modules))
def test_submodules(self):
"""Test another module apart from query having submodules."""
pi = api.ParamInfo(self.site)
self.assertFalse(pi._modules)
pi.fetch(['query'])
self.assertIn('query', pi._modules)
self.assertIsInstance(pi._modules['query'], frozenset)
self.assertIn('revisions', pi._modules['query'])
self.assertEqual(pi.submodules('query'), pi.query_modules)
for mod in pi.submodules('query', True):
self.assertEqual(mod[:6], 'query+')
self.assertEqual(mod[6:], pi[mod]['name'])
self.assertEqual(mod, pi[mod]['path'])
self.assertRaises(KeyError, pi.__getitem__, 'query+foobar')
self.assertRaises(KeyError, pi.submodules, 'edit')
def test_query_modules_with_limits(self):
"""Test query_modules_with_limits property."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertIn('revisions', pi.query_modules_with_limits)
self.assertNotIn('info', pi.query_modules_with_limits)
def test_modules(self):
"""Test v1.8 modules exist."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertIn('revisions', pi.modules)
self.assertIn('help', pi.modules)
self.assertIn('allpages', pi.modules)
for mod in pi.modules:
self.assertNotIn('+', mod)
def test_module_paths(self):
"""Test module paths use the complete paths."""
pi = api.ParamInfo(self.site)
self.assertIn('help', pi.module_paths)
self.assertNotIn('revisions', pi.module_paths)
self.assertIn('query+revisions', pi.module_paths)
self.assertNotIn('allpages', pi.module_paths)
self.assertIn('query+allpages', pi.module_paths)
def test_prefixes(self):
"""Test v1.8 module prefixes exist."""
site = self.get_site()
pi = api.ParamInfo(site)
self.assertIn('revisions', pi.prefixes)
self.assertIn('login', pi.prefixes)
self.assertIn('allpages', pi.prefixes)
def test_prefix_map(self):
"""Test module prefixes use the path."""
pi = api.ParamInfo(self.site)
self.assertIn('query+revisions', pi.prefix_map)
self.assertIn('login', pi.prefix_map)
self.assertIn('query+allpages', pi.prefix_map)
for mod in pi.prefix_map:
self.assertEqual(mod, pi[mod]['path'])
def test_attributes(self):
"""Test attributes method."""
pi = api.ParamInfo(self.site)
attributes = pi.attributes('mustbeposted')
self.assertIn('edit', attributes)
for mod, value in attributes.items():
self.assertEqual(mod, pi[mod]['path'])
self.assertEqual(value, '')
def test_old_mode(self):
"""Test the old mode explicitly."""
site = self.get_site()
pi = api.ParamInfo(site, modules_only_mode=False)
pi.fetch(['info'])
self.assertIn('query+info', pi._paraminfo)
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
self.assertEqual(len(pi),
1 + len(pi.preloaded_modules))
self.assertIn('revisions', pi.prefixes)
def test_new_mode(self):
"""Test the new modules-only mode explicitly."""
site = self.get_site()
if MediaWikiVersion(site.version()) < MediaWikiVersion('1.25wmf4'):
raise unittest.SkipTest(
"version %s doesn't support the new paraminfo api"
% site.version())
pi = api.ParamInfo(site, modules_only_mode=True)
pi.fetch(['info'])
self.assertIn('query+info', pi._paraminfo)
self.assertIn('main', pi._paraminfo)
self.assertIn('paraminfo', pi._paraminfo)
self.assertEqual(len(pi),
1 + len(pi.preloaded_modules))
self.assertIn('revisions', pi.prefixes)
class TestOtherSubmodule(TestCase):
"""Test handling multiple different modules having submodules."""
family = 'mediawiki'
code = 'mediawiki'
def test_other_submodule(self):
"""Test another module apart from query having submodules."""
pi = api.ParamInfo(self.site)
self.assertFalse(pi._modules)
pi.fetch(['query'])
self.assertNotIn('flow', pi._modules)
pi.fetch(['flow'])
self.assertIn('flow', pi._modules)
other_modules = set()
for modules in pi._modules.values():
self.assertIsInstance(modules, frozenset)
other_modules |= modules
other_modules -= pi.action_modules
other_modules -= pi.query_modules
self.assertLessEqual(other_modules & pi.submodules('flow'),
pi.submodules('flow'))
self.assertFalse(other_modules & pi.modules)
class TestParaminfoModules(DefaultSiteTestCase):
"""Test loading all paraminfo modules."""
def test_action_modules(self):
"""Test loading all action modules."""
self.site._paraminfo.fetch(self.site._paraminfo.action_modules)
def test_query_modules(self):
"""Test loading all query modules."""
self.site._paraminfo.fetch(self.site._paraminfo.query_modules)
class TestOptionSet(TestCase):
"""OptionSet class test class."""
family = 'wikipedia'
code = 'en'
def test_non_lazy_load(self):
"""Test OptionSet with initialised site."""
options = api.OptionSet(self.get_site(), 'recentchanges', 'show')
self.assertRaises(KeyError, options.__setitem__, 'invalid_name', True)
self.assertRaises(ValueError, options.__setitem__, 'anon', 'invalid_value')
options['anon'] = True
self.assertCountEqual(['anon'], options._enabled)
self.assertEqual(set(), options._disabled)
self.assertEqual(1, len(options))
self.assertEqual(['anon'], list(options))
self.assertEqual(['anon'], list(options.api_iter()))
options['bot'] = False
self.assertCountEqual(['anon'], options._enabled)
self.assertCountEqual(['bot'], options._disabled)
self.assertEqual(2, len(options))
self.assertEqual(['anon', 'bot'], list(options))
self.assertEqual(['anon', '!bot'], list(options.api_iter()))
options.clear()
self.assertEqual(set(), options._enabled)
self.assertEqual(set(), options._disabled)
self.assertEqual(0, len(options))
self.assertEqual([], list(options))
self.assertEqual([], list(options.api_iter()))
def test_lazy_load(self):
"""Test OptionSet with delayed site initialisation."""
options = api.OptionSet()
options['invalid_name'] = True
options['anon'] = True
self.assertIn('invalid_name', options._enabled)
self.assertEqual(2, len(options))
self.assertRaises(KeyError, options._set_site, self.get_site(),
'recentchanges', 'show')
self.assertEqual(2, len(options))
options._set_site(self.get_site(), 'recentchanges', 'show', True)
self.assertEqual(1, len(options))
self.assertRaises(TypeError, options._set_site, self.get_site(),
'recentchanges', 'show')
class TestDryOptionSet(DefaultDrySiteTestCase):
"""OptionSet class test class."""
def test_mutable_mapping(self):
"""Test keys, values and items from MutableMapping."""
options = api.OptionSet()
options['a'] = True
options['b'] = False
options['c'] = None
self.assertCountEqual(['a', 'b'], list(options.keys()))
self.assertCountEqual([True, False], list(options.values()))
self.assertEqual(set(), set(options.values()) - set([True, False]))
self.assertCountEqual([('a', True), ('b', False)], list(options.items()))
class TestDryPageGenerator(TestCase):
"""Dry API PageGenerator object test class."""
family = 'wikipedia'
code = 'en'
dry = True
# api.py sorts 'pages' using the string key, which is not a
# numeric comparison.
titles = ("Broadcaster (definition)", "Wiktionary", "Broadcaster.com",
"Wikipedia:Disambiguation")
def setUp(self):
"""Set up test case."""
super(TestDryPageGenerator, self).setUp()
mysite = self.get_site()
self.gen = api.PageGenerator(site=mysite,
generator="links",
titles="User:R'n'B")
# following test data is copied from an actual api.php response,
# but that query no longer matches this dataset.
# http://en.wikipedia.org/w/api.php?action=query&generator=links&titles=User:R%27n%27B
self.gen.request.submit = types.MethodType(lambda self: {
"query": {"pages": {"296589": {"pageid": 296589,
"ns": 0,
"title": "Broadcaster.com"
},
"13918157": {"pageid": 13918157,
"ns": 0,
"title": "Broadcaster (definition)"
},
"156658": {"pageid": 156658,
"ns": 0,
"title": "Wiktionary"
},
"47757": {"pageid": 47757,
"ns": 4,
"title": "Wikipedia:Disambiguation"
}
}
}
}, self.gen.request)
# On a dry site, the namespace objects only have canonical names.
# Add custom_name for this site namespace, to match the live site.
if 'Wikipedia' not in self.site.namespaces:
self.site.namespaces[4].custom_name = 'Wikipedia'
self.site.namespaces._namespace_names['wikipedia'] = self.site.namespaces[4]
def test_results(self):
"""Test that PageGenerator yields pages with expected attributes."""
self.assertPagelistTitles(self.gen, self.titles)
def test_initial_limit(self):
"""Test the default limit."""
        self.assertEqual(self.gen.limit, None)  # limit is initially None
def test_set_limit_as_number(self):
"""Test setting the limit using an int."""
for i in range(-2, 4):
self.gen.set_maximum_items(i)
self.assertEqual(self.gen.limit, i)
def test_set_limit_as_string(self):
"""Test setting the limit using an int cast into a string."""
for i in range(-2, 4):
self.gen.set_maximum_items(str(i))
self.assertEqual(self.gen.limit, i)
def test_set_limit_not_number(self):
"""Test setting the limit to not a number."""
with self.assertRaisesRegex(
ValueError,
"invalid literal for int\(\) with base 10: 'test'"):
self.gen.set_maximum_items('test')
def test_limit_equal_total(self):
"""Test that PageGenerator yields the requested amount of pages."""
self.gen.set_maximum_items(4)
self.assertPagelistTitles(self.gen, self.titles)
def test_limit_one(self):
"""Test that PageGenerator yields the requested amount of pages."""
self.gen.set_maximum_items(1)
self.assertPagelistTitles(self.gen, self.titles[0:1])
def test_limit_zero(self):
"""Test that a limit of zero is the same as limit None."""
self.gen.set_maximum_items(0)
self.assertPagelistTitles(self.gen, self.titles)
def test_limit_omit(self):
"""Test that limit omitted is the same as limit None."""
self.gen.set_maximum_items(-1)
self.assertPagelistTitles(self.gen, self.titles)
def test_namespace(self):
"""Test PageGenerator set_namespace."""
self.assertRaises(AssertionError, self.gen.set_namespace, 0)
self.assertRaises(AssertionError, self.gen.set_namespace, 1)
self.assertRaises(AssertionError, self.gen.set_namespace, None)
class TestPropertyGenerator(TestCase):
"""API PropertyGenerator object test class."""
family = 'wikipedia'
code = 'en'
def test_info(self):
"""Test PropertyGenerator with prop 'info'."""
mainpage = self.get_mainpage()
links = list(self.site.pagelinks(mainpage, total=10))
titles = [l.title(withSection=False)
for l in links]
gen = api.PropertyGenerator(site=self.site,
prop="info",
titles='|'.join(titles))
count = 0
for pagedata in gen:
self.assertIsInstance(pagedata, dict)
self.assertIn('pageid', pagedata)
self.assertIn('lastrevid', pagedata)
count += 1
self.assertEqual(len(links), count)
def test_one_continuation(self):
"""Test PropertyGenerator with prop 'revisions'."""
mainpage = self.get_mainpage()
links = list(self.site.pagelinks(mainpage, total=10))
titles = [l.title(withSection=False)
for l in links]
gen = api.PropertyGenerator(site=self.site,
prop="revisions",
titles='|'.join(titles))
gen.set_maximum_items(-1) # suppress use of "rvlimit" parameter
count = 0
for pagedata in gen:
self.assertIsInstance(pagedata, dict)
self.assertIn('pageid', pagedata)
self.assertIn('revisions', pagedata)
self.assertIn('revid', pagedata['revisions'][0])
count += 1
self.assertEqual(len(links), count)
def test_two_continuations(self):
"""Test PropertyGenerator with prop 'revisions' and 'coordinates'."""
mainpage = self.get_mainpage()
links = list(self.site.pagelinks(mainpage, total=10))
titles = [l.title(withSection=False)
for l in links]
gen = api.PropertyGenerator(site=self.site,
prop="revisions|coordinates",
titles='|'.join(titles))
gen.set_maximum_items(-1) # suppress use of "rvlimit" parameter
count = 0
for pagedata in gen:
self.assertIsInstance(pagedata, dict)
self.assertIn('pageid', pagedata)
self.assertIn('revisions', pagedata)
self.assertIn('revid', pagedata['revisions'][0])
count += 1
self.assertEqual(len(links), count)
@allowed_failure
def test_many_continuations_limited(self):
"""Test PropertyGenerator with many limited props."""
mainpage = self.get_mainpage()
links = list(self.site.pagelinks(mainpage, total=30))
titles = [l.title(withSection=False)
for l in links]
gen = api.PropertyGenerator(site=self.site,
prop="revisions|info|categoryinfo|langlinks|templates",
rvprop="ids|flags|timestamp|user|comment|content",
titles='|'.join(titles))
# An APIError is raised if set_maximum_items is not called.
gen.set_maximum_items(-1) # suppress use of "rvlimit" parameter
# Force the generator into continuation mode
gen.set_query_increment(5)
count = 0
for pagedata in gen:
self.assertIsInstance(pagedata, dict)
self.assertIn('pageid', pagedata)
count += 1
self.assertEqual(len(links), count)
# FIXME: AssertionError: 30 != 6150
@allowed_failure
def test_two_continuations_limited(self):
"""Test PropertyGenerator with many limited props and continuations."""
# FIXME: test fails
mainpage = self.get_mainpage()
links = list(self.site.pagelinks(mainpage, total=30))
titles = [l.title(withSection=False)
for l in links]
gen = api.PropertyGenerator(site=self.site,
prop="info|categoryinfo|langlinks|templates",
titles='|'.join(titles))
# Force the generator into continuation mode
gen.set_query_increment(5)
count = 0
for pagedata in gen:
self.assertIsInstance(pagedata, dict)
self.assertIn('pageid', pagedata)
count += 1
self.assertEqual(len(links), count)
# FIXME: AssertionError: 30 != 11550
# FIXME: test disabled as it takes longer than 10 minutes
def _test_two_continuations_limited_long_test(self):
"""Long duration test, with total & step that are a real scenario."""
mainpage = self.get_mainpage()
links = list(mainpage.backlinks(total=300))
titles = [l.title(withSection=False)
for l in links]
gen = api.PropertyGenerator(site=self.site,
prop="info|categoryinfo|langlinks|templates",
titles='|'.join(titles))
# Force the generator into continuation mode
gen.set_query_increment(50)
count = 0
for pagedata in gen:
self.assertIsInstance(pagedata, dict)
self.assertIn('pageid', pagedata)
count += 1
self.assertEqual(len(links), count)
class TestDryListGenerator(TestCase):
"""Test ListGenerator."""
family = 'wikipedia'
code = 'en'
dry = True
def setUp(self):
"""Set up test case."""
super(TestDryListGenerator, self).setUp()
mysite = self.get_site()
mysite._paraminfo['query+allpages'] = {
'prefix': 'ap',
'limit': {'max': 10},
'namespace': {'multi': True}
}
mysite._paraminfo.query_modules_with_limits = set(['allpages'])
self.gen = api.ListGenerator(listaction="allpages", site=mysite)
def test_namespace_none(self):
"""Test ListGenerator set_namespace with None."""
self.assertRaises(TypeError, self.gen.set_namespace, None)
def test_namespace_zero(self):
"""Test ListGenerator set_namespace with 0."""
self.gen.set_namespace(0)
class TestCachedRequest(DefaultSiteTestCase):
"""Test API Request caching.
This test class does not use the forced test caching.
"""
cached = False
def test_normal_use(self):
"""Test the caching of CachedRequest with an ordinary request."""
mysite = self.get_site()
mainpage = self.get_mainpage()
# Run the cached query three times to ensure the
# data returned is equal, and the last two have
# the same cache time.
params = {'action': 'query',
'prop': 'info',
'titles': mainpage.title(),
}
req1 = api.CachedRequest(datetime.timedelta(minutes=10),
site=mysite, **params)
data1 = req1.submit()
req2 = api.CachedRequest(datetime.timedelta(minutes=10),
site=mysite, **params)
data2 = req2.submit()
req3 = api.CachedRequest(datetime.timedelta(minutes=10),
site=mysite, **params)
data3 = req3.submit()
self.assertEqual(data1, data2)
self.assertEqual(data2, data3)
self.assertIsNotNone(req2._cachetime)
self.assertIsNotNone(req3._cachetime)
self.assertEqual(req2._cachetime, req3._cachetime)
def test_internals(self):
"""Test the caching of CachedRequest by faking a unique request."""
mysite = self.get_site()
# Run tests on a missing page unique to this test run so it can
# not be cached the first request, but will be cached after.
now = datetime.datetime.now()
params = {'action': 'query',
'prop': 'info',
'titles': 'TestCachedRequest_test_internals ' + str(now),
}
req = api.CachedRequest(datetime.timedelta(minutes=10),
site=mysite, **params)
rv = req._load_cache()
self.assertFalse(rv)
self.assertIsNone(req._data)
self.assertIsNone(req._cachetime)
data = req.submit()
self.assertIsNotNone(req._data)
self.assertIsNone(req._cachetime)
rv = req._load_cache()
self.assertTrue(rv)
self.assertIsNotNone(req._data)
self.assertIsNotNone(req._cachetime)
self.assertGreater(req._cachetime, now)
self.assertEqual(req._data, data)
class TestLazyLoginBase(TestCase):
"""
Test that it tries to login when read API access is denied.
Because there is no such family configured it creates an AutoFamily and
BaseSite on it's own. It's testing against steward.wikimedia.org.
These tests are split into two subclasses as only the first failed login
behaves as expected. All subsequent logins will raise an APIError, making
it impossible to test two scenarios with the same APISite object.
"""
hostname = 'steward.wikimedia.org'
@classmethod
def setUpClass(cls):
"""Set up steward Family."""
super(TestLazyLoginBase, cls).setUpClass()
fam = pywikibot.family.AutoFamily(
'steward', 'https://steward.wikimedia.org/w/api.php')
cls.site = pywikibot.site.APISite('steward', fam)
class TestLazyLoginNotExistUsername(TestLazyLoginBase):
"""Test missing username."""
# FIXME: due to limitations of LoginManager, it will ask the user
# for a password even if the username does not exist, and even if
# pywikibot is not connected to a tty. T100964
def setUp(self):
"""Patch the LoginManager to avoid UI interaction."""
super(TestLazyLoginNotExistUsername, self).setUp()
self.orig_login_manager = pywikibot.data.api.LoginManager
pywikibot.data.api.LoginManager = FakeLoginManager
def tearDown(self):
"""Restore the original LoginManager."""
pywikibot.data.api.LoginManager = self.orig_login_manager
super(TestLazyLoginNotExistUsername, self).tearDown()
def test_access_denied_notexist_username(self):
"""Test the query with a username which does not exist."""
self.site._username = ['Not registered username', None]
req = api.Request(site=self.site, action='query')
self.assertRaises(pywikibot.NoUsername, req.submit)
# FIXME: T100965
self.assertRaises(api.APIError, req.submit)
class TestLazyLoginNoUsername(TestLazyLoginBase):
"""Test no username."""
def test_access_denied_no_username(self):
"""Test the query without a username."""
self.site._username = [None, None]
# FIXME: The following prevents LoginManager
# from loading the username from the config when the site
# username is None. i.e. site.login(user=None) means load
# username from the configuration.
if 'steward' in pywikibot.config.usernames:
del pywikibot.config.usernames['steward']
req = api.Request(site=self.site, action='query')
self.assertRaises(pywikibot.NoUsername, req.submit)
# FIXME: T100965
self.assertRaises(api.APIError, req.submit)
class TestBadTokenRecovery(TestCase):
"""Test that the request recovers from bad tokens."""
family = 'wikipedia'
code = 'test'
write = True
def test_bad_token(self):
"""Test the bad token recovery by corrupting the cache."""
site = self.get_site()
site.tokens._tokens.setdefault(site.user(), {})['edit'] = 'INVALID'
page = pywikibot.Page(site, 'Pywikibot bad token test')
page.text = ('This page is testing whether pywikibot-core rerequests '
'a token when a badtoken error was received.')
page.save(summary='Bad token test')
class TestUrlEncoding(TestCase):
"""Test encode_url() function."""
net = False
def test_url_encoding_from_list(self):
"""Test moving 'token' parameters from a list to the end."""
query = [('action', 'edit'), ('token', 'a'), ('supertoken', 'b'),
('text', 'text')]
expect = 'action=edit&text=text&token=a&supertoken=b'
result = api.encode_url(query)
self.assertEqual(result, expect)
self.assertIsInstance(result, str)
def test_url_encoding_from_dict(self):
"""Test moving 'token' parameters from a dict to the end."""
        # do not add other keys because dictionary ordering is not deterministic
query = {'supertoken': 'b', 'text': 'text'}
expect = 'text=text&supertoken=b'
result = api.encode_url(query)
self.assertEqual(result, expect)
self.assertIsInstance(result, str)
def test_url_encoding_from_unicode(self):
"""Test encoding unicode values."""
query = {'token': 'токен'}
expect = 'token=%D1%82%D0%BE%D0%BA%D0%B5%D0%BD'
result = api.encode_url(query)
self.assertEqual(result, expect)
self.assertIsInstance(result, str)
def test_url_encoding_from_basestring(self):
"""Test encoding basestring values."""
if PY2:
query = {'token': str('test\xe2\x80\x94test'.encode('utf-8'))}
else:
query = {'token': 'test\xe2\x80\x94test'}
expect = str('token=test%C3%A2%C2%80%C2%94test')
result = api.encode_url(query)
self.assertEqual(result, expect)
self.assertIsInstance(result, str)
def test_moving_special_tokens(self):
"""Test moving wpEditToken to the very end."""
query = {'wpEditToken': 'c', 'token': 'b', 'text': 'a'}
expect = 'text=a&token=b&wpEditToken=c'
result = api.encode_url(query)
self.assertEqual(result, expect)
self.assertIsInstance(result, str)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| darthbhyrava/pywikibot-local | tests/api_tests.py | Python | mit | 38,915 |
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Mostapha Sadeghipour Roudsari <[email protected]>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to calculate date information from an hour of the year. Date information includes the day of the month, the month of the year and the hour of the day.
-
Provided by Ladybug 0.0.61
Args:
HOY: A number between 1 and 8760 that represents an hour of the year.
Returns:
day: The day of the month on which the input HOY falls.
month: The month of the year on which the input HOY falls.
hour: The hour of the day on which the input HOY falls.
date: The input information written out as a full date and time text string.
"""
ghenv.Component.Name = "Ladybug_Day_Month_Hour"
ghenv.Component.NickName = 'Day_Month_Hour_Calculator'
ghenv.Component.Message = 'VER 0.0.61\nNOV_05_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "4 | Extra"
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "4"
except: pass
import scriptcontext as sc
from clr import AddReference
AddReference('Grasshopper')
import Grasshopper.Kernel as gh
def main(HOY):
# import the classes
if sc.sticky.has_key('ladybug_release'):
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Ladybug to use this compoent." + \
"Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
lb_preparation = sc.sticky["ladybug_Preparation"]()
day = []
month = []
hour = []
date = []
for hoy in HOY:
d, m, t = lb_preparation.hour2Date(hoy, True)
day.append(d)
month.append(m + 1)
hour.append(t)
date.append(lb_preparation.hour2Date(hoy))
return day, month, hour, date
else:
print "You should first let the Ladybug fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let the Ladybug fly...")
return -1
result = main(_HOY)
if result!=-1: day, month, hour, date = result
| boris-p/ladybug | src/Ladybug_Day_Month_Hour.py | Python | gpl-3.0 | 3,385 |
#!/usr/bin/env python
import numpy as np
SEED_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
SEED_ALPHABET_DICT = dict((c, i) for i, c in enumerate(SEED_ALPHABET))
SEED_MAX = 'ZZZZZ'
SEED_MAX_CHAR_LEN = 5 # ZZZZZ is under max uint32, ZZZZZZ is above max uint32
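# Sanity check for the bound above (illustrative): with the 62-character alphabet,
#   'ZZZZZ'  decodes to 62**5 - 1 = 916,132,831, which is below 2**32 - 1 = 4,294,967,295,
#   'ZZZZZZ' would decode to 62**6 - 1 = 56,800,235,583, which is above it,
# so five characters is the longest seed guaranteed to fit in an unsigned 32-bit integer.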
## Dice roll + modifier
#
# Rolls N number of D dice, adding M as a modifier to the result.
# @param num Number of dice to roll.
# @param die Which sided die to roll.
# @param mod Modifier to add to the roll sum result. Default is 0.
def dice_roll(num,die,mod=0):
return(sum(np.random.random_integers(1,die,num))+mod)
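# Example (illustrative): dice_roll(3, 6, 2) rolls 3d6+2, so the result always lies
# between 5 (three ones plus the modifier) and 20 (three sixes plus the modifier).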
## Random Seed
#
# Randomly selects a seed string and then sets it as the seed.
def random_seed():
randomSeedUInt = np.random.random_integers(0,seed_alphabet_decode(SEED_MAX))
randomSeedString = seed_alphabet_encode(randomSeedUInt)
set_seed(randomSeedUInt)
return(randomSeedString)
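# Example (illustrative): random_seed() might return a string such as '3fA9k'; the same
# run can be reproduced later with set_seed(seed_alphabet_decode('3fA9k')).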
## Random seed alphabet decode
#
# Decodes a seed into an unsigned integer.
# @param seedString String to be decoded.
def seed_alphabet_decode(seedString):
# Check length
if (len(seedString)>SEED_MAX_CHAR_LEN):
raise(InvalidSeedLengthError("Seed length exceeds max allowed: length %s and max %s" % (len(seedString),SEED_MAX_CHAR_LEN)))
# Check for invalid characters
for char in seedString:
if (char not in SEED_ALPHABET):
raise(InvalidSeedCharError("Invalid seed character: %s in %s" % (char,seedString)))
# Convert to uInt
reverse_base = SEED_ALPHABET_DICT
length = len(reverse_base)
ret = 0
for i, c in enumerate(seedString[::-1]):
ret += (length ** i) * reverse_base[c]
return(ret)
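# Example (illustrative): decoding is plain base-62 with '0'-'9', 'a'-'z', 'A'-'Z' as digits,
# so seed_alphabet_decode('10') == 62 and seed_alphabet_decode('ZZZZZ') == 916132831.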
## Random seed alphabet encode
#
# Encodes an unsigned integer into the seed alphabet.
# @param seedUInt Integer to be encoded.
def seed_alphabet_encode(seedUInt):
if (seedUInt<0):
raise(InvalidSeedNumberError("Negative number: %i" % seedUInt))
if (seedUInt>seed_alphabet_decode(SEED_MAX)):
raise(InvalidSeedNumberError("Seed too large: %i" % seedUInt))
base=SEED_ALPHABET
length = len(base)
ret = ''
while seedUInt != 0:
ret = base[seedUInt % length] + ret
        seedUInt //= length  # floor division keeps this integer-exact under Python 2 and 3
return(ret)
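# Example (illustrative): seed_alphabet_encode(916132831) == 'ZZZZZ', and encode/decode
# round-trip, e.g. seed_alphabet_decode(seed_alphabet_encode(12345)) == 12345.
# Note that 0 encodes to the empty string because the while loop body never runs.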
## Set random number generator seed
#
# Set the seed for the numpy random number generator.
# @param seedInt
def set_seed(seedInt):
np.random.seed(seedInt)
## Invalid seed character exception class.
class InvalidSeedCharError(Exception):
pass
## Invalid seed length exception class.
class InvalidSeedLengthError(Exception):
pass
## Invalid seed number exception class.
class InvalidSeedNumberError(Exception):
    pass
| jimmayjr/swn-gen | swn/random.py | Python | isc | 2,665 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('web', '0010_auto_20150303_0046'),
]
operations = [
migrations.RenameField(
model_name='foro',
old_name='comentario',
new_name='ccomentario',
),
migrations.AddField(
model_name='comentario',
name='cforo',
field=models.ForeignKey(blank=True, to='web.foro', null=True),
preserve_default=True,
),
]
| palichis/elmolino | web/migrations/0011_auto_20150303_0050.py | Python | gpl-2.0 | 603 |
import datetime
import time
from urllib.request import urlopen
isGame = True
#get all games for the day
url = 'http://live.nhle.com/GameData/GCScoreboard/'+str(datetime.date.today())+'.jsonp'
#url = 'http://live.nhle.com/GameData/GCScoreboard/2015-10-08.jsonp'
socket = urlopen(url)
html = socket.read().decode()
socket.close()
if '"id"' not in html:
print("No Games")
isGame = False
else:
if '"CHI"' not in html: #abbr
isGame = False
print("No Blackhawks Game")
lines = html.split("},{")
i = 0
while i < len(lines):
if '"CHICAGO"' in lines[i]:
iid = lines[i].index('"id":')
id = lines[i][iid+5:iid+15]
print("GID: " + id)
break
else:
i = i+1
#import pyglet
#sound = pyglet.media.load('gh.mp3', streaming=False)
#sound.play()
#pyglet.app.run()
while isGame:
#get scoreboard for selected game
url = 'http://live.nhle.com/GameData/20152016/'+id+'/gc/gcsb.jsonp'
socket = urlopen(url)
html = socket.read().decode()
socket.close()
print(html)
#get box score for selected game
url = 'http://live.nhle.com/GameData/20152016/'+id+'/gc/gcbx.jsonp'
socket = urlopen(url)
html = socket.read().decode()
socket.close()
print(html)
url = 'http://live.nhle.com/GameData/GCScoreboard/'+str(datetime.date.today())+'.jsonp'
#url = 'http://live.nhle.com/GameData/GCScoreboard/2015-10-08.jsonp'
socket = urlopen(url)
html = socket.read().decode()
socket.close()
print(html)
#refresh delay
time.sleep(30)
| nsquared25/nhl-score-stream-python | main.py | Python | mit | 1,575 |
# Copyright 2012 - 2013 Zarafa B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation with the following additional
# term according to sec. 7:
#
# According to sec. 7 of the GNU Affero General Public License, version
# 3, the terms of the AGPL are supplemented with the following terms:
#
# "Zarafa" is a registered trademark of Zarafa B.V. The licensing of
# the Program under the AGPL does not imply a trademark license.
# Therefore any rights, title and interest in our trademarks remain
# entirely with us.
#
# However, if you propagate an unmodified version of the Program you are
# allowed to use the term "Zarafa" to indicate that you distribute the
# Program. Furthermore you may use our trademarks where it is necessary
# to indicate the intended purpose of a product or service provided you
# use it in accordance with honest practices in industrial or commercial
# matters. If you want to propagate modified versions of the Program
# under the name "Zarafa" or "Zarafa Server", you may only do so if you
# have a written permission by Zarafa B.V. (to acquire a permission
# please contact Zarafa at [email protected]).
#
# The interactive user interface of the software displays an attribution
# notice containing the term "Zarafa" and/or the logo of Zarafa.
# Interactive user interfaces of unmodified and modified versions must
# display Appropriate Legal Notices according to sec. 5 of the GNU
# Affero General Public License, version 3, when you propagate
# unmodified or modified versions of the Program. In accordance with
# sec. 7 b) of the GNU Affero General Public License, version 3, these
# Appropriate Legal Notices must retain the logo of Zarafa or display
# the words "Initial Development by Zarafa" if the display of the logo
# is not reasonably feasible for technical reasons. The use of the logo
# of Zarafa in Legal Notices is allowed for unmodified and modified
# versions of the software.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from libzsm.rest_client.utils import get_api
from libzsm.rest_client.exc import Http403
from common import ApiTestBase
class AuthorizationTest(ApiTestBase):
def __init__(self, *args, **kwargs):
super(AuthorizationTest, self).__init__(*args, **kwargs)
self.s = get_api()
def setUp(self):
        ''' Trans [Hank (adm), Jeeves]                       # NOQA
# NOQA
| # NOQA
v # NOQA
# NOQA
Wheels [Rob] -> Cars [Jack] # NOQA
# NOQA
| # NOQA
v # NOQA
# NOQA
Bikes [Harry] # NOQA
# NOQA
Refer to the diagram:
https://confluence.zarafa.com/pages/viewpage.action?pageId=20841313
'''
## Hank is a tenant admin
data = dict(
name=u'trans',
)
self.ten_trans = self.s.create_tenant(initial=data)
data = dict(
username=u'hank',
password=u'nk',
name=u'Hank',
surname=u'R',
tenant=self.ten_trans,
userServer=self.server1,
)
self.trans_hank = self.s.create_user(initial=data)
data = {
'permissions': [
'ViewContact',
'ViewGroup',
'ViewGroupPrivileges',
'ViewTenant',
'ViewTenantAcl',
'ViewUser',
'ViewUserPrivileges',
'WriteContact',
'WriteGroup',
'WriteGroupPrivileges',
'WriteTenant',
'WriteTenantAcl',
'WriteUser',
'WriteUserPrivileges',
],
'user': self.trans_hank.resourceUri,
}
self.s.add_tenant_ace(self.ten_trans, data)
data = [
u'CreateTenant',
]
self.s.put_user_privs(self.trans_hank, data)
self.s_trans_hank = self.s.get_session(self.trans_hank)
## Jeeves is Hank's butler
data = dict(
username=u'jeeves',
password=u'jv',
name=u'Jeeves',
surname=u'H',
tenant=self.ten_trans,
userServer=self.server1,
)
self.trans_jeeves = self.s_trans_hank.create_user(initial=data)
self.s_trans_jeeves = self.s.get_session(self.trans_jeeves)
## Trans has a customer Wheels with a user Rob
data = dict(
name=u'wheels',
)
self.ten_wheels = self.s_trans_hank.create_tenant(initial=data)
data = dict(
username=u'rob',
password=u'rb',
name=u'Rob',
surname=u'Dole',
tenant=self.ten_wheels,
userServer=self.server1,
)
self.wheels_rob = self.s_trans_hank.create_user(initial=data)
data = [
u'CreateTenant',
]
self.s_trans_hank.put_user_privs(self.wheels_rob, data)
self.s_wheels_rob = self.s.get_session(self.wheels_rob)
## Wheels has a customer Bikes with a user Harry
data = dict(
name=u'bikes',
)
self.ten_bikes = self.s_wheels_rob.create_tenant(initial=data)
data = dict(
username=u'harry',
password=u'hr',
name=u'Harry',
surname=u'W',
tenant=self.ten_bikes,
userServer=self.server1,
)
self.bikes_harry = self.s_wheels_rob.create_user(initial=data)
self.s_bikes_harry = self.s.get_session(self.bikes_harry)
## Wheels has a customer Cars with a user Jack
data = dict(
name=u'cars',
)
self.ten_cars = self.s_wheels_rob.create_tenant(initial=data)
data = dict(
username=u'jack',
password=u'jk',
name=u'Jack',
surname=u'Hicks',
tenant=self.ten_cars,
userServer=self.server1,
)
self.cars_jack = self.s_wheels_rob.create_user(initial=data)
self.s_cars_jack = self.s.get_session(self.cars_jack)
## Set some handy groupings
self.all_tenants = [
self.ten_trans,
self.ten_wheels,
self.ten_bikes,
self.ten_cars,
]
def tearDown(self):
self.s_wheels_rob.delete_tenant(self.ten_bikes)
self.s_wheels_rob.delete_tenant(self.ten_cars)
self.s_trans_hank.delete_tenant(self.ten_wheels)
self.s.delete_tenant(self.ten_trans)
def test_neg_tenant_access(self):
## Hank only sees the tenants he created
tens = self.s_trans_hank.all_tenant()
self.assertEqual(2, len(tens), u'Incorrect number of tenants.')
self.verify_iterable(tens, [self.ten_trans, self.ten_wheels])
## Jeeves sees no tenants
tens = self.s_trans_jeeves.all_tenant()
self.assertEqual(0, len(tens), u'Incorrect number of tenants.')
## Rob sees Bikes and Cars
tens = self.s_wheels_rob.all_tenant()
self.assertEqual(2, len(tens), u'Incorrect number of tenants.')
self.verify_iterable(tens, [self.ten_bikes, self.ten_cars])
## Harry sees no tenants
tens = self.s_bikes_harry.all_tenant()
self.assertEqual(0, len(tens), u'Incorrect number of tenants.')
## Jack sees no tenants
tens = self.s_cars_jack.all_tenant()
self.assertEqual(0, len(tens), u'Incorrect number of tenants.')
## Hank can access Trans and Wheels, not Bikes or Cars
self.s_trans_hank.get_tenant(id=self.ten_trans.id)
self.s_trans_hank.get_tenant(id=self.ten_wheels.id)
with self.assertRaises(Http403):
self.s_trans_hank.get_tenant(id=self.ten_bikes.id)
with self.assertRaises(Http403):
self.s_trans_hank.get_tenant(id=self.ten_cars.id)
## Rob cannot access Trans nor Wheels, only Bikes and Cars
with self.assertRaises(Http403):
self.s_wheels_rob.get_tenant(id=self.ten_trans.id)
with self.assertRaises(Http403):
self.s_wheels_rob.get_tenant(id=self.ten_wheels.id)
self.s_wheels_rob.get_tenant(id=self.ten_bikes.id)
self.s_wheels_rob.get_tenant(id=self.ten_cars.id)
## Jeeves, Harry and Jack cannot access any tenants
sessions = [
self.s_trans_jeeves,
self.s_bikes_harry,
self.s_cars_jack,
]
for session in sessions:
for tenant in self.all_tenants:
with self.assertRaises(Http403):
session.get_tenant(id=tenant.id)
def test_neg_tenant_creation(self):
## Jeeves, Harry and Jack cannot create tenants
sessions = [
self.s_trans_jeeves,
self.s_bikes_harry,
self.s_cars_jack,
]
for session in sessions:
with self.assertRaises(Http403):
data = dict(
name=u'dummy',
)
session.create_tenant(initial=data)
def test_neg_user_access(self):
## Jeeves, Harry and Jack cannot access users on any tenant
sessions = [
self.s_trans_jeeves,
self.s_bikes_harry,
self.s_cars_jack,
]
for session in sessions:
for tenant in self.all_tenants:
with self.assertRaises(Http403):
session.all_user(tenant=tenant)
## Jeeves, Harry and Jack cannot create users on any tenant
sessions = [
self.s_trans_jeeves,
self.s_bikes_harry,
self.s_cars_jack,
]
for session in sessions:
for tenant in self.all_tenants:
with self.assertRaises(Http403):
data = dict(
username=u'dummy',
name=u'Dummy',
surname=u'H',
tenant=tenant,
userServer=self.server1,
)
session.create_user(initial=data)
## Rob cannot create users in Wheels
with self.assertRaises(Http403):
data = dict(
username=u'dummy',
name=u'Dummy',
surname=u'H',
tenant=self.ten_wheels,
userServer=self.server1,
)
self.s_wheels_rob.create_user(initial=data)
| zarafagroupware/zarafa-zsm | tests/tests_authorization.py | Python | agpl-3.0 | 11,327 |
#Given two arrays, write a function to compute their intersection.
import collections
class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
dict = collections.defaultdict(int)
for i in nums1:
dict[i]+=1
ans=[]
for i in nums2:
if dict[i]>0:
ans.append(i)
dict[i]-=1
        return ans
| 95subodh/Leetcode | 350. Intersection of Two Arrays II.py | Python | mit | 372 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from osv import fields
from osv import osv
from tools.translate import _
try:
    import gdata
    import gdata.contacts.service
    import gdata.contacts
except ImportError:
    # osv and _ are imported above so this error can be raised with a proper message
    raise osv.except_osv(_('Google Contacts Import Error!'), _('Please install gdata-python-client from http://code.google.com/p/gdata-python-client/downloads/list'))
from import_google import google_import
class google_login_contact(osv.osv_memory):
_inherit = 'google.login'
def _get_next_action(self, cr, uid, context=None):
data_obj = self.pool.get('ir.model.data')
data_id = data_obj._get_id(cr, uid, 'import_google', 'view_synchronize_google_contact_import_form')
view_id = False
if context.get('contact'):
data_id = data_obj._get_id(cr, uid, 'import_google', 'view_synchronize_google_contact_import_form')
if context.get('calendar'):
data_id = data_obj._get_id(cr, uid, 'import_google', 'view_synchronize_google_calendar_import_form')
if data_id:
view_id = data_obj.browse(cr, uid, data_id, context=context).res_id
value = {
'name': _('Import google'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'synchronize.google.import',
'view_id': False,
'context': context,
'views': [(view_id, 'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
return value
google_login_contact()
class synchronize_google(osv.osv_memory):
_name = 'synchronize.google.import'
def _get_group(self, cr, uid, context=None):
user_obj = self.pool.get('res.users').browse(cr, uid, uid)
google=self.pool.get('google.login')
if not user_obj.gmail_user or not user_obj.gmail_password:
raise osv.except_osv(_('Warning !'), _("No Google Username or password Defined for user.\nPlease define in user view"))
gd_client = google.google_login(user_obj.gmail_user,user_obj.gmail_password,type='group')
if not gd_client:
return [('failed', 'Connection to google fail')]
res = []
query = gdata.contacts.service.GroupsQuery(feed='/m8/feeds/groups/default/full')
if gd_client:
groups = gd_client.GetFeed(query.ToUri())
for grp in groups.entry:
res.append((grp.id.text, grp.title.text))
res.append(('all','All Groups'))
return res
def _get_calendars(self, cr, uid, context=None):
user_obj = self.pool.get('res.users').browse(cr, uid, uid)
google = self.pool.get('google.login')
res = []
try:
gd_client = google.google_login(user_obj.gmail_user, user_obj.gmail_password, type='calendar')
calendars = gd_client.GetAllCalendarsFeed()
for cal in calendars.entry:
res.append((cal.id.text, cal.title.text))
except Exception, e:
return [('failed', 'Connection to google fail')]
res.append(('all', 'All Calendars'))
return res
_columns = {
'create_partner': fields.selection([('create_all','Create partner for each contact'),('create_address','Import only address')],'Options'),
'customer': fields.boolean('Customer', help="Check this box to set newly created partner as Customer."),
'supplier': fields.boolean('Supplier', help="Check this box to set newly created partner as Supplier."),
        'group_name': fields.selection(_get_group, "Group Name", help="Choose which group to import. By default all groups are imported."),
'calendar_name': fields.selection(_get_calendars, "Calendar Name"),
}
_defaults = {
'create_partner': 'create_all',
'group_name': 'all',
}
def import_google(self, cr, uid, ids, context=None):
if context == None:
context = {}
if not ids:
return {'type': 'ir.actions.act_window_close'}
obj = self.browse(cr, uid, ids, context=context)[0]
cust = obj.customer
sup = obj.supplier
tables=[]
user_obj = self.pool.get('res.users').browse(cr, uid, uid)
gmail_user = user_obj.gmail_user
gmail_pwd = user_obj.gmail_password
google = self.pool.get('google.login')
if not gmail_user or not gmail_pwd:
raise osv.except_osv(_('Error'), _("Invalid login detail !\n Specify Username/Password."))
if context.get('contact'):
msg = " You're Contact are import in background, a email will be send when the process is finished to %s"%(user_obj.gmail_user)
gd_client = google.google_login(gmail_user, gmail_pwd, type='contact')
if not gd_client:
raise osv.except_osv(_('Error'), _("Please specify correct user and password !"))
if obj.group_name not in ['all']:
context.update({ 'group_name': obj.group_name})
if obj.create_partner=='create_all':
tables.append('Contact')
else:
tables.append('Address')
context.update({'user': gmail_user,
'password': gmail_pwd,
'instance': 'contact',
'table':tables,
'customer':cust,
'supplier':sup})
elif context.get('calendar'):
msg = " You're Meeting are import in background, a email will be send when the process is finished to %s"%(user_obj.gmail_user)
tables.append('Events')
current_rec = self.browse(cr, uid, ids, context=context)
calendars = False
for rec in current_rec:
if rec.calendar_name != 'all':
calendars = [rec.calendar_name]
else:
calendars = map(lambda x: x[0], [cal for cal in self._get_calendars(cr, uid, context) if cal[0] != 'all'])
context.update({'user': gmail_user,
'password': gmail_pwd,
'calendars': calendars,
'instance': 'calendar'})
imp = google_import(self, cr, uid,'import_google' , "synchronize_google", gmail_user, context)
imp.set_table_list(tables)
imp.start()
context.update({'message': msg})
obj_model = self.pool.get('ir.model.data')
model_data_ids = obj_model.search(cr,uid,[('model','=','ir.ui.view'),('name','=','view_google_import_message_form')])
resource_id = obj_model.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'google.import.message',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new',
'context': context,
}
synchronize_google()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| crmccreary/openerp_server | openerp/addons/import_google/wizard/import_google_data.py | Python | agpl-3.0 | 8,149 |