text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---|
# explore.py
"""
Module containing functionality for exploratory data analysis and visualization.
"""
import seaborn as sns
import matplotlib.pyplot as plt
__all__ = [
'class_proportions',
'see_nulls',
'distplots',
'pairplots'
]
# ====== Data Statistics
def class_proportions(y):
"""
Function to calculate the proportion of classes for a given set of class labels. Returns a dictionary of class
proportions where the keys are the labels and the values are the percentage of the total number of samples that
occur for each class label.
    :param y: List of class labels (typically ints in classification problems, but strings are also accepted)
:return:
"""
if not isinstance(y, list):
y = list(y)
counts_dict = {i: y.count(i) for i in y}
prop_dict = {}
    for key, val in counts_dict.items():
print('Class: %10s | counts: %i (%0.2f%%)' % (key, val, (100 * val / len(y))))
prop_dict[key] = (100 * val / len(y))
print('Total number of samples:', len(y))
return prop_dict
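# Example (hypothetical labels; with Python 3 true division the percentages
# are floats):
#   >>> class_proportions([0, 0, 1, 1, 1])
#   Class:          0 | counts: 2 (40.00%)
#   Class:          1 | counts: 3 (60.00%)
#   Total number of samples: 5
#   {0: 40.0, 1: 60.0}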
# ====== Visualization
def see_nulls(df):
"""
Function to visualize columns with null values for features in a pandas DataFrame
:param df: pandas DataFrame with feature data
:return:
"""
plt.figure(figsize=(14, 9))
sns.heatmap(df.isnull(), cmap='viridis', yticklabels=False, xticklabels=True, cbar=True)
plt.title("Visualization of Null Values in Data")
plt.xticks(rotation=30)
plt.show()
return None
def distplots(df, features):
"""
Function to show the distribution of a selected feature(s)
:param df: Dataframe containing features
:param features: (str/list): Feature(s) to be plotted in a distribution plot
:return:
"""
if not isinstance(features, list):
title_str = features
features = [features]
else:
title_str = ", ".join(features)
ax_label = ""
for feature in features:
ax_label += ('| %s |' % feature)
sns.distplot(df[feature].values, label=feature, norm_hist=True)
    plt.xlabel(ax_label)
plt.legend(fontsize=12)
plt.title('Distribution of %s' % title_str)
plt.show()
def pairplots(df, features, kind='reg', diag_kind='kde'):
"""
Function to make a quick pairplot of selected features
:param df: DataFrame containing the feature matrix
:param features: (str/list) Features selected for inclusion in pairplot.
:param kind: (str) Kind of plot for the non-identity relationships ('scatter', 'reg').
:param diag_kind: (str) Kind of plot for the diagonal subplots ('hist', 'kde').
:return:
"""
if not isinstance(features, list):
features = [features]
data = df[features]
sns.pairplot(data=data, vars=features, kind=kind,
diag_kind=diag_kind, dropna=True)
plt.show()
| xfaxca/pymlkit | pymlkit/preproc/eda.py | Python | gpl-3.0 | 2,894 | 0.003455 |
#!/usr/bin/python
# coding=utf-8
"""
Project MCM - Micro Content Management
SDOS - Secure Delete Object Store
Copyright (C) <2016> Tim Waizenegger, <University of Stuttgart>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
import logging, coloredlogs, sys
from mcm.sdos import configuration
log_format = '%(asctime)s %(module)s %(name)s[%(process)d][%(thread)d] %(levelname)s %(message)s'
field_styles = {'module': {'color': 'magenta'}, 'hostname': {'color': 'magenta'}, 'programname': {'color': 'cyan'},
'name': {'color': 'blue'}, 'levelname': {'color': 'black', 'bold': True}, 'asctime': {'color': 'green'}}
coloredlogs.install(level=configuration.log_level, fmt=log_format, field_styles=field_styles)
#logging.getLogger("werkzeug").setLevel(level=logging.WARNING)
#logging.getLogger("swiftclient").setLevel(level=logging.WARNING)
"""
logging.basicConfig(level=configuration.log_level, format=configuration.log_format)
"""
logging.error("###############################################################################")
logging.error("SDOS service running")
logging.error("Python {}".format(sys.version))
logging.error("###############################################################################")
| timwaizenegger/osecm-sdos | mcm/__init__.py | Python | mit | 1,307 | 0.012242 |
# -*- coding: utf-8 -*-
import os,logging
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext.db import Model as DBModel
from google.appengine.api import memcache
from google.appengine.api import mail
from google.appengine.api import urlfetch
from google.appengine.api import datastore
from datetime import datetime
import urllib, hashlib,urlparse
import zipfile,re,pickle,uuid
#from base import *
logging.info('module base reloaded')
rootpath=os.path.dirname(__file__)
def vcache(key="",time=3600):
def _decorate(method):
def _wrapper(*args, **kwargs):
if not g_blog.enable_memcache:
return method(*args, **kwargs)
            result = memcache.get(key)
            if result is None:
                result = method(*args, **kwargs)
                memcache.set(key, result, time)
            return result
return _wrapper
return _decorate
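# Example: cache a zero-argument method's result in memcache for `time`
# seconds (see Blog.hotposts further below for real usages):
#
#     @vcache("blog.hotposts", 3600)
#     def hotposts(self): ...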
class Theme:
def __init__(self, name='default'):
self.name = name
self.mapping_cache = {}
self.dir = '/themes/%s' % name
self.viewdir=os.path.join(rootpath, 'view')
self.server_dir = os.path.join(rootpath, 'themes',self.name)
if os.path.exists(self.server_dir):
self.isZip=False
else:
self.isZip=True
self.server_dir =self.server_dir+".zip"
#self.server_dir=os.path.join(self.server_dir,"templates")
logging.debug('server_dir:%s'%self.server_dir)
def __getattr__(self, name):
if self.mapping_cache.has_key(name):
return self.mapping_cache[name]
else:
path ="/".join((self.name,'templates', name + '.html'))
logging.debug('path:%s'%path)
## if not os.path.exists(path):
## path = os.path.join(rootpath, 'themes', 'default', 'templates', name + '.html')
## if not os.path.exists(path):
## path = None
self.mapping_cache[name]=path
return path
class ThemeIterator:
def __init__(self, theme_path='themes'):
self.iterating = False
self.theme_path = theme_path
self.list = []
def __iter__(self):
return self
def next(self):
if not self.iterating:
self.iterating = True
self.list = os.listdir(self.theme_path)
self.cursor = 0
if self.cursor >= len(self.list):
self.iterating = False
raise StopIteration
else:
value = self.list[self.cursor]
self.cursor += 1
if value.endswith('.zip'):
value=value[:-4]
return value
#return (str(value), unicode(value))
class LangIterator:
def __init__(self,path='locale'):
self.iterating = False
self.path = path
self.list = []
for value in os.listdir(self.path):
if os.path.isdir(os.path.join(self.path,value)):
if os.path.exists(os.path.join(self.path,value,'LC_MESSAGES')):
try:
lang=open(os.path.join(self.path,value,'language')).readline()
self.list.append({'code':value,'lang':lang})
except:
self.list.append( {'code':value,'lang':value})
def __iter__(self):
return self
def next(self):
if not self.iterating:
self.iterating = True
self.cursor = 0
if self.cursor >= len(self.list):
self.iterating = False
raise StopIteration
else:
value = self.list[self.cursor]
self.cursor += 1
return value
def getlang(self,language):
from django.utils.translation import to_locale
for item in self.list:
if item['code']==language or item['code']==to_locale(language):
return item
return {'code':'en_US','lang':'English'}
class BaseModel(db.Model):
def __init__(self, parent=None, key_name=None, _app=None, **kwds):
self.__isdirty = False
        DBModel.__init__(self, parent=parent, key_name=key_name, _app=_app, **kwds)
def __setattr__(self,attrname,value):
"""
DataStore api stores all prop values say "email" is stored in "_email" so
we intercept the set attribute, see if it has changed, then check for an
onchanged method for that property to call
"""
if (attrname.find('_') != 0):
if hasattr(self,'_' + attrname):
curval = getattr(self,'_' + attrname)
if curval != value:
self.__isdirty = True
if hasattr(self,attrname + '_onchange'):
getattr(self,attrname + '_onchange')(curval,value)
DBModel.__setattr__(self,attrname,value)
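    # Subclasses opt in by defining a hook named <property>_onchange; e.g.
    # Entry.slug_onchange(self, curval, newval) below is invoked automatically
    # whenever entry.slug is assigned a different value.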
class Cache(db.Model):
cachekey = db.StringProperty(multiline=False)
content = db.TextProperty()
class Blog(db.Model):
owner = db.UserProperty()
author=db.StringProperty(default='admin')
rpcuser=db.StringProperty(default='admin')
rpcpassword=db.StringProperty(default='')
description = db.TextProperty()
baseurl = db.StringProperty(multiline=False,default=None)
urlpath = db.StringProperty(multiline=False)
title = db.StringProperty(multiline=False,default='Micolog')
subtitle = db.StringProperty(multiline=False,default='This is a micro blog.')
entrycount = db.IntegerProperty(default=0)
posts_per_page= db.IntegerProperty(default=10)
feedurl = db.StringProperty(multiline=False,default='/feed')
blogversion = db.StringProperty(multiline=False,default='0.30')
theme_name = db.StringProperty(multiline=False,default='default')
enable_memcache = db.BooleanProperty(default = False)
link_format=db.StringProperty(multiline=False,default='%(year)s/%(month)s/%(day)s/%(postname)s.html')
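    # With the default link_format above, a post dated 2008-09-27 with
    # postname 'hello-world' gets the link '2008/09/27/hello-world.html'
    # (see Entry.save for the substitution dict).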
comment_notify_mail=db.BooleanProperty(default=True)
    # Comment display order (0 = oldest first, non-zero = newest first)
comments_order=db.IntegerProperty(default=0)
    # Number of comments per page
comments_per_page=db.IntegerProperty(default=20)
    # Comment check type: 0 - none, 1 - arithmetic, 2 - captcha, 3 - client-side computation
comment_check_type=db.IntegerProperty(default=1)
#0 default 1 identicon
avatar_style=db.IntegerProperty(default=0)
blognotice=db.TextProperty(default='')
domain=db.StringProperty()
show_excerpt=db.BooleanProperty(default=True)
version=0.736
timedelta=db.FloatProperty(default=8.0)# hours
language=db.StringProperty(default="en-us")
sitemap_entries=db.IntegerProperty(default=30)
sitemap_include_category=db.BooleanProperty(default=False)
sitemap_include_tag=db.BooleanProperty(default=False)
sitemap_ping=db.BooleanProperty(default=False)
default_link_format=db.StringProperty(multiline=False,default='?p=%(post_id)s')
default_theme=Theme("default")
allow_pingback=db.BooleanProperty(default=False)
allow_trackback=db.BooleanProperty(default=False)
theme=None
langs=None
application=None
def __init__(self,
parent=None,
key_name=None,
_app=None,
_from_entity=False,
**kwds):
from micolog_plugin import Plugins
self.plugins=Plugins(self)
db.Model.__init__(self,parent,key_name,_app,_from_entity,**kwds)
def tigger_filter(self,name,content,*arg1,**arg2):
return self.plugins.tigger_filter(name,content,blog=self,*arg1,**arg2)
def tigger_action(self,name,*arg1,**arg2):
return self.plugins.tigger_action(name,blog=self,*arg1,**arg2)
def tigger_urlmap(self,url,*arg1,**arg2):
return self.plugins.tigger_urlmap(url,blog=self,*arg1,**arg2)
def get_ziplist(self):
        return self.plugins.get_ziplist()
def save(self):
self.put()
def initialsetup(self):
self.title = 'Your Blog Title'
self.subtitle = 'Your Blog Subtitle'
def get_theme(self):
        self.theme = Theme(self.theme_name)
return self.theme
def get_langs(self):
self.langs=LangIterator()
return self.langs
def cur_language(self):
return self.get_langs().getlang(self.language)
def rootpath(self):
return rootpath
@vcache("blog.hotposts")
def hotposts(self):
return Entry.all().filter('entrytype =','post').filter("published =", True).order('-readtimes').fetch(8)
@vcache("blog.recentposts")
def recentposts(self):
return Entry.all().filter('entrytype =','post').filter("published =", True).order('-date').fetch(8)
@vcache("blog.postscount")
def postscount(self):
return Entry.all().filter('entrytype =','post').filter("published =", True).order('-date').count()
class Category(db.Model):
uid=db.IntegerProperty()
name=db.StringProperty(multiline=False)
slug=db.StringProperty(multiline=False)
parent_cat=db.SelfReferenceProperty()
@property
def posts(self):
return Entry.all().filter('entrytype =','post').filter("published =", True).filter('categorie_keys =',self)
@property
def count(self):
return self.posts.count()
def put(self):
db.Model.put(self)
g_blog.tigger_action("save_category",self)
def delete(self):
for entry in Entry.all().filter('categorie_keys =',self):
entry.categorie_keys.remove(self.key())
entry.put()
for cat in Category.all().filter('parent_cat =',self):
cat.delete()
db.Model.delete(self)
g_blog.tigger_action("delete_category",self)
def ID(self):
try:
id=self.key().id()
if id:
return id
except:
pass
if self.uid :
return self.uid
else:
            # Older Category entities have no ID; generate one for WordPress compatibility
from random import randint
uid=randint(0,99999999)
cate=Category.all().filter('uid =',uid).get()
while cate:
uid=randint(0,99999999)
cate=Category.all().filter('uid =',uid).get()
self.uid=uid
print uid
self.put()
return uid
@classmethod
def get_from_id(cls,id):
cate=Category.get_by_id(id)
if cate:
return cate
else:
cate=Category.all().filter('uid =',id).get()
return cate
@property
def children(self):
key=self.key()
return [c for c in Category.all().filter('parent_cat =',self)]
@classmethod
def allTops(self):
return [c for c in Category.all() if not c.parent_cat]
class Archive(db.Model):
monthyear = db.StringProperty(multiline=False)
year = db.StringProperty(multiline=False)
month = db.StringProperty(multiline=False)
entrycount = db.IntegerProperty(default=0)
date = db.DateTimeProperty(auto_now_add=True)
class Tag(db.Model):
tag = db.StringProperty(multiline=False)
tagcount = db.IntegerProperty(default=0)
@property
def posts(self):
        return Entry.all().filter('entrytype =','post').filter("published =", True).filter('tags =',self)
@classmethod
def add(cls,value):
if value:
tag= Tag.get_by_key_name(value)
if not tag:
tag=Tag(key_name=value)
tag.tag=value
tag.tagcount+=1
tag.put()
return tag
else:
return None
@classmethod
def remove(cls,value):
if value:
tag= Tag.get_by_key_name(value)
if tag:
if tag.tagcount>1:
tag.tagcount-=1
tag.put()
else:
tag.delete()
class Link(db.Model):
href = db.StringProperty(multiline=False,default='')
linktype = db.StringProperty(multiline=False,default='blogroll')
linktext = db.StringProperty(multiline=False,default='')
linkcomment = db.StringProperty(multiline=False,default='')
createdate=db.DateTimeProperty(auto_now=True)
@property
def get_icon_url(self):
"get ico url of the wetsite"
ico_path = '/favicon.ico'
ix = self.href.find('/',len('http://') )
return (ix>0 and self.href[:ix] or self.href ) + ico_path
def put(self):
db.Model.put(self)
g_blog.tigger_action("save_link",self)
def delete(self):
db.Model.delete(self)
g_blog.tigger_action("delete_link",self)
class Entry(BaseModel):
author = db.UserProperty()
author_name = db.StringProperty()
published = db.BooleanProperty(default=False)
content = db.TextProperty(default='')
readtimes = db.IntegerProperty(default=0)
title = db.StringProperty(multiline=False,default='')
date = db.DateTimeProperty(auto_now_add=True)
mod_date = db.DateTimeProperty(auto_now_add=True)
tags = db.StringListProperty()
categorie_keys=db.ListProperty(db.Key)
slug = db.StringProperty(multiline=False,default='')
link= db.StringProperty(multiline=False,default='')
monthyear = db.StringProperty(multiline=False)
entrytype = db.StringProperty(multiline=False,default='post',choices=[
'post','page'])
entry_parent=db.IntegerProperty(default=0)#When level=0 show on main menu.
menu_order=db.IntegerProperty(default=0)
commentcount = db.IntegerProperty(default=0)
trackbackcount = db.IntegerProperty(default=0)
allow_comment = db.BooleanProperty(default=True) #allow comment
#allow_pingback=db.BooleanProperty(default=False)
allow_trackback=db.BooleanProperty(default=True)
password=db.StringProperty()
#compatible with wordpress
is_wp=db.BooleanProperty(default=False)
post_id= db.IntegerProperty()
excerpt=db.StringProperty(multiline=True)
#external page
is_external_page=db.BooleanProperty(default=False)
target=db.StringProperty(default="_self")
external_page_address=db.StringProperty()
#keep in top
sticky=db.BooleanProperty(default=False)
postname=''
_relatepost=None
@property
def content_excerpt(self):
return self.get_content_excerpt(_('..more').decode('utf8'))
def get_author_user(self):
if not self.author:
self.author=g_blog.owner
return User.all().filter('email =',self.author.email()).get()
def get_content_excerpt(self,more='..more'):
if g_blog.show_excerpt:
if self.excerpt:
return self.excerpt+' <a href="/%s">%s</a>'%(self.link,more)
else:
sc=self.content.split('<!--more-->')
if len(sc)>1:
return sc[0]+u' <a href="/%s">%s</a>'%(self.link,more)
else:
return sc[0]
else:
return self.content
def slug_onchange(self,curval,newval):
if not (curval==newval):
self.setpostname(newval)
def setpostname(self,newval):
#check and fix double slug
if newval:
slugcount=Entry.all()\
.filter('entrytype',self.entrytype)\
.filter('date <',self.date)\
.filter('slug =',newval)\
.filter('published',True)\
.count()
if slugcount>0:
self.postname=newval+str(slugcount)
else:
self.postname=newval
else:
self.postname=""
@property
def fullurl(self):
        return g_blog.baseurl + '/' + self.link
@property
def categories(self):
try:
return db.get(self.categorie_keys)
except:
return []
@property
def post_status(self):
return self.published and 'publish' or 'draft'
def settags(self,values):
if not values:tags=[]
if type(values)==type([]):
tags=values
else:
tags=values.split(',')
if not self.tags:
removelist=[]
addlist=tags
else:
#search different tags
removelist=[n for n in self.tags if n not in tags]
addlist=[n for n in tags if n not in self.tags]
for v in removelist:
Tag.remove(v)
for v in addlist:
Tag.add(v)
self.tags=tags
def get_comments_by_page(self,index,psize):
return self.purecomments().fetch(psize,offset = (index-1) * psize)
@property
def strtags(self):
return ','.join(self.tags)
@property
def edit_url(self):
return '/admin/%s?key=%s&action=edit'%(self.entrytype,self.key())
def comments(self):
if g_blog.comments_order:
return Comment.all().filter('entry =',self).order('-date')
else:
return Comment.all().filter('entry =',self).order('date')
def purecomments(self):
if g_blog.comments_order:
return Comment.all().filter('entry =',self).filter('ctype =',0).order('-date')
else:
return Comment.all().filter('entry =',self).filter('ctype =',0).order('date')
def trackcomments(self):
if g_blog.comments_order:
return Comment.all().filter('entry =',self).filter('ctype IN',[1,2]).order('-date')
else:
return Comment.all().filter('entry =',self).filter('ctype IN',[1,2]).order('date')
def commentsTops(self):
return [c for c in self.purecomments() if c.parent_key()==None]
def delete_comments(self):
cmts = Comment.all().filter('entry =',self)
for comment in cmts:
comment.delete()
self.commentcount = 0
self.trackbackcount = 0
def update_commentno(self):
cmts = Comment.all().filter('entry =',self).order('date')
i=1
for comment in cmts:
comment.no=i
i+=1
comment.store()
def update_archive(self,cnt=1):
"""Checks to see if there is a month-year entry for the
month of current blog, if not creates it and increments count"""
my = self.date.strftime('%B %Y') # September-2008
sy = self.date.strftime('%Y') #2008
sm = self.date.strftime('%m') #09
archive = Archive.all().filter('monthyear',my).get()
if self.entrytype == 'post':
if not archive:
archive = Archive(monthyear=my,year=sy,month=sm,entrycount=1)
self.monthyear = my
archive.put()
else:
# ratchet up the count
archive.entrycount += cnt
archive.put()
g_blog.entrycount+=cnt
g_blog.put()
def save(self,is_publish=False):
"""
Use this instead of self.put(), as we do some other work here
        @is_publish: whether the entry should be published
"""
g_blog.tigger_action("pre_save_post",self,is_publish)
my = self.date.strftime('%B %Y') # September 2008
self.monthyear = my
old_publish=self.published
self.mod_date=datetime.now()
if is_publish:
if not self.is_wp:
self.put()
self.post_id=self.key().id()
#fix for old version
if not self.postname:
self.setpostname(self.slug)
vals={'year':self.date.year,'month':str(self.date.month).zfill(2),'day':self.date.day,
'postname':self.postname,'post_id':self.post_id}
if self.entrytype=='page':
if self.slug:
self.link=self.postname
else:
#use external page address as link
if self.is_external_page:
self.link=self.external_page_address
else:
self.link=g_blog.default_link_format%vals
else:
if g_blog.link_format and self.postname:
self.link=g_blog.link_format.strip()%vals
else:
self.link=g_blog.default_link_format%vals
self.published=is_publish
self.put()
if is_publish:
if g_blog.sitemap_ping:
Sitemap_NotifySearch()
if old_publish and not is_publish:
self.update_archive(-1)
if not old_publish and is_publish:
self.update_archive(1)
self.removecache()
self.put()
g_blog.tigger_action("save_post",self,is_publish)
def removecache(self):
memcache.delete('/')
memcache.delete('/'+self.link)
memcache.delete('/sitemap')
memcache.delete('blog.postcount')
g_blog.tigger_action("clean_post_cache",self)
@property
def next(self):
return Entry.all().filter('entrytype =','post').filter("published =", True).order('date').filter('date >',self.date).fetch(1)
@property
def prev(self):
return Entry.all().filter('entrytype =','post').filter("published =", True).order('-date').filter('date <',self.date).fetch(1)
@property
def relateposts(self):
if self._relatepost:
return self._relatepost
else:
if self.tags:
self._relatepost= Entry.gql("WHERE published=True and tags IN :1 and post_id!=:2 order by post_id desc ",self.tags,self.post_id).fetch(5)
else:
self._relatepost= []
return self._relatepost
@property
def trackbackurl(self):
if self.link.find("?")>-1:
return g_blog.baseurl+"/"+self.link+"&code="+str(self.key())
else:
return g_blog.baseurl+"/"+self.link+"?code="+str(self.key())
def getbylink(self):
pass
def delete(self):
g_blog.tigger_action("pre_delete_post",self)
if self.published:
self.update_archive(-1)
self.delete_comments()
db.Model.delete(self)
g_blog.tigger_action("delete_post",self)
class User(db.Model):
user = db.UserProperty(required = False)
dispname = db.StringProperty()
email=db.StringProperty()
website = db.LinkProperty()
isadmin=db.BooleanProperty(default=False)
isAuthor=db.BooleanProperty(default=True)
#rpcpwd=db.StringProperty()
def __unicode__(self):
#if self.dispname:
return self.dispname
#else:
# return self.user.nickname()
def __str__(self):
return self.__unicode__().encode('utf-8')
COMMENT_NORMAL=0
COMMENT_TRACKBACK=1
COMMENT_PINGBACK=2
class Comment(db.Model):
entry = db.ReferenceProperty(Entry)
date = db.DateTimeProperty(auto_now_add=True)
content = db.TextProperty(required=True)
author=db.StringProperty()
email=db.EmailProperty()
weburl=db.URLProperty()
status=db.IntegerProperty(default=0)
reply_notify_mail=db.BooleanProperty(default=False)
ip=db.StringProperty()
ctype=db.IntegerProperty(default=COMMENT_NORMAL)
no=db.IntegerProperty(default=0)
comment_order=db.IntegerProperty(default=1)
@property
def mpindex(self):
count=self.entry.commentcount
no=self.no
if g_blog.comments_order:
no=count-no+1
index=no / g_blog.comments_per_page
if no % g_blog.comments_per_page or no==0:
index+=1
return index
@property
def shortcontent(self,len=20):
scontent=self.content
scontent=re.sub(r'<br\s*/>',' ',scontent)
scontent=re.sub(r'<[^>]+>','',scontent)
scontent=re.sub(r'(@[\S]+)-\d{2,7}',r'\1:',scontent)
        return scontent[:len].replace('<','&lt;').replace('>','&gt;')
def gravatar_url(self):
# Set your variables here
if g_blog.avatar_style==0:
default = g_blog.baseurl+'/static/images/homsar.jpeg'
else:
default='identicon'
if not self.email:
return default
size = 50
try:
# construct the url
imgurl = "http://www.gravatar.com/avatar/"
imgurl +=hashlib.md5(self.email.lower()).hexdigest()+"?"+ urllib.urlencode({
'd':default, 's':str(size),'r':'G'})
return imgurl
except:
return default
def save(self):
self.put()
self.entry.commentcount+=1
self.comment_order=self.entry.commentcount
if (self.ctype == COMMENT_TRACKBACK) or (self.ctype == COMMENT_PINGBACK):
self.entry.trackbackcount+=1
self.entry.put()
memcache.delete("/"+self.entry.link)
return True
def delit(self):
self.entry.commentcount-=1
if self.entry.commentcount<0:
self.entry.commentcount = 0
if (self.ctype == COMMENT_TRACKBACK) or (self.ctype == COMMENT_PINGBACK):
self.entry.trackbackcount-=1
if self.entry.trackbackcount<0:
self.entry.trackbackcount = 0
self.entry.put()
self.delete()
def put(self):
g_blog.tigger_action("pre_comment",self)
db.Model.put(self)
g_blog.tigger_action("save_comment",self)
def delete(self):
db.Model.delete(self)
g_blog.tigger_action("delete_comment",self)
@property
def children(self):
key=self.key()
comments=Comment.all().ancestor(self)
return [c for c in comments if c.parent_key()==key]
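    # store() below writes the entity through the low-level datastore API,
    # bypassing the overridden put()/delete() hooks of this class, so no
    # plugin actions fire (used by Entry.update_commentno when renumbering).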
def store(self, **kwargs):
rpc = datastore.GetRpcFromKwargs(kwargs)
self._populate_internal_entity()
return datastore.Put(self._entity, rpc=rpc)
class Media(db.Model):
name =db.StringProperty()
mtype=db.StringProperty()
bits=db.BlobProperty()
date=db.DateTimeProperty(auto_now_add=True)
download=db.IntegerProperty(default=0)
@property
def size(self):
return len(self.bits)
class OptionSet(db.Model):
name=db.StringProperty()
value=db.TextProperty()
#blobValue=db.BlobProperty()
#isBlob=db.BooleanProperty()
@classmethod
def getValue(cls,name,default=None):
try:
opt=OptionSet.get_by_key_name(name)
return pickle.loads(str(opt.value))
except:
return default
@classmethod
def setValue(cls,name,value):
opt=OptionSet.get_or_insert(name)
opt.name=name
opt.value=pickle.dumps(value)
opt.put()
@classmethod
def remove(cls,name):
opt= OptionSet.get_by_key_name(name)
if opt:
opt.delete()
NOTIFICATION_SITES = [
('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap')
]
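# Each tuple above is (scheme, host, path, query_map, fragment,
# name-of-the-query-parameter-that-receives-the-sitemap-url); see how
# Sitemap_NotifySearch below unpacks indices 0-5 to build the ping URL.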
def Sitemap_NotifySearch():
""" Send notification of the new Sitemap(s) to the search engines. """
url=g_blog.baseurl+"/sitemap"
    # Cycle through notifications; the tuple layout is documented at the
    # NOTIFICATION_SITES definition above.
for ping in NOTIFICATION_SITES:
query_map = ping[3]
query_attr = ping[5]
query_map[query_attr] = url
query = urllib.urlencode(query_map)
notify = urlparse.urlunsplit((ping[0], ping[1], ping[2], query, ping[4]))
# Send the notification
logging.info('Notifying search engines. %s'%ping[1])
logging.info('url: %s'%notify)
try:
result = urlfetch.fetch(notify)
if result.status_code == 200:
logging.info('Notify Result: %s' % result.content)
if result.status_code == 404:
logging.info('HTTP error 404: Not Found')
logging.warning('Cannot contact: %s' % ping[1])
except :
logging.error('Cannot contact: %s' % ping[1])
def InitBlogData():
global g_blog
OptionSet.setValue('PluginActive',[u'googleAnalytics', u'wordpress', u'sys_plugin'])
g_blog = Blog(key_name = 'default')
g_blog.domain=os.environ['HTTP_HOST']
g_blog.baseurl="http://"+g_blog.domain
g_blog.feedurl=g_blog.baseurl+"/feed"
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
lang="zh-cn"
if os.environ.has_key('HTTP_ACCEPT_LANGUAGE'):
lang=os.environ['HTTP_ACCEPT_LANGUAGE'].split(',')[0]
from django.utils.translation import activate,to_locale
g_blog.language=to_locale(lang)
from django.conf import settings
settings._target = None
activate(g_blog.language)
g_blog.save()
entry=Entry(title=_("Hello world!").decode('utf8'))
entry.content=_('<p>Welcome to micolog. This is your first post. Edit or delete it, then start blogging!</p>').decode('utf8')
entry.save(True)
link=Link(href='http://xuming.net',linktext=_("Xuming's blog").decode('utf8'))
link.put()
return g_blog
def gblog_init():
global g_blog
try:
if g_blog :
return g_blog
except:
pass
g_blog = Blog.get_by_key_name('default')
if not g_blog:
g_blog=InitBlogData()
g_blog.get_theme()
g_blog.rootdir=os.path.dirname(__file__)
return g_blog
try:
g_blog=gblog_init()
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.utils.translation import activate
from django.conf import settings
settings._target = None
activate(g_blog.language)
except:
pass
| mozillazg/mzgblog | model.py | Python | mit | 30,298 | 0.018222 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
import datetime
import mock
import gtk
from stoqlib.api import api
from stoq.gui.purchase import PurchaseApp
from stoq.gui.test.baseguitest import BaseGUITest
from stoqlib.domain.purchase import PurchaseItem, PurchaseOrder, PurchaseOrderView
from stoqlib.domain.receiving import (ReceivingOrderItem, ReceivingOrder,
PurchaseReceivingMap)
from stoqlib.gui.dialogs.purchasedetails import PurchaseDetailsDialog
from stoqlib.gui.search.searchresultview import SearchResultListView
from stoqlib.gui.wizards.consignmentwizard import ConsignmentWizard
from stoqlib.gui.wizards.productwizard import ProductCreateWizard
from stoqlib.gui.wizards.purchasefinishwizard import PurchaseFinishWizard
from stoqlib.gui.wizards.purchasequotewizard import QuotePurchaseWizard
from stoqlib.gui.wizards.purchasewizard import PurchaseWizard
from stoqlib.reporting.purchase import PurchaseReport
class TestPurchase(BaseGUITest):
def create_app(self, *args, **kwargs):
app = BaseGUITest.create_app(self, *args, **kwargs)
app.branch_filter.combo.select_item_by_data(None)
return app
def test_initial(self):
app = self.create_app(PurchaseApp, u'purchase')
for purchase in app.results:
purchase.open_date = datetime.datetime(2012, 1, 1)
self.check_app(app, u'purchase')
def test_select(self):
self.create_purchase_order()
app = self.create_app(PurchaseApp, u'purchase')
results = app.results
results.select(results[0])
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
def test_edit_quote_order(self, run_dialog):
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
purchase = self.create_purchase_order()
app = self.create_app(PurchaseApp, u'purchase')
for purchase in app.results:
purchase.open_date = datetime.datetime(2012, 1, 1)
olist = app.results
olist.select(olist[0])
with mock.patch('stoq.gui.purchase.api', new=self.fake.api):
self.fake.set_retval(purchase)
self.activate(app.NewQuote)
self.assertEquals(run_dialog.call_count, 1)
args, kwargs = run_dialog.call_args
wizard, store, edit_mode = args
self.assertEquals(wizard, QuotePurchaseWizard)
self.assertTrue(store is not None)
self.assertEquals(edit_mode, None)
@mock.patch('stoq.gui.purchase.PurchaseApp.print_report')
def test_print_report(self, print_report):
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
self.activate(app.window.Print)
self.assertEquals(print_report.call_count, 1)
args, kwargs = print_report.call_args
report, results, views = args
self.assertEquals(report, PurchaseReport)
self.assertTrue(isinstance(results, SearchResultListView))
for view in views:
self.assertTrue(isinstance(view, PurchaseOrderView))
@mock.patch('stoq.gui.purchase.PurchaseApp.select_result')
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_new_quote_order(self, new_store, run_dialog, select_result):
new_store.return_value = self.store
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
quotation = self.create_quotation()
quotation.purchase.add_item(self.create_sellable(), 2)
quotation.purchase.status = PurchaseOrder.ORDER_PENDING
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
self.store.retval = olist[0]
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.Edit)
run_dialog.assert_called_once_with(PurchaseWizard,
self.store,
quotation.purchase, False)
select_result.assert_called_once_with(olist[0])
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
def test_details_dialog(self, run_dialog):
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
olist.double_click(0)
self.assertEquals(run_dialog.call_count, 1)
args, kwargs = run_dialog.call_args
dialog, store = args
self.assertEquals(dialog, PurchaseDetailsDialog)
self.assertTrue(store is not None)
self.assertEquals(kwargs[u'model'], purchase)
@mock.patch('stoq.gui.purchase.yesno')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_confirm_order(self, new_store, yesno):
new_store.return_value = self.store
yesno.return_value = True
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
purchase.status = PurchaseOrder.ORDER_PENDING
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.Confirm)
yesno.assert_called_once_with(u'The selected order will be '
u'marked as sent.',
gtk.RESPONSE_YES,
u"Confirm order", u"Don't confirm")
self.assertEquals(purchase.status, PurchaseOrder.ORDER_CONFIRMED)
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_finish_order(self, new_store, run_dialog):
new_store.return_value = self.store
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
purchase.get_items()[0].quantity_received = 2
purchase.status = PurchaseOrder.ORDER_CONFIRMED
purchase.received_quantity = 2
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.Finish)
run_dialog.assert_called_once_with(PurchaseFinishWizard,
self.store, purchase)
@mock.patch('stoq.gui.purchase.yesno')
@mock.patch('stoq.gui.purchase.api.new_store')
def test_cancel_order(self, new_store, yesno):
new_store.return_value = self.store
yesno.return_value = True
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
purchase.status = PurchaseOrder.ORDER_PENDING
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.Cancel)
yesno.assert_called_once_with(u'The selected order will be '
u'cancelled.', gtk.RESPONSE_YES,
u"Cancel order", u"Don't cancel")
self.assertEquals(purchase.status, PurchaseOrder.ORDER_CANCELLED)
@mock.patch('stoqlib.gui.wizards.productwizard.run_dialog')
@mock.patch('stoqlib.gui.wizards.productwizard.api.new_store')
def test_new_product(self, new_store, run_dialog):
run_dialog.return_value = False
new_store.return_value = self.store
self.clean_domain([ReceivingOrderItem, PurchaseReceivingMap,
ReceivingOrder, PurchaseItem, PurchaseOrder])
purchase = self.create_purchase_order()
purchase.add_item(self.create_sellable(), 2)
purchase.status = PurchaseOrder.ORDER_PENDING
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
app = self.create_app(PurchaseApp, u'purchase')
olist = app.results
olist.select(olist[0])
with mock.patch.object(self.store, 'close'):
with mock.patch.object(self.store, 'commit'):
self.activate(app.NewProduct)
run_dialog.assert_called_once_with(ProductCreateWizard,
app, self.store)
@mock.patch('stoq.gui.purchase.PurchaseApp.run_dialog')
def test_new_consignment(self, run_dialog):
api.sysparam.set_bool(self.store, 'SMART_LIST_LOADING', False)
purchase = self.create_purchase_order()
app = self.create_app(PurchaseApp, u'purchase')
for purchase in app.results:
purchase.open_date = datetime.datetime(2012, 1, 1)
olist = app.results
olist.select(olist[0])
with mock.patch('stoq.gui.purchase.api', new=self.fake.api):
self.fake.set_retval(purchase)
self.activate(app.NewConsignment)
self.assertEquals(run_dialog.call_count, 1)
args, kwargs = run_dialog.call_args
wizard, store = args
self.assertEquals(wizard, ConsignmentWizard)
self.assertTrue(store is not None)
self.assertEquals(kwargs[u'model'], None)
| andrebellafronte/stoq | stoq/gui/test/test_purchase.py | Python | gpl-2.0 | 11,536 | 0.00156 |
import argparse, requests, sys, configparser, zipfile, os, shutil
from urllib.parse import urlparse, parse_qs
appname="ConverterUpdater"
author="Leo Durrant (2017)"
builddate="05/10/17"
version="0.1a"
release="alpha"
filesdelete=['ConUpdate.py', 'Converter.py', 'LBT.py', 'ConverterGUI.py', 'LBTGUI.py']
directoriesdelete=['convlib\\', 'LBTLIB\\', "data\\images\\", "data\\text\\"]
def readvaluefromconfig(filename, section, valuename):
try:
config = configparser.ConfigParser()
config.read(filename)
try:
val = config[section][valuename]
return val
except Exception as e:
print("Cannot find value %s in %s. Check %s.\n Exception: %s" % (valuename, section, filename, str(e)))
return None
except Exception as e:
print("Cannot read %s.\n Exception: %s" % (filename, str(e)))
return None
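# A minimal updater.ini this script can read might look like the following
# (key names taken from the lookups below; values are placeholders):
#
# [updater]
# downloadurl = https://example.com/converter/latest.zip
# appinstall = C:\Converter
# executablefn = Converter.py
# keepconfig = yes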
parser = argparse.ArgumentParser(description='Updater for Converter')
parser.add_argument('-cfg', '--config', nargs="?", help="The path to the configuration file. (Usually generated by Converter.)")
args= parser.parse_args()
parameterfile=args.config
if parameterfile == None:
parameterfile="updater.ini"
else:
parameterfile=str(parameterfile)
executeafterupdate=True
updatedownloadurl=urlparse(readvaluefromconfig(parameterfile, "updater", "downloadurl"))
appinstall=readvaluefromconfig(parameterfile, "updater", "appinstall")
executablefile=readvaluefromconfig(parameterfile, "updater", "executablefn")
keepconfig=readvaluefromconfig(parameterfile, "updater", "keepconfig")
if os.path.exists(appinstall):
if os.path.isdir(appinstall):
print("Directory found!")
else:
print("Path is not a directory.")
sys.exit(1)
else:
print("Path doesn't exist.")
sys.exit(1)
if not os.path.exists("{}\\{}".format(appinstall, executablefile)):
executeafterupdate=False
temporaryfile="download.tmp"
# print(str(args.config))
def downloadfile():
try:
with open(temporaryfile, "wb") as f:
print("Connecting...", end="")
response = requests.get(updatedownloadurl.geturl(), stream=True)
print("\rConnected! ")
total_length = response.headers.get('content-length')
            if total_length is not None:
print("Downloading %s to %s (%s B)" % (str(updatedownloadurl.geturl()), temporaryfile, total_length))
else:
print("Downloading %s..." % (temporaryfile))
if total_length is None:
f.write(response.content)
else:
total_length=int(total_length)
for data in response.iter_content(chunk_size=4096):
# done = int(50 * dl / total_length)
# print("\r%s/%sB" % (done, total_length))
# dl += len(data)
f.write(data)
cleanfiles()
#print("\r%s/%sB" % (done, total_length))
except Exception as e:
print("\n\nFailed to connect to %s. Check the update parameters or try again later.\nException: %s" % (str(updatedownloadurl.geturl()), str(e)))
def cleanfiles():
for file in filesdelete:
fullpath="{}\\{}".format(appinstall, file)
if not os.path.exists(fullpath):
print("%s does not exist." % (fullpath))
else:
try:
os.remove(fullpath)
print("Deleted %s!" % (fullpath))
except Exception as e:
print("\n\nFailed to delete %s!\nException: %s" % (fullpath, str(e)))
for dirs in directoriesdelete:
fullpath="{}\\{}".format(appinstall, dirs)
if not os.path.exists(fullpath):
print("%s does not exist." % (fullpath))
else:
try:
shutil.rmtree(fullpath)
print("Deleted %s!" % (fullpath))
except Exception as e:
print("\n\nFailed to delete %s!\nException: %s" % (fullpath, str(e)))
extractfile(temporaryfile)
def extractfile(file):
print("Extracting %s to %s. Please wait!" % (str(file), appinstall))
try:
with zipfile.ZipFile(file, "r") as zip_r:
zip_r.extractall(appinstall)
except zipfile.BadZipfile as e:
print("\n\nAttempted to extract a bad zip file '%s'!\nException: %s" % (file, str(e)))
except Exception as e:
print("\n\nAn error occurred while trying to extract '%s'.\nException %s" % (file, str(e)))
print("Cleaning temporary files...")
try:
os.remove(file)
except Exception as e:
print("\n\nAn erro occurred while trying to delete temporary files.\n Exception: %s" % (str(e)))
runapp()
def runapp():
try:
pythonlocation=sys.executable
executablefullpath="{}\\{}".format(appinstall, executablefile)
print("Attempting to run app...")
os.system('{} {}'.format(pythonlocation, executablefullpath))
except Exception as e:
raise e
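# Typical invocation (assumed): python ConUpdate.py -cfg updater.ini
# Falls back to 'updater.ini' in the working directory when -cfg is omitted.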
downloadfile()
| ZanyLeonic/LeonicBinaryTool | ConUpdate.py | Python | gpl-3.0 | 5,039 | 0.009724 |
from jobman import DD, expand, flatten
import pynet.layer as layer
from pynet.model import *
from pynet.layer import *
from pynet.datasets.mnist import Mnist, Mnist_Blocks
import pynet.datasets.spec as spec
import pynet.datasets.mnist as mnist
import pynet.datasets.transfactor as tf
import pynet.datasets.mapping as mapping
import pynet.learning_method as learning_methods
from pynet.learning_rule import LearningRule
from pynet.log import Log
from pynet.train_object import TrainObject
from pynet.cost import Cost
import pynet.datasets.preprocessor as preproc
import pynet.datasets.dataset_noise as noisy
import pynet.layer_noise as layer_noise
import cPickle
import os
from hps.models.model import AE
import theano
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
floatX = theano.config.floatX
class Laura_Two_Layers(AE):
def __init__(self, state):
self.state = state
def build_model(self, input_dim):
with open(os.environ['PYNET_SAVE_PATH'] + '/'
+ self.state.hidden1.model + '/model.pkl') as f1:
model1 = cPickle.load(f1)
with open(os.environ['PYNET_SAVE_PATH'] + '/'
+ self.state.hidden2.model + '/model.pkl') as f2:
model2 = cPickle.load(f2)
model = AutoEncoder(input_dim=input_dim)
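        # Stack the two pretrained autoencoders symmetrically: encoder layers
        # of model1 then model2 on the way in, decoder layers of model2 then
        # model1 on the way out.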
while len(model1.encode_layers) > 0:
model.add_encode_layer(model1.pop_encode_layer())
while len(model2.encode_layers) > 0:
model.add_encode_layer(model2.pop_encode_layer())
while len(model2.decode_layers) > 0:
model.add_decode_layer(model2.pop_decode_layer())
while len(model1.decode_layers) > 0:
model.add_decode_layer(model1.pop_decode_layer())
return model
def run(self):
dataset = self.build_dataset()
learning_rule = self.build_learning_rule()
learn_method = self.build_learning_method()
model = self.build_model(dataset.feature_size())
model.layers[0].dropout_below = self.state.hidden1.dropout_below
if self.state.log.save_to_database_name:
database = self.build_database(dataset, learning_rule, learn_method, model)
database['records']['h1_model'] = self.state.hidden1.model
database['records']['h2_model'] = self.state.hidden2.model
log = self.build_log(database)
log.info("Fine Tuning")
for layer in model.layers:
layer.dropout_below = None
layer.noise = None
train_obj = TrainObject(log = log,
dataset = dataset,
learning_rule = learning_rule,
learning_method = learn_method,
model = model)
train_obj.run()
| hycis/Pynet | hps/models/Laura_Two_Layers.py | Python | apache-2.0 | 2,815 | 0.005329 |
from muntjac.ui.vertical_layout import VerticalLayout
from muntjac.ui.menu_bar import MenuBar, ICommand
from muntjac.terminal.external_resource import ExternalResource
class MenuBarItemStylesExample(VerticalLayout):
def __init__(self):
super(MenuBarItemStylesExample, self).__init__()
self._menubar = MenuBar()
menuCommand = MenuCommand(self)
# Save reference to individual items so we can add sub-menu items to
# them
f = self._menubar.addItem('File', None)
newItem = f.addItem('New', None)
f.addItem('Open f...', menuCommand)
f.addSeparator()
# Add a style name for a menu item, then use CSS to alter the visuals
f.setStyleName('file')
newItem.addItem('File', menuCommand)
newItem.addItem('Folder', menuCommand)
newItem.addItem('Project...', menuCommand)
f.addItem('Close', menuCommand)
f.addItem('Close All', menuCommand).setStyleName('close-all')
f.addSeparator()
f.addItem('Save', menuCommand)
f.addItem('Save As...', menuCommand)
f.addItem('Save All', menuCommand)
edit = self._menubar.addItem('Edit', None)
edit.addItem('Undo', menuCommand)
edit.addItem('Redo', menuCommand).setEnabled(False)
edit.addSeparator()
edit.addItem('Cut', menuCommand)
edit.addItem('Copy', menuCommand)
edit.addItem('Paste', menuCommand)
edit.addSeparator()
find = edit.addItem('Find/Replace', menuCommand)
# Actions can be added inline as well, of course
find.addItem('Google Search', SearchCommand(self))
find.addSeparator()
find.addItem('Find/Replace...', menuCommand)
find.addItem('Find Next', menuCommand)
find.addItem('Find Previous', menuCommand)
view = self._menubar.addItem('View', None)
view.addItem('Show/Hide Status Bar', menuCommand)
view.addItem('Customize Toolbar...', menuCommand)
view.addSeparator()
view.addItem('Actual Size', menuCommand)
view.addItem('Zoom In', menuCommand)
view.addItem('Zoom Out', menuCommand)
self.addComponent(self._menubar)
class SearchCommand(ICommand):
def __init__(self, c):
self._c = c
def menuSelected(self, selectedItem):
er = ExternalResource('http://www.google.com')
self._c.getWindow().open(er)
class MenuCommand(ICommand):
def __init__(self, c):
self._c = c
def menuSelected(self, selectedItem):
self._c.getWindow().showNotification('Action '
+ selectedItem.getText())
| rwl/muntjac | muntjac/demo/sampler/features/menubar/MenuBarItemStylesExample.py | Python | apache-2.0 | 2,645 | 0.000378 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
import os.path
import re
class Picard(Package):
"""Picard is a set of command line tools for manipulating high-throughput
sequencing (HTS) data and formats such as SAM/BAM/CRAM and VCF.
"""
homepage = "http://broadinstitute.github.io/picard/"
url = "https://github.com/broadinstitute/picard/releases/download/2.9.2/picard.jar"
_urlfmt = "https://github.com/broadinstitute/picard/releases/download/{0}/picard.jar"
_oldurlfmt = 'https://github.com/broadinstitute/picard/releases/download/{0}/picard-tools-{0}.zip'
# They started distributing a single jar file at v2.6.0, prior to
# that it was a .zip file with multiple .jar and .so files
version('2.18.3', '181b1b0731fd35f0d8bd44677d8787e9', expand=False)
version('2.18.0', '20045ff141e4a67512365f0b6bbd8229', expand=False)
version('2.17.0', '72cc527f1e4ca6a799ae0117af60b54e', expand=False)
version('2.16.0', 'fed8928b03bb36e355656f349e579083', expand=False)
version('2.15.0', '3f5751630b1a3449edda47a0712a64e4', expand=False)
version('2.13.2', '3d7b33fd1f43ad2129e6ec7883af56f5', expand=False)
version('2.10.0', '96f3c11b1c9be9fc8088bc1b7b9f7538', expand=False)
version('2.9.4', '5ce72af4d5efd02fba7084dcfbb3c7b3', expand=False)
version('2.9.3', '3a33c231bcf3a61870c3d44b3b183924', expand=False)
version('2.9.2', '0449279a6a89830917e8bcef3a976ef7', expand=False)
version('2.9.0', 'b711d492f16dfe0084d33e684dca2202', expand=False)
version('2.8.3', '4a181f55d378cd61d0b127a40dfd5016', expand=False)
version('2.6.0', '91f35f22977d9692ce2718270077dc50', expand=False)
version('1.140', '308f95516d94c1f3273a4e7e2b315ec2')
depends_on('java@8:', type='run')
def install(self, spec, prefix):
mkdirp(prefix.bin)
# The list of files to install varies with release...
# ... but skip the spack-{build.env}.out files.
files = [x for x in glob.glob("*") if not re.match("^spack-", x)]
for f in files:
install(f, prefix.bin)
# Set up a helper script to call java on the jar file,
# explicitly codes the path for java and the jar file.
script_sh = join_path(os.path.dirname(__file__), "picard.sh")
script = prefix.bin.picard
install(script_sh, script)
set_executable(script)
# Munge the helper script to explicitly point to java and the
# jar file.
java = self.spec['java'].prefix.bin.java
kwargs = {'ignore_absent': False, 'backup': False, 'string': False}
filter_file('^java', java, script, **kwargs)
filter_file('picard.jar', join_path(prefix.bin, 'picard.jar'),
script, **kwargs)
def setup_environment(self, spack_env, run_env):
"""The Picard docs suggest setting this as a convenience."""
run_env.prepend_path('PICARD',
join_path(self.prefix, 'bin', 'picard.jar'))
def url_for_version(self, version):
if version < Version('2.6.0'):
return self._oldurlfmt.format(version)
else:
return self._urlfmt.format(version)
| krafczyk/spack | var/spack/repos/builtin/packages/picard/package.py | Python | lgpl-2.1 | 4,400 | 0.001136 |
from feedgen.feed import FeedGenerator
def format_itunes_duration(td):
return "{hours:02d}:{minutes:02d}:{seconds:02d}".format(
hours=td.seconds//3600,
minutes=(td.seconds//60)%60,
seconds=int(td.seconds%60)
)
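# Worked example: timedelta(hours=1, minutes=2, seconds=3) -> "01:02:03".
# Note this formats td.seconds only, so any whole days in td are dropped.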
def add_entry(fg, md):
fe = fg.add_entry()
fe.id(md.id)
fe.title(md.title)
fe.enclosure(md.link, str(md.length), "audio/mpeg")
if md.duration is not None:
fe.podcast.itunes_duration(format_itunes_duration(md.duration))
def generate_feed(channel_dict, file_metadatas):
fg = FeedGenerator()
fg.load_extension("podcast")
fg.link(href=channel_dict["url"], rel="self")
fg.title(channel_dict["title"])
fg.description(channel_dict["description"])
for file_metadata in file_metadatas:
add_entry(fg, file_metadata)
return fg.rss_str(pretty=True)
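# Minimal usage sketch (hypothetical values; FileMetadata stands in for
# whatever metadata object the caller supplies -- it only needs the
# attributes read above: id, title, link, length and duration):
if __name__ == "__main__":
    from collections import namedtuple
    from datetime import timedelta

    FileMetadata = namedtuple("FileMetadata", "id title link length duration")
    channel = {
        "url": "https://example.com/feed.xml",
        "title": "Example podcast",
        "description": "Episodes from a directory",
    }
    episodes = [
        FileMetadata("ep1", "Episode 1", "https://example.com/ep1.mp3",
                     12345678, timedelta(minutes=30)),
    ]
    print(generate_feed(channel, episodes).decode("utf-8"))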
| calpaterson/dircast | dircast/feed.py | Python | gpl-3.0 | 848 | 0.005896 |
# Izhikevich.py ---
#
# Filename: Izhikevich.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Fri May 28 14:42:33 2010 (+0530)
# Version:
# Last-Updated: Tue Sep 11 14:27:18 2012 (+0530)
# By: subha
# Update #: 1212
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# threshold variability to be checked.
# Bistability not working.
# DAP working with increased parameter value 'a'
# inhibition-induced spiking roughly works but does not match the paper figure
# inhibition-induced bursting roughly works but does not match the paper figure
# Accommodation cannot work with the current implementation, because the equation for u is not what is mentioned in the paper.
# It is: u = u + tau*a*(b*(V+65)); [it is nowhere in the paper and you face it only if you look at the matlab code for figure 1].
# It is not possible to tune a, b, c, d in any way to produce this from: u = u + tau*a*(b*V - u)
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import time
from numpy import *
import os
import sys
import moose
class IzhikevichDemo:
"""Class to setup and simulate the various kind of neuronal behaviour using Izhikevich model.
Fields:
"""
# Paramteres for different kinds of behaviour described by Izhikevich
# (1. IEEE TRANSACTIONS ON NEURAL NETWORKS, VOL. 14, NO. 6, NOVEMBER 2003
# and 2. IEEE TRANSACTIONS ON NEURAL NETWORKS, VOL. 15, NO. 5, SEPTEMBER
# 2004)
# Modified and enhanced using: http://www.izhikevich.org/publications/figure1.m
# The entries in the tuple are as follows:
# fig. no. in paper (2), parameter a, parameter b, parameter c (reset value of v in mV), parameter d (after-spike reset value of u), injection current I (uA), initial value of Vm, duration of simulation (ms)
#
# They are all in whatever unit they were in the paper. Just before use we convert them to SI.
parameters = {
"tonic_spiking": ['A', 0.02 , 0.2 , -65.0, 6.0 , 14.0, -70.0, 100.0], # Fig. 1.A
"phasic_spiking": ['B', 0.02 , 0.25 , -65.0, 6.0 , 0.5, -64.0, 200.0], # Fig. 1.B
"tonic_bursting": ['C', 0.02 , 0.2 , -50.0, 2.0 , 15.0, -70.0, 220.0], # Fig. 1.C
"phasic_bursting": ['D', 0.02 , 0.25 , -55.0, 0.05 , 0.6, -64.0, 200.0], # Fig. 1.D
"mixed_mode": ['E', 0.02 , 0.2 , -55.0, 4.0 , 10.0, -70.0, 160.0], # Fig. 1.E
"spike_freq_adapt": ['F', 0.01 , 0.2 , -65.0, 8.0 , 30.0, -70.0, 85.0 ], # Fig. 1.F # spike frequency adaptation
"Class_1": ['G', 0.02 , -0.1 , -55.0, 6.0 , 0, -60.0, 300.0], # Fig. 1.G # Spikining Frequency increases with input strength
"Class_2": ['H', 0.2 , 0.26 , -65.0, 0.0 , 0, -64.0, 300.0], # Fig. 1.H # Produces high frequency spikes
"spike_latency": ['I', 0.02 , 0.2 , -65.0, 6.0 , 7.0, -70.0, 100.0], # Fig. 1.I
"subthresh_osc": ['J', 0.05 , 0.26 , -60.0, 0.0 , 0, -62.0, 200.0], # Fig. 1.J # subthreshold oscillations
"resonator": ['K', 0.1 , 0.26 , -60.0, -1.0 , 0, -62.0, 400.0], # Fig. 1.K
"integrator": ['L', 0.02 , -0.1 , -55.0, 6.0 , 0, -60.0, 100.0], # Fig. 1.L
"rebound_spike": ['M', 0.03 , 0.25 , -60.0, 4.0 , -15, -64.0, 200.0], # Fig. 1.M
"rebound_burst": ['N', 0.03 , 0.25 , -52.0, 0.0 , -15, -64.0, 200.0], # Fig. 1.N
"thresh_var": ['O', 0.03 , 0.25 , -60.0, 4.0 , 0, -64.0, 100.0], # Fig. 1.O # threshold variability
"bistable": ['P', 0.1 , 0.26 , -60.0, 0.0 , 1.24, -61.0, 300.0], # Fig. 1.P
"DAP": ['Q', 1.15 , 0.2 , -60.0, -21.0 , 20, -70.0, 50.0], # Fig. 1.Q # Depolarizing after-potential - a had to be increased in order to reproduce the figure
"accommodation": ['R', 0.02 , 1.0 , -55.0, 4.0 , 0, -65.0, 400.0], # Fig. 1.R
"iispike": ['S', -0.02 , -1.0 , -60.0, 8.0 , 75.0, -63.8, 350.0], # Fig. 1.S # inhibition-induced spiking
"iiburst": ['T', -0.026, -1.0 , -45.0, 0.0 , 75.0, -63.8, 350.0] # Fig. 1.T # inhibition-induced bursting
}
documentation = {
"tonic_spiking": """
Neuron is normally silent but spikes when stimulated with a current injection.""",
"phasic_spiking": """
Neuron fires a single spike only at the start of a current pulse.""",
"tonic_bursting": """
Neuron is normally silent but produces bursts of spikes when
stimulated with current injection.""",
"phasic_bursting": """
Neuron is normally silent but produces a burst of spikes at the
beginning of an input current pulse.""",
"mixed_mode": """
Neuron fires a burst at the beginning of input current pulse, but then
switches to tonic spiking.""",
"spike_freq_adapt": """
Neuron fires spikes when a current injection is applied, but at a
gradually reducing rate.""",
"Class_1": """
Neuron fires low frequency spikes with weak input current injection.""",
"Class_2": """
Neuron fires high frequency (40-200 Hz) spikes when stimulated with
current injection.""",
"spike_latency": """
The spike starts after a delay from the onset of current
injection. The delay is dependent on strength of input.""",
"subthresh_osc": """
Even at subthreshold inputs a neuron exhibits oscillatory membrane potential.""",
"resonator": """
Neuron fires spike only when an input pulsetrain of a frequency
similar to that of the neuron's subthreshold oscillatory frequency is
applied.""",
"integrator": """
The chances of the neuron firing increases with increase in the frequency
of input pulse train.""",
"rebound_spike": """
When the neuron is released from an inhibitory input, it fires a spike.""",
"rebound_burst": """
When the neuron is released from an inhibitory input, it fires a burst
of action potentials.""",
"thresh_var": """
Depending on the previous input, the firing threshold of a neuron may
    change. In this example, the first input pulse does not produce a
spike, but when the same input is applied after an inhibitory input,
it fires.""",
"bistable": """
These neurons switch between two stable modes (resting and tonic spiking).
The switch happens via an excitatory or inhibitory input.""",
"DAP": """
After firing a spike, the membrane potential shows a prolonged depolarized
after-potential.""",
"accommodation": """
These neurons do not respond to slowly rising input, but a sharp increase
in input may cause firing.""",
"iispike": """
These neurons fire in response to inhibitory input.""",
"iiburst": """
These neurons show bursting in response to inhibitory input."""
}
def __init__(self):
"""Initialize the object."""
self.model_container = moose.Neutral('/model')
self.data_container = moose.Neutral('/data')
self.neurons = {}
self.Vm_tables = {}
self.u_tables = {}
self.inject_tables = {}
self.inputs = {}
self.simtime = 100e-3
self.dt = 0.25e-3
self.steps = int(self.simtime/self.dt)
moose.setClock(0, self.dt)
moose.setClock(1, self.dt)
moose.setClock(2, self.dt)
self.scheduled = {} # this is to bypass multiple clock issue
self.neuron = None
def setup(self, key):
neuron = self._get_neuron(key)
pulsegen = self._make_pulse_input(key)
if pulsegen is None:
            print(key, 'Not implemented.')
def simulate(self, key):
self.setup(key)
return self.run(key)
def run(self, key):
try:
Vm = self.Vm_tables[key]
u = self.u_tables[key]
except KeyError as e:
Vm = moose.Table(self.data_container.path + '/' + key + '_Vm')
nrn = self.neurons[key]
moose.connect(Vm, 'requestOut', nrn, 'getVm')
utable = moose.Table(self.data_container.path + '/' + key + '_u')
utable.connect('requestOut', self.neurons[key], 'getU')
self.Vm_tables[key] = Vm
self.u_tables[key] = utable
try:
Im = self.inject_tables[key]
except KeyError as e:
Im = moose.Table(self.data_container.path + '/' + key + '_inject') # May be different for non-pulsegen sources.
Im.connect('requestOut', self._get_neuron(key), 'getIm')
self.inject_tables[key] = Im
self.simtime = IzhikevichDemo.parameters[key][7] * 1e-3
for obj in moose.wildcardFind('%s/##' % (self.model_container.path)):
if obj not in self.scheduled:
moose.useClock(0, obj.path, 'process')
self.scheduled[obj] = True
for obj in moose.wildcardFind('%s/##' % (self.data_container.path)):
if obj not in self.scheduled:
moose.useClock(2, obj.path, 'process')
self.scheduled[obj] = True
moose.reinit()
moose.start(self.simtime)
while moose.isRunning():
            time.sleep(0.1)  # poll every 100 ms
t = linspace(0, IzhikevichDemo.parameters[key][7], len(Vm.vector))
# DEBUG
nrn = self._get_neuron(key)
        print('a = %g, b = %g, c = %g, d = %g, initVm = %g, initU = %g' % (nrn.a, nrn.b, nrn.c, nrn.d, nrn.initVm, nrn.initU))
#! DEBUG
return (t, Vm, Im)
def _get_neuron(self, key):
try:
params = IzhikevichDemo.parameters[key]
except KeyError as e:
            print('%s: Invalid neuron type. The valid types are:' % (key,))
            for valid_key in IzhikevichDemo.parameters:
                print(valid_key)
raise e
try:
neuron = self.neurons[key]
return neuron
except KeyError as e:
neuron = moose.IzhikevichNrn(self.model_container.path + '/' + key)
if key == 'integrator' or key == 'Class_1': # Integrator has different constants
neuron.beta = 4.1e3
neuron.gamma = 108.0
if key == 'accommodation':
neuron.accommodating = True
neuron.u0 = -0.065
self.neuron = neuron
neuron.a = params[1] * 1e3 # ms^-1 -> s^-1
neuron.b = params[2] * 1e3 # ms^-1 -> s^-1
neuron.c = params[3] * 1e-3 # mV -> V
neuron.d = params[4] # d is in mV/ms = V/s
neuron.initVm = params[6] * 1e-3 # mV -> V
        neuron.Vmax = 0.03 # 30 mV -> V
if key != 'accommodation':
neuron.initU = neuron.initVm * neuron.b
else:
neuron.initU = -16.0 # u is in mV/ms = V/s
moose.showfield(neuron)
self.neurons[key] = neuron
return neuron
def _make_pulse_input(self, key):
"""This is for creating a pulse generator for use as a current
source for all cases except Class_1, Class_2, resonator,
integrator, thresh_var and accommodation."""
try:
return self.inputs[key]
except KeyError:
            pass  # continue to the rest of the function
baseLevel = 0.0
firstWidth = 1e6
firstDelay = 0.0
firstLevel = IzhikevichDemo.parameters[key][5] * 1e-6
secondDelay = 1e6
secondWidth = 0.0
secondLevel = 0.0
if key == 'tonic_spiking':
firstDelay = 10e-3
elif key == 'phasic_spiking':
firstDelay = 20e-3
elif key == 'tonic_bursting':
firstDelay = 22e-3
elif key == 'phasic_bursting':
firstDelay = 20e-3
elif key == 'mixed_mode':
firstDelay = 16e-3
elif key == 'spike_freq_adapt':
firstDelay = 8.5e-3
elif key == 'spike_latency':
firstDelay = 10e-3
firstWidth = 3e-3
elif key == 'subthresh_osc':
firstDelay = 20e-3
firstWidth = 5e-3
firstLevel = 2e-9
elif key == 'rebound_spike':
firstDelay = 20e-3
firstWidth = 5e-3
elif key == 'rebound_burst':
firstDelay = 20e-3
firstWidth = 5e-3
elif key == 'bistable':
input_table = self._make_bistable_input()
self.inputs[key] = input_table
return input_table
elif key == 'DAP':
firstDelay = 9e-3
firstWidth = 2e-3
elif (key == 'iispike') or (key == 'iiburst'):
baseLevel = 80e-9
firstDelay = 50e-3
firstWidth = 200e-3
            firstLevel = 75e-9
elif key == 'Class_1':
input_table = self._make_Class_1_input()
self.inputs[key] = input_table
return input_table
elif key == 'Class_2':
input_table = self._make_Class_2_input()
self.inputs[key] = input_table
return input_table
elif key == 'resonator':
input_table = self._make_resonator_input()
self.inputs[key] = input_table
return input_table
elif key == 'integrator':
input_table = self._make_integrator_input()
self.inputs[key] = input_table
return input_table
elif key == 'accommodation':
input_table = self._make_accommodation_input()
self.inputs[key] = input_table
return input_table
elif key == 'thresh_var':
input_table = self._make_thresh_var_input()
self.inputs[key] = input_table
return input_table
else:
raise RuntimeError( key + ': Stimulus is not based on pulse generator.')
pulsegen = self._make_pulsegen(key,
firstLevel,
firstDelay,
firstWidth,
secondLevel,
secondDelay,
secondWidth, baseLevel)
self.inputs[key] = pulsegen
return pulsegen
def _make_pulsegen(self, key, firstLevel, firstDelay, firstWidth=1e6, secondLevel=0, secondDelay=1e6, secondWidth=0, baseLevel=0):
pulsegen = moose.PulseGen(self.model_container.path + '/' + key + '_input')
pulsegen.firstLevel = firstLevel
pulsegen.firstDelay = firstDelay
pulsegen.firstWidth = firstWidth
pulsegen.secondLevel = secondLevel
pulsegen.secondDelay = secondDelay
pulsegen.secondWidth = secondWidth
pulsegen.baseLevel = baseLevel
nrn = self._get_neuron(key)
moose.connect(pulsegen, 'output', nrn, 'injectMsg')
# self.stimulus_table = moose.Table(self.data_container.path + '/stimulus')
# self.stimulus_table.connect('requestOut', pulsegen, 'getOutputValue')
return pulsegen
def _make_Class_1_input(self):
input_table = moose.StimulusTable(self.model_container.path + '/' + 'Class_1_input')
input_table.stepSize = self.dt
input_table.startTime = 30e-3 # The ramp starts at 30 ms
input_table.stopTime = IzhikevichDemo.parameters['Class_1'][7] * 1e-3
# matlab code: if (t>T1) I=(0.075*(t-T1)); else I=0;
input_vec = np.arange(0, int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize)), 1.0) * 0.075 * self.dt * 1e3 * 1e-9
input_table.vector = input_vec
input_table.connect('output', self._get_neuron('Class_1'), 'injectMsg')
self.stimulus_table = moose.Table(self.data_container.path + '/stimulus')
moose.connect(input_table, 'output', self.stimulus_table, 'input')
return input_table
def _make_Class_2_input(self):
key = 'Class_2'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 30e-3 # The ramp starts at 30 ms
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
# The matlab code is: if (t>T1) I=-0.5+(0.015*(t-T1)); else I=-0.5
# convert dt from s to ms, and convert total current from nA to A.
input_vec = np.arange(0, int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize)), 1.0) * 0.015 * self.dt * 1e3 * 1e-9 - 0.05*1e-9
input_table.vector = input_vec
input_table.connect('output', self._get_neuron(key), 'injectMsg')
return input_table
def _make_bistable_input(self):
key = 'bistable'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 0
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
t1 = IzhikevichDemo.parameters[key][7] * 1e-3/8
t2 = 216e-3
t = np.arange(0,
int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize))) * self.dt
input_vec = np.where(np.logical_or(np.logical_and(t > t1, t < t1+5e-3),
np.logical_and(t > t2, t < t2+5e-3)),
1.24e-9,
0.24e-9)
input_table.vector = input_vec
input_table.connect('output', self._get_neuron(key), 'injectMsg')
return input_table
def _make_resonator_input(self):
key = 'resonator'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 0
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
t1 = IzhikevichDemo.parameters[key][7] * 1e-3/10
t2 = t1 + 20e-3
t3 = 0.7 * IzhikevichDemo.parameters[key][7] * 1e-3
t4 = t3 + 40e-3
t = np.arange(0, int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize)), 1) * self.dt
input_vec = np.zeros(t.shape)
idx = np.nonzero(((t > t1) & (t < t1 + 4e-3)) |
((t > t2) & (t < t2 + 4e-3)) |
((t > t3) & (t < t3 + 4e-3)) |
((t > t4) & (t < t4 + 4e-3)))[0]
input_vec[idx] = 0.65e-9
input_table.vector = input_vec
input_table.connect('output', self._get_neuron(key), 'injectMsg')
return input_table
def _make_integrator_input(self):
key = 'integrator'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 0
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
t1 = IzhikevichDemo.parameters[key][7] * 1e-3/11
t2 = t1 + 5e-3
t3 = 0.7 * IzhikevichDemo.parameters[key][7] * 1e-3
t4 = t3 + 10e-3
t = np.arange(0, int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize))) * self.dt
input_vec = np.where(((t > t1) & (t < t1 + 2e-3)) |
((t > t2) & (t < t2 + 2e-3)) |
((t > t3) & (t < t3 + 2e-3)) |
((t > t4) & (t < t4 + 2e-3)),
9e-9,
0.0)
input_table.vector = input_vec
input_table.connect('output', self._get_neuron(key), 'injectMsg')
return input_table
def _make_accommodation_input(self):
key = 'accommodation'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 0
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
input_vec = np.zeros(int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize)))
t = 0.0
for ii in range(len(input_vec)):
if t < 200e-3:
input_vec[ii] = t * 1e-6/25
elif t < 300e-3:
input_vec[ii] = 0.0
elif t < 312.5e-3:
input_vec[ii] = 4e-6 * (t-300e-3)/12.5
else:
input_vec[ii] = 0.0
t = t + self.dt
input_table.vector = input_vec
input_table.connect('output', self._get_neuron(key), 'injectMsg')
return input_table
def _make_thresh_var_input(self):
key = 'thresh_var'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 0
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
t = np.arange(0, int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize)), 1) * self.dt
input_vec = np.zeros(t.shape)
input_vec[((t > 10e-3) & (t < 15e-3)) | ((t > 80e-3) & (t < 85e-3))] = 1e-9
input_vec[(t > 70e-3) & (t < 75e-3)] = -6e-9
input_table.vector = input_vec
nrn = self._get_neuron(key)
input_table.connect('output', nrn, 'injectMsg')
return input_table
def getEquation(self, key):
params = IzhikevichDemo.parameters[key]
if key != 'accommodation':
equationText = "<i>v' = 0.04v^2 + 5v + 140 - u + I</i><br><i>u' = a(bv - u)</i><p>If <i>v >= 30 mV, v = c</i> and <i>u = u + d</i><br>where <i>a = %g</i>, <i>b = %g</i>, <i>c = %g</i> and <i>d = %g</i>." % (params[1], params[2], params[3], params[4])
else:
equationText = "<i>v' = 0.04v^2 + 5v + 140 - u + I</i><br><i>u' = ab(v + 65)</i><p>If <i>v >= 30 mV, v = c</i> and <i>u = u + d</i><br>where <i>a = %g</i>, <i>b = %g</i>, <i>c = %g</i> and <i>d = %g</i>." % (params[1], params[2], params[3], params[4])
return equationText
import sys
try:
from pylab import *
if __name__ == '__main__':
key = 'thresh_var'
if len(sys.argv) > 1:
key = sys.argv[1]
demo = IzhikevichDemo()
(t, Vm, Im) = demo.simulate(key)
        subplot(3,1,1)
        title(IzhikevichDemo.parameters[key][0] + '. ' + key)
        plot(t, Vm.vector)
        subplot(3,1,2)
        plot(t, Im.vector)
        show()
print('Finished simulation.')
except ImportError:
print('Matplotlib not installed.')
#
# Izhikevich.py ends here
| BhallaLab/moose | moose-examples/izhikevich/Izhikevich.py | Python | gpl-3.0 | 23,767 | 0.009593 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ...vml import Vml
class TestWriteIdmap(unittest.TestCase):
"""
Test the Vml _write_idmap() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
def test_write_idmap(self):
"""Test the _write_idmap() method"""
self.vml._write_idmap(1)
exp = """<o:idmap v:ext="edit" data="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
| ivmech/iviny-scope | lib/xlsxwriter/test/vml/test_write_idmap.py | Python | gpl-3.0 | 748 | 0 |
"""
@name: PyHouse/src/Modules/Families/Insteon/_test/test_Insteon_HVAC.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2014-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Dec 6, 2014
@Summary:
Passed all 2 tests - DBK - 2015-07-29
"""
__updated__ = '2020-02-17'
# Import system type stuff
from twisted.trial import unittest
# Import PyMh files
from _test.testing_mixin import SetupPyHouseObj
from Modules.House.Lighting.Controllers.controllers import Api as controllerApi
from Modules.House.Lighting.Lights.lights import Api as lightingApi
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.House.Family.Insteon.insteon_utils import Decode as utilDecode
from Modules.House.Family.Insteon import insteon_decoder
from Modules.House.Family.Insteon.insteon_light import DecodeResponses as Decode_Light
# 16.62.2D = sender address in MSG_50_A
# 1B.47.81 = target address in both test messages
MSG_50_A = bytearray(b'\x02\x50\x16\x62\x2d\x1b\x47\x81\x27\x09\x00')
MSG_50_B = bytearray(b'\x02\x50\x21\x34\x1F\x1b\x47\x81\x27\x6e\x4f')
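# Reference note on the byte layout of an INSTEON 0x50 "standard message
# received" frame: 02 50 | from-address (3 bytes) | to-address (3 bytes) |
# flags | cmd1 | cmd2. So MSG_50_A is a message from device 16.62.2D to
# 1B.47.81 with cmd1=0x09, and MSG_50_B one from 21.34.1F with cmd1=0x6E.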
class DummyApi:
def MqttPublish(self, p_topic, p_msg):
return
class SetupMixin(object):
"""
"""
def setUp(self):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
self.m_xml = SetupPyHouseObj().BuildXml()
self.m_cntl_api = controllerApi()
self.m_light_api = lightingApi()
class A0(unittest.TestCase):
def test_00_Print(self):
_x = PrettyFormatAny.form('_test', 'title', 190) # so it is defined when printing is cleaned up.
print('Id: test_Insteon_Light')
class A1_Prep(SetupMixin, unittest.TestCase):
""" This section tests the setup
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_device = None
def test_01_PyHouse(self):
""" Did we get everything set up for the rest of the tests of this class.
"""
self.assertIsInstance(self.m_pyhouse_obj.House, HouseInformation)
def test_02_FindXml(self):
""" Did we get everything set up for the rest of the tests of this class.
"""
self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)
self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')
self.assertEqual(self.m_xml.lighting_sect.tag, 'LightingSection')
self.assertEqual(self.m_xml.light_sect.tag, 'LightSection')
self.assertEqual(self.m_xml.button_sect.tag, 'ButtonSection')
self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection')
def test_03_House(self):
""" Did we get everything set up for the rest of the tests of this class.
"""
pass
def test_04_Objs(self):
""" Did we get everything set up for the rest of the tests of this class.
"""
pass
def test_05_XML(self):
""" Did we get everything set up for the rest of the tests of this class.
"""
pass
def test_06_Device(self):
""" Be sure that the XML contains the right stuff.
"""
class B1_Util(SetupMixin, unittest.TestCase):
"""This tests the utility section of decoding
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_ctrlr = ControllerInformation()
def test_01_GetObjFromMsg(self):
self.m_ctrlr._Message = MSG_50_A
self.m_controllers = self.m_cntl_api.read_all_controllers_xml(self.m_pyhouse_obj)
self.m_pyhouse_obj.House.Lighting.Controllers = self.m_controllers
print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Lighting, 'B1-01-A Lighting'))
l_ctlr = self.m_pyhouse_obj.House.Lighting.Controllers[0]
print(PrettyFormatAny.form(l_ctlr, 'B1-01-B Controller'))
self.assertEqual(l_ctlr.Name, TESTING_CONTROLLER_NAME_0)
def test_02_NextMsg(self):
self.m_ctrlr._Message = MSG_50_A
# l_msg = Util().get_next_message(self.m_ctrlr)
# print(PrintBytes(l_msg))
# self.assertEqual(l_msg[1], 0x50)
# self.m_ctrlr._Message = bytearray()
# l_msg = self.m_util.get_next_message(self.m_ctrlr)
# self.assertEqual(l_msg, None)
# self.m_ctrlr._Message = MSG_62 + MSG_50
# l_msg = self.m_util.get_next_message(self.m_ctrlr)
# print('Msg {}'.format(FormatBytes(l_msg)))
# print('remaning: {}'.format(FormatBytes(self.m_ctrlr._Message)))
# self.assertEqual(l_msg[1], 0x62)
self.assertEqual(self.m_ctrlr._Message[1], 0x50)
class B2_Decode(SetupMixin, unittest.TestCase):
"""This tests the utility section of decoding
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_ctrlr = ControllerInformation()
        self.m_decode = insteon_decoder.DecodeResponses(self.m_pyhouse_obj, self.m_ctrlr)
def test_01_GetObjFromMsg(self):
self.m_ctrlr._Message = MSG_50_A
l_ctlr = self.m_decode.decode_message(self.m_ctrlr)
print(l_ctlr, 'B2-01-A Controller')
class C1_Light(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self)
self.m_pyhouse_obj.House.Lighting.Controllers = self.m_cntl_api.read_all_controllers_xml(self.m_pyhouse_obj)
self.m_pyhouse_obj.House.Lighting.Lights = self.m_light_api.read_all_lights_xml(self.m_pyhouse_obj)
self.m_ctrlr = self.m_pyhouse_obj.House.Lighting.Controllers[0]
# print(PrettyFormatAny.form(self.m_ctrlr, "C1-0Controlelrs"))
self.m_pyhouse_obj.Core.MqttApi = DummyApi()
def test_01_x(self):
self.m_ctrlr._Message = MSG_50_A
l_device_obj = utilDecode().get_obj_from_message(self.m_pyhouse_obj, self.m_ctrlr._Message[2:5])
l_decode = Decode_Light().decode_0x50(self.m_pyhouse_obj, self.m_ctrlr, l_device_obj)
print(PrettyFormatAny.form(l_device_obj, "C1-01-A - Decode"))
self.assertEqual(len(self.m_ctrlr._Message), 0)
def test_02_x(self):
self.m_ctrlr._Message = MSG_50_B
l_device_obj = utilDecode().get_obj_from_message(self.m_pyhouse_obj, self.m_ctrlr._Message[2:5])
l_decode = Decode_Light().decode_0x50(self.m_pyhouse_obj, self.m_ctrlr, l_device_obj)
print(PrettyFormatAny.form(l_device_obj, "C1-02-A - Decode"))
self.assertEqual(len(self.m_ctrlr._Message), 0)
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/House/Family/Insteon/_test/test_insteon_light.py | Python | mit | 6,285 | 0.002546 |
# from server.utility.service_utility import count_total_page | bingweichen/GOKU | backend/server/utility/__init__.py | Python | apache-2.0 | 61 | 0.016393 |
import json
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib.auth import update_session_auth_hash
from rest_framework import status
from rest_framework import views
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from authentication.models import Account
from authentication.permissions import CanCreateAccount
from authentication.permissions import IsAccountAdminOrAccountOwner
from authentication.serializers import AccountSerializer
from attendance.models import Band
from emails.tasks import send_unsent_emails
from members.models import BandMember
class AccountViewSet(viewsets.ModelViewSet):
queryset = Account.objects.all()
serializer_class = AccountSerializer
permission_classes = (
IsAccountAdminOrAccountOwner,
IsAuthenticated,
)
def create(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.validated_data, status=status.HTTP_201_CREATED)
return Response({
'status': 'Bad request',
'message': 'Account could not be created with received data.',
}, status=status.HTTP_400_BAD_REQUEST)
def partial_update(self, request, pk=None):
data = json.loads(request.body)
if 'password' in data and request.user.id != int(pk):
return Response({
'status': "Forbidden",
'message': "Don't have permission to update password",
}, status=status.HTTP_403_FORBIDDEN)
return super(AccountViewSet, self).partial_update(request, pk=pk)
class LoginView(views.APIView):
def post(self, request, format=None):
data = json.loads(request.body)
email = data.get('email', None)
password = data.get('password', None)
account = authenticate(email=email, password=password)
if account is not None:
if account.is_active:
login(request, account)
serialized = AccountSerializer(account)
return Response(serialized.data)
else:
return Response({
'status': 'Unauthorized',
'message': 'This account has been disabled.'
}, status=status.HTTP_401_UNAUTHORIZED)
else:
return Response({
'status': 'Unauthorized',
'message': 'Email/password combination invalid.'
}, status=status.HTTP_401_UNAUTHORIZED)
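# Illustrative request sketch (not part of the original code): LoginView
# expects a JSON body such as {"email": "[email protected]", "password": "secret"}
# and returns the serialized account on success, or one of the 401 payloads
# built above on failure.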
class LogoutView(views.APIView):
permission_classes = (IsAuthenticated,)
def post(self, request, format=None):
logout(request)
return Response({}, status=status.HTTP_204_NO_CONTENT)
class CreateAccountsView(views.APIView):
permission_classes = (CanCreateAccount, IsAuthenticated,)
def post(self, request, format=None):
data = json.loads(request.body)
for account_data in data['accounts']:
section = account_data.pop('section')
account = Account.objects.create_user(**account_data)
band_member = BandMember.objects.create(section=section, account=account)
for band in Band.objects.all():
band.unassigned_members.add(band_member)
band.save()
return Response({}, status=status.HTTP_201_CREATED)
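# Illustrative payload sketch (not part of the original code): this view
# expects JSON like {"accounts": [{"email": "...", "password": "...",
# "section": "trumpet"}, ...]}; "section" is popped off and the remaining
# keys are forwarded to Account.objects.create_user.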
class CreatePasswordView(views.APIView):
def post(self, request, format=None):
data = json.loads(request.body)
email = request.user.email
password = data.get('password')
if not email or not password:
return Response({}, status=status.HTTP_400_BAD_REQUEST)
try:
account = Account.objects.get(email=email)
account.is_registered = True
account.set_password(password)
account.save()
update_session_auth_hash(request, account)
return Response({}, status=status.HTTP_204_NO_CONTENT)
except Account.DoesNotExist:
return Response({}, status=status.HTTP_400_BAD_REQUEST)
| KonichiwaKen/band-dashboard | authentication/views.py | Python | mit | 4,259 | 0.000704 |
## {{{ http://code.activestate.com/recipes/496882/ (r8)
'''
http://code.activestate.com/recipes/496882/
Author: Michael Palmer 13 Jul 2006
a regex-based JavaScript code compression kludge
'''
import re
class JSCompressor(object):
def __init__(self, compressionLevel=2, measureCompression=False):
'''
compressionLevel:
0 - no compression, script returned unchanged. For debugging only -
try if you suspect that compression compromises your script
1 - Strip comments and empty lines, don't change line breaks and indentation (code remains readable)
2 - Additionally strip insignificant whitespace (code will become quite unreadable)
measureCompression: append a comment stating the extent of compression
'''
self.compressionLevel = compressionLevel
self.measureCompression = measureCompression
# a bunch of regexes used in compression
# first, exempt string and regex literals from compression by transient substitution
findLiterals = re.compile(r'''
(\'.*?(?<=[^\\])\') | # single-quoted strings
(\".*?(?<=[^\\])\") | # double-quoted strings
((?<![\*\/])\/(?![\/\*]).*?(?<![\\])\/) # JS regexes, trying hard not to be tripped up by comments
''', re.VERBOSE)
# literals are temporarily replaced by numbered placeholders
literalMarker = '@_@%d@_@' # temporary replacement
backSubst = re.compile('@_@(\d+)@_@') # put the string literals back in
mlc1 = re.compile(r'(\/\*.*?\*\/)') # /* ... */ comments on single line
mlc = re.compile(r'(\/\*.*?\*\/)', re.DOTALL) # real multiline comments
slc = re.compile('\/\/.*') # remove single line comments
collapseWs = re.compile('(?<=\S)[ \t]+') # collapse successive non-leading white space characters into one
squeeze = re.compile('''
\s+(?=[\}\]\)\:\&\|\=\;\,\.\+]) | # remove whitespace preceding control characters
(?<=[\{\[\(\:\&\|\=\;\,\.\+])\s+ | # ... or following such
[ \t]+(?=\W) | # remove spaces or tabs preceding non-word characters
(?<=\W)[ \t]+ # ... or following such
'''
, re.VERBOSE | re.DOTALL)
def compress(self, script):
'''
perform compression and return compressed script
'''
if self.compressionLevel == 0:
return script
lengthBefore = len(script)
# first, substitute string literals by placeholders to prevent the regexes messing with them
literals = []
def insertMarker(mo):
l = mo.group()
literals.append(l)
return self.literalMarker % (len(literals) - 1)
script = self.findLiterals.sub(insertMarker, script)
# now, to the literal-stripped carcass, apply some kludgy regexes for deflation...
script = self.slc.sub('', script) # strip single line comments
script = self.mlc1.sub(' ', script) # replace /* .. */ comments on single lines by space
script = self.mlc.sub('\n', script) # replace real multiline comments by newlines
# remove empty lines and trailing whitespace
script = '\n'.join([l.rstrip() for l in script.splitlines() if l.strip()])
        if self.compressionLevel == 2: # squeeze out any dispensable whitespace
script = self.squeeze.sub('', script)
elif self.compressionLevel == 1: # only collapse multiple whitespace characters
script = self.collapseWs.sub(' ', script)
# now back-substitute the string and regex literals
def backsub(mo):
return literals[int(mo.group(1))]
script = self.backSubst.sub(backsub, script)
if self.measureCompression:
lengthAfter = float(len(script))
squeezedBy = int(100*(1-lengthAfter/lengthBefore))
script += '\n// squeezed out %s%%\n' % squeezedBy
return script
if __name__ == '__main__':
script = '''
/* this is a totally useless multiline comment, containing a silly "quoted string",
surrounded by several superfluous line breaks
*/
// and this is an equally important single line comment
sth = "this string contains 'quotes', a /regex/ and a // comment yet it will survive compression";
function wurst(){ // this is a great function
var hans = 33;
}
sthelse = 'and another useless string';
function hans(){ // another function
var bill = 66; // successive spaces will be collapsed into one;
var bob = 77 // this line break will be preserved b/c of lacking semicolon
var george = 88;
}
'''
for x in range(1,3):
print '\ncompression level', x, ':\n--------------'
c = JSCompressor(compressionLevel=x, measureCompression=True)
cpr = c.compress(script)
print cpr
print 'length', len(cpr)
## end of http://code.activestate.com/recipes/496882/ }}}
| MaxTyutyunnikov/lino | lino/utils/jscompressor.py | Python | gpl-3.0 | 5,152 | 0.011258 |
#!/usr/bin/env python
__author__ = 'waroquiers'
import unittest
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import ExplicitPermutationsAlgorithm
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import SeparationPlane
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import CoordinationGeometry
allcg = AllCoordinationGeometries()
class FakeSite:
def __init__(self, coords):
self.coords = coords
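# FakeSite stands in for a pymatgen site: the geometry code exercised below
# only reads a ``.coords`` attribute, so this minimal stub is sufficient.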
class CoordinationGeometriesTest(PymatgenTest):
def test_algorithms(self):
expl_algo = ExplicitPermutationsAlgorithm(permutations=[[0, 1, 2], [1, 2, 3]])
expl_algo2 = ExplicitPermutationsAlgorithm.from_dict(expl_algo.as_dict)
self.assertEqual(expl_algo.permutations, expl_algo2.permutations)
sepplane_algos_oct = allcg['O:6'].algorithms
self.assertEqual(len(sepplane_algos_oct[0].safe_separation_permutations()), 24)
self.assertEqual(len(sepplane_algos_oct[1].safe_separation_permutations()), 36)
sepplane_algos_oct_0 = SeparationPlane.from_dict(sepplane_algos_oct[0].as_dict)
self.assertEqual(sepplane_algos_oct[0].plane_points, sepplane_algos_oct_0.plane_points)
self.assertEqual(sepplane_algos_oct[0].mirror_plane, sepplane_algos_oct_0.mirror_plane)
self.assertEqual(sepplane_algos_oct[0].ordered_plane, sepplane_algos_oct_0.ordered_plane)
self.assertEqual(sepplane_algos_oct[0].point_groups, sepplane_algos_oct_0.point_groups)
self.assertEqual(sepplane_algos_oct[0].ordered_point_groups, sepplane_algos_oct_0.ordered_point_groups)
self.assertTrue(all([np.array_equal(perm, sepplane_algos_oct_0.explicit_optimized_permutations[iperm])
for iperm, perm in enumerate(sepplane_algos_oct[0].explicit_optimized_permutations)]))
self.assertEqual(sepplane_algos_oct[0].__str__(),
'Separation plane algorithm with the following reference separation :\n'
'[[4]] | [[0, 2, 1, 3]] | [[5]]')
def test_hints(self):
hints = CoordinationGeometry.NeighborsSetsHints(hints_type='single_cap',
options={'cap_index': 2, 'csm_max': 8})
myhints = hints.hints({'csm': 12.0})
self.assertEqual(myhints, [])
hints2 = CoordinationGeometry.NeighborsSetsHints.from_dict(hints.as_dict())
self.assertEqual(hints.hints_type, hints2.hints_type)
self.assertEqual(hints.options, hints2.options)
def test_coordination_geometry(self):
cg_oct = allcg['O:6']
cg_oct2 = CoordinationGeometry.from_dict(cg_oct.as_dict())
self.assertArrayAlmostEqual(cg_oct.central_site, cg_oct2.central_site)
self.assertArrayAlmostEqual(cg_oct.points, cg_oct2.points)
self.assertEqual(cg_oct.__str__(), 'Coordination geometry type : Octahedron (IUPAC: OC-6 || IUCr: [6o])\n'
'\n'
' - coordination number : 6\n'
' - list of points :\n'
' - [0.0, 0.0, 1.0]\n'
' - [0.0, 0.0, -1.0]\n'
' - [1.0, 0.0, 0.0]\n'
' - [-1.0, 0.0, 0.0]\n'
' - [0.0, 1.0, 0.0]\n'
' - [0.0, -1.0, 0.0]\n'
'------------------------------------------------------------\n')
self.assertEqual(cg_oct.__len__(), 6)
self.assertEqual(cg_oct.ce_symbol, cg_oct.mp_symbol)
self.assertTrue(cg_oct.is_implemented())
self.assertEqual(cg_oct.get_name(), 'Octahedron')
self.assertEqual(cg_oct.IUPAC_symbol, 'OC-6')
self.assertEqual(cg_oct.IUPAC_symbol_str, 'OC-6')
self.assertEqual(cg_oct.IUCr_symbol, '[6o]')
self.assertEqual(cg_oct.IUCr_symbol_str, '[6o]')
cg_oct.permutations_safe_override = True
self.assertEqual(cg_oct.number_of_permutations, 720.0)
self.assertEqual(cg_oct.ref_permutation([0, 3, 2, 4, 5, 1]), (0, 3, 1, 5, 2, 4))
sites = [FakeSite(coords=pp) for pp in cg_oct.points]
faces = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]
self.assertArrayAlmostEqual(cg_oct.faces(sites=sites, permutation=[0, 3, 2, 4, 5, 1]), faces)
faces = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]]
self.assertArrayAlmostEqual(cg_oct.faces(sites=sites), faces)
edges = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]
self.assertArrayAlmostEqual(cg_oct.edges(sites=sites, permutation=[0, 3, 2, 4, 5, 1]), edges)
edges = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, -1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
[[0.0, 0.0, -1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, -1.0], [0.0, -1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]]
self.assertArrayAlmostEqual(cg_oct.edges(sites=sites), edges)
self.assertArrayAlmostEqual(cg_oct.solid_angles(),
[2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.0943951])
pmeshes = cg_oct.get_pmeshes(sites=sites)
self.assertEqual(pmeshes[0]['pmesh_string'],
'14\n 0.00000000 0.00000000 1.00000000\n'
' 0.00000000 0.00000000 -1.00000000\n'
' 1.00000000 0.00000000 0.00000000\n'
' -1.00000000 0.00000000 0.00000000\n'
' 0.00000000 1.00000000 0.00000000\n'
' 0.00000000 -1.00000000 0.00000000\n'
' 0.33333333 0.33333333 0.33333333\n'
' 0.33333333 -0.33333333 0.33333333\n'
' -0.33333333 0.33333333 0.33333333\n'
' -0.33333333 -0.33333333 0.33333333\n'
' 0.33333333 0.33333333 -0.33333333\n'
' 0.33333333 -0.33333333 -0.33333333\n'
' -0.33333333 0.33333333 -0.33333333\n'
' -0.33333333 -0.33333333 -0.33333333\n'
'8\n4\n0\n2\n4\n0\n4\n0\n2\n5\n0\n4\n0\n3\n4\n0\n'
'4\n0\n3\n5\n0\n4\n1\n2\n4\n1\n4\n1\n2\n5\n1\n4\n'
'1\n3\n4\n1\n4\n1\n3\n5\n1\n')
allcg_str = allcg.__str__()
self.assertTrue('\n#=======================================================#\n'
'# List of coordination geometries currently implemented #\n'
'#=======================================================#\n'
'\nCoordination geometry type : Single neighbor (IUCr: [1l])\n\n'
' - coordination number : 1\n'
' - list of points :\n'
' - [0.0, 0.0, 1.0]\n'
'------------------------------------------------------------\n\n' in allcg_str)
self.assertTrue('Coordination geometry type : Trigonal plane (IUPAC: TP-3 || IUCr: [3l])\n\n'
' - coordination number : 3\n'
' - list of points :\n' in allcg_str)
all_symbols = [u'S:1', u'L:2', u'A:2', u'TL:3', u'TY:3', u'TS:3', u'T:4', u'S:4', u'SY:4', u'SS:4',
u'PP:5', u'S:5', u'T:5', u'O:6', u'T:6', u'PP:6', u'PB:7', u'ST:7', u'ET:7', u'FO:7',
u'C:8', u'SA:8', u'SBT:8', u'TBT:8', u'DD:8', u'DDPN:8', u'HB:8', u'BO_1:8', u'BO_2:8',
u'BO_3:8', u'TC:9', u'TT_1:9', u'TT_2:9', u'TT_3:9', u'HD:9', u'TI:9', u'SMA:9', u'SS:9',
u'TO_1:9', u'TO_2:9', u'TO_3:9', u'PP:10', u'PA:10', u'SBSA:10', u'MI:10', u'S:10',
u'H:10', u'BS_1:10', u'BS_2:10', u'TBSA:10', u'PCPA:11', u'H:11', u'SH:11', u'CO:11',
u'DI:11', u'I:12', u'PBP:12', u'TT:12', u'C:12', u'AC:12', u'SC:12', u'S:12', u'HP:12',
u'HA:12', u'SH:13', u'DD:20', u'UNKNOWN', u'UNCLEAR']
self.assertEqual(len(allcg.get_geometries()), 68)
self.assertEqual(len(allcg.get_geometries(coordination=3)), 3)
self.assertEqual(sorted(allcg.get_geometries(returned='mp_symbol')), sorted(all_symbols))
self.assertEqual(sorted(allcg.get_geometries(returned='mp_symbol', coordination=3)),
['TL:3', 'TS:3', 'TY:3'])
self.assertEqual(allcg.get_symbol_name_mapping(coordination=3),
{u'TY:3': u'Triangular non-coplanar', u'TL:3': u'Trigonal plane', u'TS:3': u'T-shaped'})
self.assertEqual(allcg.get_symbol_cn_mapping(coordination=3),
{u'TY:3': 3, u'TL:3': 3, u'TS:3': 3})
self.assertEqual(sorted(allcg.get_implemented_geometries(coordination=4, returned='mp_symbol')),
[u'S:4', u'SS:4', u'SY:4', u'T:4'])
self.assertEqual(sorted(allcg.get_not_implemented_geometries(returned='mp_symbol')),
[u'CO:11', u'DD:20', u'H:10', u'S:10', u'S:12', u'UNCLEAR', u'UNKNOWN'])
self.assertEqual(allcg.get_geometry_from_name('Octahedron').mp_symbol, cg_oct.mp_symbol)
with self.assertRaises(LookupError) as cm:
allcg.get_geometry_from_name('Octahedran')
self.assertEqual(str(cm.exception), 'No coordination geometry found with name "Octahedran"')
self.assertEqual(allcg.get_geometry_from_IUPAC_symbol('OC-6').mp_symbol, cg_oct.mp_symbol)
with self.assertRaises(LookupError) as cm:
allcg.get_geometry_from_IUPAC_symbol('OC-7')
self.assertEqual(str(cm.exception), 'No coordination geometry found with IUPAC symbol "OC-7"')
self.assertEqual(allcg.get_geometry_from_IUCr_symbol('[6o]').mp_symbol, cg_oct.mp_symbol)
with self.assertRaises(LookupError) as cm:
allcg.get_geometry_from_IUCr_symbol('[6oct]')
self.assertEqual(str(cm.exception), 'No coordination geometry found with IUCr symbol "[6oct]"')
with self.assertRaises(LookupError) as cm:
allcg.get_geometry_from_mp_symbol('O:7')
self.assertEqual(str(cm.exception), 'No coordination geometry found with mp_symbol "O:7"')
self.assertEqual(allcg.pretty_print(maxcn=4),
'+-------------------------+\n| Coordination geometries |\n+-------------------------+\n'
'\n==>> CN = 1 <<==\n - S:1 : Single neighbor\n\n'
'==>> CN = 2 <<==\n'
' - L:2 : Linear\n - A:2 : Angular\n\n'
'==>> CN = 3 <<==\n'
' - TL:3 : Trigonal plane\n - TY:3 : Triangular non-coplanar\n - TS:3 : T-shaped\n\n'
'==>> CN = 4 <<==\n - T:4 : Tetrahedron\n - S:4 : Square plane\n'
' - SY:4 : Square non-coplanar\n - SS:4 : See-saw\n\n')
self.assertEqual(allcg.pretty_print(maxcn=2, type='all_geometries_latex'),
'\\subsection*{Coordination 1}\n\n\\begin{itemize}\n'
'\\item S:1 $\\rightarrow$ Single neighbor (IUPAC : None - IUCr : $[$1l$]$)\n'
'\\end{itemize}\n\n\\subsection*{Coordination 2}\n\n\\begin{itemize}\n'
'\\item L:2 $\\rightarrow$ Linear (IUPAC : L-2 - IUCr : $[$2l$]$)\n'
'\\item A:2 $\\rightarrow$ Angular (IUPAC : A-2 - IUCr : $[$2n$]$)\n'
'\\end{itemize}\n\n')
self.assertEqual(allcg.pretty_print(maxcn=2, type='all_geometries_latex_images'),
'\\section*{Coordination 1}\n\n\\subsubsection*{S:1 : Single neighbor}\n\n'
'IUPAC : None\n\nIUCr : [1l]\n\n\\begin{center}\n'
'\\includegraphics[scale=0.15]{images/S_1.png}\n'
'\\end{center}\n\n\\section*{Coordination 2}\n\n'
'\\subsubsection*{L:2 : Linear}\n\nIUPAC : L-2\n\n'
'IUCr : [2l]\n\n\\begin{center}\n\\includegraphics[scale=0.15]{images/L_2.png}\n'
'\\end{center}\n\n\\subsubsection*{A:2 : Angular}\n\nIUPAC : A-2\n\nIUCr : [2n]\n\n'
'\\begin{center}\n\\includegraphics[scale=0.15]{images/A_2.png}\n\\end{center}\n\n')
self.assertDictEqual(allcg.minpoints, {6: 2, 7: 2, 8: 2, 9: 2, 10: 2, 11: 2, 12: 2, 13: 3})
self.assertDictEqual(allcg.maxpoints, {6: 5, 7: 5, 8: 6, 9: 7, 10: 6, 11: 5, 12: 8, 13: 6})
self.assertDictEqual(allcg.maxpoints_inplane, {6: 5, 7: 5, 8: 6, 9: 7, 10: 6, 11: 5, 12: 8, 13: 6})
self.assertDictEqual(allcg.separations_cg, {6: {(0, 3, 3): [u'O:6', u'T:6'],
(1, 4, 1): [u'O:6'],
(0, 5, 1): [u'PP:6'],
(2, 2, 2): [u'PP:6'],
(0, 4, 2): [u'T:6']},
7: {(1, 3, 3): [u'ET:7', u'FO:7'],
(2, 3, 2): [u'PB:7', u'ST:7', u'ET:7'],
(1, 4, 2): [u'ST:7', u'FO:7'],
(1, 5, 1): [u'PB:7']},
8: {(1, 6, 1): [u'HB:8'],
(0, 4, 4):
[u'C:8', u'SA:8', u'SBT:8'],
(1, 4, 3): [u'SA:8', u'SBT:8', u'BO_2:8', u'BO_3:8'],
(2, 4, 2): [u'C:8', u'TBT:8', u'DD:8', u'DDPN:8', u'HB:8',
u'BO_1:8', u'BO_1:8', u'BO_2:8', u'BO_2:8',
u'BO_3:8', u'BO_3:8']},
9: {(3, 3, 3): [u'TT_1:9', u'TT_1:9', u'TT_2:9', u'SMA:9',
u'SMA:9', u'TO_1:9', u'TO_3:9'],
(0, 6, 3): [u'TC:9'],
(2, 4, 3): [u'TC:9', u'TT_2:9', u'TT_3:9', u'TI:9',
u'SS:9', u'TO_1:9', u'TO_1:9', u'TO_2:9',
u'TO_3:9'],
(1, 3, 5): [u'TI:9'],
(1, 4, 4): [u'TT_1:9', u'SMA:9', u'SS:9'],
(2, 3, 4): [u'TC:9'],
(2, 5, 2): [u'TT_3:9', u'SS:9', u'TO_2:9'],
(1, 7, 1): [u'HD:9']},
10: {(0, 5, 5): [u'PP:10', u'PA:10'],
(3, 4, 3): [u'PA:10', u'SBSA:10', u'MI:10',
u'BS_2:10', u'TBSA:10'],
(2, 6, 2): [u'BS_1:10'],
(2, 4, 4): [u'PP:10', u'MI:10', u'BS_2:10'],
(3, 3, 4): [u'SBSA:10'],
(1, 4, 5): [u'BS_2:10'],
(0, 4, 6): [u'BS_1:10', u'TBSA:10']},
11: {(4, 3, 4): [u'PCPA:11'],
(3, 4, 4): [u'DI:11'],
(1, 5, 5): [u'PCPA:11', u'DI:11'],
(3, 5, 3): [u'H:11']},
12: {(3, 3, 6): [u'TT:12'],
(2, 4, 6): [u'TT:12'],
(0, 6, 6): [u'HP:12', u'HA:12'],
(3, 6, 3): [u'C:12', u'AC:12'],
(4, 4, 4): [u'I:12', u'PBP:12', u'C:12', u'HP:12'],
(0, 8, 4): [u'SC:12']},
13: {(0, 6, 7): [u'SH:13']}})
if __name__ == "__main__":
unittest.main()
| tschaume/pymatgen | pymatgen/analysis/chemenv/coordination_environments/tests/test_coordination_geometries.py | Python | mit | 19,397 | 0.005104 |
from Vintageous.ex.ex_error import ERR_NO_RANGE_ALLOWED
from Vintageous.ex.ex_error import VimError
from Vintageous.ex.parser.tokens import TokenDigits
from Vintageous.ex.parser.tokens import TokenDollar
from Vintageous.ex.parser.tokens import TokenDot
from Vintageous.ex.parser.tokens import TokenMark
from Vintageous.ex.parser.tokens import TokenOffset
from Vintageous.ex.parser.tokens import TokenOfSearch
from Vintageous.ex.parser.tokens import TokenPercent
from Vintageous.ex.parser.tokens import TokenSearchBackward
from Vintageous.ex.parser.tokens import TokenSearchForward
from Vintageous.vi.search import reverse_search_by_pt
from Vintageous.vi.utils import first_sel
from Vintageous.vi.utils import R
from Vintageous.vi.utils import row_at
class Node(object):
pass
class RangeNode(Node):
'''
Represents a Vim line range.
'''
def __init__(self, start=None, end=None, separator=None):
self.start = start or []
self.end = end or []
self.separator = separator
def __str__(self):
return '{0}{1}{2}'.format(
''.join(str(x) for x in self.start),
str(self.separator) if self.separator else '',
''.join(str(x) for x in self.end),
)
    def __repr__(self):
return ('RangeNode<{0}(start:{1}, end:{2}, separator:{3}]>'
.format(self.__class__.__name__, self.start, self.end, self.separator))
def __eq__(self, other):
if not isinstance(other, RangeNode):
return False
return (self.start == other.start and
self.end == other.end and
self.separator == other.separator)
@property
def is_empty(self):
'''
        Indicates whether this range has ever been defined. For example, in
        interactive mode, `True` means that the user hasn't provided any
        line range on the command line.
'''
return not any((self.start, self.end, self.separator))
def resolve_notation(self, view, token, current):
'''
Returns a line number.
'''
if isinstance(token, TokenDot):
pt = view.text_point(current, 0)
return row_at(view, pt)
if isinstance(token, TokenDigits):
return max(int(str(token)) - 1, -1)
if isinstance(token, TokenPercent):
return row_at(view, view.size())
if isinstance(token, TokenDollar):
return row_at(view, view.size())
if isinstance(token, TokenOffset):
return current + sum(token.content)
if isinstance(token, TokenSearchForward):
start_pt = view.text_point(current, 0)
match = view.find(str(token)[1:-1], start_pt)
if not match:
# TODO: Convert this to a VimError or something like that.
raise ValueError('pattern not found')
return row_at(view, match.a)
if isinstance(token, TokenSearchBackward):
start_pt = view.text_point(current, 0)
match = reverse_search_by_pt(view, str(token)[1:-1], 0, start_pt)
if not match:
# TODO: Convert this to a VimError or something like that.
raise ValueError('pattern not found')
return row_at(view, match.a)
if isinstance(token, TokenMark):
return self.resolve_mark(view, token)
raise NotImplementedError()
def resolve_mark(self, view, token):
if token.content == '<':
sel = list(view.sel())[0]
view.sel().clear()
view.sel().add(sel)
if sel.a < sel.b:
return row_at(view, sel.a)
else:
return row_at(view, sel.a - 1)
if token.content == '>':
sel = list(view.sel())[0]
view.sel().clear()
view.sel().add(sel)
if sel.a < sel.b:
return row_at(view, sel.b - 1)
else:
return row_at(view, sel.b)
raise NotImplementedError()
def resolve_line_reference(self, view, line_reference, current=0):
'''
Calculates the line offset determined by @line_reference.
@view
The view where the calculation is made.
@line_reference
The sequence of tokens defining the line range to be calculated.
@current
Line number where we are now.
'''
last_token = None
# XXX: what happens if there is no selection in the view?
current = row_at(view, first_sel(view).b)
for token in line_reference:
# Make sure a search forward doesn't overlap with a match obtained
# right before this search.
if isinstance(last_token, TokenOfSearch) and isinstance(token, TokenOfSearch):
if isinstance(token, TokenSearchForward):
current += 1
current = self.resolve_notation(view, token, current)
last_token = token
return current
def resolve(self, view):
'''
Returns a Sublime Text range representing the Vim line range that the
ex command should operate on.
'''
start = self.resolve_line_reference(view, self.start or [TokenDot()])
if not self.separator:
if start == -1:
return R(-1, -1)
if len(self.start) == 1 and isinstance(self.start[0], TokenPercent):
return R(0, view.size())
return view.full_line(view.text_point(start, 0))
new_start = start if self.separator == ';' else 0
end = self.resolve_line_reference(view, self.end or [TokenDot()], current=new_start)
return view.full_line(R(view.text_point(start, 0), view.text_point(end, 0)))
class CommandLineNode(Node):
def __init__(self, line_range, command):
# A RangeNode
self.line_range = line_range
# A TokenOfCommand
self.command = command
def __str__(self):
return '{0}, {1}'.format(str(self.line_range), str(self.command))
def validate(self):
'''
Raises an error for known conditions.
'''
if not (self.command and self.line_range):
return
if not self.command.addressable and not self.line_range.is_empty:
raise VimError(ERR_NO_RANGE_ALLOWED)
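# Illustrative sketch (not part of the original module): the Vim range ``.,+3``
# would roughly correspond to
#   RangeNode(start=[TokenDot()], end=[TokenOffset([3])], separator=',')
# resolved against a live view with ``node.resolve(view)``. The token
# constructor arguments shown are assumptions inferred from how the tokens are
# consumed above (e.g. ``TokenOffset.content`` is summed as a list of ints).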
| denim2x/Vintageous | ex/parser/nodes.py | Python | mit | 6,391 | 0.001095 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2018 Kevin Deldycke <[email protected]>
# and contributors.
# All Rights Reserved.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import os
from boltons.cacheutils import cachedproperty
from boltons.strutils import indent, strip_ansi
from packaging.specifiers import SpecifierSet
from packaging.version import parse as parse_version
from . import logger
from .bitbar import run
from .platform import current_os
try:
from shutil import which
except ImportError:
from backports.shutil_which import which
# Rendering format of CLI in JSON fields.
CLI_FORMATS = frozenset(['plain', 'fragments', 'bitbar'])
class CLIError(Exception):
""" An error occured when running package manager CLI. """
def __init__(self, code, output, error):
""" The exception internally keeps the result of CLI execution. """
super(CLIError, self).__init__()
self.code = code
self.output = output
self.error = error
def __str__(self):
""" Human-readable error. """
margin = ' ' * 2
return indent((
"\nReturn code: {}\n"
"Output:\n{}\n"
"Error:\n{}").format(
self.code,
indent(str(self.output), margin),
indent(str(self.error), margin)), margin)
class PackageManager(object):
""" Base class from which all package manager definitions should inherits.
"""
# Systematic options passed to package manager CLI. Might be of use to
# force silencing or high verbosity for instance.
cli_args = []
# List of platforms supported by the manager.
platforms = frozenset()
# Version requirement specifier.
requirement = None
def __init__(self):
# Tell the manager either to raise or continue on errors.
self.raise_on_cli_error = False
        # Some managers have the ability to report or ignore packages
        # possessing their own auto-update mechanism.
self.ignore_auto_updates = True
# Log of all encountered CLI errors.
self.cli_errors = []
@cachedproperty
def cli_name(self):
""" Package manager's CLI name.
Is derived by default from the manager's ID.
"""
return self.id
@cachedproperty
def cli_path(self):
""" Fully qualified path to the package manager CLI.
        Automatically searches for the CLI's location on the system.
Returns `None` if CLI is not found or is not a file.
"""
cli_path = which(self.cli_name, mode=os.F_OK)
logger.debug(
"CLI found at {}".format(cli_path) if cli_path
else "{} CLI not found.".format(self.cli_name))
return cli_path
def get_version(self):
""" Invoke the manager and extract its own reported version. """
raise NotImplementedError
@cachedproperty
def version_string(self):
""" Raw but cleaned string of the package manager version.
Returns `None` if the manager had an issue extracting its version.
"""
if self.executable:
version = self.get_version()
if version:
return version.strip()
@cachedproperty
def version(self):
""" Parsed and normalized package manager's own version.
Returns an instance of ``packaging.Version`` or None.
"""
if self.version_string:
return parse_version(self.version_string)
@cachedproperty
def id(self):
""" Return package manager's ID. Defaults based on class name.
This ID must be unique among all package manager definitions and
lower-case as they're used as feature flags for the :command:`mpm` CLI.
"""
return self.__class__.__name__.lower()
@cachedproperty
def name(self):
""" Return package manager's common name. Defaults based on class name.
"""
return self.__class__.__name__
@cachedproperty
def supported(self):
""" Is the package manager supported on that platform? """
return current_os()[0] in self.platforms
@cachedproperty
def executable(self):
""" Is the package manager CLI can be executed by the current user? """
if not self.cli_path:
return False
if not os.access(self.cli_path, os.X_OK):
logger.debug("{} not executable.".format(self.cli_path))
return False
return True
@cachedproperty
def fresh(self):
""" Does the package manager match the version requirement? """
# Version is mandatory.
if not self.version:
return False
if self.requirement:
if self.version not in SpecifierSet(self.requirement):
logger.debug(
"{} {} doesn't fit the '{}' version requirement.".format(
self.id, self.version, self.requirement))
return False
return True
@cachedproperty
def available(self):
""" Is the package manager available and ready-to-use on the system?
Returns True only if the main CLI:
1 - is supported on the current platform,
2 - was found on the system,
3 - is executable, and
4 - match the version requirement.
"""
return bool(
self.supported and
self.cli_path and
self.executable and
self.fresh)
def run(self, args, dry_run=False):
""" Run a shell command, return the output and keep error message.
Removes ANSI escape codes, and returns ready-to-use strings.
"""
assert isinstance(args, list)
logger.debug("Running `{}`...".format(' '.join(args)))
code = 0
output = None
error = None
if not dry_run:
code, output, error = run(*args)
else:
logger.warning("Dry-run mode active: skip execution of command.")
# Normalize messages.
if error:
error = strip_ansi(error)
error = error if error else None
if output:
output = strip_ansi(output)
output = output if output else None
if code and error:
exception = CLIError(code, output, error)
if self.raise_on_cli_error:
raise exception
else:
logger.error(error)
self.cli_errors.append(exception)
logger.debug(output)
return output
@property
def sync(self):
""" Refresh local manager metadata from remote repository. """
logger.info('Sync {} package info...'.format(self.id))
@property
def installed(self):
""" List packages currently installed on the system.
Returns a dict indexed by package IDs. Each item is a dict with
package ID, name and version.
"""
raise NotImplementedError
@staticmethod
def exact_match(query, result):
""" Compare search query and matching result.
        Returns `True` if the matching result exactly matches the search query.
        A light normalization and tokenization of strings is still applied
        before comparison, so "exactness" is judged in the human sense rather
        than the strictly machine sense.
"""
# TODO: tokenize.
return query.lower() == result.lower()
def search(self, query):
""" Search packages whose ID contain exact or partial query.
Returns a dict indexed by package IDs. Each item is a dict with
package ID, name, version and a boolean indicating if the match is
exact or partial.
"""
raise NotImplementedError
@property
def outdated(self):
""" List currently installed packages having a new version available.
Returns a dict indexed by package IDs. Each item is a dict with
package ID, name, current installed version and latest upgradeable
version.
"""
raise NotImplementedError
def upgrade_cli(self, package_id=None):
""" Return a bash-compatible full-CLI to upgrade a package. """
raise NotImplementedError
def upgrade(self, package_id=None, dry_run=False):
""" Perform the upgrade of the provided package to latest version. """
return self.run(self.upgrade_cli(package_id), dry_run=dry_run)
def upgrade_all_cli(self):
""" Return a bash-compatible full-CLI to upgrade all packages. """
raise NotImplementedError
def upgrade_all(self, dry_run=False):
""" Perform a full upgrade of all outdated packages to latest versions.
        If the manager doesn't implement a full-upgrade one-liner, fall back
        to calling the single-package upgrade CLI one by one.
"""
try:
return self.run(self.upgrade_all_cli(), dry_run=dry_run)
except NotImplementedError:
logger.warning(
"{} doesn't seems to implement a full upgrade subcommand. "
"Call single-package upgrade CLI one by one.".format(self.id))
log = []
for package_id in self.outdated:
output = self.upgrade(package_id, dry_run=dry_run)
if output:
log.append(output)
if log:
return '\n'.join(log)
@staticmethod
def render_cli(cmd, cli_format='plain'):
""" Return a formatted CLI in the provided format. """
assert isinstance(cmd, list)
assert cli_format in CLI_FORMATS
if cli_format != 'fragments':
cmd = ' '.join(cmd)
if cli_format == 'bitbar':
cmd = PackageManager.render_bitbar_cli(cmd)
return cmd
@staticmethod
def render_bitbar_cli(full_cli):
""" Format a bash-runnable full-CLI with parameters into bitbar schema.
"""
cmd, params = full_cli.strip().split(' ', 1)
bitbar_cli = "bash={}".format(cmd)
for index, param in enumerate(params.split(' ')):
bitbar_cli += " param{}={}".format(index + 1, param)
return bitbar_cli
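# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the minimal surface a
# concrete manager must provide. Kept commented out so it never registers as a
# real manager; the platform label, version floor and CLI flags below are
# assumptions for demonstration only.
#
#     class Pip(PackageManager):
#         platforms = frozenset(['linux'])
#         requirement = '>= 10.0'
#
#         def get_version(self):
#             # e.g. "pip 18.1 from ... (python 3.7)" -> "18.1"
#             output = self.run([self.cli_path, '--version'])
#             if output:
#                 return output.split()[1]
#
#         def upgrade_cli(self, package_id=None):
#             return [self.cli_path, 'install', '--upgrade', package_id]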
| torkelsson/meta-package-manager | meta_package_manager/base.py | Python | gpl-2.0 | 11,025 | 0 |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common logging helpers."""
import logging
import requests
from google.cloud.logging.entries import LogEntry
from google.cloud.logging.entries import ProtobufEntry
from google.cloud.logging.entries import StructEntry
from google.cloud.logging.entries import TextEntry
try:
from google.cloud.logging_v2.gapic.enums import LogSeverity
except ImportError: # pragma: NO COVER
class LogSeverity(object):
"""Map severities for non-GAPIC usage."""
DEFAULT = 0
DEBUG = 100
INFO = 200
NOTICE = 300
WARNING = 400
ERROR = 500
CRITICAL = 600
ALERT = 700
EMERGENCY = 800
_NORMALIZED_SEVERITIES = {
logging.CRITICAL: LogSeverity.CRITICAL,
logging.ERROR: LogSeverity.ERROR,
logging.WARNING: LogSeverity.WARNING,
logging.INFO: LogSeverity.INFO,
logging.DEBUG: LogSeverity.DEBUG,
logging.NOTSET: LogSeverity.DEFAULT,
}
METADATA_URL = "http://metadata.google.internal./computeMetadata/v1/"
METADATA_HEADERS = {"Metadata-Flavor": "Google"}
def entry_from_resource(resource, client, loggers):
"""Detect correct entry type from resource and instantiate.
:type resource: dict
:param resource: One entry resource from API response.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: Client that owns the log entry.
:type loggers: dict
:param loggers:
A mapping of logger fullnames -> loggers. If the logger
that owns the entry is not in ``loggers``, the entry
will have a newly-created logger.
:rtype: :class:`~google.cloud.logging.entries._BaseEntry`
:returns: The entry instance, constructed via the resource
"""
if "textPayload" in resource:
return TextEntry.from_api_repr(resource, client, loggers)
if "jsonPayload" in resource:
return StructEntry.from_api_repr(resource, client, loggers)
if "protoPayload" in resource:
return ProtobufEntry.from_api_repr(resource, client, loggers)
return LogEntry.from_api_repr(resource, client, loggers)
def retrieve_metadata_server(metadata_key):
"""Retrieve the metadata key in the metadata server.
See: https://cloud.google.com/compute/docs/storing-retrieving-metadata
:type metadata_key: str
:param metadata_key: Key of the metadata which will form the url. You can
also supply query parameters after the metadata key.
e.g. "tags?alt=json"
:rtype: str
:returns: The value of the metadata key returned by the metadata server.
"""
url = METADATA_URL + metadata_key
try:
response = requests.get(url, headers=METADATA_HEADERS)
if response.status_code == requests.codes.ok:
return response.text
except requests.exceptions.RequestException:
        # Ignore the exception; a connection failure means the attribute does
        # not exist in the metadata server.
pass
return None
def _normalize_severity(stdlib_level):
"""Normalize a Python stdlib severity to LogSeverity enum.
:type stdlib_level: int
:param stdlib_level: 'levelno' from a :class:`logging.LogRecord`
:rtype: int
:returns: Corresponding Stackdriver severity.
"""
return _NORMALIZED_SEVERITIES.get(stdlib_level, stdlib_level)
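# A doctest-style sketch of the mapping above:
#
#     >>> import logging
#     >>> _normalize_severity(logging.ERROR) == LogSeverity.ERROR
#     True
#     >>> _normalize_severity(55)  # unknown levels pass through unchanged
#     55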
| tseaver/google-cloud-python | logging/google/cloud/logging/_helpers.py | Python | apache-2.0 | 3,909 | 0 |
# -*- coding: UTF-8 -*-
from openvpnzone import extract_zones_from_status_file
from IPy import IP
def test_empty_server():
assert extract_zones_from_status_file('tests/samples/empty.ovpn-status-v1') \
== {}
def test_one_client_on_server():
assert extract_zones_from_status_file('tests/samples/one.ovpn-status-v1') \
== {'one.vpn.example.org': [IP('198.51.100.8')]}
def test_multiple_client_on_server():
assert extract_zones_from_status_file('tests/samples/multiple.ovpn-status-v1') \
== {
'one.vpn.example.org': [IP('198.51.100.8')],
'two.vpn.example.org': [IP('198.51.100.12')],
'three.vpn.example.org': [IP('198.51.100.16')]
}
def test_subnet_for_client():
assert extract_zones_from_status_file('tests/samples/subnet.ovpn-status-v1') \
== {'one.vpn.example.org': [IP('198.51.100.8')]}
def test_cached_route():
assert extract_zones_from_status_file('tests/samples/cached-route.ovpn-status-v1') \
== {'one.vpn.example.org': [IP('198.51.100.8')]}
| mswart/openvpn2dns | tests/test_parser.py | Python | mit | 1,061 | 0.00377 |
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_can_show_main_menu_and_go_to_each_section(self):
# Jon has heard about a cool new online data joining app. He goes
# to check out its homepage
self.browser.get('http://localhost:8000')
# The page title and header mention data center
self.assertIn('InfoJoiner DataCenter', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('InfoJoiner DataCenter', header_text)
        # There is an HTML5 nav menu
        # Iterate over all menu items: "sources", "views", "tags"
        # For each menu item, enter its page and check that the title is
        # "Menu Title - IJDC"
"""
# He is invited to enter a to-do item straight away
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(
inputbox.get_attribute('placeholder'),
'Enter a to-do item'
)
# He types "Buy peacock feathers" into a text box (Edith's hobby
# is tying fly-fishing lures)
inputbox.send_keys('Buy peacock feathers')
# When He hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an item in a to-do list table
inputbox.send_keys(Keys.ENTER)
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertTrue(
any('1: Buy peacock feathers' in row.text for row in rows),
"New to-do item did not appear in table"
)
        # There is still a text box inviting him to add another item. He
        # enters "Use peacock feathers to make a fly" (he is very
        # methodical)
self.fail('Finish the test!')
        # The page updates again, and now shows both items on his list
        # He wonders whether the site will remember his list. Then he sees
        # that the site has generated a unique URL for him -- there is some
        # explanatory text to that effect.
        # He visits that URL - his to-do list is still there.
# Satisfied, He goes back to sleep
"""
if __name__ == '__main__':
unittest.main()
| baile/infojoiner | infojoiner/datacenter/functional_test.py | Python | mit | 2,451 | 0.001224 |
# Copyright (C) 2016 Pierre Marchand <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pathlib import Path
from .parser import TemplateParser
parser = TemplateParser()
def load_template(dirpath, name, required=True):
home = Path(dirpath)
template_path = home.joinpath(name + '.html')
try:
with template_path.open() as template_file:
template = template_file.read()
except Exception as exc:
if required:
raise exc
else:
return ''
return template
def apply_template(template, data):
    # Work on a copy so the caller's dict is not mutated by the parser.
    data_local = dict(data)
    return parser.apply_template(template, data_local)
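# Minimal usage sketch ('pages' and 'home' are placeholder names; the
# placeholder syntax understood by TemplateParser lives in the sibling
# parser module):
#
#     >>> template = load_template('pages', 'home', required=False)
#     >>> html = apply_template(template, {'title': 'Home'})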
| atelier-cartographique/static-sectioner | sectioner/template.py | Python | agpl-3.0 | 1,286 | 0.002333 |
from config import Prefix
from sigma.core.blacklist import check_black
async def custom_command_detection(ev, message, args):
if message.guild:
if message.content.startswith(Prefix):
cmd = message.content[len(Prefix):].lower()
if cmd not in ev.bot.plugin_manager.commands:
if not check_black(ev.db, message):
try:
custom_commands = ev.db.get_settings(message.guild.id, 'CustomCommands')
                    except Exception:
ev.db.set_settings(message.guild.id, 'CustomCommands', {})
custom_commands = {}
if cmd in custom_commands:
response = custom_commands[cmd]
await message.channel.send(response)
| aurora-pro/apex-sigma | sigma/plugins/moderation/other/custom_command_detection.py | Python | gpl-3.0 | 805 | 0.003727 |
# Copyright (c) 2013-2017 CodeReclaimers, LLC
# Public API v3 description: https://btc-e.com/api/3/documentation
from collections import namedtuple
from . import common, scraping
PairInfoBase = namedtuple("PairInfoBase",
["decimal_places", "min_price", "max_price", "min_amount", "hidden", "fee"])
class PairInfo(PairInfoBase):
def format_currency(self, value):
return common.formatCurrencyDigits(value, self.decimal_places)
def truncate_amount(self, value):
return common.truncateAmountDigits(value, self.decimal_places)
def validate_order(self, trade_type, rate, amount):
if trade_type not in ("buy", "sell"):
raise common.InvalidTradeTypeException("Unrecognized trade type: %r" % trade_type)
if rate < self.min_price or rate > self.max_price:
raise common.InvalidTradePriceException(
"Allowed price range is from %f to %f" % (self.min_price, self.max_price))
formatted_min_amount = self.format_currency(self.min_amount)
if amount < self.min_amount:
msg = "Trade amount %r too small; should be >= %s" % \
(amount, formatted_min_amount)
raise common.InvalidTradeAmountException(msg)
class APIInfo(object):
def __init__(self, connection):
self.connection = connection
self.currencies = None
self.pair_names = None
self.pairs = None
self.server_time = None
self._scrape_pair_index = 0
self.update()
def update(self):
info = self.connection.makeJSONRequest("/api/3/info")
if type(info) is not dict:
raise TypeError("The response is not a dict.")
self.server_time = info.get(u"server_time")
pairs = info.get(u"pairs")
if type(pairs) is not dict:
raise TypeError("The pairs item is not a dict.")
self.pairs = {}
currencies = set()
for name, data in pairs.items():
self.pairs[name] = PairInfo(**data)
a, b = name.split(u"_")
currencies.add(a)
currencies.add(b)
self.currencies = list(currencies)
self.currencies.sort()
self.pair_names = list(self.pairs.keys())
self.pair_names.sort()
def validate_pair(self, pair):
if pair not in self.pair_names:
if "_" in pair:
a, b = pair.split("_", 1)
swapped_pair = "%s_%s" % (b, a)
if swapped_pair in self.pair_names:
msg = "Unrecognized pair: %r (did you mean %s?)"
msg = msg % (pair, swapped_pair)
raise common.InvalidTradePairException(msg)
raise common.InvalidTradePairException("Unrecognized pair: %r" % pair)
def get_pair_info(self, pair):
self.validate_pair(pair)
return self.pairs[pair]
def validate_order(self, pair, trade_type, rate, amount):
self.validate_pair(pair)
pair_info = self.pairs[pair]
pair_info.validate_order(trade_type, rate, amount)
def format_currency(self, pair, amount):
self.validate_pair(pair)
pair_info = self.pairs[pair]
return pair_info.format_currency(amount)
def scrapeMainPage(self):
parser = scraping.BTCEScraper()
# Rotate through the currency pairs between chat requests so that the
# chat pane contents will update more often than every few minutes.
self._scrape_pair_index = (self._scrape_pair_index + 1) % len(self.pair_names)
current_pair = self.pair_names[self._scrape_pair_index]
response = self.connection.makeRequest('/exchange/%s' % current_pair, with_cookie=True)
parser.feed(parser.unescape(response.decode('utf-8')))
parser.close()
r = scraping.ScraperResults()
r.messages = parser.messages
r.devOnline = parser.devOnline
r.supportOnline = parser.supportOnline
r.adminOnline = parser.adminOnline
return r
Ticker = namedtuple("Ticker",
["high", "low", "avg", "vol", "vol_cur", "last", "buy", "sell", "updated"])
def getTicker(pair, connection=None, info=None):
"""Retrieve the ticker for the given pair. Returns a Ticker instance."""
if info is not None:
info.validate_pair(pair)
if connection is None:
connection = common.BTCEConnection()
response = connection.makeJSONRequest("/api/3/ticker/%s" % pair)
if type(response) is not dict:
raise TypeError("The response is a %r, not a dict." % type(response))
elif u'error' in response:
print("There is a error \"%s\" while obtaining ticker %s" % (response['error'], pair))
ticker = None
else:
ticker = Ticker(**response[pair])
return ticker
def getDepth(pair, connection=None, info=None):
"""Retrieve the depth for the given pair. Returns a tuple (asks, bids);
each of these is a list of (price, volume) tuples."""
if info is not None:
info.validate_pair(pair)
if connection is None:
connection = common.BTCEConnection()
response = connection.makeJSONRequest("/api/3/depth/%s" % pair)
if type(response) is not dict:
raise TypeError("The response is not a dict.")
depth = response.get(pair)
if type(depth) is not dict:
raise TypeError("The pair depth is not a dict.")
asks = depth.get(u'asks')
if type(asks) is not list:
raise TypeError("The response does not contain an asks list.")
bids = depth.get(u'bids')
if type(bids) is not list:
raise TypeError("The response does not contain a bids list.")
return asks, bids
Trade = namedtuple("Trade", ['pair', 'type', 'price', 'tid', 'amount', 'timestamp'])
def getTradeHistory(pair, connection=None, info=None, count=None):
"""Retrieve the trade history for the given pair. Returns a list of
Trade instances. If count is not None, it should be an integer, and
specifies the number of items from the trade history that will be
processed and returned."""
if info is not None:
info.validate_pair(pair)
if connection is None:
connection = common.BTCEConnection()
response = connection.makeJSONRequest("/api/3/trades/%s" % pair)
if type(response) is not dict:
raise TypeError("The response is not a dict.")
history = response.get(pair)
if type(history) is not list:
raise TypeError("The response is a %r, not a list." % type(history))
result = []
# Limit the number of items returned if requested.
if count is not None:
history = history[:count]
for h in history:
h["pair"] = pair
t = Trade(**h)
result.append(t)
return result
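# Minimal usage sketch (illustrative only -- the BTC-e exchange is long
# defunct, and "btc_usd" is a historical pair name):
#
#     >>> conn = common.BTCEConnection()
#     >>> info = APIInfo(conn)
#     >>> ticker = getTicker("btc_usd", conn, info)
#     >>> ticker.last, ticker.buy, ticker.sell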
| CodeReclaimers/btce-api | btceapi/public.py | Python | mit | 6,783 | 0.001622 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-2013 Elanz (<http://www.openelanz.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_order(osv.osv):
_inherit = 'sale.order'
def action_button_confirm(self, cr, uid, ids, context=None):
# fetch the partner's id and subscribe the partner to the sale order
assert len(ids) == 1
order = self.browse(cr, uid, ids[0], context=context)
add_delivery_method = True
only_service = True
delivery_method = self.pool.get('delivery.carrier').search(cr, uid, [('default_in_sales', '=', True)])
if delivery_method:
delivery_method = self.pool.get('delivery.carrier').browse(cr, uid, delivery_method[0])
if order.amount_untaxed < delivery_method.min_amount and not order.carrier_id:
if order.partner_id.without_delivery:
add_delivery_method = False
else:
for order_line in order.order_line:
if order_line.product_id:
if order_line.product_id.without_delivery:
add_delivery_method = False
break
elif order_line.product_id.type != 'service':
only_service = False
if only_service:
add_delivery_method = False
if add_delivery_method:
delivery_method = delivery_method.id
self.write(cr, uid, ids[0], {'carrier_id': delivery_method})
        return super(sale_order, self).action_button_confirm(cr, uid, ids, context=context)
| noemis-fr/old-custom | e3z_add_delivery_method/sale_order.py | Python | agpl-3.0 | 2,622 | 0.002288 |
#!/usr/bin/env python
import argparse
import getpass
import sys
import csv
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import connection
from ddb import configuration
import utils
from coveragestore import SampleCoverage
from collections import defaultdict
def get_target_amplicons(filename):
amplicons_list = list()
sys.stdout.write("Opening file {} to retrieve reporting amplicons\n".format(filename))
with open(filename, "r") as bedfile:
reader = csv.reader(bedfile, dialect='excel-tab')
for row in reader:
amplicons_list.append(row[3])
return amplicons_list
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input configuration file for samples")
parser.add_argument('-c', '--configuration', help="Configuration file for various settings")
parser.add_argument('-r', '--report', help="Root name for reports (per sample)")
parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
args = parser.parse_args()
sys.stdout.write("Parsing configuration data\n")
config = configuration.configure_runtime(args.configuration)
sys.stdout.write("Parsing sample data\n")
samples = configuration.configure_samples(args.samples_file, config)
if args.username:
password = getpass.getpass()
auth_provider = PlainTextAuthProvider(username=args.username, password=password)
connection.setup([args.address], "variantstore", auth_provider=auth_provider)
else:
connection.setup([args.address], "variantstore")
sys.stdout.write("Processing samples\n")
for sample in samples:
sys.stdout.write("Processing coverage for sample {}\n".format(sample))
report_panel_path = "/mnt/shared-data/ddb-configs/disease_panels/{}/{}".format(samples[sample]['panel'],
samples[sample]['report'])
target_amplicons = get_target_amplicons(report_panel_path)
reportable_amplicons = list()
for amplicon in target_amplicons:
coverage_data = SampleCoverage.objects.timeout(None).filter(
SampleCoverage.sample == samples[sample]['sample_name'],
SampleCoverage.amplicon == amplicon,
SampleCoverage.run_id == samples[sample]['run_id'],
SampleCoverage.library_name == samples[sample]['library_name'],
SampleCoverage.program_name == "sambamba"
)
ordered_variants = coverage_data.order_by('amplicon', 'run_id').limit(coverage_data.count() + 1000)
for variant in ordered_variants:
reportable_amplicons.append(variant)
with open("{}_{}.txt".format(sample, args.report), "w") as coverage_report:
coverage_report.write("Sample\tLibrary\tAmplicon\tNum Reads\tCoverage\n")
for amplicon in reportable_amplicons:
coverage_report.write("{}\t{}\t{}\t{}\t{}\n".format(amplicon.sample,
amplicon.library_name,
amplicon.amplicon,
amplicon.num_reads,
amplicon.mean_coverage))
| dgaston/ddbio-variantstore | Misc_and_Old/create_sample_coverage_reports.py | Python | mit | 3,582 | 0.005025 |
"""
The WCS package provides functions to parse World Coordinate System (WCS)
coordinates for solar images as well as convert between various solar
coordinate systems. The solar coordinates supported are
* Helioprojective-Cartesian (HPC): The most often used solar coordinate
system. Describes positions on the Sun as angles measured from the
center of the solar disk (usually in arcseconds) using cartesian
coordinates (X, Y)
* Helioprojective-Radial (HPR): Describes positions on the Sun using angles,
similar to HPC, but uses a radial coordinate (rho, psi) system centered
  on the solar disk where psi is measured in the counterclockwise direction.
* Heliocentric-Cartesian (HCC): The same as HPC but with positions expressed
in true (deprojected) physical distances instead of angles on the
celestial sphere.
* Heliocentric-Radial (HCR): The same as HPR but with rho expressed in
true (deprojected) physical distances instead of angles on the celestial
sphere.
* Stonyhurst-Heliographic (HG): Expresses positions on the Sun using
  longitude and latitude on the solar sphere, with the origin at the
  intersection of the solar equator and the central meridian as seen
  from Earth. This means that the coordinate system remains fixed
with respect to Earth while the Sun rotates underneath it.
* Carrington-Heliographic (HGC): Carrington longitude is offset
from Stonyhurst longitude by a time-dependent scalar value, L0. At the
start of each Carrington rotation, L0 = 360, and steadily decreases
until it reaches L0 = 0, at which point the next Carrington rotation
starts.
Some definitions
* b0: Tilt of the solar North rotational axis toward the observer
  (heliographic latitude of the observer). Note that SOLAR_B0,
HGLT_OBS, and CRLT_OBS are all synonyms.
* l0: Carrington longitude of central meridian as seen from Earth.
* dsun_meters: Distance between observer and the Sun. Default is 1 AU.
* rsun_meters: Radius of the Sun in meters. Default is 6.955e8 meters. This value is stored
locally in this module and can be modified if necessary.
References
----------
| Thompson (2006), A&A, 449, 791 <http://dx.doi.org/10.1051/0004-6361:20054262>
| PDF <http://fits.gsfc.nasa.gov/wcs/coordinates.pdf>
"""
from __future__ import absolute_import
from sunpy.wcs.wcs import *
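# A minimal usage sketch, assuming the legacy helpers (e.g. ``convert_hpc_hg``)
# are re-exported by the star import above; names and signatures vary between
# sunpy releases, so treat this as illustrative rather than definitive:
#
#     >>> # Helioprojective-Cartesian (arcsec) -> Stonyhurst heliographic (deg)
#     >>> lon, lat = convert_hpc_hg(100.0, 200.0)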
| Alex-Ian-Hamilton/sunpy | sunpy/wcs/__init__.py | Python | bsd-2-clause | 2,364 | 0.000423 |
# Convert input string to lowercase
string = input()
print(string.lower())
| LTKills/languages | python/17.py | Python | gpl-3.0 | 66 | 0.030303 |
import logging
import random
import urllib.parse
import collections
import aiohttp
import discord
import motor.motor_asyncio
from discord.ext import commands
from .common import Cog
log = logging.getLogger(__name__)
class BooruError(Exception):
pass
class BooruProvider:
url = ''
@classmethod
def transform_file_url(cls, url):
return url
@classmethod
def get_author(cls, post):
return post['author']
@classmethod
async def get_posts(cls, bot, tags, *, limit=15):
headers = {
'User-Agent': 'Yiffmobile v2 (José, https://github.com/lnmds/jose)'
}
tags = urllib.parse.quote(' '.join(tags), safe='')
async with bot.session.get(
f'{cls.url}&limit={limit}&tags={tags}',
headers=headers) as resp:
results = await resp.json()
if not results:
return []
try:
# e621 sets this to false
# when the request fails
if not results.get('success', True):
raise BooruError(results.get('reason'))
except AttributeError:
# when the thing actually worked and
# its a list of posts and not a fucking
# dictionary
# where am I gonna see good porn APIs?
pass
# transform file url
for post in results:
post['file_url'] = cls.transform_file_url(post['file_url'])
return results
class E621Booru(BooruProvider):
url = 'https://e621.net/post/index.json?'
url_post = 'https://e621.net/post/show/{0}'
class HypnohubBooru(BooruProvider):
url = 'http://hypnohub.net/post/index.json?'
url_post = 'https://hypnohub.net/post/show/{0}'
@classmethod
def transform_file_url(cls, url):
return 'https:' + url.replace('.net//', '.net/')
class GelBooru(BooruProvider):
url = 'https://gelbooru.com/index.php?page=dapi&s=post&json=1&q=index'
url_post = 'https://gelbooru.com/index.php?page=post&s=view&id={0}'
@classmethod
def get_author(cls, post):
return post['owner']
class NSFW(Cog, requires=['config']):
"""NSFW commands.
Fetching works on a "non-repeataibility" basis (unless
the bot restarts). This means that with each set of tags
you give for José to search, it will record the given post
and make sure it doesn't repeat again.
"""
def __init__(self, bot):
super().__init__(bot)
self.whip_coll = self.config.jose_db['whip']
self.repeat_cache = collections.defaultdict(dict)
def key(self, tags):
return ','.join(tags)
def mark_post(self, ctx, tags: list, post: dict):
"""Mark this post as seen."""
cache = self.repeat_cache[ctx.guild.id]
k = self.key(tags)
used = cache.get(k, [])
used.append(post['id'])
cache[k] = used
def filter(self, ctx, tags: list, posts):
"""Filter the posts so we get the only posts
that weren't seen."""
cache = self.repeat_cache[ctx.guild.id]
used_posts = cache.get(self.key(tags), [])
return list(filter(lambda post: post['id'] not in used_posts, posts))
async def booru(self, ctx, booru, tags: list):
if ctx.channel.topic and '[jose:no_nsfw]' in ctx.channel.topic:
return
# taxxx
await self.jcoin.pricing(ctx, self.prices['API'])
try:
# grab posts
posts = await booru.get_posts(ctx.bot, tags)
posts = self.filter(ctx, tags, posts)
if not posts:
return await ctx.send('Found nothing.\n'
'(this can be caused by an exhaustion '
f'of the tags `{ctx.prefix}help NSFW`)')
# grab random post
post = random.choice(posts)
self.mark_post(ctx, tags, post)
post_id = post.get('id')
post_author = booru.get_author(post)
log.info('%d posts from %s, chose %d', len(posts), booru.__name__,
post_id)
tags = (post['tags'].replace('_', '\\_'))[:500]
# add stuffs
embed = discord.Embed(title=f'Posted by {post_author}')
embed.set_image(url=post['file_url'])
embed.add_field(name='Tags', value=tags)
embed.add_field(name='URL', value=booru.url_post.format(post_id))
# hypnohub doesn't have this
if 'fav_count' in post and 'score' in post:
embed.add_field(
name='Votes/Favorites',
value=f"{post['score']} votes, "
f"{post['fav_count']} favorites")
# send
await ctx.send(embed=embed)
except BooruError as err:
raise self.SayException(f'Error while fetching posts: `{err!r}`')
except aiohttp.ClientError as err:
log.exception('nsfw client error')
raise self.SayException(f'Something went wrong. Sorry! `{err!r}`')
@commands.command()
@commands.is_nsfw()
async def e621(self, ctx, *tags):
"""Randomly searches e621 for posts."""
async with ctx.typing():
await self.booru(ctx, E621Booru, tags)
@commands.command(aliases=['hh'])
@commands.is_nsfw()
async def hypnohub(self, ctx, *tags):
"""Randomly searches Hypnohub for posts."""
async with ctx.typing():
await self.booru(ctx, HypnohubBooru, tags)
@commands.command()
@commands.is_nsfw()
async def gelbooru(self, ctx, *tags):
"""Randomly searches Gelbooru for posts."""
async with ctx.typing():
await self.booru(ctx, GelBooru, tags)
@commands.command()
@commands.is_nsfw()
async def penis(self, ctx):
"""get penis from e621 bb"""
await ctx.invoke(self.bot.get_command('e621'), 'penis')
@commands.command()
@commands.cooldown(5, 1800, commands.BucketType.user)
async def whip(self, ctx, *, person: discord.User = None):
"""Whip someone.
If no arguments provided, shows how many whips you
received.
The command has a 5/1800s cooldown per-user
"""
if not person:
whip = await self.whip_coll.find_one({'user_id': ctx.author.id})
if not whip:
return await ctx.send(f'**{ctx.author}** was never whipped')
return await ctx.send(f'**{ctx.author}** was whipped'
f' {whip["whips"]} times')
if person == ctx.author:
return await ctx.send('no')
uid = person.id
whip = await self.whip_coll.find_one({'user_id': uid})
if not whip:
whip = {
'user_id': uid,
'whips': 0,
}
await self.whip_coll.insert_one(whip)
await self.whip_coll.update_one({
'user_id': uid
}, {'$inc': {
'whips': 1
}})
await ctx.send(f'**{ctx.author}** whipped **{person}** '
f'They have been whipped {whip["whips"] + 1} times.')
@commands.command()
async def whipboard(self, ctx):
"""Whip leaderboard."""
e = discord.Embed(title='Whip leaderboard')
data = []
cur = self.whip_coll.find().sort('whips',
motor.pymongo.DESCENDING).limit(15)
async for whip in cur:
u = self.bot.get_user(whip['user_id'])
u = str(u)
data.append(f'{u:30s} -> {whip["whips"]}')
joined = '\n'.join(data)
e.description = f'```\n{joined}\n```'
await ctx.send(embed=e)
def setup(bot):
bot.add_jose_cog(NSFW)
| lnmds/jose | ext/nsfw.py | Python | mit | 7,881 | 0 |
# -*- coding: utf-8 -*-
import logging
import logging.handlers
import radio
import datetime
import sys
import os
class RadioLogger():
"""Radio logger"""
def __init__(self, LOG_FILE, VERBOSE):
"""init the logger"""
# set up formatting for console and the two log files
confor = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s', '%H:%M:%S')
warfor = logging.Formatter('%(asctime)s :: %(levelname)-8s :: %(message)s', '%b-%d %H:%M:%S')
# set up logging to STDOUT for all levels DEBUG and higher
con = logging.StreamHandler(sys.stdout)
con.setLevel(logging.DEBUG)
con.setFormatter(confor)
# set up logging to a file for all levels DEBUG and higher
war = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=500000, backupCount=3)
war.setLevel(logging.DEBUG)
war.setFormatter(warfor)
# create Logger object
self.mylogger = logging.getLogger('MAIN')
self.mylogger.setLevel(logging.DEBUG)
if VERBOSE:
self.mylogger.addHandler(con)
self.mylogger.addHandler(war)
from radio import DEVELOPMENT
if DEVELOPMENT:
werkzeug_logger = logging.getLogger('werkzeug')
werkzeug_logger.setLevel(logging.DEBUG)
werkzeug_logger.addHandler(con)
werkzeug_logger.addHandler(war)
def log(self, toLog, logLevel):
"""wrapper for logger output"""
try:
if logLevel == 'DEBUG':
self.mylogger.debug(toLog)
elif logLevel == 'INFO':
self.mylogger.info(toLog)
elif logLevel == 'WARNING':
self.mylogger.warning(toLog)
elif logLevel == 'ERROR':
self.mylogger.error(toLog)
elif logLevel == 'CRITICAL':
self.mylogger.critical(toLog)
time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
radio.LOG_LIST.append({'level': logLevel, 'message': toLog, 'time': time})
except ValueError:
pass
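# Minimal usage sketch (assumes the surrounding ``radio`` package provides
# the LOG_LIST and DEVELOPMENT attributes imported above; the log path is a
# placeholder):
#
#     >>> logger = RadioLogger('/tmp/radio.log', VERBOSE=True)
#     >>> logger.log('station started', 'INFO')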
| hephaestus9/Radio | radio/logger.py | Python | mit | 2,105 | 0.0019 |
from __future__ import division
import numpy as np
from Tkinter import *
import json
import io
import unicodecsv as csv
#import csv
#file = open("moviesTest-1970.txt",'r')
act = open("invertedIndexActorsWeightedAll.txt",'r')
dir = open("invertedIndexDirectorsWeightedAll.txt", 'r')
wri = open("invertedIndexWritersWeightedAll.txt", 'r')
#line = file.readline()
lact = act.readline()
ldir = dir.readline()
lwri = wri.readline()
#gson = json.loads(line)
jact = json.loads(lact)
jdir = json.loads(ldir)
jwri = json.loads(lwri)
#file.close()
act.close()
dir.close()
wri.close()
class Test:
def calcolaMedie(self,actors,directors,writers):
mediaAct = 0
mediaDir = 0
mediaWri = 0
for elem in actors:
print elem
mediaAct += float(jact.get(elem).get("rating"))
for elem in directors:
mediaDir += float(jdir.get(elem).get("rating"))
for elem in writers:
mediaWri += float(jwri.get(elem).get("rating"))
mediaAct = float(mediaAct/len(actors))
mediaDir = float(mediaDir/len(directors))
mediaWri = float(mediaWri/len(writers))
return mediaAct,mediaDir,mediaWri
#### extract data from the json files ####
def readData(self,filename):
file = open(filename, 'r')
line = file.readline()
print line
gson = json.loads(line)
file.close()
vector = []
input = []
labels = []
titles = []
#indice = 0
for elem in gson:
#titles.append(gson.get(elem).get("title"))
actors = gson.get(elem).get("actors")
directors = gson.get(elem).get("director")
writers = gson.get(elem).get("writer")
input.append([actors,directors,writers])
#imdbRating = float(gson.get(elem).get("imdbRating"))
mediaAct, mediaDir, mediaWri = self.calcolaMedie(actors, directors, writers)
vect = [1,mediaAct, mediaDir, mediaWri]
vector.append(vect)
#labels.append(int(imdbRating)) ## CAST PER CLASSI DISCRETE ##
data = np.array(vector)
#labels = np.array(labels)
#train_data,test_data,train_labels,test_labels = train_test_split(data,labels, train_size= 0.5)
#return train_data, train_labels,test_data,test_labels
print "lettura terminata"
return data,input
def hypothesis(self,x,theta):
l_theta = []
for i in range(len(theta)):
#print theta[i]
thetaX = x.dot(theta[i])# wx
thetaX_exp = np.exp(thetaX) # exp(wx)
l_theta.append(thetaX_exp)
l_theta = np.array(l_theta)
#print np.shape(l_theta)
thetaX_exp_sum = np.sum(l_theta) # sum of exp(wx)
#print thetaX_exp_sum
p = l_theta.T / thetaX_exp_sum # 5xlen(x) predicted results
if np.isinf(p).any(): # deal with overflow in results.
inf_idx = np.isinf(p) # idx where overflow occurs
            val = np.sum(p, 0) / np.sum(inf_idx, 0) * inf_idx  # values to be used for substitution
p[inf_idx] = val[inf_idx] # substitute values
return p.T
#### predict the labels for a set of observations ####
def test(self,data,theta):
pred_lab = []
correct = 0
for i in range(len(data)):
p = self.hypothesis(data[i], theta)
max = 0
ind = 0
for k, x in enumerate(p):
if x > max:
max = x
ind = k
pred_lab.append(ind+1)
'''for j in range(len(labels)):
if labels[j] == pred_lab[j]:
correct += 1
correctness = (correct * 100) / len(labels)'''
return pred_lab
#### predict the label for a single observation ####
def singleTest(self,data,theta):
max = 0
ind = 0
p = self.hypothesis(data,theta)
for k, x in enumerate(p):
if x > max:
max = x
ind = k
pred_lab = ind+1
return pred_lab
#### reads the theta from file ####
def getTheta(self):
filenameTheta = "thetas.txt"
fileTheta = open(filenameTheta, 'r')
lines = fileTheta.readlines()
theta = []
for line in lines:
line = line.replace("\n", "")
line = line.rstrip()
l = line.split(' ')
for i in range(len(l)):
l[i] = float(l[i])
theta.append(l)
theta = np.array(theta)
return theta
#### print the results on a file in the case of a batch prediction ####
def results(self,fileResult,input,pred_lab):
fileRes = open(fileResult,'w')
writer = csv.writer(fileRes,delimiter = ',')
writer.writerow(("ACTORS","DIRECTORS","WRITERS","PREDICTED"))
for i in range(len(pred_lab)):
writer.writerow((input[i][0],input[i][1],input[i][2],pred_lab[i]))
#writer.writerow(unicode(titles[i]) + unicode("\t") + unicode(labels[i]) + unicode("\t") + unicode(
#pred_lab[i]) + unicode("\n"))
fileRes.close()
#### initialization for a set of predictions ####
def init2(self,filename,fileResult):
data,input =self.readData(filename)
theta = self.getTheta()
pred_lab = self.test(data,theta)
self.results(fileResult,input,pred_lab)
#print "ACCURACY ON TEST FILE IS: " + str(correctness) + "% "
return 1
#### initialization for a single prediction ####
def init(self,actors,directors,writers):
act = [x for x in actors if x != "None"]
dir = [x for x in directors if x != "None"]
wri = [x for x in writers if x != "None"]
mediaAct,mediaDir,mediaWri = self.calcolaMedie(act,dir,wri)
data = [1,mediaAct,mediaDir,mediaWri]
data = np.array(data)
#data,labels = self.readData()
filenameTheta = "thetas.txt"
fileTheta = open(filenameTheta,'r')
lines = fileTheta.readlines()
theta = []
for line in lines:
line = line.replace("\n","")
line = line.rstrip()
l = line.split(' ')
for i in range(len(l)):
l[i] = float(l[i])
theta.append(l)
theta = np.array(theta)
label = self.singleTest(data,theta)
return label
#print " LABEL PREDICTED: "+ str(label)
| 93lorenzo/software-suite-movie-market-analysis | testMovies.py | Python | gpl-3.0 | 6,486 | 0.015264 |
from google.appengine.ext import db
class Stuff (db.Model):
owner = db.UserProperty(required=True, auto_current_user=True)
pulp = db.BlobProperty()
class Greeting(db.Model):
author = db.UserProperty()
content = db.StringProperty(multiline=True)
avatar = db.BlobProperty()
date = db.DateTimeProperty(auto_now_add=True)
class Placebo(db.Model):
developer = db.StringProperty()
OID = db.StringProperty()
concept = db.StringProperty()
category = db.StringProperty()
taxonomy = db.StringProperty()
taxonomy_version = db.StringProperty()
code = db.StringProperty()
descriptor = db.StringProperty()
| 0--key/lib | portfolio/2009_GoogleAppEngine/apps/0--key/models.py | Python | apache-2.0 | 651 | 0.004608 |
""" Care about audio fileformat
"""
try:
from mutagen.flac import FLAC
from mutagen.oggvorbis import OggVorbis
except ImportError:
pass
import parser
import mutagenstripper
class MpegAudioStripper(parser.GenericParser):
""" Represent mpeg audio file (mp3, ...)
"""
def _should_remove(self, field):
return field.name in ("id3v1", "id3v2")
class OggStripper(mutagenstripper.MutagenStripper):
""" Represent an ogg vorbis file
"""
def _create_mfile(self):
self.mfile = OggVorbis(self.filename)
class FlacStripper(mutagenstripper.MutagenStripper):
""" Represent a Flac audio file
"""
def _create_mfile(self):
self.mfile = FLAC(self.filename)
def remove_all(self):
""" Remove the "metadata" block from the file
"""
super(FlacStripper, self).remove_all()
self.mfile.clear_pictures()
self.mfile.save()
return True
def is_clean(self):
""" Check if the "metadata" block is present in the file
"""
return super(FlacStripper, self).is_clean() and not self.mfile.pictures
def get_meta(self):
""" Return the content of the metadata block if present
"""
metadata = super(FlacStripper, self).get_meta()
if self.mfile.pictures:
metadata['picture:'] = 'yes'
return metadata
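# Minimal usage sketch (assumes the MutagenStripper base class takes the
# file name, as the ``self.filename`` attribute above implies; 'song.flac'
# is a placeholder):
#
#     >>> stripper = FlacStripper('song.flac')
#     >>> if not stripper.is_clean():
#     ...     stripper.remove_all()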
| jubalh/MAT | libmat/audio.py | Python | gpl-2.0 | 1,375 | 0 |
from .ctp_gateway import CtpGateway
| bigdig/vnpy | vnpy/gateway/ctp/__init__.py | Python | mit | 35 | 0.028571 |
from unittest import TestCase
import neuropsydia as n
n.start(open_window=False)
class TestColor(TestCase):
    def test_returns_tuple(self):
c = n.color("w")
        self.assertTrue(isinstance(c, tuple))
| neuropsychology/Neuropsydia.py | neuropsydia/tests/test_color.py | Python | mpl-2.0 | 210 | 0.009524 |
"""The tower of Hanoi."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def tower_of_hanoi(height, from_pole, to_pole, with_pole, counter):
"""Tower of Hanoi.
Time complexity: T(1) = 1, T(n) = 2T(n - 1) + 1 => O(2^n).
Space complexity: O(1).
"""
if height == 1:
counter[0] += 1
print('{0} -> {1}'.format(from_pole, to_pole))
else:
tower_of_hanoi(height - 1, from_pole, with_pole, to_pole, counter)
tower_of_hanoi(1, from_pole, to_pole, with_pole, counter)
tower_of_hanoi(height - 1, with_pole, to_pole, from_pole, counter)
def main():
from_pole = 'A'
to_pole = 'B'
with_pole = 'C'
height = 1
counter = [0]
print('height: {}'.format(height))
tower_of_hanoi(height, from_pole, to_pole, with_pole, counter)
print('counter: {}'.format(counter[0]))
height = 2
counter = [0]
print('height: {}'.format(height))
tower_of_hanoi(height, from_pole, to_pole, with_pole, counter)
print('counter: {}'.format(counter[0]))
height = 5
counter = [0]
print('height: {}'.format(height))
tower_of_hanoi(height, from_pole, to_pole, with_pole, counter)
print('counter: {}'.format(counter[0]))
if __name__ == '__main__':
main()
| bowen0701/algorithms_data_structures | alg_tower_of_hanoi.py | Python | bsd-2-clause | 1,320 | 0.001515 |
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class ShopifyAccount(ProviderAccount):
pass
class ShopifyProvider(OAuth2Provider):
id = 'shopify'
name = 'Shopify'
account_class = ShopifyAccount
def get_auth_params(self, request, action):
ret = super(ShopifyProvider, self).get_auth_params(request, action)
shop = request.GET.get('shop', None)
if shop:
ret.update({'shop': shop})
return ret
def get_default_scope(self):
return ['read_orders', 'read_products']
def extract_uid(self, data):
return str(data['shop']['id'])
def extract_common_fields(self, data):
# See: https://docs.shopify.com/api/shop
# User is only available with Shopify Plus, email is the only
# common field
return dict(email=data['shop']['email'])
providers.registry.register(ShopifyProvider)
| wli/django-allauth | allauth/socialaccount/providers/shopify/provider.py | Python | mit | 1,032 | 0 |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 18 12:04:44 2017
@author: rstreet
"""
from os import getcwd, path, remove, environ
from sys import path as systempath
cwd = getcwd()
systempath.append(path.join(cwd,'..'))
import artemis_subscriber
import log_utilities
import glob
from datetime import datetime
import pytz
import survey_data_utilities
import event_classes
def test_read_ogle_param_files():
"""Function to test whether the OGLE parameter files can be parsed
properly
"""
# Note that OGLE lenses.par files are searched for using a glob call
# which resolves the year suffices so that need not be given here
config = {
'ogle_data_local_location': '../../data/',
'ogle_time_stamp_file': 'ogle.last.changed',
'ogle_lenses_file': 'lenses.par',
'ogle_updated_file': 'ogle.last.updated',
}
ogle_data = survey_data_utilities.read_ogle_param_files(config)
last_changed = datetime(2016, 11, 2, 1, 4, 39, 360000)
last_changed= last_changed.replace(tzinfo=pytz.UTC)
assert ogle_data.last_changed == last_changed
last_updated = datetime(2017, 1, 23, 22, 30, 16)
last_updated= last_updated.replace(tzinfo=pytz.UTC)
assert ogle_data.last_updated == last_updated
assert len(ogle_data.lenses) == 1927
lens = event_classes.Lens()
assert type(ogle_data.lenses['OGLE-2016-BLG-0110']) == type(lens)
def test_read_moa_param_files():
"""Function to test whether the MOA parameter files can be parsed
properly
"""
config = {
'moa_data_local_location': '../../data/',
'moa_time_stamp_file': 'moa.last.changed',
'moa_lenses_file': 'moa_lenses.par',
'moa_updated_file': 'moa.last.updated',
}
moa_data = survey_data_utilities.read_moa_param_files(config)
last_changed = datetime(2016, 11, 4, 4, 0, 35)
last_changed= last_changed.replace(tzinfo=pytz.UTC)
assert moa_data.last_changed == last_changed
last_updated = datetime(2017, 1, 23, 22, 30, 19)
last_updated= last_updated.replace(tzinfo=pytz.UTC)
assert moa_data.last_updated == last_updated
assert len(moa_data.lenses) == 618
lens = event_classes.Lens()
assert type(moa_data.lenses['MOA-2016-BLG-618']) == type(lens)
def test_scrape_rtmodel():
year = 2019
event='OB190011'
output = survey_data_utilities.scrape_rtmodel(year, event)
assert len(output) == 5
assert 'http' in output[0]
assert 'http' in output[2]
assert type(output[3]) == type(True)
assert type(output[4]) == type(True)
def test_scrape_mismap():
year = 2019
event='OB190011'
output = survey_data_utilities.scrape_mismap(year, event)
assert len(output) == 4
assert 'http' in output[0]
assert 'png' in output[1]
assert type(output[2]) == type(True)
assert type(output[3]) == type(True)
def test_scrape_moa():
year = 2019
event='OB190011'
output = survey_data_utilities.scrape_moa(year, event)
assert len(output) == 4
assert 'http' in output[0]
assert 'jpg' in output[1]
assert type(output[2]) == type(True)
assert type(output[3]) == type(True)
def test_scrape_kmt():
year = 2019
event='OB190335'
output = survey_data_utilities.scrape_kmt(year, event)
assert len(output) == 4
assert 'http' in output[0]
assert 'jpg' in output[1] or 'N/A' in output[1]
assert type(output[2]) == type(True)
assert type(output[3]) == type(True)
print(output)
def test_fetch_ogle_fchart():
year = 2019
event='OB190011'
output = survey_data_utilities.fetch_ogle_fchart(year, event)
assert len(output) == 2
assert 'http' in output[0]
assert 'jpg' in output[0]
assert type(output[1]) == type(True)
if __name__ == '__main__':
#test_scrape_rtmodel()
#test_scrape_mismap()
#test_scrape_moa()
test_scrape_kmt()
    #test_fetch_ogle_fchart()
| ytsapras/robonet_site | scripts/tests/test_survey_data_utilities.py | Python | gpl-2.0 | 4,078 | 0.01643 |
##
##
# File auto-generated against equivalent DynamicSerialize Java class
# and then modified post-generation to use AbstractGfeRequest and
# implement str(), repr()
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# xx/xx/?? dgilling Initial Creation.
# 03/13/13 1759 dgilling Add software history header.
# 05/13/15 4427 dgilling Add siteIdOverride field.
#
#
from dynamicserialize.dstypes.com.raytheon.uf.common.dataplugin.gfe.request import AbstractGfeRequest
from dynamicserialize.dstypes.com.raytheon.uf.common.message import WsId
class ExecuteIfpNetCDFGridRequest(AbstractGfeRequest):
def __init__(self, outputFilename=None, parmList=[], databaseID=None,
startTime=None, endTime=None, mask=None, geoInfo=False,
compressFile=False, configFileName=None, compressFileFactor=0,
trim=False, krunch=False, userID=None, logFileName=None, siteIdOverride=None):
super(ExecuteIfpNetCDFGridRequest, self).__init__()
self.outputFilename = outputFilename
self.parmList = parmList
self.databaseID = databaseID
self.startTime = startTime
self.endTime = endTime
self.mask = mask
self.geoInfo = geoInfo
self.compressFile = compressFile
self.configFileName = configFileName
self.compressFileFactor = compressFileFactor
self.trim = trim
self.krunch = krunch
self.userID = userID
self.logFileName = logFileName
self.siteIdOverride = siteIdOverride
if self.userID is not None:
self.workstationID = WsId(progName='ifpnetCDF', userName=self.userID)
if self.databaseID is not None:
self.siteID = self.databaseID.getSiteId()
def __str__(self):
retVal = "ExecuteIfpNetCDFGridRequest["
retVal += "wokstationID: " + str(self.workstationID) + ", "
retVal += "siteID: " + str(self.siteID) + ", "
retVal += "outputFilename: " + str(self.outputFilename) + ", "
retVal += "parmList: " + str(self.parmList) + ", "
retVal += "databaseID: " + str(self.databaseID) + ", "
retVal += "startTime: " + str(self.startTime) + ", "
retVal += "endTime: " + str(self.endTime) + ", "
retVal += "mask: " + str(self.mask) + ", "
retVal += "geoInfo: " + str(self.geoInfo) + ", "
retVal += "compressFile: " + str(self.compressFile) + ", "
retVal += "configFileName: " + str(self.configFileName) + ", "
retVal += "compressFileFactor: " + str(self.compressFileFactor) + ", "
retVal += "trim: " + str(self.trim) + ", "
retVal += "krunch: " + str(self.krunch) + ", "
retVal += "userID: " + str(self.userID) + ", "
retVal += "logFileName: " + str(self.logFileName) + ", "
retVal += "siteIdOverride: " + str(self.siteIdOverride)
retVal += "]"
return retVal
def __repr__(self):
retVal = "ExecuteIfpNetCDFGridRequest("
retVal += "wokstationID=" + repr(self.workstationID) + ", "
retVal += "siteID=" + repr(self.siteID) + ", "
retVal += "outputFilename=" + repr(self.outputFilename) + ", "
retVal += "parmList=" + repr(self.parmList) + ", "
retVal += "databaseID=" + repr(self.databaseID) + ", "
retVal += "startTime=" + repr(self.startTime) + ", "
retVal += "endTime=" + repr(self.endTime) + ", "
retVal += "mask=" + repr(self.mask) + ", "
retVal += "geoInfo=" + repr(self.geoInfo) + ", "
retVal += "compressFile=" + repr(self.compressFile) + ", "
retVal += "configFileName=" + repr(self.configFileName) + ", "
retVal += "compressFileFactor=" + repr(self.compressFileFactor) + ", "
retVal += "trim=" + repr(self.trim) + ", "
retVal += "krunch=" + repr(self.krunch) + ", "
retVal += "userID=" + repr(self.userID) + ", "
retVal += "logFileName=" + repr(self.logFileName) + ", "
retVal += "siteIdOverride: " + str(self.siteIdOverride)
retVal += ")"
return retVal
def getOutputFilename(self):
return self.outputFilename
def setOutputFilename(self, outputFilename):
self.outputFilename = outputFilename
def getParmList(self):
return self.parmList
def setParmList(self, parmList):
self.parmList = parmList
def getDatabaseID(self):
return self.databaseID
def setDatabaseID(self, databaseID):
self.databaseID = databaseID
def getStartTime(self):
return self.startTime
def setStartTime(self, startTime):
self.startTime = startTime
def getEndTime(self):
return self.endTime
def setEndTime(self, endTime):
self.endTime = endTime
def getMask(self):
return self.mask
def setMask(self, mask):
self.mask = mask
def getGeoInfo(self):
return self.geoInfo
def setGeoInfo(self, geoInfo):
self.geoInfo = geoInfo
def getCompressFile(self):
return self.compressFile
def setCompressFile(self, compressFile):
self.compressFile = compressFile
def getConfigFileName(self):
return self.configFileName
def setConfigFileName(self, configFileName):
self.configFileName = configFileName
def getCompressFileFactor(self):
return self.compressFileFactor
def setCompressFileFactor(self, compressFileFactor):
self.compressFileFactor = compressFileFactor
def getTrim(self):
return self.trim
def setTrim(self, trim):
self.trim = trim
def getKrunch(self):
return self.krunch
def setKrunch(self, krunch):
self.krunch = krunch
def getUserID(self):
return self.userID
def setUserID(self, userID):
self.userID = userID
def getLogFileName(self):
return self.logFileName
def setLogFileName(self, logFileName):
self.logFileName = logFileName
def getSiteIdOverride(self):
return self.siteIdOverride
def setSiteIdOverride(self, siteIdOverride):
self.siteIdOverride = siteIdOverride
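# Minimal construction sketch (all field values are placeholders; real
# requests pass a DatabaseID object so siteID can be derived from it):
#
#     >>> req = ExecuteIfpNetCDFGridRequest(
#     ...     outputFilename='/tmp/grid.nc', parmList=['T_SFC'],
#     ...     userID='gfe', compressFile=True)
#     >>> req.getOutputFilename()
#     '/tmp/grid.nc'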
| mjames-upc/python-awips | dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/gfe/request/ExecuteIfpNetCDFGridRequest.py | Python | bsd-3-clause | 6,327 | 0.000632 |
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import weakref
import functools
# Decorator to target specific messages.
def targets(target_messages, no_first=False):
if isinstance(target_messages, str):
target_messages = [target_messages]
def wrapper(f):
@functools.wraps(f)
def _(self, *args, **kwargs):
message = args[0]
if message in target_messages:
if no_first and kwargs["i"] == 0:
return
f(self, *args, **kwargs)
return _
return wrapper
class Observer(object):
__metaclass__ = ABCMeta
@abstractmethod
def update(self, *args, **kwargs):
pass
class Observable(object):
def __init__(self):
self.observers = weakref.WeakSet()
def register(self, observer):
self.observers.add(observer)
def unregister(self, observer):
self.observers.discard(observer)
def unregister_all(self):
self.observers.clear()
def update_observers(self, *args, **kwargs):
for observer in self.observers:
observer.update(*args, **kwargs)
def __getstate__(self):
state = self.__dict__.copy()
# Do not try to pickle observers.
del state["observers"]
return state
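# Minimal usage sketch of the pattern defined above (class and message
# names are illustrative):
#
#     >>> class Printer(Observer):
#     ...     @targets("tick", no_first=True)
#     ...     def update(self, message, i=0):
#     ...         print("saw", message, "at", i)
#     >>> obs = Observable()
#     >>> printer = Printer()
#     >>> obs.register(printer)
#     >>> for i in range(2):
#     ...     obs.update_observers("tick", i=i)
#     saw tick at 1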
| terhorst/psmcpp | smcpp/observe.py | Python | gpl-3.0 | 1,330 | 0 |
import numpy as np
from numpy.testing import *
from numpy.testing.noseclasses import KnownFailureTest
import nose
def test_slow():
@dec.slow
def slow_func(x,y,z):
pass
assert_(slow_func.slow)
def test_setastest():
@dec.setastest()
def f_default(a):
pass
@dec.setastest(True)
def f_istest(a):
pass
@dec.setastest(False)
def f_isnottest(a):
pass
assert_(f_default.__test__)
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
class DidntSkipException(Exception):
pass
def test_skip_functions_hardcoded():
@dec.skipif(True)
def f1(x):
raise DidntSkipException
try:
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except nose.SkipTest:
pass
@dec.skipif(False)
def f2(x):
raise DidntSkipException
try:
f2('a')
except DidntSkipException:
pass
except nose.SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_functions_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.skipif(skip_tester)
def f1(x):
raise DidntSkipException
try:
skip_flag = 'skip me!'
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except nose.SkipTest:
pass
@dec.skipif(skip_tester)
def f2(x):
raise DidntSkipException
try:
skip_flag = 'five is right out!'
f2('a')
except DidntSkipException:
pass
except nose.SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_generators_hardcoded():
@dec.knownfailureif(True, "This test is known to fail")
def g1(x):
for i in xrange(x):
yield i
try:
for j in g1(10):
pass
except KnownFailureTest:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
for i in xrange(x):
yield i
raise DidntSkipException('FAIL')
try:
for j in g2(10):
pass
except KnownFailureTest:
raise Exception('Marked incorretly as known failure')
except DidntSkipException:
pass
def test_skip_generators_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
for i in xrange(x):
yield i
try:
skip_flag = 'skip me!'
for j in g1(10):
pass
except KnownFailureTest:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
for i in xrange(x):
yield i
raise DidntSkipException('FAIL')
try:
skip_flag = 'do not skip'
for j in g2(10):
pass
except KnownFailureTest:
raise Exception('Marked incorretly as known failure')
except DidntSkipException:
pass
def test_deprecated():
@dec.deprecated(True)
def non_deprecated_func():
pass
@dec.deprecated()
def deprecated_func():
import warnings
warnings.warn("TEST: deprecated func", DeprecationWarning)
@dec.deprecated()
def deprecated_func2():
import warnings
warnings.warn("AHHHH")
raise ValueError
@dec.deprecated()
def deprecated_func3():
import warnings
warnings.warn("AHHHH")
# marked as deprecated, but does not raise DeprecationWarning
assert_raises(AssertionError, non_deprecated_func)
# should be silent
deprecated_func()
# fails if deprecated decorator just disables test. See #1453.
assert_raises(ValueError, deprecated_func2)
# first warnings is not a DeprecationWarning
assert_raises(AssertionError, deprecated_func3)
if __name__ == '__main__':
run_module_suite()
| lthurlow/Network-Grapher | proj/external/numpy-1.7.0/numpy/testing/tests/test_decorators.py | Python | mit | 4,070 | 0.001966 |
"""
WSGI config for mng_files project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
import sys
path = os.path.abspath(__file__+'/../..')
if path not in sys.path:
sys.path.append(path)
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mng_files.settings")
application = get_wsgi_application()
| idjung96/mng_files | mng_files/wsgi.py | Python | gpl-3.0 | 498 | 0.004016 |
__author__ = 'bromix'
import xbmcgui
from ..abstract_progress_dialog import AbstractProgressDialog
class XbmcProgressDialog(AbstractProgressDialog):
def __init__(self, heading, text):
AbstractProgressDialog.__init__(self, 100)
self._dialog = xbmcgui.DialogProgress()
self._dialog.create(heading, text)
# simple reset because KODI won't do it :(
self._position = 1
self.update(steps=-1)
def close(self):
if self._dialog:
self._dialog.close()
self._dialog = None
def update(self, steps=1, text=None):
self._position += steps
position = int(float(100.0 / self._total) * self._position)
if isinstance(text, basestring):
self._dialog.update(position, text)
else:
self._dialog.update(position)
def is_aborted(self):
return self._dialog.iscanceled()
| repotvsupertuga/tvsupertuga.repository | plugin.video.youtube/resources/lib/youtube_plugin/kodion/impl/xbmc/xbmc_progress_dialog.py | Python | gpl-2.0 | 911 | 0 |
# -*- coding: utf-8 -*-
"""
test_transaction.py
:copyright: (C) 2014-2015 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
import unittest
import datetime
import random
import authorize
from dateutil.relativedelta import relativedelta
from trytond.tests.test_tryton import DB_NAME, USER, CONTEXT, POOL
import trytond.tests.test_tryton
from trytond.transaction import Transaction
from trytond.exceptions import UserError
class TestTransaction(unittest.TestCase):
"""
Test transaction
"""
def setUp(self):
"""
Set up data used in the tests.
"""
trytond.tests.test_tryton.install_module('payment_gateway')
self.Currency = POOL.get('currency.currency')
self.Company = POOL.get('company.company')
self.Party = POOL.get('party.party')
self.User = POOL.get('res.user')
self.Journal = POOL.get('account.journal')
self.PaymentGateway = POOL.get('payment_gateway.gateway')
self.PaymentTransaction = POOL.get('payment_gateway.transaction')
self.AccountMove = POOL.get('account.move')
self.PaymentProfile = POOL.get('party.payment_profile')
self.UseCardView = POOL.get('payment_gateway.transaction.use_card.view')
def _create_fiscal_year(self, date=None, company=None):
"""
        Creates a fiscal year and required sequences
"""
FiscalYear = POOL.get('account.fiscalyear')
Sequence = POOL.get('ir.sequence')
Company = POOL.get('company.company')
if date is None:
date = datetime.date.today()
if company is None:
company, = Company.search([], limit=1)
fiscal_year, = FiscalYear.create([{
'name': '%s' % date.year,
'start_date': date + relativedelta(month=1, day=1),
'end_date': date + relativedelta(month=12, day=31),
'company': company,
'post_move_sequence': Sequence.create([{
'name': '%s' % date.year,
'code': 'account.move',
'company': company,
}])[0],
}])
FiscalYear.create_period([fiscal_year])
return fiscal_year
def _create_coa_minimal(self, company):
"""Create a minimal chart of accounts
"""
AccountTemplate = POOL.get('account.account.template')
Account = POOL.get('account.account')
account_create_chart = POOL.get(
'account.create_chart', type="wizard")
account_template, = AccountTemplate.search(
[('parent', '=', None)]
)
session_id, _, _ = account_create_chart.create()
create_chart = account_create_chart(session_id)
create_chart.account.account_template = account_template
create_chart.account.company = company
create_chart.transition_create_account()
receivable, = Account.search([
('kind', '=', 'receivable'),
('company', '=', company),
])
payable, = Account.search([
('kind', '=', 'payable'),
('company', '=', company),
])
create_chart.properties.company = company
create_chart.properties.account_receivable = receivable
create_chart.properties.account_payable = payable
create_chart.transition_create_properties()
def _get_account_by_kind(self, kind, company=None, silent=True):
"""Returns an account with given spec
:param kind: receivable/payable/expense/revenue
        :param silent: don't raise an error if the account is not found
"""
Account = POOL.get('account.account')
Company = POOL.get('company.company')
if company is None:
company, = Company.search([], limit=1)
accounts = Account.search([
('kind', '=', kind),
('company', '=', company)
], limit=1)
if not accounts and not silent:
raise Exception("Account not found")
if not accounts:
return None
account, = accounts
return account
def setup_defaults(self):
"""
Creates default data for testing
"""
currency, = self.Currency.create([{
'name': 'US Dollar',
'code': 'USD',
'symbol': '$',
}])
with Transaction().set_context(company=None):
company_party, = self.Party.create([{
'name': 'Openlabs'
}])
self.company, = self.Company.create([{
'party': company_party,
'currency': currency,
}])
self.User.write([self.User(USER)], {
'company': self.company,
'main_company': self.company,
})
CONTEXT.update(self.User.get_preferences(context_only=True))
# Create Fiscal Year
self._create_fiscal_year(company=self.company.id)
# Create Chart of Accounts
self._create_coa_minimal(company=self.company.id)
# Create Cash journal
self.cash_journal, = self.Journal.search(
[('type', '=', 'cash')], limit=1
)
self.Journal.write([self.cash_journal], {
'debit_account': self._get_account_by_kind('expense').id
})
self.auth_net_gateway = self.PaymentGateway(
name='Authorize.net',
journal=self.cash_journal,
provider='authorize_net',
method='credit_card',
authorize_net_login='327deWY74422',
authorize_net_transaction_key='32jF65cTxja88ZA2',
test=True
)
self.auth_net_gateway.save()
# Create parties
self.party1, = self.Party.create([{
'name': 'Test party - 1',
'addresses': [('create', [{
'name': 'Test Party %s' % random.randint(1, 999),
'street': 'Test Street %s' % random.randint(1, 999),
'city': 'Test City %s' % random.randint(1, 999),
}])],
'account_receivable': self._get_account_by_kind(
'receivable').id,
}])
self.party2, = self.Party.create([{
'name': 'Test party - 2',
'addresses': [('create', [{
'name': 'Test Party',
'street': 'Test Street',
'city': 'Test City',
}])],
'account_receivable': self._get_account_by_kind(
'receivable').id,
}])
self.party3, = self.Party.create([{
'name': 'Test party - 3',
'addresses': [('create', [{
'name': 'Test Party',
'street': 'Test Street',
'city': 'Test City',
}])],
'account_receivable': self._get_account_by_kind(
'receivable').id,
}])
self.card_data1 = self.UseCardView(
number='4111111111111111',
expiry_month='04',
expiry_year=str(random.randint(2016, 2020)),
csc=str(random.randint(100, 555)),
owner='Test User -1',
)
self.card_data2 = self.UseCardView(
number='4111111111111111',
expiry_month='08',
expiry_year=str(random.randint(2016, 2020)),
csc=str(random.randint(556, 999)),
owner='Test User -2',
)
self.invalid_card_data = self.UseCardView(
number='4111111111111111',
expiry_month='08',
expiry_year='2022',
csc=str(911),
owner='Test User -2',
)
self.payment_profile = self.PaymentProfile(
party=self.party1,
address=self.party1.addresses[0].id,
gateway=self.auth_net_gateway.id,
last_4_digits='1111',
expiry_month='01',
expiry_year='2018',
provider_reference='27527167',
authorize_profile_id='28545177',
)
self.payment_profile.save()
def test_0010_test_add_payment_profile(self):
"""
Test adding payment profile to a Party
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
ProfileWizard = POOL.get(
'party.party.payment_profile.add', type="wizard"
)
profile_wizard = ProfileWizard(
ProfileWizard.create()[0]
)
profile_wizard.card_info.owner = self.party2.name
profile_wizard.card_info.number = self.card_data1['number']
profile_wizard.card_info.expiry_month = self.card_data1[
'expiry_month']
profile_wizard.card_info.expiry_year = self.card_data1[
'expiry_year']
profile_wizard.card_info.csc = self.card_data1['csc']
profile_wizard.card_info.gateway = self.auth_net_gateway
profile_wizard.card_info.provider = self.auth_net_gateway.provider
profile_wizard.card_info.address = self.party2.addresses[0]
profile_wizard.card_info.party = self.party2
with Transaction().set_context(return_profile=True):
profile = profile_wizard.transition_add()
self.assertEqual(profile.party.id, self.party2.id)
self.assertEqual(profile.gateway, self.auth_net_gateway)
self.assertEqual(
profile.last_4_digits, self.card_data1['number'][-4:]
)
self.assertEqual(
profile.expiry_month, self.card_data1['expiry_month']
)
self.assertEqual(
profile.expiry_year, self.card_data1['expiry_year']
)
self.assertIsNotNone(profile.authorize_profile_id)
def test_0020_test_transaction_capture(self):
"""
Test capture transaction
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context({'company': self.company.id}):
# Case I: Payment Profile
transaction1, = self.PaymentTransaction.create([{
'party': self.party1.id,
'address': self.party1.addresses[0].id,
'payment_profile': self.payment_profile.id,
'gateway': self.auth_net_gateway.id,
'amount': random.randint(1, 5),
}])
self.assert_(transaction1)
self.assertEqual(transaction1.state, 'draft')
# Capture transaction
self.PaymentTransaction.capture([transaction1])
self.assertEqual(transaction1.state, 'posted')
# Case II: No Payment Profile
transaction2, = self.PaymentTransaction.create([{
'party': self.party2.id,
'address': self.party2.addresses[0].id,
'gateway': self.auth_net_gateway.id,
'amount': random.randint(6, 10),
}])
self.assert_(transaction2)
self.assertEqual(transaction2.state, 'draft')
# Capture transaction
transaction2.capture_authorize_net(card_info=self.card_data1)
self.assertEqual(transaction2.state, 'posted')
# Case III: Transaction Failure on invalid amount
transaction3, = self.PaymentTransaction.create([{
'party': self.party1.id,
'address': self.party1.addresses[0].id,
'payment_profile': self.payment_profile.id,
'gateway': self.auth_net_gateway.id,
'amount': 0,
}])
self.assert_(transaction3)
self.assertEqual(transaction3.state, 'draft')
# Capture transaction
self.PaymentTransaction.capture([transaction3])
self.assertEqual(transaction3.state, 'failed')
                # Case IV: Assert an error when a new customer has
                # neither a payment profile nor card info
transaction4, = self.PaymentTransaction.create([{
'party': self.party3.id,
'address': self.party3.addresses[0].id,
'gateway': self.auth_net_gateway.id,
'amount': random.randint(1, 5),
}])
self.assert_(transaction4)
self.assertEqual(transaction4.state, 'draft')
self.assertEqual(
transaction4.get_authorize_net_request_data(),
{'amount': transaction4.amount}
)
# Capture transaction
with self.assertRaises(UserError):
self.PaymentTransaction.capture([transaction4])
def test_0030_test_transaction_auth_only(self):
"""
Test auth_only transaction
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context({'company': self.company.id}):
# Case I: Payment Profile
transaction1, = self.PaymentTransaction.create([{
'party': self.party1.id,
'address': self.party1.addresses[0].id,
'payment_profile': self.payment_profile.id,
'gateway': self.auth_net_gateway.id,
'amount': random.randint(6, 10),
}])
self.assert_(transaction1)
self.assertEqual(transaction1.state, 'draft')
# Authorize transaction
self.PaymentTransaction.authorize([transaction1])
self.assertEqual(transaction1.state, 'authorized')
# Case II: No Payment Profile
transaction2, = self.PaymentTransaction.create([{
'party': self.party2.id,
'address': self.party2.addresses[0].id,
'gateway': self.auth_net_gateway.id,
'amount': random.randint(1, 5),
}])
self.assert_(transaction2)
self.assertEqual(transaction2.state, 'draft')
# Authorize transaction
transaction2.authorize_authorize_net(card_info=self.card_data1)
self.assertEqual(transaction2.state, 'authorized')
# Case III: Transaction Failure on invalid amount
transaction3, = self.PaymentTransaction.create([{
'party': self.party1.id,
'address': self.party1.addresses[0].id,
'payment_profile': self.payment_profile.id,
'gateway': self.auth_net_gateway.id,
'amount': 0,
}])
self.assert_(transaction3)
self.assertEqual(transaction3.state, 'draft')
# Authorize transaction
self.PaymentTransaction.authorize([transaction3])
self.assertEqual(transaction3.state, 'failed')
                # Case IV: Assert an error when a new customer has
                # neither a payment profile nor card info
transaction3, = self.PaymentTransaction.create([{
'party': self.party3.id,
'address': self.party3.addresses[0].id,
'gateway': self.auth_net_gateway.id,
'amount': random.randint(1, 5),
}])
self.assert_(transaction3)
self.assertEqual(transaction3.state, 'draft')
# Authorize transaction
with self.assertRaises(UserError):
self.PaymentTransaction.authorize([transaction3])
def test_0040_test_transaction_auth_and_settle(self):
"""
Test auth_and_settle transaction
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context({'company': self.company.id}):
                # Case I: Same or smaller amount than the authorized amount
transaction1, = self.PaymentTransaction.create([{
'party': self.party3.id,
'address': self.party3.addresses[0].id,
'gateway': self.auth_net_gateway.id,
'amount': random.randint(6, 10),
}])
self.assert_(transaction1)
self.assertEqual(transaction1.state, 'draft')
# Authorize transaction
transaction1.authorize_authorize_net(card_info=self.card_data1)
self.assertEqual(transaction1.state, 'authorized')
                # Assert that the transaction succeeds
self.PaymentTransaction.settle([transaction1])
self.assertEqual(transaction1.state, 'posted')
                # Case II: Larger amount than the authorized amount
transaction2, = self.PaymentTransaction.create([{
'party': self.party3.id,
'address': self.party3.addresses[0].id,
'gateway': self.auth_net_gateway.id,
'amount': random.randint(1, 5),
}])
self.assert_(transaction2)
self.assertEqual(transaction2.state, 'draft')
# Authorize transaction
transaction2.authorize_authorize_net(card_info=self.card_data2)
self.assertEqual(transaction2.state, 'authorized')
                # Assert that the transaction fails.
self.PaymentTransaction.write([transaction2], {
'amount': 6,
})
self.PaymentTransaction.settle([transaction2])
self.assertEqual(transaction2.state, 'failed')
def test_0050_test_transaction_auth_and_cancel(self):
"""
Test auth_and_void transaction
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context({'company': self.company.id}):
transaction1, = self.PaymentTransaction.create([{
'party': self.party2.id,
'address': self.party2.addresses[0].id,
'gateway': self.auth_net_gateway.id,
'amount': random.randint(6, 10),
'state': 'in-progress',
}])
self.assert_(transaction1)
self.assertEqual(transaction1.state, 'in-progress')
# Assert User error if cancel request is sent
# in state other than authorized
with self.assertRaises(UserError):
self.PaymentTransaction.cancel([transaction1])
transaction1.state = 'draft'
transaction1.save()
# Authorize transaction
transaction1.authorize_authorize_net(card_info=self.card_data1)
self.assertEqual(transaction1.state, 'authorized')
                # Cancel transaction
self.PaymentTransaction.cancel([transaction1])
self.assertEqual(transaction1.state, 'cancel')
def test_0060_test_duplicate_payment_profile(self):
"""
        Test that the workflow is not affected if a duplicate payment profile
        exists on authorize.net
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
customer = authorize.Customer.create()
authorize.CreditCard.create(customer.customer_id, {
'card_number': '4111111111111111',
'card_code': '523',
'expiration_date': '05/2023',
'billing': self.party2.addresses[0].get_authorize_address(
'Test User'
),
})
# Create a payment profile with some random payment id
payment_profile = self.PaymentProfile(
party=self.party2,
address=self.party2.addresses[0].id,
gateway=self.auth_net_gateway.id,
last_4_digits='1111',
expiry_month='05',
expiry_year='2023',
provider_reference='67382920',
authorize_profile_id=customer.customer_id,
)
payment_profile.save()
# Create payment profile with same details as above
ProfileWizard = POOL.get(
'party.party.payment_profile.add', type="wizard"
)
profile_wizard = ProfileWizard(
ProfileWizard.create()[0]
)
profile_wizard.card_info.owner = 'Test User'
profile_wizard.card_info.number = '4111111111111111'
profile_wizard.card_info.expiry_month = '05'
profile_wizard.card_info.expiry_year = '2023'
profile_wizard.card_info.csc = '523'
profile_wizard.card_info.gateway = self.auth_net_gateway
profile_wizard.card_info.provider = self.auth_net_gateway.provider
profile_wizard.card_info.address = self.party2.addresses[0]
profile_wizard.card_info.party = self.party2
with Transaction().set_context(return_profile=True):
profile = profile_wizard.transition_add()
self.assertEqual(profile.party.id, self.party2.id)
self.assertEqual(profile.gateway, self.auth_net_gateway)
self.assertEqual(
profile.last_4_digits, '1111'
)
self.assertEqual(
profile.expiry_month, '05'
)
self.assertEqual(
profile.expiry_year, '2023'
)
self.assertIsNotNone(profile.authorize_profile_id)
self.assertEqual(
profile.authorize_profile_id,
payment_profile.authorize_profile_id
)
def test_0070_test_duplicate_shipping_address(self):
"""
        Test that the workflow is not affected if a duplicate shipping address
        is sent.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
customer = authorize.Customer.create()
authorize.Address.create(
customer.customer_id,
self.party1.addresses[0].get_authorize_address()
)
# Try creating shipping address with same address
new_address_id = self.party2.addresses[0].send_to_authorize(
customer.customer_id
)
self.assert_(new_address_id)
def suite():
"Define suite"
test_suite = trytond.tests.test_tryton.suite()
test_suite.addTests(
unittest.TestLoader().loadTestsFromTestCase(TestTransaction)
)
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| openlabs/payment-gateway-authorize-net | tests/test_transaction.py | Python | bsd-3-clause | 23,226 | 0.000043 |
"""
CursorWrapper (like django.db.backends.utils)
"""
import decimal
import logging
import warnings
from itertools import islice
from typing import Any, Callable, Iterable, Iterator, List, Tuple, TypeVar, Union, overload
from django.db import models, NotSupportedError
from django.db.models.sql import subqueries, Query, RawQuery
from salesforce.backend import DJANGO_30_PLUS
from salesforce.dbapi.driver import (
DatabaseError, InternalError, SalesforceWarning, merge_dict,
register_conversion, arg_to_json)
from salesforce.fields import NOT_UPDATEABLE, NOT_CREATEABLE
V = TypeVar('V')
if not DJANGO_30_PLUS:
    # a "do nothing" stub for Django < 3.0, where there is no @async_unsafe decorator
F = TypeVar('F', bound=Callable)
F2 = TypeVar('F2', bound=Callable)
@overload
def async_unsafe(message: F) -> F:
...
@overload
def async_unsafe(message: str) -> Callable[[F2], F2]:
...
def async_unsafe(message: Union[F, str]) -> Union[F, Callable[[F2], F2]]:
def decorator(func: F2) -> F2:
return func
# If the message is actually a function, then be a no-arguments decorator.
if callable(message):
func = message
message = 'You cannot call this from an async context - use a thread or sync_to_async.'
return decorator(func)
return decorator
else:
from django.utils.asyncio import ( # type: ignore[import,no-redef] # noqa pylint:disable=unused-import,ungrouped-imports
async_unsafe
)
log = logging.getLogger(__name__)
DJANGO_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f-00:00'
MIGRATIONS_QUERY_TO_BE_IGNORED = "SELECT django_migrations.app, django_migrations.name FROM django_migrations"
def extract_values(query):
"""
Extract values from insert or update query.
Supports bulk_create
"""
if isinstance(query, subqueries.UpdateQuery):
row = query.values
return extract_values_inner(row, query)
if isinstance(query, subqueries.InsertQuery):
ret = []
for row in query.objs:
ret.append(extract_values_inner(row, query))
return ret
raise NotSupportedError
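# Illustrative output (hypothetical model/fields): for an InsertQuery over two
# Contact rows, extract_values would return one JSON-ready dict per row, e.g.
#     [{'FirstName': 'Ada', 'LastName': 'Lovelace'},
#      {'FirstName': 'Alan', 'LastName': 'Turing'}]
# (values are passed through arg_to_json, so dates etc. become JSON types)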
def extract_values_inner(row, query):
d = dict()
fields = query.model._meta.fields
    for field in fields:
sf_read_only = getattr(field, 'sf_read_only', 0)
is_date_auto = getattr(field, 'auto_now', False) or getattr(field, 'auto_now_add', False)
if field.get_internal_type() == 'AutoField':
continue
if isinstance(query, subqueries.UpdateQuery):
if (sf_read_only & NOT_UPDATEABLE) != 0 or is_date_auto:
continue
value_or_empty = [value for qfield, model, value in query.values if qfield.name == field.name]
if value_or_empty:
[value] = value_or_empty
else:
assert len(query.values) < len(fields), \
"Match name can miss only with an 'update_fields' argument."
continue
if hasattr(value, 'default'):
warnings.warn(
"The field '{}.{}' has been saved again with DEFAULTED_ON_CREATE value. "
"It is better to set a real value to it or to refresh it from the database "
"or restrict updated fields explicitly by 'update_fields='."
.format(field.model._meta.object_name, field.name),
SalesforceWarning
)
continue
elif isinstance(query, subqueries.InsertQuery):
value = getattr(row, field.attname)
if (sf_read_only & NOT_CREATEABLE) != 0 or hasattr(value, 'default'):
continue # skip not createable or DEFAULTED_ON_CREATE
else:
raise InternalError('invalid query type')
d[field.column] = arg_to_json(value)
return d
class CursorWrapper:
"""
A wrapper that emulates the behavior of a database cursor.
This is the class that is actually responsible for making connections
to the SF REST API
"""
# pylint:disable=too-many-instance-attributes,too-many-public-methods
def __init__(self, db):
"""
Connect to the Salesforce API.
"""
self.db = db
self.query = None
        self.session = db.sf_session  # this creates a TCP connection if one doesn't exist
self.rowcount = None
self.first_row = None
self.lastrowid = None # not moved to driver because INSERT is implemented here
self.cursor = self.db.connection.cursor()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@property
def oauth(self):
return self.session.auth.get_auth()
def execute(self, q, args=()):
"""
Send a query to the Salesforce API.
"""
# pylint:disable=too-many-branches
self.rowcount = None
response = None
if self.query is None:
self.execute_select(q, args)
else:
response = self.execute_django(q, args)
if isinstance(response, list):
return
# the encoding is detected automatically, e.g. from headers
if response and response.text:
# parse_float set to decimal.Decimal to avoid precision errors when
# converting from the json number to a float and then to a Decimal object
# on a model's DecimalField. This converts from json number directly
# to a Decimal object
data = response.json(parse_float=decimal.Decimal)
# a SELECT query
if 'totalSize' in data:
# SELECT
self.rowcount = data['totalSize']
# a successful INSERT query, return after getting PK
elif 'success' in data and 'id' in data:
self.lastrowid = data['id']
return
elif 'compositeResponse' in data:
# TODO treat error reporting for composite requests
self.lastrowid = [x['body']['id'] if x['body'] is not None else x['referenceId']
for x in data['compositeResponse']]
return
elif data['hasErrors'] is False:
# it is from Composite Batch request
                # save the id from bulk_create even if Django doesn't use it
if data['results'] and data['results'][0]['result']:
self.lastrowid = [item['result']['id'] for item in data['results']]
return
# something we don't recognize
else:
raise DatabaseError(data)
if not q.upper().startswith('SELECT COUNT() FROM'):
self.first_row = data['records'][0] if data['records'] else None
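    # Illustrative response bodies for the branches above (shapes assumed
    # from the code, not an exhaustive Salesforce contract):
    #   SELECT:          {"totalSize": 1, "records": [{"Id": "003..."}]}
    #   INSERT (single): {"id": "003...", "success": true, "errors": []}
    #   composite:       {"compositeResponse": [{"body": {"id": "003..."},
    #                                            "referenceId": "0"}]}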
def prepare_query(self, query):
self.query = query
def execute_django(self, soql: str, args: Tuple[Any, ...] = ()):
"""
Fixed execute for queries coming from Django query compilers
"""
response = None
sqltype = soql.split(None, 1)[0].upper()
if isinstance(self.query, subqueries.InsertQuery):
response = self.execute_insert(self.query)
elif isinstance(self.query, subqueries.UpdateQuery):
response = self.execute_update(self.query)
elif isinstance(self.query, subqueries.DeleteQuery):
response = self.execute_delete(self.query)
elif isinstance(self.query, RawQuery):
self.execute_select(soql, args)
elif sqltype in ('SAVEPOINT', 'ROLLBACK', 'RELEASE'):
log.info("Ignored SQL command '%s'", sqltype)
return
elif isinstance(self.query, Query):
self.execute_select(soql, args)
else:
raise DatabaseError("Unsupported query: type %s: %s" % (type(self.query), self.query))
return response
def execute_select(self, soql: str, args) -> None:
if soql != MIGRATIONS_QUERY_TO_BE_IGNORED:
# normal query
query_all = self.query and self.query.sf_params.query_all
tooling_api = self.query and self.query.model._meta.sf_tooling_api_model
self.cursor.execute(soql, args, query_all=query_all, tooling_api=tooling_api)
else:
            # Nothing is queried from SFDC about django_migrations; we respond
            # immediately that no migration status is recorded in SFDC.
#
# That is required by "makemigrations" to accept this query.
# Empty results are possible.
# (It could be eventually replaced by: "SELECT app__c, Name FROM django_migrations__c")
self.cursor._iter = iter([]) # pylint:disable=protected-access
self.cursor.rowcount = 0
self.rowcount = self.cursor.rowcount
def execute_insert(self, query):
table = query.model._meta.db_table
if table == 'django_migrations':
return
post_data = extract_values(query)
obj_url = self.db.connection.rest_api_url('sobjects', table, relative=True)
if len(post_data) == 1:
# single object
post_data = post_data[0]
return self.handle_api_exceptions('POST', obj_url, json=post_data)
if self.db.connection.composite_type == 'sobject-collections':
# SObject Collections
records = [merge_dict(x, type_=table) for x in post_data]
all_or_none = query.sf_params.all_or_none
ret = self.db.connection.sobject_collections_request('POST', records, all_or_none=all_or_none)
self.lastrowid = ret
self.rowcount = len(ret)
return
# composite by REST
composite_data = [{'method': 'POST', 'url': obj_url, 'referenceId': str(i), 'body': row}
for i, row in enumerate(post_data)]
ret = self.db.connection.composite_request(composite_data)
return ret
def get_pks_from_query(self, query):
"""Prepare primary keys for update and delete queries"""
where = query.where
sql = None
if where.connector == 'AND' and not where.negated and len(where.children) == 1:
# simple cases are optimized, especially because a suboptimal
# nested query based on the same table is not allowed by SF
child = where.children[0]
if (hasattr(child, 'lookup_name') and child.lookup_name in ('exact', 'in')
and child.lhs.target.column == 'Id'
and not child.bilateral_transforms and child.lhs.target.model is self.query.model):
pks = child.rhs
if child.lookup_name == 'exact':
assert isinstance(pks, str)
return [pks]
# lookup_name 'in'
assert not child.bilateral_transforms
if isinstance(pks, (tuple, list)):
return pks
# 'sf_params' are also in 'pks' only in Django >= 2.0, therefore check query.sf_params
assert (isinstance(pks, Query) and type(pks).__name__ == 'SalesforceQuery' or
query.sf_params.edge_updates), (
"Too complicated queryset.update(). Rewrite it by two querysets. "
"See docs wiki/error-messages")
# # alternative solution:
# return list(salesforce.backend.query.SalesforceQuerySet(pk.model, query=pk, using=pk._db))
sql, params = pks.get_compiler('salesforce').as_sql()
if not sql:
# a subquery is necessary in this case
where_sql, params = where.as_sql(query.get_compiler('salesforce'), self.db.connection)
sql = "SELECT Id FROM {}".format(query.model._meta.db_table)
if where_sql:
sql += " WHERE {}".format(where_sql)
with self.db.cursor() as cur:
cur.execute(sql, params)
assert len(cur.description) == 1 and cur.description[0][0] == 'Id'
return [x[0] for x in cur]
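    # Hypothetical example of the optimized path above: a queryset such as
    #     Contact.objects.filter(pk='003000000000001').update(first_name='A')
    # reaches this method with a single 'exact' lookup on Id, so
    # ['003000000000001'] is returned without issuing an extra SELECT.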
def execute_tooling_update(self, query):
table = query.model._meta.db_table
post_data = extract_values(query)
pks = self.get_pks_from_query(query)
assert len(pks) == 1
pk = pks[0]
value_map = {qfield.db_column: value for qfield, _, value in query.values}
if 'Metadata' in value_map and 'FullName' in value_map and 'DurableId' in value_map:
ret = self.db.connection.handle_api_exceptions(
'PATCH',
'tooling/sobjects', table, value_map['DurableId'],
json={'Metadata': value_map['Metadata'], 'FullName': value_map['FullName']}
)
elif pk == '000000000000000AAA':
pks = value_map['DurableId']
post_data = dict(**{"attributes": {"type": query.model._meta.db_table}}, **post_data)
obj_url = self.db.connection.rest_api_url('tooling/sobjects', table, value_map['DurableId'], relative=True)
ret = self.db.connection.handle_api_exceptions('PATCH', obj_url, json=value_map)
else:
obj_url = self.db.connection.rest_api_url('tooling/sobjects', table, pk, relative=True)
ret = self.db.connection.handle_api_exceptions('PATCH', obj_url, json=post_data)
assert ret.status_code == 204
self.rowcount = 1
def execute_update(self, query):
if query.model._meta.sf_tooling_api_model:
return self.execute_tooling_update(query)
table = query.model._meta.db_table
post_data = extract_values(query)
pks = self.get_pks_from_query(query)
log.debug('UPDATE %s(%s)%r', table, pks, post_data)
if not pks:
return
obj_url = self.db.connection.rest_api_url('sobjects', table, '', relative=True)
if len(pks) == 1:
# single request
ret = self.handle_api_exceptions('PATCH', obj_url + pks[0], json=post_data)
self.rowcount = 1
return ret
if self.db.connection.composite_type == 'sobject-collections':
# SObject Collections
records = [merge_dict(post_data, id=pk, type_=table) for pk in pks]
all_or_none = query.sf_params.all_or_none
ret = self.db.connection.sobject_collections_request('PATCH', records, all_or_none=all_or_none)
self.lastrowid = ret
self.rowcount = len(ret)
return
# composite by REST
composite_data = [{'method': 'PATCH', 'url': obj_url + pk, 'referenceId': pk, 'body': post_data}
for pk in pks]
ret = self.db.connection.composite_request(composite_data)
self.rowcount = len([x for x in ret.json()['compositeResponse'] if x['httpStatusCode'] == 204])
return ret
def execute_delete(self, query):
table = query.model._meta.db_table
pks = self.get_pks_from_query(query)
log.debug('DELETE %s(%s)', table, pks)
if not pks:
self.rowcount = 0
return
if len(pks) == 1:
ret = self.handle_api_exceptions('DELETE', 'sobjects', table, pks[0])
self.rowcount = 1 if (ret and ret.status_code == 204) else 0
return ret
if self.db.connection.composite_type == 'sobject-collections':
# SObject Collections
records = pks
all_or_none = None # sf_params not supported by DeleteQuery
ret = self.db.connection.sobject_collections_request('DELETE', records, all_or_none=all_or_none)
self.lastrowid = ret
self.rowcount = len(ret)
return
# bulk by REST
url = self.db.connection.rest_api_url('sobjects', table, '', relative=True)
composite_data = [{'method': 'DELETE', 'url': url + pk, 'referenceId': pk}
for pk in pks]
ret = self.db.connection.composite_request(composite_data)
self.rowcount = len([x for x in ret.json()['compositeResponse'] if x['httpStatusCode'] == 204])
def __iter__(self):
return self.cursor
def fetchone(self):
return self.cursor.fetchone()
def fetchmany(self, size=None):
return self.cursor.fetchmany(size=size)
def fetchall(self):
return self.cursor.fetchall()
@property
def description(self):
return self.cursor.description
def close(self):
self.cursor.close()
def commit(self):
self.cursor.commit()
def rollback(self):
self.cursor.rollback()
def handle_api_exceptions(self, method, *url_parts, **kwargs):
return self.cursor.handle_api_exceptions(method, *url_parts, **kwargs)
def chunked(iterable: Iterable[V], n: int) -> Iterator[List[V]]:
"""
Break an iterable into lists of a given length::
    >>> assert list(chunked([1, 2, 3, 4, 5], 3)) == [[1, 2, 3], [4, 5]]
"""
iterable = iter(iterable)
while True:
chunk = list(islice(iterable, n))
if not chunk:
return
yield chunk
def sobj_id(obj):
assert obj._salesforce_object # pylint:disable=protected-access
return obj.pk
# this JSON conversion is important for QuerySet.update(foreign_key_field=some_object)
register_conversion(models.Model, json_conv=sobj_id, subclass=True)
| django-salesforce/django-salesforce | salesforce/backend/utils.py | Python | mit | 17,530 | 0.003023 |
"""
==============================================
Discrete Fourier transforms (:mod:`scipy.fft`)
==============================================
.. currentmodule:: scipy.fft
Fast Fourier Transforms (FFTs)
==============================
.. autosummary::
:toctree: generated/
fft - Fast (discrete) Fourier Transform (FFT)
ifft - Inverse FFT
fft2 - Two dimensional FFT
ifft2 - Two dimensional inverse FFT
fftn - n-dimensional FFT
ifftn - n-dimensional inverse FFT
rfft - FFT of strictly real-valued sequence
irfft - Inverse of rfft
rfft2 - Two dimensional FFT of real sequence
irfft2 - Inverse of rfft2
rfftn - n-dimensional FFT of real sequence
irfftn - Inverse of rfftn
hfft - FFT of a Hermitian sequence (real spectrum)
ihfft - Inverse of hfft
hfft2 - Two dimensional FFT of a Hermitian sequence
ihfft2 - Inverse of hfft2
hfftn - n-dimensional FFT of a Hermitian sequence
ihfftn - Inverse of hfftn
Discrete Sin and Cosine Transforms (DST and DCT)
================================================
.. autosummary::
:toctree: generated/
dct - Discrete cosine transform
idct - Inverse discrete cosine transform
dctn - n-dimensional Discrete cosine transform
idctn - n-dimensional Inverse discrete cosine transform
dst - Discrete sine transform
idst - Inverse discrete sine transform
dstn - n-dimensional Discrete sine transform
idstn - n-dimensional Inverse discrete sine transform
Helper functions
================
.. autosummary::
:toctree: generated/
fftshift - Shift the zero-frequency component to the center of the spectrum
ifftshift - The inverse of `fftshift`
fftfreq - Return the Discrete Fourier Transform sample frequencies
rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
next_fast_len - Find the optimal length to zero-pad an FFT for speed
set_workers - Context manager to set default number of workers
get_workers - Get the current default number of workers
Backend control
===============
.. autosummary::
:toctree: generated/
set_backend - Context manager to set the backend within a fixed scope
skip_backend - Context manager to skip a backend within a fixed scope
set_global_backend - Sets the global fft backend
register_backend - Register a backend for permanent use
"""
from __future__ import division, print_function, absolute_import
from ._basic import (
fft, ifft, fft2, ifft2, fftn, ifftn,
rfft, irfft, rfft2, irfft2, rfftn, irfftn,
hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn)
from ._realtransforms import dct, idct, dst, idst, dctn, idctn, dstn, idstn
from ._helper import next_fast_len
from ._backend import (set_backend, skip_backend, set_global_backend,
register_backend)
from numpy.fft import fftfreq, rfftfreq, fftshift, ifftshift
from ._pocketfft.helper import set_workers, get_workers
__all__ = [
    'fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
'hfft', 'ihfft', 'hfft2', 'ihfft2', 'hfftn', 'ihfftn',
'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift',
'next_fast_len',
'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn',
'set_backend', 'skip_backend', 'set_global_backend', 'register_backend',
'get_workers', 'set_workers']
from numpy.dual import register_func
for k in ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2']:
register_func(k, eval(k))
del k, register_func
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
# Hack to allow numpy.fft.fft to be called as scipy.fft
import sys
class _FFTModule(sys.modules[__name__].__class__):
@staticmethod
def __call__(*args, **kwargs):
import numpy as np
return np.fft.fft(*args, **kwargs)
import os
if os.environ.get('_SCIPY_BUILDING_DOC') != 'True':
sys.modules[__name__].__class__ = _FFTModule
del os
del _FFTModule
del sys
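# Minimal usage sketch (illustrative): after the class swap above (skipped
# while building docs), the module itself is callable and forwards to
# numpy.fft.fft:
#
#     >>> import numpy as np
#     >>> import scipy.fft
#     >>> np.allclose(scipy.fft([1, 0, 0, 0]), [1, 1, 1, 1])
#     True
#     >>> np.allclose(scipy.fft.fft([1, 0, 0, 0]), [1, 1, 1, 1])
#     True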
| jor-/scipy | scipy/fft/__init__.py | Python | bsd-3-clause | 3,965 | 0.001261 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 4 June 2013
@author: Aristote Diasonama
'''
from shop.handlers.event.base import BaseHandler
from shop.handlers.base_handler import asso_required
from shop.models.event import Event
from shop.shop_exceptions import EventNotFoundException
class ShowEventHandler(BaseHandler):
@asso_required
def get(self):
try:
self.try_to_show_the_event()
except EventNotFoundException:
self.show_all_events()
def try_to_show_the_event(self):
if not self.event:
raise EventNotFoundException
context = self.get_template_context_showing_one_event()
self.render_template('view_event.html', context=context)
def show_all_events(self):
context = self.get_template_context_showing_all_events()
self.render_template('view_event.html', context=context)
def get_template_context_showing_one_event(self):
context = dict()
context['event'] = self.event.get_event_in_dict_extended()
context['event']['image'] = self.uri_for('imageEvent', event_id = self.event_key.id())
context['isShowingAllEvents'] = False
context['sidebar_active'] = "overview"
context['url_for_editEvent'] = self.uri_for('editEvent', event_id = self.event_key.id())
context['url_to_publish_event'] = self.uri_for('rpc_publishEvent', event_id = self.event_key.id())
context['url_to_delete_event'] = self.uri_for('rpc_deleteEvent', event_id = self.event_key.id())
if self.event.type == 'paid':
context.update(self.get_template_context_paid_event())
return context
def get_template_context_paid_event(self):
context = dict()
tickets = self.event.get_all_tickets()
if tickets is not None:
tickets_urls = map(lambda ticket: self.uri_for('editTicket',
event_id=self.event_key.id(), ticket_id=ticket.key.id()),
tickets.fetch())
context['tickets'] = zip(tickets, tickets_urls) if tickets else None
context['url_for_createTicket'] = self.uri_for('createTicket', event_id = self.event_key.id())
        context['url_for_rpc_create_ticket'] = self.uri_for('rpc_createTicket', event_id=self.event_key.id())
        return context
def get_template_context_showing_all_events(self):
events = self.user.get_all_events()
context = dict()
context['events'] = events
context['showingAllEvents'] = True
context['sidebar_active'] = "allEvent"
return context | EventBuck/EventBuck | shop/handlers/event/show.py | Python | mit | 2,800 | 0.018571 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
from spack.cmd.common import print_module_placeholder_help
description = "add package to environment using dotkit"
section = "environment"
level = "long"
def setup_parser(subparser):
"""Parser is only constructed so that this prints a nice help
message with -h. """
subparser.add_argument(
'spec', nargs=argparse.REMAINDER,
help='spec of package to use with dotkit')
def use(parser, args):
print_module_placeholder_help()
| skosukhin/spack | lib/spack/spack/cmd/use.py | Python | lgpl-2.1 | 1,713 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Settings'
db.create_table('operations_settings', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('value', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('operations', ['Settings'])
def backwards(self, orm):
# Deleting model 'Settings'
db.delete_table('operations_settings')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'maps.map': {
'Meta': {'object_name': 'Map'},
'center_x': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'center_y': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'projection': ('django.db.models.fields.CharField', [], {'default': "'EPSG:4326'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
'zoom': ('django.db.models.fields.IntegerField', [], {})
},
'operations.agency': {
'Meta': {'ordering': "['name']", 'object_name': 'Agency'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'operations.deployment': {
'Meta': {'object_name': 'Deployment'},
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deployers': ('django.db.models.fields.related.ManyToManyField', [], {'max_length': '250', 'to': "orm['auth.User']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
'deployment_location': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['operations.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'})
},
'operations.event': {
'Meta': {'ordering': "['-last_updated']", 'object_name': 'Event'},
'agencies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['operations.Agency']", 'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'collaboration_link': ('django.db.models.fields.URLField', [], {'default': "'https://connect.dco.dod.mil/r3ops?launcher=false'", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'event_location': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'filedropoff_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'gq_job_ids': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'gq_project_ids': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['maps.Map']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'poc': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'posture': ('django.db.models.fields.CharField', [], {'default': "'Monitoring'", 'max_length': '25'}),
'product_feed_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'rfi_generator_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'services': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['operations.Service']", 'null': 'True', 'blank': 'True'}),
'show_deployments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_event_on_map': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_geomedia_triage': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_notes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_products': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_related_files': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_rfis': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_services': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_supporting_agencies': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'operations.lessonlearned': {
'Meta': {'ordering': "['-created']", 'unique_together': "(('submitted_by', 'description', 'event'),)", 'object_name': 'LessonLearned'},
'action': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '250', 'related_name': "'lesson_learned_assignment'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '50', 'to': "orm['operations.LessonLearnedCategory']", 'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True'}),
'due': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['operations.Event']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'default': "'Low'", 'max_length': '25', 'null': 'True', 'blank': 'True'}),
'resolution': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1', 'null': 'True', 'blank': 'True'}),
'submitted_by': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '250', 'related_name': "'lesson_learned_submission'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'work_around': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
},
'operations.lessonlearnedcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'LessonLearnedCategory'},
'description': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'operations.service': {
'Meta': {'ordering': "['name']", 'object_name': 'Service'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'service_type': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['operations.ServiceType']", 'symmetrical': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'operations.servicetype': {
'Meta': {'ordering': "['name']", 'object_name': 'ServiceType'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'})
},
'operations.settings': {
'Meta': {'object_name': 'Settings'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'operations.sitrep': {
'Meta': {'ordering': "['-created']", 'object_name': 'SitRep'},
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'content': ('tinymce.models.HTMLField', [], {'max_length': '6000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['operations.Event']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '250', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1', 'null': 'True', 'blank': 'True'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['operations'] | ngageoint/geoevents | geoevents/operations/migrations/0012_auto__add_settings.py | Python | mit | 17,700 | 0.00791 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
my_site = os.path.join(os.environ["HOME"], ".local/lib/python2.7/site-packages")
sys.path.insert(0, my_site)
import h5py
import networkx as nx
import numpy as np
import pycuda.driver as cuda
import scipy.stats as st
import aux
from consts import *
def to_graph(connections):
graph = nx.DiGraph()
ca_size = connections.shape[0]
for cell in xrange(ca_size):
for neighbor in connections[cell]:
graph.add_edge(neighbor, cell)
        # Count the number of rewired connections this cell has
graph.node[cell]['rew'] = (connections[cell] !=
(np.arange(cell - 3, cell + 4) % ca_size)).sum()
return graph
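# Illustrative sketch (assumed shapes): `connections` is an (N, 2r + 1) array
# of incoming neighbor indices per cell. For a regular ring with radius 3,
# every row matches arange(c - 3, c + 4) % N, so 'rew' stays 0:
#
#     >>> import numpy as np
#     >>> ca_size = 7
#     >>> connections = np.array([np.arange(c - 3, c + 4) % ca_size
#     ...                         for c in range(ca_size)])
#     >>> to_graph(connections).node[0]['rew']
#     0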
class AnalysisIndividual:
__cuda_module = False
def __init__(self, individual, correct, executions, ca_size,
connection_radius, ca_iterations, ca_repeat, k_history,
save_executions=0):
self.__ca_size = ca_size
self.__connection_radius = connection_radius
self.__n_connections = 2 * self.__connection_radius + 1
self.__ca_iterations = ca_iterations
self.__ca_repeat = ca_repeat
self.__k_history = k_history
self.__n_possible_history = 2 ** self.__k_history
self.__n_observations = self.__ca_repeat * \
(self.__ca_iterations - self.__k_history + 1)
self.__save_executions = save_executions
self.__individual = individual
self.__individual_number = self.__individual.number
self.__rules = self.__individual.gene_rules
self.__connections = self.__individual.connections
self.__graph = to_graph(self.__connections)
self.__executions = executions
density = np.mean(self.__executions[:, 0], axis=1)
self.__majority = np.round(density).astype(np.uint32)
# The closer the density is to .5 the harder the configuration is to
# decide
self.__difficult = 1 - np.abs(density - .5) / .5
# Checking which repetitions converged to a single state
self.__converged = np.all(self.__executions[:, -1] ==
self.__executions[:, -1, 0].reshape(-1, 1),
axis=1)
# Checking how many cells in each repetition converged to the right
# state
self.__cells_correct = np.mean(self.__executions[:, -1] ==
self.__majority.reshape(-1, 1), axis=1)
self.__correct = correct
self.__fitness = np.mean(self.__correct)
self.__gini = None
self.__limits = None
self.__entropy_rate = None
self.__base_table = None
        self.__correlations = None
        self.__table_individual = None
# Initialize the CUDA module
if not AnalysisIndividual.__cuda_module:
AnalysisIndividual.__cuda_module = True
cuda_module = aux.CudaModule('analysis.cu',
(self.__ca_size, self.__ca_iterations,
self.__ca_repeat,
self.__connection_radius,
self.__n_connections,
self.__n_observations,
self.__k_history,
self.__n_possible_history))
AnalysisIndividual.__kernel_calc_diffs = \
cuda_module.get_function("kernel_calc_diffs")
AnalysisIndividual.__kernel_probabilities = \
cuda_module.get_function("kernel_probabilities")
AnalysisIndividual.__kernel_active_storage = \
cuda_module.get_function("kernel_active_storage")
AnalysisIndividual.__kernel_entropy_rate = \
cuda_module.get_function("kernel_entropy_rate")
def __calculate_gini(self, values):
# Calculate the Gini coefficient to measure the inequality in a
# distribution of values
cum_values = np.sort(values).cumsum()
return 1 - (cum_values[0] + (cum_values[1:] + cum_values[:-1]).sum()) \
/ float(cum_values[-1] * cum_values.size)
def __get_limits(self):
# This function implements a heuristic to calculate how many times a
# cell has the role of "limit" of a diffusion in a simulation.
# The main idea here is that, usually, information in cellular automata
# flows in a given direction at a constant speed. If we know this
# direction and speed, we can check how many times a cell interrupts a
# flow.
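        # As a hypothetical illustration: if a front of 1s advances one cell
        # per iteration and stalls at cell k, the kernel attributes a "limit"
        # event to cell k for that iteration (assumed reading of the kernel).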
sum_diffs = np.zeros(self.__ca_size, dtype=np.uint32)
try:
self.__kernel_calc_diffs(cuda.In(self.__majority),
cuda.In(self.__executions),
cuda.InOut(sum_diffs),
block=(self.__ca_size, 1, 1), grid=(1,))
cuda.Context.synchronize()
except cuda.Error as e:
sys.exit("CUDA: Execution failed ('%s')!" % e)
# For all repetitions, calculate the ratio of total iterations each
# cell acted as a "limit"
self.__limits = sum_diffs / \
float(self.__ca_repeat * self.__ca_iterations)
def get_individual_info(self):
        if self.__gini is not None:
            # If all metrics are already computed, just return them!
            return self.__table_individual
self.__get_limits()
self.__gini = self.__calculate_gini(self.__limits)
self.__prop_max_min = self.__limits.max() / self.__limits.min()
# As clustering coefficient is not defined for directed graphs, we
# convert the graph to its undirected version
self.__clustering = nx.average_clustering(nx.Graph(self.__graph))
self.__average_shortest_path = \
nx.average_shortest_path_length(self.__graph)
try:
self.__diameter = nx.diameter(self.__graph)
except nx.exception.NetworkXError:
self.__diameter = float('nan')
self.__convergence = np.mean(self.__converged)
table_individual = {
# Serial number
"i_num": np.array([self.__individual_number], dtype=np.int),
# Individual fitness
"fit": np.array([self.__fitness], dtype=np.float),
# Ratio of the repetitions that converged to a single state
"conv": np.array([self.__convergence], dtype=np.float),
# gini and max_min are metrics intended to measure the inequality
# in the number of times each cell is a "limit"
"gini": np.array([self.__gini], dtype=np.float),
"max_min": np.array([self.__prop_max_min], dtype=np.float),
# Epoch in the evolution
"epoch": np.array([self.__individual.epoch], dtype=np.float),
# Clustering coefficient
"clust": np.array([self.__clustering], dtype=np.float),
# Average shortests path between each pair of cells
"short": np.array([self.__average_shortest_path], dtype=np.float),
# Maximum distance between any two cells
"diam": np.array([self.__diameter], dtype=np.float)}
        self.__table_individual = table_individual
        return table_individual
def __get_probs_entropy(self):
# Calculate information theoretical metrics to evaluate the
# computational role of each cell
        if self.__entropy_rate is not None:
# If all metrics are already computed, just return them!
return self.__entropy_rate, self.__active_storage, \
self.__cond_entropy
p_joint_table = np.zeros((self.__ca_size, self.__n_possible_history,
2), dtype=np.float32)
p_prev_table = np.zeros((self.__ca_size, self.__n_possible_history),
dtype=np.float32)
p_curr_table = np.zeros((self.__ca_size, 2), dtype=np.float32)
try:
self.__kernel_probabilities(cuda.In(self.__executions),
cuda.InOut(p_joint_table),
cuda.InOut(p_prev_table),
cuda.InOut(p_curr_table),
block=(self.__ca_size, 1, 1),
grid=(self.__ca_repeat, 1, 1))
cuda.Context.synchronize()
except cuda.Error as e:
sys.exit("CUDA: Execution failed!\n'%s'" % e)
# The entropy rate is a measure of the uncertainty in a cell's state
# given its past
self.__entropy_rate = np.zeros(self.__ca_size, dtype=np.float32)
# The active information storage is the amount of past information
# currently in use by a cell, i.e., its memory
self.__active_storage = np.zeros(self.__ca_size, dtype=np.float32)
try:
self.__kernel_entropy_rate(cuda.In(p_joint_table),
cuda.In(p_prev_table),
cuda.InOut(self.__entropy_rate),
block=(self.__ca_size, 1, 1))
cuda.Context.synchronize()
for i in xrange(self.__ca_iterations - self.__k_history):
ca_aux = np.array(self.__executions[:,
i:i + self.__k_history + 1,
:])
self.__kernel_active_storage(cuda.In(ca_aux),
cuda.In(p_joint_table),
cuda.In(p_prev_table),
cuda.In(p_curr_table),
cuda.InOut(self.__active_storage),
block=(self.__ca_size, 1, 1),
grid=(self.__ca_repeat, 1, 1))
cuda.Context.synchronize()
except cuda.Error as e:
sys.exit("CUDA: Execution failed!\n'%s'" % e)
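        # Per-cell conditional entropy of the current state given its
        # k-history: sum over histories of p(past, x_t) *
        # log2(p(past) / p(past, x_t)); zero-probability terms contribute
        # nothing and are zeroed out below.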
        aux = np.multiply(p_joint_table,
                          np.log2(np.divide(
                              p_prev_table.reshape(p_prev_table.shape + (1,)),
                              p_joint_table)))
aux[p_joint_table == 0] = 0
self.__cond_entropy = np.sum(aux, axis=(1, 2)) / self.__n_observations
return self.__entropy_rate, self.__active_storage, self.__cond_entropy
def get_cells_info(self):
self.__get_limits()
self.__get_probs_entropy()
full_data = {
"lim": self.__limits,
"ent_rt": self.__entropy_rate,
"act_st": self.__active_storage,
"cond_ent": self.__cond_entropy}
        if self.__base_table is None:
# Calculate graph measures
order = sorted(self.__graph.nodes())
pagerank = nx.pagerank(self.__graph)
pagerank = np.array([pagerank[k] for k in order], dtype=np.float)
try:
hubs, authorities = nx.hits(self.__graph, 1000)
hubs = np.array([hubs[k] for k in order], dtype=np.float)
authorities = np.array([authorities[k] for k in order],
dtype=np.float)
except nx.exception.NetworkXError:
hubs = np.repeat(float('nan'), self.__ca_size).astype(np.float)
authorities = hubs
try:
eccentricity = nx.eccentricity(self.__graph)
eccentricity = np.array([eccentricity[k] for k in order],
dtype=np.float)
except nx.exception.NetworkXError:
eccentricity = np.repeat(float('nan'), self.__ca_size). \
astype(np.float)
closeness = nx.closeness_centrality(self.__graph)
closeness = np.array([closeness[k] for k in order], dtype=np.float)
closeness_reverse = nx.closeness_centrality(
self.__graph.reverse(True))
closeness_reverse = np.array([closeness_reverse[k] for k in order],
dtype=np.float)
betweenness = nx.betweenness_centrality(self.__graph)
betweenness = np.array([betweenness[k] for k in order],
dtype=np.float)
try:
eigenvector = nx.eigenvector_centrality(self.__graph, 1000)
eigenvector = np.array([eigenvector[k] for k in order],
dtype=np.float)
except nx.exception.NetworkXError:
eigenvector = np.repeat(float('nan'), self.__ca_size). \
astype(np.float)
load = nx.load_centrality(self.__graph)
load = np.array([load[k] for k in order], dtype=np.float)
clustering = nx.clustering(nx.Graph(self.__graph))
clustering = np.array([clustering[k] for k in order],
dtype=np.float)
in_degree = nx.in_degree_centrality(self.__graph)
in_degree = np.array([in_degree[k] for k in order], dtype=np.float)
out_degree = nx.out_degree_centrality(self.__graph)
out_degree = np.array([out_degree[k] for k in order],
dtype=np.float)
rewires = np.array([self.__graph.node[k]['rew'] for k in order],
dtype=np.float)
average_k_neigh = nx.average_neighbor_degree(self.__graph)
average_k_neigh = np.array([average_k_neigh[k] for k in order],
dtype=np.float)
self.__base_table = {
"epoch": np.repeat(self.__individual.epoch, self.__ca_size). \
astype(np.int),
"i_num": np.repeat(self.__individual_number, self.__ca_size). \
astype(np.int),
"pr": pagerank,
"hub": hubs,
"auth": authorities,
"ecc": eccentricity,
"cls": closeness,
"cls_rev": closeness_reverse,
"btw": betweenness,
"eig": eigenvector,
"load": load,
"cltr": clustering,
"ind": in_degree,
"outd": out_degree,
"rew": rewires,
"kneigh": average_k_neigh}
return dict(full_data.items() + self.__base_table.items())
def save_executions(self):
# Save space-time diagrams of some executions
for i in np.random.choice(range(self.__executions.shape[0]),
self.__save_executions, replace=False):
aux.save_as_image(self.__executions[i],
"images/i%04d" % self.__individual_number,
"execution-%06d.png" % i)
class Analysis:
elems = 0
def __init__(self, data_file, ca_size, ca_iterations, ca_repeat,
connection_radius, k_history, save_executions=0):
self.__ca_size = ca_size
self.__ca_iterations = ca_iterations
self.__ca_repeat = ca_repeat
self.__connection_radius = connection_radius
self.__k_history = k_history
self.__save_executions = save_executions
self.__data_file = h5py.File(data_file, "w-")
def add_individual(self, individual):
# Run simulations with densities uniformly distributed in [0, 1],
        # storing execution data for posterior analysis
correct, executions = individual.get_execution_data(UNIFORM_RHO)
# Perform individual analysis
individual = AnalysisIndividual(individual, correct, executions,
self.__ca_size,
self.__connection_radius,
self.__ca_iterations, self.__ca_repeat,
self.__k_history,
save_executions=self.__save_executions)
Analysis.elems += 1
table_cells = individual.get_cells_info()
table_individual = individual.get_individual_info()
individual.save_executions()
del correct
del executions
del individual
# Store the individual analysis in a HDF5 file
group = self.__data_file.create_group("individual%d" %
table_individual["i_num"])
cells_grp = group.create_group("cells")
for key, values in table_cells.iteritems():
cells_grp.create_dataset(key, data=values, shape=values.shape,
dtype=values.dtype)
individuals_grp = group.create_group("individuals")
for key, values in table_individual.iteritems():
individuals_grp.create_dataset(key, data=values,
shape=values.shape,
dtype=values.dtype)
self.__data_file.flush()
def get_table(self):
table = {
"cells": {},
"individuals": {}}
for individual_grp in self.__data_file.values():
for group in ["cells", "individuals"]:
for key, values in individual_grp[group].iteritems():
try:
table[group][key].append(values.value)
except KeyError:
table[group][key] = [values.value]
for group_values in table.values():
for key, values in group_values.iteritems():
group_values[key] = np.concatenate(values)
return table
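    # get_table() returns {"cells": {metric: array}, "individuals":
    # {metric: array}}, each array concatenated over every individual
    # stored in the HDF5 file.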
def get_correlations(self):
table = self.get_table()
correlations = {'cells': {}, 'individuals': {}}
refs_cells = ['lim', 'cls_rev']
for ref in refs_cells:
correlations['cells'][ref] = {}
ref_cell = table['cells'][ref]
for key, values in table['cells'].iteritems():
if key == ref:
continue
correlations['cells'][ref][key] = \
st.spearmanr(ref_cell, values)
refs_individuals = ['gini', 'max_min', 'short', 'fit']
for ref in refs_individuals:
correlations['individuals'][ref] = {}
ref_individual = table['individuals'][ref]
for key, values in table['individuals'].iteritems():
if key == ref:
continue
correlations['individuals'][ref][key] = \
st.spearmanr(ref_individual, values)
return correlations
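# A minimal usage sketch (hypothetical driver: `evolve_population` stands in
# for whatever produces evolved individuals and is not defined here):
#
#     analysis = Analysis("run.hdf5", ca_size=149, ca_iterations=300,
#                         ca_repeat=100, connection_radius=3, k_history=8)
#     for individual in evolve_population():
#         analysis.add_individual(individual)
#     correlations = analysis.get_correlations()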
| unicamp-lbic/small_world_ca | analysis.py | Python | gpl-2.0 | 19,617 | 0.005251 |
from core import C
from sympify import sympify
from basic import Basic, Atom
from singleton import S
from evalf import EvalfMixin, pure_complex
from decorators import _sympifyit, call_highest_priority
from cache import cacheit
from compatibility import reduce, as_int, default_sort_key
from sympy.mpmath.libmp import mpf_log, prec_to_dps
from collections import defaultdict
from inspect import getmro
class Expr(Basic, EvalfMixin):
__slots__ = []
@property
def _diff_wrt(self):
"""Is it allowed to take derivative wrt to this instance.
This determines if it is allowed to take derivatives wrt this object.
Subclasses such as Symbol, Function and Derivative should return True
to enable derivatives wrt them. The implementation in Derivative
separates the Symbol and non-Symbol _diff_wrt=True variables and
        temporarily converts the non-Symbol vars into Symbols when performing
the differentiation.
Note, see the docstring of Derivative for how this should work
mathematically. In particular, note that expr.subs(yourclass, Symbol)
should be well-defined on a structural level, or this will lead to
inconsistent results.
Examples
========
>>> from sympy import Expr
>>> e = Expr()
>>> e._diff_wrt
False
>>> class MyClass(Expr):
... _diff_wrt = True
...
>>> (2*MyClass()).diff(MyClass())
2
"""
return False
@cacheit
def sort_key(self, order=None):
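        # Builds the canonical ordering key (class key, argument keys,
        # exponent key, coefficient) that default_sort_key and sorted()
        # rely on throughout SymPy, e.g.
        # sorted(expr.args, key=default_sort_key).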
coeff, expr = self.as_coeff_Mul()
if expr.is_Pow:
expr, exp = expr.args
else:
expr, exp = expr, S.One
if expr.is_Atom:
args = (str(expr),)
else:
if expr.is_Add:
args = expr.as_ordered_terms(order=order)
elif expr.is_Mul:
args = expr.as_ordered_factors(order=order)
else:
args = expr.args
args = tuple(
[ default_sort_key(arg, order=order) for arg in args ])
args = (len(args), tuple(args))
exp = exp.sort_key(order=order)
return expr.class_key(), args, exp, coeff
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
        operators. For instance, in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> from sympy import Lambda
>>> from sympy.abc import x,y,z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Expr._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Expr
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, C.Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Expr._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
# ***************
# * Arithmetics *
# ***************
    # Expr and its subclasses use _op_priority to determine which object
# passed to a binary special method (__mul__, etc.) will handle the
# operation. In general, the 'call_highest_priority' decorator will choose
# the object with the highest _op_priority to handle the call.
# Custom subclasses that want to define their own binary special methods
# should set an _op_priority value that is higher than the default.
#
# **NOTE**:
# This is a temporary fix, and will eventually be replaced with
# something better and more powerful. See issue 2411.
_op_priority = 10.0
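    # A sketch of how the dispatch behaves (illustrative subclass, not part
    # of this file): a higher-priority operand wins, so with
    #
    #     class MyExpr(Expr):
    #         _op_priority = 11.0
    #         def __rmul__(self, other):
    #             return 'MyExpr wins'
    #
    # the product x*MyExpr() is routed to MyExpr.__rmul__ by the
    # 'call_highest_priority' decorator on Expr.__mul__.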
def __pos__(self):
return self
def __neg__(self):
return Mul(S.NegativeOne, self)
def __abs__(self):
return C.Abs(self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return Add(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return Add(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return Add(self, -other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return Add(other, -self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return Mul(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return Mul(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
return Pow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
return Pow(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rdiv__')
def __div__(self, other):
return Mul(self, Pow(other, S.NegativeOne))
@_sympifyit('other', NotImplemented)
@call_highest_priority('__div__')
def __rdiv__(self, other):
return Mul(other, Pow(self, S.NegativeOne))
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmod__')
def __mod__(self, other):
return Mod(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mod__')
def __rmod__(self, other):
return Mod(other, self)
def __int__(self):
# Although we only need to round to the units position, we'll
# get one more digit so the extra testing below can be avoided
        # unless the rounded value rounds to an integer, e.g. if an
# expression were equal to 1.9 and we rounded to the unit position
# we would get a 2 and would not know if this rounded up or not
# without doing a test (as done below). But if we keep an extra
# digit we know that 1.9 is not the same as 1 and there is no
# need for further testing: our int value is correct. If the value
# were 1.99, however, this would round to 2.0 and our int value is
# off by one. So...if our round value is the same as the int value
# (regardless of how much extra work we do to calculate extra decimal
# places) we need to test whether we are off by one.
r = self.round(2)
if not r.is_Number:
raise TypeError("can't convert complex to int")
i = int(r)
if not i:
return 0
# off-by-one check
if i == r and not (self - i).equals(0):
isign = 1 if i > 0 else -1
x = C.Dummy()
# in the following (self - i).evalf(2) will not always work while
# (self - r).evalf(2) and the use of subs does; if the test that
# was added when this comment was added passes, it might be safe
# to simply use sign to compute this rather than doing this by hand:
diff_sign = 1 if (self - x).evalf(2, subs={x: i}) > 0 else -1
if diff_sign != isign:
i -= isign
return i
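    # For example int(pi) == 3 and int(-pi) == -3; the off-by-one check above
    # guards values such as 1.999999 that round up past the integer.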
def __float__(self):
# Don't bother testing if it's a number; if it's not this is going
# to fail, and if it is we still need to check that it evalf'ed to
# a number.
result = self.evalf()
if result.is_Number:
return float(result)
if result.is_number and result.as_real_imag()[1]:
raise TypeError("can't convert complex to float")
raise TypeError("can't convert expression to float")
def __complex__(self):
result = self.evalf()
re, im = result.as_real_imag()
return complex(float(re), float(im))
@_sympifyit('other', False) # sympy > other
def __ge__(self, other):
dif = self - other
if dif.is_nonnegative is not None and \
dif.is_nonnegative is not dif.is_negative:
return dif.is_nonnegative
return C.GreaterThan(self, other)
@_sympifyit('other', False) # sympy > other
def __le__(self, other):
dif = self - other
if dif.is_nonpositive is not None and \
dif.is_nonpositive is not dif.is_positive:
return dif.is_nonpositive
return C.LessThan(self, other)
@_sympifyit('other', False) # sympy > other
def __gt__(self, other):
dif = self - other
if dif.is_positive is not None and \
dif.is_positive is not dif.is_nonpositive:
return dif.is_positive
return C.StrictGreaterThan(self, other)
@_sympifyit('other', False) # sympy > other
def __lt__(self, other):
dif = self - other
if dif.is_negative is not None and \
dif.is_negative is not dif.is_nonnegative:
return dif.is_negative
return C.StrictLessThan(self, other)
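    # These comparison hooks consult the assumption system first: for a real
    # symbol y, (y**2 + 1) > 0 evaluates directly to True, while x > 0 for an
    # unconstrained x stays symbolic as StrictGreaterThan(x, 0).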
@staticmethod
def _from_mpmath(x, prec):
if hasattr(x, "_mpf_"):
return C.Float._new(x._mpf_, prec)
elif hasattr(x, "_mpc_"):
re, im = x._mpc_
re = C.Float._new(re, prec)
im = C.Float._new(im, prec)*S.ImaginaryUnit
return re + im
else:
raise TypeError("expected mpmath number (mpf or mpc)")
@property
def is_number(self):
"""Returns True if 'self' is a number.
>>> from sympy import log, Integral
>>> from sympy.abc import x, y
>>> x.is_number
False
>>> (2*x).is_number
False
>>> (2 + log(2)).is_number
True
>>> (2 + Integral(2, x)).is_number
False
>>> (2 + Integral(2, (x, 1, 2))).is_number
True
"""
if not self.args:
return False
return all(obj.is_number for obj in self.iter_basic_args())
def _random(self, n=None, re_min=-1, im_min=-1, re_max=1, im_max=1):
"""Return self evaluated, if possible, replacing free symbols with
random complex values, if necessary.
The random complex value for each free symbol is generated
by the random_complex_number routine giving real and imaginary
parts in the range given by the re_min, re_max, im_min, and im_max
values. The returned value is evaluated to a precision of n
(if given) else the maximum of 15 and the precision needed
to get more than 1 digit of precision. If the expression
could not be evaluated to a number, or could not be evaluated
to more than 1 digit of precision, then None is returned.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x, y
>>> x._random() # doctest: +SKIP
0.0392918155679172 + 0.916050214307199*I
>>> x._random(2) # doctest: +SKIP
-0.77 - 0.87*I
>>> (x + y/2)._random(2) # doctest: +SKIP
-0.57 + 0.16*I
>>> sqrt(2)._random(2)
1.4
See Also
========
sympy.utilities.randtest.random_complex_number
"""
free = self.free_symbols
prec = 1
if free:
from sympy.utilities.randtest import random_complex_number
a, c, b, d = re_min, re_max, im_min, im_max
reps = dict(zip(free, [random_complex_number(a, b, c, d, rational=True)
for zi in free]))
try:
nmag = abs(self.evalf(2, subs=reps))
except TypeError:
# if an out of range value resulted in evalf problems
# then return None -- XXX is there a way to know how to
# select a good random number for a given expression?
# e.g. when calculating n! negative values for n should not
# be used
return None
else:
reps = {}
nmag = abs(self.evalf(2))
if not hasattr(nmag, '_prec'):
# e.g. exp_polar(2*I*pi) doesn't evaluate but is_number is True
return None
if nmag._prec == 1:
# increase the precision up to the default maximum
# precision to see if we can get any significance
# get the prec steps (patterned after giant_steps in
# libintmath) which approximately doubles the prec
# each step
from sympy.core.evalf import DEFAULT_MAXPREC as target
L = [target]
start = 2
while 1:
Li = L[-1]//2 + 2
if Li >= L[-1] or Li < start:
if L[-1] != start:
L.append(start)
break
L.append(Li)
L = L[::-1]
# evaluate
for prec in L:
nmag = abs(self.evalf(prec, subs=reps))
if nmag._prec != 1:
break
if nmag._prec != 1:
if n is None:
n = max(prec, 15)
return self.evalf(n, subs=reps)
# never got any significance
return None
def is_constant(self, *wrt, **flags):
"""Return True if self is constant, False if not, or None if
the constancy could not be determined conclusively.
If an expression has no free symbols then it is a constant. If
there are free symbols it is possible that the expression is a
constant, perhaps (but not necessarily) zero. To test such
expressions, two strategies are tried:
1) numerical evaluation at two random points. If two such evaluations
give two different values and the values have a precision greater than
1 then self is not constant. If the evaluations agree or could not be
obtained with any precision, no decision is made. The numerical testing
        is done only if ``wrt`` is different from the free symbols.
2) differentiation with respect to variables in 'wrt' (or all free
symbols if omitted) to see if the expression is constant or not. This
will not always lead to an expression that is zero even though an
expression is constant (see added test in test_expr.py). If
all derivatives are zero then self is constant with respect to the
given symbols.
If neither evaluation nor differentiation can prove the expression is
constant, None is returned unless two numerical values happened to be
the same and the flag ``failing_number`` is True -- in that case the
numerical value will be returned.
If flag simplify=False is passed, self will not be simplified;
the default is True since self should be simplified before testing.
Examples
========
>>> from sympy import cos, sin, Sum, S, pi
>>> from sympy.abc import a, n, x, y
>>> x.is_constant()
False
>>> S(2).is_constant()
True
>>> Sum(x, (x, 1, 10)).is_constant()
True
>>> Sum(x, (x, 1, n)).is_constant() # doctest: +SKIP
False
>>> Sum(x, (x, 1, n)).is_constant(y)
True
>>> Sum(x, (x, 1, n)).is_constant(n) # doctest: +SKIP
False
>>> Sum(x, (x, 1, n)).is_constant(x)
True
>>> eq = a*cos(x)**2 + a*sin(x)**2 - a
>>> eq.is_constant()
True
>>> eq.subs({x:pi, a:2}) == eq.subs({x:pi, a:3}) == 0
True
>>> (0**x).is_constant()
False
>>> x.is_constant()
False
>>> (x**x).is_constant()
False
>>> one = cos(x)**2 + sin(x)**2
>>> one.is_constant()
True
>>> ((one - 1)**(x + 1)).is_constant() # could be 0 or 1
False
"""
simplify = flags.get('simplify', True)
# Except for expressions that contain units, only one of these should
# be necessary since if something is
# known to be a number it should also know that there are no
# free symbols. But is_number quits as soon as it hits a non-number
# whereas free_symbols goes until all free symbols have been collected,
# thus is_number should be faster. But a double check on free symbols
# is made just in case there is a discrepancy between the two.
free = self.free_symbols
if self.is_number or not free:
# if the following assertion fails then that object's free_symbols
# method needs attention: if an expression is a number it cannot
# have free symbols
assert not free
return True
# if we are only interested in some symbols and they are not in the
# free symbols then this expression is constant wrt those symbols
wrt = set(wrt)
if wrt and not wrt & free:
return True
wrt = wrt or free
# simplify unless this has already been done
if simplify:
self = self.simplify()
# is_zero should be a quick assumptions check; it can be wrong for
# numbers (see test_is_not_constant test), giving False when it
# shouldn't, but hopefully it will never give True unless it is sure.
if self.is_zero:
return True
# try numerical evaluation to see if we get two different values
failing_number = None
if wrt == free:
# try 0 and 1
a = self.subs(zip(free, [0]*len(free)))
if a is S.NaN:
a = self._random(None, 0, 0, 0, 0)
if a is not None and a is not S.NaN:
b = self.subs(zip(free, [1]*len(free)))
if b is S.NaN:
b = self._random(None, 1, 0, 1, 0)
if b is not None and b is not S.NaN:
if b.equals(a) is False:
return False
# try random real
b = self._random(None, -1, 0, 1, 0)
if b is not None and b is not S.NaN and b.equals(a) is False:
return False
# try random complex
b = self._random()
if b is not None and b is not S.NaN:
if a != b:
return False
failing_number = a if a.is_number else b
# now we will test each wrt symbol (or all free symbols) to see if the
# expression depends on them or not using differentiation. This is
# not sufficient for all expressions, however, so we don't return
# False if we get a derivative other than 0 with free symbols.
for w in wrt:
deriv = self.diff(w).simplify()
if deriv != 0:
if not (deriv.is_Number or pure_complex(deriv)):
if flags.get('failing_number', False):
return failing_number
elif deriv.free_symbols:
# dead line provided _random returns None in such cases
return None
return False
return True
def equals(self, other, failing_expression=False):
"""Return True if self == other, False if it doesn't, or None. If
failing_expression is True then the expression which did not simplify
to a 0 will be returned instead of None.
If ``self`` is a Number (or complex number) that is not zero, then
the result is False.
If ``self`` is a number and has not evaluated to zero, evalf will be
used to test whether the expression evaluates to zero. If it does so
and the result has significance (i.e. the precision is either -1, for
a Rational result, or is greater than 1) then the evalf value will be
used to return True or False.
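        For example (the first case relies on simplify reducing the
        difference to zero):
        >>> from sympy import cos, sin
        >>> from sympy.abc import x
        >>> (cos(x)**2 + sin(x)**2).equals(1)
        True
        >>> (x + 1).equals(x)
        False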
"""
other = sympify(other)
if self == other:
return True
# they aren't the same so see if we can make the difference 0;
# don't worry about doing simplification steps one at a time
# because if the expression ever goes to 0 then the subsequent
# simplification steps that are done will be very fast.
diff = (self - other).as_content_primitive()[1]
diff = factor_terms(diff.simplify(), radical=True)
if not diff:
return True
if all(f.is_Atom for m in Add.make_args(diff)
for f in Mul.make_args(m)):
# if there is no expanding to be done after simplifying
# then this can't be a zero
return False
constant = diff.is_constant(simplify=False, failing_number=True)
if constant is False or \
not diff.free_symbols and not diff.is_number:
return False
elif constant is True:
ndiff = diff._random()
if ndiff:
return False
# diff has not simplified to zero; constant is either None, True
# or the number with significance (prec != 1) that was randomly
# calculated twice as the same value.
if constant not in (True, None) and constant != 0:
return False
if failing_expression:
return diff
return None
def _eval_is_positive(self):
if self.is_number:
if self.is_real is False:
return False
try:
# check to see that we can get a value
n2 = self._eval_evalf(2)
if n2 is None:
raise AttributeError
if n2._prec == 1: # no significance
raise AttributeError
except AttributeError:
return None
n, i = self.evalf(2).as_real_imag()
if not i.is_Number or not n.is_Number:
return False
if i:
if i._prec != 1:
return False
elif n._prec != 1:
if n > 0:
return True
return False
def _eval_is_negative(self):
if self.is_number:
if self.is_real is False:
return False
try:
# check to see that we can get a value
n2 = self._eval_evalf(2)
if n2 is None:
raise AttributeError
if n2._prec == 1: # no significance
raise AttributeError
except AttributeError:
return None
n, i = self.evalf(2).as_real_imag()
if not i.is_Number or not n.is_Number:
return False
if i:
if i._prec != 1:
return False
elif n._prec != 1:
if n < 0:
return True
return False
def _eval_interval(self, x, a, b):
"""
Returns evaluation over an interval. For most functions this is:
self.subs(x, b) - self.subs(x, a),
possibly using limit() if NaN is returned from subs.
        If b or a is None, it only evaluates -self.subs(x, a) or self.subs(x, b),
respectively.
"""
from sympy.series import limit
if (a is None and b is None):
raise ValueError('Both interval ends cannot be None.')
if a is None:
A = 0
else:
A = self.subs(x, a)
if A.has(S.NaN):
A = limit(self, x, a)
if A is S.NaN:
return A
if b is None:
B = 0
else:
B = self.subs(x, b)
if B.has(S.NaN):
B = limit(self, x, b)
return B - A
def _eval_power(self, other):
# subclass to compute self**other for cases when
# other is not NaN, 0, or 1
return None
def _eval_conjugate(self):
if self.is_real:
return self
elif self.is_imaginary:
return -self
def conjugate(self):
from sympy.functions.elementary.complexes import conjugate as c
return c(self)
def _eval_transpose(self):
from sympy.functions.elementary.complexes import conjugate
if self.is_complex:
return self
elif self.is_hermitian:
return conjugate(self)
elif self.is_antihermitian:
return -conjugate(self)
def transpose(self):
from sympy.functions.elementary.complexes import transpose
return transpose(self)
def _eval_adjoint(self):
from sympy.functions.elementary.complexes import conjugate, transpose
if self.is_hermitian:
return self
elif self.is_antihermitian:
return -self
obj = self._eval_conjugate()
if obj is not None:
return transpose(obj)
obj = self._eval_transpose()
if obj is not None:
return conjugate(obj)
def adjoint(self):
from sympy.functions.elementary.complexes import adjoint
return adjoint(self)
@classmethod
def _parse_order(cls, order):
"""Parse and configure the ordering of terms. """
from sympy.polys.monomialtools import monomial_key
try:
reverse = order.startswith('rev-')
except AttributeError:
reverse = False
else:
if reverse:
order = order[4:]
monom_key = monomial_key(order)
def neg(monom):
result = []
for m in monom:
if isinstance(m, tuple):
result.append(neg(m))
else:
result.append(-m)
return tuple(result)
def key(term):
_, ((re, im), monom, ncpart) = term
monom = neg(monom_key(monom))
ncpart = tuple([ e.sort_key(order=order) for e in ncpart ])
coeff = ((bool(im), im), (re, im))
return monom, ncpart, coeff
return key, reverse
def as_ordered_factors(self, order=None):
"""Return list of ordered factors (if Mul) else [self]."""
return [self]
def as_ordered_terms(self, order=None, data=False):
"""
Transform an expression to an ordered list of terms.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x, y
>>> (sin(x)**2*cos(x) + sin(x)**2 + 1).as_ordered_terms()
[sin(x)**2*cos(x), sin(x)**2, 1]
"""
key, reverse = self._parse_order(order)
terms, gens = self.as_terms()
if not any(term.is_Order for term, _ in terms):
ordered = sorted(terms, key=key, reverse=reverse)
else:
_terms, _order = [], []
for term, repr in terms:
if not term.is_Order:
_terms.append((term, repr))
else:
_order.append((term, repr))
ordered = sorted(_terms, key=key, reverse=True) \
+ sorted(_order, key=key, reverse=True)
if data:
return ordered, gens
else:
return [ term for term, _ in ordered ]
def as_terms(self):
"""Transform an expression to a list of terms. """
from sympy.core import Add, Mul, S
from sympy.core.exprtools import decompose_power
gens, terms = set([]), []
for term in Add.make_args(self):
coeff, _term = term.as_coeff_Mul()
coeff = complex(coeff)
cpart, ncpart = {}, []
if _term is not S.One:
for factor in Mul.make_args(_term):
if factor.is_number:
try:
coeff *= complex(factor)
except TypeError:
pass
else:
continue
if factor.is_commutative:
base, exp = decompose_power(factor)
cpart[base] = exp
gens.add(base)
else:
ncpart.append(factor)
coeff = coeff.real, coeff.imag
ncpart = tuple(ncpart)
terms.append((term, (coeff, cpart, ncpart)))
gens = sorted(gens, key=default_sort_key)
k, indices = len(gens), {}
for i, g in enumerate(gens):
indices[g] = i
result = []
for term, (coeff, cpart, ncpart) in terms:
monom = [0]*k
for base, exp in cpart.iteritems():
monom[indices[base]] = exp
result.append((term, (coeff, tuple(monom), ncpart)))
return result, gens
def removeO(self):
"""Removes the additive O(..) symbol if there is one"""
return self
def getO(self):
"""Returns the additive O(..) symbol if there is one, else None."""
return None
def getn(self):
"""
Returns the order of the expression.
        The order is determined from the O(...) term. If there
is no O(...) term, it returns None.
Examples
========
>>> from sympy import O
>>> from sympy.abc import x
>>> (1 + x + O(x**2)).getn()
2
>>> (1 + x).getn()
"""
o = self.getO()
if o is None:
return None
elif o.is_Order:
o = o.expr
if o is S.One:
return S.Zero
if o.is_Symbol:
return S.One
if o.is_Pow:
return o.args[1]
if o.is_Mul: # x**n*log(x)**n or x**n/log(x)**n
for oi in o.args:
if oi.is_Symbol:
return S.One
if oi.is_Pow:
syms = oi.atoms(C.Symbol)
if len(syms) == 1:
x = syms.pop()
oi = oi.subs(x, C.Dummy('x', positive=True))
if oi.base.is_Symbol and oi.exp.is_Rational:
return abs(oi.exp)
raise NotImplementedError('not sure of order of %s' % o)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from function import count_ops
return count_ops(self, visual)
def args_cnc(self, cset=False, warn=True, split_1=True):
"""Return [commutative factors, non-commutative factors] of self.
self is treated as a Mul and the ordering of the factors is maintained.
If ``cset`` is True the commutative factors will be returned in a set.
If there were repeated factors (as may happen with an unevaluated Mul)
        then an error will be raised unless it is explicitly suppressed by
setting ``warn`` to False.
Note: -1 is always separated from a Number unless split_1 is False.
>>> from sympy import symbols, oo
>>> A, B = symbols('A B', commutative=0)
>>> x, y = symbols('x y')
>>> (-2*x*y).args_cnc()
[[-1, 2, x, y], []]
>>> (-2.5*x).args_cnc()
[[-1, 2.5, x], []]
>>> (-2*x*A*B*y).args_cnc()
[[-1, 2, x, y], [A, B]]
>>> (-2*x*A*B*y).args_cnc(split_1=False)
[[-2, x, y], [A, B]]
>>> (-2*x*y).args_cnc(cset=True)
[set([-1, 2, x, y]), []]
The arg is always treated as a Mul:
>>> (-2 + x + A).args_cnc()
[[], [x - 2 + A]]
>>> (-oo).args_cnc() # -oo is a singleton
[[-1, oo], []]
"""
if self.is_Mul:
args = list(self.args)
else:
args = [self]
for i, mi in enumerate(args):
if not mi.is_commutative:
c = args[:i]
nc = args[i:]
break
else:
c = args
nc = []
if c and split_1 and (
c[0].is_Number and
c[0].is_negative and
c[0] != S.NegativeOne):
c[:1] = [S.NegativeOne, -c[0]]
if cset:
clen = len(c)
c = set(c)
if clen and warn and len(c) != clen:
raise ValueError('repeated commutative arguments: %s' %
[ci for ci in c if list(self.args).count(ci) > 1])
return [c, nc]
def coeff(self, x, n=1, right=False):
"""
        Returns the coefficient from the term(s) containing ``x**n``; if there
        is no such term, 0 is returned. If ``n`` is zero then all terms
        independent of ``x`` will be returned.
When x is noncommutative, the coeff to the left (default) or right of x
can be returned. The keyword 'right' is ignored when x is commutative.
See Also
========
as_coefficient: separate the expression into a coefficient and factor
as_coeff_Add: separate the additive constant from an expression
as_coeff_Mul: separate the multiplicative constant from an expression
as_independent: separate x-dependent terms/factors from others
sympy.polys.polytools.coeff_monomial: efficiently find the single coefficient of a monomial in Poly
sympy.polys.polytools.nth: like coeff_monomial but powers of monomial terms are used
Examples
========
>>> from sympy import symbols
>>> from sympy.abc import x, y, z
You can select terms that have an explicit negative in front of them:
>>> (-x + 2*y).coeff(-1)
x
>>> (x - 2*y).coeff(-1)
2*y
You can select terms with no Rational coefficient:
>>> (x + 2*y).coeff(1)
x
>>> (3 + 2*x + 4*x**2).coeff(1)
0
You can select terms independent of x by making n=0; in this case
expr.as_independent(x)[0] is returned (and 0 will be returned instead
of None):
>>> (3 + 2*x + 4*x**2).coeff(x, 0)
3
>>> eq = ((x + 1)**3).expand() + 1
>>> eq
x**3 + 3*x**2 + 3*x + 2
>>> [eq.coeff(x, i) for i in reversed(range(4))]
[1, 3, 3, 2]
>>> eq -= 2
>>> [eq.coeff(x, i) for i in reversed(range(4))]
[1, 3, 3, 0]
You can select terms that have a numerical term in front of them:
>>> (-x - 2*y).coeff(2)
-y
>>> from sympy import sqrt
>>> (x + sqrt(2)*x).coeff(sqrt(2))
x
The matching is exact:
>>> (3 + 2*x + 4*x**2).coeff(x)
2
>>> (3 + 2*x + 4*x**2).coeff(x**2)
4
>>> (3 + 2*x + 4*x**2).coeff(x**3)
0
>>> (z*(x + y)**2).coeff((x + y)**2)
z
>>> (z*(x + y)**2).coeff(x + y)
0
In addition, no factoring is done, so 1 + z*(1 + y) is not obtained
from the following:
>>> (x + z*(x + x*y)).coeff(x)
1
If such factoring is desired, factor_terms can be used first:
>>> from sympy import factor_terms
>>> factor_terms(x + z*(x + x*y)).coeff(x)
z*(y + 1) + 1
>>> n, m, o = symbols('n m o', commutative=False)
>>> n.coeff(n)
1
>>> (3*n).coeff(n)
3
>>> (n*m + m*n*m).coeff(n) # = (1 + m)*n*m
1 + m
>>> (n*m + m*n*m).coeff(n, right=True) # = (1 + m)*n*m
m
If there is more than one possible coefficient 0 is returned:
>>> (n*m + m*n).coeff(n)
0
If there is only one possible coefficient, it is returned:
>>> (n*m + x*m*n).coeff(m*n)
x
>>> (n*m + x*m*n).coeff(m*n, right=1)
1
"""
x = sympify(x)
if not isinstance(x, Basic):
return S.Zero
n = as_int(n)
if not x:
return S.Zero
if x == self:
if n == 1:
return S.One
return S.Zero
if x is S.One:
co = [a for a in Add.make_args(self)
if a.as_coeff_Mul()[0] is S.One]
if not co:
return S.Zero
return Add(*co)
if n == 0:
if x.is_Add and self.is_Add:
c = self.coeff(x, right=right)
if not c:
return S.Zero
if not right:
return self - Add(*[a*x for a in Add.make_args(c)])
return self - Add(*[x*a for a in Add.make_args(c)])
return self.as_independent(x, as_Add=not self.is_Mul)[0]
# continue with the full method, looking for this power of x:
x = x**n
def incommon(l1, l2):
if not l1 or not l2:
return []
n = min(len(l1), len(l2))
for i in xrange(n):
if l1[i] != l2[i]:
return l1[:i]
return l1[:]
def find(l, sub, first=True):
""" Find where list sub appears in list l. When ``first`` is True
the first occurance from the left is returned, else the last
occurance is returned. Return None if sub is not in l.
>> l = range(5)*2
>> find(l, [2, 3])
2
>> find(l, [2, 3], first=0)
7
>> find(l, [2, 4])
None
"""
if not sub or not l or len(sub) > len(l):
return None
n = len(sub)
if not first:
l.reverse()
sub.reverse()
for i in xrange(0, len(l) - n + 1):
if all(l[i + j] == sub[j] for j in range(n)):
break
else:
i = None
if not first:
l.reverse()
sub.reverse()
if i is not None and not first:
i = len(l) - (i + n)
return i
co = []
args = Add.make_args(self)
self_c = self.is_commutative
x_c = x.is_commutative
if self_c and not x_c:
return S.Zero
if self_c:
xargs = x.args_cnc(cset=True, warn=False)[0]
for a in args:
margs = a.args_cnc(cset=True, warn=False)[0]
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append(Mul(*resid))
if co == []:
return S.Zero
elif co:
return Add(*co)
elif x_c:
xargs = x.args_cnc(cset=True, warn=False)[0]
for a in args:
margs, nc = a.args_cnc(cset=True)
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append(Mul(*(list(resid) + nc)))
if co == []:
return S.Zero
elif co:
return Add(*co)
else: # both nc
xargs, nx = x.args_cnc(cset=True)
# find the parts that pass the commutative terms
for a in args:
margs, nc = a.args_cnc(cset=True)
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append((resid, nc))
# now check the non-comm parts
if not co:
return S.Zero
if all(n == co[0][1] for r, n in co):
ii = find(co[0][1], nx, right)
if ii is not None:
if not right:
return Mul(Add(*[Mul(*r) for r, c in co]), Mul(*co[0][1][:ii]))
else:
return Mul(*co[0][1][ii + len(nx):])
beg = reduce(incommon, (n[1] for n in co))
if beg:
ii = find(beg, nx, right)
if ii is not None:
if not right:
gcdc = co[0][0]
for i in xrange(1, len(co)):
gcdc = gcdc.intersection(co[i][0])
if not gcdc:
break
return Mul(*(list(gcdc) + beg[:ii]))
else:
m = ii + len(nx)
return Add(*[Mul(*(list(r) + n[m:])) for r, n in co])
end = list(reversed(
reduce(incommon, (list(reversed(n[1])) for n in co))))
if end:
ii = find(end, nx, right)
if ii is not None:
if not right:
return Add(*[Mul(*(list(r) + n[:-len(end) + ii])) for r, n in co])
else:
return Mul(*end[ii + len(nx):])
# look for single match
hit = None
for i, (r, n) in enumerate(co):
ii = find(n, nx, right)
if ii is not None:
if not hit:
hit = ii, r, n
else:
break
else:
if hit:
ii, r, n = hit
if not right:
return Mul(*(list(r) + n[:ii]))
else:
return Mul(*n[ii + len(nx):])
return S.Zero
def as_expr(self, *gens):
"""
Convert a polynomial to a SymPy expression.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> f = (x**2 + x*y).as_poly(x, y)
>>> f.as_expr()
x**2 + x*y
>>> sin(x).as_expr()
sin(x)
"""
return self
def as_coefficient(self, expr):
"""
Extracts symbolic coefficient at the given expression. In
        other words, this function separates 'self' into the product
        of 'expr' and an 'expr'-free coefficient. If such separation
is not possible it will return None.
Examples
========
>>> from sympy import E, pi, sin, I, symbols, Poly
>>> from sympy.abc import x, y
>>> E.as_coefficient(E)
1
>>> (2*E).as_coefficient(E)
2
>>> (2*sin(E)*E).as_coefficient(E)
Two terms have E in them so a sum is returned. (If one were
desiring the coefficient of the term exactly matching E then
the constant from the returned expression could be selected.
Or, for greater precision, a method of Poly can be used to
indicate the desired term from which the coefficient is
desired.)
>>> (2*E + x*E).as_coefficient(E)
x + 2
>>> _.args[0] # just want the exact match
2
>>> p = Poly(2*E + x*E); p
Poly(x*E + 2*E, x, E, domain='ZZ')
>>> p.coeff_monomial(E)
2
>>> p.nth(0,1)
2
Since the following cannot be written as a product containing
E as a factor, None is returned. (If the coefficient ``2*x`` is
desired then the ``coeff`` method should be used.)
>>> (2*E*x + x).as_coefficient(E)
>>> (2*E*x + x).coeff(E)
2*x
>>> (E*(x + 1) + x).as_coefficient(E)
>>> (2*pi*I).as_coefficient(pi*I)
2
>>> (2*I).as_coefficient(pi*I)
See Also
========
coeff: return sum of terms have a given factor
as_coeff_Add: separate the additive constant from an expression
as_coeff_Mul: separate the multiplicative constant from an expression
as_independent: separate x-dependent terms/factors from others
sympy.polys.polytools.coeff_monomial: efficiently find the single coefficient of a monomial in Poly
sympy.polys.polytools.nth: like coeff_monomial but powers of monomial terms are used
"""
r = self.extract_multiplicatively(expr)
if r and not r.has(expr):
return r
def as_independent(self, *deps, **hint):
"""
        A mostly naive separation of a Mul or Add into arguments that are not
        dependent on deps. To obtain as complete a separation of variables
as possible, use a separation method first, e.g.:
* separatevars() to change Mul, Add and Pow (including exp) into Mul
* .expand(mul=True) to change Add or Mul into Add
* .expand(log=True) to change log expr into an Add
The only non-naive thing that is done here is to respect noncommutative
ordering of variables.
The returned tuple (i, d) has the following interpretation:
        * i will have no variable that appears in deps
* d will be 1 or else have terms that contain variables that are in deps
* if self is an Add then self = i + d
* if self is a Mul then self = i*d
* if self is anything else, either tuple (self, S.One) or (S.One, self)
is returned.
To force the expression to be treated as an Add, use the hint as_Add=True
Examples
========
-- self is an Add
>>> from sympy import sin, cos, exp
>>> from sympy.abc import x, y, z
>>> (x + x*y).as_independent(x)
(0, x*y + x)
>>> (x + x*y).as_independent(y)
(x, x*y)
>>> (2*x*sin(x) + y + x + z).as_independent(x)
(y + z, 2*x*sin(x) + x)
>>> (2*x*sin(x) + y + x + z).as_independent(x, y)
(z, 2*x*sin(x) + x + y)
-- self is a Mul
>>> (x*sin(x)*cos(y)).as_independent(x)
(cos(y), x*sin(x))
non-commutative terms cannot always be separated out when self is a Mul
>>> from sympy import symbols
>>> n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
>>> (n1 + n1*n2).as_independent(n2)
(n1, n1*n2)
>>> (n2*n1 + n1*n2).as_independent(n2)
(0, n1*n2 + n2*n1)
>>> (n1*n2*n3).as_independent(n1)
(1, n1*n2*n3)
>>> (n1*n2*n3).as_independent(n2)
(n1, n2*n3)
>>> ((x-n1)*(x-y)).as_independent(x)
(1, (x - y)*(x - n1))
-- self is anything else:
>>> (sin(x)).as_independent(x)
(1, sin(x))
>>> (sin(x)).as_independent(y)
(sin(x), 1)
>>> exp(x+y).as_independent(x)
(1, exp(x + y))
-- force self to be treated as an Add:
>>> (3*x).as_independent(x, as_Add=True)
(0, 3*x)
-- force self to be treated as a Mul:
>>> (3+x).as_independent(x, as_Add=False)
(1, x + 3)
>>> (-3+x).as_independent(x, as_Add=False)
(1, x - 3)
Note how the below differs from the above in making the
constant on the dep term positive.
>>> (y*(-3+x)).as_independent(x)
(y, x - 3)
-- use .as_independent() for true independence testing instead
of .has(). The former considers only symbols in the free
symbols while the latter considers all symbols
>>> from sympy import Integral
>>> I = Integral(x, (x, 1, 2))
>>> I.has(x)
True
>>> x in I.free_symbols
False
>>> I.as_independent(x) == (I, 1)
True
>>> (I + x).as_independent(x) == (I, x)
True
Note: when trying to get independent terms, a separation method
might need to be used first. In this case, it is important to keep
track of what you send to this routine so you know how to interpret
the returned values
>>> from sympy import separatevars, log
>>> separatevars(exp(x+y)).as_independent(x)
(exp(y), exp(x))
>>> (x + x*y).as_independent(y)
(x, x*y)
>>> separatevars(x + x*y).as_independent(y)
(x, y + 1)
>>> (x*(1 + y)).as_independent(y)
(x, y + 1)
>>> (x*(1 + y)).expand(mul=True).as_independent(y)
(x, x*y)
>>> a, b=symbols('a b',positive=True)
>>> (log(a*b).expand(log=True)).as_independent(b)
(log(a), log(b))
See also: .separatevars(), .expand(log=True),
.as_two_terms(), .as_coeff_add(), .as_coeff_mul()
"""
from sympy.utilities.iterables import sift
func = self.func
# sift out deps into symbolic and other and ignore
# all symbols but those that are in the free symbols
sym = set()
other = []
for d in deps:
if isinstance(d, C.Symbol): # Symbol.is_Symbol is True
sym.add(d)
else:
other.append(d)
def has(e):
"""return the standard has() if there are no literal symbols, else
check to see that symbol-deps are in the free symbols."""
has_other = e.has(*other)
if not sym:
return has_other
return has_other or e.has(*(e.free_symbols & sym))
if hint.get('as_Add', func is Add):
want = Add
else:
want = Mul
if (want is not func or
func is not Add and func is not Mul):
if has(self):
return (want.identity, self)
else:
return (self, want.identity)
else:
if func is Add:
args = list(self.args)
else:
args, nc = self.args_cnc()
d = sift(args, lambda x: has(x))
depend = d[True]
indep = d[False]
if func is Add: # all terms were treated as commutative
return (Add(*indep),
Add(*depend))
else: # handle noncommutative by stopping at first dependent term
for i, n in enumerate(nc):
if has(n):
depend.extend(nc[i:])
break
indep.append(n)
return Mul(*indep), Mul(*depend)
def as_real_imag(self, deep=True, **hints):
"""Performs complex expansion on 'self' and returns a tuple
        containing the collected real and imaginary parts. This
        method shouldn't be confused with the re() and im() functions,
        which do not perform complex expansion at evaluation.
However it is possible to expand both re() and im()
functions and get exactly the same results as with
a single call to this function.
>>> from sympy import symbols, I
>>> x, y = symbols('x,y', real=True)
>>> (x + y*I).as_real_imag()
(x, y)
>>> from sympy.abc import z, w
>>> (z + w*I).as_real_imag()
(re(z) - im(w), re(w) + im(z))
"""
if hints.get('ignore') == self:
return None
else:
return (C.re(self), C.im(self))
def as_powers_dict(self):
"""Return self as a dictionary of factors with each factor being
treated as a power. The keys are the bases of the factors and the
values, the corresponding exponents. The resulting dictionary should
be used with caution if the expression is a Mul and contains non-
        commutative factors since the order in which they appeared will be lost in
the dictionary."""
d = defaultdict(int)
d.update(dict([self.as_base_exp()]))
return d
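    # For a non-Mul such as x**2 the result above is {x: 2}; Mul overrides
    # this method to collect one entry per factor, e.g. {x: 2, y: 1} for
    # x**2*y.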
def as_coefficients_dict(self):
"""Return a dictionary mapping terms to their Rational coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0. If an expression is
not an Add it is considered to have a single term.
Examples
========
>>> from sympy.abc import a, x
>>> (3*x + a*x + 4).as_coefficients_dict()
{1: 4, x: 3, a*x: 1}
>>> _[a]
0
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
"""
c, m = self.as_coeff_Mul()
if not c.is_Rational:
c = S.One
m = self
d = defaultdict(int)
d.update({m: c})
return d
def as_base_exp(self):
# a -> b ** e
return self, S.One
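    # Pow overrides this hook, so (x**3).as_base_exp() gives (x, 3) while a
    # bare symbol x gives (x, 1).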
def as_coeff_mul(self, *deps):
"""Return the tuple (c, args) where self is written as a Mul, ``m``.
c should be a Rational multiplied by any terms of the Mul that are
independent of deps.
args should be a tuple of all other terms of m; args is empty
if self is a Number or if self is independent of deps (when given).
This should be used when you don't know if self is a Mul or not but
you want to treat self as a Mul or if you want to process the
individual arguments of the tail of self as a Mul.
- if you know self is a Mul and want only the head, use self.args[0];
- if you don't want to process the arguments of the tail but need the
tail then use self.as_two_terms() which gives the head and tail;
- if you want to split self into an independent and dependent parts
use ``self.as_independent(*deps)``
>>> from sympy import S
>>> from sympy.abc import x, y
>>> (S(3)).as_coeff_mul()
(3, ())
>>> (3*x*y).as_coeff_mul()
(3, (x, y))
>>> (3*x*y).as_coeff_mul(x)
(3*y, (x,))
>>> (3*y).as_coeff_mul(x)
(3*y, ())
"""
if deps:
if not self.has(*deps):
return self, tuple()
return S.One, (self,)
def as_coeff_add(self, *deps):
"""Return the tuple (c, args) where self is written as an Add, ``a``.
c should be a Rational added to any terms of the Add that are
independent of deps.
args should be a tuple of all other terms of ``a``; args is empty
if self is a Number or if self is independent of deps (when given).
This should be used when you don't know if self is an Add or not but
you want to treat self as an Add or if you want to process the
individual arguments of the tail of self as an Add.
- if you know self is an Add and want only the head, use self.args[0];
- if you don't want to process the arguments of the tail but need the
tail then use self.as_two_terms() which gives the head and tail.
- if you want to split self into an independent and dependent parts
use ``self.as_independent(*deps)``
>>> from sympy import S
>>> from sympy.abc import x, y
>>> (S(3)).as_coeff_add()
(3, ())
>>> (3 + x).as_coeff_add()
(3, (x,))
>>> (3 + x + y).as_coeff_add(x)
(y + 3, (x,))
>>> (3 + y).as_coeff_add(x)
(y + 3, ())
"""
if deps:
if not self.has(*deps):
return self, tuple()
return S.Zero, (self,)
def primitive(self):
"""Return the positive Rational that can be extracted non-recursively
from every term of self (i.e., self is treated like an Add). This is
like the as_coeff_Mul() method but primitive always extracts a positive
Rational (never a negative or a Float).
Examples
========
>>> from sympy.abc import x
>>> (3*(x + 1)**2).primitive()
(3, (x + 1)**2)
>>> a = (6*x + 2); a.primitive()
(2, 3*x + 1)
>>> b = (x/2 + 3); b.primitive()
(1/2, x + 6)
>>> (a*b).primitive() == (1, a*b)
True
"""
if not self:
return S.One, S.Zero
c, r = self.as_coeff_Mul(rational=True)
if c.is_negative:
c, r = -c, -r
return c, r
def as_content_primitive(self, radical=False):
"""This method should recursively remove a Rational from all arguments
and return that (content) and the new self (primitive). The content
should always be positive and ``Mul(*foo.as_content_primitive()) == foo``.
        The primitive need not be in canonical form and should try to preserve
the underlying structure if possible (i.e. expand_mul should not be
applied to self).
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x, y, z
>>> eq = 2 + 2*x + 2*y*(3 + 3*y)
The as_content_primitive function is recursive and retains structure:
>>> eq.as_content_primitive()
(2, x + 3*y*(y + 1) + 1)
Integer powers will have Rationals extracted from the base:
>>> ((2 + 6*x)**2).as_content_primitive()
(4, (3*x + 1)**2)
>>> ((2 + 6*x)**(2*y)).as_content_primitive()
(1, (2*(3*x + 1))**(2*y))
Terms may end up joining once their as_content_primitives are added:
>>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive()
(11, x*(y + 1))
>>> ((3*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive()
(9, x*(y + 1))
>>> ((3*(z*(1 + y)) + 2.0*x*(3 + 3*y))).as_content_primitive()
(1, 6.0*x*(y + 1) + 3*z*(y + 1))
>>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))**2).as_content_primitive()
(121, x**2*(y + 1)**2)
>>> ((5*(x*(1 + y)) + 2.0*x*(3 + 3*y))**2).as_content_primitive()
(1, 121.0*x**2*(y + 1)**2)
Radical content can also be factored out of the primitive:
>>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)
(2, sqrt(2)*(1 + 2*sqrt(5)))
"""
return S.One, self
def as_numer_denom(self):
""" expression -> a/b -> a, b
This is just a stub that should be defined by
an object's class methods to get anything else.
See Also
========
normal: return a/b instead of a, b
"""
return self, S.One
def normal(self):
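        # Recombine as_numer_denom() into a single quotient: an expression
        # whose as_numer_denom() is (a, b) comes back as a/b, or just a when
        # b is S.One.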
n, d = self.as_numer_denom()
if d is S.One:
return n
return n/d
def extract_multiplicatively(self, c):
"""Return None if it's not possible to make self in the form
c * something in a nice way, i.e. preserving the properties
of arguments of self.
>>> from sympy import symbols, Rational
>>> x, y = symbols('x,y', real=True)
>>> ((x*y)**3).extract_multiplicatively(x**2 * y)
x*y**2
>>> ((x*y)**3).extract_multiplicatively(x**4 * y)
>>> (2*x).extract_multiplicatively(2)
x
>>> (2*x).extract_multiplicatively(3)
>>> (Rational(1,2)*x).extract_multiplicatively(3)
x/6
"""
c = sympify(c)
if c is S.One:
return self
elif c == self:
return S.One
if c.is_Add:
cc, pc = c.primitive()
if cc is not S.One:
c = Mul(cc, pc, evaluate=False)
if c.is_Mul:
a, b = c.as_two_terms()
x = self.extract_multiplicatively(a)
if x is not None:
return x.extract_multiplicatively(b)
quotient = self / c
if self.is_Number:
if self is S.Infinity:
if c.is_positive:
return S.Infinity
elif self is S.NegativeInfinity:
if c.is_negative:
return S.Infinity
elif c.is_positive:
return S.NegativeInfinity
elif self is S.ComplexInfinity:
if not c.is_zero:
return S.ComplexInfinity
elif self is S.NaN:
return S.NaN
elif self.is_Integer:
if not quotient.is_Integer:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_Rational:
if not quotient.is_Rational:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_Float:
if not quotient.is_Float:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_NumberSymbol or self.is_Symbol or self is S.ImaginaryUnit:
if quotient.is_Mul and len(quotient.args) == 2:
if quotient.args[0].is_Integer and quotient.args[0].is_positive and quotient.args[1] == self:
return quotient
elif quotient.is_Integer:
return quotient
elif self.is_Add:
cs, ps = self.primitive()
if cs is not S.One:
return Mul(cs, ps, evaluate=False).extract_multiplicatively(c)
newargs = []
for arg in self.args:
newarg = arg.extract_multiplicatively(c)
if newarg is not None:
newargs.append(newarg)
else:
return None
return Add(*newargs)
elif self.is_Mul:
args = list(self.args)
for i, arg in enumerate(args):
newarg = arg.extract_multiplicatively(c)
if newarg is not None:
args[i] = newarg
return Mul(*args)
elif self.is_Pow:
if c.is_Pow and c.base == self.base:
new_exp = self.exp.extract_additively(c.exp)
if new_exp is not None:
return self.base ** (new_exp)
elif c == self.base:
new_exp = self.exp.extract_additively(1)
if new_exp is not None:
return self.base ** (new_exp)
def extract_additively(self, c):
"""Return self - c if it's possible to subtract c from self and
make all matching coefficients move towards zero, else return None.
Examples
========
>>> from sympy import S
>>> from sympy.abc import x, y
>>> e = 2*x + 3
>>> e.extract_additively(x + 1)
x + 2
>>> e.extract_additively(3*x)
>>> e.extract_additively(4)
>>> (y*(x + 1)).extract_additively(x + 1)
>>> ((x + 1)*(x + 2*y + 1) + 3).extract_additively(x + 1)
(x + 1)*(x + 2*y) + 3
Sometimes auto-expansion will return a less simplified result
than desired; gcd_terms might be used in such cases:
>>> from sympy import gcd_terms
>>> (4*x*(y + 1) + y).extract_additively(x)
4*x*(y + 1) + x*(4*y + 3) - x*(4*y + 4) + y
>>> gcd_terms(_)
x*(4*y + 3) + y
See Also
========
extract_multiplicatively
coeff
as_coefficient
"""
c = sympify(c)
if c is S.Zero:
return self
elif c == self:
return S.Zero
elif self is S.Zero:
return None
if self.is_Number:
if not c.is_Number:
return None
co = self
diff = co - c
            # XXX should we match types? i.e. should 3 - .1 succeed?
if (co > 0 and diff > 0 and diff < co or
co < 0 and diff < 0 and diff > co):
return diff
return None
if c.is_Number:
co, t = self.as_coeff_Add()
xa = co.extract_additively(c)
if xa is None:
return None
return xa + t
# handle the args[0].is_Number case separately
# since we will have trouble looking for the coeff of
# a number.
if c.is_Add and c.args[0].is_Number:
# whole term as a term factor
co = self.coeff(c)
xa0 = (co.extract_additively(1) or 0)*c
if xa0:
diff = self - co*c
return (xa0 + (diff.extract_additively(c) or diff)) or None
# term-wise
h, t = c.as_coeff_Add()
sh, st = self.as_coeff_Add()
xa = sh.extract_additively(h)
if xa is None:
return None
xa2 = st.extract_additively(t)
if xa2 is None:
return None
return xa + xa2
# whole term as a term factor
co = self.coeff(c)
xa0 = (co.extract_additively(1) or 0)*c
if xa0:
diff = self - co*c
return (xa0 + (diff.extract_additively(c) or diff)) or None
# term-wise
coeffs = []
for a in Add.make_args(c):
ac, at = a.as_coeff_Mul()
co = self.coeff(at)
if not co:
return None
coc, cot = co.as_coeff_Add()
xa = coc.extract_additively(ac)
if xa is None:
return None
self -= co*at
coeffs.append((cot + xa)*at)
coeffs.append(self)
return Add(*coeffs)
def could_extract_minus_sign(self):
"""Canonical way to choose an element in the set {e, -e} where
e is any expression. If the canonical element is e, we have
e.could_extract_minus_sign() == True, else
e.could_extract_minus_sign() == False.
For any expression, the set ``{e.could_extract_minus_sign(),
(-e).could_extract_minus_sign()}`` must be ``{True, False}``.
>>> from sympy.abc import x, y
>>> (x-y).could_extract_minus_sign() != (y-x).could_extract_minus_sign()
True
"""
negative_self = -self
self_has_minus = (self.extract_multiplicatively(-1) is not None)
negative_self_has_minus = (
(negative_self).extract_multiplicatively(-1) is not None)
if self_has_minus != negative_self_has_minus:
return self_has_minus
else:
if self.is_Add:
                # We choose the one with fewer arguments with minus signs
all_args = len(self.args)
negative_args = len([False for arg in self.args if arg.could_extract_minus_sign()])
positive_args = all_args - negative_args
if positive_args > negative_args:
return False
elif positive_args < negative_args:
return True
elif self.is_Mul:
# We choose the one with an odd number of minus signs
num, den = self.as_numer_denom()
args = Mul.make_args(num) + Mul.make_args(den)
arg_signs = [arg.could_extract_minus_sign() for arg in args]
negative_args = filter(None, arg_signs)
return len(negative_args) % 2 == 1
# As a last resort, we choose the one with greater value of .sort_key()
return self.sort_key() < negative_self.sort_key()
def extract_branch_factor(self, allow_half=False):
"""
Try to write self as ``exp_polar(2*pi*I*n)*z`` in a nice way.
Return (z, n).
>>> from sympy import exp_polar, I, pi
>>> from sympy.abc import x, y
>>> exp_polar(I*pi).extract_branch_factor()
(exp_polar(I*pi), 0)
>>> exp_polar(2*I*pi).extract_branch_factor()
(1, 1)
>>> exp_polar(-pi*I).extract_branch_factor()
(exp_polar(I*pi), -1)
>>> exp_polar(3*pi*I + x).extract_branch_factor()
(exp_polar(x + I*pi), 1)
>>> (y*exp_polar(-5*pi*I)*exp_polar(3*pi*I + 2*pi*x)).extract_branch_factor()
(y*exp_polar(2*pi*x), -1)
>>> exp_polar(-I*pi/2).extract_branch_factor()
(exp_polar(-I*pi/2), 0)
If allow_half is True, also extract exp_polar(I*pi):
>>> exp_polar(I*pi).extract_branch_factor(allow_half=True)
(1, 1/2)
>>> exp_polar(2*I*pi).extract_branch_factor(allow_half=True)
(1, 1)
>>> exp_polar(3*I*pi).extract_branch_factor(allow_half=True)
(1, 3/2)
>>> exp_polar(-I*pi).extract_branch_factor(allow_half=True)
(1, -1/2)
"""
from sympy import exp_polar, pi, I, ceiling, Add
n = S(0)
res = S(1)
args = Mul.make_args(self)
exps = []
for arg in args:
if arg.func is exp_polar:
exps += [arg.exp]
else:
res *= arg
piimult = S(0)
extras = []
while exps:
exp = exps.pop()
if exp.is_Add:
exps += exp.args
continue
if exp.is_Mul:
coeff = exp.as_coefficient(pi*I)
if coeff is not None:
piimult += coeff
continue
extras += [exp]
if not piimult.free_symbols:
coeff = piimult
tail = ()
else:
coeff, tail = piimult.as_coeff_add(*piimult.free_symbols)
# round down to nearest multiple of 2
branchfact = ceiling(coeff/2 - S(1)/2)*2
n += branchfact/2
c = coeff - branchfact
if allow_half:
nc = c.extract_additively(1)
if nc is not None:
n += S(1)/2
c = nc
newexp = pi*I*Add(*((c, ) + tail)) + Add(*extras)
if newexp != 0:
res *= exp_polar(newexp)
return res, n
def _eval_is_polynomial(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_polynomial(self, *syms):
"""
Return True if self is a polynomial in syms and False otherwise.
This checks if self is an exact polynomial in syms. This function
returns False for expressions that are "polynomials" with symbolic
exponents. Thus, you should be able to apply polynomial algorithms to
expressions for which this returns True, and Poly(expr, \*syms) should
work if and only if expr.is_polynomial(\*syms) returns True. The
polynomial does not have to be in expanded form. If no symbols are
given, all free symbols in the expression will be used.
This is not part of the assumptions system. You cannot do
Symbol('z', polynomial=True).
Examples
========
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> ((x**2 + 1)**4).is_polynomial(x)
True
>>> ((x**2 + 1)**4).is_polynomial()
True
>>> (2**x + 1).is_polynomial(x)
False
>>> n = Symbol('n', nonnegative=True, integer=True)
>>> (x**n + 1).is_polynomial(x)
False
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be a polynomial to
become one.
>>> from sympy import sqrt, factor, cancel
>>> y = Symbol('y', positive=True)
>>> a = sqrt(y**2 + 2*y + 1)
>>> a.is_polynomial(y)
False
>>> factor(a)
y + 1
>>> factor(a).is_polynomial(y)
True
>>> b = (y**2 + 2*y + 1)/(y + 1)
>>> b.is_polynomial(y)
False
>>> cancel(b)
y + 1
>>> cancel(b).is_polynomial(y)
True
See also .is_rational_function()
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant polynomial
return True
else:
return self._eval_is_polynomial(syms)
def _eval_is_rational_function(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_rational_function(self, *syms):
"""
Test whether function is a ratio of two polynomials in the given
symbols, syms. When syms is not given, all free symbols will be used.
The rational function does not have to be in expanded or in any kind of
canonical form.
This function returns False for expressions that are "rational
functions" with symbolic exponents. Thus, you should be able to call
.as_numer_denom() and apply polynomial algorithms to the result for
expressions for which this returns True.
This is not part of the assumptions system. You cannot do
Symbol('z', rational_function=True).
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.abc import x, y
>>> (x/y).is_rational_function()
True
>>> (x**2).is_rational_function()
True
>>> (x/sin(y)).is_rational_function(y)
False
>>> n = Symbol('n', integer=True)
>>> (x**n + 1).is_rational_function(x)
False
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be a rational function
to become one.
>>> from sympy import sqrt, factor, cancel
>>> y = Symbol('y', positive=True)
>>> a = sqrt(y**2 + 2*y + 1)/y
>>> a.is_rational_function(y)
False
>>> factor(a)
(y + 1)/y
>>> factor(a).is_rational_function(y)
True
        See also .is_polynomial().
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant rational function
return True
else:
return self._eval_is_rational_function(syms)
###################################################################################
##################### SERIES, LEADING TERM, LIMIT, ORDER METHODS ##################
###################################################################################
def series(self, x=None, x0=0, n=6, dir="+"):
"""
Series expansion of "self" around ``x = x0`` yielding either terms of
the series one by one (the lazy series given when n=None), else
all the terms at once when n != None.
        Note: when n != None, if an O() term is returned then the x in it
        and the entire expression represents x - x0, the displacement
        from x0. (If there is no O() term then the series was exact and x has
        its normal meaning.) This is currently necessary since sympy's O()
can only represent terms at x0=0. So instead of::
cos(x).series(x0=1, n=2) --> (1 - x)*sin(1) + cos(1) + O((x - 1)**2)
which graphically looks like this::
|
.|. . .
. | \ . .
---+----------------------
| . . . .
| \
x=0
the following is returned instead::
-x*sin(1) + cos(1) + O(x**2)
whose graph is this::
\ |
. .| . .
. \ . .
-----+\------------------.
| . . . .
| \
x=0
which is identical to ``cos(x + 1).series(n=2)``.
Usage:
Returns the series expansion of "self" around the point ``x = x0``
with respect to ``x`` up to O(x**n) (default n is 6).
If ``x=None`` and ``self`` is univariate, the univariate symbol will
be supplied, otherwise an error will be raised.
>>> from sympy import cos, exp
>>> from sympy.abc import x, y
>>> cos(x).series()
1 - x**2/2 + x**4/24 + O(x**6)
>>> cos(x).series(n=4)
1 - x**2/2 + O(x**4)
>>> e = cos(x + exp(y))
>>> e.series(y, n=2)
cos(x + 1) - y*sin(x + 1) + O(y**2)
>>> e.series(x, n=2)
cos(exp(y)) - x*sin(exp(y)) + O(x**2)
If ``n=None`` then an iterator of the series terms will be returned.
>>> term=cos(x).series(n=None)
>>> [term.next() for i in range(2)]
[1, -x**2/2]
For ``dir=+`` (default) the series is calculated from the right and
for ``dir=-`` the series from the left. For smooth functions this
flag will not alter the results.
>>> abs(x).series(dir="+")
x
>>> abs(x).series(dir="-")
-x
"""
from sympy import collect
if x is None:
syms = self.atoms(C.Symbol)
if len(syms) > 1:
raise ValueError('x must be given for multivariate functions.')
x = syms.pop()
if not self.has(x):
if n is None:
return (s for s in [self])
else:
return self
## it seems like the following should be doable, but several failures
        ## then occur. Is this related to issue 1747 et al.? See also XPOS below.
#if x.is_positive is x.is_negative is None:
# # replace x with an x that has a positive assumption
# xpos = C.Dummy('x', positive=True)
# rv = self.subs(x, xpos).series(xpos, x0, n, dir)
# if n is None:
# return (s.subs(xpos, x) for s in rv)
# else:
# return rv.subs(xpos, x)
if len(dir) != 1 or dir not in '+-':
raise ValueError("Dir must be '+' or '-'")
if x0 in [S.Infinity, S.NegativeInfinity]:
dir = {S.Infinity: '+', S.NegativeInfinity: '-'}[x0]
s = self.subs(x, 1/x).series(x, n=n, dir=dir)
if n is None:
return (si.subs(x, 1/x) for si in s)
# don't include the order term since it will eat the larger terms
return s.removeO().subs(x, 1/x)
# use rep to shift origin to x0 and change sign (if dir is negative)
# and undo the process with rep2
if x0 or dir == '-':
if dir == '-':
rep = -x + x0
rep2 = -x
rep2b = x0
else:
rep = x + x0
rep2 = x
rep2b = -x0
s = self.subs(x, rep).series(x, x0=0, n=n, dir='+')
if n is None: # lseries...
return (si.subs(x, rep2 + rep2b) for si in s)
# nseries...
o = s.getO() or S.Zero
s = s.removeO()
if o and x0:
rep2b = 0 # when O() can handle x0 != 0 this can be removed
return s.subs(x, rep2 + rep2b) + o
# from here on it's x0=0 and dir='+' handling
if n is not None: # nseries handling
s1 = self._eval_nseries(x, n=n, logx=None)
o = s1.getO() or S.Zero
if o:
# make sure the requested order is returned
ngot = o.getn()
if ngot > n:
# leave o in its current form (e.g. with x*log(x)) so
# it eats terms properly, then replace it below
s1 += o.subs(x, x**C.Rational(n, ngot))
elif ngot < n:
# increase the requested number of terms to get the desired
# number keep increasing (up to 9) until the received order
# is different than the original order and then predict how
# many additional terms are needed
for more in range(1, 9):
s1 = self._eval_nseries(x, n=n + more, logx=None)
newn = s1.getn()
if newn != ngot:
ndo = n + (n - ngot)*more/(newn - ngot)
s1 = self._eval_nseries(x, n=ndo, logx=None)
# if this assertion fails then our ndo calculation
# needs modification
assert s1.getn() == n
break
else:
raise ValueError('Could not calculate %s terms for %s'
% (str(n), self))
o = s1.getO()
s1 = s1.removeO()
else:
o = C.Order(x**n)
if (s1 + o).removeO() == s1:
o = S.Zero
try:
return collect(s1, x) + o
except NotImplementedError:
return s1 + o
else: # lseries handling
def yield_lseries(s):
"""Return terms of lseries one at a time."""
for si in s:
if not si.is_Add:
yield si
continue
# yield terms 1 at a time if possible
# by increasing order until all the
# terms have been returned
yielded = 0
o = C.Order(si)*x
ndid = 0
ndo = len(si.args)
while 1:
do = (si - yielded + o).removeO()
o *= x
if not do or do.is_Order:
continue
if do.is_Add:
ndid += len(do.args)
else:
ndid += 1
yield do
if ndid == ndo:
raise StopIteration
yielded += do
return yield_lseries(self.removeO()._eval_lseries(x))
def lseries(self, x=None, x0=0, dir='+'):
"""
Wrapper for series yielding an iterator of the terms of the series.
Note: an infinite series will yield an infinite iterator. The following,
        for example, will never terminate. It will just keep printing terms
of the sin(x) series::
for term in sin(x).lseries(x):
print term
The advantage of lseries() over nseries() is that many times you are
just interested in the next term in the series (i.e. the first term for
example), but you don't know how many you should ask for in nseries()
using the "n" parameter.
See also nseries().
"""
return self.series(x, x0, n=None, dir=dir)
def _eval_lseries(self, x):
# default implementation of lseries is using nseries(), and adaptively
# increasing the "n". As you can see, it is not very efficient, because
# we are calculating the series over and over again. Subclasses should
# override this method and implement much more efficient yielding of
# terms.
n = 0
series = self._eval_nseries(x, n=n, logx=None)
if not series.is_Order:
if series.is_Add:
yield series.removeO()
else:
yield series
raise StopIteration
while series.is_Order:
n += 1
series = self._eval_nseries(x, n=n, logx=None)
e = series.removeO()
yield e
while 1:
while 1:
n += 1
series = self._eval_nseries(x, n=n, logx=None).removeO()
if e != series:
break
yield series - e
e = series
def nseries(self, x=None, x0=0, n=6, dir='+', logx=None):
"""
Wrapper to _eval_nseries if assumptions allow, else to series.
If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is
called. This calculates "n" terms in the innermost expressions and
then builds up the final series just by "cross-multiplying" everything
out.
Advantage -- it's fast, because we don't have to determine how many
terms we need to calculate in advance.
Disadvantage -- you may end up with less terms than you may have
expected, but the O(x**n) term appended will always be correct and
so the result, though perhaps shorter, will also be correct.
If any of those assumptions is not met, this is treated like a
wrapper to series which will try harder to return the correct
number of terms.
See also lseries().
"""
if x and not self.has(x):
return self
if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):
assert logx is None
return self.series(x, x0, n, dir)
else:
return self._eval_nseries(x, n=n, logx=logx)
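    # A hedged illustration of the fast path described above (assuming the
    # usual sympy namespace); nseries is called directly because x0 == 0
    # and dir == '+':
    #
    #     >>> from sympy import sin
    #     >>> from sympy.abc import x
    #     >>> sin(x).nseries(x, n=4)
    #     x - x**3/6 + O(x**4)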
def _eval_nseries(self, x, n, logx):
"""
Return terms of series for self up to O(x**n) at x=0
from the positive direction.
This is a method that should be overridden in subclasses. Users should
never call this method directly (use .nseries() instead), so you don't
have to write docstrings for _eval_nseries().
"""
from sympy.utilities.misc import filldedent
raise NotImplementedError(filldedent("""
The _eval_nseries method should be added to
%s to give terms up to O(x**n) at x=0
from the positive direction so it is available when
nseries calls it.""" % self.func)
)
def limit(self, x, xlim, dir='+'):
""" Compute limit x->xlim.
"""
from sympy.series.limits import limit
return limit(self, x, xlim, dir)
def compute_leading_term(self, x, skip_abs=False, logx=None):
"""
as_leading_term is only allowed for results of .series()
This is a wrapper to compute a series first.
If skip_abs is true, the absolute term is assumed to be zero.
(This is necessary because sometimes it cannot be simplified
to zero without a lot of work, but is still known to be zero.
See log._eval_nseries for an example.)
If skip_log is true, log(x) is treated as an independent symbol.
(This is needed for the gruntz algorithm.)
"""
from sympy.series.gruntz import calculate_series
from sympy import cancel
if self.removeO() == 0:
return self
if logx is None:
d = C.Dummy('logx')
s = calculate_series(self, x, skip_abs, d).subs(d, C.log(x))
else:
s = calculate_series(self, x, skip_abs, logx)
s = cancel(s)
if skip_abs:
s = expand_mul(s).as_independent(x)[1]
return s.as_leading_term(x)
@cacheit
def as_leading_term(self, *symbols):
"""
Returns the leading (nonzero) term of the series expansion of self.
The _eval_as_leading_term routines are used to do this, and they must
always return a non-zero value.
Examples
========
>>> from sympy.abc import x
>>> (1 + x + x**2).as_leading_term(x)
1
>>> (1/x**2 + x + x**2).as_leading_term(x)
x**(-2)
"""
from sympy import powsimp
if len(symbols) > 1:
c = self
for x in symbols:
c = c.as_leading_term(x)
return c
elif not symbols:
return self
x = sympify(symbols[0])
if not x.is_Symbol:
raise ValueError('expecting a Symbol but got %s' % x)
if x not in self.free_symbols:
return self
obj = self._eval_as_leading_term(x)
if obj is not None:
return powsimp(obj, deep=True, combine='exp')
raise NotImplementedError('as_leading_term(%s, %s)' % (self, x))
def _eval_as_leading_term(self, x):
return self
def as_coeff_exponent(self, x):
""" ``c*x**e -> c,e`` where x can be any symbolic expression.
"""
from sympy import collect
s = collect(self, x)
c, p = s.as_coeff_mul(x)
if len(p) == 1:
b, e = p[0].as_base_exp()
if b == x:
return c, e
return s, S.Zero
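    # For instance, a sketch of the contract above:
    #     (3*x**2).as_coeff_exponent(x)      ->  (3, 2)
    #     (3*x**2 + x).as_coeff_exponent(x)  ->  (3*x**2 + x, 0)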
def leadterm(self, x):
"""
Returns the leading term a*x**b as a tuple (a, b).
Examples
========
>>> from sympy.abc import x
>>> (1+x+x**2).leadterm(x)
(1, 0)
>>> (1/x**2+x+x**2).leadterm(x)
(1, -2)
"""
c, e = self.as_leading_term(x).as_coeff_exponent(x)
if x in c.free_symbols:
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
cannot compute leadterm(%s, %s). The coefficient
should have been free of x but got %s""" % (self, x, c)))
return c, e
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
return S.One, self
def as_coeff_Add(self):
"""Efficiently extract the coefficient of a summation. """
return S.Zero, self
###################################################################################
##################### DERIVATIVE, INTEGRAL, FUNCTIONAL METHODS ####################
###################################################################################
def diff(self, *symbols, **assumptions):
new_symbols = map(sympify, symbols) # e.g. x, 2, y, z
assumptions.setdefault("evaluate", True)
return Derivative(self, *new_symbols, **assumptions)
###########################################################################
###################### EXPRESSION EXPANSION METHODS #######################
###########################################################################
# Relevant subclasses should override _eval_expand_hint() methods. See
# the docstring of expand() for more info.
def _eval_expand_complex(self, **hints):
real, imag = self.as_real_imag(**hints)
return real + S.ImaginaryUnit*imag
@staticmethod
def _expand_hint(expr, hint, deep=True, **hints):
"""
Helper for ``expand()``. Recursively calls ``expr._eval_expand_hint()``.
Returns ``(expr, hit)``, where expr is the (possibly) expanded
``expr`` and ``hit`` is ``True`` if ``expr`` was truly expanded and
``False`` otherwise.
"""
hit = False
# XXX: Hack to support non-Basic args
# |
# V
if deep and getattr(expr, 'args', ()) and not expr.is_Atom:
sargs = []
for arg in expr.args:
arg, arghit = Expr._expand_hint(arg, hint, **hints)
hit |= arghit
sargs.append(arg)
if hit:
expr = expr.func(*sargs)
if hasattr(expr, '_eval_expand_' + hint):
newexpr = getattr(expr, '_eval_expand_' + hint)(**hints)
if newexpr != expr:
return (newexpr, True)
return (expr, hit)
@cacheit
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""
Expand an expression using hints.
See the docstring of the expand() function in sympy.core.function for
more information.
"""
from sympy.simplify.simplify import fraction
hints.update(power_base=power_base, power_exp=power_exp, mul=mul,
log=log, multinomial=multinomial, basic=basic)
expr = self
if hints.pop('frac', False):
n, d = [a.expand(deep=deep, modulus=modulus, **hints)
for a in fraction(self)]
return n/d
elif hints.pop('denom', False):
n, d = fraction(self)
return n/d.expand(deep=deep, modulus=modulus, **hints)
elif hints.pop('numer', False):
n, d = fraction(self)
return n.expand(deep=deep, modulus=modulus, **hints)/d
# Although the hints are sorted here, an earlier hint may get applied
# at a given node in the expression tree before another because of how
# the hints are applied. e.g. expand(log(x*(y + z))) -> log(x*y +
# x*z) because while applying log at the top level, log and mul are
# applied at the deeper level in the tree so that when the log at the
# upper level gets applied, the mul has already been applied at the
# lower level.
# Additionally, because hints are only applied once, the expression
# may not be expanded all the way. For example, if mul is applied
# before multinomial, x*(x + 1)**2 won't be expanded all the way. For
# now, we just use a special case to make multinomial run before mul,
# so that at least polynomials will be expanded all the way. In the
# future, smarter heuristics should be applied.
# TODO: Smarter heuristics
def _expand_hint_key(hint):
"""Make multinomial come before mul"""
if hint == 'mul':
return 'mulz'
return hint
for hint in sorted(hints.keys(), key=_expand_hint_key):
use_hint = hints[hint]
if use_hint:
expr, hit = Expr._expand_hint(expr, hint, deep=deep, **hints)
if modulus is not None:
modulus = sympify(modulus)
if not modulus.is_Integer or modulus <= 0:
raise ValueError(
"modulus must be a positive integer, got %s" % modulus)
terms = []
for term in Add.make_args(expr):
coeff, tail = term.as_coeff_Mul(rational=True)
coeff %= modulus
if coeff:
terms.append(coeff*tail)
expr = Add(*terms)
return expr
###########################################################################
################### GLOBAL ACTION VERB WRAPPER METHODS ####################
###########################################################################
def integrate(self, *args, **kwargs):
"""See the integrate function in sympy.integrals"""
from sympy.integrals import integrate
return integrate(self, *args, **kwargs)
def simplify(self, ratio=1.7, measure=None):
"""See the simplify function in sympy.simplify"""
from sympy.simplify import simplify
from sympy.core.function import count_ops
measure = measure or count_ops
return simplify(self, ratio, measure)
def nsimplify(self, constants=[], tolerance=None, full=False):
"""See the nsimplify function in sympy.simplify"""
from sympy.simplify import nsimplify
return nsimplify(self, constants, tolerance, full)
def separate(self, deep=False, force=False):
"""See the separate function in sympy.simplify"""
from sympy.simplify import separate
return separate(self, deep)
def collect(self, syms, func=None, evaluate=True, exact=False, distribute_order_term=True):
"""See the collect function in sympy.simplify"""
from sympy.simplify import collect
return collect(self, syms, func, evaluate, exact, distribute_order_term)
def together(self, *args, **kwargs):
"""See the together function in sympy.polys"""
from sympy.polys import together
return together(self, *args, **kwargs)
def apart(self, x=None, **args):
"""See the apart function in sympy.polys"""
from sympy.polys import apart
return apart(self, x, **args)
def ratsimp(self):
"""See the ratsimp function in sympy.simplify"""
from sympy.simplify import ratsimp
return ratsimp(self)
def trigsimp(self, **args):
"""See the trigsimp function in sympy.simplify"""
from sympy.simplify import trigsimp
return trigsimp(self, **args)
def radsimp(self):
"""See the radsimp function in sympy.simplify"""
from sympy.simplify import radsimp
return radsimp(self)
def powsimp(self, deep=False, combine='all'):
"""See the powsimp function in sympy.simplify"""
from sympy.simplify import powsimp
return powsimp(self, deep, combine)
def combsimp(self):
"""See the combsimp function in sympy.simplify"""
from sympy.simplify import combsimp
return combsimp(self)
def factor(self, *gens, **args):
"""See the factor() function in sympy.polys.polytools"""
from sympy.polys import factor
return factor(self, *gens, **args)
def refine(self, assumption=True):
"""See the refine function in sympy.assumptions"""
from sympy.assumptions import refine
return refine(self, assumption)
def cancel(self, *gens, **args):
"""See the cancel function in sympy.polys"""
from sympy.polys import cancel
return cancel(self, *gens, **args)
def invert(self, g):
"""See the invert function in sympy.polys"""
from sympy.polys import invert
return invert(self, g)
def round(self, p=0):
"""Return x rounded to the given decimal place.
        If a complex number would result, apply round to the real
and imaginary components of the number.
Examples
========
>>> from sympy import pi, E, I, S, Add, Mul, Number
>>> S(10.5).round()
11.
>>> pi.round()
3.
>>> pi.round(2)
3.14
>>> (2*pi + E*I).round() #doctest: +SKIP
6. + 3.*I
The round method has a chopping effect:
>>> (2*pi + I/10).round()
6.
>>> (pi/10 + 2*I).round() #doctest: +SKIP
2.*I
>>> (pi/10 + E*I).round(2)
0.31 + 2.72*I
Notes
=====
Do not confuse the Python builtin function, round, with the
SymPy method of the same name. The former always returns a float
(or raises an error if applied to a complex value) while the
latter returns either a Number or a complex number:
>>> isinstance(round(S(123), -2), Number)
False
>>> isinstance(S(123).round(-2), Number)
True
>>> isinstance((3*I).round(), Mul)
True
>>> isinstance((1 + 3*I).round(), Add)
True
"""
from sympy.functions.elementary.exponential import log
x = self
if not x.is_number:
raise TypeError('%s is not a number' % x)
if not x.is_real:
i, r = x.as_real_imag()
return i.round(p) + S.ImaginaryUnit*r.round(p)
if not x:
return x
p = int(p)
precs = [f._prec for f in x.atoms(C.Float)]
dps = prec_to_dps(max(precs)) if precs else None
mag_first_dig = _mag(x)
allow = digits_needed = mag_first_dig + p
if dps is not None and allow > dps:
allow = dps
mag = Pow(10, p) # magnitude needed to bring digit p to units place
x += 1/(2*mag) # add the half for rounding
i10 = 10*mag*x.n((dps if dps is not None else digits_needed) + 1)
rv = Integer(i10)//10
q = 1
if p > 0:
q = mag
elif p < 0:
rv /= mag
rv = Rational(rv, q)
if rv.is_Integer:
# use str or else it won't be a float
return C.Float(str(rv), digits_needed)
else:
return C.Float(rv, allow)
class AtomicExpr(Atom, Expr):
"""
    A parent class for objects which are both atoms and Exprs.
For example: Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = []
def _eval_derivative(self, s):
if self == s:
return S.One
return S.Zero
def _eval_is_polynomial(self, syms):
return True
def _eval_is_rational_function(self, syms):
return True
def _eval_nseries(self, x, n, logx):
return self
def _mag(x):
"""Return integer ``i`` such that .1 <= x/10**i < 1
Examples
========
>>> from sympy.core.expr import _mag
>>> from sympy import Float
>>> _mag(Float(.1))
0
>>> _mag(Float(.01))
-1
>>> _mag(Float(1234))
4
"""
from math import log10, ceil, log
xpos = abs(x.n())
if not xpos:
return S.Zero
try:
mag_first_dig = int(ceil(log10(xpos)))
except (ValueError, OverflowError):
mag_first_dig = int(ceil(C.Float(mpf_log(xpos._mpf_, 53))/log(10)))
# check that we aren't off by 1
if (xpos/10**mag_first_dig) >= 1:
assert 1 <= (xpos/10**mag_first_dig) < 10
mag_first_dig += 1
return mag_first_dig
from mul import Mul
from add import Add
from power import Pow
from function import Derivative, expand_mul
from mod import Mod
from exprtools import factor_terms
from numbers import Integer, Rational
| amitjamadagni/sympy | sympy/core/expr.py | Python | bsd-3-clause | 102,305 | 0.000655 |
# This file is part of ArcJail.
#
# ArcJail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ArcJail is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ArcJail. If not, see <http://www.gnu.org/licenses/>.
from listeners.tick import Delay
from controlled_cvars.handlers import float_handler
from ...resource.strings import build_module_strings
from ..damage_hook import get_hook, protected_player_manager
from ..players import player_manager
from .. import build_module_config
from .base_classes.jail_game import JailGame
from . import game_event_handler, stage
strings_module = build_module_strings('lrs/win_reward')
config_manager = build_module_config('lrs/win_reward')
config_manager.controlled_cvar(
float_handler,
"duration",
default=10,
description="Duration of Win Reward"
)
config_manager.controlled_cvar(
float_handler,
"loser_speed",
default=0.5,
description="Loser's speed"
)
class WinReward(JailGame):
caption = "Win Reward"
stage_groups = {
'winreward-start': [
"equip-damage-hooks",
"set-start-status",
"winreward-entry",
],
'winreward-timed-out': ["winreward-timed-out", ],
}
def __init__(self, players, **kwargs):
super().__init__(players, **kwargs)
self._counters = {}
self._results = {
'winner': kwargs['winner'],
'loser': kwargs['loser'],
}
@stage('basegame-entry')
def stage_basegame_entry(self):
self.set_stage_group('winreward-start')
@stage('equip-damage-hooks')
def stage_equip_damage_hooks(self):
winner, loser = self._results['winner'], self._results['loser']
def hook_hurt_for_loser(counter, info):
return info.attacker == winner.index
for player in self._players:
p_player = protected_player_manager[player.index]
counter = self._counters[player.index] = p_player.new_counter()
if player == winner:
counter.hook_hurt = get_hook('SW')
else:
counter.hook_hurt = hook_hurt_for_loser
p_player.set_protected()
@stage('undo-equip-damage-hooks')
def stage_undo_equip_damage_hooks(self):
for player in self._players_all:
p_player = protected_player_manager[player.index]
p_player.delete_counter(self._counters[player.index])
p_player.unset_protected()
@stage('winreward-entry')
def stage_winreward_entry(self):
winner, loser = self._results['winner'], self._results['loser']
loser.speed = config_manager['loser_speed']
def timeout_callback():
self.set_stage_group('winreward-timed-out')
self._delays.append(
Delay(config_manager['duration'], timeout_callback))
@stage('winreward-timed-out')
    def stage_winreward_timed_out(self):
winner, loser = self._results['winner'], self._results['loser']
loser.take_damage(loser.health, attacker_index=winner.index)
@game_event_handler('jailgame-player-death', 'player_death')
def event_jailgame_player_death(self, game_event):
player = player_manager.get_by_userid(game_event['userid'])
if player not in self._players:
return
self._players.remove(player)
winner, loser = self._results['winner'], self._results['loser']
if player == winner:
loser.take_damage(loser.health + 1, attacker_index=winner.index)
self.set_stage_group('destroy')
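# A hedged usage sketch. In ArcJail the game framework normally constructs
# last-request games itself; the names below (winner, loser) are
# illustrative stand-ins for arcjail player wrappers:
#
#     game = WinReward([winner, loser], winner=winner, loser=loser)
#     game.set_stage_group('winreward-start')
#
# After config_manager['duration'] seconds the 'winreward-timed-out' stage
# kills the loser, crediting the winner as the attacker.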
| KirillMysnik/ArcJail | srcds/addons/source-python/plugins/arcjail/modules/lrs/win_reward.py | Python | gpl-3.0 | 4,013 | 0 |
import gtk
import cairo
import gobject
class CellRendererEmblems(gtk.CellRenderer):
"""Cell renderer that accepts list of icon names."""
__gproperties__ = {
'emblems': (
gobject.TYPE_PYOBJECT,
'Emblem list',
'List of icon names to display',
gobject.PARAM_READWRITE
),
'is-link': (
gobject.TYPE_BOOLEAN,
'Link indicator',
'Denotes if item is a link or regular file',
False,
gobject.PARAM_READWRITE
)
}
def __init__(self):
gtk.CellRenderer.__init__(self)
self.emblems = None
self.is_link = None
self.icon_size = 16
self.spacing = 2
self.padding = 1
def do_set_property(self, prop, value):
"""Set renderer property."""
if prop.name == 'emblems':
self.emblems = value
elif prop.name == 'is-link':
self.is_link = value
else:
setattr(self, prop.name, value)
def do_get_property(self, prop):
"""Get renderer property."""
if prop.name == 'emblems':
result = self.emblems
elif prop.name == 'is-link':
result = self.is_link
else:
result = getattr(self, prop.name)
return result
def do_render(self, window, widget, background_area, cell_area, expose_area, flags):
"""Render emblems on tree view."""
if not self.is_link and (self.emblems is None or len(self.emblems) == 0):
return
# cache constants locally
icon_size = self.icon_size
spacing = self.spacing
emblems = self.emblems or []
icon_theme = gtk.icon_theme_get_default()
context = window.cairo_create()
# add symbolic link emblem if needed
if self.is_link:
emblems.insert(0, 'emblem-symbolic-link')
# position of next icon
pos_x = cell_area[0] + cell_area[2]
pos_y = cell_area[1] + ((cell_area[3] - icon_size) / 2)
# draw all the icons
for emblem in emblems:
# load icon from the theme
			pixbuf = icon_theme.load_icon(emblem, icon_size, 0)
# move position of next icon
pos_x -= icon_size + spacing
# draw icon
context.set_source_pixbuf(pixbuf, pos_x, pos_y)
context.paint()
def do_get_size(self, widget, cell_area=None):
"""Calculate size taken by emblems."""
count = 5 # optimum size, we can still render more or less emblems
width = self.icon_size * count + (self.spacing * (count - 1))
height = self.icon_size
result = (
0,
0,
width + 2 * self.padding,
height + 2 * self.padding
)
return result
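# Minimal, hedged usage sketch: bind the renderer's 'emblems' property to a
# model column that holds lists of icon names. The store layout and the
# icon names used here are illustrative assumptions, not Sunflower's own.
if __name__ == '__main__':
	store = gtk.ListStore(str, gobject.TYPE_PYOBJECT)
	store.append(('document.txt', ['emblem-shared', 'emblem-readonly']))
	tree = gtk.TreeView(store)
	column = gtk.TreeViewColumn('Name')
	text_renderer = gtk.CellRendererText()
	emblem_renderer = CellRendererEmblems()
	column.pack_start(text_renderer, True)
	column.add_attribute(text_renderer, 'text', 0)
	column.pack_end(emblem_renderer, False)
	column.add_attribute(emblem_renderer, 'emblems', 1)
	tree.append_column(column)
	window = gtk.Window()
	window.add(tree)
	window.connect('destroy', gtk.main_quit)
	window.show_all()
	gtk.main()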
| Goodmind/sunflower-fm | application/widgets/emblems_renderer.py | Python | gpl-3.0 | 2,375 | 0.032842 |
#!/usr/bin/env python
"""
@file pythonPropsMSVC.py
@author Michael Behrisch
@author Daniel Krajzewicz
@author Jakob Erdmann
@date 2011
@version $Id: pythonPropsMSVC.py 14425 2013-08-16 20:11:47Z behrisch $
This script rebuilds "../../build/msvc10/python.props", the file which
gives information about the python includes and library.
SUMO, Simulation of Urban MObility; see http://sumo-sim.org/
Copyright (C) 2011-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import sys, distutils.sysconfig
from os.path import dirname, join
propsFile = join(dirname(__file__), '..', '..', 'build', 'msvc10', 'python.props')
print('generating %s ' % propsFile)
props = open(propsFile, 'w')
print >> props, """<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Label="UserMacros">
<PYTHON_LIB>%s\libs\python%s%s.lib</PYTHON_LIB>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>%s;%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>HAVE_PYTHON;%%(PreprocessorDefinitions)</PreprocessorDefinitions>
</ClCompile>
</ItemDefinitionGroup>
<ItemGroup>
<BuildMacro Include="PYTHON_LIB">
<Value>$(PYTHON_LIB)</Value>
</BuildMacro>
</ItemGroup>
</Project>""" % (sys.prefix, sys.version[0], sys.version[2],
distutils.sysconfig.get_config_var('INCLUDEPY'))
props.close()
| cathyyul/sumo-0.18 | tools/build/pythonPropsMSVC.py | Python | gpl-3.0 | 1,517 | 0.005274 |
# -*- coding: utf-8 -*-
"""Module providing views for the folderish content page type"""
from Acquisition import aq_inner
from Products.Five.browser import BrowserView
from zope.component import getMultiAdapter
IMG = 'data:image/gif;base64,R0lGODlhAQABAIAAAP///wAAACwAAAAAAQABAAACAkQBADs='
class ContentPageView(BrowserView):
""" Folderish content page default view """
def has_leadimage(self):
context = aq_inner(self.context)
try:
lead_img = context.image
except AttributeError:
lead_img = None
if lead_img is not None:
return True
return False
def display_gallery(self):
context = aq_inner(self.context)
try:
display = context.displayGallery
except AttributeError:
display = None
if display is not None:
return display
return False
def rendered_gallery(self):
context = aq_inner(self.context)
template = context.restrictedTraverse('@@gallery-view')()
return template
def image_data(self):
data = {}
sizes = ['small', 'medium', 'large']
idx = 0
for size in sizes:
            idx += 1
img = self._get_scaled_img(size)
data[size] = '{0} {1}w'.format(img['url'], img['width'])
return data
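    # A hedged illustration of the mapping built above; the URLs stand in
    # for whatever the 'images' scale adapter actually returns:
    #
    #     {'small':  'http://site/img/image-300.jpeg 300w',
    #      'medium': 'http://site/img/image-600.jpeg 600w',
    #      'large':  'http://site/img/image-900.jpeg 900w'}
    #
    # i.e. each value is ready to be joined into an HTML srcset attribute.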
def _get_scaled_img(self, size):
context = aq_inner(self.context)
scales = getMultiAdapter((context, self.request), name='images')
if size == 'small':
scale = scales.scale('image', width=300, height=300)
        elif size == 'medium':
scale = scales.scale('image', width=600, height=600)
else:
scale = scales.scale('image', width=900, height=900)
item = {}
if scale is not None:
item['url'] = scale.url
item['width'] = scale.width
item['height'] = scale.height
else:
item['url'] = IMG
item['width'] = '1px'
item['height'] = '1px'
return item
class GalleryPreview(BrowserView):
"""Preview embeddable image gallery"""
def __call__(self):
self.has_assets = len(self.contained_images()) > 0
return self.render()
def render(self):
return self.index()
def rendered_gallery(self):
context = aq_inner(self.context)
template = context.restrictedTraverse('@@gallery-view')()
return template
class GalleryView(BrowserView):
"""Provide gallery of contained image content"""
def __call__(self):
self.has_assets = len(self.contained_images()) > 0
return self.render()
def render(self):
return self.index()
def has_leadimage(self):
context = aq_inner(self.context)
try:
lead_img = context.image
except AttributeError:
lead_img = None
if lead_img is not None:
return True
return False
def leadimage_tag(self):
context = aq_inner(self.context)
scales = getMultiAdapter((context, self.request), name='images')
scale = scales.scale('image', width=900, height=900)
item = {}
if scale is not None:
item['url'] = scale.url
item['width'] = scale.width
item['height'] = scale.height
else:
item['url'] = IMG
item['width'] = '1px'
item['height'] = '1px'
return item
def contained_images(self):
context = aq_inner(self.context)
data = context.restrictedTraverse('@@folderListing')(
portal_type='Image',
sort_on='getObjPositionInParent')
return data
def image_tag(self, image):
context = image.getObject()
scales = getMultiAdapter((context, self.request), name='images')
scale = scales.scale('image', width=900, height=900)
item = {}
if scale is not None:
item['url'] = scale.url
item['width'] = scale.width
item['height'] = scale.height
else:
item['url'] = IMG
item['width'] = '1px'
item['height'] = '1px'
return item
def _get_scaled_img(self, size):
context = aq_inner(self.context)
scales = getMultiAdapter((context, self.request), name='images')
if size == 'small':
scale = scales.scale('image', width=300, height=300)
        elif size == 'medium':
scale = scales.scale('image', width=600, height=600)
else:
scale = scales.scale('image', width=900, height=900)
item = {}
if scale is not None:
item['url'] = scale.url
item['width'] = scale.width
item['height'] = scale.height
else:
item['url'] = IMG
item['width'] = '1px'
item['height'] = '1px'
return item
| a25kk/stv | src/stv.sitecontent/stv/sitecontent/browser/contentpage.py | Python | mit | 4,948 | 0 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
exts = [Extension("cython_hello_world",
["cython_hello_world.pyx"],
)]
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = exts,
)
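# Typical invocation for this kind of distutils/Cython setup script:
#
#     python setup.py build_ext --inplace
#
# which cythonizes cython_hello_world.pyx and builds an importable
# extension module next to the source file.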
| jeffzhengye/pylearn | speed/cython/scipy2013-cython-tutorial-master/exercises/hello-world/setup.py | Python | unlicense | 299 | 0.020067 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Dict <--> XML de/serializer.
The identity API prefers attributes over elements, so we serialize that way
by convention, with a few hardcoded exceptions.
"""
from lxml import etree
import re
import six
from keystone.i18n import _
DOCTYPE = '<?xml version="1.0" encoding="UTF-8"?>'
XMLNS = 'http://docs.openstack.org/identity/api/v2.0'
XMLNS_LIST = [
{
'value': 'http://docs.openstack.org/identity/api/v2.0'
},
{
'prefix': 'OS-KSADM',
'value': 'http://docs.openstack.org/identity/api/ext/OS-KSADM/v1.0',
},
]
PARSER = etree.XMLParser(
resolve_entities=False,
remove_comments=True,
remove_pis=True)
# NOTE(dolph): lxml.etree.Entity() is just a callable that currently returns an
# lxml.etree._Entity instance, which doesn't appear to be part of the
# public API, so we discover the type dynamically to be safe
ENTITY_TYPE = type(etree.Entity('x'))
def from_xml(xml):
"""Deserialize XML to a dictionary."""
if xml is None:
return None
deserializer = XmlDeserializer()
return deserializer(xml)
def to_xml(d, xmlns=None):
"""Serialize a dictionary to XML."""
if d is None:
return None
serialize = XmlSerializer()
return serialize(d, xmlns)
class XmlDeserializer(object):
def __call__(self, xml_str):
"""Returns a dictionary populated by decoding the given xml string."""
dom = etree.fromstring(xml_str.strip(), PARSER)
return self.walk_element(dom, True)
def _deserialize_links(self, links):
return dict((x.attrib['rel'], x.attrib['href']) for x in links)
@staticmethod
def _qualified_name(tag, namespace):
"""Returns a qualified tag name.
The tag name may contain the namespace prefix or not, which can
be determined by specifying the parameter namespace.
"""
m = re.search('[^}]+$', tag)
tag_name = m.string[m.start():]
if not namespace:
return tag_name
bracket = re.search('[^{]+$', tag)
ns = m.string[bracket.start():m.start() - 1]
# If the namespace is
# http://docs.openstack.org/identity/api/ext/OS-KSADM/v1.0 for the
# root element, a prefix needs to add in front of the tag name.
prefix = None
for xmlns in XMLNS_LIST:
if xmlns['value'] == ns:
prefix = xmlns.get('prefix')
break
if prefix is not None:
return '%(PREFIX)s:%(tag_name)s' % {
'PREFIX': prefix, 'tag_name': tag_name}
else:
return tag_name
def walk_element(self, element, namespace=False):
"""Populates a dictionary by walking an etree element."""
values = {}
for k, v in six.iteritems(element.attrib):
# boolean-looking attributes become booleans in JSON
if k in ['enabled', 'truncated']:
if v in ['true']:
v = True
elif v in ['false']:
v = False
values[self._qualified_name(k, namespace)] = v
text = None
if element.text is not None:
text = element.text.strip()
# current spec does not have attributes on an element with text
values = values or text or {}
decoded_tag = XmlDeserializer._qualified_name(element.tag, namespace)
list_item_tag = None
if (decoded_tag[-1] == 's' and not values and
decoded_tag != 'access'):
            # FIXME(gyee): special-case lists for now until we
# figure out how to properly handle them.
# If any key ends with an 's', we are assuming it is a list.
# List element have no attributes.
values = list(values)
if decoded_tag == 'policies':
list_item_tag = 'policy'
else:
list_item_tag = decoded_tag[:-1]
if decoded_tag == 'links':
return {'links': self._deserialize_links(element)}
links = None
truncated = False
for child in [self.walk_element(x) for x in element
if not isinstance(x, ENTITY_TYPE)]:
if list_item_tag:
# FIXME(gyee): special-case lists for now until we
# figure out how to properly handle them.
# If any key ends with an 's', we are assuming it is a list.
if list_item_tag in child:
values.append(child[list_item_tag])
else:
if 'links' in child:
links = child['links']
else:
truncated = child['truncated']
else:
values = dict(values.items() + child.items())
# set empty and none-list element to None to align with JSON
if not values:
values = ""
d = {XmlDeserializer._qualified_name(element.tag, namespace): values}
if links:
d['links'] = links
d['links'].setdefault('next')
d['links'].setdefault('previous')
if truncated:
d['truncated'] = truncated['truncated']
return d
class XmlSerializer(object):
def __call__(self, d, xmlns=None):
"""Returns an xml etree populated by the given dictionary.
Optionally, namespace the etree by specifying an ``xmlns``.
"""
links = None
truncated = False
# FIXME(dolph): skipping links for now
for key in d.keys():
if '_links' in key:
d.pop(key)
# NOTE(gyee, henry-nash): special-case links and truncation
# attribute in collections
if 'links' == key:
if links:
# we have multiple links
raise Exception('Multiple links found')
links = d.pop(key)
if 'truncated' == key:
if truncated:
# we have multiple attributes
raise Exception(_('Multiple truncation attributes found'))
truncated = d.pop(key)
assert len(d.keys()) == 1, ('Cannot encode more than one root '
'element: %s' % d.keys())
# name the root dom element
name = d.keys()[0]
m = re.search('[^:]+$', name)
root_name = m.string[m.start():]
prefix = m.string[0:m.start() - 1]
for ns in XMLNS_LIST:
if prefix == ns.get('prefix'):
xmlns = ns['value']
break
# only the root dom element gets an xlmns
root = etree.Element(root_name, xmlns=(xmlns or XMLNS))
self.populate_element(root, d[name])
# NOTE(gyee, henry-nash): special-case links and truncation attribute
if links:
self._populate_links(root, links)
if truncated:
self._populate_truncated(root, truncated)
# TODO(dolph): you can get a doctype from lxml, using ElementTrees
return '%s\n%s' % (DOCTYPE, etree.tostring(root, pretty_print=True))
def _populate_links(self, element, links_json):
links = etree.Element('links')
for k, v in six.iteritems(links_json):
if v:
link = etree.Element('link')
link.set('rel', six.text_type(k))
link.set('href', six.text_type(v))
links.append(link)
element.append(links)
def _populate_truncated(self, element, truncated_value):
truncated = etree.Element('truncated')
self._populate_bool(truncated, 'truncated', truncated_value)
element.append(truncated)
def _populate_list(self, element, k, v):
"""Populates an element with a key & list value."""
# spec has a lot of inconsistency here!
container = element
if k == 'media-types':
# xsd compliance: <media-types> contains <media-type>s
# find an existing <media-types> element or make one
container = element.find('media-types')
if container is None:
container = etree.Element(k)
element.append(container)
name = k[:-1]
elif k == 'serviceCatalog' or k == 'catalog':
# xsd compliance: <serviceCatalog> contains <service>s
container = etree.Element(k)
element.append(container)
name = 'service'
elif k == 'roles' and element.tag == 'user':
name = 'role'
elif k == 'endpoints' and element.tag == 'service':
name = 'endpoint'
elif k == 'values' and element.tag[-1] == 's':
# OS convention is to contain lists in a 'values' element,
# so the list itself can have attributes, which is
# unnecessary in XML
name = element.tag[:-1]
elif k[-1] == 's':
container = etree.Element(k)
element.append(container)
if k == 'policies':
# need to special-case policies since policie is not a word
name = 'policy'
else:
name = k[:-1]
else:
name = k
for item in v:
child = etree.Element(name)
self.populate_element(child, item)
container.append(child)
def _populate_dict(self, element, k, v):
"""Populates an element with a key & dictionary value."""
if k == 'links':
# links is a special dict
self._populate_links(element, v)
else:
child = etree.Element(k)
self.populate_element(child, v)
element.append(child)
def _populate_bool(self, element, k, v):
"""Populates an element with a key & boolean value."""
# booleans are 'true' and 'false'
element.set(k, six.text_type(v).lower())
def _populate_str(self, element, k, v):
"""Populates an element with a key & string value."""
if k in ['description']:
# always becomes an element
child = etree.Element(k)
child.text = six.text_type(v)
element.append(child)
else:
# add attributes to the current element
element.set(k, six.text_type(v))
def _populate_number(self, element, k, v):
"""Populates an element with a key & numeric value."""
# numbers can be handled as strings
self._populate_str(element, k, v)
def populate_element(self, element, value):
"""Populates an etree with the given value."""
if isinstance(value, list):
self._populate_sequence(element, value)
elif isinstance(value, dict):
self._populate_tree(element, value)
# NOTE(blk-u): For compatibility with Folsom, when serializing the
# v2.0 version element also add the links to the base element.
if value.get('id') == 'v2.0':
for item in value['links']:
child = etree.Element('link')
self.populate_element(child, item)
element.append(child)
elif isinstance(value, six.string_types):
element.text = six.text_type(value)
def _populate_sequence(self, element, l):
"""Populates an etree with a sequence of elements, given a list."""
# xsd compliance: child elements are singular: <users> has <user>s
name = element.tag
if element.tag[-1] == 's':
name = element.tag[:-1]
if name == 'policie':
name = 'policy'
for item in l:
child = etree.Element(name)
self.populate_element(child, item)
element.append(child)
def _populate_tree(self, element, d):
"""Populates an etree with attributes & elements, given a dict."""
for k, v in six.iteritems(d):
if isinstance(v, dict):
self._populate_dict(element, k, v)
elif isinstance(v, list):
self._populate_list(element, k, v)
elif isinstance(v, bool):
self._populate_bool(element, k, v)
elif isinstance(v, six.string_types):
self._populate_str(element, k, v)
elif type(v) in [int, float, long, complex]:
self._populate_number(element, k, v)
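# Hedged round-trip sketch; the sample payload is illustrative only and the
# guard keeps it out of normal keystone imports:
if __name__ == '__main__':
    sample = {'user': {'name': 'demo', 'enabled': True}}
    xml = to_xml(sample)
    print(xml)
    assert from_xml(xml)['user']['name'] == 'demo'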
| reeshupatel/demo | keystone/common/serializer.py | Python | apache-2.0 | 13,041 | 0.000077 |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Node for an OPENQASM custom gate statement.
"""
from ._node import Node
class CustomUnitary(Node):
"""Node for an OPENQASM custom gate statement.
children[0] is an id node.
children[1] is an exp_list (if len==3) or primary_list.
children[2], if present, is a primary_list.
Has properties:
.id = id node
.name = gate name string
.arguments = None or exp_list node
.bitlist = primary_list node
"""
def __init__(self, children):
"""Create the custom gate node."""
Node.__init__(self, 'custom_unitary', children, None)
self.id = children[0]
self.name = self.id.name
if len(children) == 3:
self.arguments = children[1]
self.bitlist = children[2]
else:
self.arguments = None
self.bitlist = children[1]
def qasm(self, prec=15):
"""Return the corresponding OPENQASM string."""
string = self.name
if self.arguments is not None:
string += "(" + self.arguments.qasm(prec) + ")"
string += " " + self.bitlist.qasm(prec) + ";"
return string
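    # For example (a hedged sketch; the gate and register names are
    # illustrative), a node parsed from ``cu1(0.5) q[0],q[1];`` has
    # name 'cu1', an exp_list holding 0.5 and a primary_list holding the
    # two qubits, and qasm() reproduces the same statement.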
| ChristopheVuillot/qiskit-sdk-py | qiskit/qasm/_node/_customunitary.py | Python | apache-2.0 | 1,893 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import logging
import warnings
from hyperspy import components1d
from hyperspy._signals.eels import EELSSpectrum
from hyperspy.components1d import EELSCLEdge, PowerLaw
from hyperspy.docstrings.model import FIT_PARAMETERS_ARG
from hyperspy.models.model1d import Model1D
_logger = logging.getLogger(__name__)
class EELSModel(Model1D):
"""Build an EELS model
Parameters
----------
spectrum : a Signal1D (or any Signal1D subclass) instance
auto_background : bool
If True, and if spectrum is an EELS instance adds automatically
a powerlaw to the model and estimate the parameters by the
two-area method.
auto_add_edges : bool
If True, and if spectrum is an EELS instance, it will
automatically add the ionization edges as defined in the
Signal1D instance. Adding a new element to the spectrum using
the components.EELSSpectrum.add_elements method automatically
add the corresponding ionisation edges to the model.
ll : {None, EELSSpectrum}
    If an EELSSpectrum is provided, it will be assumed that it is
a low-loss EELS spectrum, and it will be used to simulate the
effect of multiple scattering by convolving it with the EELS
spectrum.
GOS : {'hydrogenic', 'Hartree-Slater', None}
The GOS to use when auto adding core-loss EELS edges.
If None it will use the Hartree-Slater GOS if
they are available, otherwise it will use the hydrogenic GOS.
dictionary : {dict, None}
A dictionary to be used to recreate a model. Usually generated using
:meth:`hyperspy.model.as_dictionary`
"""
def __init__(self, signal1D, auto_background=True,
auto_add_edges=True, ll=None,
GOS=None, dictionary=None):
Model1D.__init__(self, signal1D)
# When automatically setting the fine structure energy regions,
# the fine structure of an EELS edge component is automatically
        # disabled if the next ionisation edge onset distance to the
        # higher energy side of the fine structure region is lower than
# the value of this parameter
self._min_distance_between_edges_for_fine_structure = 0
self._preedge_safe_window_width = 2
self.signal1D = signal1D
self._suspend_auto_fine_structure_width = False
self.convolved = False
self.low_loss = ll
self.GOS = GOS
self.edges = []
self._background_components = []
if dictionary is not None:
auto_background = False
auto_add_edges = False
self._load_dictionary(dictionary)
if auto_background is True:
background = PowerLaw()
self.append(background)
if self.signal.subshells and auto_add_edges is True:
self._add_edges_from_subshells_names()
@property
def signal1D(self):
return self._signal
@signal1D.setter
def signal1D(self, value):
if isinstance(value, EELSSpectrum):
self._signal = value
else:
raise ValueError(
"This attribute can only contain an EELSSpectrum "
"but an object of type %s was provided" %
str(type(value)))
def append(self, component):
"""Append component to EELS model.
Parameters
----------
component
HyperSpy component1D object.
Raises
------
NotImplementedError
If the signal axis is a non-uniform axis.
"""
super(EELSModel, self).append(component)
if isinstance(component, EELSCLEdge):
# Test that signal axis is uniform
if not self.axes_manager[-1].is_uniform:
raise NotImplementedError("This operation is not yet implemented "
"for non-uniform energy axes")
tem = self.signal.metadata.Acquisition_instrument.TEM
component.set_microscope_parameters(
E0=tem.beam_energy,
alpha=tem.convergence_angle,
beta=tem.Detector.EELS.collection_angle,
energy_scale=self.axis.scale)
component.energy_scale = self.axis.scale
component._set_fine_structure_coeff()
self._classify_components()
append.__doc__ = Model1D.append.__doc__
def remove(self, component):
super(EELSModel, self).remove(component)
self._classify_components()
remove.__doc__ = Model1D.remove.__doc__
def _classify_components(self):
"""Classify components between background and ionization edge
components.
        This method should be called every time components are added and
removed. An ionization edge becomes background when its onset falls to
the left of the first non-masked energy channel. The ionization edges
are stored in a list in the `edges` attribute. They are sorted by
increasing `onset_energy`. The background components are stored in
`_background_components`.
"""
self.edges = []
self._background_components = []
for component in self:
if isinstance(component, EELSCLEdge):
if component.onset_energy.value < \
self.axis.axis[self.channel_switches][0]:
component.isbackground = True
if component.isbackground is not True:
self.edges.append(component)
else:
component.fine_structure_active = False
component.fine_structure_coeff.free = False
elif (isinstance(component, PowerLaw) or
component.isbackground is True):
self._background_components.append(component)
if self.edges:
self.edges.sort(key=EELSCLEdge._onset_energy)
self.resolve_fine_structure()
if len(self._background_components) > 1:
self._backgroundtype = "mix"
elif len(self._background_components) == 1:
self._backgroundtype = \
self._background_components[0].__repr__()
bg = self._background_components[0]
if isinstance(bg, PowerLaw) and self.edges and not \
bg.A.map["is_set"].any():
self.two_area_background_estimation()
@property
def _active_edges(self):
return [edge for edge in self.edges if edge.active]
@property
def _active_background_components(self):
return [bc for bc in self._background_components if bc.active]
def _add_edges_from_subshells_names(self, e_shells=None):
"""Create the Edge instances and configure them appropiately
Parameters
----------
e_shells : list of strings
"""
if self.signal._are_microscope_parameters_missing():
raise ValueError(
"The required microscope parameters are not defined in "
"the EELS spectrum signal metadata. Use "
"``set_microscope_parameters`` to set them."
)
if e_shells is None:
e_shells = list(self.signal.subshells)
e_shells.sort()
master_edge = EELSCLEdge(e_shells.pop(), self.GOS)
# If self.GOS was None, the GOS is set by eels_cl_edge so
        # we reassign the value of self.GOS
self.GOS = master_edge.GOS._name
self.append(master_edge)
element = master_edge.element
while len(e_shells) > 0:
next_element = e_shells[-1].split('_')[0]
if next_element != element:
# New master edge
self._add_edges_from_subshells_names(e_shells=e_shells)
elif self.GOS == 'hydrogenic':
# The hydrogenic GOS includes all the L subshells in one
# so we get rid of the others
e_shells.pop()
else:
# Add the other subshells of the same element
# and couple their intensity and onset_energy to that of the
# master edge
edge = EELSCLEdge(e_shells.pop(), GOS=self.GOS)
edge.intensity.twin = master_edge.intensity
edge.onset_energy.twin = master_edge.onset_energy
edge.onset_energy.twin_function_expr = "x + {}".format(
(edge.GOS.onset_energy - master_edge.GOS.onset_energy))
edge.free_onset_energy = False
self.append(edge)
def resolve_fine_structure(
self,
preedge_safe_window_width=2,
i1=0):
"""Adjust the fine structure of all edges to avoid overlapping
        This function is called automatically every time the position of an
        edge changes.
Parameters
----------
preedge_safe_window_width : float
minimum distance between the fine structure of an ionization edge
and that of the following one. Default 2 (eV).
"""
if self._suspend_auto_fine_structure_width is True:
return
if not self._active_edges:
return
while (self._active_edges[i1].fine_structure_active is False and
i1 < len(self._active_edges) - 1):
i1 += 1
if i1 < len(self._active_edges) - 1:
i2 = i1 + 1
while (self._active_edges[i2].fine_structure_active is False and
i2 < len(self._active_edges) - 1):
i2 += 1
if self._active_edges[i2].fine_structure_active is True:
distance_between_edges = (
self._active_edges[i2].onset_energy.value -
self._active_edges[i1].onset_energy.value)
if (self._active_edges[i1].fine_structure_width >
distance_between_edges -
self._preedge_safe_window_width):
min_d = self._min_distance_between_edges_for_fine_structure
if (distance_between_edges -
self._preedge_safe_window_width) <= min_d:
_logger.info((
"Automatically deactivating the fine structure "
"of edge number %d to avoid conflicts with edge "
"number %d") % (i2 + 1, i1 + 1))
self._active_edges[i2].fine_structure_active = False
self._active_edges[
i2].fine_structure_coeff.free = False
self.resolve_fine_structure(i1=i2)
else:
new_fine_structure_width = (
distance_between_edges -
self._preedge_safe_window_width)
_logger.info((
"Automatically changing the fine structure "
"width of edge %d from %s eV to %s eV to avoid "
"conflicts with edge number %d") % (
i1 + 1,
self._active_edges[i1].fine_structure_width,
new_fine_structure_width,
i2 + 1))
self._active_edges[i1].fine_structure_width = \
new_fine_structure_width
self.resolve_fine_structure(i1=i2)
else:
self.resolve_fine_structure(i1=i2)
else:
return
def fit(self, kind="std", **kwargs):
"""Fits the model to the experimental data.
Read more in the :ref:`User Guide <model.fitting>`.
Parameters
----------
kind : {"std", "smart"}, default "std"
If "std", performs standard fit. If "smart",
performs a smart_fit - for more details see
the :ref:`User Guide <eels.fitting>`.
%s
Returns
-------
None
See Also
--------
* :py:meth:`~hyperspy.model.BaseModel.fit`
* :py:meth:`~hyperspy.model.BaseModel.multifit`
* :py:meth:`~hyperspy.model.EELSModel.smart_fit`
"""
if kind not in ["smart", "std"]:
raise ValueError(
f"kind must be either 'std' or 'smart', not '{kind}'"
)
elif kind == "smart":
return self.smart_fit(**kwargs)
elif kind == "std":
return Model1D.fit(self, **kwargs)
fit.__doc__ %= FIT_PARAMETERS_ARG
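    # Illustrative usage sketch (not part of the original module; the signal
    # `s` and its EELS metadata are assumptions):
    #
    #   m = s.create_model()     # EELSModel built from an EELS spectrum
    #   m.fit()                  # standard fit (kind="std")
    #   m.fit(kind="smart")      # cascade fit, equivalent to m.smart_fit()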
def smart_fit(self, start_energy=None, **kwargs):
"""Fits EELS edges in a cascade style.
        The fitting procedure acts in an iterative manner along
        the energy-loss axis. First it fits only the background
        up to the first edge. It continues by deactivating all
        edges except the first one, then performs the fit. Then
        it activates only the first two, fits, and repeats
        this until all edges are fitted simultaneously.
        Other, non-EELSCLEdge components are never deactivated
        and are fitted on every iteration.
Parameters
----------
start_energy : {float, None}
If float, limit the range of energies from the left to the
given value.
%s
See Also
--------
* :py:meth:`~hyperspy.model.BaseModel.fit`
* :py:meth:`~hyperspy.model.BaseModel.multifit`
* :py:meth:`~hyperspy.model.EELSModel.fit`
"""
# Fit background
self.fit_background(start_energy, **kwargs)
# Fit the edges
for i in range(0, len(self._active_edges)):
self._fit_edge(i, start_energy, **kwargs)
smart_fit.__doc__ %= FIT_PARAMETERS_ARG
def _get_first_ionization_edge_energy(self, start_energy=None):
"""Calculate the first ionization edge energy.
Returns
-------
iee : float or None
The first ionization edge energy or None if no edge is defined in
the model.
"""
if not self._active_edges:
return None
start_energy = self._get_start_energy(start_energy)
iee_list = [edge.onset_energy.value for edge in self._active_edges
if edge.onset_energy.value > start_energy]
iee = min(iee_list) if iee_list else None
return iee
def _get_start_energy(self, start_energy=None):
E0 = self.axis.axis[self.channel_switches][0]
if not start_energy or start_energy < E0:
start_energy = E0
return start_energy
def fit_background(self, start_energy=None, only_current=True, **kwargs):
"""Fit the background to the first active ionization edge
in the energy range.
Parameters
----------
start_energy : {float, None}, optional
If float, limit the range of energies from the left to the
given value. Default None.
only_current : bool, optional
If True, only fit the background at the current coordinates.
Default True.
        **kwargs : extra keyword arguments
            All extra keyword arguments are passed to fit or
multifit.
"""
        # If there is no active background component, do nothing
if not self._active_background_components:
return
iee = self._get_first_ionization_edge_energy(start_energy=start_energy)
if iee is not None:
to_disable = [edge for edge in self._active_edges
if edge.onset_energy.value >= iee]
E2 = iee - self._preedge_safe_window_width
self.disable_edges(to_disable)
else:
E2 = None
self.set_signal_range(start_energy, E2)
if only_current:
self.fit(**kwargs)
else:
self.multifit(**kwargs)
self.channel_switches = copy.copy(self.backup_channel_switches)
if iee is not None:
self.enable_edges(to_disable)
def two_area_background_estimation(self, E1=None, E2=None, powerlaw=None):
"""Estimates the parameters of a power law background with the two
area method.
Parameters
----------
E1 : float
E2 : float
powerlaw : PowerLaw component or None
If None, it will try to guess the right component from the
background components of the model
"""
if powerlaw is None:
for component in self._active_background_components:
if isinstance(component, components1d.PowerLaw):
if powerlaw is None:
powerlaw = component
else:
                        _logger.warning(
                            'There is more than one power law '
                            'background component defined in this model; '
                            'please use the powerlaw keyword to specify '
                            'one of them')
                        return
            if powerlaw is None:  # No power law component
                return
ea = self.axis.axis[self.channel_switches]
E1 = self._get_start_energy(E1)
if E2 is None:
E2 = self._get_first_ionization_edge_energy(start_energy=E1)
if E2 is None:
E2 = ea[-1]
else:
E2 = E2 - \
self._preedge_safe_window_width
if not powerlaw.estimate_parameters(
self.signal, E1, E2, only_current=False):
_logger.warning(
"The power law background parameters could not "
"be estimated.\n"
"Try choosing a different energy range for the estimation")
return
def _fit_edge(self, edgenumber, start_energy=None, **kwargs):
backup_channel_switches = self.channel_switches.copy()
ea = self.axis.axis[self.channel_switches]
if start_energy is None:
start_energy = ea[0]
# Declare variables
active_edges = self._active_edges
edge = active_edges[edgenumber]
if (edge.intensity.twin is not None or
edge.active is False or
edge.onset_energy.value < start_energy or
edge.onset_energy.value > ea[-1]):
return 1
# Fitting edge 'edge.name'
last_index = len(self._active_edges) - 1 # Last edge index
i = 1
twins = []
# find twins
while edgenumber + i <= last_index and (
active_edges[edgenumber + i].intensity.twin is not None or
active_edges[edgenumber + i].active is False):
if active_edges[edgenumber + i].intensity.twin is not None:
twins.append(self._active_edges[edgenumber + i])
i += 1
if (edgenumber + i) > last_index:
nextedgeenergy = ea[-1]
else:
nextedgeenergy = (
active_edges[edgenumber + i].onset_energy.value -
self._preedge_safe_window_width)
# Backup the fsstate
to_activate_fs = []
for edge_ in [edge, ] + twins:
if (edge_.fine_structure_active is True and
edge_.fine_structure_coeff.free is True):
to_activate_fs.append(edge_)
self.disable_fine_structure(to_activate_fs)
# Smart Fitting
# Without fine structure to determine onset_energy
edges_to_activate = []
for edge_ in self._active_edges[edgenumber + 1:]:
if (edge_.active is True and
edge_.onset_energy.value >= nextedgeenergy):
edge_.active = False
edges_to_activate.append(edge_)
self.set_signal_range(start_energy, nextedgeenergy)
if edge.free_onset_energy is True:
edge.onset_energy.free = True
self.fit(**kwargs)
edge.onset_energy.free = False
_logger.info("onset_energy = %s", edge.onset_energy.value)
self._classify_components()
elif edge.intensity.free is True:
self.enable_fine_structure(to_activate_fs)
self.remove_fine_structure_data(to_activate_fs)
self.disable_fine_structure(to_activate_fs)
self.fit(**kwargs)
if len(to_activate_fs) > 0:
self.set_signal_range(start_energy, nextedgeenergy)
self.enable_fine_structure(to_activate_fs)
self.fit(**kwargs)
self.enable_edges(edges_to_activate)
# Recover the channel_switches. Remove it or make it smarter.
self.channel_switches = backup_channel_switches
def quantify(self):
"""Prints the value of the intensity of all the independent
active EELS core loss edges defined in the model
"""
elements = {}
for edge in self._active_edges:
if edge.active and edge.intensity.twin is None:
element = edge.element
subshell = edge.subshell
if element not in elements:
elements[element] = {}
elements[element][subshell] = edge.intensity.value
print()
print("Absolute quantification:")
print("Elem.\tIntensity")
for element in elements:
if len(elements[element]) == 1:
for subshell in elements[element]:
print("%s\t%f" % (
element, elements[element][subshell]))
else:
for subshell in elements[element]:
print("%s_%s\t%f" % (element, subshell,
elements[element][subshell]))
def remove_fine_structure_data(self, edges_list=None):
"""Remove the fine structure data from the fitting routine as
defined in the fine_structure_width parameter of the
component.EELSCLEdge
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self._active_edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if (edge.isbackground is False and
edge.fine_structure_active is True):
start = edge.onset_energy.value
stop = start + edge.fine_structure_width
self.remove_signal_range(start, stop)
def enable_edges(self, edges_list=None):
"""Enable the edges listed in edges_list. If edges_list is
None (default) all the edges with onset in the spectrum energy
region will be enabled.
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self.edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if edge.isbackground is False:
edge.active = True
self.resolve_fine_structure()
def disable_edges(self, edges_list=None):
"""Disable the edges listed in edges_list. If edges_list is None (default)
all the edges with onset in the spectrum energy region will be
disabled.
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self._active_edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if edge.isbackground is False:
edge.active = False
self.resolve_fine_structure()
def enable_background(self):
"""Enable the background componets.
"""
for component in self._background_components:
component.active = True
def disable_background(self):
"""Disable the background components.
"""
for component in self._active_background_components:
component.active = False
def enable_fine_structure(self, edges_list=None):
"""Enable the fine structure of the edges listed in edges_list.
If edges_list is None (default) the fine structure of all the edges
with onset in the spectrum energy region will be enabled.
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self._active_edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if edge.isbackground is False:
edge.fine_structure_active = True
edge.fine_structure_coeff.free = True
self.resolve_fine_structure()
def disable_fine_structure(self, edges_list=None):
"""Disable the fine structure of the edges listed in edges_list.
If edges_list is None (default) the fine structure of all the edges
with onset in the spectrum energy region will be disabled.
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self._active_edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if edge.isbackground is False:
edge.fine_structure_active = False
edge.fine_structure_coeff.free = False
self.resolve_fine_structure()
def set_all_edges_intensities_positive(self):
for edge in self._active_edges:
edge.intensity.ext_force_positive = True
edge.intensity.ext_bounded = True
def unset_all_edges_intensities_positive(self):
for edge in self._active_edges:
edge.intensity.ext_force_positive = False
edge.intensity.ext_bounded = False
def enable_free_onset_energy(self, edges_list=None):
"""Enable the automatic freeing of the onset_energy parameter during a
smart fit for the edges listed in edges_list.
If edges_list is None (default) the onset_energy of all the edges
        with onset in the spectrum energy region will be freed.
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self._active_edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if edge.isbackground is False:
edge.free_onset_energy = True
def disable_free_onset_energy(self, edges_list=None):
"""Disable the automatic freeing of the onset_energy parameter during a
smart fit for the edges listed in edges_list.
If edges_list is None (default) the onset_energy of all the edges
with onset in the spectrum energy region will not be freed.
        Note that if their attribute edge.onset_energy.free is True, the
parameter will be free during the smart fit.
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self._active_edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if edge.isbackground is False:
                edge.free_onset_energy = False
def fix_edges(self, edges_list=None):
"""Fixes all the parameters of the edges given in edges_list.
If edges_list is None (default) all the edges will be fixed.
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self._active_edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if edge.isbackground is False:
edge.intensity.free = False
edge.onset_energy.free = False
edge.fine_structure_coeff.free = False
def free_edges(self, edges_list=None):
"""Frees all the parameters of the edges given in edges_list.
If edges_list is None (default) all the edges will be freeed.
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self._active_edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if edge.isbackground is False:
edge.intensity.free = True
def fix_fine_structure(self, edges_list=None):
"""Fixes all the parameters of the edges given in edges_list.
If edges_list is None (default) all the edges will be fixed.
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self._active_edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if edge.isbackground is False:
edge.fine_structure_coeff.free = False
def free_fine_structure(self, edges_list=None):
"""Frees all the parameters of the edges given in edges_list.
If edges_list is None (default) all the edges will be freeed.
Parameters
----------
edges_list : None or list of EELSCLEdge or list of edge names
If None, the operation is performed on all the edges in the model.
Otherwise, it will be performed only on the listed components.
See Also
--------
enable_edges, disable_edges, enable_background,
disable_background, enable_fine_structure,
disable_fine_structure, set_all_edges_intensities_positive,
unset_all_edges_intensities_positive, enable_free_onset_energy,
disable_free_onset_energy, fix_edges, free_edges, fix_fine_structure,
free_fine_structure
"""
if edges_list is None:
edges_list = self._active_edges
else:
edges_list = [self._get_component(x) for x in edges_list]
for edge in edges_list:
if edge.isbackground is False:
edge.fine_structure_coeff.free = True
def suspend_auto_fine_structure_width(self):
"""Disable the automatic adjustament of the core-loss edges fine
structure width.
See Also
--------
resume_auto_fine_structure_width
"""
if self._suspend_auto_fine_structure_width is False:
self._suspend_auto_fine_structure_width = True
else:
warnings.warn("Already suspended, does nothing.")
def resume_auto_fine_structure_width(self, update=True):
"""Enable the automatic adjustament of the core-loss edges fine
structure width.
Parameters
----------
update : bool, optional
If True, also execute the automatic adjustment (default).
See Also
--------
suspend_auto_fine_structure_width
"""
if self._suspend_auto_fine_structure_width is True:
self._suspend_auto_fine_structure_width = False
if update is True:
self.resolve_fine_structure()
else:
warnings.warn("Not suspended, nothing to resume.")
| thomasaarholt/hyperspy | hyperspy/models/eelsmodel.py | Python | gpl-3.0 | 38,199 | 0.000052 |
from .util import bucket as tokenbucket
from . import wrappers
class Limit(object):
def __init__(self, command_limiting_initial_tokens, command_limiting_message_cost, command_limiting_restore_rate, override, permissions):
"""limit(20, 4, 0.13, ["admin"], {"admin": "user!*@*"})
Limits the use of commands
Arguments:
command_limiting_initial_tokens {Integer} -- Initial tokens for tokenbucket
command_limiting_message_cost {Integer} -- Message cost for tokenbucket
command_limiting_restore_rate {Integer} -- Restore rate for token bucket
override {List} -- List of permissions to override the limit
permissions {Dict} -- All of the bots permissions.
"""
self.command_limiting_initial_tokens = command_limiting_initial_tokens
self.command_limiting_message_cost = command_limiting_message_cost
self.command_limiting_restore_rate = command_limiting_restore_rate
self.buckets = {}
self.permissions = wrappers.permissions_class(permissions)
self.override = override
def command_limiter(self, info):
        # Allow users whose mask matches an override permission (e.g. admin)
if self.permissions.check(self.override, info.mask):
return True
if info.nick not in self.buckets:
bucket = tokenbucket.TokenBucket(self.command_limiting_initial_tokens, self.command_limiting_restore_rate)
self.buckets[info.nick] = bucket
else:
bucket = self.buckets[info.nick]
if bucket.consume(self.command_limiting_message_cost):
return True
return False
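# Illustrative usage sketch (hypothetical values; `info` is assumed to expose
# the `mask` and `nick` attributes used above):
#
#   limiter = Limit(20, 4, 0.13, ["admin"], {"admin": "user!*@*"})
#   if limiter.command_limiter(info):
#       pass  # tokens available (or override matched): run the command
#   else:
#       pass  # bucket empty: drop or defer the command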
| IndigoTiger/ezzybot | ezzybot/limit.py | Python | gpl-3.0 | 1,649 | 0.004245 |
'''winnow/values.py
vivify and normalize each of the different field types:
- string
- collection (values are strings, left operand is collection)
- numeric
- bool
- date
To vivify is to turn from a string representation into a
live object. So for '2014-01-21T16:34:02', we would make a
datetime object. Vivify functions should also accept their
return type. So vivify_absolute_date(datetime.datetime.now())
should just return the datetime object.
To stringify is to serialize. This would be like turning the
list [1, 2, 3] into the JSON string "[1,2,3]"
'''
from __future__ import unicode_literals
import json
from datetime import datetime
from dateutil.parser import parse as parse_date
from six import string_types
from .error import WinnowError
from .relative_dates import valid_rel_date_values
# TODO : Since we're storing filters denormalized as JSON now, we probably need
# Less of this crazy vivification stuff. For another day, perhaps.
def stringify_string(value):
return str(value)
def stringify_collection(value):
return json.dumps(value)
stringify_single_choice = json.dumps
stringify_bool = str
def stringify_numeric(value):
if isinstance(value, float):
return '{:.10f}'.format(value)
return str(value)
stringify_absolute_date = datetime.isoformat
def vivify_string(value): # request for comment -- tighter check on this?
return str(value)
def vivify_collection(value):
try:
if not isinstance(value, list):
value = json.loads(value)
assert isinstance(value, list), "collection values must be lists"
assert all(isinstance(v, (dict, string_types)) for v in value), "elements of collection must be dicts (or strings, for backwards compat)"
if value and isinstance(value[0], dict): # backwards compat check.
value = [v['id'] for v in value]
return value
except (ValueError, AssertionError) as e:
raise WinnowError(e)
def vivify_single_choice(value):
try:
if not isinstance(value, dict):
value = json.loads(value)
assert isinstance(value, dict), "single choice values must be a dict"
assert 'id' in value and 'name' in value, "Choice must have keys for 'name' and 'id'"
return value
except (ValueError, AssertionError) as e:
raise WinnowError(e)
def vivify_numeric(value):
if value == '':
return 0
if isinstance(value, (float, int)):
return value
try:
return int(value)
except ValueError:
pass # int is more restrictive -- let's not get hasty
# and reject before we see if it's a float.
try:
return float(value)
except ValueError as e:
raise WinnowError(e)
def vivify_relative_date(value):
if value.lower().replace(' ', '_') in valid_rel_date_values:
return value.lower().replace(' ', '_')
raise WinnowError("Invalid relative date value: '{}'".format(value))
stringify_relative_date = vivify_relative_date
def vivify_absolute_date(value):
try:
return parse_date(value)
except TypeError:
raise WinnowError("invalid literal for date range: '{}'".format(value))
def vivify_bool(value):
if isinstance(value, string_types) and value.lower() in ('true', 'false'):
return value.lower() == 'true'
else:
assert isinstance(value, bool), "expected boolean or string. received '{}'".format(value)
return value
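# Illustrative round trips (derived from the functions above):
#
#   vivify_numeric('3')                              # -> 3 (int)
#   vivify_numeric('3.5')                            # -> 3.5 (float)
#   vivify_collection('[{"id": "a", "name": "A"}]')  # -> ['a']
#   vivify_bool('TRUE')                              # -> True
#   stringify_collection(['a', 'b'])                 # -> '["a", "b"]'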
| bgschiller/winnow | winnow/values.py | Python | mit | 3,473 | 0.004319 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 0, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_Quantization/trend_MovingAverage/cycle_0/ar_/test_artificial_1024_Quantization_MovingAverage_0__20.py | Python | bsd-3-clause | 272 | 0.084559 |
#!/usr/bin/env python
"""
poncho.common.utils : Utility Functions
"""
from datetime import datetime
def readable_datetime(dt):
"""Turn a datetime into something readable, with time since or until."""
if dt is None:
return ""
dt = dt.replace(microsecond=0)
now = datetime.now().replace(microsecond=0)
low = min(dt, now)
hi = max(dt, now)
delta = hi - low
relative_times = [
('year', delta.days // 365),
('month', delta.days // 30),
('week', delta.days // 7),
('day', delta.days),
('hour', delta.seconds // 60 // 60 % 24),
('min', delta.seconds // 60 % 60),
('sec', delta.seconds % 60),
]
modifier = "from now"
if dt < now:
modifier = "ago"
two_sizes = []
    for name, amount in relative_times:
        if len(two_sizes) == 2:
            break
        if amount > 0:
            name += "s" if amount != 1 else ""
            two_sizes.append("%s %s" % (amount, name))
if len(two_sizes):
return "%s (%s %s)" % (dt, ", ".join(two_sizes), modifier)
return "%s (right now)" % (dt)
| magellancloud/poncho | poncho/common/utils.py | Python | bsd-3-clause | 1,121 | 0.00446 |
# AUTOGENERATED FILE - DO NOT MODIFY!
# This file generated by Djinni from map.djinni
from djinni.support import MultiSet # default imported in all files
from djinni.exception import CPyException # default imported in all files
from djinni.pycffi_marshal import CPyObject, CPyObjectProxy, CPyPrimitive, CPyRecord, CPyString
from dh__map_int32_t_int32_t import MapInt32TInt32THelper
from dh__map_int32_t_int32_t import MapInt32TInt32TProxy
from dh__map_string_int64_t import MapStringInt64THelper
from dh__map_string_int64_t import MapStringInt64TProxy
from PyCFFIlib_cffi import ffi, lib
from djinni import exception # this forces run of __init__.py which gives cpp option to call back into py to create exception
class MapRecord:
c_data_set = MultiSet()
@staticmethod
def check_c_data_set_empty():
assert len(MapRecord.c_data_set) == 0
MapStringInt64THelper.check_c_data_set_empty()
MapInt32TInt32THelper.check_c_data_set_empty()
def __init__(self, map, imap):
self.map = map
self.imap = imap
| trafi/djinni | test-suite/generated-src/python/map_record.py | Python | apache-2.0 | 1,058 | 0.007561 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
__all__ = [
'prefix_dict_keys'
]
def prefix_dict_keys(dictionary, prefix='_'):
"""
Prefix dictionary keys with a provided prefix.
:param dictionary: Dictionary whose keys to prefix.
:type dictionary: ``dict``
:param prefix: Key prefix.
:type prefix: ``str``
:rtype: ``dict``:
"""
result = {}
for key, value in six.iteritems(dictionary):
result['%s%s' % (prefix, key)] = value
return result
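# Illustrative example (derived from the function above):
#
#   prefix_dict_keys({'cpu': 4, 'ram': 16}, prefix='st2_')
#   # -> {'st2_cpu': 4, 'st2_ram': 16}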
| jtopjian/st2 | st2common/st2common/util/misc.py | Python | apache-2.0 | 1,245 | 0 |
#!/home/ela/Python_Django/coworkok/coworkok/bin/python
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
from __future__ import print_function
import getopt, sys
from PIL import Image
def usage():
print("PIL Convert 0.5/1998-12-30 -- convert image files")
print("Usage: pilconvert [option] infile outfile")
print()
print("Options:")
print()
print(" -c <format> convert to format (default is given by extension)")
print()
print(" -g convert to greyscale")
print(" -p convert to palette image (using standard palette)")
print(" -r convert to rgb")
print()
print(" -o optimize output (trade speed for size)")
print(" -q <value> set compression quality (0-100, JPEG only)")
print()
print(" -f list supported file formats")
sys.exit(1)
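# Example invocations (derived from the usage text above):
#
#   pilconvert.py -g -o input.jpg output.png
#   pilconvert.py -c JPEG -q 80 photo.png photo.jpg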
if len(sys.argv) == 1:
usage()
try:
opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error as v:
print(v)
sys.exit(1)
format = None
convert = None
options = { }
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats (* indicates output format):")
for i in id:
if i in Image.SAVE:
print(i+"*", end=' ')
else:
print(i, end=' ')
sys.exit(1)
elif o == "-c":
format = a
if o == "-g":
convert = "L"
elif o == "-p":
convert = "P"
elif o == "-r":
convert = "RGB"
elif o == "-o":
options["optimize"] = 1
elif o == "-q":
options["quality"] = string.atoi(a)
if len(argv) != 2:
usage()
try:
im = Image.open(argv[0])
if convert and im.mode != convert:
im.draft(convert, im.size)
im = im.convert(convert)
if format:
im.save(argv[1], format, **options)
else:
im.save(argv[1], **options)
except:
print("cannot convert image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| elatomczyk/dook | coworkok/bin/pilconvert.py | Python | gpl-3.0 | 2,354 | 0.002124 |
# $Id: 969e4c5fd51bb174563d06c1357489c2742813ec $
"""
Base classes for enhanced DB drivers.
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import re
import time
import os
import sys
from datetime import date, datetime
from collections import namedtuple
from grizzled.exception import ExceptionWithMessage
from grizzled.decorators import abstract
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['DBDriver', 'DB', 'Cursor', 'DBError', 'Error', 'Warning',
'TableMetadata', 'IndexMetadata', 'RDBMSMetadata']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class DBError(ExceptionWithMessage):
"""
Base class for all DB exceptions.
"""
pass
class Error(DBError):
"""Thrown to indicate an error in the ``db`` module."""
pass
class Warning(DBError):
"""Thrown to indicate an error in the ``db`` module."""
pass
TableMetadata = namedtuple('TableMetadata', ['column_name',
'type_string',
'max_char_size',
'precision',
'scale',
'nullable'])
IndexMetadata = namedtuple('IndexMetadata', ['index_name',
'index_columns',
'description'])
RDBMSMetadata = namedtuple('RDBMSMetadata', ['vendor', 'product', 'version'])
class Cursor(object):
"""
Class for DB cursors returned by the ``DB.cursor()`` method. This class
conforms to the Python DB cursor interface, including the following
attributes.
:IVariables:
description : tuple
A read-only attribute that is a sequence of 7-item tuples, one per
column, from the last query executed. The tuple values are:
*(name, typecode, displaysize, internalsize, precision, scale)*
rowcount : int
A read-only attribute that specifies the number of rows
fetched in the last query, or -1 if unknown. *Note*: It's best
not to rely on the row count, because some database drivers
(such as SQLite) don't report valid row counts.
"""
def __init__(self, cursor, driver):
"""
Create a new Cursor object, wrapping the underlying real DB API
cursor.
:Parameters:
cursor
the real DB API cursor object
driver
the driver that is creating this object
"""
self.__cursor = cursor
self.__driver = driver
self.__description = None
self.__rowcount = -1
def __get_description(self):
return self.__description
description = property(__get_description,
doc='The description field. See class docs.')
def __get_rowcount(self):
return self.__rowcount
rowcount = property(__get_rowcount,
doc='Number of rows from last query, or -1')
def close(self):
"""
Close the cursor.
:raise Warning: Non-fatal warning
:raise Error: Error; unable to close
"""
dbi = self.__driver.get_import()
try:
return self.__cursor.close()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def execute(self, statement, parameters=None):
"""
Execute a SQL statement string with the given parameters.
'parameters' is a sequence when the parameter style is
'format', 'numeric' or 'qmark', and a dictionary when the
style is 'pyformat' or 'named'. See ``DB.paramstyle()``.
:Parameters:
statement : str
the SQL statement to execute
parameters : list
parameters to use, if the statement is parameterized
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
if parameters:
result = self.__cursor.execute(statement, parameters)
else:
result = self.__cursor.execute(statement)
try:
self.__rowcount = self.__cursor.rowcount
except AttributeError:
self.__rowcount = -1
self.__description = self.__cursor.description
return result
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
except:
raise Error(sys.exc_info()[1])
def executemany(self, statement, *parameters):
"""
Execute a SQL statement once for each item in the given parameters.
:Parameters:
statement : str
the SQL statement to execute
parameters : sequence
a sequence of sequences when the parameter style
is 'format', 'numeric' or 'qmark', and a sequence
of dictionaries when the style is 'pyformat' or
'named'.
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
result = self.__cursor.executemany(statement, *parameters)
self.__rowcount = self.__cursor.rowcount
self.__description = self.__cursor.description
return result
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
executeMany = executemany
def fetchone(self):
"""
Returns the next result set row from the last query, as a sequence
of tuples. Raises an exception if the last statement was not a query.
:rtype: tuple
:return: Next result set row
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return self.__cursor.fetchone()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def fetchall(self):
"""
Returns all remaining result rows from the last query, as a sequence
of tuples. Raises an exception if the last statement was not a query.
:rtype: list of tuples
:return: List of rows, each represented as a tuple
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return self.__cursor.fetchall()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
fetchAll = fetchall
def fetchmany(self, n):
"""
Returns up to n remaining result rows from the last query, as a
sequence of tuples. Raises an exception if the last statement was
not a query.
:Parameters:
n : int
maximum number of result rows to get
:rtype: list of tuples
:return: List of rows, each represented as a tuple
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
            return self.__cursor.fetchmany(n)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
fetchMany = fetchmany
def get_rdbms_metadata(self):
"""
Return data about the RDBMS: the product name, the version,
etc. The result is a named tuple, with the following fields:
vendor
The product vendor, if applicable, or ``None`` if not known
product
The name of the database product, or ``None`` if not known
version
The database product version, or ``None`` if not known
The fields may be accessed by position or name. This method
just calls through to the equivalent method in the underlying
``DBDriver`` implementation.
:rtype: named tuple
:return: the vendor information
"""
# Default implementation
dbi = self.__driver.get_import()
try:
return self.__driver.get_rdbms_metadata(self.__cursor)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def get_table_metadata(self, table):
"""
Get the metadata for a table. Returns a list of tuples, one for
each column. Each tuple consists of the following::
(column_name, type_string, max_char_size, precision, scale, nullable)
The tuple elements have the following meanings.
column_name
the name of the column
type_string
the column type, as a string
max_char_size
the maximum size for a character field, or ``None``
precision
the precision, for a numeric field; or ``None``
scale
the scale, for a numeric field; or ``None``
nullable
True if the column is nullable, False if it is not
The tuples are named tuples, so the fields may be referenced by the
names above or by position.
The data may come from the DB API's ``cursor.description`` field, or
it may be richer, coming from a direct SELECT against
database-specific tables.
:rtype: list
:return: list of tuples, as described above
:raise Warning: Non-fatal warning
:raise Error: Error
"""
# Default implementation
dbi = self.__driver.get_import()
try:
return self.__driver.get_table_metadata(table, self.__cursor)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def get_index_metadata(self, table):
"""
Get the metadata for the indexes for a table. Returns a list of
tuples, one for each index. Each tuple consists of the following::
(index_name, [index_columns], description)
The tuple elements have the following meanings.
index_name
the index name
index_columns
a list of column names
description
index description, or ``None``
The tuples are named tuples, so the fields may be referenced by the
names above or by position.
:rtype: list of tuples
:return: the list of tuples, or ``None`` if not supported in the
underlying database
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return self.__driver.get_index_metadata(table, self.__cursor)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def get_tables(self):
"""
Get the list of tables in the database to which this cursor is
connected.
:rtype: list
:return: List of table names. The list will be empty if the database
contains no tables.
:raise NotImplementedError: Capability not supported by database driver
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return self.__driver.get_tables(self.__cursor)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
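# Illustrative usage sketch (a concrete driver instance is an assumption; see
# the DBDriver class below for the real entry points):
#
#   db = driver.connect(host='localhost', database='test')
#   cursor = db.cursor()
#   cursor.execute('SELECT * FROM foo')
#   rows = cursor.fetchall()
#   db.close()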
class DB(object):
"""
    The object returned by a call to ``DBDriver.connect()``. ``DB`` wraps the
real database object returned by the underlying Python DB API module's
``connect()`` method.
"""
def __init__(self, db, driver):
"""
Create a new DB object.
:Parameters:
db
the underlying Python DB API database object
driver : DBDriver
the driver (i.e., the subclass of ``DBDriver``) that
created the ``db`` object
"""
self.__db = db
self.__driver = driver
dbi = driver.get_import()
        for attr in ['BINARY', 'NUMBER', 'STRING', 'DATETIME', 'ROWID']:
            try:
                setattr(self, attr, getattr(dbi, attr))
            except AttributeError:
                setattr(self, attr, 0)
def paramstyle(self):
"""
Get the parameter style for the underlying DB API module. The
result of this method call corresponds exactly to the underlying
DB API module's 'paramstyle' attribute. It will have one of the
following values:
+----------+-----------------------------------------------------------+
| format | The parameter marker is '%s', as in string |
| | formatting. A query looks like this:: |
| | |
| | c.execute('SELECT * FROM Foo WHERE Bar=%s', [x]) |
+----------+-----------------------------------------------------------+
| named | The parameter marker is ``:name``, and parameters |
| | are named. A query looks like this:: |
| | |
| | c.execute('SELECT * FROM Foo WHERE Bar=:x', {'x':x}) |
+----------+-----------------------------------------------------------+
| numeric | The parameter marker is ``:n``, giving the parameter's |
| | number (starting at 1). A query looks like this:: |
| | |
| | c.execute('SELECT * FROM Foo WHERE Bar=:1', [x]) |
+----------+-----------------------------------------------------------+
| pyformat | The parameter marker is ``:name``, and parameters |
| | are named. A query looks like this:: |
| | |
| | c.execute('SELECT * FROM Foo WHERE Bar=%(x)s', {'x':x}) |
+----------+-----------------------------------------------------------+
| qmark | The parameter marker is "?", and parameters are |
| | substituted in order. A query looks like this:: |
| | |
| | c.execute('SELECT * FROM Foo WHERE Bar=?', [x]) |
+----------+-----------------------------------------------------------+
"""
return self.__driver.get_import().paramstyle
def Binary(self, string):
"""
Returns an object representing the given string of bytes as a BLOB.
This method is equivalent to the module-level ``Binary()`` method in
an underlying DB API-compliant module.
:Parameters:
string : str
the string to convert to a BLOB
:rtype: object
:return: the corresponding BLOB
"""
return self.__driver.get_import().Binary(string)
def Date(self, year, month, day):
"""
Returns an object representing the specified date.
This method is equivalent to the module-level ``Date()`` method in
an underlying DB API-compliant module.
:Parameters:
year
the year
month
the month
day
the day of the month
:return: an object containing the date
"""
return self.__driver.get_import().Date(year, month, day)
def DateFromTicks(self, secs):
"""
Returns an object representing the date *secs* seconds after the
epoch. For example:
.. python::
import time
d = db.DateFromTicks(time.time())
This method is equivalent to the module-level ``DateFromTicks()``
method in an underlying DB API-compliant module.
:Parameters:
secs : int
the seconds from the epoch
:return: an object containing the date
"""
        d = date.fromtimestamp(secs)
        return self.__driver.get_import().Date(d.year, d.month, d.day)
def Time(self, hour, minute, second):
"""
Returns an object representing the specified time.
This method is equivalent to the module-level ``Time()`` method in an
underlying DB API-compliant module.
:Parameters:
hour
the hour of the day
minute
the minute within the hour. 0 <= *minute* <= 59
second
the second within the minute. 0 <= *second* <= 59
:return: an object containing the time
"""
        return self.__driver.get_import().Time(hour, minute, second)
def TimeFromTicks(self, secs):
"""
Returns an object representing the time 'secs' seconds after the
epoch. For example:
.. python::
import time
d = db.TimeFromTicks(time.time())
This method is equivalent to the module-level ``TimeFromTicks()``
method in an underlying DB API-compliant module.
:Parameters:
secs : int
the seconds from the epoch
:return: an object containing the time
"""
dt = datetime.fromtimestamp(secs)
return self.__driver.get_import().Time(dt.hour, dt.minute, dt.second)
def Timestamp(self, year, month, day, hour, minute, second):
"""
Returns an object representing the specified time.
This method is equivalent to the module-level ``Timestamp()`` method
in an underlying DB API-compliant module.
:Parameters:
year
the year
month
the month
day
the day of the month
hour
the hour of the day
minute
the minute within the hour. 0 <= *minute* <= 59
second
the second within the minute. 0 <= *second* <= 59
:return: an object containing the timestamp
"""
return self.__driver.get_import().Timestamp(year, month, day,
hour, minute, second)
def TimestampFromTicks(self, secs):
"""
Returns an object representing the date and time ``secs`` seconds
after the epoch. For example:
.. python::
import time
d = db.TimestampFromTicks(time.time())
This method is equivalent to the module-level ``TimestampFromTicks()``
method in an underlying DB API-compliant module.
:Parameters:
secs : int
the seconds from the epoch
:return: an object containing the timestamp
"""
        dt = datetime.fromtimestamp(secs)
return self.__driver.get_import().Timestamp(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second)
def cursor(self):
"""
Get a cursor suitable for accessing the database. The returned object
conforms to the Python DB API cursor interface.
:return: the cursor
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
return Cursor(self.__db.cursor(), self.__driver)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def commit(self):
"""
Commit the current transaction.
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
self.__db.commit()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def rollback(self):
"""
Roll the current transaction back.
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
self.__db.rollback()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
def close(self):
"""
Close the database connection.
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.__driver.get_import()
try:
self.__db.close()
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
class DBDriver(object):
"""
Base class for all DB drivers.
"""
@abstract
def get_import(self):
"""
Get a bound import for the underlying DB API module. All subclasses
must provide an implementation of this method. Here's an example,
assuming the real underlying Python DB API module is 'foosql':
.. python::
def get_import(self):
import foosql
return foosql
:return: a bound module
"""
pass
def __display_name(self):
return self.get_display_name()
@abstract
def get_display_name(self):
"""
Get the driver's name, for display. The returned name ought to be
a reasonable identifier for the database (e.g., 'SQL Server',
'MySQL'). All subclasses must provide an implementation of this
method.
:rtype: str
:return: the driver's displayable name
"""
pass
display_name = property(__display_name,
doc='get a displayable name for the driver')
def connect(self,
host='localhost',
port=None,
user=None,
password='',
database=None):
"""
Connect to the underlying database. Subclasses should *not*
override this method. Instead, a subclass should override the
``do_connect()`` method.
:Parameters:
host : str
the host where the database lives
port : int
the TCP port to use when connecting, or ``None``
user : str
the user to use when connecting, or ``None``
password : str
the password to use when connecting, or ``None``
database : str
the name of the database to which to connect
        :rtype: ``DB``
        :return: a ``DB`` object representing the open database
:raise Warning: Non-fatal warning
:raise Error: Error
"""
dbi = self.get_import()
try:
self.__db = self.do_connect(host=host,
port=port,
user=user,
password=password,
database=database)
return DB(self.__db, self)
except dbi.Warning, val:
raise Warning(val)
except dbi.Error, val:
raise Error(val)
@abstract
def do_connect(self,
host='localhost',
port=None,
user='',
password='',
database='default'):
"""
Connect to the actual underlying database, using the driver.
Subclasses must provide an implementation of this method. The
method must return the result of the real DB API implementation's
``connect()`` method. For instance:
.. python::
def do_connect():
dbi = self.get_import()
return dbi.connect(host=host, user=user, passwd=password,
database=database)
There is no need to catch exceptions; the ``DBDriver`` class's
``connect()`` method handles that.
:Parameters:
host : str
the host where the database lives
port : int
the TCP port to use when connecting
user : str
the user to use when connecting
password : str
the password to use when connecting
database : str
the name of the database to which to connect
:rtype: object
:return: a DB API-compliant object representing the open database
:raise Warning: Non-fatal warning
:raise Error: Error
"""
pass
def get_rdbms_metadata(self, cursor):
"""
Return data about the RDBMS: the product name, the version,
etc. The result is a named tuple, with the following fields.
vendor
The product vendor, if applicable, or ``None`` if not known
product
The name of the database product, or ``None`` if not known
version
The database product version, or ``None`` if not known
:Parameters:
cursor : Cursor
a ``Cursor`` object from a recent query
:rtype: named tuple
:return: the vendor information
"""
return RDBMSMetadata('unknown', 'unknown', 'unknown')
def get_index_metadata(self, table, cursor):
"""
Get the metadata for the indexes for a table. Returns a list of
tuples, one for each index. Each tuple consists of the following::
(index_name, [index_columns], description)
The tuple elements have the following meanings.
index_name
the index name
index_columns
a list of column names
description
index description, or ``None``
The tuples are named tuples, so the fields may be referenced by the
names above or by position.
The default implementation of this method returns `None`
:Parameters:
table : str
table name
cursor : Cursor
a ``Cursor`` object from a recent query
:rtype: list of tuples
:return: the list of tuples, or ``None`` if not supported in the
underlying database
:raise Warning: Non-fatal warning
"""
return None
def get_table_metadata(self, table, cursor):
"""
Get the metadata for a table. Returns a list of tuples, one for
each column. Each tuple consists of the following::
(column_name, type_string, max_char_size, precision, scale, nullable)
The tuple elements have the following meanings.
column_name
the name of the column
type_string
the column type, as a string
max_char_size
the maximum size for a character field, or ``None``
precision
the precision, for a numeric field; or ``None``
scale
the scale, for a numeric field; or ``None``
nullable
``True`` if the column is nullable, ``False`` if it is not
The tuples are named tuples, so the fields may be referenced by the
names above or by position.
The default implementation uses the DB API's ``cursor.description``
field. Subclasses are free to override this method to produce their
own version that uses other means.
:Parameters:
table : str
the table name for which metadata is desired
cursor : Cursor
a ``Cursor`` object from a recent query
:rtype: list
:return: list of tuples, as described above
:raise Warning: Non-fatal warning
:raise Error: Error
"""
self._ensure_valid_table(cursor, table)
dbi = self.get_import()
cursor.execute('SELECT * FROM %s WHERE 1=0' % table)
result = []
for col in cursor.description:
name = col[0]
type = col[1]
size = col[2]
internalSize = col[3]
precision = col[4]
scale = col[5]
nullable = col[6]
            stype = None
try:
if type == dbi.BINARY:
stype = 'blob'
elif type == dbi.DATETIME:
stype = 'datetime'
elif type == dbi.NUMBER:
stype = 'numeric'
elif type == dbi.STRING:
sz = internalSize
                    if sz is None:
sz = size
elif sz <= 0:
sz = size
if sz == 1:
stype = 'char'
else:
stype = 'varchar'
size = sz
elif type == dbi.ROWID:
stype = 'id'
except AttributeError:
stype = None
            if not stype:
                stype = 'unknown (type code=%s)' % str(type)
data = TableMetadata(name, stype, size, precision, scale, nullable)
result += [data]
return result
def get_tables(self, cursor):
"""
Get the list of tables in the database.
:Parameters:
cursor : Cursor
a ``Cursor`` object from a recent query
:rtype: list
:return: List of table names. The list will be empty if the database
contains no tables.
:raise NotImplementedError: Capability not supported by database driver
:raise Warning: Non-fatal warning
:raise Error: Error
"""
raise NotImplementedError
def _ensure_valid_table(self, cursor, table_name):
"""
Determines whether a table name represents a legal table in the
current database, throwing an ``Error`` if not.
:Parameters:
cursor : Cursor
an open ``Cursor``
table_name : str
the table name
:raise Error: bad table name
"""
if not self._is_valid_table(cursor, table_name):
raise Error, 'No such table: "%s"' % table_name
def _is_valid_table(self, cursor, table_name):
"""
Determines whether a table name represents a legal table in the
current database, throwing an ``Error`` if not.
:Parameters:
cursor : Cursor
an open ``Cursor``
table_name : str
the table name
:rtype: bool
:return: ``True`` if the table is valid, ``False`` if not
"""
tables = self.get_tables(cursor)
return table_name in tables
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/grizzled/grizzled/db/base.py | Python | bsd-3-clause | 32,027 | 0.001186 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import IECore
import Gaffer
import GafferTest
class UndoTest( GafferTest.TestCase ) :
def testSetName( self ) :
s = Gaffer.ScriptNode()
self.assertEqual( s.undoAvailable(), False )
self.assertEqual( s.redoAvailable(), False )
self.assertRaises( Exception, s.undo )
n = Gaffer.Node()
s["a"] = n
self.assertEqual( n.getName(), "a" )
n.setName( "b" )
self.assertEqual( n.getName(), "b" )
self.assertEqual( s.undoAvailable(), False )
self.assertEqual( s.redoAvailable(), False )
self.assertRaises( Exception, s.undo )
with Gaffer.UndoContext( s ) :
n.setName( "c" )
self.assertEqual( s.undoAvailable(), True )
self.assertEqual( s.redoAvailable(), False )
s.undo()
self.assertEqual( s.undoAvailable(), False )
self.assertEqual( s.redoAvailable(), True )
self.assertEqual( n.getName(), "b" )
s.redo()
self.assertEqual( s.undoAvailable(), True )
self.assertEqual( s.redoAvailable(), False )
self.assertEqual( n.getName(), "c" )
self.assertRaises( Exception, s.redo )
def testSetInput( self ) :
s = Gaffer.ScriptNode()
n1 = GafferTest.AddNode()
n2 = GafferTest.AddNode()
s["n1"] = n1
s["n2"] = n2
with Gaffer.UndoContext( s ) :
n1["op1"].setInput( n2["sum"] )
self.assert_( n1["op1"].getInput().isSame( n2["sum"] ) )
s.undo()
self.assertEqual( n1["op1"].getInput(), None )
s.redo()
self.assert_( n1["op1"].getInput().isSame( n2["sum"] ) )
def testChildren( self ) :
s = Gaffer.ScriptNode()
n = Gaffer.Node()
self.assertEqual( n.parent(), None )
with Gaffer.UndoContext( s ) :
s["n"] = n
self.assert_( n.parent().isSame( s ) )
s.undo()
self.assertEqual( n.parent(), None )
s.redo()
self.assert_( n.parent().isSame( s ) )
def testDelete( self ) :
s = Gaffer.ScriptNode()
n1 = GafferTest.AddNode()
n2 = GafferTest.AddNode()
n3 = GafferTest.AddNode()
s.addChild( n1 )
s.addChild( n2 )
s.addChild( n3 )
n2["op1"].setInput( n1["sum"] )
n2["op2"].setInput( n1["sum"] )
n3["op1"].setInput( n2["sum"] )
n3["op2"].setInput( n2["sum"] )
self.assert_( n2["op1"].getInput().isSame( n1["sum"] ) )
self.assert_( n2["op2"].getInput().isSame( n1["sum"] ) )
self.assert_( n3["op1"].getInput().isSame( n2["sum"] ) )
self.assert_( n3["op2"].getInput().isSame( n2["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( filter = Gaffer.StandardSet( [ n2 ] ) )
self.assertEqual( n2["op1"].getInput(), None )
self.assertEqual( n2["op2"].getInput(), None )
self.assert_( n3["op1"].getInput().isSame( n1["sum"] ) )
self.assert_( n3["op2"].getInput().isSame( n1["sum"] ) )
s.undo()
self.assert_( n2["op1"].getInput().isSame( n1["sum"] ) )
self.assert_( n2["op2"].getInput().isSame( n1["sum"] ) )
self.assert_( n3["op1"].getInput().isSame( n2["sum"] ) )
self.assert_( n3["op2"].getInput().isSame( n2["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( filter = Gaffer.StandardSet( [ n2 ] ), reconnect = False )
self.assertEqual( n2["op1"].getInput(), None )
self.assertEqual( n2["op2"].getInput(), None )
self.assertEqual( n3["op1"].getInput(), None )
self.assertEqual( n3["op2"].getInput(), None )
s.undo()
self.assert_( n2["op1"].getInput().isSame( n1["sum"] ) )
self.assert_( n2["op2"].getInput().isSame( n1["sum"] ) )
self.assert_( n3["op1"].getInput().isSame( n2["sum"] ) )
self.assert_( n3["op2"].getInput().isSame( n2["sum"] ) )
def testDisable( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
with Gaffer.UndoContext( s, Gaffer.UndoContext.State.Disabled ) :
s["n"]["op1"].setValue( 10 )
self.assertFalse( s.undoAvailable() )
with Gaffer.UndoContext( s, Gaffer.UndoContext.State.Enabled ) :
with Gaffer.UndoContext( s, Gaffer.UndoContext.State.Disabled ) :
s["n"]["op1"].setValue( 20 )
self.assertFalse( s.undoAvailable() )
if __name__ == "__main__":
unittest.main()
| chippey/gaffer | python/GafferTest/UndoTest.py | Python | bsd-3-clause | 5,828 | 0.053706 |
# Copyright (C) 2015, Carlo de Franchis <[email protected]>
# Copyright (C) 2015, Gabriele Facciolo <[email protected]>
# Copyright (C) 2015, Enric Meinhardt <[email protected]>
from __future__ import print_function
import os
import numpy as np
from s2plib import common
from s2plib import rpc_utils
from s2plib import estimation
from s2plib.config import cfg
def image_keypoints(im, x, y, w, h, max_nb=None, extra_params=''):
"""
Runs SIFT (the keypoints detection and description only, no matching).
It uses Ives Rey Otero's implementation published in IPOL:
http://www.ipol.im/pub/pre/82/
Args:
        im: path to the input image
        x, y, w, h: four integers defining the rectangular ROI in the image.
            (x, y) is the top-left corner, and (w, h) are the dimensions of
            the rectangle
        max_nb (optional): maximal number of keypoints. If more keypoints are
            detected, those at smallest scales are discarded
        extra_params (optional): extra parameters to be passed to the sift
            binary
Returns:
path to the file containing the list of descriptors
"""
keyfile = common.tmpfile('.txt')
if max_nb:
cmd = "sift_roi %s %d %d %d %d --max-nb-pts %d %s -o %s" % (im, x, y, w,
h, max_nb,
extra_params,
keyfile)
else:
cmd = "sift_roi %s %d %d %d %d %s -o %s" % (im, x, y, w, h,
extra_params, keyfile)
common.run(cmd)
return keyfile
def keypoints_match(k1, k2, method='relative', sift_thresh=0.6, F=None,
model=None):
"""
Find matches among two lists of sift keypoints.
Args:
k1, k2: paths to text files containing the lists of sift descriptors
method (optional, default is 'relative'): flag ('relative' or
            'absolute') indicating whether to use absolute distance or relative
distance
sift_thresh (optional, default is 0.6): threshold for distance between SIFT
descriptors. These descriptors are 128-vectors, whose coefficients
range from 0 to 255, thus with absolute distance a reasonable value
for this threshold is between 200 and 300. With relative distance
(ie ratio between distance to nearest and distance to second
nearest), the commonly used value for the threshold is 0.6.
F (optional): affine fundamental matrix
model (optional, default is None): model imposed by RANSAC when
searching the set of inliers. If None all matches are considered as
inliers.
Returns:
if any, a numpy 2D array containing the list of inliers matches.
"""
# compute matches
mfile = common.tmpfile('.txt')
cmd = "matching %s %s -%s %f -o %s" % (k1, k2, method, sift_thresh, mfile)
if F is not None:
fij = ' '.join(str(x) for x in [F[0, 2], F[1, 2], F[2, 0],
F[2, 1], F[2, 2]])
cmd = "%s -f \"%s\"" % (cmd, fij)
common.run(cmd)
matches = np.loadtxt(mfile)
if matches.ndim == 2: # filter outliers with ransac
if model == 'fundamental' and len(matches) >= 7:
common.run("ransac fmn 1000 .3 7 %s < %s" % (mfile, mfile))
elif model == 'homography' and len(matches) >= 4:
common.run("ransac hom 1000 1 4 /dev/null /dev/null %s < %s" % (mfile,
mfile))
elif model == 'hom_fund' and len(matches) >= 7:
common.run("ransac hom 1000 2 4 /dev/null /dev/null %s < %s" % (mfile,
mfile))
common.run("ransac fmn 1000 .2 7 %s < %s" % (mfile, mfile))
if os.stat(mfile).st_size > 0: # return numpy array of matches
return np.loadtxt(mfile)
def matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h):
"""
Compute a list of SIFT matches between two images on a given roi.
The corresponding roi in the second image is determined using the rpc
functions.
Args:
im1, im2: paths to two large tif images
rpc1, rpc2: two instances of the rpc_model.RPCModel class
x, y, w, h: four integers defining the rectangular ROI in the first
image. (x, y) is the top-left corner, and (w, h) are the dimensions
of the rectangle.
Returns:
matches: 2D numpy array containing a list of matches. Each line
contains one pair of points, ordered as x1 y1 x2 y2.
The coordinate system is that of the full images.
"""
x2, y2, w2, h2 = rpc_utils.corresponding_roi(rpc1, rpc2, x, y, w, h)
# estimate an approximate affine fundamental matrix from the rpcs
rpc_matches = rpc_utils.matches_from_rpc(rpc1, rpc2, x, y, w, h, 5)
F = estimation.affine_fundamental_matrix(rpc_matches)
# if less than 10 matches, lower thresh_dog. An alternative would be ASIFT
thresh_dog = 0.0133
for i in range(2):
p1 = image_keypoints(im1, x, y, w, h, extra_params='--thresh-dog %f' % thresh_dog)
p2 = image_keypoints(im2, x2, y2, w2, h2, extra_params='--thresh-dog %f' % thresh_dog)
matches = keypoints_match(p1, p2, 'relative', cfg['sift_match_thresh'],
F, model='fundamental')
if matches is not None and matches.ndim == 2 and matches.shape[0] > 10:
break
thresh_dog /= 2.0
else:
print("WARNING: sift.matches_on_rpc_roi: found no matches.")
return None
return matches
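# Hedged usage sketch (illustrative, not part of the original module): given
# two rpc_model.RPCModel instances rpc1 and rpc2 loaded elsewhere, matches on
# a 500x500 ROI of the first image could be computed like this:
#
#     matches = matches_on_rpc_roi('im1.tif', 'im2.tif', rpc1, rpc2,
#                                  x=0, y=0, w=500, h=500)
#     if matches is not None:
#         print('{} matches (x1 y1 x2 y2 per row)'.format(len(matches)))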
| jmichel-otb/s2p | s2plib/sift.py | Python | agpl-3.0 | 5,706 | 0.001577 |
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
        different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
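        # Worked sketch of the 'distance' weighting used above (illustrative):
        # with neighbor targets y_i at distances d_i, the prediction is
        #     y_hat = sum_i(y_i / d_i) / sum_i(1 / d_i)
        # e.g. targets [0, 1] at distances [0.5, 1.5] give
        #     (0/0.5 + 1/1.5) / (1/0.5 + 1/1.5) = 0.25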
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/neighbors/regression.py | Python | mit | 11,000 | 0 |
# -*- coding: utf-8 -*-
from datetime import datetime
from django.http import HttpResponse
from smsbrana import SmsConnect
from smsbrana import signals
from smsbrana.const import DELIVERY_STATUS_DELIVERED, DATETIME_FORMAT
from smsbrana.models import SentSms
def smsconnect_notification(request):
sc = SmsConnect()
result = sc.inbox()
# print result
for delivered in result['delivery_report']:
sms_id = delivered['idsms']
if delivered['status'] != DELIVERY_STATUS_DELIVERED:
continue
try:
sms = SentSms.objects.get(sms_id=sms_id)
if sms.delivered:
continue
sms.delivered = True
sms.delivered_date = datetime.strptime(delivered['time'], DATETIME_FORMAT)
sms.save()
except SentSms.DoesNotExist:
# logger.error('sms delivered which wasn\'t sent' + str(delivered))
pass
# delete the inbox if there are 100+ items
if len(result['delivery_report']) > 100:
sc.inbox(delete=True)
signals.smsconnect_notification_received.send(sender=None, inbox=result, request=request)
return HttpResponse('OK')
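# Hedged sketch (illustrative): a consuming app can subscribe to the signal
# sent above; the receiver signature mirrors the send() call's kwargs.
#
#     from smsbrana import signals
#
#     def on_inbox(sender, inbox, request, **kwargs):
#         for report in inbox['delivery_report']:
#             ...  # custom handling
#
#     signals.smsconnect_notification_received.connect(on_inbox)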
| vlinhart/django-smsbrana | smsbrana/views.py | Python | bsd-3-clause | 1,174 | 0.001704 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
from superdesk.io.feed_parsers.dpa_iptc7901 import DPAIPTC7901FeedParser
from superdesk.tests import TestCase
def fixture(filename):
dirname = os.path.dirname(os.path.realpath(__file__))
return os.path.normpath(os.path.join(dirname, '../fixtures', filename))
class DPAIptcTestCase(TestCase):
parser = DPAIPTC7901FeedParser()
def open(self, filename):
provider = {'name': 'Test'}
return self.parser.parse(fixture(filename), provider)
def test_open_iptc7901_file(self):
with self.app.app_context():
item = self.open('IPTC7901.txt')
self.assertEqual('text', item['type'])
self.assertEqual('062', item['ingest_provider_sequence'])
self.assertEqual('i', item['anpa_category'][0]['qcode'])
self.assertEqual(211, item['word_count'])
self.assertEqual('Germany Social Democrats: Coalition talks with Merkel could fail =', item['headline'])
self.assertRegex(item['body_html'], '^<p></p><p>Negotiations')
self.assertEqual('Germany-politics', item['slugline'])
self.assertEqual(4, item['priority'])
self.assertEqual([{'qcode': 'i'}], item['anpa_category'])
self.assertTrue(item['ednote'].find('## Editorial contacts'))
self.assertEqual(item['dateline']['source'], 'dpa')
self.assertEqual(item['dateline']['located']['city'], 'Berlin')
def test_open_dpa_copyright(self):
with self.app.app_context():
item = self.open('dpa_copyright.txt')
self.assertEqual('text', item['type'])
self.assertEqual('rs', item['anpa_category'][0]['qcode'])
self.assertEqual('(Achtung)', item['headline'])
self.assertEqual('Impressum', item['slugline'])
| nistormihai/superdesk-core | tests/io/feed_parsers/dpa_test.py | Python | agpl-3.0 | 2,118 | 0.000472 |
import attr
import pandas as pd
import re
from ..base import TohuBaseGenerator
from ..logging import logger
__all__ = ['get_tohu_items_name', 'make_tohu_items_class']
def make_tohu_items_class(clsname, attr_names):
"""
Parameters
----------
clsname: string
Name of the class to be created
attr_names: list of strings
Names of the attributes of the class to be created
"""
item_cls = attr.make_class(clsname, {name: attr.ib() for name in attr_names}, repr=False, cmp=True, frozen=True)
def new_repr(self):
all_fields = ', '.join([f'{name}={repr(value)}' for name, value in attr.asdict(self).items()])
return f'{clsname}({all_fields})'
orig_eq = item_cls.__eq__
def new_eq(self, other):
"""
Custom __eq__() method which also allows comparisons with
tuples and dictionaries. This is mostly for convenience
during testing.
"""
if isinstance(other, self.__class__):
return orig_eq(self, other)
else:
if isinstance(other, tuple):
return attr.astuple(self) == other
elif isinstance(other, dict):
return attr.asdict(self) == other
else:
return NotImplemented
item_cls.__repr__ = new_repr
item_cls.__eq__ = new_eq
item_cls.keys = lambda self: attr_names
item_cls.__getitem__ = lambda self, key: getattr(self, key)
item_cls.as_dict = lambda self: attr.asdict(self)
item_cls.to_series = lambda self: pd.Series(attr.asdict(self))
return item_cls
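# Hedged usage sketch (illustrative, not part of the original module):
#
#     Point = make_tohu_items_class('Point', ['x', 'y'])
#     p = Point(x=1, y=2)
#     p == (1, 2)            # True, via the custom __eq__
#     p == {'x': 1, 'y': 2}  # True as well
#     p.to_series()          # pandas Series built from attr.asdict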
def get_tohu_items_name(cls):
"""
Return a string which defines the name of the namedtuple class which will be used
to produce items for the custom generator.
By default this will be the first part of the class name (before '...Generator'),
for example:
FoobarGenerator -> Foobar
QuuxGenerator -> Quux
However, it can be set explicitly by the user by defining `__tohu_items_name__`
in the class definition, for example:
class Quux(CustomGenerator):
__tohu_items_name__ = 'MyQuuxItem'
"""
assert issubclass(cls, TohuBaseGenerator)
try:
tohu_items_name = cls.__dict__['__tohu_items_name__']
logger.debug(f"Using item class name '{tohu_items_name}' (derived from attribute '__tohu_items_name__')")
except KeyError:
m = re.match('^(.*)Generator$', cls.__name__)
if m is not None:
tohu_items_name = m.group(1)
logger.debug(f"Using item class name '{tohu_items_name}' (derived from custom generator name)")
else:
msg = (
"Cannot derive class name for items to be produced by custom generator. "
"Please set '__tohu_items_name__' at the top of the custom generator's "
"definition or change its name so that it ends in '...Generator'"
)
raise ValueError(msg)
    return tohu_items_name
| maxalbert/tohu | tohu/v6/custom_generator/utils.py | Python | mit | 3,014 | 0.00365 |
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Invokes concatenate_application_code for applications specified on the command line.
"""
from os import path
import concatenate_application_code
import modular_build
import sys
try:
import simplejson as json
except ImportError:
import json
def main(argv):
try:
input_path_flag_index = argv.index('--input_path')
input_path = argv[input_path_flag_index + 1]
output_path_flag_index = argv.index('--output_path')
output_path = argv[output_path_flag_index + 1]
application_names = argv[1:input_path_flag_index]
debug_flag_index = argv.index('--debug')
minify = argv[debug_flag_index + 1] == '0'
except:
print('Usage: %s app_1 app_2 ... app_N --input_path <input_path> --output_path <output_path> --debug <0_or_1>' % argv[0])
raise
loader = modular_build.DescriptorLoader(input_path)
for app in application_names:
concatenate_application_code.build_application(app, loader, input_path, output_path, minify)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| hujiajie/chromium-crosswalk | third_party/WebKit/Source/devtools/scripts/build_applications.py | Python | bsd-3-clause | 1,259 | 0.003971 |
from .meta import BaseModelDependent
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import i0 as bessi0
class BaseSource(BaseModelDependent):
pass
class FakeSource(BaseSource):
def __call__(self, loc):
return loc
class SimpleSource(BaseSource):
def __init__(self, systemConfig):
        super(SimpleSource, self).__init__(systemConfig)
if hasattr(self, 'ny'):
raise NotImplementedError('Sources not implemented for 3D case')
self._z, self._y, self._x = np.mgrid[
self.zorig : self.dz * self.nz : self.dz,
self.yorig : self.dy * self.ny : self.dy,
self.xorig : self.dx * self.nx : self.dx
]
else:
self._z, self._x = np.mgrid[
self.zorig : self.dz * self.nz : self.dz,
self.xorig : self.dx * self.nx : self.dx
]
def dist(self, loc):
nsrc = len(loc)
if hasattr(self, 'ny'):
raise NotImplementedError('Sources not implemented for 3D case')
dist = np.sqrt((self._x.reshape((1, self.nz, self.ny, self.nx)) - loc[:,0].reshape((nsrc, 1, 1, 1)))**2
+ (self._y.reshape((1, self.nz, self.ny, self.nx)) - loc[:,1].reshape((nsrc, 1, 1, 1)))**2
+ (self._z.reshape((1, self.nz, self.ny, self.nx)) - loc[:,2].reshape((nsrc, 1, 1, 1)))**2)
else:
dist = np.sqrt((self._x.reshape((1, self.nz, self.nx)) - loc[:,0].reshape((nsrc, 1, 1)))**2
+ (self._z.reshape((1, self.nz, self.nx)) - loc[:,1].reshape((nsrc, 1, 1)))**2)
return dist
def vecIndexOf(self, loc):
return self.toVecIndex(self.linIndexOf(loc))
def linIndexOf(self, loc):
nsrc = loc.shape[0]
dists = self.dist(loc).reshape((nsrc, self.nrow))
return np.argmin(dists, axis=1)
def __call__(self, loc):
nsrc = loc.shape[0]
q = np.zeros((nsrc, self.nrow), dtype=np.complex128)
for i, index in enumerate(self.linIndexOf(loc)):
q[i,index] = 1.
return q.T
class StackedSimpleSource(SimpleSource):
def __call__(self, loc):
q = super(StackedSimpleSource, self).__call__(loc)
return np.vstack([q, np.zeros(q.shape, dtype=np.complex128)])
class SparseKaiserSource(SimpleSource):
initMap = {
# Argument Required Rename as ... Store as type
'ireg': (False, '_ireg', np.int64),
'freeSurf': (False, '_freeSurf', tuple),
}
HC_KAISER = {
1: 1.24,
2: 2.94,
3: 4.53,
4: 6.31,
5: 7.91,
6: 9.42,
7: 10.95,
8: 12.53,
9: 14.09,
10: 14.18,
}
def kws(self, offset):
'''
Finds 2D source terms to approximate a band-limited point source, based on
Hicks, Graham J. (2002) Arbitrary source and receiver positioning in finite-difference
schemes using Kaiser windowed sinc functions. Geophysics (67) 1, 156-166.
KaiserWindowedSinc(ireg, offset) --> 2D ndarray of size (2*ireg+1, 2*ireg+1)
Input offset is the 2D offsets in fractional gridpoints between the source location and
the nearest node on the modelling grid.
'''
try:
            b = self.HC_KAISER[self.ireg]
except KeyError:
            print('Kaiser windowed sinc function not implemented for half-width of %d!'%(self.ireg,))
raise
freg = 2*self.ireg+1
xOffset, zOffset = offset
# Grid from 0 to freg-1
Zi, Xi = np.mgrid[:freg,:freg]
# Distances from source point
dZi = (zOffset + self.ireg - Zi)
dXi = (xOffset + self.ireg - Xi)
# Taper terms for decay function
with warnings.catch_warnings():
warnings.simplefilter('ignore')
tZi = np.nan_to_num(np.sqrt(1 - (dZi / self.ireg)**2))
tXi = np.nan_to_num(np.sqrt(1 - (dXi / self.ireg)**2))
tZi[tZi == np.inf] = 0
tXi[tXi == np.inf] = 0
# Actual tapers for Kaiser window
taperZ = bessi0(b*tZi) / bessi0(b)
taperX = bessi0(b*tXi) / bessi0(b)
# Windowed sinc responses in Z and X
responseZ = np.sinc(dZi) * taperZ
responseX = np.sinc(dXi) * taperX
# Combined 2D source response
result = responseX * responseZ
return result
def __call__(self, sLocs):
ireg = self.ireg
freeSurf = self.freeSurf
N = sLocs.shape[0]
M = self.nz * self.nx
# Scale source based on the cellsize so that changing the grid doesn't
# change the overall source amplitude
srcScale = 1. / (self.dx * self.dz)
qI = self.linIndexOf(sLocs)
if ireg == 0:
# Closest gridpoint
q = sp.coo_matrix((srcScale, (np.arange(N), qI)), shape=(N, M))
else:
# Kaiser windowed sinc function
freg = 2*ireg+1
nnz = N * freg**2
lShift, sShift = np.mgrid[-ireg:ireg+1,-ireg:ireg+1]
shift = lShift * self.nx + sShift
entries = np.zeros((nnz,), dtype=np.complex128)
columns = np.zeros((nnz,))
rows = np.zeros((nnz,))
dptr = 0
for i in xrange(N):
Zi, Xi = (qI[i] / self.nx, np.mod(qI[i], self.nx))
offset = (sLocs[i][0] - Xi * self.dx, sLocs[i][1] - Zi * self.dz)
sourceRegion = self.kws(offset)
qshift = shift.copy()
if Zi < ireg:
index = ireg-Zi
if freeSurf[2]:
lift = np.flipud(sourceRegion[:index,:])
sourceRegion = sourceRegion[index:,:]
qshift = qshift[index:,:]
if freeSurf[2]:
sourceRegion[:index,:] -= lift
if Zi > self.nz-ireg-1:
index = self.nz-ireg-1 - Zi
if freeSurf[0]:
lift = np.flipud(sourceRegion[index:,:])
sourceRegion = sourceRegion[:index,:]
qshift = qshift[:index,:]
if freeSurf[0]:
sourceRegion[index:,:] -= lift
if Xi < ireg:
index = ireg-Xi
if freeSurf[3]:
lift = np.fliplr(sourceRegion[:,:index])
sourceRegion = sourceRegion[:,index:]
qshift = qshift[:,index:]
if freeSurf[3]:
sourceRegion[:,:index] -= lift
if Xi > self.nx-ireg-1:
index = self.nx-ireg-1 - Xi
if freeSurf[1]:
lift = np.fliplr(sourceRegion[:,index:])
sourceRegion = sourceRegion[:,:index]
qshift = qshift[:,:index]
if freeSurf[1]:
sourceRegion[:,index:] -= lift
data = srcScale * sourceRegion.ravel()
cols = qI[i] + qshift.ravel()
dlen = data.shape[0]
entries[dptr:dptr+dlen] = data
columns[dptr:dptr+dlen] = cols
rows[dptr:dptr+dlen] = i
dptr += dlen
q = sp.coo_matrix((entries[:dptr], (rows[:dptr],columns[:dptr])), shape=(N, M), dtype=np.complex128)
return q.T
@property
def ireg(self):
return getattr(self, '_ireg', 4)
class KaiserSource(SparseKaiserSource):
def __call__(self, sLocs):
q = super(KaiserSource, self).__call__(sLocs)
return q.toarray()
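# Hedged usage sketch (illustrative): the exact systemConfig keys come from
# BaseModelDependent, which is not shown here; the names below are assumptions
# inferred from the attributes used above (dz, dx, nz, nx, zorig, xorig, ...).
#
#     sc = {'dz': 1., 'dx': 1., 'nz': 100, 'nx': 100,
#           'zorig': 0., 'xorig': 0., 'freeSurf': (False,)*4, 'ireg': 4}
#     src = SparseKaiserSource(sc)
#     q = src(np.array([[50.3, 50.7]]))  # one source at a fractional gridpoint
#     # q has one column per source and nz*nx rows of windowed-sinc weights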
| uwoseis/anemoi | anemoi/source.py | Python | mit | 8,029 | 0.011085 |
# This module imports names for backwards compatibility and to ensure
# that pickled objects in existing sessions can be unpickled.
__all__ = ['DAObject', 'DAList', 'DADict', 'DAOrderedDict', 'DASet', 'DAFile', 'DAFileCollection', 'DAFileList', 'DAStaticFile', 'DAEmail', 'DAEmailRecipient', 'DAEmailRecipientList', 'DATemplate', 'DAEmpty', 'DALink', 'RelationshipTree', 'DAContext']
from docassemble.base.util import DAObject, DAList, DADict, DAOrderedDict, DASet, DAFile, DAFileCollection, DAFileList, DAStaticFile, DAEmail, DAEmailRecipient, DAEmailRecipientList, DATemplate, DAEmpty, DALink, RelationshipTree, DAContext, DAObjectPlusParameters, DACatchAll, RelationshipDir, RelationshipPeer, DALazyTemplate, DALazyTableTemplate, selections, DASessionLocal, DADeviceLocal, DAUserLocal
| jhpyle/docassemble | docassemble_base/docassemble/base/core.py | Python | mit | 790 | 0.002532 |
"""
Django settings for chatbot_website project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['CHATBOT_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'chatbot_interface',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chatbot_website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chatbot_website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgi_redis.RedisChannelLayer",
"CONFIG": {
"hosts": [os.environ.get('REDIS_URL', 'redis://localhost:6379')],
},
"ROUTING": "chatbot_interface.routing.channel_routing",
},
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file_django': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'logs/debug_django.log',
},
'file_chatbot': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'logs/debug_chatbot.log',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
},
},
'loggers': {
'django': {
'handlers': ['console', 'file_django'],
'level': 'INFO',
'propagate': True,
},
'chatbot_interface': {
'handlers': ['console', 'file_chatbot'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
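# Hedged note (illustrative): these settings expect two environment variables
# to be set before Django starts, matching the os.environ lookups above, e.g.
#
#     export CHATBOT_SECRET_KEY='change-me'
#     export REDIS_URL='redis://localhost:6379'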
| Catherine-Chu/DeepQA | chatbot_website/chatbot_website/settings.py | Python | apache-2.0 | 4,300 | 0.00093 |
# MIT License
#
# Copyright (C) IBM Corporation 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Machine learning models with differential privacy
"""
from diffprivlib.models.naive_bayes import GaussianNB
from diffprivlib.models.k_means import KMeans
from diffprivlib.models.linear_regression import LinearRegression
from diffprivlib.models.logistic_regression import LogisticRegression
from diffprivlib.models.pca import PCA
from diffprivlib.models.standard_scaler import StandardScaler
from diffprivlib.models.forest import RandomForestClassifier
| IBM/differential-privacy-library | diffprivlib/models/__init__.py | Python | mit | 1,558 | 0.005777 |
#
# Copyright 2014-2015 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import ApiCli
class SourceList(ApiCli):
def __init__(self):
ApiCli.__init__(self)
self.path = "v1/account/sources/"
self.method = "GET"
def getDescription(self):
return "Lists the sources in a Boundary account"
| wcainboundary/boundary-api-cli | boundary/source_list.py | Python | apache-2.0 | 856 | 0 |
import torch
from base_model import Loss
from train import dboxes300_coco
from opt_loss import OptLoss
# In:
# ploc : N x 8732 x 4
# plabel : N x 8732
# gloc : N x 8732 x 4
# glabel : N x 8732
data = torch.load('loss.pth')
ploc = data['ploc'].cuda()
plabel = data['plabel'].cuda()
gloc = data['gloc'].cuda()
glabel = data['glabel'].cuda()
dboxes = dboxes300_coco()
# loss = Loss(dboxes).cuda()
loss = OptLoss(dboxes).cuda()
loss = torch.jit.trace(loss, (ploc, plabel, gloc, glabel))
# print(traced_loss.graph)
# timing
timing_iterations = 1000
import time
# Dry run to eliminate JIT compile overhead
dl = torch.tensor([1.], device="cuda")
l = loss(ploc, plabel, gloc, glabel)
l.backward(dl)
# fprop
torch.cuda.synchronize()
start = time.time()
with torch.no_grad():
for _ in range(timing_iterations):
l = loss(ploc, plabel, gloc, glabel)
print('loss: {}'.format(l))
torch.cuda.synchronize()
end = time.time()
time_per_fprop = (end - start) / timing_iterations
print('took {} seconds per iteration (fprop)'.format(time_per_fprop))
# fprop + bprop
torch.cuda.synchronize()
start = time.time()
for _ in range(timing_iterations):
l = loss(ploc, plabel, gloc, glabel)
l.backward(dl)
torch.cuda.synchronize()
end = time.time()
time_per_fprop_bprop = (end - start) / timing_iterations
print('took {} seconds per iteration (fprop + bprop)'.format(time_per_fprop_bprop))
print(loss.graph_for(ploc, plabel, gloc, glabel))
| mlperf/training_results_v0.7 | NVIDIA/benchmarks/ssd/implementations/pytorch/test/opt_loss_test.py | Python | apache-2.0 | 1,453 | 0.003441 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Session',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('begin', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Variable',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(max_length=32)),
('type', models.IntegerField(choices=[('Integer', int), ('Floating Point', float), ('Text', str), ('Flag', bool)])),
('value', models.CharField(max_length=130)),
('session', models.ForeignKey(to='hs_tracking.Session')),
],
),
migrations.CreateModel(
name='Visitor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_seen', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.AddField(
model_name='session',
name='visitor',
field=models.ForeignKey(to='hs_tracking.Visitor'),
),
]
| hydroshare/hydroshare | hs_tracking/migrations/0001_initial.py | Python | bsd-3-clause | 1,731 | 0.002889 |
import random
from ..simulator import Simulator
class RandomMover(Simulator):
ACTIONS = ('up', 'down', 'left', 'right')
def start(self):
self.init_game()
while True:
self._check_pygame_events()
for drone in self.drones:
drone.do_move(random.choice(self.ACTIONS))
self.print_map()
self._draw()
| dev-coop/plithos | src/plithos/simulations/random_mover.py | Python | mit | 388 | 0 |
import logging
import re
import markdown
from django.conf import settings
from django.db import models
from django.template import Template, Context, loader
import sys
from selvbetjening.core.mail import send_mail
logger = logging.getLogger('selvbetjening.email')
class EmailSpecification(models.Model):
BODY_FORMAT_CHOICES = (
('html', 'HTML'),
('markdown', 'Markdown')
)
CONTEXT_CHOICES = (
('user', 'User'),
('attendee', 'Attendee')
)
# template
subject = models.CharField(max_length=128)
body = models.TextField()
body_format = models.CharField(max_length=32, choices=BODY_FORMAT_CHOICES, default='markdown')
# context
template_context = models.CharField(max_length=32, choices=CONTEXT_CHOICES, default='user')
# meta
date_created = models.DateField(editable=False, auto_now_add=True)
def send_email_user(self, user, internal_sender_id):
if self.template_context == 'attendee':
raise ValueError
ok, email, err = self.render_user(user)
if not ok:
# Warn an admin and log the error silently
logger.exception('Failure rendering e-mail (template pk: %s) -- Addressed to %s', self.pk, user.email, exc_info=err, extra={
'related_user': user})
return
instance = self._send_mail(user.email, email, internal_sender_id)
logger.info('E-mail queued (%s) -- Addressed to %s', email['subject'], user.email,
extra={
'related_user': user,
'related_email': instance
})
def send_email_attendee(self, attendee, internal_sender_id):
ok, email, err = self.render_attendee(attendee)
if not ok:
# Warn an admin and log the error silently
logger.exception('Failure rendering e-mail (template pk: %s) -- Addressed to %s', self.pk, attendee.user.email, exc_info=err, extra={
'related_user': attendee.user,
'related_attendee': attendee})
return
instance = self._send_mail(attendee.user.email, email, internal_sender_id)
logger.info('E-mail queued (%s) -- Addressed to %s', email['subject'], attendee.user.email,
extra={
'related_user': attendee.user,
'related_attendee': attendee,
'related_email': instance
})
def _send_mail(self, to_address, email, internal_sender_id):
mails = send_mail(email['subject'],
email['body_plain'],
settings.DEFAULT_FROM_EMAIL,
[to_address],
body_html=email['body_html'],
internal_sender_id=internal_sender_id)
return mails[0]
def render_user(self, user):
"""
Renders the e-mail template using a user object as source.
An error is thrown if the template context is Attendee.
"""
if self.template_context == 'attendee':
raise ValueError
return self._render(self._get_context(user))
def render_attendee(self, attendee):
"""
        Renders the e-mail template using an attendee object as source.
"""
return self._render(self._get_context(attendee.user, attendee=attendee))
def render_dummy(self):
context = {
# user context
'username': 'johndoe',
'full_name': 'John Doe',
'email': '[email protected]',
# attendee.event context
'event_title': 'Dummy Event',
'invoice_plain': 'INVOICE',
'invoice_html': 'INVOICE_HTML'
}
return self._render(context)
def _get_context(self, user, attendee=None):
# lazy import, prevent circular import in core.events
from selvbetjening.core.events.options.dynamic_selections import SCOPE, dynamic_selections
context = {
# user context
'username': user.username,
'full_name': ('%s %s' % (user.first_name, user.last_name)).strip(),
'email': user.email
}
if attendee is not None:
invoice = dynamic_selections(SCOPE.VIEW_USER_INVOICE, attendee)
invoice_html = loader.render_to_string('events/parts/invoice.html', {
'attendee': attendee,
'invoice': invoice
})
invoice_text = loader.render_to_string('events/parts/invoice_text.html', {
'attendee': attendee,
'invoice': invoice
})
context.update({
# attendee.event context
'event_title': attendee.event.title,
'attendee': attendee,
'invoice_plain': invoice_text,
'invoice_html': invoice_html,
})
for option, selection in invoice:
context['selected_%s' % option.pk] = selection is not None
return context
def _render(self, context):
context = Context(context)
try:
email = {
'subject': self.subject,
'body_plain': self._get_rendered_body_plain(context),
'body_html': self._get_rendered_body_html(context)
}
return True, email, None
except Exception:
return False, None, sys.exc_info()
def _get_rendered_body_plain(self, context):
if self.body_format == 'markdown':
body = self.body
else:
body = re.sub(r'<[^>]*?>', '', self.body)
context['invoice'] = context.get('invoice_plain', None)
return Template(body).render(context)
def _get_rendered_body_html(self, context):
if self.body_format == 'markdown':
body = markdown.markdown(self.body)
else:
body = self.body
context['invoice'] = context.get('invoice_html', None)
return Template(body).render(context)
def __unicode__(self):
return self.subject
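# Hedged usage sketch (illustrative):
#
#     spec = EmailSpecification.objects.get(pk=1)
#     ok, email, err = spec.render_dummy()
#     if ok:
#         print(email['subject'])
#         print(email['body_plain'])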
| animekita/selvbetjening | selvbetjening/core/mailcenter/models.py | Python | mit | 6,200 | 0.001774 |
#!/usr/bin/env python3
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib2
import pytest
import sys
from typing import List, Optional
HERE = os.path.abspath(__file__)
sys.path.insert(0, os.path.dirname(os.path.dirname(HERE)) + os.sep + "drivers")
import inspect_compute_results
def test_unknown_command_rejected(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['unknown', '1.json', '2.json'])
assert 'ValueError: Unknown command' in str(value_error)
def test_show_rejects_multiple_args(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['show', '1.json', '2.json'])
assert 'ValueError: Command "show" requires exactly 1 input; 2 provided' in str(value_error)
def test_exactdiff_rejects_one_arg(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['exactdiff', '1.json'])
assert 'ValueError: Command "exactdiff" requires exactly 2 inputs; 1 provided' in str(value_error)
def test_exactdiff_rejects_three_args(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['exactdiff', '1.json', '2.json', '3.json'])
assert 'ValueError: Command "exactdiff" requires exactly 2 inputs; 3 provided' in str(value_error)
def test_fuzzydiff_rejects_one_arg(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['fuzzydiff', '1.json'])
assert 'ValueError: Command "fuzzydiff" requires exactly 2 inputs; 1 provided' in str(value_error)
def test_fuzzydiff_rejects_three_args(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['fuzzydiff', '1.json', '2.json', '3.json'])
assert 'ValueError: Command "fuzzydiff" requires exactly 2 inputs; 3 provided' in str(value_error)
def test_show_handles_file_not_found(tmp_path: pathlib2.Path):
with pytest.raises(FileNotFoundError) as file_not_found_error:
inspect_compute_results.main_helper(['show', 'nofile.json'])
assert 'FileNotFoundError: Input file "nofile.json" not found' in str(file_not_found_error)
def test_exactdiff_handles_first_file_not_found(tmp_path: pathlib2.Path):
onefile = tmp_path / 'something.json'
onefile.touch(exist_ok=False)
with pytest.raises(FileNotFoundError) as file_not_found_error:
inspect_compute_results.main_helper(['exactdiff', 'nofile.json', str(onefile)])
assert 'FileNotFoundError: Input file "nofile.json" not found' in str(file_not_found_error)
def test_exactdiff_handles_second_file_not_found(tmp_path: pathlib2.Path):
onefile = tmp_path / 'something.json'
onefile.touch(exist_ok=False)
with pytest.raises(FileNotFoundError) as file_not_found_error:
inspect_compute_results.main_helper(['exactdiff', str(onefile), 'nofile.json'])
assert 'FileNotFoundError: Input file "nofile.json" not found' in str(file_not_found_error)
def test_fuzzydiff_handles_first_file_not_found(tmp_path: pathlib2.Path):
onefile = tmp_path / 'something.json'
onefile.touch(exist_ok=False)
with pytest.raises(FileNotFoundError) as file_not_found_error:
inspect_compute_results.main_helper(['fuzzydiff', 'nofile.json', str(onefile)])
assert 'FileNotFoundError: Input file "nofile.json" not found' in str(file_not_found_error)
def test_fuzzydiff_handles_second_file_not_found(tmp_path: pathlib2.Path):
onefile = tmp_path / 'something.json'
onefile.touch(exist_ok=False)
with pytest.raises(FileNotFoundError) as file_not_found_error:
inspect_compute_results.main_helper(['fuzzydiff', str(onefile), 'nofile.json'])
assert 'FileNotFoundError: Input file "nofile.json" not found' in str(file_not_found_error)
def check_diff(tmp_path: pathlib2.Path, output1: str, output2: str, is_exact: bool,
extra_args: Optional[List[str]]=None) -> int:
results1_path = tmp_path / '1.info.json'
results2_path = tmp_path / '2.info.json'
with results1_path.open(mode='w') as results1_file:
results1_file.write(output1)
with results2_path.open(mode='w') as results2_file:
results2_file.write(output2)
args = ['exactdiff' if is_exact else 'fuzzydiff',
str(results1_path),
str(results2_path)]
if extra_args:
args += extra_args
return inspect_compute_results.main_helper(args)
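# Hedged note (illustrative): the helper above mirrors a command line like
#
#     inspect_compute_results.py fuzzydiff 1.info.json 2.info.json \
#         --rel_tol=1e-06 --abs_tol=1e-06
#
# with 'exactdiff' taking no tolerance flags.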
def check_exact_diff(tmp_path: pathlib2.Path, output1: str, output2: str) -> int:
return check_diff(tmp_path, output1, output2, is_exact=True)
def check_fuzzy_diff(tmp_path: pathlib2.Path, output1: str, output2: str,
extra_args: Optional[List[str]]=None) -> int:
return check_diff(tmp_path, output1, output2, is_exact=False, extra_args=extra_args)
def test_exactdiff_pass1(tmp_path: pathlib2.Path):
assert 0 == check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "#### Start compute shader", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'), (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'))
def test_exactdiff_pass2(tmp_path: pathlib2.Path):
assert 0 == check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "#### Start compute shader", "outputs": '
'{"ssbo":[[2.0]]}}'), (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo": [ [2.0] ] } }'))
def test_exactdiff_pass3(tmp_path: pathlib2.Path):
assert 0 == check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "#### Start compute shader", "outputs": '
'{"ssbo":[[88.0, 12.3],[28,12,14],[1]]}}'), (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo":[[88.0, 12.3],[28,12,14],[1]]}}'))
def test_exactdiff_fail_first_invalid(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
check_exact_diff(tmp_path, (
'not_json'), (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo": [ [2.0] ] } }'))
assert 'ValueError: First input file did not contain valid SSBO data' in str(value_error)
def test_exactdiff_fail_second_invalid(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
check_exact_diff(tmp_path, (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo": [ [2.0] ] } }'), (
'not_json'))
assert 'ValueError: Second input file did not contain valid SSBO data' in str(value_error)
def test_exactdiff_fail_mismatched_number_of_fields(tmp_path: pathlib2.Path):
assert 0 != check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'), (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88]]}}'))
def test_exactdiff_fail_mismatched_field_length(tmp_path: pathlib2.Path):
assert 0 != check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'), (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28]]}}'))
def test_exactdiff_fail_mismatched_field_element(tmp_path: pathlib2.Path):
assert 0 != check_exact_diff(tmp_path, (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28]]}}'), (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,17,28,22,24,24,28]]}}'))
def test_fuzzydiff_pass1(tmp_path: pathlib2.Path):
float1 = 88.0
float2 = 1e+6
float3 = 1.3e-6
float4 = 0.0
float1ish = float1 + 0.00000001
float2ish = float2 + 0.0001
float3ish = float3 + 1.3e-15
float4ish = float4 + 1e-20
assert float1 != float1ish
assert float2 != float2ish
assert float3 != float3ish
assert float4 != float4ish
output1 = ('{"status": "SUCCESS", "log": "...", "outputs": {"ssbo":[[' + str(float1) + '],['
+ str(float2) + ',' + str(float3) + ',' + str(float4) + ']]}}')
output2 = ('{"status": "SUCCESS", "log": "...", "outputs": {"ssbo":[[' + str(float1ish) + '],['
+ str(float2ish) + ',' + str(float3ish) + ',' + str(float4ish) + ']]}}')
assert 0 != check_exact_diff(tmp_path, output1, output2)
assert 0 == check_fuzzy_diff(tmp_path, output1, output2)
def test_fuzzydiff_pass2(tmp_path: pathlib2.Path):
float1 = 88.0
float2 = 1e+6
float3 = 1.3e-6
float4 = 0.0
float1ish = float1 + 0.00009
float2ish = float2 + 0.00009
float3ish = float3 + 0.00009
float4ish = float4 + 0.00009
assert float1 != float1ish
assert float2 != float2ish
assert float3 != float3ish
assert float4 != float4ish
output1 = ('{"status": "SUCCESS", "log": "...", "outputs": {"ssbo":[[' + str(float1) + '],['
+ str(float2) + ',' + str(float3) + ',' + str(float4) + ']]}}')
output2 = ('{"status": "SUCCESS", "log": "...", "outputs": {"ssbo":[[' + str(float1ish) + '],['
+ str(float2ish) + ',' + str(float3ish) + ',' + str(float4ish) + ']]}}')
assert 0 != check_exact_diff(tmp_path, output1, output2)
assert 0 == check_fuzzy_diff(tmp_path, output1, output2, extra_args=['--abs_tol=0.0001'])
def test_fuzzydiff_pass3(tmp_path: pathlib2.Path):
float1 = 88.0
float2 = 1e+6
float3 = 1.3e-6
float4 = 0.0
float1ish = float1 + 0.0000001
float2ish = float2 + 1.0
float3ish = float3 + 1e-12
float4ish = float4 + 1e-6
assert float1 != float1ish
assert float2 != float2ish
assert float3 != float3ish
assert float4 != float4ish
output1 = ('{"status": "SUCCESS", "log": "...", "outputs": {"ssbo":[[' + str(float1) + '],['
+ str(float2) + ',' + str(float3) + ',' + str(float4) + ']]}}')
output2 = ('{"status": "SUCCESS", "log": "...", "outputs": {"ssbo":[[' + str(float1ish) + '],['
+ str(float2ish) + ',' + str(float3ish) + ',' + str(float4ish) + ']]}}')
assert 0 != check_exact_diff(tmp_path, output1, output2)
assert 0 == check_fuzzy_diff(tmp_path, output1, output2,
extra_args=['--rel_tol=1e-06', '--abs_tol=1e-06'])
def test_fuzzydiff_fail_first_invalid(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
        check_fuzzy_diff(tmp_path, (
'not_json'), (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo": [ [2.0] ] } }'))
assert 'ValueError: First input file did not contain valid SSBO data' in str(value_error)
def test_fuzzydiff_fail_second_invalid(tmp_path: pathlib2.Path):
with pytest.raises(ValueError) as value_error:
        check_fuzzy_diff(tmp_path, (
'{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
'{"ssbo": [ [2.0] ] } }'), (
'not_json'))
assert 'ValueError: Second input file did not contain valid SSBO data' in str(value_error)
def test_fuzzydiff_fail_mismatched_number_of_fields(tmp_path: pathlib2.Path):
    assert 0 != check_fuzzy_diff(tmp_path, (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'), (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88]]}}'))
def test_fuzzydiff_fail_mismatched_field_length(tmp_path: pathlib2.Path):
    assert 0 != check_fuzzy_diff(tmp_path, (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'), (
'{"status": "SUCCESS", "log": "...", "outputs": '
'{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28]]}}'))
def test_fuzzydiff_fail_mismatched_field_element(tmp_path: pathlib2.Path):
float1 = 88.0
float2 = 1e+6
float3 = 1.3e-6
float4 = 0.0
float1ish = float1 + 0.0000001
float2ish = float2 + 1.0
float3ish = float3 + 1e-12
    float4ish = float4 + 1e-4  # Too big a difference
assert float1 != float1ish
assert float2 != float2ish
assert float3 != float3ish
assert float4 != float4ish
output1 = ('{"status": "SUCCESS", "log": "...", "outputs": {"ssbo":[[' + str(float1) + '],['
+ str(float2) + ',' + str(float3) + ',' + str(float4) + ']]}}')
output2 = ('{"status": "SUCCESS", "log": "...", "outputs": {"ssbo":[[' + str(float1ish) + '],['
+ str(float2ish) + ',' + str(float3ish) + ',' + str(float4ish) + ']]}}')
assert 0 != check_exact_diff(tmp_path, output1, output2)
assert 0 != check_fuzzy_diff(tmp_path, output1, output2,
extra_args=['--rel_tol=1e-06', '--abs_tol=1e-06'])
def test_bad_rel_tol():
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['fuzzydiff', '1.json', '2.json', '--rel_tol=notafloat'])
assert 'ValueError: Positive floating-point value required for --rel_tol argument'\
in str(value_error)
def test_bad_rel_tol2():
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['fuzzydiff', '1.json', '2.json', '--rel_tol=0.0'])
assert 'ValueError: Positive floating-point value required for --rel_tol argument'\
in str(value_error)
def test_bad_rel_tol3():
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['fuzzydiff', '1.json', '2.json', '--rel_tol=-0.1'])
assert 'ValueError: Positive floating-point value required for --rel_tol argument'\
in str(value_error)
def test_bad_abs_tol():
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['fuzzydiff', '1.json', '2.json', '--abs_tol=notafloat'])
assert 'ValueError: Non-negative floating-point value required for --abs_tol argument'\
in str(value_error)
def test_bad_abs_tol2():
with pytest.raises(ValueError) as value_error:
inspect_compute_results.main_helper(['fuzzydiff', '1.json', '2.json', '--abs_tol=-0.1'])
assert 'ValueError: Non-negative floating-point value required for --abs_tol argument'\
in str(value_error)
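# For orientation: the check_exact_diff / check_fuzzy_diff helpers used by the
# tests above are defined earlier in this module (outside this excerpt). The
# sketch below shows one plausible shape for such a helper; the file names,
# the parameter order and the 'mode' argument are assumptions, not the actual
# implementation.
def _example_check_diff(tmp_path: pathlib2.Path, mode, contents1, contents2,
                        extra_args=None):
    # Write both JSON payloads to files under tmp_path and invoke the tool,
    # returning its exit code (0 means the two SSBO dumps matched).
    file1 = tmp_path / 'results_1.json'
    file2 = tmp_path / 'results_2.json'
    file1.write_text(contents1)
    file2.write_text(contents2)
    args = [mode, str(file1), str(file2)] + list(extra_args or [])
    return inspect_compute_results.main_helper(args)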
| google/graphicsfuzz | python/src/main/python/test_scripts/inspect_compute_results_test.py | Python | apache-2.0 | 15,285 | 0.004122 |
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
import numpy as np
class SimControlCZ(Instrument):
"""
Noise and other parameters for cz_superoperator_simulation_new
"""
def __init__(self, name, **kw):
super().__init__(name, **kw)
# Noise parameters
self.add_parameter(
"T1_q0",
unit="s",
label="T1 fluxing qubit",
docstring="T1 fluxing qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T1_q1",
unit="s",
label="T1 static qubit",
docstring="T1 static qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T2_q1",
unit="s",
label="T2 static qubit",
docstring="T2 static qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T2_q0_amplitude_dependent",
docstring="fitcoefficients giving T2_q0 or Tphi_q0 as a function of inverse sensitivity (in units of w_q0/Phi_0): a, b. Function is ax+b",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([-1, -1]),
)
# for flux noise simulations
self.add_parameter(
"sigma_q0",
unit="flux quanta",
docstring="standard deviation of the Gaussian from which we sample the flux bias, q0",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"sigma_q1",
unit="flux quanta",
docstring="standard deviation of the Gaussian from which we sample the flux bias, q1",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"w_q1_sweetspot",
docstring="NB: different from the operating point in general",
parameter_class=ManualParameter,
vals=vals.Numbers(),
)
self.add_parameter(
"w_q0_sweetspot",
docstring="NB: different from the operating point in general",
parameter_class=ManualParameter,
vals=vals.Numbers(),
)
self.add_parameter(
"Z_rotations_length",
unit="s",
docstring="duration of the single qubit Z rotations at the end of the pulse",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"total_idle_time",
unit="s",
docstring="duration of the idle time",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
# Control parameters for the simulations
self.add_parameter(
"dressed_compsub",
docstring="true if we use the definition of the comp subspace that uses the dressed 00,01,10,11 states",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=True,
)
self.add_parameter(
"distortions",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"voltage_scaling_factor",
unit="a.u.",
docstring="scaling factor for the voltage for a CZ pulse",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"n_sampling_gaussian_vec",
docstring="array. each element is a number of samples from the gaussian distribution. Std to guarantee convergence is [11]. More are used only to verify convergence",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([11]),
)
self.add_parameter(
"cluster",
docstring="true if we want to use the cluster",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"look_for_minimum",
docstring="changes cost function to optimize either research of minimum of avgatefid_pc or to get the heat map in general",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"T2_scaling",
unit="a.u.",
docstring="scaling factor for T2_q0_amplitude_dependent",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"waiting_at_sweetspot",
unit="s",
docstring="time spent at sweetspot during the two halves of a netzero pulse",
parameter_class=ManualParameter,
vals=vals.Numbers(min_value=0),
initial_value=0,
)
self.add_parameter(
"which_gate",
docstring="Direction of the CZ gate. E.g. 'NE'. Used to extract parameters from the fluxlutman ",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="NE",
)
self.add_parameter(
"simstep_div",
docstring="Division of the simulation time step. 4 is a good one, corresponding to a time step of 0.1 ns. For smaller values landscapes can deviate significantly from experiment.",
parameter_class=ManualParameter,
vals=vals.Numbers(min_value=1),
initial_value=4,
)
self.add_parameter(
"gates_num",
docstring="Chain the same gate gates_num times.",
parameter_class=ManualParameter,
            # It should be an integer, but the measurement control casts to float when setting sweep points
vals=vals.Numbers(min_value=1),
initial_value=1,
)
self.add_parameter(
"gates_interval",
docstring="Time interval that separates the gates if gates_num > 1.",
parameter_class=ManualParameter,
unit='s',
vals=vals.Numbers(min_value=0),
initial_value=0,
)
self.add_parameter(
"cost_func",
docstring="Used to calculate the cost function based on the quantities of interest (qoi). Signature: cost_func(qoi). NB: qoi's that represent percentages will be in [0, 1] range. Inspect 'pycqed.simulations.cz_superoperator_simulation_new_functions.simulate_quantities_of_interest_superoperator_new??' in notebook for available qoi's.",
parameter_class=ManualParameter,
unit='a.u.',
vals=vals.Callable(),
initial_value=None,
)
self.add_parameter(
"cost_func_str",
docstring="Not loaded automatically. Convenience parameter to store the cost function string and use `exec('sim_control_CZ.cost_func(' + sim_control_CZ.cost_func_str() + ')')` to load it.",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="lambda qoi: np.log10((1 - qoi['avgatefid_compsubspace_pc']) * (1 - 0.5) + qoi['L1'] * 0.5)",
)
self.add_parameter(
"double_cz_pi_pulses",
docstring="If set to 'no_pi_pulses' or 'with_pi_pulses' will simulate two sequential CZs with or without Pi pulses simulated as an ideal superoperator multiplication.",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="", # Use empty string to evaluate to false
)
# for ramsey/Rabi simulations
self.add_parameter(
"detuning",
unit="Hz",
docstring="detuning of w_q0 from its sweet spot value",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"initial_state",
docstring="determines initial state for ramsey_simulations_new",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="changeme",
)
# for spectral tomo
self.add_parameter(
"repetitions",
docstring="Repetitions of CZ gate, used for spectral tomo",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"time_series",
docstring="",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"overrotation_sims",
docstring="instead of constant shift in flux, we use constant rotations around some axis",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"axis_overrotation",
docstring="",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([1, 0, 0]),
)
def set_cost_func(self, cost_func_str=None):
"""
        Sets self.cost_func from self.cost_func_str, or from the provided
        string (which is stored in self.cost_func_str first)
"""
if cost_func_str is None:
cost_func_str = self.cost_func_str()
else:
self.cost_func_str(cost_func_str)
exec("self.cost_func(" + self.cost_func_str() + ")")
def LJP(r, R_min, depth=1., p12=12, p6=6):
"""
Lennard-Jones potential function
    Added here to be used with adaptive sampling of a cost function that
    diverges at zero and might otherwise get the adaptive learner stuck,
    keeping it from sampling the rest of the landscape
"""
return depth * ((R_min / r)**p12 - 2 * (R_min / r)**p6)
def LJP_mod(r, R_min, depth=100., p12=12, p6=6):
"""
    Modified Lennard-Jones potential function
    Modification: the minimum is moved to zero and the potential is made
    positive
    Added here to be used with adaptive sampling of a cost function that
    diverges at zero and might otherwise get the adaptive learner stuck,
    keeping it from sampling the rest of the landscape
    It is a convenient wrapper for a cost function because it always maps
    the [0, +inf] output of any other cost function into [0, depth], so
    there is always an intuition of how well an optimization is doing
    The derivative at zero is zero, which should help keep the adaptive
    sampling from getting stuck
    arctan could be used for a similar purpose, but in experiments it is
    more useful to have a high slope at zero
"""
return LJP(r + R_min, R_min, depth=depth, p12=p12, p6=p6) + depth
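if __name__ == "__main__":
    # Minimal sketch (not used by the simulations) of how LJP_mod can wrap a
    # diverging cost metric so the optimizer always sees values in
    # [0, depth]; the raw_cost value and R_min here are made up for
    # illustration.
    raw_cost = 0.35
    wrapped = LJP_mod(raw_cost, R_min=0.2, depth=100.0)
    print("raw cost %.3f -> wrapped cost %.3f (bounded by depth=100)"
          % (raw_cost, wrapped))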
| DiCarloLab-Delft/PycQED_py3 | pycqed/instrument_drivers/virtual_instruments/sim_control_CZ.py | Python | mit | 11,275 | 0.001508 |
import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
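# Naming convention for the contraction helpers below: the subscripts in each
# name spell out the einsum being computed, e.g. _ij_ik_il_to_jkl returns
# out[j, k, l] = sum_i a[i, j] * b[i, k] * c[i, l]. The contractions are
# written with matmul/reshape so they remain differentiable chainer ops.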
def _ij_ik_il_to_jkl(a, b, c):
ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk
return chainer.functions.matmul(_as_mat(ab).T, c).reshape(
a.shape[1], b.shape[1], c.shape[1])
def _ij_ik_jkl_to_il(a, b, c):
ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk
c = c.reshape(-1, c.shape[-1]) # [jk]l
return chainer.functions.matmul(_as_mat(ab), c)
def _ij_il_jkl_to_ik(a, b, c):
return _ij_ik_jkl_to_il(a, b, chainer.functions.swapaxes(c, 1, 2))
def _ik_il_jkl_to_ij(a, b, c):
return _ij_ik_jkl_to_il(a, b, chainer.functions.rollaxis(c, 0, c.ndim))
class BilinearFunction(function_node.FunctionNode):
def check_type_forward(self, in_types):
n_in = type_check.eval(in_types.size())
if n_in != 3 and n_in != 6:
raise type_check.InvalidType(
'{0} or {1}'.format(
in_types.size() == 3, in_types.size() == 6),
'{0} == {1}'.format(in_types.size(), n_in))
e1_type, e2_type, W_type = in_types[:3]
type_check_prod = type_check.make_variable(numpy.prod, 'prod')
type_check.expect(
e1_type.dtype == numpy.float32,
e1_type.ndim >= 2,
e2_type.dtype == numpy.float32,
e2_type.ndim >= 2,
e1_type.shape[0] == e2_type.shape[0],
W_type.dtype == numpy.float32,
W_type.ndim == 3,
type_check_prod(e1_type.shape[1:]) == W_type.shape[0],
type_check_prod(e2_type.shape[1:]) == W_type.shape[1],
)
if n_in == 6:
out_size = W_type.shape[2]
V1_type, V2_type, b_type = in_types[3:]
type_check.expect(
V1_type.dtype == numpy.float32,
V1_type.ndim == 2,
V1_type.shape[0] == W_type.shape[0],
V1_type.shape[1] == out_size,
V2_type.dtype == numpy.float32,
V2_type.ndim == 2,
V2_type.shape[0] == W_type.shape[1],
V2_type.shape[1] == out_size,
b_type.dtype == numpy.float32,
b_type.ndim == 1,
b_type.shape[0] == out_size,
)
def forward(self, inputs):
self.retain_inputs(tuple(range(len(inputs))))
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W = inputs[2]
xp = backend.get_array_module(*inputs)
# optimize: y = xp.einsum('ij,ik,jkl->il', e1, e2, W)
y = xp.tensordot(xp.einsum('ij,ik->ijk', e1, e2), W, axes=2)
if len(inputs) == 6:
V1, V2, b = inputs[3:]
y += e1.dot(V1)
y += e2.dot(V2)
y += b
return y,
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
e1, e2, W = inputs[:3]
gy, = grad_outputs
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
return BilinearFunctionGrad().apply((e1, e2, W, V1, V2, gy))
return BilinearFunctionGrad().apply((e1, e2, W, gy))
class BilinearFunctionGrad(function_node.FunctionNode):
def forward(self, inputs):
self.retain_inputs(tuple(range(len(inputs))))
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W, gy = inputs[2], inputs[-1]
xp = backend.get_array_module(*inputs)
# optimize: gW = xp.einsum('ij,ik,il->jkl', e1, e2, gy)
gW = xp.einsum('ij,ik->jki', e1, e2).dot(gy)
gy_W = xp.tensordot(gy, W, axes=(1, 2)) # 'il,jkl->ijk'
# optimize: ge1 = xp.einsum('ik,jkl,il->ij', e2, W, gy)
ge1 = xp.einsum('ik,ijk->ij', e2, gy_W)
# optimize: ge2 = xp.einsum('ij,jkl,il->ik', e1, W, gy)
ge2 = xp.einsum('ij,ijk->ik', e1, gy_W)
ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
gV1 = e1.T.dot(gy)
gV2 = e2.T.dot(gy)
gb = gy.sum(0)
ge1 += gy.dot(V1.T)
ge2 += gy.dot(V2.T)
ret += gV1, gV2, gb
return ret
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W, gy = inputs[2], inputs[-1]
gge1 = _as_mat(grad_outputs[0])
gge2 = _as_mat(grad_outputs[1])
ggW = grad_outputs[2]
dge1_de2 = _ij_il_jkl_to_ik(gge1, gy, W)
dge1_dW = _ij_ik_il_to_jkl(gge1, e2, gy)
dge1_dgy = _ij_ik_jkl_to_il(gge1, e2, W)
dge2_de1 = _ik_il_jkl_to_ij(gge2, gy, W)
dge2_dW = _ij_ik_il_to_jkl(e1, gge2, gy)
dge2_dgy = _ij_ik_jkl_to_il(e1, gge2, W)
dgW_de1 = _ik_il_jkl_to_ij(e2, gy, ggW)
dgW_de2 = _ij_il_jkl_to_ik(e1, gy, ggW)
dgW_dgy = _ij_ik_jkl_to_il(e1, e2, ggW)
ge1 = dgW_de1 + dge2_de1
ge2 = dgW_de2 + dge1_de2
gW = dge1_dW + dge2_dW
ggy = dgW_dgy + dge1_dgy + dge2_dgy
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
ggV1, ggV2, ggb = grad_outputs[3:]
gV1 = chainer.functions.matmul(gge1, gy, transa=True)
gV2 = chainer.functions.matmul(gge2, gy, transa=True)
ge1 += chainer.functions.matmul(gy, ggV1, transb=True)
ge2 += chainer.functions.matmul(gy, ggV2, transb=True)
ggy += chainer.functions.matmul(gge1, V1)
ggy += chainer.functions.matmul(gge2, V2)
ggy += chainer.functions.matmul(e1, ggV1)
ggy += chainer.functions.matmul(e2, ggV2)
ggy += chainer.functions.broadcast_to(ggb, ggy.shape)
ge1 = ge1.reshape(inputs[0].shape)
ge2 = ge2.reshape(inputs[1].shape)
if len(inputs) == 6:
return ge1, ge2, gW, gV1, gV2, ggy
return ge1, ge2, gW, ggy
def bilinear(e1, e2, W, V1=None, V2=None, b=None):
"""Applies a bilinear function based on given parameters.
This is a building block of Neural Tensor Network (see the reference paper
below). It takes two input variables and one or four parameters, and
outputs one variable.
To be precise, denote six input arrays mathematically by
:math:`e^1\\in \\mathbb{R}^{I\\cdot J}`,
:math:`e^2\\in \\mathbb{R}^{I\\cdot K}`,
:math:`W\\in \\mathbb{R}^{J \\cdot K \\cdot L}`,
:math:`V^1\\in \\mathbb{R}^{J \\cdot L}`,
:math:`V^2\\in \\mathbb{R}^{K \\cdot L}`, and
:math:`b\\in \\mathbb{R}^{L}`,
where :math:`I` is mini-batch size.
In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear
parameters.
The output of forward propagation is calculated as
.. math::
y_{il} = \\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\
\\sum_{j} e^1_{ij} V^1_{jl} + \\sum_{k} e^2_{ik} V^2_{kl} + b_{l}.
Note that V1, V2, b are optional. If these are not given, then this
function omits the last three terms in the above equation.
.. note::
        This function accepts ``e1`` or ``e2`` as a non-matrix array. In
        this case, the leading dimension is treated as the batch dimension,
        and the other dimensions are reduced to one dimension.
.. note::
In the original paper, :math:`J` and :math:`K`
must be equal and the author denotes :math:`[V^1 V^2]`
(concatenation of matrices) by :math:`V`.
Args:
e1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Left input variable.
e2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Right input variable.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Quadratic weight variable.
V1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Left coefficient variable.
V2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Right coefficient variable.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable.
Returns:
~chainer.Variable: Output variable.
See:
`Reasoning With Neural Tensor Networks for Knowledge Base Completion
<https://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-
networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013].
.. seealso::
:class:`~chainer.links.Bilinear` to manage the model parameters
``W``, ``V1``, ``V2``, and ``b``.
"""
flags = [V1 is None, V2 is None, b is None]
if any(flags):
if not all(flags):
            raise ValueError('All coefficients and bias for bilinear() must '
                             'all be None if at least one of them is None.')
return BilinearFunction().apply((e1, e2, W))[0]
return BilinearFunction().apply((e1, e2, W, V1, V2, b))[0]
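if __name__ == '__main__':
    # Minimal self-check sketch (not part of the chainer API): exercises
    # bilinear() with random float32 inputs whose shapes follow the
    # docstring above (I=2, J=3, K=4, L=5).
    e1 = numpy.random.rand(2, 3).astype(numpy.float32)
    e2 = numpy.random.rand(2, 4).astype(numpy.float32)
    W = numpy.random.rand(3, 4, 5).astype(numpy.float32)
    y = bilinear(e1, e2, W)
    print(y.shape)  # -> (2, 5)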
| okuta/chainer | chainer/functions/connection/bilinear.py | Python | mit | 9,015 | 0 |
# -*- coding: utf-8 -*-
# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2021 OzzieIsaacs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
import dataclasses
import os
import re
from typing import Dict, Generator, List, Optional, Union
from cps import constants
@dataclasses.dataclass
class MetaSourceInfo:
id: str
description: str
link: str
@dataclasses.dataclass
class MetaRecord:
id: Union[str, int]
title: str
authors: List[str]
url: str
source: MetaSourceInfo
cover: str = os.path.join(constants.STATIC_DIR, 'generic_cover.jpg')
description: Optional[str] = ""
series: Optional[str] = None
series_index: Optional[Union[int, float]] = 0
identifiers: Dict[str, Union[str, int]] = dataclasses.field(default_factory=dict)
publisher: Optional[str] = None
publishedDate: Optional[str] = None
rating: Optional[int] = 0
languages: Optional[List[str]] = dataclasses.field(default_factory=list)
tags: Optional[List[str]] = dataclasses.field(default_factory=list)
class Metadata:
__name__ = "Generic"
__id__ = "generic"
def __init__(self):
self.active = True
def set_status(self, state):
self.active = state
@abc.abstractmethod
def search(
self, query: str, generic_cover: str = "", locale: str = "en"
) -> Optional[List[MetaRecord]]:
pass
@staticmethod
def get_title_tokens(
title: str, strip_joiners: bool = True
) -> Generator[str, None, None]:
"""
        Taken from the calibre source code.
        It is a simplified version (unnecessary parts removed) of
https://github.com/kovidgoyal/calibre/blob/99d85b97918625d172227c8ffb7e0c71794966c0/
src/calibre/ebooks/metadata/sources/base.py#L363-L367
(src/calibre/ebooks/metadata/sources/base.py - lines 363-398)
"""
title_patterns = [
(re.compile(pat, re.IGNORECASE), repl)
for pat, repl in [
# Remove things like: (2010) (Omnibus) etc.
(
r"(?i)[({\[](\d{4}|omnibus|anthology|hardcover|"
r"audiobook|audio\scd|paperback|turtleback|"
r"mass\s*market|edition|ed\.)[\])}]",
"",
),
# Remove any strings that contain the substring edition inside
# parentheses
(r"(?i)[({\[].*?(edition|ed.).*?[\]})]", ""),
                # Remove commas used as separators in numbers
(r"(\d+),(\d+)", r"\1\2"),
# Remove hyphens only if they have whitespace before them
(r"(\s-)", " "),
# Replace other special chars with a space
(r"""[:,;!@$%^&*(){}.`~"\s\[\]/]《》「」“”""", " "),
]
]
for pat, repl in title_patterns:
title = pat.sub(repl, title)
tokens = title.split()
for token in tokens:
token = token.strip().strip('"').strip("'")
if token and (
not strip_joiners or token.lower() not in ("a", "and", "the", "&")
):
yield token
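# A hedged sketch of a concrete provider built on the base class above. The
# class and URLs are illustrative only, not one of Calibre-Web's shipped
# metadata providers.
class _ExampleProvider(Metadata):
    __name__ = "Example"
    __id__ = "example"

    def search(self, query: str, generic_cover: str = "",
               locale: str = "en") -> Optional[List[MetaRecord]]:
        # A real provider would query a remote catalogue here; this sketch
        # just echoes the tokenized query back as a single record.
        title = " ".join(self.get_title_tokens(query))
        return [MetaRecord(
            id="example-1",
            title=title or query,
            authors=["Unknown"],
            url="https://example.org/book/1",
            source=MetaSourceInfo(
                id=self.__id__,
                description="Example metadata source",
                link="https://example.org",
            ),
        )]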
| janeczku/calibre-web | cps/services/Metadata.py | Python | gpl-3.0 | 3,837 | 0.000784 |
# coding: utf-8
from __future__ import unicode_literals, absolute_import
try:
import requests as r
except ImportError:
r = None
class TigrisSession(object):
"""
Base session layer for Tigris.
"""
def __init__(self,
base_url,
default_headers={}):
"""
:param base_url:
The customer endpoint docroot.
:type base_url:
`str`
        :param default_headers:
            Headers to send with every request.
        :type default_headers:
            `dict`
"""
self._base_url = base_url
self._session = r.Session()
self._default_headers = default_headers
self._timeout = 80
def _request(self, method, endpoint, headers, post_data=None, files=None):
"""
Makes an HTTP request
:param method:
The name of the method
:type method:
`str`
:param endpoint:
The name of the endpoint
:type endpoint:
`str`
:param headers:
The name of the endpoint
:type headers:
`dict`
:param post_data:
PATCH/POST/PUT data.
:type post_data:
`dict`
:rtype:
`tuple` of `str`, `int`, `dict`
"""
url = '{0}/{1}'.format(self._base_url, endpoint)
try:
try:
result = self._session.request(method,
url,
headers=headers,
json=post_data,
files=files,
timeout=self._timeout)
except TypeError as e:
raise TypeError(
'WARNING: We couldn\'t find a proper instance of '
'Python `requests`. You may need to update or install '
'the library, which you can do with `pip`: '
' To update `requests`: '
''
' pip install -U requests '
' To install `requests`:'
''
' pip install requests. '
'Alternatively, your POST data may be malformed. '
'Underlying error: {0}'.format(e))
content = result.json()
status_code = result.status_code
except Exception as e:
raise Exception(e)
return content, status_code, result.headers
def _delete(self, endpoint, headers={}):
"""
Executes a DELETE request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request('delete', endpoint, joined_headers)
def _get(self, endpoint, headers={}):
"""
Executes a GET request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request('get', endpoint, joined_headers)
def _head(self, endpoint, headers={}):
"""
Executes a HEAD request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request('head', endpoint, joined_headers)
def _patch(self, endpoint, data={}, headers={}):
"""
Executes a PATCH request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:param data:
The payload data to send
:type data:
`dict`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request(
'patch',
endpoint,
joined_headers,
post_data=data)
def _post(self, endpoint, data={}, headers={}, files=None):
"""
Executes a POST request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:param data:
The payload data to send
:type data:
`dict`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request(
'post',
endpoint,
joined_headers,
post_data=data,
files=files)
def _put(self, endpoint, data={}, headers={}):
"""
        Executes a PUT request
:param endpoint:
The name of the endpoint
:type endpoint:
`url`
:param data:
The payload data to send
:type data:
`dict`
:rtype:
`tuple`
"""
joined_headers = dict(headers, **self._default_headers)
return self._request(
'put',
endpoint,
joined_headers,
post_data=data)
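# Minimal usage sketch (hedged: the docroot URL and endpoint below are
# placeholders, not a real Tigris deployment). Every verb helper returns a
# (content, status_code, headers) tuple:
#
#     session = TigrisSession(
#         'https://api.example.com/v1',
#         default_headers={'Authorization': 'Bearer <token>'})
#     content, status, headers = session._get('customers')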
| jogral/tigris-python-sdk | tigrissdk/session/tigris_session.py | Python | apache-2.0 | 5,263 | 0.00019 |
"""
This is a subscriber meant for the 'weather' messages example.
It uses a custom code loop to get and process messages.
"""
from __future__ import print_function
import sys
import threading
import time
import Pyro4
from messagebus.messagebus import Subscriber
from Pyro4.util import excepthook
sys.excepthook = excepthook
if sys.version_info < (3, 0):
input = raw_input
Pyro4.config.AUTOPROXY = True
@Pyro4.expose
class Subber(Subscriber):
def consume_message(self, topic, message):
        # In this case, this consume_message method is called by our own code loop.
print("\nPROCESSING MESSAGE:")
print(" topic:", topic)
print(" msgid:", message.msgid)
print(" created:", message.created)
print(" data:", message.data)
def manual_message_loop(self):
print("Entering manual message processing loop (5 messages).")
processed = 0
while processed < 5:
time.sleep(0.5)
print("\nApprox. number of received messages:", self.received_messages.qsize())
topic, message = self.received_messages.get() # get a message from the queue (they are put there by the Pyro messagebus)
self.consume_message(topic, message)
processed += 1
print("\nEnd.")
hostname = input("hostname to bind on (empty=localhost): ").strip() or "localhost"
# create a messagebus subscriber that uses manual message retrieval (via explicit call)
# because we're doing the message loop ourselves, the Pyro daemon has to run in a separate thread
subber = Subber(auto_consume=False)
d = Pyro4.Daemon(host=hostname)
d.register(subber)
daemon_thread = threading.Thread(target=d.requestLoop)
daemon_thread.daemon = True
daemon_thread.start()
topics = subber.bus.topics()
print("Topics on the bus: ", topics)
print("Subscribing to weather-forecast.")
subber.bus.subscribe("weather-forecast", subber)
# note: we subscribe on the bus *after* registering the subber as a Pyro object
# this results in Pyro automatically making a proxy for the subber
print("Subscribed on weather-forecast")
# run the manual message loop
print("Entering message loop, you should see the msg count increasing.")
subber.manual_message_loop()
subber.bus.unsubscribe("weather-forecast", subber)
print("Unsubscribed from the topic.")
print("Entering message loop again, you should see the msg count decrease.")
subber.manual_message_loop()
| irmen/Pyro4 | examples/messagebus/subscriber_manual_consume.py | Python | mit | 2,432 | 0.002467 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.builtins import range
import re
import sys
from twisted.enterprise import adbapi
from twisted.internet import defer
from twisted.python import log
from buildbot.process.buildstep import LogLineObserver
from buildbot.steps.shell import Test
class EqConnectionPool(adbapi.ConnectionPool):
"""This class works the same way as
twisted.enterprise.adbapi.ConnectionPool. But it adds the ability to
compare connection pools for equality (by comparing the arguments
passed to the constructor).
This is useful when passing the ConnectionPool to a BuildStep, as
otherwise Buildbot will consider the buildstep (and hence the
containing buildfactory) to have changed every time the configuration
is reloaded.
It also sets some defaults differently from adbapi.ConnectionPool that
are more suitable for use in MTR.
"""
def __init__(self, *args, **kwargs):
self._eqKey = (args, kwargs)
adbapi.ConnectionPool.__init__(self,
cp_reconnect=True, cp_min=1, cp_max=3,
*args, **kwargs)
def __eq__(self, other):
if isinstance(other, EqConnectionPool):
return self._eqKey == other._eqKey
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
class MtrTestFailData:
def __init__(self, testname, variant, result, info, text, callback):
self.testname = testname
self.variant = variant
self.result = result
self.info = info
self.text = text
self.callback = callback
def add(self, line):
self.text += line
def fireCallback(self):
return self.callback(self.testname, self.variant, self.result, self.info, self.text)
class MtrLogObserver(LogLineObserver):
"""
Class implementing a log observer (can be passed to
    BuildStep.addLogObserver()).
It parses the output of mysql-test-run.pl as used in MySQL,
MariaDB, Drizzle, etc.
    It counts the number of tests run and uses it to provide more accurate
completion estimates.
It parses out test failures from the output and summarizes the results on
the Waterfall page. It also passes the information to methods that can be
overridden in a subclass to do further processing on the information."""
_line_re = re.compile(
r"^([-._0-9a-zA-z]+)( '[-_ a-zA-Z]+')?\s+(w[0-9]+\s+)?\[ (fail|pass) \]\s*(.*)$")
_line_re2 = re.compile(
r"^[-._0-9a-zA-z]+( '[-_ a-zA-Z]+')?\s+(w[0-9]+\s+)?\[ [-a-z]+ \]")
_line_re3 = re.compile(
r"^\*\*\*Warnings generated in error logs during shutdown after running tests: (.*)")
_line_re4 = re.compile(r"^The servers were restarted [0-9]+ times$")
_line_re5 = re.compile(r"^Only\s+[0-9]+\s+of\s+[0-9]+\s+completed.$")
def __init__(self, textLimit=5, testNameLimit=16, testType=None):
self.textLimit = textLimit
self.testNameLimit = testNameLimit
self.testType = testType
self.numTests = 0
self.testFail = None
self.failList = []
self.warnList = []
LogLineObserver.__init__(self)
def setLog(self, loog):
LogLineObserver.setLog(self, loog)
d = loog.waitUntilFinished()
d.addCallback(lambda l: self.closeTestFail())
def outLineReceived(self, line):
stripLine = line.strip("\r\n")
m = self._line_re.search(stripLine)
if m:
testname, variant, worker, result, info = m.groups()
self.closeTestFail()
self.numTests += 1
self.step.setProgress('tests', self.numTests)
if result == "fail":
if variant is None:
variant = ""
else:
variant = variant[2:-1]
self.openTestFail(
testname, variant, result, info, stripLine + "\n")
else:
m = self._line_re3.search(stripLine)
# pylint: disable=too-many-boolean-expressions
if m:
stuff = m.group(1)
self.closeTestFail()
testList = stuff.split(" ")
self.doCollectWarningTests(testList)
elif (self._line_re2.search(stripLine) or
self._line_re4.search(stripLine) or
self._line_re5.search(stripLine) or
stripLine == "Test suite timeout! Terminating..." or
stripLine.startswith("mysql-test-run: *** ERROR: Not all tests completed") or
(stripLine.startswith("------------------------------------------------------------")
and self.testFail is not None)):
self.closeTestFail()
else:
self.addTestFailOutput(stripLine + "\n")
def openTestFail(self, testname, variant, result, info, line):
self.testFail = MtrTestFailData(
testname, variant, result, info, line, self.doCollectTestFail)
def addTestFailOutput(self, line):
if self.testFail is not None:
self.testFail.add(line)
def closeTestFail(self):
if self.testFail is not None:
self.testFail.fireCallback()
self.testFail = None
def addToText(self, src, dst):
lastOne = None
count = 0
for t in src:
            if t != lastOne:
                dst.append(t)
                count += 1
                lastOne = t
if count >= self.textLimit:
break
def makeText(self, done):
if done:
text = ["test"]
else:
text = ["testing"]
if self.testType:
text.append(self.testType)
fails = sorted(self.failList[:])
self.addToText(fails, text)
warns = sorted(self.warnList[:])
self.addToText(warns, text)
return text
# Update waterfall status.
def updateText(self):
self.step.step_status.setText(self.makeText(False))
strip_re = re.compile(r"^[a-z]+\.")
def displayTestName(self, testname):
displayTestName = self.strip_re.sub("", testname)
if len(displayTestName) > self.testNameLimit:
displayTestName = displayTestName[
:(self.testNameLimit - 2)] + "..."
return displayTestName
def doCollectTestFail(self, testname, variant, result, info, text):
self.failList.append("F:" + self.displayTestName(testname))
self.updateText()
self.collectTestFail(testname, variant, result, info, text)
def doCollectWarningTests(self, testList):
for t in testList:
self.warnList.append("W:" + self.displayTestName(t))
self.updateText()
self.collectWarningTests(testList)
# These two methods are overridden to actually do something with the data.
def collectTestFail(self, testname, variant, result, info, text):
pass
def collectWarningTests(self, testList):
pass
class MTR(Test):
"""
Build step that runs mysql-test-run.pl, as used in MySQL, Drizzle,
MariaDB, etc.
It uses class MtrLogObserver to parse test results out from the
output of mysql-test-run.pl, providing better completion time
estimates and summarizing test failures on the waterfall page.
It also provides access to mysqld server error logs from the test
run to help debugging any problems.
Optionally, it can insert into a database data about the test run,
including details of any test failures.
Parameters:
textLimit
Maximum number of test failures to show on the waterfall page
(to not flood the page in case of a large number of test
failures. Defaults to 5.
testNameLimit
Maximum length of test names to show unabbreviated in the
waterfall page, to avoid excessive column width. Defaults to 16.
parallel
Value of --parallel option used for mysql-test-run.pl (number
of processes used to run the test suite in parallel). Defaults
to 4. This is used to determine the number of server error log
        files to download from the worker. Specifying too high a value
        does not hurt (nonexistent error logs are ignored); however, if a
        --parallel value greater than the default is used, it needs to be
        specified here, or some server error logs will be missing.
dbpool
An instance of twisted.enterprise.adbapi.ConnectionPool, or None.
Defaults to None. If specified, results are inserted into the database
using the ConnectionPool.
The class process.mtrlogobserver.EqConnectionPool subclass of
ConnectionPool can be useful to pass as value for dbpool, to
avoid having config reloads think the Buildstep is changed
just because it gets a new ConnectionPool instance (even
though connection parameters are unchanged).
autoCreateTables
Boolean, defaults to False. If True (and dbpool is specified), the
necessary database tables will be created automatically if they do
not exist already. Alternatively, the tables can be created manually
from the SQL statements found in the mtrlogobserver.py source file.
test_type
test_info
Two descriptive strings that will be inserted in the database tables if
dbpool is specified. The test_type string, if specified, will also
appear on the waterfall page."""
renderables = ['mtr_subdir']
def __init__(self, dbpool=None, test_type=None, test_info="",
description=None, descriptionDone=None,
autoCreateTables=False, textLimit=5, testNameLimit=16,
parallel=4, logfiles=None, lazylogfiles=True,
warningPattern="MTR's internal check of the test case '.*' failed",
mtr_subdir="mysql-test", **kwargs):
if logfiles is None:
logfiles = {}
if description is None:
description = ["testing"]
if test_type:
description.append(test_type)
if descriptionDone is None:
descriptionDone = ["test"]
if test_type:
descriptionDone.append(test_type)
Test.__init__(self, logfiles=logfiles, lazylogfiles=lazylogfiles,
description=description, descriptionDone=descriptionDone,
warningPattern=warningPattern, **kwargs)
self.dbpool = dbpool
self.test_type = test_type
self.test_info = test_info
self.autoCreateTables = autoCreateTables
self.textLimit = textLimit
self.testNameLimit = testNameLimit
self.parallel = parallel
self.mtr_subdir = mtr_subdir
self.progressMetrics += ('tests',)
def start(self):
# Add mysql server logfiles.
for mtr in range(0, self.parallel + 1):
for mysqld in range(1, 4 + 1):
if mtr == 0:
logname = "mysqld.%d.err" % mysqld
filename = "var/log/mysqld.%d.err" % mysqld
else:
logname = "mysqld.%d.err.%d" % (mysqld, mtr)
filename = "var/%d/log/mysqld.%d.err" % (mtr, mysqld)
self.addLogFile(logname, self.mtr_subdir + "/" + filename)
self.myMtr = self.MyMtrLogObserver(textLimit=self.textLimit,
testNameLimit=self.testNameLimit,
testType=self.test_type)
self.addLogObserver("stdio", self.myMtr)
# Insert a row for this test run into the database and set up
# build properties, then start the command proper.
d = self.registerInDB()
d.addCallback(self.afterRegisterInDB)
d.addErrback(self.failed)
def getText(self, command, results):
return self.myMtr.makeText(True)
def runInteractionWithRetry(self, actionFn, *args, **kw):
"""
Run a database transaction with dbpool.runInteraction, but retry the
transaction in case of a temporary error (like connection lost).
This is needed to be robust against things like database connection
idle timeouts.
The passed callable that implements the transaction must be retryable,
ie. it must not have any destructive side effects in the case where
an exception is thrown and/or rollback occurs that would prevent it
from functioning correctly when called again."""
def runWithRetry(txn, *args, **kw):
retryCount = 0
            while True:
try:
return actionFn(txn, *args, **kw)
except txn.OperationalError:
retryCount += 1
if retryCount >= 5:
raise
excType, excValue, excTraceback = sys.exc_info()
log.msg("Database transaction failed (caught exception %s(%s)), retrying ..." % (
excType, excValue))
txn.close()
txn.reconnect()
txn.reopen()
return self.dbpool.runInteraction(runWithRetry, *args, **kw)
def runQueryWithRetry(self, *args, **kw):
"""
Run a database query, like with dbpool.runQuery, but retry the query in
case of a temporary error (like connection lost).
This is needed to be robust against things like database connection
idle timeouts."""
def runQuery(txn, *args, **kw):
txn.execute(*args, **kw)
return txn.fetchall()
return self.runInteractionWithRetry(runQuery, *args, **kw)
def registerInDB(self):
if self.dbpool:
return self.runInteractionWithRetry(self.doRegisterInDB)
else:
return defer.succeed(0)
# The real database work is done in a thread in a synchronous way.
def doRegisterInDB(self, txn):
# Auto create tables.
# This is off by default, as it gives warnings in log file
# about tables already existing (and I did not find the issue
# important enough to find a better fix).
if self.autoCreateTables:
txn.execute("""
CREATE TABLE IF NOT EXISTS test_run(
id INT PRIMARY KEY AUTO_INCREMENT,
branch VARCHAR(100),
revision VARCHAR(32) NOT NULL,
platform VARCHAR(100) NOT NULL,
dt TIMESTAMP NOT NULL,
bbnum INT NOT NULL,
typ VARCHAR(32) NOT NULL,
info VARCHAR(255),
KEY (branch, revision),
KEY (dt),
KEY (platform, bbnum)
) ENGINE=innodb
""")
txn.execute("""
CREATE TABLE IF NOT EXISTS test_failure(
test_run_id INT NOT NULL,
test_name VARCHAR(100) NOT NULL,
test_variant VARCHAR(16) NOT NULL,
info_text VARCHAR(255),
failure_text TEXT,
PRIMARY KEY (test_run_id, test_name, test_variant)
) ENGINE=innodb
""")
txn.execute("""
CREATE TABLE IF NOT EXISTS test_warnings(
test_run_id INT NOT NULL,
list_id INT NOT NULL,
list_idx INT NOT NULL,
test_name VARCHAR(100) NOT NULL,
PRIMARY KEY (test_run_id, list_id, list_idx)
) ENGINE=innodb
""")
revision = self.getProperty("got_revision")
if revision is None:
revision = self.getProperty("revision")
typ = "mtr"
if self.test_type:
typ = self.test_type
txn.execute("""
INSERT INTO test_run(branch, revision, platform, dt, bbnum, typ, info)
VALUES (%s, %s, %s, CURRENT_TIMESTAMP(), %s, %s, %s)
""", (self.getProperty("branch"), revision,
self.getProperty("buildername"), self.getProperty("buildnumber"),
typ, self.test_info))
return txn.lastrowid
def afterRegisterInDB(self, insert_id):
self.setProperty("mtr_id", insert_id)
self.setProperty("mtr_warn_id", 0)
Test.start(self)
def reportError(self, err):
log.msg("Error in async insert into database: %s" % err)
class MyMtrLogObserver(MtrLogObserver):
def collectTestFail(self, testname, variant, result, info, text):
# Insert asynchronously into database.
dbpool = self.step.dbpool
if dbpool is None:
return defer.succeed(None)
run_id = self.step.getProperty("mtr_id")
if variant is None:
variant = ""
d = self.step.runQueryWithRetry("""
INSERT INTO test_failure(test_run_id, test_name, test_variant, info_text, failure_text)
VALUES (%s, %s, %s, %s, %s)
""", (run_id, testname, variant, info, text))
d.addErrback(self.step.reportError)
return d
def collectWarningTests(self, testList):
# Insert asynchronously into database.
dbpool = self.step.dbpool
if dbpool is None:
return defer.succeed(None)
run_id = self.step.getProperty("mtr_id")
warn_id = self.step.getProperty("mtr_warn_id")
self.step.setProperty("mtr_warn_id", warn_id + 1)
q = ("INSERT INTO test_warnings(test_run_id, list_id, list_idx, test_name) " +
"VALUES " + ", ".join(map(lambda x: "(%s, %s, %s, %s)", testList)))
v = []
idx = 0
for t in testList:
v.extend([run_id, warn_id, idx, t])
idx = idx + 1
d = self.step.runQueryWithRetry(q, tuple(v))
d.addErrback(self.step.reportError)
return d
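# Hedged configuration sketch: wiring the MTR step into a build factory with
# an EqConnectionPool, so that config reloads do not consider the step (and
# hence the factory) changed. The command and DSN values are placeholders.
#
#     from buildbot.process.factory import BuildFactory
#     from buildbot.steps.mtrlogobserver import MTR, EqConnectionPool
#
#     f = BuildFactory()
#     f.addStep(MTR(
#         command=["perl", "mysql-test-run.pl", "--force", "--parallel=4"],
#         parallel=4,
#         dbpool=EqConnectionPool("MySQLdb", "dbhost", "buildbot",
#                                 "password", "test_results"),
#         autoCreateTables=True,
#         test_type="mtr"))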
| Lekensteyn/buildbot | master/buildbot/steps/mtrlogobserver.py | Python | gpl-2.0 | 18,336 | 0.000545 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .v2016_09_01.models import *
| Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/models.py | Python | mit | 360 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import os.path
import tempfile
import fixtures
from oslo.config import cfg
from glance import tests as glance_tests
import glance.common.client
from glance.common import config
import glance.db.sqlalchemy.api
import glance.db.sqlalchemy.migration
import glance.registry.client.v1.client
import glance.store
from glance.tests import utils as test_utils
TESTING_API_PASTE_CONF = """
[pipeline:glance-api]
pipeline = versionnegotiation gzip unauthenticated-context rootapp
[pipeline:glance-api-caching]
pipeline = versionnegotiation gzip unauthenticated-context cache rootapp
[pipeline:glance-api-cachemanagement]
pipeline =
versionnegotiation
gzip
unauthenticated-context
cache
cache_manage
rootapp
[pipeline:glance-api-fakeauth]
pipeline = versionnegotiation gzip fakeauth context rootapp
[pipeline:glance-api-noauth]
pipeline = versionnegotiation gzip context rootapp
[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app
[app:apiversions]
paste.app_factory = glance.api.versions:create_resource
[app:apiv1app]
paste.app_factory = glance.api.v1.router:API.factory
[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory
[filter:versionnegotiation]
paste.filter_factory =
glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
[filter:gzip]
paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
[filter:cache_manage]
paste.filter_factory =
glance.api.middleware.cache_manage:CacheManageFilter.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
TESTING_REGISTRY_PASTE_CONF = """
[pipeline:glance-registry]
pipeline = unauthenticated-context registryapp
[pipeline:glance-registry-fakeauth]
pipeline = fakeauth context registryapp
[app:registryapp]
paste.app_factory = glance.registry.api.v1:API.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
CONF = cfg.CONF
CONF.import_opt('filesystem_store_datadir', 'glance.store.filesystem')
class ApiTest(test_utils.BaseTestCase):
def setUp(self):
super(ApiTest, self).setUp()
self.init()
def init(self):
self.test_dir = self.useFixture(fixtures.TempDir()).path
self._configure_logging()
self._setup_database()
self._setup_stores()
self._setup_property_protection()
self.glance_registry_app = self._load_paste_app(
'glance-registry',
flavor=getattr(self, 'registry_flavor', ''),
conf=getattr(self, 'registry_paste_conf',
TESTING_REGISTRY_PASTE_CONF),
)
self._connect_registry_client()
self.glance_api_app = self._load_paste_app(
'glance-api',
flavor=getattr(self, 'api_flavor', ''),
conf=getattr(self, 'api_paste_conf', TESTING_API_PASTE_CONF),
)
self.http = test_utils.Httplib2WsgiAdapter(self.glance_api_app)
def _setup_property_protection(self):
self._copy_data_file('property-protections.conf', self.test_dir)
self.property_file = os.path.join(self.test_dir,
'property-protections.conf')
def _configure_logging(self):
self.config(default_log_levels=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=DEBUG'
])
def _setup_database(self):
sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
self.config(sql_connection=sql_connection)
glance.db.sqlalchemy.api.clear_db_env()
glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE'
if glance_db_env in os.environ:
# use the empty db created and cached as a tempfile
# instead of spending the time creating a new one
db_location = os.environ[glance_db_env]
test_utils.execute('cp %s %s/tests.sqlite'
% (db_location, self.test_dir))
else:
glance.db.sqlalchemy.migration.db_sync()
# copy the clean db to a temp location so that it
# can be reused for future tests
(osf, db_location) = tempfile.mkstemp()
os.close(osf)
test_utils.execute('cp %s/tests.sqlite %s'
% (self.test_dir, db_location))
os.environ[glance_db_env] = db_location
# cleanup the temp file when the test suite is
# complete
def _delete_cached_db():
try:
os.remove(os.environ[glance_db_env])
except Exception:
glance_tests.logger.exception(
"Error cleaning up the file %s" %
os.environ[glance_db_env])
atexit.register(_delete_cached_db)
def _setup_stores(self):
image_dir = os.path.join(self.test_dir, "images")
self.config(filesystem_store_datadir=image_dir)
glance.store.create_stores()
def _load_paste_app(self, name, flavor, conf):
conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name)
with open(conf_file_path, 'wb') as conf_file:
conf_file.write(conf)
conf_file.flush()
return config.load_paste_app(name, flavor=flavor,
conf_file=conf_file_path)
def _connect_registry_client(self):
def get_connection_type(self2):
def wrapped(*args, **kwargs):
return test_utils.HttplibWsgiAdapter(self.glance_registry_app)
return wrapped
self.stubs.Set(glance.common.client.BaseClient,
'get_connection_type', get_connection_type)
def tearDown(self):
glance.db.sqlalchemy.api.clear_db_env()
super(ApiTest, self).tearDown()
| SUSE-Cloud/glance | glance/tests/integration/legacy_functional/base.py | Python | apache-2.0 | 7,145 | 0 |
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from compose.config.config import ConfigDetails
from compose.config.config import ConfigFile
from compose.config.config import load
def build_config(contents, **kwargs):
return load(build_config_details(contents, **kwargs))
def build_config_details(contents, working_dir='working_dir', filename='filename.yml'):
return ConfigDetails(
working_dir,
[ConfigFile(filename, contents)],
)
def create_host_file(client, filename):
dirname = os.path.dirname(filename)
with open(filename, 'r') as fh:
content = fh.read()
container = client.create_container(
'busybox:latest',
['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)],
volumes={dirname: {}},
host_config=client.create_host_config(
binds={dirname: {'bind': dirname, 'ro': False}},
network_mode='none',
),
)
try:
client.start(container)
exitcode = client.wait(container)
if exitcode != 0:
output = client.logs(container)
raise Exception(
"Container exited with code {}:\n{}".format(exitcode, output))
finally:
client.remove_container(container, force=True)
| sdurrheimer/compose | tests/helpers.py | Python | apache-2.0 | 1,309 | 0.000764 |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yd=6gp*c%jj@jmqug!qwb0m)ksf#2gr%_w+)a1t*4t)9yc#cr#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| williamHuang5468/QuicklyLearnDjango | mysite/mysite/settings.py | Python | mit | 2,686 | 0 |
from django.core.management.base import BaseCommand, CommandError
from xmodule.contentstore.utils import restore_asset_from_trashcan
class Command(BaseCommand):
help = '''Restore a deleted asset from the trashcan back to it's original course'''
def handle(self, *args, **options):
if len(args) != 1 and len(args) != 0:
raise CommandError("restore_asset_from_trashcan requires one argument: <location>")
restore_asset_from_trashcan(args[0])
| htzy/bigfour | cms/djangoapps/contentstore/management/commands/restore_asset_from_trashcan.py | Python | agpl-3.0 | 480 | 0.004167 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Environment, Rule
from sentry.testutils import APITestCase
class ProjectRuleListTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo")
self.create_project(teams=[team], name="bar")
url = reverse(
"sentry-api-0-project-rules",
kwargs={"organization_slug": project1.organization.slug, "project_slug": project1.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
rule_count = Rule.objects.filter(project=project1).count()
assert len(response.data) == rule_count
class CreateProjectRuleTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
project = self.create_project()
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
actions = [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}]
url = reverse(
"sentry-api-0-project-rules",
kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug},
)
response = self.client.post(
url,
data={
"name": "hello world",
"actionMatch": "any",
"actions": actions,
"conditions": conditions,
"frequency": 30,
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"]
rule = Rule.objects.get(id=response.data["id"])
assert rule.label == "hello world"
assert rule.data["action_match"] == "any"
assert rule.data["actions"] == actions
assert rule.data["conditions"] == conditions
assert rule.data["frequency"] == 30
def test_with_environment(self):
self.login_as(user=self.user)
project = self.create_project()
Environment.get_or_create(project, "production")
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
actions = [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}]
url = reverse(
"sentry-api-0-project-rules",
kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug},
)
response = self.client.post(
url,
data={
"name": "hello world",
"environment": "production",
"conditions": conditions,
"actions": actions,
"actionMatch": "any",
"frequency": 30,
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"]
assert response.data["environment"] == "production"
rule = Rule.objects.get(id=response.data["id"])
assert rule.label == "hello world"
assert rule.environment_id == Environment.get_or_create(rule.project, "production").id
def test_with_null_environment(self):
self.login_as(user=self.user)
project = self.create_project()
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
actions = [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}]
url = reverse(
"sentry-api-0-project-rules",
kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug},
)
response = self.client.post(
url,
data={
"name": "hello world",
"environment": None,
"conditions": conditions,
"actions": actions,
"actionMatch": "any",
"frequency": 30,
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"]
assert response.data["environment"] is None
rule = Rule.objects.get(id=response.data["id"])
assert rule.label == "hello world"
assert rule.environment_id is None
def test_missing_name(self):
self.login_as(user=self.user)
project = self.create_project()
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
actions = [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}]
url = reverse(
"sentry-api-0-project-rules",
kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug},
)
response = self.client.post(
url,
data={"actionMatch": "any", "actions": actions, "conditions": conditions},
format="json",
)
assert response.status_code == 400, response.content
| mvaled/sentry | tests/sentry/api/endpoints/test_project_rules.py | Python | bsd-3-clause | 5,710 | 0.002627 |
#
# (c) 2015 Peter Sprygada, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import re
import shlex
from distutils.version import LooseVersion
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.network import NetworkError, NetworkModule
from ansible.module_utils.network import register_transport, to_list
from ansible.module_utils.shell import CliBase
from ansible.module_utils.six import string_types
# temporary fix until modules are updated. to be removed before 2.2 final
from ansible.module_utils.network import get_module
try:
from jnpr.junos import Device
from jnpr.junos.utils.config import Config
from jnpr.junos.version import VERSION
from jnpr.junos.exception import RpcError, ConnectError, ConfigLoadError, CommitError
from jnpr.junos.exception import LockError, UnlockError
if LooseVersion(VERSION) < LooseVersion('1.2.2'):
HAS_PYEZ = False
else:
HAS_PYEZ = True
except ImportError:
HAS_PYEZ = False
try:
import jxmlease
HAS_JXMLEASE = True
except ImportError:
HAS_JXMLEASE = False
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
SUPPORTED_CONFIG_FORMATS = ['text', 'set', 'json', 'xml']
def xml_to_json(val):
if isinstance(val, string_types):
return jxmlease.parse(val)
else:
return jxmlease.parse_etree(val)
def xml_to_string(val):
return etree.tostring(val)
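# jxmlease maps XML into dict-like nodes; illustratively,
# xml_to_json('<a><b>1</b></a>') can be indexed like {'a': {'b': '1'}}.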
class Netconf(object):
def __init__(self):
self.device = None
self.config = None
self._locked = False
self._connected = False
self.default_output = 'xml'
def raise_exc(self, msg):
if self.device:
if self._locked:
self.config.unlock()
self.disconnect()
raise NetworkError(msg)
def connect(self, params, **kwargs):
host = params['host']
port = params.get('port') or 830
user = params['username']
passwd = params['password']
try:
self.device = Device(host, user=user, passwd=passwd, port=port,
gather_facts=False)
self.device.open()
except ConnectError:
exc = get_exception()
self.raise_exc('unable to connect to %s: %s' % (host, str(exc)))
self.config = Config(self.device)
self._connected = True
def disconnect(self):
try:
self.device.close()
except AttributeError:
pass
self._connected = False
### Command methods ###
def run_commands(self, commands):
responses = list()
for cmd in commands:
meth = getattr(self, cmd.args.get('command_type'))
responses.append(meth(str(cmd), output=cmd.output))
for index, cmd in enumerate(commands):
if cmd.output == 'xml':
responses[index] = etree.tostring(responses[index])
elif cmd.args.get('command_type') == 'rpc':
responses[index] = str(responses[index].text).strip()
return responses
def cli(self, commands, output='xml'):
'''Send commands to the device.'''
try:
return self.device.cli(commands, format=output, warning=False)
except (ValueError, RpcError):
exc = get_exception()
self.raise_exc('Unable to get cli output: %s' % str(exc))
def rpc(self, command, output='xml'):
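        # 'command' is a space-separated string such as
        # "get_software_information detail=True"; rpc_args() (defined at the
        # bottom of this file) splits it into the RPC name and typed keyword
        # arguments for the dynamic PyEZ call below.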
name, kwargs = rpc_args(command)
meth = getattr(self.device.rpc, name)
reply = meth({'format': output}, **kwargs)
return reply
### Config methods ###
def get_config(self, config_format="text"):
if config_format not in SUPPORTED_CONFIG_FORMATS:
self.raise_exc(msg='invalid config format. Valid options are '
'%s' % ', '.join(SUPPORTED_CONFIG_FORMATS))
ele = self.rpc('get_configuration', output=config_format)
if config_format in ['text', 'set']:
return str(ele.text).strip()
else:
return ele
def load_config(self, candidate, update='merge', comment=None,
confirm=None, format='text', commit=True):
merge = update == 'merge'
overwrite = update == 'overwrite'
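        # update='merge' overlays the candidate onto the running config,
        # while update='overwrite' replaces the configuration wholesale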
self.lock_config()
try:
candidate = '\n'.join(candidate)
self.config.load(candidate, format=format, merge=merge,
overwrite=overwrite)
except ConfigLoadError:
exc = get_exception()
self.raise_exc('Unable to load config: %s' % str(exc))
diff = self.config.diff()
self.check_config()
if all((commit, diff)):
self.commit_config(comment=comment, confirm=confirm)
self.unlock_config()
return diff
def save_config(self):
raise NotImplementedError
### end of Config ###
def get_facts(self, refresh=True):
if refresh:
self.device.facts_refresh()
return self.device.facts
def unlock_config(self):
try:
self.config.unlock()
self._locked = False
except UnlockError:
exc = get_exception()
raise NetworkError('unable to unlock config: %s' % str(exc))
def lock_config(self):
try:
self.config.lock()
self._locked = True
except LockError:
exc = get_exception()
raise NetworkError('unable to lock config: %s' % str(exc))
def check_config(self):
if not self.config.commit_check():
self.raise_exc(msg='Commit check failed')
def commit_config(self, comment=None, confirm=None):
try:
kwargs = dict(comment=comment)
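            # a positive 'confirm' arms a Junos confirmed commit, which rolls
            # back automatically unless confirmed again within that many minutes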
if confirm and confirm > 0:
kwargs['confirm'] = confirm
return self.config.commit(**kwargs)
except CommitError:
exc = get_exception()
raise NetworkError('unable to commit config: %s' % str(exc))
def rollback_config(self, identifier, commit=True, comment=None):
self.lock_config()
try:
self.config.rollback(identifier)
except ValueError:
exc = get_exception()
            self.raise_exc('Unable to rollback config: %s' % str(exc))
diff = self.config.diff()
if commit:
self.commit_config(comment=comment)
self.unlock_config()
return diff
Netconf = register_transport('netconf')(Netconf)
class Cli(CliBase):
CLI_PROMPTS_RE = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
CLI_ERRORS_RE = [
re.compile(r"% ?Error"),
re.compile(r"% ?Bad secret"),
re.compile(r"invalid input", re.I),
re.compile(r"(?:incomplete|ambiguous) command", re.I),
re.compile(r"connection timed out", re.I),
re.compile(r"[^\r\n]+ not found", re.I),
re.compile(r"'[^']' +returned error code: ?\d+"),
]
def connect(self, params, **kwargs):
super(Cli, self).connect(params, **kwargs)
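        # a prompt ending in '%' means the session landed in the Junos shell
        # rather than the CLI, so enter the CLI first, then disable paging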
if self.shell._matched_prompt.strip().endswith('%'):
self.execute('cli')
self.execute('set cli screen-length 0')
def configure(self, commands, **kwargs):
cmds = ['configure']
cmds.extend(to_list(commands))
if kwargs.get('comment'):
cmds.append('commit and-quit comment "%s"' % kwargs.get('comment'))
else:
cmds.append('commit and-quit')
responses = self.execute(cmds)
return responses[1:-1]
def load_config(self, commands):
return self.configure(commands)
def get_config(self, output='block'):
cmd = 'show configuration'
if output == 'set':
cmd += ' | display set'
return self.execute([cmd])[0]
Cli = register_transport('cli', default=True)(Cli)
def split(value):
lex = shlex.shlex(value)
lex.quotes = '"'
lex.whitespace_split = True
lex.commenters = ''
return list(lex)
def rpc_args(args):
kwargs = dict()
args = split(args)
name = args.pop(0)
for arg in args:
key, value = arg.split('=')
        if str(value).upper() in ['TRUE', 'FALSE']:
            kwargs[key] = str(value).upper() == 'TRUE'
elif re.match(r'^[0-9]+$', value):
kwargs[key] = int(value)
else:
kwargs[key] = str(value)
return (name, kwargs)
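# Illustrative example: rpc_args('get_interface_information terse=True level=1')
# returns ('get_interface_information', {'terse': True, 'level': 1}).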
| jasonzzz/ansible | lib/ansible/module_utils/junos.py | Python | gpl-3.0 | 9,209 | 0.000869 |
# -*- coding: utf-8 -*-
"""
pid - example of PID control of a simple process with a time constant
Copyright (c) 2016 - RocketRedNeck.com RocketRedNeck.net
RocketRedNeck and MIT Licenses
RocketRedNeck hereby grants license for others to copy and modify this source code for
whatever purpose other's deem worthy as long as RocketRedNeck is given credit where
where credit is due and you leave RocketRedNeck out of it for all other nefarious purposes.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import matplotlib.pyplot as plot
import numpy as np
import math
tmax = 3.0
dt = 0.01
ts = np.arange(0.0, tmax, dt)
pvs = np.zeros(len(ts))
sps = np.zeros(len(ts))
mvs = np.zeros(len(ts))
mps = np.zeros(len(ts))
kf = 0.0
kp = 20.0 #10.0
ki = 0.0
kd = 2.0 #1.0
dt = ts[1] - ts[0]
Gp = 1.0
delay = 1 * dt
tau = 1000 * dt
sp_period = 1.0
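# plant model parameters: a transport delay of one sample plus a first-order
# lag; note tau is consumed as exp(-1/tau) below, i.e. it acts as a sample
# count rather than seconds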
err = 0.0
intErr = 0.0
lastErr = 0.0
lastT = ts[0]
lastG = 0.0
i = 0
d = 0
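# discretized first-order lag: with a = exp(-1/tau), the plant update in the
# loop is G[k] = (1 - a)*Gp*mp[k] + a*G[k-1]; exp stores -a, which explains
# the signs in the expression below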
exp = -np.exp(-1/tau)
mp = 0
for t in ts:
if (t > 0):
sps[i] = math.sin(sp_period*t)
sps[i] = sps[i] / abs(sps[i]) # Square wave
else:
sps[i] = 0
derr = err - lastErr
intErr = intErr + err
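    # parallel PID with setpoint feedforward:
    # mv = kf*sp + kp*e + ki*sum(e) + kd*(de/dt); note intErr sums raw errors,
    # so the effective integral gain scales with the sample rate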
mv = kf*sps[i] + (kp * err) + (ki * intErr) + (kd * (derr/dt))
mvs[i] = mv
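    # the controller output acts as a rate command: integrate it into the
    # manipulated position mp that drives the plant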
mp = mp + (mv * dt)
mps[i] = mp
G = 0.0
if (t >= delay):
G = mp * Gp * (1.0 + exp) - (lastG * exp)
else:
d += 1
pvs[i] = G
lastG = G
i += 1
lastErr = err
err = 0.0
if (t >= delay):
err = sps[i-d] - pvs[i-d]
# err += np.random.randn(1)*0.09
plot.figure(1)
plot.cla()
plot.grid()
plot.plot(ts,sps,ts,pvs)
| RocketRedNeck/PythonPlayground | pid_dot.py | Python | mit | 2,665 | 0.01651 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "garelay.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| smn/garelay | manage.py | Python | bsd-2-clause | 259 | 0 |
"""
Format the current file with black or isort.
Available in Tools/Python/Black and Tools/Python/Isort.
"""
from __future__ import annotations
import logging
import subprocess
import traceback
from functools import partial
from pathlib import Path
from tkinter import messagebox
from porcupine import menubar, tabs, textutils, utils
from porcupine.plugins import python_venv
log = logging.getLogger(__name__)
def run_tool(tool: str, code: str, path: Path | None) -> str:
python = python_venv.find_python(None if path is None else utils.find_project_root(path))
if python is None:
messagebox.showerror(
"Can't find a Python installation", f"You need to install Python to run {tool}."
)
return code
fail_str = f"Running {tool} failed"
try:
# run in subprocess just to make sure that it can't crash porcupine
# set cwd so that black/isort finds its config in pyproject.toml
#
# FIXME: file must not be named black.py or similar
result = subprocess.run(
[str(python), "-m", tool, "-"],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=(Path.home() if path is None else path.parent),
input=code.encode("utf-8"),
)
return result.stdout.decode("utf-8")
except subprocess.CalledProcessError as e:
messagebox.showerror(
fail_str,
utils.tkinter_safe_string(e.stderr.decode("utf-8"), hide_unsupported_chars=True),
)
except Exception:
log.exception(f"running {tool} failed")
messagebox.showerror(fail_str, traceback.format_exc())
return code
def format_code_in_textwidget(tool: str, tab: tabs.FileTab) -> None:
before = tab.textwidget.get("1.0", "end - 1 char")
after = run_tool(tool, before, tab.path)
if before != after:
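        # change_batch should make the replacement register as one change
        # (a single undo step) rather than many individual edits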
with textutils.change_batch(tab.textwidget):
tab.textwidget.replace("1.0", "end - 1 char", after)
def setup() -> None:
menubar.add_filetab_command("Tools/Python/Black", partial(format_code_in_textwidget, "black"))
menubar.add_filetab_command("Tools/Python/Isort", partial(format_code_in_textwidget, "isort"))
| Akuli/porcupine | porcupine/plugins/python_tools.py | Python | mit | 2,238 | 0.002234 |