| code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 3-1.05M) |
---|---|---|---|---|---|
#!/bin/python3
# HackerRank "Extra Long Factorials": Python ints are arbitrary-precision,
# so a plain recursive factorial suffices.
fact = lambda n: 1 if n <= 1 else n * fact(n - 1)
n = int(input().strip())
print(fact(n))
| lilsweetcaligula/Online-Judges | hackerrank/algorithms/implementation/medium/extra_long_factorials/py/solution.py | Python | mit | 134 |
class _DataTuner(object):
_skeys = ["sid", "fid", "area_ratio",
"time", "dlen", "olen",
"mean_dist", "qart_dist",
"top10", "top20", "top30", "top40", "top50",
"rtop10", "rdist", "inv_rdist"
]
def __init__(self):
"""
Output integrated useful data information
"""
pass
def __no_data(self, data):
return (len(data) == 0)
def __no_ransac(self, data):
return ("ransac" not in data.columns)
def __datagen(func):
def inner(self, *args, **kwargs):
rejectable = kwargs["rejector"]
if rejectable(args[0]):
return [None]*kwargs["isize"]
return func(self, *args, **kwargs)
return inner
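    # __datagen short-circuits: when the supplied rejector predicate fires on
    # the input frame, the wrapped method returns [None] * isize instead of
    # computing any statistics.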
@__datagen
def __topN(self, data, isize=5, rejector=None):
sd = data.sort(columns="dist")
dm = lambda n: sd[:n].dist.mean()
return map(dm, range(10, 10*(isize+1), 10))
@__datagen
def __base_dist(self, data, isize=2, rejector=None):
mdis = data.dist.mean()
qcond = data["dist"] < data["dist"].quantile(.25)
qdis = data[qcond].dist.mean()
return [mdis, qdis]
@__datagen
def __ransac_dist(self, data, isize=3, rejector=None):
rdf = data[data.ransac > 0]
ra = self.__topN(rdf, isize=5, rejector=self.__no_data)[:1]
ra += [rdf.dist.mean()]
dist = data.dist.copy()
dist[dist == 0] = 1
data["wd"] = dist * data["ransac"]
ra += [data.wd.mean()]
return ra
def __arear(self, data):
ar = None
if "area" in data.columns:
dar = data.iloc[0]
ar = dar.area/dar.barea
return ar
def _statisticalize(self, data=None, olen=None, timer=None,
sx=None, fx=None):
aa = [sx["pid"], fx["pid"], self.__arear(data)]
aa += [timer.msecs, len(data), olen]
aa += self.__base_dist(data, isize=2, rejector=self.__no_data)
aa += self.__topN(data, isize=5, rejector=self.__no_data)
aa += self.__ransac_dist(data, isize=3, rejector=self.__no_ransac)
return dict(zip(self._skeys, aa))
def _group_dist(self, gi, grp):
r = dict(fid=gi,
mean_dist=grp.mean_dist.mean(),
qart_dist=grp.qart_dist.mean(),
top_dist=grp.top10_dist.mean())
return r
def _group_all(self, gi, grp):
ga = grp.area_ratio.copy()
ga.sort(ascending=0)
r = dict(fid=gi,
top10=grp.top10.mean(),
top20=grp.top20.mean(),
top30=grp.top30.mean(),
top40=grp.top40.mean(),
top50=grp.top50.mean(),
rdist=grp.rdist.mean(),
rtop10=grp.rtop10.mean(),
ridist=grp.inv_rdist.mean(),
mean=grp.mean_dist.mean(),
qart=grp.qart_dist.mean(),
bot_area=ga[:1].sum(),
mean_area=ga[:3].mean(),
)
return r
| speed-of-light/pyslider | lib/exp/pairing/data_tuner.py | Python | agpl-3.0 | 3,037 |
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements command to remove an alarm definition from a Boundary account.
"""
from boundary import ApiCli
import requests
"""
Uses the following Boundary API:
https://premium-api.boundary.com/v2/alarms/:alarmId
"""
class AlarmDelete(ApiCli):
def __init__(self, **kwargs):
"""
"""
ApiCli.__init__(self)
self._kwargs = kwargs
self._alarm_id = None
def add_arguments(self):
"""
"""
ApiCli.add_arguments(self)
self.parser.add_argument('-i', '--alarm-id', dest='alarm_id', action='store', required=True,
metavar='alarm-id', help='Alarm identifier')
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
ApiCli.get_arguments(self)
        self._alarm_id = self.args.alarm_id
def handle_key_word_args(self):
        self._alarm_id = self._kwargs.get('id')
def get_api_parameters(self):
self.method = "DELETE"
self.path = "v2/alarms/{0}".format(self._alarm_id)
def get_description(self):
return 'Deletes an alarm definition from a {0} account'.format(self.product_name)
def _handle_results(self):
"""
Handle the results of the API call
"""
        # Only process if we get an HTTP return code other than 200.
if self._api_result.status_code != requests.codes.ok:
print(self.colorize_json(self._api_result.text))
def _handle_api_results(self):
# Only process if we get HTTP result of 200
if self._api_result.status_code != requests.codes.ok:
pass
return None
def good_response(self, status_code):
"""
Determines what status codes represent a good response from an API call.
"""
return status_code == requests.codes.no_content
| boundary/pulse-api-cli | boundary/alarm_delete.py | Python | apache-2.0 | 2,519 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Participant.user'
db.alter_column(u'experiments_participant', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL))
def backwards(self, orm):
# Changing field 'Participant.user'
db.alter_column(u'experiments_participant', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'experiments.anonymousvisitor': {
'Meta': {'object_name': 'AnonymousVisitor'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'experiments.dailyconversionreport': {
'Meta': {'object_name': 'DailyConversionReport'},
'confidence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'control_group_size': ('django.db.models.fields.IntegerField', [], {}),
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['experiments.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'overall_control_conversion': ('django.db.models.fields.IntegerField', [], {}),
'overall_test_conversion': ('django.db.models.fields.IntegerField', [], {}),
'test_group_size': ('django.db.models.fields.IntegerField', [], {})
},
u'experiments.dailyconversionreportgoaldata': {
'Meta': {'object_name': 'DailyConversionReportGoalData'},
'confidence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'control_conversion': ('django.db.models.fields.IntegerField', [], {}),
'goal_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['experiments.GoalType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'goal_data'", 'to': u"orm['experiments.DailyConversionReport']"}),
'test_conversion': ('django.db.models.fields.IntegerField', [], {})
},
u'experiments.dailyengagementreport': {
'Meta': {'object_name': 'DailyEngagementReport'},
'confidence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'control_group_size': ('django.db.models.fields.IntegerField', [], {}),
'control_score': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['experiments.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'test_group_size': ('django.db.models.fields.IntegerField', [], {}),
'test_score': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'experiments.experiment': {
'Meta': {'object_name': 'Experiment'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'start_date': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'experiments.goalrecord': {
'Meta': {'object_name': 'GoalRecord'},
'anonymous_visitor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['experiments.AnonymousVisitor']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'goal_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['experiments.GoalType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'experiments.goaltype': {
'Meta': {'object_name': 'GoalType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'experiments.participant': {
'Meta': {'unique_together': "(('user', 'experiment'), ('anonymous_visitor', 'experiment'))", 'object_name': 'Participant'},
'anonymous_visitor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['experiments.AnonymousVisitor']", 'null': 'True', 'blank': 'True'}),
'enrollment_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['experiments.Experiment']"}),
'group': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'})
}
}
    complete_apps = ['experiments']
| uhuramedia/django-lean | django_lean/experiments/migrations/0010_auto__chg_field_participant_user.py | Python | bsd-3-clause | 9,043 |
# vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0 fdm=marker fmr=#{,#}
"""
Tornado versions of RPC service and client
Authors:
* Brian Granger
* Alexander Glyzov
Example
-------
To create a simple service::
from netcall.tornado import TornadoRPCService
echo = TornadoRPCService()
@echo.task
def echo(self, s):
return s
echo.bind('tcp://127.0.0.1:5555')
echo.start()
echo.serve()
To talk to this service::
from netcall.tornado import TornadoRPCClient
p = TornadoRPCClient()
p.connect('tcp://127.0.0.1:5555')
p.echo('Hi there')
'Hi there'
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from ..base_service import RPCServiceBase
from ..utils import RemoteMethod
from ..errors import RPCError, RemoteRPCError, RPCTimeoutError
from ..serializer import *
from .service import TornadoRPCService
from .client import TornadoRPCClient
| srault95/netcall | netcall/tornado/__init__.py | Python | bsd-3-clause | 1,409 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import unittest
from django.utils import six
from django.utils.encoding import (
escape_uri_path, filepath_to_uri, force_bytes, force_text, iri_to_uri,
smart_text, uri_to_iri,
)
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlquote_plus
class TestEncodingUtils(unittest.TestCase):
def test_force_text_exception(self):
"""
Check that broken __unicode__/__str__ actually raises an error.
"""
class MyString(object):
def __str__(self):
return b'\xc3\xb6\xc3\xa4\xc3\xbc'
__unicode__ = __str__
# str(s) raises a TypeError on python 3 if the result is not a text type.
# python 2 fails when it tries converting from str to unicode (via ASCII).
exception = TypeError if six.PY3 else UnicodeError
with self.assertRaises(exception):
force_text(MyString())
def test_force_text_lazy(self):
s = SimpleLazyObject(lambda: 'x')
self.assertTrue(issubclass(type(force_text(s)), six.text_type))
def test_force_bytes_exception(self):
"""
Test that force_bytes knows how to convert to bytes an exception
containing non-ASCII characters in its args.
"""
error_msg = "This is an exception, voilà"
exc = ValueError(error_msg)
result = force_bytes(exc)
self.assertEqual(result, error_msg.encode('utf-8'))
def test_force_bytes_strings_only(self):
today = datetime.date.today()
self.assertEqual(force_bytes(today, strings_only=True), today)
def test_smart_text(self):
class Test:
if six.PY3:
def __str__(self):
return 'ŠĐĆŽćžšđ'
else:
def __str__(self):
return 'ŠĐĆŽćžšđ'.encode('utf-8')
class TestU:
if six.PY3:
def __str__(self):
return 'ŠĐĆŽćžšđ'
def __bytes__(self):
return b'Foo'
else:
def __str__(self):
return b'Foo'
def __unicode__(self):
return '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111'
self.assertEqual(smart_text(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
self.assertEqual(smart_text(TestU()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
self.assertEqual(smart_text(1), '1')
self.assertEqual(smart_text('foo'), 'foo')
class TestRFC3987IEncodingUtils(unittest.TestCase):
def test_filepath_to_uri(self):
self.assertEqual(filepath_to_uri('upload\\чубака.mp4'), 'upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4')
self.assertEqual(
filepath_to_uri('upload\\чубака.mp4'.encode('utf-8')),
'upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4'
)
def test_iri_to_uri(self):
cases = [
# Valid UTF-8 sequences are encoded.
('red%09rosé#red', 'red%09ros%C3%A9#red'),
('/blog/for/Jürgen Münster/', '/blog/for/J%C3%BCrgen%20M%C3%BCnster/'),
('locations/%s' % urlquote_plus('Paris & Orléans'), 'locations/Paris+%26+Orl%C3%A9ans'),
# Reserved chars remain unescaped.
('%&', '%&'),
('red&♥ros%#red', 'red&%E2%99%A5ros%#red'),
]
for iri, uri in cases:
self.assertEqual(iri_to_uri(iri), uri)
# Test idempotency.
self.assertEqual(iri_to_uri(iri_to_uri(iri)), uri)
def test_uri_to_iri(self):
cases = [
# Valid UTF-8 sequences are decoded.
('/%E2%99%A5%E2%99%A5/', '/♥♥/'),
('/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93', '/♥♥/?utf8=✓'),
# Broken UTF-8 sequences remain escaped.
('/%AAd%AAj%AAa%AAn%AAg%AAo%AA/', '/%AAd%AAj%AAa%AAn%AAg%AAo%AA/'),
('/%E2%99%A5%E2%E2%99%A5/', '/♥%E2♥/'),
('/%E2%99%A5%E2%99%E2%99%A5/', '/♥%E2%99♥/'),
('/%E2%E2%99%A5%E2%99%A5%99/', '/%E2♥♥%99/'),
('/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93', '/♥♥/?utf8=%9C%93✓%9C%93'),
]
for uri, iri in cases:
self.assertEqual(uri_to_iri(uri), iri)
# Test idempotency.
self.assertEqual(uri_to_iri(uri_to_iri(uri)), iri)
def test_complementarity(self):
cases = [
('/blog/for/J%C3%BCrgen%20M%C3%BCnster/', '/blog/for/J\xfcrgen M\xfcnster/'),
('%&', '%&'),
('red&%E2%99%A5ros%#red', 'red&♥ros%#red'),
('/%E2%99%A5%E2%99%A5/', '/♥♥/'),
('/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93', '/♥♥/?utf8=✓'),
('/%AAd%AAj%AAa%AAn%AAg%AAo%AA/', '/%AAd%AAj%AAa%AAn%AAg%AAo%AA/'),
('/%E2%99%A5%E2%E2%99%A5/', '/♥%E2♥/'),
('/%E2%99%A5%E2%99%E2%99%A5/', '/♥%E2%99♥/'),
('/%E2%E2%99%A5%E2%99%A5%99/', '/%E2♥♥%99/'),
('/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93', '/♥♥/?utf8=%9C%93✓%9C%93'),
]
for uri, iri in cases:
self.assertEqual(iri_to_uri(uri_to_iri(uri)), uri)
self.assertEqual(uri_to_iri(iri_to_uri(iri)), iri)
def test_escape_uri_path(self):
self.assertEqual(
escape_uri_path('/;some/=awful/?path/:with/@lots/&of/+awful/chars'),
'/%3Bsome/%3Dawful/%3Fpath/:with/@lots/&of/+awful/chars'
)
self.assertEqual(escape_uri_path('/foo#bar'), '/foo%23bar')
self.assertEqual(escape_uri_path('/foo?bar'), '/foo%3Fbar')
| filias/django | tests/utils_tests/test_encoding.py | Python | bsd-3-clause | 5,759 |
from operator import add, div, mul, neg
from _ppeg import Pattern as P
def pattprint(pattern):
print pattern.env()
pattern.display()
mt = P(1)
ANY = P(1)
predef = {
'nl': P('\n'),
}
def getdef(name, defs):
c = defs and defs[name]
return c
def patt_error(s, i):
    msg = s[i:] if len(s) < i + 20 else s[i:i+20] + '...'
    msg = "pattern error near '%s'" % (msg,)
    raise Exception(msg)
def mult(p, n):
"""Returns a Pattern that matches exactly n repetitions of Pattern p.
"""
np = P()
while n >= 1:
if n % 2:
np = np + p
p = p + p
n = n // 2
return np
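# e.g. mult(p, 3) yields p + (p + p): n is decomposed in binary, doubling p
# each round, so only O(log n) pattern concatenations are needed.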
def equalcap(s, i, (c,)):
if not isinstance(c, str):
return None
e = len(c) + i
if s[i: e] == c:
return e
else:
return None
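# equalcap implements back-references for the grammar below: it succeeds only
# when the input at position i repeats the previously captured string c,
# returning the new position on success and None on failure.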
S = (P.Set(" \t\n") | '--' + (ANY - P.Set("\n"))**0)**0
name = P.Range("AZaz") + P.Range("AZaz09")**0
exp_follow = P('/') | P(")") | P("}") | P(":}") | P("~}") | name | -1
name = P.Cap(name)
Identifier = name + P.CapA(1)
num = (P.Cap(P.Range("09")**1) + S) / int
String = (("'" + P.Cap((ANY - "'")**0) + "'") |
('"' + P.Cap((ANY - '"')**0) + '"'))
def getcat(c, defs):
cat = defs.get(c, predef.get(c))
if not cat:
raise Exception('name %s undefined' % (c,))
return cat
Cat = ('%' + Identifier) / getcat
Range = P.CapS(ANY + (P("-")/"") + (ANY-"]")) / P.Range
item = Cat | Range | P.Cap(ANY)
def f(c, p):
if c == "^":
return ANY - p
else:
return p
Class = ( ("[" + P.Cap(P("^")**-1) # optional complement symbol
+ P.CapF(item + (item - "]")**0, mt.__or__)) / f
) + "]"
def adddef(d, k, defs, exp):
if d.get(k):
raise Exception("'%s' already defined as a rule" % k)
d[k] = exp
return d
def firstdef(n, defs, r):
    return adddef({}, n, defs, r)
def abf(a, b, f):
return f(a, b)
def np(n, p):
return P.CapG(p, n)
exp = P.Grammar(
# 0 Exp
(S + ( P.Var(6)
| P.CapF(P.Var(1) + ('/' + S + P.Var(1))**0, mt.__or__) )),
# 1 Seq
(P.CapF(P.CapC(P("")) + P.Var(2)**0, mt.__add__)
+ (+exp_follow | patt_error)),
# 2 Prefix
( ("&" + S + P.Var(2)) / mt.__pos__
| ("!" + S + P.Var(2)) / mt.__neg__
| P.Var(3)),
# 3 Suffix
(P.CapF(P.Var(4) + S +
( ( P("+") + P.CapC(1, mt.__pow__)
| P("*") + P.CapC(0, mt.__pow__)
| P("?") + P.CapC(-1, mt.__pow__)
| "^" + ( P.CapG(num + P.CapC(mult))
| P.CapG(P.Cap(P.Set("+-") + P.Range("09")**1)
+ P.CapC(mt.__pow__))
)
| "->" + S + ( P.CapG(String + P.CapC(mt.__div__))
| P("{}") + P.CapC(None, P.CapT)
| P.CapG(Identifier / getdef + P.CapC(mt.__div__))
)
| "=>" + S + P.CapG(Identifier / getdef + P.CapC(P.CapRT))
) + S
)**0, abf)),
# 4 Primary
("(" + P.Var(0) + ")"
| String / P
| Class
| Cat
| ("{:" + (name + ":" | P.CapC(None)) + P.Var(0) + ":}") / np
| ("=" + name) / (lambda n: CapRT(CapB(n), equalcap))
| P("{}") / P.CapP
| ("{~" + P.Var(0) + "~}") / P.CapS
| ("{" + P.Var(0) + "}") / P.Cap
| P(".") + P.CapC(ANY)
| ("<" + name + ">") / P.Var),
# 5 Definition
(Identifier + S + '<-' + P.Var(0)),
# 6 Grammar
(P.CapF(P.Var(5) / firstdef + P.CapG(P.Var(5))**0, adddef) / P.Grammar),
)
#pattprint(exp)
pattern = (S + exp) / P + (-ANY | patt_error)
def compile(p, defs=None):
m = pattern(p, defs)
return m.captures[0]
balanced = compile('balanced <- "(" ([^()] / <balanced>)* ")"')
if __name__ == '__main__':
print '(hello())', balanced('(hello())').pos
| moreati/ppeg | pe.py | Python | mit | 3,807 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os.path
import types
from app.libs.utils import load_module_attrs
def _filter(module):
if hasattr(module, 'urls') and isinstance(module.urls, types.ListType):
return getattr(module, 'urls')
path = os.path.abspath(os.path.dirname(__file__))
urls = load_module_attrs(path, _filter, True)
__all__ = ['urls']
| Damnever/2L | app/services/__init__.py | Python | bsd-3-clause | 421 |
''' -- imports from installed packages -- '''
from django.shortcuts import render_to_response
from django.template import RequestContext
# from django.core.urlresolvers import reverse
from mongokit import paginator
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' -- imports from application folders/files -- '''
from gnowsys_ndf.settings import GAPPS
from gnowsys_ndf.ndf.models import Node, GRelation,GSystemType,File,Triple
from gnowsys_ndf.ndf.models import node_collection
from gnowsys_ndf.ndf.views.file import *
from gnowsys_ndf.ndf.views.methods import get_group_name_id, cast_to_data_type,get_execution_time
GST_FILE = node_collection.one({'_type':'GSystemType', 'name': "File"})
ebook_gst = node_collection.one({'_type':'GSystemType', 'name': "E-Book"})
GST_IMAGE = node_collection.one({'_type':'GSystemType', 'name': GAPPS[3]})
GST_VIDEO = node_collection.one({'_type':'GSystemType', 'name': GAPPS[4]})
e_library_GST = node_collection.one({'_type':'GSystemType', 'name': 'E-Library'})
pandora_video_st = node_collection.one({'_type':'GSystemType', 'name': 'Pandora_video'})
app = node_collection.one({'_type':'GSystemType', 'name': 'E-Library'})
@get_execution_time
def ebook_listing(request, group_id, page_no=1):
group_name, group_id = get_group_name_id(group_id)
all_ebooks = node_collection.find({"_type": "File", "attribute_set.educationaluse": "eBooks"})
# all_ebook = node_collection.find({"_type": "File", "member_of": {"$in":[gst_ebook._id]} })
# return render_to_response("ndf/page_list.html",
# {'title': "E-Book",
# 'appId':app._id,
# 'shelf_list': shelf_list,'shelves': shelves,
# 'searching': True, 'query': search_field,
# 'page_nodes': all_ebook, 'groupid':group_id, 'group_id':group_id
# },
# context_instance=RequestContext(request) )
return render_to_response("ndf/ebook.html",
{"all_ebooks": all_ebooks, "ebook_gst": ebook_gst,
"group_id": group_id, "groupid": group_id},
context_instance = RequestContext(request))
| Dhiru/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/e-book.py | Python | agpl-3.0 | 2,256 |
# -*- coding: iso-8859-1 -*-
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
# Support for Unicode characters, Paul Swartz, August 2010
# PKS: I removed a bunch of stuff we don't need for MC
import logging
from collections import deque
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["shlex"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream, posix=True, locale=False):
self.was_unicode = False
if isinstance(instream, basestring):
if isinstance(instream, unicode):
self.was_unicode = True
instream = instream.encode('utf_32_be')
instream = StringIO(instream)
self.instream = instream
self.locale = locale
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
if locale:
self.wordchars = ''
self.whitespace = ''
else:
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
if self.posix:
self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
if self.was_unicode:
self.wordchars = self.wordchars.decode('latin1')
self.whitespace = self.whitespace.decode('latin1')
self.quotes = self.quotes.decode('latin1')
            self.escape = self.escape.decode('latin1')
self.escapedquotes = self.escapedquotes.decode('latin1')
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
logging.debug('shlex: reading from %s, line %d',
self.instream, self.lineno)
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
logging.debug("shlex: pushing token %r", tok)
self.pushback.appendleft(tok)
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
logging.debug("shlex: popping token %r", tok)
return tok
        # No pushback. Get a token.
        raw = self.read_token()
        # Maybe we got EOF instead?
        if raw == self.eof:
            logging.debug("shlex: token=EOF")
            return self.eof
        logging.debug("shlex: token=%r", raw)
        return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
if not self.was_unicode:
nextchar = self.instream.read(1)
else:
nextchar = self.instream.read(4).decode('utf_32_be')
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
logging.debug("shlex: in state %r I see character: %r",
self.state, nextchar)
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace or \
self.locale and nextchar.isspace():
logging.debug(
"shlex: I see whitespace in whitespace state")
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or \
self.locale and nextchar.isalnum():
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
logging.debug("shlex: I see EOF in quotes state")
# XXX what error should be raised here?
raise ValueError, "No closing quotation"
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and \
self.state in self.escapedquotes:
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar: # end of file
logging.debug("shlex: I see EOF in escape state")
# XXX what error should be raised here?
raise ValueError, "No escaped character"
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace or \
self.locale and nextchar.isspace():
logging.debug("shlex: I see whitespace in word state")
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or nextchar in self.quotes \
or self.whitespace_split or \
self.locale and nextchar.isalnum():
self.token = self.token + nextchar
else:
self.pushback.appendleft(nextchar)
logging.debug("shlex: I see punctuation in word state")
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if result:
logging.debug("shlex: raw token=%r", result)
else:
logging.debug("shlex: raw token=EOF")
return result
def __iter__(self):
return self
def next(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
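# Minimal usage sketch (posix mode, the default):
#     lexer = shlex('foo "bar baz" qux')
#     list(lexer)  # -> ['foo', 'bar baz', 'qux']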
| pculture/mirocommunity | localtv/search/shlex.py | Python | agpl-3.0 | 9,212 |
import csv
import logging
import re
import requests
from account.models import EmailAddress
from django.contrib.auth import get_user_model
from django.core.management.base import NoArgsCommand
from constance import config
from pycon.models import PyConTutorialProposal, PyConSponsorTutorialProposal
logger = logging.getLogger(__name__)
TUT_RE = re.compile('Tutorial (\d+) - (Wed|Thur)\/(AM|PM)')
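# e.g. "Tutorial 12 - Wed/AM" matches, with group(1) == "12".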
class Command(NoArgsCommand):
"""
Parses an external CSV for tutorial registrant data and updates the
corresponding Tutorial instance with registrant information.
"""
def handle_noargs(self, **options):
"""Fetch the external URL and parse the data."""
print("Begin update tutorial registration numbers.")
url = config.CTE_TUTORIAL_DATA_URL
username = config.CTE_BASICAUTH_USER
password = config.CTE_BASICAUTH_PASS
if not all([url, username, password]):
print("CTE tutorial registration may not be correctly configured.")
auth = (username, password) if username and password else None
response = requests.get(url, auth=auth)
if not response.raise_for_status():
User = get_user_model()
tutorials = {} # CTE ID: PyConTutorialProposal
for row in csv.DictReader(response.content.splitlines()):
row.pop(None, None) # Delete any goofy keys
if not row or not any(v.strip() for v in row.values()):
print("Skipping blank line.")
continue
item = row['Item']
tut_name = row['Tutorial Name']
max_attendees = row['Max Attendees']
user_email = row['User Email']
user_id = row['PyCon ID']
tut_match = TUT_RE.match(tut_name)
if not tut_match:
print(
"Unable to register '{}' for '{}': Tutorial ID not "
"given.".format(user_email, tut_name))
continue
else:
tut_id = tut_match.group(1)
if tut_id not in tutorials:
try:
# Try to get the tutorial by ID.
# If that fails, match name and set the tutorial
# ID on the found object.
tutorial = PyConTutorialProposal.objects.get(id=tut_id)
except PyConTutorialProposal.DoesNotExist:
try:
sponsor_tutorial = PyConSponsorTutorialProposal.objects.get(id=tut_id)
continue
                        except PyConSponsorTutorialProposal.DoesNotExist:
                            # Not a sponsor tutorial either; fall through and
                            # report the invalid tutorial ID below.
                            pass
print(
"Unable to register '{}[{}]' for '{}': Tutorial ID "
"{} is invalid.".format(user_email, user_id, tut_name, tut_id))
continue
except PyConTutorialProposal.MultipleObjectsReturned:
print(
"Unable to register '{}[{}] for '{}': Multiple "
"tutorials found for '{}' or '{}'".format(
user_email, user_id, tut_name, tut_name, tut_id))
continue
else:
# Clear the registrants as these are effectively
# read-only, and can only be updated via CTE.
tutorial.registrants.clear()
tutorial.registration_count = 0
tutorial.cte_tutorial_id = tut_id
tutorial.max_attendees = max_attendees or None
tutorials[tut_id] = tutorial
tutorial = tutorials[tut_id]
tutorial.registration_count += 1
tutorial.save()
try:
user = User.objects.get(id=int(user_id))
except User.DoesNotExist:
print(
"Unable to register '{}[{}]' for '{}' ({}): User account "
"not found.".format(user_email, user_id, tut_name, tut_id))
continue
except User.MultipleObjectsReturned:
print(
"Unable to register '{}[{}]' for '{}' ({}): "
"Multiple user accounts found for "
"email.".format(user_email, user_id, tut_name, tut_id))
continue
except ValueError:
print(
"Unable to register '{}[{}]' for '{}' ({}): PyConID \"{}\""
"not recognized as an integer.".format(user_email, user_id,
tut_name, tut_id,
user_id))
continue
else:
user_emails = EmailAddress.objects.filter(user=user)
if user_email.lower() not in [e.email.lower() for e in user_emails]:
logger.debug("Adding missing email {} to user {}".format(user_email, user_id))
new_email = EmailAddress.objects.create(user=user, email=user_email)
tutorial.registrants.add(user)
logger.debug(
"Successfully registered '{}[{}]' for '{}' "
"({}).".format(user_email, user_id, tut_name, tut_id))
print("End update tutorial registration numbers.")
| PyCon/pycon | pycon/management/commands/update_tutorial_registrants.py | Python | bsd-3-clause | 5,738 |
import tensorflow as tf
"""tf.ceil(x,name=None)
功能:计算x各元素比x大的最小整数。
输入:x为张量,可以为`half`,`float32`, `float64`类型。"""
x = tf.constant([[0.2, 0.8, -0.7]], tf.float64)
z = tf.ceil(x)
sess = tf.Session()
print(sess.run(z))
sess.close()
# z==>[[1. 1. -0.]]
| Asurada2015/TFAPI_translation | math_ops_basicoperation/tf_ceil.py | Python | apache-2.0 | 308 |
import os
from subprocess import call
from django.core.management.base import BaseCommand
from django.conf import settings
from boto.s3.connection import S3Connection
class Command(BaseCommand):
help = "Loads fixture images from S3 bucket"
def handle(self, *args, **options):
if len(args)>0:
AWS_BUCKET_NAME = args[0]
else:
AWS_BUCKET_NAME = settings.AWS_BUCKET_NAME
AWS_ACCESS_KEY_ID = settings.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = settings.AWS_SECRET_ACCESS_KEY
self.stdout.write('Using bucket: {}'.format(AWS_BUCKET_NAME))
BASE_DIR = settings.BASE_DIR
uploads_dir = os.path.abspath(os.path.join(BASE_DIR, 'uploads'))
os.chdir(uploads_dir)
call('tar cjvf assets.tar.bz2 *', shell=True)
call('mv assets.tar.bz2 ..', shell=True)
conn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(AWS_BUCKET_NAME)
os.chdir(BASE_DIR)
k = bucket.new_key('fixtures/assets.tar.bz2')
k.set_contents_from_filename('assets.tar.bz2')
k.set_acl('public-read')
| MadeInHaus/django-template | backend/apps/utils/management/commands/save_fixture_images.py | Python | mit | 1,139 |
import socket
from flask import Flask, jsonify
app = Flask(__name__)
PRICES = {
'BHP': {'Code': 'BHP', 'Price': 91.72},
'GOOG': {'Code': 'GOOG', 'Price': 34.21},
'ABC': {'Code': 'ABC', 'Price': 1.17}
}
@app.route('/ping', methods=["GET"])
def ping():
return socket.gethostname()
@app.route('/prices/<code>', methods=["GET"])
def price(code):
exists = code in PRICES
if exists:
return jsonify(PRICES.get(code))
else:
return ('Not found', 404)
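# e.g. GET /prices/BHP returns {"Code": "BHP", "Price": 91.72};
# an unknown code returns a 404.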
@app.route('/prices', methods=["GET"])
def all_prices():
#raise Exception
return jsonify(list(PRICES.values()))
if __name__ == "__main__":
app.run(host='0.0.0.0')
| morganjbruce/microservices-in-action | chapter-6/market-data/app.py | Python | mit | 672 |
from flask import request, session
from flask import Blueprint
import api
import json
import mimetypes
import os.path
import api.auth
import api.cache
import api.stats
import asyncio
import threading
from api.annotations import api_wrapper
from api.common import flat_multi
from api.exceptions import *
blueprint = Blueprint("stats_api", __name__)
@blueprint.route("/scoregraph", methods=["GET"])
@api_wrapper
def stats_scoregraph_hook():
result = api.stats.get_scoregraph()
return { "success": 1, "data": result }
@blueprint.route("/scoreboard", methods=["GET"])
@api_wrapper
def stats_scoreboard_hook():
result = {}
result["scores"] = api.stats.get_all_team_scores()
if api.auth.is_logged_in() and api.user.in_team():
for i in range(len(result["scores"])):
if result["scores"][i]["tid"] == api.user.get_user()["team"]:
result["scores"][i]["my_team"] = True
break
if api.auth.is_logged_in() and api.user.in_team():
team = api.team.get_team()
groups = api.team.get_groups(tid=team["tid"])
result["groups"] = groups
return { "success": 1, "data": result }
@blueprint.route("/scoreboard/all", methods=["GET"])
@api_wrapper
def stats_scoreboard_all_hook():
result = {}
result = api.stats.get_all_team_scores(show_admin=True)
return { "success": 1, "data": result }
@blueprint.route("/score_progression/<tid>", methods=["GET"])
@api_wrapper
def stats_score_progression_hook(tid):
return { "success": 1, "data": api.stats.get_team_score_progression(tid=tid) } | EasyCTF/easyctf-2015 | api/api/routes/stats.py | Python | mit | 1,501 |
# -*- coding: utf-8 -*-
class Codes(object):
def __init__(self, **kws):
self._reverse_dict = {}
for k, v in kws.items():
self.__setattr__(k, v)
def str_value(self, value):
return self._reverse_dict[value]
def __setattr__(self, name, value):
super(Codes, self).__setattr__(name, value)
if not name.startswith('_'):
self._reverse_dict[value] = name
def __repr__(self):
constants_str = ', '.join('{0}={1!r}'.format(v, k) for k, v
in sorted(self._reverse_dict.items()))
return 'Codes({0})'.format(constants_str)
def __getitem__(self, key):
return self.__dict__[key.replace('-', '_').upper()]
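# Usage sketch (names illustrative):
#     http = Codes(OK=200, NOT_FOUND=404)
#     http.OK                # 200
#     http.str_value(404)    # 'NOT_FOUND'
#     http['not-found']      # 404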
| NSLS-II/channelarchiver | channelarchiver/structures.py | Python | mit | 735 |
import sc2reader
replay = sc2reader.load_replay('1.SC2Replay', load_level=4)
for i in range(0,3):
print("hello" + str(i))
| asveron/starcraftViz | starcraftViz1/learnPython.py | Python | mit | 130 |
import numpy as np
def affine_forward(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, d_1, ..., d_k) where x[i] is the ith input.
We multiply this against a weight matrix of shape (D, M) where
D = \prod_i d_i
Inputs:
x - Input data, of shape (N, d_1, ..., d_k)
w - Weights, of shape (D, M)
b - Biases, of shape (M,)
Returns a tuple of:
- out: output, of shape (N, M)
- cache: (x, w, b)
"""
out = x.reshape(x.shape[0], -1).dot(w) + b
cache = (x, w, b)
return out, cache
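# Shape sketch for affine_forward: x of shape (2, 4, 5) flattens to (2, 20);
# with w of shape (20, 3) and b of shape (3,), out has shape (2, 3).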
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (N, M)
- cache: Tuple of:
- x: Input data, of shape (N, d_1, ... d_k)
- w: Weights, of shape (D, M)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
- dw: Gradient with respect to w, of shape (D, M)
- db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
dx = dout.dot(w.T).reshape(x.shape)
dw = x.reshape(x.shape[0], -1).T.dot(dout)
db = np.sum(dout, axis=0)
return dx, dw, db
def relu_forward(x):
"""
Computes the forward pass for a layer of rectified linear units (ReLUs).
Input:
- x: Inputs, of any shape
Returns a tuple of:
- out: Output, of the same shape as x
- cache: x
"""
out = np.maximum(0, x)
cache = x
return out, cache
def relu_backward(dout, cache):
"""
Computes the backward pass for a layer of rectified linear units (ReLUs).
Input:
- dout: Upstream derivatives, of any shape
- cache: Input x, of same shape as dout
Returns:
- dx: Gradient with respect to x
"""
x = cache
dx = np.where(x > 0, dout, 0)
return dx
def dropout_forward(x, dropout_param):
"""
Performs the forward pass for (inverted) dropout.
Inputs:
- x: Input data, of any shape
- dropout_param: A dictionary with the following keys:
- p: Dropout parameter. We keep each neuron output with probability p.
- mode: 'test' or 'train'. If the mode is train, then perform dropout;
if the mode is test, then just return the input.
- seed: Seed for the random number generator. Passing seed makes this
function deterministic, which is needed for gradient checking but not in
real networks.
Outputs:
- out: Array of the same shape as x.
- cache: A tuple (dropout_param, mask). In training mode, mask is the dropout
mask that was used to multiply the input; in test mode, mask is None.
"""
p, mode = dropout_param['p'], dropout_param['mode']
if 'seed' in dropout_param:
np.random.seed(dropout_param['seed'])
mask = None
out = None
if mode == 'train':
###########################################################################
# TODO: Implement the training phase forward pass for inverted dropout. #
# Store the dropout mask in the mask variable. #
###########################################################################
    # Inverted dropout (sketch, following the docstring's convention that p
    # is the keep probability): keep each unit with probability p, scaling
    # survivors by 1/p so expected activations match test time.
    mask = (np.random.rand(*x.shape) < p) / p
    out = x * mask
###########################################################################
# END OF YOUR CODE #
###########################################################################
elif mode == 'test':
###########################################################################
# TODO: Implement the test phase forward pass for inverted dropout. #
###########################################################################
    # At test time inverted dropout is the identity; scaling happened in training.
    out = x
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (dropout_param, mask)
out = out.astype(x.dtype, copy=False)
return out, cache
def dropout_backward(dout, cache):
"""
Perform the backward pass for (inverted) dropout.
Inputs:
- dout: Upstream derivatives, of any shape
- cache: (dropout_param, mask) from dropout_forward.
"""
dropout_param, mask = cache
mode = dropout_param['mode']
if mode == 'train':
    ###########################################################################
    # TODO: Implement the training phase backward pass for inverted dropout. #
    ###########################################################################
    # Backprop through the same mask that was applied in the forward pass.
    dx = dout * mask
###########################################################################
# END OF YOUR CODE #
###########################################################################
elif mode == 'test':
dx = dout
return dx
def svm_loss(x, y):
"""
Computes the loss and gradient using for multiclass SVM classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
N = x.shape[0]
correct_class_scores = x[np.arange(N), y]
margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
margins[np.arange(N), y] = 0
loss = np.sum(margins) / N
num_pos = np.sum(margins > 0, axis=1)
dx = np.zeros_like(x)
dx[margins > 0] = 1
dx[np.arange(N), y] -= num_pos
dx /= N
return loss, dx
def softmax_loss(x, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
probs = np.exp(x - np.max(x, axis=1, keepdims=True))
probs /= np.sum(probs, axis=1, keepdims=True)
N = x.shape[0]
loss = -np.sum(np.log(probs[np.arange(N), y])) / N
dx = probs.copy()
dx[np.arange(N), y] -= 1
dx /= N
return loss, dx
| DeercoderCourse/cs231n | assignment3/cs231n/layers.py | Python | apache-2.0 | 6,236 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Record field function."""
from __future__ import absolute_import, print_function
from six import iteritems
from .models import OAISet
from .proxies import current_oaiserver
from .query import Query
try:
from functools import lru_cache
except ImportError: # pragma: no cover
from functools32 import lru_cache
@lru_cache(maxsize=1000)
def _build_query(search_pattern):
"""Build ``Query`` object for given set query."""
return Query(search_pattern)
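# The lru_cache on _build_query memoizes compiled queries, so each distinct
# set pattern is parsed only once per process.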
def _build_cache():
"""Preprocess set queries."""
for _set in OAISet.query.filter(
OAISet.search_pattern.isnot(None)).all():
yield _set.name, dict(
query=_set.search_pattern,
)
def _find_matching_sets_internally(sets, record):
"""Find matching sets with internal engine.
:param sets: set of sets where search
:param record: record to match
"""
for name, data in iteritems(sets):
if _build_query(data['query']).match(record):
yield set((name,))
def get_record_sets(record, matcher):
    """Return the list of sets to which the record belongs.
    :param record: Record instance
    :return: list of set names
    """
    sets = current_oaiserver.sets
    if sets is None:
        # build sets cache
        sets = current_oaiserver.sets = dict(_build_cache())
    output = set()
    for matched in matcher(sets, record):
        output |= matched
    return list(output)
class OAIServerUpdater(object):
"""Return the right update oaisets function."""
def __init__(self, app=None):
"""Init."""
self.matcher = _find_matching_sets_internally
def __call__(self, record, **kwargs):
"""Update sets list."""
record['_oaisets'] = get_record_sets(record=record,
matcher=self.matcher)
| jirikuncar/invenio-oaiserver | invenio_oaiserver/receivers.py | Python | gpl-2.0 | 2,841 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
from xml.dom import minidom
from swid_generator.command_manager import CommandManager as CM
from swid_generator.generators.utils import create_temp_folder
def sign_xml(data, signature_args):
folder_info = create_temp_folder(signature_args['pkcs12_file'])
file_path = folder_info['save_location'] + '/swid_tag.xml'
with open(file_path, 'wb') as file:
file.write(data)
if signature_args['pkcs12_password'] is None:
sign_command = ["xmlsec1", "--sign", "--pkcs12", signature_args['pkcs12_file'], file_path]
else:
sign_command = ["xmlsec1", "--sign", "--pkcs12", signature_args['pkcs12_file'], "--pwd", signature_args['pkcs12_password'], file_path]
return CM.run_command_check_output(sign_command)
def safe_print(data, signature_args=None, end='\n'):
"""
Safely print a binary or unicode string to stdout.
This is needed for Python 2 / 3 compatibility.
On Python 2, data is printed using the print() function. On Python 3,
binary data is written directly to ``sys.stdout.buffer``.
Args:
data (bytes or unicode):
The data to print as bytestring.
end (bytes or unicode):
The bytestring with which to end the output (default newline).
signature_args (Dictionary):
Dictionary with needed arguments from argument-parser.
"""
# Python 3
if hasattr(sys.stdout, 'buffer'):
if signature_args is not None:
if signature_args['pkcs12_file'] is not None:
data = bytes(sign_xml(data, signature_args), encoding='utf-8')
if isinstance(data, bytes):
sys.stdout.buffer.write(data)
else:
sys.stdout.write(data)
if isinstance(end, bytes):
sys.stdout.buffer.write(end)
else:
sys.stdout.write(end)
sys.stdout.flush()
# Python 2
else:
if signature_args is not None:
if signature_args['pkcs12_file'] is not None:
data = sign_xml(data, signature_args)
print(data, end=end)
def iterate(generator, action_func, separator, end):
"""
Wrapper function to print out a generator using specified separators.
This is needed when you want to print the items of a generator with a
separator string, but don't want that string to occur at the end of the
output.
Args:
generator:
A generator that returns printable items.
action_func:
A function object that takes one argument (the item) and prints it somehow.
separator (unicode):
The separator string to be printed between two items.
end (unicode):
The string that is printed at the very end of the output.
"""
item = next(generator)
while item:
action_func(item)
try:
item = next(generator)
safe_print(separator, end='')
except StopIteration:
safe_print(end, end='')
break
def print_swid_tags(swid_tags, signature_args, separator, pretty):
"""
Print the specified SWID Tags using the specified separator.
Args:
swid_tags:
A generator yielding SWID Tags as bytestrings.
separator (str or unicode):
The separator string to be printed between two SWID Tags.
pretty (bool):
Whether or not to use pretty printing.
"""
def action(tag):
if pretty:
swidtag_reparsed = minidom.parseString(tag)
# [:-1] strips away the last newline, automatically inserted by minidoms toprettyxml
safe_print(swidtag_reparsed.toprettyxml(indent=' ', encoding='utf-8')[:-1],
signature_args, end='')
else:
safe_print(tag, signature_args, end='')
iterate(swid_tags, action, separator, end='\n')
def print_software_ids(software_ids, separator):
"""
Print the specified software IDs using the specified separator.
Args:
        software_ids:
            A generator yielding software IDs as strings.
        separator (str or unicode):
            The separator string to be printed between two software IDs.
"""
def action(swid):
safe_print(swid, end='')
iterate(software_ids, action, separator, end='\n')
| pombredanne/swidGenerator | swid_generator/print_functions.py | Python | mit | 4,434 |
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'auberge liste'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmListe(Parametre):
"""Commande 'auberge liste'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "liste", "list")
self.aide_courte = "affiche les auberges existantes"
self.aide_longue = \
"Cette commande permet de lister les auberges existantes."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande."""
auberges = sorted([a for a in importeur.auberge.auberges.values()],
key=lambda a: a.cle)
if auberges:
en_tete = "+-" + "-" * 15 + "-+-" + "-" * 25 + "-+-" + \
"-" * 8 + "-+-" + "-" * 6 + "-+"
msg = en_tete + "\n"
msg += "| Clé | Salle | " \
"Chambres | Occupé |\n"
msg += en_tete
for auberge in auberges:
cle = auberge.cle
ident = auberge.ident_comptoir
nb_chambres = len(auberge.chambres)
pct_occupation = auberge.pct_occupation
msg += "\n| {:<15} | {:<25} | {:>8} | {:>5}% |".format(
cle, ident, nb_chambres, pct_occupation)
msg += "\n" + en_tete
personnage << msg
else:
personnage << "Aucune auberge n'existe pour l'heure."
| stormi/tsunami | src/secondaires/auberge/commandes/auberge/liste.py | Python | bsd-3-clause | 3,069 |
#!/usr/bin/env python
'''
Python bindings for libmagic
'''
import ctypes
from ctypes import *
from ctypes.util import find_library
def _init():
"""
Loads the shared library through ctypes and returns a library
L{ctypes.CDLL} instance
"""
return ctypes.cdll.LoadLibrary(find_library('magic'))
_libraries = {}
_libraries['magic'] = _init()
# Flag constants for open and setflags
MAGIC_NONE = NONE = 0
MAGIC_DEBUG = DEBUG = 1
MAGIC_SYMLINK = SYMLINK = 2
MAGIC_COMPRESS = COMPRESS = 4
MAGIC_DEVICES = DEVICES = 8
MAGIC_MIME_TYPE = MIME_TYPE = 16
MAGIC_CONTINUE = CONTINUE = 32
MAGIC_CHECK = CHECK = 64
MAGIC_PRESERVE_ATIME = PRESERVE_ATIME = 128
MAGIC_RAW = RAW = 256
MAGIC_ERROR = ERROR = 512
MAGIC_MIME_ENCODING = MIME_ENCODING = 1024
MAGIC_MIME = MIME = 1040
MAGIC_APPLE = APPLE = 2048
MAGIC_NO_CHECK_COMPRESS = NO_CHECK_COMPRESS = 4096
MAGIC_NO_CHECK_TAR = NO_CHECK_TAR = 8192
MAGIC_NO_CHECK_SOFT = NO_CHECK_SOFT = 16384
MAGIC_NO_CHECK_APPTYPE = NO_CHECK_APPTYPE = 32768
MAGIC_NO_CHECK_ELF = NO_CHECK_ELF = 65536
MAGIC_NO_CHECK_TEXT = NO_CHECK_TEXT = 131072
MAGIC_NO_CHECK_CDF = NO_CHECK_CDF = 262144
MAGIC_NO_CHECK_TOKENS = NO_CHECK_TOKENS = 1048576
MAGIC_NO_CHECK_ENCODING = NO_CHECK_ENCODING = 2097152
MAGIC_NO_CHECK_BUILTIN = NO_CHECK_BUILTIN = 4173824
class magic_set(Structure):
pass
magic_set._fields_ = []
magic_t = POINTER(magic_set)
_open = _libraries['magic'].magic_open
_open.restype = magic_t
_open.argtypes = [c_int]
_close = _libraries['magic'].magic_close
_close.restype = None
_close.argtypes = [magic_t]
_file = _libraries['magic'].magic_file
_file.restype = c_char_p
_file.argtypes = [magic_t, c_char_p]
_descriptor = _libraries['magic'].magic_descriptor
_descriptor.restype = c_char_p
_descriptor.argtypes = [magic_t, c_int]
_buffer = _libraries['magic'].magic_buffer
_buffer.restype = c_char_p
_buffer.argtypes = [magic_t, c_void_p, c_size_t]
_error = _libraries['magic'].magic_error
_error.restype = c_char_p
_error.argtypes = [magic_t]
_setflags = _libraries['magic'].magic_setflags
_setflags.restype = c_int
_setflags.argtypes = [magic_t, c_int]
_load = _libraries['magic'].magic_load
_load.restype = c_int
_load.argtypes = [magic_t, c_char_p]
_compile = _libraries['magic'].magic_compile
_compile.restype = c_int
_compile.argtypes = [magic_t, c_char_p]
_check = _libraries['magic'].magic_check
_check.restype = c_int
_check.argtypes = [magic_t, c_char_p]
_list = _libraries['magic'].magic_list
_list.restype = c_int
_list.argtypes = [magic_t, c_char_p]
_errno = _libraries['magic'].magic_errno
_errno.restype = c_int
_errno.argtypes = [magic_t]
class Magic(object):
def __init__(self, ms):
self._magic_t = ms
def close(self):
"""
Closes the magic database and deallocates any resources used.
"""
_close(self._magic_t)
def file(self, filename):
"""
Returns a textual description of the contents of the argument passed
as a filename or None if an error occurred and the MAGIC_ERROR flag
is set. A call to errno() will return the numeric error code.
"""
try: # attempt python3 approach first
bi = bytes(filename, 'utf-8')
return str(_file(self._magic_t, bi), 'utf-8')
except:
return _file(self._magic_t, filename)
def descriptor(self, fd):
"""
Like the file method, but the argument is a file descriptor.
"""
return _descriptor(self._magic_t, fd)
def buffer(self, buf):
"""
Returns a textual description of the contents of the argument passed
as a buffer or None if an error occurred and the MAGIC_ERROR flag
is set. A call to errno() will return the numeric error code.
"""
try: # attempt python3 approach first
return str(_buffer(self._magic_t, buf, len(buf)), 'utf-8')
except:
return _buffer(self._magic_t, buf, len(buf))
def error(self):
"""
Returns a textual explanation of the last error or None
if there was no error.
"""
try: # attempt python3 approach first
return str(_error(self._magic_t), 'utf-8')
except:
return _error(self._magic_t)
def setflags(self, flags):
"""
Set flags on the magic object which determine how magic checking behaves;
a bitwise OR of the flags described in libmagic(3), but without the MAGIC_
prefix.
Returns -1 on systems that don't support utime(2) or utimes(2)
when PRESERVE_ATIME is set.
"""
return _setflags(self._magic_t, flags)
def load(self, filename=None):
"""
Must be called to load entries in the colon separated list of database files
passed as argument or the default database file if no argument before
any magic queries can be performed.
Returns 0 on success and -1 on failure.
"""
return _load(self._magic_t, filename)
def compile(self, dbs):
"""
Compile entries in the colon separated list of database files
passed as argument or the default database file if no argument.
Returns 0 on success and -1 on failure.
The compiled files created are named from the basename(1) of each file
argument with ".mgc" appended to it.
"""
return _compile(self._magic_t, dbs)
def check(self, dbs):
"""
Check the validity of entries in the colon separated list of
database files passed as argument or the default database file
if no argument.
Returns 0 on success and -1 on failure.
"""
return _check(self._magic_t, dbs)
def list(self, dbs):
"""
        Dump all magic entries in a human readable format for the colon
        separated list of database files passed as argument, or the
        default database file if no argument.
Returns 0 on success and -1 on failure.
"""
return _list(self._magic_t, dbs)
def errno(self):
"""
        Returns a numeric error code. If the return value is 0, an internal
        magic error occurred. If the return value is non-zero, the value is
        an OS error code. The errno module or os.strerror() can be used
        to provide detailed error information.
"""
return _errno(self._magic_t)
def open(flags):
"""
Returns a magic object on success and None on failure.
Flags argument as for setflags.
"""
return Magic(_open(flags))
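
# Minimal usage sketch (not part of the original bindings); assumes libmagic
# and its default magic database are installed on this system.
if __name__ == '__main__':
    ms = open(MAGIC_NONE)
    ms.load()
    print(ms.file(__file__))
    ms.close()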
| opf-attic/ref | tools/file/file-5.11/python/magic.py | Python | apache-2.0 | 6,558 |
from numpy import *
from struct import pack
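# pack_coefs splits a 512-tap kernel into four reversed 128-tap phases,
# scales by the largest 4-tap group sum so the packed values fit the signed
# 16-bit range, and emits them as big-endian int16.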
def pack_coefs(c):
cw = list(zip(c[ :128][::-1],
c[128:256][::-1],
c[256:384][::-1],
c[384: ][::-1]))
m = max(sum(x) for x in cw)
return b''.join(pack('>4h', *(int(round(n / m * 32767)) for n in x)) for x in cw)
x = linspace(-2, 2, 512, endpoint=False)
w1 = hamming(512)
w2 = kaiser(512, pi * 9/4)
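# Three windowed-sinc interpolation kernels: half-rate (Hamming window),
# 0.75-rate (Kaiser window), and unit-rate (Hamming window).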
coef_1 = [sinc(n * 0.5) for n in x] * w1
coef_2 = [sinc(n * 0.75) for n in x] * w2
coef_3 = [sinc(n) for n in x] * w1
with open('dsp_coef.bin', 'wb') as f:
f.write(pack_coefs(coef_1))
f.write(pack_coefs(coef_2))
f.write(pack_coefs(coef_3))
f.write(b'\0' * 1024)
| moncefmechri/dolphin | docs/DSP/free_dsp_rom/generate_coefs.py | Python | gpl-2.0 | 703 |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from pants.engine.internals.addressable import (
MutationError,
NotSerializableError,
addressable,
addressable_dict,
addressable_sequence,
)
from pants.engine.internals.objects import Resolvable, Serializable
from pants.util.objects import Exactly, TypeConstraintError
class SimpleSerializable(Serializable):
def __init__(self, **kwargs):
self._kwargs = kwargs
def _asdict(self):
return self._kwargs
class CountingResolvable(Resolvable):
def __init__(self, address, value):
self._address = address
self._value = value
self._resolutions = 0
@property
def address(self):
return self._address
def resolve(self):
try:
return self._value
finally:
self._resolutions += 1
@property
def resolutions(self):
return self._resolutions
class AddressableDescriptorTest(unittest.TestCase):
def test_inappropriate_application(self):
class NotSerializable:
def __init__(self, count):
super().__init__()
self.count = count
@addressable(Exactly(int))
def count(self):
pass
with self.assertRaises(NotSerializableError):
NotSerializable(42)
class AddressableTest(unittest.TestCase):
class Person(SimpleSerializable):
def __init__(self, age):
super(AddressableTest.Person, self).__init__()
self.age = age
@addressable(Exactly(int))
def age(self):
"""Return the person's age in years.
        :rtype: int
"""
def test_none(self):
person = self.Person(None)
self.assertIsNone(person.age, None)
def test_value(self):
person = self.Person(42)
self.assertEqual(42, person.age)
def test_address(self):
person = self.Person("//:meaning-of-life")
self.assertEqual("//:meaning-of-life", person.age)
def test_resolvable(self):
resolvable_age = CountingResolvable("//:meaning-of-life", 42)
person = self.Person(resolvable_age)
self.assertEqual(0, resolvable_age.resolutions)
self.assertEqual(42, person.age)
self.assertEqual(1, resolvable_age.resolutions)
self.assertEqual(42, person.age)
self.assertEqual(2, resolvable_age.resolutions)
def test_type_mismatch_value(self):
with self.assertRaises(TypeConstraintError):
self.Person(42.0)
def test_type_mismatch_resolvable(self):
resolvable_age = CountingResolvable("//:meaning-of-life", 42.0)
person = self.Person(resolvable_age)
with self.assertRaises(TypeConstraintError):
person.age
def test_single_assignment(self):
person = self.Person(42)
with self.assertRaises(MutationError):
person.age = 37
class AddressableListTest(unittest.TestCase):
class Series(SimpleSerializable):
def __init__(self, values):
super(AddressableListTest.Series, self).__init__()
self.values = values
@addressable_sequence(Exactly(int, float))
def values(self):
"""Return this series' values.
        :rtype: tuple of int or float
"""
def test_none(self):
series = self.Series(None)
self.assertEqual((), series.values)
def test_values(self):
series = self.Series([42, 1 / 137.0])
self.assertEqual((42, 1 / 137.0,), series.values)
def test_addresses(self):
series = self.Series(["//:meaning-of-life"])
self.assertEqual(("//:meaning-of-life",), series.values)
def test_resolvables(self):
resolvable_value = CountingResolvable("//:fine-structure-constant", 1 / 137.0)
series = self.Series([resolvable_value])
self.assertEqual((1 / 137.0,), series.values)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, series.values[0])
self.assertEqual(2, resolvable_value.resolutions)
def test_mixed(self):
resolvable_value = CountingResolvable("//:fine-structure-constant", 1 / 137.0)
series = self.Series([42, "//:meaning-of-life", resolvable_value])
self.assertEqual(0, resolvable_value.resolutions)
self.assertEqual((42, "//:meaning-of-life", 1 / 137.0), series.values)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, series.values[2])
self.assertEqual(2, resolvable_value.resolutions)
def test_type_mismatch_container(self):
with self.assertRaises(TypeError):
self.Series({42, 1 / 137.0})
def test_type_mismatch_value(self):
with self.assertRaises(TypeConstraintError):
self.Series([42, False])
def test_type_mismatch_resolvable(self):
resolvable_value = CountingResolvable("//:meaning-of-life", True)
series = self.Series([42, resolvable_value])
with self.assertRaises(TypeConstraintError):
series.values
def test_single_assignment(self):
series = self.Series([42])
with self.assertRaises(MutationError):
series.values = [37]
class AddressableDictTest(unittest.TestCase):
class Varz(SimpleSerializable):
def __init__(self, varz):
super(AddressableDictTest.Varz, self).__init__()
self.varz = varz
@addressable_dict(Exactly(int, float))
def varz(self):
"""Return a snapshot of the current /varz.
        :rtype: dict of string -> int or float
"""
def test_none(self):
varz = self.Varz(None)
self.assertEqual({}, varz.varz)
def test_values(self):
varz = self.Varz({"meaning of life": 42, "fine structure constant": 1 / 137.0})
self.assertEqual({"meaning of life": 42, "fine structure constant": 1 / 137.0}, varz.varz)
def test_addresses(self):
varz = self.Varz({"meaning of life": "//:meaning-of-life"})
self.assertEqual({"meaning of life": "//:meaning-of-life"}, varz.varz)
def test_resolvables(self):
resolvable_value = CountingResolvable("//:fine-structure-constant", 1 / 137.0)
varz = self.Varz({"fine structure constant": resolvable_value})
self.assertEqual({"fine structure constant": 1 / 137.0}, varz.varz)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, varz.varz["fine structure constant"])
self.assertEqual(2, resolvable_value.resolutions)
def test_mixed(self):
resolvable_value = CountingResolvable("//:fine-structure-constant", 1 / 137.0)
varz = self.Varz(
{
"prime": 37,
"meaning of life": "//:meaning-of-life",
"fine structure constant": resolvable_value,
}
)
self.assertEqual(0, resolvable_value.resolutions)
self.assertEqual(
{
"prime": 37,
"meaning of life": "//:meaning-of-life",
"fine structure constant": 1 / 137.0,
},
varz.varz,
)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, varz.varz["fine structure constant"])
self.assertEqual(2, resolvable_value.resolutions)
def test_type_mismatch_container(self):
with self.assertRaises(TypeError):
self.Varz([42, 1 / 137.0])
def test_type_mismatch_value(self):
with self.assertRaises(TypeConstraintError):
self.Varz({"meaning of life": 42, "fine structure constant": False})
def test_type_mismatch_resolvable(self):
resolvable_item = CountingResolvable("//:fine-structure-constant", True)
varz = self.Varz({"meaning of life": 42, "fine structure constant": resolvable_item})
with self.assertRaises(TypeConstraintError):
varz.varz
def test_single_assignment(self):
varz = self.Varz({"meaning of life": 42})
with self.assertRaises(MutationError):
varz.varz = {"fine structure constant": 1 / 137.0}
| tdyas/pants | src/python/pants/engine/internals/addressable_test.py | Python | apache-2.0 | 8,357 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Inventory/AppliedItem.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Inventory.Item import ItemId_pb2 as POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2
from POGOProtos.Inventory.Item import ItemType_pb2 as POGOProtos_dot_Inventory_dot_Item_dot_ItemType__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Inventory/AppliedItem.proto',
package='POGOProtos.Inventory',
syntax='proto3',
serialized_pb=_b('\n&POGOProtos/Inventory/AppliedItem.proto\x12\x14POGOProtos.Inventory\x1a&POGOProtos/Inventory/Item/ItemId.proto\x1a(POGOProtos/Inventory/Item/ItemType.proto\"\xa0\x01\n\x0b\x41ppliedItem\x12\x32\n\x07item_id\x18\x01 \x01(\x0e\x32!.POGOProtos.Inventory.Item.ItemId\x12\x36\n\titem_type\x18\x02 \x01(\x0e\x32#.POGOProtos.Inventory.Item.ItemType\x12\x11\n\texpire_ms\x18\x03 \x01(\x03\x12\x12\n\napplied_ms\x18\x04 \x01(\x03\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2.DESCRIPTOR,POGOProtos_dot_Inventory_dot_Item_dot_ItemType__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_APPLIEDITEM = _descriptor.Descriptor(
name='AppliedItem',
full_name='POGOProtos.Inventory.AppliedItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='item_id', full_name='POGOProtos.Inventory.AppliedItem.item_id', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='item_type', full_name='POGOProtos.Inventory.AppliedItem.item_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expire_ms', full_name='POGOProtos.Inventory.AppliedItem.expire_ms', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='applied_ms', full_name='POGOProtos.Inventory.AppliedItem.applied_ms', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=147,
serialized_end=307,
)
_APPLIEDITEM.fields_by_name['item_id'].enum_type = POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2._ITEMID
_APPLIEDITEM.fields_by_name['item_type'].enum_type = POGOProtos_dot_Inventory_dot_Item_dot_ItemType__pb2._ITEMTYPE
DESCRIPTOR.message_types_by_name['AppliedItem'] = _APPLIEDITEM
AppliedItem = _reflection.GeneratedProtocolMessageType('AppliedItem', (_message.Message,), dict(
DESCRIPTOR = _APPLIEDITEM,
__module__ = 'POGOProtos.Inventory.AppliedItem_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Inventory.AppliedItem)
))
_sym_db.RegisterMessage(AppliedItem)
# @@protoc_insertion_point(module_scope)
| DenL/pogom-webhook | pogom/pgoapi/protos/POGOProtos/Inventory/AppliedItem_pb2.py | Python | mit | 3,946 |
import os
from seleniumbase import BaseCase
class FileUploadButtonTests(BaseCase):
""" The main purpose of this is to test the self.choose_file() method. """
def test_file_upload_button(self):
self.open("https://www.w3schools.com/jsref/tryit.asp"
"?filename=tryjsref_fileupload_get")
self.ad_block()
self.switch_to_frame('iframeResult')
zoom_in = 'input[type="file"]{zoom: 1.5;-moz-transform: scale(1.5);}'
self.add_css_style(zoom_in)
self.highlight('input[type="file"]')
dir_name = os.path.dirname(os.path.abspath(__file__))
file_path = dir_name + "/example_logs/screenshot.png"
self.choose_file('input[type="file"]', file_path)
self.demo_mode = True # Adds highlighting to the assert statement
self.assert_element('input[type="file"]')
| mdmintz/SeleniumBase | examples/upload_file_test.py | Python | mit | 857 |
import urllib2
import socket
import re
import os
# Mock of urllib2's addinfourl
class addinfoUrl():
"""class to add info() and getUrl(url=) methods to an open file."""
def __init__(self, url, code, msg):
self.headers = None
self.url = url
self.code = code
self.msg = msg
def info(self):
return self.headers
def getcode(self):
return self.code
def getUrl(self):
return self.url
#
# Mock Method so test can run independently
#
# Fix for Python<2.6
try:
timeout = socket._GLOBAL_DEFAULT_TIMEOUT
except AttributeError:
timeout = 1000
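# mock_urlopen fakes urllib2.urlopen: the trailing digits of the URL are
# treated as the HTTP status code; 200 returns a response object, anything
# else raises an HTTPError.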
def mock_urlopen(url, data=None, timeout=timeout):
msg_dict = {'301': "Moved Permanently", '404': 'Not Found', '200': 'OK'}
code = '404'
msg = msg_dict.get(code)
m = re.search("([0-9]*)$", url)
if m:
code = m.group(0)
msg = msg_dict.get(code, 'Something Happened')
if code == "200":
return addinfoUrl(url, code, msg)
raise urllib2.HTTPError(url, code, msg, None, None)
from django.conf import settings
from django.test import TestCase
from linkcheck.models import Url
class InternalCheckTestCase(TestCase):
urls = 'linkcheck.tests.test_urls'
def setUp(self):
#replace urllib2.urlopen with mock method
urllib2.urlopen = mock_urlopen
def test_internal_check_mailto(self):
uv = Url(url="mailto:nobody", still_exists=True)
uv.check()
self.assertEquals(uv.status, None)
self.assertEquals(uv.message, 'Email link (not automatically checked)')
def test_internal_check_blank(self):
uv = Url(url="", still_exists=True)
uv.check()
self.assertEquals(uv.status, False)
self.assertEquals(uv.message, 'Empty link')
def test_internal_check_anchor(self):
uv = Url(url="#some_anchor", still_exists=True)
uv.check()
self.assertEquals(uv.status, None)
self.assertEquals(uv.message, 'Link to within the same page (not automatically checked)')
# TODO: This now fails, because with follow=True, redirects are automatically followed
# def test_internal_check_view_302(self):
# uv = Url(url="/admin/linkcheck", still_exists=True)
# uv.check()
# self.assertEquals(uv.status, None)
# self.assertEquals(uv.message, 'This link redirects: code 302 (not automatically checked)')
def test_internal_check_admin_found(self):
uv = Url(url="/admin/", still_exists=True)
uv.check()
self.assertEquals(uv.status, True)
self.assertEquals(uv.message, 'Working internal link')
def test_internal_check_broken_internal_link(self):
uv = Url(url="/broken/internal/link", still_exists=True)
uv.check()
self.assertEquals(uv.status, False)
self.assertEquals(uv.message, 'Broken internal link')
def test_internal_check_invalid_url(self):
uv = Url(url="invalid/url", still_exists=True)
uv.check()
self.assertEquals(uv.status, False)
self.assertEquals(uv.message, 'Invalid URL')
def test_same_page_anchor(self):
# TODO Make this test
pass
#uv = Url(url="#anchor", still_exists=True)
#uv.check()
#self.assertEquals(uv.status, None)
#self.assertEquals(uv.message, "")
class InternalMediaCheckTestCase(TestCase):
def setUp(self):
self.old_media_root = settings.MEDIA_ROOT
settings.MEDIA_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'media')
def tearDown(self):
settings.MEDIA_ROOT = self.old_media_root
def test_internal_check_media_missing(self):
uv = Url(url="/media/not_found", still_exists=True)
uv.check()
self.assertEquals(uv.status, False)
self.assertEquals(uv.message, 'Missing Document')
def test_internal_check_media_found(self):
uv = Url(url="/media/found", still_exists=True)
uv.check()
self.assertEquals(uv.status, True)
self.assertEquals(uv.message, 'Working file link')
def test_internal_check_media_utf8(self):
uv = Url(url="/media/r%C3%BCckmeldung", still_exists=True)
uv.check()
self.assertEquals(uv.status, True)
self.assertEquals(uv.message, 'Working file link')
class ExternalCheckTestCase(TestCase):
def test_external_check_200(self):
uv = Url(url="http://qa-dev.w3.org/link-testsuite/http.php?code=200", still_exists=True)
uv.check()
self.assertEquals(uv.status, True)
self.assertEquals(uv.message, '200 OK')
def test_external_check_301(self):
uv = Url(url="http://qa-dev.w3.org/link-testsuite/http.php?code=301", still_exists=True)
uv.check()
self.assertEquals(uv.status, False)
self.assertEquals(uv.message, '301 Moved Permanently')
def test_external_check_404(self):
uv = Url(url="http://qa-dev.w3.org/link-testsuite/http.php?code=404", still_exists=True)
uv.check()
self.assertEquals(uv.status, False)
self.assertEquals(uv.message, '404 Not Found')
class FindingLinksTestCase(TestCase):
def test_found_links(self):
from linkcheck.tests.sampleapp.models import Book
self.assertEqual(Url.objects.all().count(), 0)
Book.objects.create(title='My Title', description="""Here's a link: <a href="http://www.example.org">Example</a>""")
self.assertEqual(Url.objects.all().count(), 1)
self.assertEqual(Url.objects.all()[0].url, "http://www.example.org")
| yvess/django-linkcheck | linkcheck/tests/__init__.py | Python | bsd-3-clause | 5,551 |
"""
description: open addressing Hash Table for CS 141 Lecture
file: hashtable.py
language: python3
author: [email protected] Sean Strout
author: [email protected] Scott Johnson
"""
from rit_lib import *
class HashTable(struct):
"""
The HashTable data structure contains a collection of values
where each value is located by a hashable key.
No two values may have the same key, but more than one
key may have the same value.
table is the list holding the hash table
    size is the number of elements occupying the hash table
"""
_slots = ((list, 'table'), (int, 'size'))
def HashTableToStr(self):
"""
HashTableToStr: HashTable -> String
"""
result = ""
for i in range(len(self.table)):
e = self.table[i]
if not e == None:
result += str(i) + ": "
result += e.EntryToStr() + "\n"
return result
def hash_function(self, name):
"""
        hash_function: K -> NatNum
        Compute a hash of the name string that is in [0 ... len(self.table)).
        """
        hashval = 0
        for letter in name:
            hashval += (ord(letter) - ord('a'))
        hallnum = hashval % len(self.table)
        return hallnum
def keys(self):
"""
keys: HashTable(K, V) -> List(K)
Return a list of keys in the given hashTable.
"""
result = []
for entry in self.table:
if entry != None:
result.append(entry.key)
return result
def has(self, key):
"""
has: HashTable(K, V) K -> Boolean
Return True iff hTable has an entry with the given key.
"""
index = self.hash_function(key)
startIndex = index # We must make sure we don't go in circles.
while self.table[ index ] != None and self.table[ index ].key != key:
index = (index + 1) % len(self.table)
if index == startIndex:
return False
return self.table[ index ] != None
def put(self, key, value):
"""
put: HashTable(K, V) K V -> Boolean
Using the given hash table, set the given key to the
given value. If the key already exists, the given value
will replace the previous one already in the table.
If the table is full, an Exception is raised.
"""
index = self.hash_function(key)
startIndex = index # We must make sure we don't go in circles.
while self.table[ index ] != None and self.table[ index ].key != key:
index = (index + 1) % len(self.table)
if index == startIndex:
raise Exception("Hash table is full.")
if self.table[ index ] == None:
self.table[ index ] = Entry(key, value)
self.size += 1
else:
self.table[ index ].value = value
return True
def get( self, key):
"""
get: HashTable(K, V) K -> V
Return the value associated with the given key in
the given hash table.
Precondition: self.has(key)
"""
index = self.hash_function(key)
startIndex = index # We must make sure we don't go in circles.
while self.table[ index ] != None and self.table[ index ].key != key:
index = (index + 1) % len(self.table)
if index == startIndex:
raise Exception("Hash table does not contain key.")
if self.table[ index ] == None:
raise Exception("Hash table does not contain key:", key)
else:
return self.table[ index ].value
def createHashTable(capacity=100):
"""
createHashTable: NatNum? -> HashTable
"""
if capacity < 2:
capacity = 2
aHashTable = HashTable([None for _ in range(capacity)], 0)
return aHashTable
class Entry(struct):
"""
A class used to hold key/value pairs.
"""
_slots = ((object, "key"), (object, "value"))
def EntryToStr( self ):
"""
EntryToStr: Entry -> String
return the string representation of the entry.
"""
return "(" + str(self.key) + ", " + str(self.value) + ")"
| moiseslorap/RIT | Computer Science 1/Labs/lab9/hashtable.py | Python | mit | 4,321 |
#!/usr/bin/env python3
import argparse, time
import numpy as np
import django
from django.utils import timezone
import datetime, os
import sys
import db6 as db
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BasicBrowser.settings")
django.setup()
from tmv_app.models import *
def read_state(filename, terms):
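    # The hLDA state file starts with scalar hyperparameters (score, iter,
    # eta, gam, gem mean/scale, scaling shape/scale), then a header row,
    # then one line per topic with its word counts.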
topics = []
twords = []
    with open(filename, 'r') as state:
score = float(state.readline().split()[1])
iter = int(state.readline().split()[1])
eta = state.readline().split()
eta = [float(x) for x in eta[1:len(eta)]]
gam = state.readline().split()
gam = [float(x) for x in gam[1:len(gam)]]
gem_mean = float(state.readline().split()[1])
gem_scale = float(state.readline().split()[1])
scaling_shape = float(state.readline().split()[1])
scaling_scale = float(state.readline().split()[1])
header = state.readline()
tree = {}
for line in state:
(id, parent, ndocs, nwords, scale, word_cnt) = line.split(None, 5)
topics.append({'id': id,
'parent': parent,
'ndocs': ndocs,
'nwords': nwords,
'scale': scale
})
words = [int(x) for x in word_cnt.split()]
twords.append(words)
return([topics,twords])
def read_dmap(dmap):
docs = []
with open(dmap,'r') as dmap:
for line in dmap:
d = 'WOS:'+line.strip().split('WOS:')[1].split('.')[0]
docs.append(d)
return(docs)
def read_vocab(vocab):
words = []
with open(vocab,'r') as vocab:
for line in vocab:
w = line.strip()
words.append({'term': w})
return(words)
def main(filename, dmap, vocab):
# Init run
run_id = db.init('HL')
# Add docs
docs = read_dmap(dmap)
# Add terms
terms = read_vocab(vocab)
for term in terms:
t = Term(title=term['term'],run_id=run_id)
t.save()
term['db_id'] = t.term
# Add topics
state = read_state(filename, terms)
topics = state[0]
for topic in topics:
scale = topic['scale']
nwords = topic['nwords']
ndocs = topic['ndocs']
t = HTopic(
run_id=run_id,
n_docs=ndocs,
n_words=nwords,
scale=scale
)
t.save()
topic['db_id'] = t.topic
for topic in topics:
t = HTopic.objects.get(topic=topic['db_id'])
parent_id = topic['parent']
if int(parent_id) > -1:
for tt in topics:
if tt['id'] == parent_id:
topic['parent_db_id'] = tt['db_id']
break
t.parent = HTopic.objects.get(topic=topic['parent_db_id'])
t.save()
# Add topicTerm
tt = state[1]
for topic_id in range(len(tt)):
topic = topics[topic_id]
for term_id in range(len(tt[topic_id])):
term = terms[term_id]
if tt[topic_id][term_id] > 0:
topicterm = HTopicTerm(
topic = HTopic.objects.get(topic=topic['db_id']),
term = Term.objects.get(term=term['db_id']),
count = tt[topic_id][term_id],
run_id = run_id
)
topicterm.save()
with open(args.file+".assign", "r") as assign:
for line in assign:
(doc_id, score, path) = line.split(None, 2)
doc_id = int(doc_id)
doc = docs[doc_id]
try:
d = Doc.objects.get(UT=doc)
except:
d = Doc(UT=doc)
d.save()
score = float(score)
path = [int(x) for x in path.split()]
level = -1
for topic_id in path:
level+=1
for topic in topics:
if int(topic['id']) == topic_id:
t = HTopic.objects.get(topic=topic['db_id'])
dt = HDocTopic(
doc=d,
topic=t,
level=level,
score=score,
run_id=run_id
)
dt.save()
break
# Add doctopics
# Parse the arguments
parser = argparse.ArgumentParser(description='Update hlda output to the tmv app')
parser.add_argument('file',type=str,help='name of mode or iteration file containing the hlda output')
parser.add_argument('dmap',type=str,help='name of docmap')
parser.add_argument('vocab',type=str,help='name of vocab')
args=parser.parse_args()
if __name__ == '__main__':
t0 = time.time()
main(args.file, args.dmap, args.vocab)
totalTime = time.time() - t0
tm = int(totalTime//60)
ts = int(totalTime-(tm*60))
print("done! total time: " + str(tm) + " minutes and " + str(ts) + " seconds")
| mcallaghan/tmv | BasicBrowser/import_hlda.py | Python | gpl-3.0 | 5,188 |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import shutil
import sys
from util import build_utils
_RESOURCE_CLASSES = [
"R.class",
"R##*.class",
"Manifest.class",
"Manifest##*.class",
]
def Jar(class_files, classes_dir, jar_path, manifest_file=None,
provider_configurations=None, additional_files=None):
jar_path = os.path.abspath(jar_path)
# The paths of the files in the jar will be the same as they are passed in to
# the command. Because of this, the command should be run in
# options.classes_dir so the .class file paths in the jar are correct.
jar_cwd = classes_dir
class_files_rel = [os.path.relpath(f, jar_cwd) for f in class_files]
jar_cmd = ['jar', 'cf0', jar_path]
if manifest_file:
jar_cmd[1] += 'm'
jar_cmd.append(os.path.abspath(manifest_file))
jar_cmd.extend(class_files_rel)
for filepath, jar_filepath in additional_files or []:
full_jar_filepath = os.path.join(jar_cwd, jar_filepath)
jar_dir = os.path.dirname(full_jar_filepath)
if not os.path.exists(jar_dir):
os.makedirs(jar_dir)
shutil.copy(filepath, full_jar_filepath)
jar_cmd.append(jar_filepath)
if provider_configurations:
service_dir = os.path.join(jar_cwd, 'META-INF', 'services')
if not os.path.exists(service_dir):
os.makedirs(service_dir)
for config in provider_configurations:
config_jar_path = os.path.join(service_dir, os.path.basename(config))
shutil.copy(config, config_jar_path)
jar_cmd.append(os.path.relpath(config_jar_path, jar_cwd))
if not class_files_rel:
empty_file = os.path.join(classes_dir, '.empty')
build_utils.Touch(empty_file)
jar_cmd.append(os.path.relpath(empty_file, jar_cwd))
build_utils.CheckOutput(jar_cmd, cwd=jar_cwd)
build_utils.Touch(jar_path, fail_if_missing=True)
def JarDirectory(classes_dir, jar_path, manifest_file=None, predicate=None,
provider_configurations=None, additional_files=None):
class_files = build_utils.FindInDirectory(classes_dir, '*.class')
if predicate:
class_files = [f for f in class_files if predicate(f)]
Jar(class_files, classes_dir, jar_path, manifest_file=manifest_file,
provider_configurations=provider_configurations,
additional_files=additional_files)
def main():
parser = optparse.OptionParser()
parser.add_option('--classes-dir', help='Directory containing .class files.')
parser.add_option('--input-jar', help='Jar to include .class files from')
parser.add_option('--jar-path', help='Jar output path.')
parser.add_option('--excluded-classes',
help='GYP list of .class file patterns to exclude from the jar.')
parser.add_option('--strip-resource-classes-for',
help='GYP list of java package names exclude R.class files in.')
parser.add_option('--stamp', help='Path to touch on success.')
args = build_utils.ExpandFileArgs(sys.argv[1:])
options, _ = parser.parse_args(args)
# Current implementation supports just one or the other of these:
assert not options.classes_dir or not options.input_jar
excluded_classes = []
if options.excluded_classes:
excluded_classes = build_utils.ParseGypList(options.excluded_classes)
if options.strip_resource_classes_for:
packages = build_utils.ParseGypList(options.strip_resource_classes_for)
excluded_classes.extend(p.replace('.', '/') + '/' + f
for p in packages for f in _RESOURCE_CLASSES)
predicate = None
if excluded_classes:
predicate = lambda f: not build_utils.MatchesGlob(f, excluded_classes)
with build_utils.TempDir() as temp_dir:
classes_dir = options.classes_dir
if options.input_jar:
build_utils.ExtractAll(options.input_jar, temp_dir)
classes_dir = temp_dir
JarDirectory(classes_dir, options.jar_path, predicate=predicate)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
| danakj/chromium | build/android/gyp/jar.py | Python | bsd-3-clause | 4,097 |
# -*- coding: utf-8 -*-
import pytest
from .utils import last_activity
@pytest.mark.usefixtures('versioning_manager', 'table_creator')
class TestActivityCreationWithColumnExclusion(object):
@pytest.fixture
def audit_trigger_creator(self, session, user_class):
session.execute(
'''SELECT audit_table('{0}', '{{"age"}}')'''.format(
user_class.__tablename__
)
)
@pytest.fixture
def user(self, session, user_class, audit_trigger_creator):
user = user_class(name='John', age=15)
session.add(user)
session.flush()
return user
def test_insert(self, user, connection):
activity = last_activity(connection)
assert activity['old_data'] == {}
assert activity['changed_data'] == {
'id': user.id,
'name': 'John'
}
assert activity['table_name'] == 'user'
assert activity['native_transaction_id'] > 0
assert activity['verb'] == 'insert'
def test_update(self, user, session):
user.name = 'Luke'
user.age = 18
session.flush()
activity = last_activity(session)
assert activity['changed_data'] == {'name': 'Luke'}
assert activity['old_data'] == {
'id': user.id,
'name': 'John',
}
assert activity['table_name'] == 'user'
assert activity['native_transaction_id'] > 0
assert activity['verb'] == 'update'
def test_delete(self, user, session):
session.delete(user)
session.flush()
activity = last_activity(session)
assert activity['changed_data'] == {}
assert activity['old_data'] == {
'id': user.id,
'name': 'John',
}
assert activity['table_name'] == 'user'
assert activity['native_transaction_id'] > 0
assert activity['verb'] == 'delete'
| kvesteri/postgresql-audit | tests/test_sql_files.py | Python | bsd-2-clause | 1,919 |
"""
The Response class in REST framework is similar to HTTPResponse, except that
it is initialized with unrendered data, instead of a pre-rendered string.
The appropriate renderer is called during Django's template response rendering.
"""
from __future__ import unicode_literals
from django.core.handlers.wsgi import STATUS_CODE_TEXT
from django.template.response import SimpleTemplateResponse
from django.utils import six
class Response(SimpleTemplateResponse):
"""
An HttpResponse that allows its data to be rendered into
arbitrary media types.
"""
def __init__(self, data=None, status=None,
template_name=None, headers=None,
exception=False, content_type=None):
"""
Alters the init arguments slightly.
For example, drop 'template_name', and instead use 'data'.
Setting 'renderer' and 'media_type' will typically be deferred,
        for example being set automatically by the `APIView`.
"""
super(Response, self).__init__(None, status=status)
self.data = data
self.template_name = template_name
self.exception = exception
self.content_type = content_type
if headers:
for name, value in six.iteritems(headers):
self[name] = value
@property
def rendered_content(self):
renderer = getattr(self, 'accepted_renderer', None)
media_type = getattr(self, 'accepted_media_type', None)
context = getattr(self, 'renderer_context', None)
assert renderer, ".accepted_renderer not set on Response"
assert media_type, ".accepted_media_type not set on Response"
assert context, ".renderer_context not set on Response"
context['response'] = self
charset = renderer.charset
content_type = self.content_type
if content_type is None and charset is not None:
content_type = "{0}; charset={1}".format(media_type, charset)
elif content_type is None:
content_type = media_type
self['Content-Type'] = content_type
ret = renderer.render(self.data, media_type, context)
if isinstance(ret, six.text_type):
assert charset, (
'renderer returned unicode, and did not specify '
'a charset value.'
)
return bytes(ret.encode(charset))
if not ret:
del self['Content-Type']
return ret
@property
def status_text(self):
"""
Returns reason text corresponding to our HTTP response status code.
Provided for convenience.
"""
# TODO: Deprecate and use a template tag instead
# TODO: Status code text for RFC 6585 status codes
return STATUS_CODE_TEXT.get(self.status_code, '')
def __getstate__(self):
"""
Remove attributes from the response that shouldn't be cached
"""
state = super(Response, self).__getstate__()
for key in ('accepted_renderer', 'renderer_context', 'data'):
if key in state:
del state[key]
return state
| paulormart/gae-project-skeleton-100 | gae/lib/rest_framework/response.py | Python | mit | 3,150 |
import utils
import operation_registry
import operation_wrappers.base_wrappers as base_wrappers
import types
import exceptions
from protectron import protectron
@utils.doublewrap
def register_operation(func, operation_wrapper=base_wrappers.LocalOperation):
if isinstance(operation_wrapper, types.ClassType):
operation_wrapper_instance = operation_wrapper()
else:
operation_wrapper_instance = operation_wrapper
operation_wrapper_instance.load_wrapped_operation(func)
operation_registry_cls = operation_registry.OperationRegistry
operation_registry_cls.register_operation(operation_wrapper_instance)
@utils.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
def dispatch_event(event):
return operation_registry.OperationRegistry.call(event)
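
# Usage sketch (hypothetical operation name; LocalOperation is the default
# wrapper, as defined above):
#
#     @register_operation
#     def ping():
#         return 'pong'
#
# Incoming events are then routed to registered operations via dispatch_event.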
| hkrist/ratatoskr | ratatoskr/__init__.py | Python | mit | 843 |
# z39.5er
# v.0.0
# June 1, 2017
# declarations and constants
normalizations = [['!',' '], ['"',' '], ['(',' '], [')',' '], ['-',' '], ['{',' '], ['}',' '], ['<',' '], ['>',' '], [';',' '], [':',' '], ['.',' '], ['?',' '], [',',' '],['[',''], [']',''], ["'",''], ['/',' ']]
stopwords = [ 'a', 'by', 'how', 'that', 'an', 'for', 'in', 'the', 'and', 'from', 'is', 'this', 'are', 'had', 'it', 'to', 'as', 'have', 'not', 'was', 'at', 'he', 'of', 'which', 'be', 'her', 'on', 'with', 'but', 'his', 'or', 'you']
filename_in = "input.txt"
filename_out = "output.txt"
inTable = []
tempTable = []
outTable = []
tempRow = []
tempStr = ''
log = ''
# functions
def normalize( str ):
' Apply normalization rules to remove punctuation'
print(str)
newStr = ''
for eachChar in str:
for eachNormalization in normalizations:
if eachChar == eachNormalization[0]:
eachChar = eachNormalization[1]
newStr += eachChar
newStr = newStr.replace(' ',' ')
newStr = newStr.strip()
newStr = newStr.lower()
print(newStr)
return newStr
def removeStops(kwt):
hasStop = False
returnString = ''
tempRow = []
tempValue = ''
tempRow.append(kwt.split(' '))
for eachTerm in tempRow[0]:
tempValue = eachTerm
for eachStop in stopwords:
if eachTerm == eachStop:
print(eachTerm + ' is a stop word.')
tempValue = ''
break
if len(tempValue) > 0:
returnString += tempValue + ' '
tempValue = ''
return(returnString)
def removeSingleCharacters(input):
# remove single character search terms, which cause trouble
tempRow = []
output = ''
tempRow.append(input.split(' '))
for eachTerm in tempRow[0]:
if len(eachTerm) > 1:
output += eachTerm + ' '
return(output)
def uniq(input):
# deduplicates list
# swiped from: https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-whilst-preserving-order
output = []
for x in input:
if x not in output:
output.append(x)
return output
# input file, convert lines to lists
inFile = open(filename_in, 'r')
inTable = inFile.readlines()
log += 'Imported file. Read ' + str(len(inTable)) + ' lines of data.\n'
for count, eachRow in enumerate(inTable):
log += 'Row ' + str(count) + ': '
tempStr = eachRow
tempStr = tempStr.strip('\n')
log += tempStr + '\n'
tempRow = tempStr.split('\t')
log += 'Adding to processing table: ' + str(len(tempRow)) + ' columns.\n'
tempTable.append(tempRow)
log += str(tempTable[count])
log += '\n'
# iterate through lists, evaluating terms
# for now: assume the 1st column is title keyword (bib-1 @attr 4)
# the second column is date of publication (bib-1 @attr 31)
for count, eachRow in enumerate(tempTable):
# keyword title
tempStr = tempTable[count][0]
tempStr = normalize(tempStr)
log += 'KWT in row ' + str(count) + ' normalized to: ' + tempStr + ' \n '
tempStr = removeStops(tempStr)
log += 'KWT in row ' + str(count) + ' stopwords removed: ' + tempStr + '\n'
tempStr = removeSingleCharacters(tempStr)
log += '\nremoved single characters'
keyword_title = tempStr.rstrip(' ')
# now dates
midYear = tempTable[count][1]
print(midYear)
lowYear = int(midYear) - 5
hiYear = int(midYear) + 5
year_range = str(lowYear) + '-' + str(hiYear)
row2add = [keyword_title, year_range]
log += 'Identified year: ' + midYear + ', converted to date range: ' + year_range + '\n'
outTable.append(row2add)
# convert lists to z-39.50 queries & add to outTable
log += '\nOuttable has: ' + str(len(outTable)) + ' rows.'
log += '\nFirst row looks like this: ' + str(outTable[0])
zQuery = []
for eachRow in outTable:
row2add = '@and '
row2add += '@attr 1=4 "' + eachRow[0] +'" '
row2add += '@attr 1=31 "' + eachRow[1] + '"'
zQuery.append(row2add)
zQuery = uniq(zQuery)
log += '\nAfter removing duplicate row, out table has: ' + str(len(zQuery)) + ' rows.'
# output to file
outFile = open(filename_out,'w')
for eachRow in zQuery:
outFile.write(eachRow + '\n')
# housekeeping
print(log) | babrahamse/z395er | z395er.py | Python | mit | 4,347 |
from typing import List
class Solution:
def validSquare(
self, p1: List[int], p2: List[int], p3: List[int], p4: List[int]
) -> bool:
def dist(p1: List[int], p2: List[int]):
return (p1[0] - p2[0]) * (p1[0] - p2[0]) + (p1[1] - p2[1]) * (p1[1] - p2[1])
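        # A valid square yields exactly two distinct nonzero squared
        # distances among its six point pairs: the side and the diagonal.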
dist_set = set(
[
dist(p1, p2),
dist(p1, p3),
dist(p1, p4),
dist(p2, p3),
dist(p2, p4),
dist(p3, p4),
]
)
return 0 not in dist_set and len(dist_set) == 2
# TESTS
for p, expected in [
([[0, 0], [1, 1], [1, 0], [0, 1]], True),
([[0, 0], [0, 2], [1, 1], [-1, 1]], True),
([[0, 0], [0, 2], [1, 2], [-1, 1]], False),
]:
sol = Solution()
actual = sol.validSquare(p[0], p[1], p[2], p[3])
print("Could", p, "construct a square? ->", actual)
assert actual == expected
| l33tdaima/l33tdaima | p593m/valid_square.py | Python | mit | 922 |
import glob
import os
import shutil
import copy
import simplejson
from contextlib import contextmanager
from PIL import Image
from PIL.ImageDraw import ImageDraw
from cStringIO import StringIO
from django.conf import settings
from django.test import TestCase
from dju_image import settings as dju_settings
from dju_image.tools import clear_profile_configs_cache, media_path
def create_test_image(w, h, c='RGB'):
colors = {
'RGB': {1: '#DDEEFF', 2: '#667788', 3: '#887766'},
'CMYK': {1: (120, 130, 140, 25), 2: (80, 100, 120, 50), 3: (120, 100, 80, 75)},
'P': {1: 1, 2: 128, 3: 255},
}
color = colors[c]
img = Image.new(c, (w, h), color=color[1])
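    # Draw an X (two diagonal lines) over the solid background.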
d = ImageDraw(img)
d.line((-1, -1) + img.size, fill=color[2], width=2)
d.line((-1, img.size[1], img.size[0], -1), fill=color[3], width=2)
return img
def get_img_file(img, img_format='JPEG', jpeg_quality=100):
f = StringIO()
img.save(f, img_format, quality=jpeg_quality)
f.seek(0)
return f
def save_img_file(fn, img, img_format='JPEG', jpeg_quality=100):
full_path = os.path.join(settings.TMP_DIR, fn)
if os.path.exists(full_path):
os.remove(full_path)
with open(full_path, 'wb') as f:
img.save(f, img_format, quality=jpeg_quality)
return full_path
def clean_media_dir():
for fn in glob.glob(media_path('*')):
if os.path.isdir(fn):
shutil.rmtree(fn)
else:
os.remove(fn)
@contextmanager
def safe_change_dju_settings():
"""
with safe_change_dju_settings():
dju_settings.DJU_IMG_UPLOAD_PROFILE_DEFAULT['TYPES'] = ('PNG',)
...
# dju settings will be restored
...
"""
settings_bak = {}
for k, v in dju_settings.__dict__.iteritems():
if k[:4] == 'DJU_':
settings_bak[k] = copy.deepcopy(v)
try:
yield
finally:
for k, v in settings_bak.iteritems():
setattr(dju_settings, k, v)
clear_profile_configs_cache()
class ViewTestCase(TestCase):
def get_json(self, response):
self.assertEqual(response['Content-Type'], 'application/json')
try:
data = simplejson.loads(response.content)
except (TypeError, simplejson.JSONDecodeError):
raise self.failureException('Response is not JSON')
self.assertIsInstance(data, dict)
self.assertIsInstance(data['uploaded'], list)
self.assertIsInstance(data['errors'], list)
return data
def assertUploadedFilesExist(self, response_data):
for item in response_data['uploaded']:
path = media_path(item['rel_url'])
self.assertTrue(os.path.isfile(path))
for var_data in item['variants'].values():
var_path = media_path(var_data['rel_url'])
self.assertTrue(os.path.isfile(var_path))
def assertUploadedFilesNotExist(self, response_data):
for item in response_data['uploaded']:
path = media_path(item['rel_url'])
self.assertFalse(os.path.isfile(path))
for var_data in item['variants'].values():
var_path = media_path(var_data['rel_url'])
self.assertFalse(os.path.isfile(var_path))
class CleanTmpDirMixin(object):
@classmethod
def _clean_tmp_dir(cls):
for fn in glob.glob(os.path.join(settings.TMP_DIR, '*')):
if os.path.isdir(fn):
shutil.rmtree(fn)
else:
os.remove(fn)
| liminspace/dju-image | tests/tests/tools.py | Python | mit | 3,513 |
from sys import argv
from os.path import exists
script, from_file, to_file = argv
print "Copying from %s to %s" % (from_file, to_file)
# we could do these two on one line too, how?
# in_file = open(from_file)
# indata = in_file.read()
indata = open(from_file).read()
print "The input file is %d bytes long" % len(indata)
print "Does the output file exist? %r" % exists(to_file)
print "Ready, hit RETURN to continue, CTRL-C to abort."
raw_input()
out_file = open(to_file, 'w')
out_file.write(indata)
print "Alright, all done."
out_file.close()
# in_file.close() | CodeCatz/litterbox | Pija/LearnPythontheHardWay/ex17.py | Python | mit | 570 |
# Author: Mr_Orange <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
from urllib import urlencode
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.providers import generic
from sickbeard.common import USER_AGENT
class ThePirateBayProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "ThePirateBay")
self.supportsBacklog = True
self.public = True
self.ratio = None
self.confirmed = True
self.minseed = None
self.minleech = None
self.cache = ThePirateBayCache(self)
self.urls = {
'base_url': 'https://pirateproxy.la/',
'search': 'https://pirateproxy.la/s/',
'rss': 'https://pirateproxy.la/tv/latest'
}
self.url = self.urls['base_url']
self.headers.update({'User-Agent': USER_AGENT})
"""
205 = SD, 208 = HD, 200 = All Videos
https://thepiratebay.gd/s/?q=Game of Thrones&type=search&orderby=7&page=0&category=200
"""
self.search_params = {
'q': '',
'type': 'search',
'orderby': 7,
'page': 0,
'category': 200
}
self.re_title_url = r'/torrent/(?P<id>\d+)/(?P<title>.*?)".+?(?P<url>magnet.*?)".+?Size (?P<size>[\d\.]* [TGKMiB]{2,3}).+?(?P<seeders>\d+)</td>.+?(?P<leechers>\d+)</td>'
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_strings.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
self.search_params.update({'q': search_string.strip()})
                if mode != 'RSS':
logger.log(u"Search string: " + search_string, logger.DEBUG)
                searchURL = self.urls[('search', 'rss')[mode == 'RSS']] + '?' + urlencode(self.search_params)
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
data = self.getURL(searchURL)
# data = self.getURL(self.urls[('search', 'rss')[mode is 'RSS']], params=self.search_params)
if not data:
continue
matches = re.compile(self.re_title_url, re.DOTALL).finditer(data)
for torrent in matches:
title = torrent.group('title')
download_url = torrent.group('url')
# id = int(torrent.group('id'))
size = self._convertSize(torrent.group('size'))
seeders = int(torrent.group('seeders'))
leechers = int(torrent.group('leechers'))
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
# Accept Torrent only from Good People for every Episode Search
if self.confirmed and re.search(r'(VIP|Trusted|Helper|Moderator)', torrent.group(0)) is None:
                        if mode != 'RSS':
logger.log(u"Found result %s but that doesn't seem like a trusted result so I'm ignoring it" % title, logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
                    if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item)
# For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _convertSize(self, size):
size, modifier = size.split(' ')
size = float(size)
        if modifier == 'KiB':
            size = size * 1024
        elif modifier == 'MiB':
            size = size * 1024**2
        elif modifier == 'GiB':
            size = size * 1024**3
        elif modifier == 'TiB':
            size = size * 1024**4
return size
def seedRatio(self):
return self.ratio
class ThePirateBayCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# only poll ThePirateBay every 30 minutes max
self.minTime = 30
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_params)}
provider = ThePirateBayProvider()
| srluge/SickRage | sickbeard/providers/thepiratebay.py | Python | gpl-3.0 | 5,662 |
#!/usr/bin/env python
# Demonstration GET geo/reverse_geocode
# See https://dev.twitter.com/rest/reference/get/geo/reverse_geocode
from secret import twitter_instance
from json import dump
import sys
tw = twitter_instance()
# [1]
response = tw.geo.reverse_geocode(lat=35.696805, long=139.773828)
# [2]
dump(response, sys.stdout, ensure_ascii=False, indent=4, sort_keys=True)
| showa-yojyo/notebook | source/_sample/ptt/geo-reverse_geocode.py | Python | mit | 380 |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from url_obfuscate.decorators import deobfuscate
from url_obfuscate.helpers import obfuscate
def home(request):
links = list()
for i in range(10):
links.append(obfuscate('Name %d' % (i+1)))
return render(request, 'index.html', { 'links': links })
@deobfuscate
def obfuscated_link(request, name):
return render(request, 'obfuscate_result.html', { 'name': name })
@deobfuscate
def optional_param(request, param=None):
return render(request, 'optional_param.html', { 'param': param })
| lmanzurv/url_ofbuscate | tests/views.py | Python | apache-2.0 | 573 |
import os
from unittest import TestCase, main
from gruffy import Pie
TARGET_FILE = 'test.png'
class TestPie(TestCase):
def tearDown(self):
os.remove(TARGET_FILE)
def test_writable(self):
g = Pie()
g.data("test1", [1, 2, 3])
g.data("test2", [3, 2, 1])
g.write(TARGET_FILE)
if __name__ == '__main__':
main()
| hhatto/gruffy | test/test_pie.py | Python | mit | 364 |
from datetime import datetime, timedelta
import calendar
DAY_NAMES = [x.lower() for x in calendar.day_name[6:] + calendar.day_name[:6]]
DAY_ABBRS = [x.lower() for x in calendar.day_abbr[6:] + calendar.day_abbr[:6]]
# Choice tuples, mainly designed to use with Django
MINUTE_CHOICES = [(str(x), str(x)) for x in range(0, 60)]
HOUR_CHOICES = [(str(x), str(x)) for x in range(0, 24)]
DOM_CHOICES = [(str(x), str(x)) for x in range(1, 32)]
MONTH_CHOICES = [(str(x), calendar.month_name[x]) for x in range(1, 13)]
DOW_CHOICES = [(str(i), day_name) for i, day_name in enumerate(DAY_NAMES)]
def _to_int(value, allow_daynames=False):
"""
Converts a value to an integer. If allow_daynames is True, it will convert day of week to an integer 0 through 6.
@input:
value = value to convert to integer
allow_daynames = True, to allow values like Mon or Monday
@output: value as an integer
"""
if isinstance(value, int) or (isinstance(value, str) and value.isnumeric()):
return int(value)
elif isinstance(value, str) and allow_daynames and value in DAY_NAMES:
return DAY_NAMES.index(value)
elif isinstance(value, str) and allow_daynames and value in DAY_ABBRS:
return DAY_ABBRS.index(value)
raise ValueError('Failed to parse string to integer')
def _parse_arg(value, target, allow_daynames=False):
value = value.strip()
if value == '*':
return True
values = filter(None, [x.strip() for x in value.split(',')])
for value in values:
try:
# First, try a direct comparison
if _to_int(value, allow_daynames=allow_daynames) == target:
return True
except ValueError:
pass
if '-' in value:
step = 1
if '/' in value:
# Allow divider in values, see issue #14
try:
start, tmp = [
x.strip()
for x in value.split('-')
]
start = _to_int(start)
end, step = [
_to_int(x.strip(), allow_daynames=allow_daynames)
for x in tmp.split('/')
]
except ValueError:
continue
else:
try:
start, end = [
_to_int(x.strip(), allow_daynames=allow_daynames)
for x in value.split('-')
]
except ValueError:
continue
# If target value is in the range, it matches
if target in range(start, end + 1, step):
return True
# Special cases, where the day names are more or less incorrectly set...
if allow_daynames and start > end:
return target in range(start, end + 6 + 1)
if '/' in value:
v, interval = [x.strip() for x in value.split('/')]
# Not sure if applicable for every situation, but just to make sure...
if v != '*':
continue
# If the remainder is zero, this matches
if target % _to_int(interval, allow_daynames=allow_daynames) == 0:
return True
return False
def is_now(s, dt=None):
'''
A very simple cron-like parser to determine, if (cron-like) string is valid for this date and time.
@input:
s = cron-like string (minute, hour, day of month, month, day of week)
dt = datetime to use as reference time, defaults to now
@output: boolean of result
'''
if dt is None:
dt = datetime.now()
minute, hour, dom, month, dow = s.split(' ')
weekday = dt.isoweekday()
return _parse_arg(minute, dt.minute) \
and _parse_arg(hour, dt.hour) \
and _parse_arg(dom, dt.day) \
and _parse_arg(month, dt.month) \
and _parse_arg(dow, 0 if weekday == 7 else weekday, True)
def has_been(s, since, dt=None):
'''
A parser to check whether a (cron-like) string has been true during a certain time period.
Useful for applications which cannot check every minute or need to catch up during a restart.
@input:
s = cron-like string (minute, hour, day of month, month, day of week)
since = datetime to use as reference time for start of period
dt = datetime to use as reference time for end of period, defaults to now
@output: boolean of result
'''
if dt is None:
dt = datetime.now(tz=since.tzinfo)
if dt < since:
raise ValueError("The since datetime must be before the current datetime.")
while since <= dt:
if is_now(s, since):
return True
since += timedelta(minutes=1)
return False
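
if __name__ == '__main__':
    # Minimal self-check sketch (not part of the library API): a wildcard
    # schedule matches any minute, so both calls should hold.
    now = datetime.now()
    assert is_now('* * * * *', now)
    assert has_been('* * * * *', now - timedelta(minutes=5), now)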
| kipe/pycron | pycron/__init__.py | Python | mit | 4,831 |
#!/usr/bin/env python
#
# Copyright (c) 2010, ZX. All rights reserved.
# Copyright (c) 2016, Capitar. All rights reserved.
#
# Released under the MIT license. See LICENSE file for details.
#
import netaddr
import netifaces
import socket
import os
import os.path
import re
import sys
import subprocess
import time
__version__ = (2, 9, 6)
class FWMacroException(Exception):
"""Base exception for fwmacro"""
pass
class FWSyntaxError(FWMacroException):
"""Basic syntax error"""
def __init__(self, lineno, msg):
self.lineno = lineno
self.msg = msg
def __str__(self):
return "{} at line {}".format(self.msg, self.lineno)
class FWUndefinedError(FWMacroException):
"""Undefined name"""
def __init__(self, lineno, name, entity=None):
self.lineno = lineno
self.name = name
self.entity = entity
def __str__(self):
entity = "{}".format(self.entity) if self.entity else ""
return "Undefined {} {} at line {}".format(
entity, self.name, self.lineno)
class FWRedefinitionError(FWMacroException):
"""Redefinition detected"""
def __init__(self, lineno, name, entity=None, reflineno=None):
self.lineno = lineno
self.name = name
self.entity = entity
self.reflineno = reflineno
def __str__(self):
entity = " {}".format(self.entity) if self.entity else ""
if self.reflineno:
refline = " (defined at line {})".format(self.reflineno)
else:
refline = ""
return "Redefinition of {}{} at line {}{}".format(
entity, self.name, self.lineno, refline)
class FWRecursionError(FWMacroException):
"""Recursion detected in resolving names"""
def __init__(self, lineno, name, *groups):
self.lineno = lineno
self.name = name
self.groups = groups
def __str__(self):
        groups = list(self.groups)
lines = []
for group in groups:
lines.append("{}: {}".format(group.name, group.lineno))
return (
"Recursion detected in group definition for group {}: {}"
).format(self.name, ", ".join(lines))
class FWResolveError(FWMacroException):
"""Resolve error for hostnames"""
def __init__(self, lineno, name, msg):
self.lineno = lineno
self.name = name
self.msg = msg
def __str__(self):
msg = ": {}".format(self.msg) if self.msg else ""
return "Cannot resolve {}{}".format(self.name, msg)
class FWIPMaskBoundaryError(FWMacroException):
"""IP not on lower mask boundary"""
def __init__(self, lineno, ip):
self.lineno = lineno
self.ip = ip
def __str__(self):
return "IP is not on mask boundary ({}) at line {}".format(
self.ip, self.lineno)
class FWInvalidTable(FWMacroException):
"""Invalid table name"""
def __init__(self, lineno, name):
self.lineno = lineno
self.name = name
def __str__(self):
return "Invalid table name {}".format(self.name)
class FWInvalidParentChain(FWMacroException):
"""Invalid parent chain"""
def __init__(self, lineno, table, chain):
self.lineno = lineno
self.table = table
self.chain = chain
def __str__(self):
return "Invalid parent chain {} in table {}".format(
self.chain,
self.table,
)
class FWReservedChainName(FWMacroException):
"""Reserved name used or a chain definition"""
def __init__(self, lineno, fname, chain):
self.lineno = lineno
self.fname = fname
self.chain = chain
def __str__(self):
return "Reserved chain name {} at {}:{}".format(
self.chain,
self.fname,
self.lineno,
)
class FWOrderConflict(FWMacroException):
def __init__(self, lineno, fname, chain, origdef):
self.lineno = lineno
self.fname = fname
self.chain = chain
self.origdef = origdef
def __str__(self):
return (
"Order redefinition of chain {} at {}:{} "
"(first defined at {}:{})"
).format(
self.chain,
self.fname,
self.lineno,
self.origdef[1],
self.origdef[2],
)
class FWIndentError(FWMacroException):
def __init__(self, lineno, direction):
self.lineno = lineno
assert direction in [None, '-', '+']
self.direction = direction
def __str__(self):
if self.direction == '-':
msg = "Dedent error at line {}"
elif self.direction == '+':
msg = "Indent error at line {}"
else:
msg = "Indentation error at line {}"
return msg.format(self.lineno)
class FWInvalidName(FWMacroException):
def __init__(self, lineno, name):
self.lineno = lineno
self.name = name
def __str__(self):
return "Invalid name {} at line {}".format(self.name, self.lineno)
class FWInvalidIP(FWMacroException):
def __init__(self, lineno, address):
self.lineno = lineno
self.address = address
def __str__(self):
return "Invalid IP address {} at line {}".format(
self.address, self.lineno)
class FWInvalidPort(FWMacroException):
def __init__(self, lineno, port):
self.lineno = lineno
self.port = port
def __str__(self):
return "Invalid port {} at line {}".format(
self.port, self.lineno)
class FWExpectedError(FWMacroException):
def __init__(self, lineno, text, expected=None):
self.lineno = lineno
self.text = text
self.expected = expected
def __str__(self):
if not self.expected:
return "Unexpected {} at line {}".format(self.text, self.lineno)
if isinstance(self.expected, str):
expected = self.expected
elif len(self.expected) == 1:
expected = self.expected
else:
expected = "one of {}".format(", ".join([
i for i in self.expected if i is not None]))
return "Expected {} but got {} at line {}".format(
expected, self.text, self.lineno)
class FWGroupNameRequired(FWMacroException):
def __init__(self, lineno, text):
self.lineno = lineno
self.text = text
def __str__(self):
return "Group names are required. Got {} at line {}".format(
self.text, self.lineno)
class Chain(object):
def __init__(self, lineno, name, condition=None):
self.lineno = lineno
self.name = name
self.condition = condition
    def __str__(self):
        return "line: {}, name: {}, condition: {}".format(
            self.lineno, self.name, self.condition,
        )
def __repr__(self):
return "<Chain {}>".format(self.__str__())
class Rule(object):
"""Represention of a input (FWPreprocess) rule"""
def __init__(self, lineno, local, direction, chainname=None):
self.lineno = lineno
self.local = local
self.direction = direction
self.action = None
self.protocol = None
self.icmp4 = []
self.icmp6 = []
self.state = "NEW"
self.tcpflags = []
self.sources = []
self.srcports = []
self.destinations = []
self.dstports = []
self.logging = ""
self.logname = ""
self.nat = ""
self.natports = ""
self._chainname = chainname
def __str__(self):
return "line: {}, local: {}, direction: {}, action: {}".format(
self.lineno, self.local, self.direction, self.action,
)
def __repr__(self):
return "<Rule {}>".format(self.__str__())
def chainname(self, chainnr, chainname, iface):
if self._chainname:
return self._chainname
if self.local:
direction_char = self.direction[0].upper()
else:
direction_char = self.direction[0].lower()
chainname = "{}{}:{}".format(direction_char, iface, chainname)
if len(chainname) >= 30:
chainname = "{}{}:{}{}".format(
iface, direction_char, chainnr, chainname)
chainname = chainname[:29]
# self._chainname = chainname
return chainname
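    # Illustrative naming derived from the logic above (values are
    # hypothetical): a non-local "in" rule on eth0 in chain "web"
    # yields "ieth0:web"; the local variant yields "Ieth0:web".
    # Longer results are rebuilt with the chain number and truncated
    # to 29 characters to fit iptables' chain-name length limit.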
class Token(object):
def __init__(self, lineno, indent, text, quote=None):
self.lineno = lineno
self.indent = indent
self.text = text
self.quote = quote
def __str__(self):
return "{}:{}:{}".format(self.lineno, self.indent, self.text)
def __repr__(self):
return "Token({}, {}, {})".format(
self.lineno, self.indent, self.text)
class Group(list):
"""Group object containing groups/hostnames/ips"""
cached = {}
def __init__(self, name, lineno):
self.name = name
self.lineno = lineno
self.resolved = None
self.ipv4 = []
self.ipv6 = []
self.referred_lines = []
def resolve(self):
if self.name not in self.cached:
self.cached[self.name] = self
if self.resolved is False:
            raise FWRecursionError(self.lineno, self.name, self)
if self.resolved is None:
self.resolved = False
for obj in self:
ipv4, ipv6 = [], []
if isinstance(obj, Group):
try:
ipv4, ipv6 = obj.resolve()
                    except FWRecursionError as e:
                        raise FWRecursionError(
                            self.lineno, self.name, *(e.groups + (self,)))
else:
assert isinstance(obj, netaddr.IPNetwork), obj
if obj.version == 4:
ipv4 = [obj]
else:
ipv6 = [obj]
for ip in ipv4:
if ip not in self.ipv4:
if (
isinstance(ip, netaddr.IPNetwork) and
ip.network != ip.ip
):
                            raise FWIPMaskBoundaryError(self.lineno, ip)
self.ipv4.append(ip)
for ip in ipv6:
if ip not in self.ipv6:
if (
isinstance(ip, netaddr.IPNetwork) and
ip.network != ip.ip
):
                            raise FWIPMaskBoundaryError(self.lineno, ip)
self.ipv6.append(ip)
self.resolved = True
self.ipv4.sort()
self.ipv6.sort()
return self.ipv4, self.ipv6
def ips(self):
assert self.resolved is not None
return self.ipv4 + self.ipv6
class Hostname(Group):
def resolve(self):
if self.name not in self.cached:
self.cached[self.name] = self
if self.resolved is None:
self.resolved = False
try:
ainfos = socket.getaddrinfo(self.name, None)
            except socket.gaierror as why:
                raise FWResolveError(self.lineno, self.name, why.strerror)
            except Exception:
                raise FWResolveError(self.lineno, self.name, None)
for ainfo in ainfos:
ip = netaddr.IPAddress(ainfo[4][0])
ip = netaddr.IPNetwork(ip)
if ip.version == 4:
if ip not in self.ipv4:
self.ipv4.append(ip)
else:
if ip not in self.ipv6:
self.ipv6.append(ip)
self.resolved = True
self.ipv4.sort()
self.ipv6.sort()
return self.ipv4, self.ipv6
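# Minimal sketch of group resolution (names and addresses hypothetical):
#
#     dns = Group("dns", 1)
#     dns.append(netaddr.IPNetwork("192.0.2.53/32"))
#     servers = Group("servers", 2)
#     servers.append(dns)        # groups may nest arbitrarily deep
#     servers.resolve()          # flattens into sorted ipv4/ipv6 lists
#     servers.ips()              # -> [IPNetwork('192.0.2.53/32')]
#
# resolve() raises FWRecursionError on cyclic definitions and
# FWIPMaskBoundaryError for addresses off their mask boundary.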
class FWMacro(object):
basedir = "/etc/fwmacro"
chainsdir_ipv4 = "chains4"
chainsdir_ipv6 = "chains6"
chainsfile_ipv4 = "ipv4.rules"
chainsfile_ipv6 = "ipv6.rules"
# chainsfile_ipv6 = os.path.join(basedir, "ipv6.rules")
logtag = "%(iface)s-%(chainname)s-%(lineno)s-%(action)s"
rule_explanation = """\
DIRECTION ACTION [STATES] PROTOCOL OPTIONS SOURCE DESTINATION LOG [LOGLEVEL] \
[LOGNAME]
DIRECTION := ["local"] "in" | "out"
ACTION := "permit" | "deny" | "snat" NATARGS | "dnat" NATARGS | "masq"
STATES := "NONE" | STATE[,STATE ...]
STATE := "NEW" | "ESTABLISHED" | "RELATED" | "INVALID"
PROTOCOL := "ip" | "all" | "tcp" | "udp" | "icmp" | number | `/etc/protocol`
DESTINATION := SOURCE
ADDR := group | fqdn-hostname | ip/mask | "any"
PORT := number | "all"
LOG := log [syslog_level]
NATARGS := ip[-ip] [port[-port]]
protocol ip, all, number:
  SOURCE := ADDR
OPTIONS :=
protocol icmp:
  SOURCE := ADDR
OPTIONS := [number[/code]|icmp-option]
protocol tcp:
SOURCE := ADDR PORT
OPTIONS := [ "syn" | "flags" [!] FMASK FCOMP ]
FMASK := TCPFLAGS
FCOMP := TCPFLAGS
TCPFLAGS := "ALL"|TCPFLAG[,TCPFLAG ...]
TCPFLAG := "SYN"|"ACK"|"FIN"|"RST"|"URG"|"PSH"|"ALL"
protocol udp:
SOURCE := ADDR PORT
OPTIONS :=
"""
default_rule = """\
Default tcp state: NEW
"""
iptables_cmd = 'iptables'
ip6tables_cmd = 'ip6tables'
default_icmp4options = '''
# See iptables -p icmp -h
Valid ICMP Types:
any
echo-reply (pong)
destination-unreachable
network-unreachable
host-unreachable
protocol-unreachable
port-unreachable
fragmentation-needed
source-route-failed
network-unknown
host-unknown
network-prohibited
host-prohibited
TOS-network-unreachable
TOS-host-unreachable
communication-prohibited
host-precedence-violation
precedence-cutoff
source-quench
redirect
network-redirect
host-redirect
TOS-network-redirect
TOS-host-redirect
echo-request (ping)
router-advertisement
router-solicitation
time-exceeded (ttl-exceeded)
ttl-zero-during-transit
ttl-zero-during-reassembly
parameter-problem
ip-header-bad
required-option-missing
timestamp-request
timestamp-reply
address-mask-request
address-mask-reply
'''
default_icmp6options = '''
# See ip6tables -p icmpv6 -h
Valid ICMPv6 Types:
destination-unreachable
no-route
communication-prohibited
beyond-scope
address-unreachable
port-unreachable
failed-policy
reject-route
packet-too-big
time-exceeded (ttl-exceeded)
ttl-zero-during-transit
ttl-zero-during-reassembly
parameter-problem
bad-header
unknown-header-type
unknown-option
echo-request (ping)
echo-reply (pong)
router-solicitation
router-advertisement
neighbour-solicitation (neighbor-solicitation)
neighbour-advertisement (neighbor-advertisement)
redirect
'''
re_name = re.compile('[a-zA-Z0-9_]+')
reserved_words = [
"group", "interface", "ruleset",
"local", "in", "out", "permit", "deny", "snat", "dnat", "masq",
"ip", "tcp", "udp", "icmp", "any", "all",
"NONE", "ESTABLISHED", "NEW", "RELATED", "INVALID",
"ALL", "SYN", "ACK", "FIN", "RST", "URG", "PSH", "ALL", "syn", "flags",
]
logging_levels = [
"debug", "info", "notice", "warning",
"err", "crit", "alert", "emerg",
]
def __init__(self):
self.n_errors = 0
self.n_warnings = 0
self.interfaces = list([
i
for i in netifaces.interfaces()
if (
netifaces.AF_LINK in netifaces.ifaddresses(i) and
(
netifaces.AF_INET in netifaces.ifaddresses(i) or
netifaces.AF_INET6 in netifaces.ifaddresses(i)
)
)
])
def warning(self, msg):
self.n_warnings += 1
sys.stderr.write("Warning: {}\n".format(msg))
def error(self, msg):
self.n_errors += 1
sys.stderr.write("Error: {}\n".format(msg))
def exec_cmd(self, cmd):
stdoutdata = None
try:
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as err:
self.warning("Cannot execute {}: {}".format(cmd[0], err))
else:
stdoutdata, stderrdata = p.communicate()
if stderrdata:
self.warning("Error while executing {}: {}".format(
' '.join(cmd), stderrdata))
stdoutdata = None
return stdoutdata
def parse_icmp_options(self, cmd, default_stdout):
stdoutdata = self.exec_cmd(cmd) or default_stdout
if isinstance(stdoutdata, bytes):
stdoutdata = stdoutdata.decode()
re_valid_types = re.compile('Valid ICMP(v6)? Types:')
options = []
in_option_help = False
for line in stdoutdata.split('\n'):
if not in_option_help:
if re_valid_types.match(line):
in_option_help = True
continue
line = line.split('#')[0]
if not line.strip():
continue
words = line.strip().split()
assert len(words) < 3, line
options.append(words[0])
for word in words[1:]:
assert word[:1] == '(' and word[-1:] == ')', word
options.append(word[1:-1])
if 'any' in options:
options.remove('any')
return options
def get_icmp4options(self):
iptables_cmd = '/sbin/{0}'.format(self.iptables_cmd)
if not os.path.exists(iptables_cmd):
iptables_cmd = self.iptables_cmd
cmd = [iptables_cmd, '-p', 'icmp', '-h']
self.icmp4_options = self.parse_icmp_options(
cmd, self.default_icmp4options)
return self.icmp4_options
def get_icmp6options(self):
ip6tables_cmd = '/sbin/{0}'.format(self.ip6tables_cmd)
if not os.path.exists(ip6tables_cmd):
ip6tables_cmd = self.ip6tables_cmd
cmd = [ip6tables_cmd, '-p', 'icmpv6', '-h']
self.icmp6_options = self.parse_icmp_options(
cmd, self.default_icmp6options)
return self.icmp6_options
def get_protocols(self):
protocols = [
'all', 'tcp', 'udp', 'udplite', 'icmp', 'icmpv4', 'icmpv6',
'esp', 'ah', 'sctp', 'mh',
]
with open('/etc/protocols', 'r') as fp:
for line in fp:
line = line.split('#')[0].strip()
if not line:
continue
words = line.split()
assert len(words) > 1, line
assert int(words[1]) >= 0, line
if not words[0] in protocols:
protocols.append(words[0])
for word in words[2:]:
if word not in protocols:
protocols.append(word)
return protocols
def get_services(self):
services = {}
with open('/etc/services', 'r') as fp:
for line in fp:
line = line.split('#')[0].strip()
if not line:
continue
words = line.split()
assert len(words) > 1, line
number = int(words[1].split('/')[0])
assert number >= 0, line
if not words[0] in services:
services[words[0]] = number
for word in words[2:]:
if word not in services:
services[word] = number
sorted_services = list([
i[1] for i in sorted([(len(key), key) for key in services.keys()])
])
sorted_services.reverse()
return services, sorted_services
class FWPreprocess(FWMacro):
re_indent = re.compile('^(\s*)\S.*$')
# re_tokens = re.compile('''((?P<q>['"]).*?(?P=q))|(\w[\w.]*)|(\W)''')
re_tokens = re.compile(
'''((?P<q>['"]).*?(?P=q))|'''
'''([A-Fa-f0-9]*:[A-Fa-f0-9]*:[A-Fa-f0-9:]*)|'''
'''(\d+[.]\d+[.]\d+[.]\d+)|'''
'''(\d+(?!\w))|'''
'''(\w[\w.-]*)|'''
'''(\W)'''
)
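    # The alternatives above classify, in order: quoted strings,
    # IPv6-looking runs (hex digits around ':'), dotted IPv4 quads,
    # bare numbers, words (which may also contain '.' and '-'), and
    # any remaining single non-word character.  As an illustration,
    # `in permit tcp 10.0.0.0/8 22 any all` tokenizes roughly as
    # in / permit / tcp / 10.0.0.0 / "/" / 8 / 22 / any / all.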
def __init__(self):
super(FWPreprocess, self).__init__()
self.icmp4options = self.get_icmp4options()
self.icmp6options = self.get_icmp6options()
self.protocols = self.get_protocols()
self.services, self.sorted_services = self.get_services()
self.token_stack = []
self.lineno = 0
self.groups = {}
self.rulesets = {}
self.ifaces = {}
self.addrinfos = {}
group = Group("any", 0)
self.any_ipv4 = netaddr.IPNetwork("0.0.0.0/0")
self.any_ipv6 = netaddr.IPNetwork("::/0")
group.append(self.any_ipv4)
group.append(self.any_ipv6)
self.groups["any"] = group
self.force_groups = False
self.allow_mixed_ipv = True
self.default_log_level = 'warning'
def get_token(self, expect_text=False, expect_indent=None,
expect_lineno=None):
token = self._get_token(expect_text, expect_indent, expect_lineno)
return token
def _get_token(self, expect_text=False, expect_indent=None,
expect_lineno=None):
if self.token_stack:
token = self.token_stack.pop()
if expect_text is False:
return token
if expect_indent is not None and expect_indent != token.indent:
if token.indent < expect_indent:
raise FWIndentError(token.lineno, "-")
raise FWIndentError(token.lineno, "+")
if expect_lineno and token.lineno != expect_lineno:
raise FWExpectedError(token.lineno, 'EOL')
if expect_text is True:
# Just expect something
return token
text = token.text
if isinstance(expect_text, str):
if text != expect_text:
raise FWExpectedError(token.lineno, text, expect_text)
return token
if text not in expect_text:
raise FWExpectedError(token.lineno, text, expect_text)
return token
line = self.fp.readline()
if not line:
if (
expect_text is False or
isinstance(expect_text, list) and None in expect_text
):
return None
raise FWExpectedError(self.lineno, 'EOF', expect_text)
self.lineno += 1
line = line.split('#')[0]
if not line.strip():
return self._get_token(expect_text, expect_indent)
        match = self.re_indent.match(line)
        if match is None:
            self.error("line {} {}: Invalid".format(self.lineno, line))
            return self._get_token(expect_text, expect_indent)
        indent = match.group(1).replace('\t', ' ')
        indent = len(indent)
for match in self.re_tokens.findall(line):
strng, quote, ipv6, ipv4, number, text, char = match
if quote:
token = Token(self.lineno, indent, strng[1:-1], quote)
elif ipv6:
token = Token(self.lineno, indent, ipv6)
elif ipv4:
token = Token(self.lineno, indent, ipv4)
elif number:
token = Token(self.lineno, indent, number)
elif text:
token = Token(self.lineno, indent, text)
elif char.strip():
token = Token(self.lineno, indent, char)
else:
continue
self.token_stack.insert(0, token)
return self._get_token(expect_text, expect_indent)
def push_token(self, token):
if token is not None:
self.token_stack.append(token)
def skip_line(self, lineno):
token = self.get_token()
while token is not None and token.lineno == lineno:
token = self.get_token()
self.push_token(token)
def skip_lines(self, indent):
token = self.get_token()
while token is not None and token.indent == indent:
token = self.get_token()
self.push_token(token)
def lookup_service(self, text):
for srvc in self.sorted_services:
            if text.startswith(srvc) and text[len(srvc):len(srvc) + 1] == '-':
return srvc, text[len(srvc):]
return None, None
def get_port(self, token):
if token.text == 'all' or token.text in self.services:
port = token.text
else:
try:
port = str(int(token.text))
except:
raise FWInvalidPort(token.lineno, token.text)
return port
def get_ports(self, token):
indent = token.indent
lineno = token.lineno
try:
ports = [self.get_port(token)]
except FWInvalidPort:
return []
while True:
token = self.get_token()
if token is None:
return ports
if token.lineno != lineno:
break
if token.text == '-':
token = self.get_token(True, indent, lineno)
port = self.get_port(token)
if ports[-1] == 'all' or port == 'all':
raise FWSyntaxError("Cannot use 'all' in port range")
ports[-1] = "{}-{}".format(ports[-1], port)
elif token.text == ',':
token = self.get_token(True, indent, lineno)
ports.append(self.get_port(token))
else:
break
if len(ports) > 1 and 'all' in ports:
raise FWSyntaxError(
lineno, "Cannot use 'all' with other ports")
self.push_token(token)
return ports
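    # Accepted port syntax, as parsed above (illustrative): a single
    # port ("22"), a service name from /etc/services ("ssh"), a comma
    # list ("80,443"), a range ("8000-8080"), or "all" -- which may
    # not be combined with other ports or used inside a range.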
    def get_port_range(self, token):
        lineno = token.lineno
        indent = token.indent
        try:
            port_range = [self.get_port(token)]
        except FWInvalidPort:
            return []
        token = self.get_token()
        if token is None:
            return port_range
        if token.lineno != lineno or token.text != '-':
            self.push_token(token)
            return port_range
        token = self.get_token(True, indent, lineno)
        port_range.append(self.get_port(token))
        return port_range
def get_name_ip_net(self, token, names_only=False):
        try:
            ip = netaddr.IPNetwork(token.text)
        except Exception:
            if token.text != 'any' and token.text in self.reserved_words:
                raise FWSyntaxError(
                    token.lineno,
                    "Reserved word {}".format(token.text))
            return token.text
        if names_only:
            raise FWGroupNameRequired(token.lineno, token.text)
next_token = self.get_token()
if (
not next_token or
next_token.lineno != token.lineno or
next_token.text != '/'
):
self.push_token(next_token)
else:
mask_token = self.get_token(True, token.indent, token.lineno)
try:
ip = netaddr.IPNetwork("{}/{}".format(
token.text, mask_token.text))
except:
self.push_token(mask_token)
self.push_token(next_token)
return ip
def get_name_ip_net_list(self, start_token, names_only=False):
indent = start_token.indent
lineno = start_token.lineno
entries = [self.get_name_ip_net(start_token, names_only)]
token = self.get_token()
while token and token.text == ',' and token.lineno == lineno:
entries.append(
self.get_name_ip_net(
self.get_token(True, indent, lineno),
names_only,
)
)
token = self.get_token()
self.push_token(token)
return entries
def parse(self, fp):
self.fp = fp
self.lineno = 0
while True:
token = self.get_token(
['group', 'ruleset', 'interface', None], 0)
if token is None:
break
try:
if token.text == 'group':
self.handle_group(token)
elif token.text == 'ruleset':
self.handle_ruleset_def(token)
elif token.text == 'interface':
self.handle_interface(token)
except FWMacroException as e:
self.error(e)
self.skip_line(token.lineno)
token = self.get_token()
if token.indent == 0:
self.push_token(token)
else:
self.skip_lines(token.indent)
unreferenced_ifaces = list([
i
for i in self.interfaces
if (
netifaces.AF_INET in netifaces.ifaddresses(i) or
netifaces.AF_INET6 in netifaces.ifaddresses(i)
)
])
for name in self.ifaces:
if name in unreferenced_ifaces:
unreferenced_ifaces.remove(name)
elif name[-1] == '+':
for i in unreferenced_ifaces[:]:
if i.startswith(name[:-1]):
unreferenced_ifaces.remove(i)
if unreferenced_ifaces:
self.warning("Unreferenced interfaces: {}".format(
", ".join(unreferenced_ifaces)))
def handle_group(self, start_token):
token = self.get_token(True)
name = token.text
self.get_token(':', token.indent, token.lineno)
if not self.re_name.match(name):
raise FWInvalidName(token.lineno, name)
if name not in self.groups:
self.groups[name] = group = Group(name, start_token.lineno)
else:
group = self.groups[name]
if group.lineno is None:
group.lineno = token.lineno
else:
                raise FWRedefinitionError(
                    token.lineno, name, 'group', group.lineno)
token = self.get_token(True)
indent = token.indent
if indent <= start_token.indent:
raise FWIndentError(token.lineno, '+')
while token is not None and token.indent == indent:
text = self.get_name_ip_net(token)
if not isinstance(text, str):
group.append(text)
else:
if text not in self.groups:
# Forward reference to a group
if '.' in text:
# Hostname (to be resolved later)
self.groups[text] = Hostname(text, None)
else:
self.groups[text] = Group(text, None)
self.groups[text].referred_lines.append(token.lineno)
group.append(self.groups[text])
next_token = self.get_token()
            if next_token is not None and token.lineno == next_token.lineno:
raise FWExpectedError(token.lineno, next_token.text, 'EOL')
token = next_token
        if token is not None:
            if token.indent != start_token.indent:
                raise FWIndentError(token.lineno, None)
            self.push_token(token)
def handle_ruleset_def(self, start_token):
token = self.get_token(True)
name = token.text
if name in self.rulesets:
raise FWRedefinitionError(
token.lineno, name, 'ruleset')
self.get_token(':', token.indent, token.lineno)
rules = self.handle_rules(start_token)
self.rulesets[name] = rules
def handle_interface(self, start_token):
token = self.get_token(True)
name = token.text
if name in self.ifaces:
raise FWRedefinitionError(
token.lineno, name, 'interface')
if name not in self.interfaces:
matched = False
if name[-1] == '+':
for i in self.interfaces:
if i.startswith(name[:-1]):
matched = True
break
if not matched:
self.warning("No matching interfaces for {}".format(name))
self.get_token(':', token.indent, token.lineno)
rules = self.handle_rules(start_token)
self.ifaces[name] = rules
def handle_rules(self, start_token):
rules = []
token = self.get_token(True)
indent = token.indent
if indent <= start_token.indent:
raise FWIndentError(token.lineno, None)
while token is not None and token.indent == indent:
try:
rule = self.handle_rule(token)
if isinstance(rule, list):
rules.extend(rule)
else:
rules.append(rule)
except FWMacroException as e:
self.error(e)
# Skip current line
self.skip_line(token.lineno)
token = self.get_token()
if token is not None:
if token.indent != start_token.indent:
raise FWIndentError(token.lineno, None)
self.push_token(token)
return rules
def handle_rule(self, start_token):
lineno = start_token.lineno
indent = start_token.indent
def get_nat_ip(token):
text = token.text
if text in self.groups:
# test if it is a group and make sure it only contains one IP
if len(self.groups[text]) != 1:
raise FWSyntaxError(
token.lineno, "NAT ip group can only have 1 item")
else:
self.groups[text].resolve()
text = str(self.groups[text].ips()[0]).split('/')[0]
elif self.force_groups:
raise FWGroupNameRequired(token.lineno, text)
else:
# Handle ip address
try:
text = str(netaddr.IPAddress(text))
except:
raise FWInvalidIP(token.lineno, text)
return text
        # Until we have a destination, every self.get_token() call should
        # return a token on the same line, hence at least the arguments:
        # True, indent, lineno
token = start_token
expect = ['in', 'out']
is_local = False
if token.text == 'ruleset':
token = self.get_token(True, indent, lineno)
if token.text not in self.rulesets:
raise FWUndefinedError(
lineno, token.text, "ruleset")
return self.rulesets[token.text]
elif token.text == 'local':
is_local = True
token = self.get_token(expect, indent, lineno)
elif token.text not in expect:
raise FWExpectedError(start_token.lineno, token.text, expect)
rule = Rule(lineno, is_local, token.text)
token = self.get_token(
["permit", "deny", "snat", "dnat", "masq"], indent, lineno)
rule.action = token.text
token = self.get_token(True, indent, lineno)
if rule.action in ["snat", "dnat"]:
# Read NATARGS
# NATARGS := ip[-ip] [port[-port]]
nat_ip = [get_nat_ip(token)]
token = self.get_token(True, indent, lineno)
if token.text != '-':
rule.nat = nat_ip[0]
else:
token = self.get_token(True, indent, lineno)
nat_ip.append(get_nat_ip(token))
rule.nat = '-'.join(nat_ip)
token = self.get_token(True, indent, lineno)
rule.natports = '-'.join([
str(i) for i in self.get_port_range(token)])
if rule.natports:
token = self.get_token(True, indent, lineno)
# STATES := "NONE" | STATE[,STATE ...]
# STATE := "NEW" | "ESTABLISHED" | "RELATED" | "INVALID"
# Default is NEW
states = []
while (
token.text in [
"NONE", "NEW", "ESTABLISHED", "RELATED", "INVALID",
]
):
if token.lineno != lineno:
self.push_token(token)
raise FWExpectedError(lineno, 'EOL')
text = "" if token.text == "NONE" else token.text
if text not in states:
states.append(text)
token = self.get_token(True, indent, lineno)
if states:
if len(states) > 1 and "" in states:
FWSyntaxError("Cannot mix state NONE with other states")
rule.state = ",".join(states)
# PROTOCOL := "ip" | "all" | "tcp" | "udp" | "icmp" | number |
# `/etc/protocol`
invert = ''
if token.text == '!':
invert = '!'
token = self.get_token(True, indent, lineno)
try:
proto = int(token.text)
except:
if (
token.text in ["ip", "all", "tcp", "udp", "icmp", ] or
token.text in self.protocols
):
proto = token.text
else:
raise FWExpectedError(lineno, token.text, 'protocol')
if proto in ["ip", "all", 0]:
if invert:
                raise FWSyntaxError(
                    lineno, "Cannot invert protocol {}".format(proto))
proto = "all"
rule.protocol = "{}{}".format(invert, proto)
token = self.get_token(True, indent, lineno)
if rule.action in ["dnat"]:
if proto in ["tcp", "udp"]:
if not rule.natports:
                    raise FWSyntaxError(
                        lineno,
                        "Specific ports needed in nat definition "
                        "(when using tcp or udp match condition)"
                    )
            elif rule.natports:
                raise FWSyntaxError(
                    lineno,
                    "Ports not used in nat definition "
                    "(use tcp or udp match condition)"
                )
# Get proto options
if proto == 'icmp':
icmp4 = []
icmp6 = []
while True:
is_option = False
if token.text in self.icmp4_options:
icmp4.append(token.text)
is_option = True
if token.text in self.icmp6_options:
icmp6.append(token.text)
is_option = True
if not is_option:
break
token = self.get_token(True, indent, lineno)
if token.text == ',':
token = self.get_token(True, indent, lineno)
else:
break
rule.icmp4 = icmp4
rule.icmp6 = icmp6
elif proto == 'tcp':
# OPTIONS := [ "syn" | "flags" [!] FMASK FCOMP ]
# FMASK := TCPFLAGS
# FCOMP := TCPFLAGS
# TCPFLAGS := "ALL"|TCPFLAG[,TCPFLAG ...]
# TCPFLAG := "SYN"|"ACK"|"FIN"|"RST"|"URG"|"PSH"|"ALL"
tcp_fmask = []
tcp_fcomp = []
if token.text == 'syn':
rule.tcpflags = [token.text]
elif token.text == 'flags':
token = self.get_token(
['!', "ALL", "SYN", "ACK", "FIN", "RST", "URG", "PSH", ],
indent, lineno)
invert = False
if token.text == '!':
invert = True
token = self.get_token(
["ALL", "SYN", "ACK", "FIN", "RST", "URG", "PSH", ],
indent, lineno)
tcp_fmask.append(token.text)
token = self.get_token(True, indent, lineno)
while token.text == ',':
token = self.get_token(
["ALL", "SYN", "ACK", "FIN", "RST", "URG", "PSH", ],
indent, lineno)
tcp_fmask.append(token.text)
token = self.get_token(True, indent, lineno)
self.push_token(token)
token.text = ','
while token.text == ',':
token = self.get_token(
["ALL", "SYN", "ACK", "FIN", "RST", "URG", "PSH", ],
indent, lineno)
tcp_fcomp.append(token.text)
token = self.get_token(True, indent, lineno)
rule.tcpflags = [','.join(tcp_fmask), ','.join(tcp_fcomp)]
if invert:
rule.tcpflags[0] = '!{}'.format(rule.tcpflags[0])
# Now get the source
addrs = self.get_name_ip_net_list(token, self.force_groups)
if len(addrs) == 0:
raise FWExpectedError(lineno, token.text, 'source address')
if len(addrs) > 1 and 'any' in addrs:
raise FWSyntaxError(
lineno, "Cannot mix 'any' with other source addresses")
rule.sources = addrs
token = self.get_token(True, indent, lineno)
if proto in ["tcp", "udp"]:
# Get tcp/udp ports
rule.srcports = self.get_ports(token)
if not rule.srcports:
raise FWExpectedError(lineno, token.text, 'source port')
token = self.get_token(True, indent, lineno)
# Now get the destination
addrs = self.get_name_ip_net_list(token, self.force_groups)
if len(addrs) == 0:
raise FWExpectedError(lineno, token.text, 'destination address')
if len(addrs) > 1 and 'any' in addrs:
raise FWSyntaxError(
lineno, "Cannot mix 'any' with other destination addresses")
rule.destinations = addrs
# Now we have to be careful with get_token()
if proto in ["tcp", "udp"]:
token = self.get_token(True, indent, lineno)
# Get tcp/udp ports
rule.dstports = self.get_ports(token)
if not rule.dstports:
raise FWExpectedError(lineno, token.text, 'destination port')
token = self.get_token()
# Check for logging
if token and token.lineno == lineno and token.text == "log":
rule.logging = self.default_log_level
token = self.get_token()
if token and token.lineno == lineno:
if token.text in self.logging_levels:
rule.logging = token.text
token = self.get_token()
if (
token.lineno == lineno and
token.text not in self.logging_levels
):
rule.logname = token.text
token = self.get_token()
if token is not None:
if token.lineno == lineno:
raise FWExpectedError(lineno, token.text, 'EOL')
self.push_token(token)
return rule
def resolve_ip(self, targets, rule):
"""Resolve all targets to ip networks"""
all_ip4 = []
all_ip6 = []
all_ip = []
all_targets = []
for name in targets:
if name[0] == "!":
invert = "!"
name = name[1:]
self.error("Cannot invert addresses")
else:
invert = ""
ips = []
try:
ip = netaddr.IPNetwork(name)
if ip.network != ip.ip:
self.error(
                        FWIPMaskBoundaryError(rule.lineno, ip))
ips = [ip]
except netaddr.core.AddrFormatError as e:
if name in self.groups and \
self.groups[name].lineno is not None:
ips = self.groups[name].ips()
elif name.find(".") != -1:
hostname = Hostname(name, rule.lineno)
try:
hostname.resolve()
ips = hostname.ips()
except FWResolveError as e:
self.error(e)
else:
e = FWUndefinedError(rule.lineno, name, 'group')
self.error(e)
for ip in ips:
ipinfo = (ip.prefixlen, ip, invert)
if ipinfo not in all_ip:
all_ip.append((ip.prefixlen, ip, invert))
all_ip.sort()
all_ip.reverse()
last_ip = last_invert = None
for prefixlen, ip, invert in all_ip:
if last_ip is not None:
if last_ip == ip:
if last_invert != invert:
                        self.error((
                            "Conflicting definitions ({}, !{}, {}) "
                            "at line {}"
                        ).format(ip, invert, last_invert, rule.lineno))
continue
last_ip = ip
last_invert = invert
for target_ip, target_invert in all_targets:
if (
target_ip.size != 1 and (
(target_ip[0] >= ip[0] and target_ip[0] <= ip[-1]) or
                        (target_ip[-1] >= ip[0] and target_ip[-1] <= ip[-1]) or
(ip[0] >= target_ip[0] and ip[0] <= target_ip[-1]) or
(ip[-1] >= target_ip[0] and ip[-1] <= target_ip[-1])
)
):
self.warning(
"Overlapping ranges ({}, {}) at line {}".format(
ip, target_ip, rule.lineno))
all_targets.append((ip, invert))
if ip.version == 4:
all_ip4.append((invert, ip))
elif ip.version == 6:
all_ip6.append((invert, ip))
else:
self.error("Invalid ip version for {} at line {}".format(
ip, rule.lineno))
return all_ip4, all_ip6
def resolve_ports(self, ports, rule):
if len(ports) == 1:
if ports[0] == "all":
return [], [""]
all = []
for port in ports:
all += port.split(",")
ports = all
all = []
for port in ports:
if port[0] == "!":
invert = "!"
else:
invert = ""
if port.find("-") != -1:
p1, p2 = port.split("-")
p1 = int(p1)
p2 = int(p2)
all.append((p1, p2, invert))
else:
p = int(port)
all.append((p, p, invert))
all_raw = all
all_raw.sort()
all = []
while len(all_raw):
p1a = all_raw[0][0]
p2a = all_raw[0][1]
pia = all_raw[0][2]
del(all_raw[0])
while len(all_raw):
p1b = all_raw[0][0]
p2b = all_raw[0][1]
pib = all_raw[0][2]
if p1b <= p2a + 1:
if pia != pib:
self.error(
"Conflicting port definition at line {}".format(
rule.lineno))
break
if p2a < p2b:
p2a = p2b
del(all_raw[0])
continue
break
all.append((p1a, p2a, pia))
comma_ports = []
range_ports = []
for p1, p2, invert in all:
if invert and len(all) > 1:
self.error((
"Cannot use '!' on multiple port definitions "
"at line {}"
).format(rule.lineno))
return [], []
if p1 == p2:
if not invert:
if len(all) == 1:
range_ports.append("{}".format(p1))
else:
comma_ports.append("{}".format(p1))
else:
comma_ports.append("!{}".format(p1))
else:
range_ports.append("{}{}:{}".format(invert, p1, p2))
all = comma_ports
comma_ports = []
while len(all):
comma_ports.append(",".join(all[:15]))
del(all[:15])
if not comma_ports and not range_ports:
return [], [""]
return comma_ports, range_ports
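    # Sketch of the normalisation above (hypothetical input): ports
    # ["80", "81-90", "85-100", "443"] sort and merge into the tuples
    # (80, 100) and (443, 443); these come out as a range entry
    # "80:100" plus a comma entry "443".  Single ports are batched
    # fifteen per string to suit iptables' multiport match.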
def resolve(self):
for group in self.groups.values():
try:
group.resolve()
except FWRecursionError as e:
self.error(e)
except FWResolveError as e:
self.error(e)
for name, group in self.groups.items():
if (
group.lineno is None and
not isinstance(group, Hostname)
):
raise FWUndefinedError(group.referred_lines[0], name, 'group')
def purge_default(self, src):
dst = []
for ip in src:
if ip[1] != self.any_ipv4 and ip[1] != self.any_ipv6:
dst.append(ip)
return dst
def make_rule(self, chainnr, chainname, iface, rule):
if not rule:
if self.n_errors == 0:
self.error("Invalid rule in interface {}: {}".format(
iface, chainname))
return "", ""
# Get all source ips
srcs_ip4, srcs_ip6 = self.resolve_ip(rule.sources, rule)
dsts_ip4, dsts_ip6 = self.resolve_ip(rule.destinations, rule)
if not srcs_ip4 and dsts_ip4:
dsts_ip4 = self.purge_default(dsts_ip4)
if srcs_ip4 and not dsts_ip4:
srcs_ip4 = self.purge_default(srcs_ip4)
if not srcs_ip6 and dsts_ip6:
dsts_ip6 = self.purge_default(dsts_ip6)
if srcs_ip6 and not dsts_ip6:
srcs_ip6 = self.purge_default(srcs_ip6)
if (
not self.allow_mixed_ipv and (
(srcs_ip4 and not dsts_ip4) or
(dsts_ip4 and not srcs_ip4) or
(srcs_ip6 and not dsts_ip6) or
(dsts_ip6 and not srcs_ip6)
)
):
self.error((
"Cannot mix IPv4 and IPv6 source and "
"destination at line {}"
).format(rule.lineno))
lines_ip4 = []
lines_ip6 = []
line_ipv4 = []
line_ipv6 = []
targets = []
if rule.nat or rule.action == "masq":
line_ipv4 += ["-t nat"]
if not srcs_ip4 or not dsts_ip4:
self.error("NAT rule only valid for IPv4 at line {}".format(
rule.lineno))
else:
all = netaddr.IPNetwork("::/0")
for src in srcs_ip6:
if src[1] != all:
self.warning((
"Ignoring {} rule for IPv6 source address {} "
"at line {}"
).format(rule.action, src, rule.lineno))
for dst in dsts_ip6:
if dst[1] != all:
self.warning((
"Ignoring {} rule for IPv6 destination "
"address {} at line {}"
).format(rule.action, dst, rule.lineno))
else:
line_ipv4 += ["-t filter"]
line_ipv6 += ["-t filter"]
if rule.logging:
lineno = rule.lineno
action = rule.action
if rule.logname:
s = rule.logname
else:
s = self.logtag % locals()
if len(s) > 27:
s = s[:20] + "..." + s[-5:]
# iptables-restore needs strings in "" and not ''
targets.append((
'LOG --log-prefix "{} " --log-level {} -m limit '
'--limit 60/minute --limit-burst 10').format(s, rule.logging))
if rule.direction == "in":
line_ipv4 += ["-i", iface]
line_ipv6 += ["-i", iface]
elif rule.direction == "out":
line_ipv4 += ["-o", iface]
line_ipv6 += ["-o", iface]
else:
self.error("Invalid direction {}".format(
rule.direction, rule.lineno))
chainname = rule.chainname(chainnr, chainname, iface)
line_ipv4 += ["-p", rule.protocol]
if rule.protocol == 'icmp':
line_ipv6 += ["-p", 'icmpv6']
else:
line_ipv6 += ["-p", rule.protocol]
if srcs_ip4:
for icmp_type in rule.icmp4:
line_ipv4 += ['--icmp-type', icmp_type]
if srcs_ip6:
for icmp_type in rule.icmp6:
line_ipv6 += ['--icmpv6-type', icmp_type]
if rule.state:
if rule.protocol == 'icmp':
line_ipv4 += ["-m state --state", rule.state]
else:
line_ipv4 += ["-m state --state", rule.state]
line_ipv6 += ["-m state --state", rule.state]
line_ipv4 += ["-A {}{}".format(100 + chainnr, chainname)]
line_ipv6 += ["-A {}{}".format(100 + chainnr, chainname)]
if rule.nat:
if rule.natports:
nat = "{}:{}".format(rule.nat, rule.natports)
else:
nat = rule.nat
if rule.action == "snat":
targets.append("SNAT")
line_ipv4 += ["-j %(target)s --to-source", nat]
else:
targets.append("DNAT")
line_ipv4 += ["-j %(target)s --to-destination", nat]
else:
if rule.action == "permit":
if rule.direction == "in" and \
not rule.local:
targets.append("RETURN")
else:
targets.append("ACCEPT")
line_ipv4 += ["-j %(target)s"]
line_ipv6 += ["-j %(target)s"]
elif rule.action == "deny":
targets.append("DROP")
line_ipv4 += ["-j %(target)s"]
line_ipv6 += ["-j %(target)s"]
elif rule.action == "masq":
targets.append("MASQUERADE")
line_ipv4 += ["-j %(target)s"]
line_ipv6 += ["-j %(target)s"]
line_ipv4_start = " ".join(line_ipv4)
line_ipv6_start = " ".join(line_ipv6)
# Get all src ports (two lists: ranges and comma sep)
src_comma_ports, src_range_ports = self.resolve_ports(
rule.srcports, rule)
# Get all dst ports (two lists: ranges and comma sep)
dst_comma_ports, dst_range_ports = self.resolve_ports(
rule.dstports, rule)
        if rule.nat or rule.action == "masq":
sources = srcs_ip4
destinations = dsts_ip4
else:
sources = srcs_ip4 + srcs_ip6
destinations = dsts_ip4 + dsts_ip6
for src_invert, src_ip in sources:
if src_ip.version == 4:
line_start = line_ipv4_start
lines = lines_ip4
else:
line_start = line_ipv6_start
lines = lines_ip6
if src_ip.prefixlen == 0:
src = ""
else:
src = "--src {}{}/{}".format(
src_invert, src_ip.ip, src_ip.prefixlen)
for dst_invert, dst_ip in destinations:
if rule.nat and src_ip.version != 4:
continue
if src_ip.version != dst_ip.version:
continue
if dst_ip.prefixlen == 0:
dst = ""
else:
dst = "--dst {}{}/{}".format(
dst_invert, dst_ip.ip, dst_ip.prefixlen)
for sport in src_comma_ports:
for dport in dst_comma_ports:
for target in targets:
lines.append(" ".join([
line_start % {"target": target},
src, "-m multiport --sports", sport,
dst, "-m multiport --dports", dport,
]))
for dport in dst_range_ports:
if dport != "":
dport = "--dport {}".format(dport)
for target in targets:
lines.append(" ".join([
line_start % {"target": target},
src, "-m multiport --sports", sport,
dst, dport,
]))
for sport in src_range_ports:
if sport != "":
sport = "--sport {}".format(sport)
for dport in dst_comma_ports:
for target in targets:
lines.append(" ".join([
line_start % {"target": target},
src, sport,
dst, "-m multiport --dports", dport,
]))
for dport in dst_range_ports:
if dport != "":
dport = "--dport {}".format(dport)
for target in targets:
lines.append(" ".join([
line_start % {"target": target},
src, sport,
dst, dport,
]))
return [
line.strip() for line in lines_ip4
], [
line.strip() for line in lines_ip6
]
def make_rules(self):
chains4 = {}
chains6 = {}
ifaces_keys = list(self.ifaces.keys())
ifaces_keys.sort()
for iface in ifaces_keys:
chain_idx = 0
chain = 'ifs'
chain_idx += 1
lines_ip4 = []
lines_ip6 = []
filename = "fwm-{}".format(chain)
for rule in self.ifaces[iface]:
rule_ip4, rule_ip6 = self.make_rule(
chain_idx, chain, iface, rule)
if not rule_ip4 and not rule_ip6:
self.warning((
"Nothing to do for {} rule for IPv4 and IPv6 "
"at line {}"
).format(rule.action, rule.lineno))
lines_ip4 += rule_ip4
lines_ip6 += rule_ip6
if filename in chains4:
chains4[filename] += lines_ip4
else:
chains4[filename] = lines_ip4
if filename in chains6:
chains6[filename] += lines_ip6
else:
chains6[filename] = lines_ip6
return chains4, chains6
def write_rules(self, chains4, chains6):
if self.n_errors != 0:
return
for chainsdir, chains in [
(self.chainsdir_ipv4, chains4),
(self.chainsdir_ipv6, chains6),
]:
chainsdir = os.path.join(self.basedir, chainsdir)
if not os.path.isdir(chainsdir):
self.error("Not a directory: {}".format(chainsdir))
if self.n_errors != 0:
return
for chainsdir, chains in [
(self.chainsdir_ipv4, chains4),
(self.chainsdir_ipv6, chains6),
]:
chainsdir = os.path.join(self.basedir, chainsdir)
chains_keys = list(chains.keys())
chains_keys.sort()
for chainname in chains_keys:
fname = "{}/{}".format(chainsdir, chainname)
try:
fp = open(fname, "w")
fp.write("{}\n".format("\n".join(chains[chainname])))
fp.close()
except IOError as why:
self.error("Failed to write to file {}: {}".format(
fname, why))
for fname in os.listdir(chainsdir):
if fname[:4] != "fwm-":
continue
if fname not in chains:
os.unlink(os.path.join(chainsdir, fname))
class FWCompile(FWMacro):
tables = []
reserved_targets = [
# Always there
"ACCEPT",
"DROP",
"QUEUE",
"RETURN",
# Target extensions
"CLASSIFY",
"CLUSTERIP",
"CONNMARK",
"CONNSECMARK",
"DNAT",
"DSCP",
"ECN",
"HL",
"LOG",
"MARK",
"MASQUERADE",
"MIRROR",
"NETMAP",
"NFLOG",
"NFQUEUE",
"NOTRACK",
"RATEEST",
"REDIRECT",
"REJECT",
"SAME",
"SECMARK",
"SET",
"SNAT",
"TCPMSS",
"TCPOPTSTRIP",
"TOS",
"TRACE",
"TTL",
"ULOG",
]
re_ignore_chain_file = re.compile("([.].*)|(CVS)")
re_heading = re.compile("target\s+prot\s+opt\s+source\s+destination")
re_policy = re.compile("Chain ([^ ]+) [(]policy .*")
re_chain = re.compile("Chain ([^ ]+) [(].* references.*")
re_jump = re.compile("([^ ]+).+all.+--.+0.0.0.0/0.+0.0.0.0/0.*")
re_get_table = re.compile("-t ([^ ]+)")
re_get_chain = re.compile("-A ([^ ]+)")
re_numchain = re.compile("(\d+)(.*)")
re_chaindef = re.compile("(\d+)?(.*:)?(.*)")
re_table_rule = re.compile("(.*)(-t \S+)(.*)")
builtin_chains = {
"filter": ["INPUT", "OUTPUT", "FORWARD"],
"nat": ["PREROUTING", "POSTROUTING", "OUTPUT"],
"mangle": ["INPUT", "OUTPUT", "FORWARD", "PREROUTING", "POSTROUTING"],
"raw": ["PREROUTING", "OUTPUT"],
}
def __init__(self, remove_all_chains=False, verbose=False):
self.remove_all_chains = remove_all_chains
self.verbose = verbose
self.n_errors = 0
self.n_warnings = 0
self.newchains = {}
self.filechains = {}
self.reserved_chains = {}
for table in self.tables:
self.reserved_chains[table] = []
def log(self, level, msg):
sys.stderr.write("{}\n".format(msg))
# def log_error(self, msg, lineno=None):
# if self.n_errors > 10:
# sys.exit(1)
# self.n_errors += 1
# if lineno is not None:
# self.log(syslog.LOG_ERR, "line %d, %s" % (lineno, msg))
# return
# self.log(syslog.LOG_ERR, "%s" % msg)
# def log_warning(self, msg, lineno=None):
# self.nwarnings += 1
# if lineno is not None:
# self.log(syslog.LOG_WARNING, "line %d, %s" % (lineno, msg))
# return
# self.log(syslog.LOG_WARNING, "%s" % msg)
def parentchains(self, table, chain):
match = self.re_chaindef.match(chain)
parentchains = self.builtin_chains[table]
short = chain.split("-")[0]
if short == "IN":
parentchains = ["INPUT"]
elif short == "OUT":
parentchains = ["OUTPUT"]
elif short == "FWD":
parentchains = ["FORWARD"]
elif short == "PRE":
parentchains = ["PREROUTING"]
elif short == "POST":
parentchains = ["POSTROUTING"]
if len(parentchains) == 1 and \
parentchains[0] not in self.builtin_chains[table]:
            # No source line is available here; pass None as lineno.
            raise FWInvalidParentChain(None, table, chain)
if table == "filter":
if match.group(2):
                # Local rules are in chains that start with either
                # I or O. Other rules start with i or o.
if match.group(2)[0] == "I":
parentchains = ["INPUT"]
elif match.group(2)[0] == "O":
parentchains = ["OUTPUT"]
else:
parentchains = ["FORWARD"]
elif table == "nat":
if match.group(2):
if match.group(2)[0] == "i":
parentchains = ["PREROUTING"]
elif match.group(2)[0] == "o":
parentchains = ["POSTROUTING"]
elif table == "mangle":
if match.group(2):
if match.group(2)[0] == "i":
parentchains = ["INPUT", "PREROUTING", "FORWARD"]
elif match.group(2)[0] == "o":
parentchains = ["OUTPUT", "POSTROUTING", "FORWARD"]
return parentchains
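    # Illustrative mapping (chain names hypothetical): "IN-web" hooks
    # into INPUT, "OUT-web" into OUTPUT, "FWD-dmz" into FORWARD.  For
    # per-interface chains in the filter table, "50Ieth0:web" (local,
    # capital I) goes to INPUT while "ieth0:web" falls through to
    # FORWARD, matching the comment logic above.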
def read_chain_file(self, fpath):
try:
fp = open(fpath, "r")
data = fp.read()
fp.close()
except IOError as why:
sys.stderr.write(
"Error reading file '{}': {}\n".format(fpath, why))
sys.exit(1)
return data
def read_chain_files(self, chainsfiles):
chainsfiles.sort()
chainorder = {}
for table in self.tables:
self.newchains[table] = {}
chainorder[table] = {}
if not self.remove_all_chains:
for fname in chainsfiles:
if self.re_ignore_chain_file.match(fname):
continue
self.filechains[fname] = {}
for table in self.tables:
self.filechains[fname][table] = {}
fpath = "{}/{}".format(self.chainsdir, fname)
data = self.read_chain_file(fpath)
lines = data.split("\n")
linenr = 0
for line in lines:
linenr += 1
line = line.split("#")[0]
line = line.strip()
m = self.re_get_table.search(line)
if m:
table = m.group(1)
else:
table = "filter"
m = self.re_get_chain.search(line)
if not m:
# Hmm... No chain name?
continue
num = 50
chain = m.group(1)
m = self.re_chaindef.match(chain)
if m:
                        if m.group(1):
                            num = int(m.group(1))
if m.group(2):
newchain = "{}{}".format(m.group(2), m.group(3))
else:
newchain = m.group(3)
line = line.replace(
"-A {}".format(chain), "-A {}".format(newchain))
chain = newchain
if chain in self.reserved_chains[table]:
                        raise FWReservedChainName(linenr, fname, chain)
if chain not in self.filechains[fname][table]:
self.filechains[fname][table][chain] = []
self.filechains[fname][table][chain].append(line)
self.newchains[table][(num, chain)] = 1
if chain not in chainorder[table]:
chainorder[table][chain] = (num, fname, linenr)
elif chainorder[table][chain][0] != num:
                        raise FWOrderConflict(
                            linenr,
                            fname,
                            chain,
                            chainorder[table][chain],
                        )
for table in self.tables:
sortchains = list(self.newchains[table].keys())
sortchains.sort()
self.newchains[table] = []
for order, chain in sortchains:
if chain not in self.newchains[table]:
self.newchains[table].append(chain)
def generate_restore_file(self, rule_file):
targets = {}
rules = {}
for table in self.tables:
targets[table] = []
rules[table] = []
for chain in self.newchains[table]:
if table not in self.tables:
raise FWInvalidTable(table)
targets[table].append(chain)
parentchains = self.parentchains(table, chain)
for pchain in parentchains:
m = self.re_chaindef.match(chain)
if m.group(2):
iface = m.group(2)[1:-1]
direction = m.group(2)[0].lower()
rules[table].append("-A {} -{} {} -j {}".format(
pchain, direction, iface, chain))
else:
rules[table].append("-A {} -j {}".format(
pchain, chain))
for table in self.tables:
for chain in self.newchains[table]:
for fname in self.filechains.keys():
if chain in self.filechains[fname][table]:
for line in self.filechains[fname][table][chain]:
match = self.re_table_rule.match(line)
if match:
line = "{} {}".format(
match.group(1).strip(),
match.group(3).strip(),
)
rules[table].append(line.strip())
if rule_file == "-" or not rule_file:
fp = sys.stdout
elif hasattr(rule_file, "seek"):
fp = rule_file
else:
fp = open(rule_file, "w")
fp.write("# Generated with {} {} at {}\n".format(
self.__class__.__name__,
".".join([str(i) for i in __version__]),
time.ctime(),
))
for table in self.tables:
fp.write("*{}\n".format(table))
if self.remove_all_chains or table != "filter":
policy = "ACCEPT"
else:
policy = "DROP"
for target in self.builtin_chains[table]:
fp.write(":{} {} [0:0]\n".format(target, policy))
for target in targets[table]:
fp.write(":{} - [0:0]\n".format(target))
for rule in rules[table]:
fp.write("{}\n".format(rule))
fp.write("COMMIT\n")
class FWCompileIPv4(FWCompile):
tables = ["raw", "mangle", "nat", "filter"]
class FWCompileIPv6(FWCompile):
tables = ["filter", "mangle"]
def fwmpp():
import optparse
parser = optparse.OptionParser(
usage="""\
usage: %%prog [options] FILE
Rule format:
{pp.rule_explanation}
Defaults:
{pp.default_rule}
ICMP options:
{pp.default_icmp4options}
ICMPv6 options:
{pp.default_icmp6options}
""".format(pp=FWPreprocess))
parser.add_option(
"-V", "--version",
action="store_true",
dest="version",
default=False,
help="show version and exit",
)
parser.add_option(
"--base",
dest="basedir",
default="/etc/fwmacro",
metavar="DIRECTORY",
help="Set the base path (default: '{}')".format(FWMacro.basedir),
)
parser.add_option(
"--ipv4-chains",
action="store",
dest="chainsdir_ipv4",
default=FWPreprocess.chainsdir_ipv4,
metavar="DIRECTORY",
help="directory with iptables chains (default: {})".format(
FWPreprocess.chainsdir_ipv4),
)
parser.add_option(
"--ipv6-chains",
action="store",
dest="chainsdir_ipv6",
default=FWPreprocess.chainsdir_ipv6,
metavar="DIRECTORY",
help="directory with ip6tables chains (default: {})".format(
FWPreprocess.chainsdir_ipv6),
)
parser.add_option(
"--logtag",
action="store",
dest="logtag",
default=FWPreprocess.logtag,
help="log tag template (default: '%s')" % FWPreprocess.logtag,
)
parser.add_option(
"--force-groups",
action="store_true",
dest="force_groups",
default=False,
help="Force the use of groups (default: '%s')" % False,
)
opts, args = parser.parse_args()
if opts.version:
print("Version: {}".format(".".join([str(i) for i in __version__])))
sys.exit(0)
if len(args) > 1:
sys.stderr.write("Too many arguments")
sys.exit(1)
fpp = FWPreprocess()
fpp.basedir = os.path.abspath(opts.basedir)
fpp.chainsdir_ipv4 = opts.chainsdir_ipv4
fpp.chainsdir_ipv6 = opts.chainsdir_ipv6
fpp.logtag = opts.logtag
fpp.force_groups = opts.force_groups
try:
if not args:
fpp.parse(sys.stdin)
else:
fpp.parse(open(args[0], 'r'))
fpp.resolve()
chains4, chains6 = fpp.make_rules()
except FWMacroException as e:
fpp.error(e)
sys.exit(1)
if fpp.n_errors == 0:
fpp.write_rules(chains4, chains6)
else:
sys.stderr.write("Errors (%s)\n" % fpp.n_errors)
sys.exit(1)
sys.exit(0)
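# Typical invocation of the two entry points (a sketch; the rules path
# is hypothetical):
#
#     fwmpp /etc/fwmacro/fw.rules   # preprocess rules into chain files
#     fwmc start                    # compile chain files into *-restore files
#     fwmc stop                     # generate open (ACCEPT) rule files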
def fwmc():
import optparse
parser = optparse.OptionParser(
usage="""\
usage: %prog [options] start | stop
""",
)
parser.add_option(
"-V", "--version",
action="store_true",
dest="version",
default=False,
help="show version and exit",
)
parser.add_option(
"--verbose",
action="store_true",
dest="verbose",
default=False,
help="verbose messages",
)
parser.add_option(
"--base",
dest="basedir",
default="/etc/fwmacro",
metavar="DIRECTORY",
help="Set the base path (default: '{}')".format(FWMacro.basedir),
)
parser.add_option(
"--ipv4-rules",
action="store",
dest="ipv4_rules",
default=FWCompile.chainsfile_ipv4,
help="filename of generated iptables-restore file",
)
parser.add_option(
"--ipv6-rules",
action="store",
dest="ipv6_rules",
default=FWCompile.chainsfile_ipv6,
help="filename of generated ip6tables-restore file",
)
parser.add_option(
"--no-ipv4",
action="store_false",
dest="ipv4",
default=True,
help="no iptables chains (ipv4)",
)
parser.add_option(
"--no-ipv6",
action="store_false",
dest="ipv6",
default=True,
help="no ip6tables chains (ipv6)",
)
parser.add_option(
"--ipv4-chains",
action="store",
dest="chainsdir_ipv4",
default=FWCompile.chainsdir_ipv4,
metavar="DIRECTORY",
help="directory with iptables chains (default: {})".format(
FWCompile.chainsdir_ipv4),
)
parser.add_option(
"--ipv6-chains",
action="store",
dest="chainsdir_ipv6",
default=FWCompile.chainsdir_ipv6,
metavar="DIRECTORY",
help="directory with ip6tables chains (default: {})".format(
FWCompile.chainsdir_ipv6),
)
parser.add_option(
"--reserved-target",
action="append",
dest="reserved_targets",
default=FWCompile.reserved_targets,
help=(
"reserved target (e.g. ACCEPT) that "
"will not be mapped to a chain"
),
)
def no_reserved_target(option, opt_str, value, parser, *args, **kwargs):
FWCompile.reserved_targets.remove(value)
parser.add_option(
"--no-reserved-target",
type="string",
action="callback",
callback=no_reserved_target,
help="not a reserved target (remove from reserved targets list)",
)
parser.add_option(
"--help-reserved-target",
action="store_true",
dest="help_reserved_target",
default=False,
help="show help on reserved targets",
)
default_reserved_targets = [] + FWCompile.reserved_targets
opts, args = parser.parse_args()
if opts.version:
print("Version: {}".format(".".join([str(i) for i in __version__])))
sys.exit(0)
if opts.help_reserved_target:
print("Default reserved targets:")
indent = 4 * " "
line = ""
while default_reserved_targets:
if line:
new_line = ", ".join([line, default_reserved_targets[0]])
else:
new_line = default_reserved_targets[0]
if len(new_line) < 80 - len(indent):
line = new_line
del(default_reserved_targets[0])
            elif not line:
                print("{}{}".format(indent, new_line))
                del(default_reserved_targets[0])
            else:
                print("{}{}".format(indent, line))
                line = ""
        if line:
            print("{}{}".format(indent, line))
        sys.exit(0)
if len(args) == 0:
args = ["start"]
elif len(args) != 1:
sys.stderr.write("Too many arguments\n")
sys.exit(1)
if args[0] not in ["start", "stop"]:
sys.stderr.write("Invalid argument '%s'\n" % args[0])
sys.exit(1)
remove_all_chains = False
if args[0] in ["stop"]:
remove_all_chains = 1
if opts.ipv4:
fc = FWCompileIPv4(
remove_all_chains=remove_all_chains,
verbose=opts.verbose,
)
fc.basedir = os.path.abspath(opts.basedir)
fc.chainsdir = opts.chainsdir_ipv4
chainsfiles = os.listdir(fc.chainsdir)
try:
fc.read_chain_files(chainsfiles)
fc.generate_restore_file(opts.ipv4_rules)
except FWMacroException as e:
fc.error(e)
if opts.ipv6:
fc = FWCompileIPv6(
remove_all_chains=remove_all_chains,
verbose=opts.verbose,
)
fc.basedir = os.path.abspath(opts.basedir)
fc.chainsdir = opts.chainsdir_ipv6
chainsfiles = os.listdir(fc.chainsdir)
try:
fc.read_chain_files(chainsfiles)
fc.generate_restore_file(opts.ipv6_rules)
except FWMacroException as e:
fc.error(e)
sys.exit(0)
def main():
progname = os.path.basename(sys.argv[0])
if progname in ["fwmpp", "fwmpp.py"]:
fwmpp()
elif progname in ["fwmc", "fwmc.py"]:
fwmc()
elif len(sys.argv) > 1 and sys.argv[1] == '--fwmpp':
del(sys.argv[0])
fwmpp()
elif len(sys.argv) > 1 and sys.argv[1] == '--fwmc':
del(sys.argv[0])
fwmc()
else:
sys.stderr.write("Invalid invocation as '%s'\n" % progname)
exit(1)
if __name__ == '__main__':
main()
| keesbos/fwmacro | fwmacro.py | Python | mit | 78,450 |
# Copyright 2008 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
# SocketError exception
class SocketError(Exception):
def __init__(self, msg):
self.msg = msg
def socket_read_all(sock):
"""Read all data waiting to be read on the given socket. The first attempt
to read will be a blocking call, and all subsequent reads will be
non-blocking."""
msg = ''
old_timeout = sock.gettimeout()
data = sock.recv(1024)
sock.settimeout(0.1)
try:
while len(data) != 0:
msg += data
data = sock.recv(1024)
    except socket.timeout:
        pass
    except socket.error as error:
        try:
            close_socket(sock)
        except Exception:
            pass
        raise SocketError('socket error %d: %s' % (
            error.args[0], error.args[1]))
sock.settimeout(old_timeout)
return msg
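# Minimal usage sketch (hypothetical server loop, error handling
# elided; not part of the original module):
#
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind(('', 9600))
#     server.listen(1)
#     conn, addr = server.accept()
#     request = socket_read_all(conn)  # blocks only for the first chunk
#     close_socket(conn)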
def close_socket(connection):
"""Performs a shutdown and a close on the socket. Any errors
are logged to the system logging service. Raises a SocketError
if any errors are encountered"""
try:
try:
            # python 2.3 doesn't have SHUT_WR defined, so use its value (1)
connection.shutdown(1)
        except socket.error as error:
            raise SocketError('socket error %d: %s' % (
                error.args[0], error.args[1]))
try:
data = connection.recv(4096)
while len(data) != 0:
data = connection.recv(4096)
except:
pass
finally:
connection.close()
connection = None
| htcondor/job_hooks | module/socketutil.py | Python | apache-2.0 | 2,064 |
from django.contrib import admin
from itxland.search.models import SearchTerm
class SearchTermAdmin(admin.ModelAdmin):
    list_display = ('__unicode__', 'ip_address', 'search_date')
list_filter = ('ip_address', 'user', 'q')
exclude = ('user',)
admin.site.register(SearchTerm, SearchTermAdmin)
| davidhenry/ITX-Land | search/admin.py | Python | mit | 293 |
#!/usr/bin/env python
# *-* coding: UTF-8 *-*
"""Tuxy dorește să împlementeze un nou paint pentru consolă.
În timpul dezvoltării proiectului s-a izbit de o problemă
pe care nu o poate rezolva singur și a apelat la ajutorul tău.
El dorește să adauge o unealtă care să permită umplerea unei
forme închise.
Exemplu:
Pornim de la imaginea inițială reprezentată mai jos, trebuie să
umplem formele în care se află "x":
|-----*------| |******------| |************|
|--x--*------| |******------| |************|
|******------| -----> |******------| -----> |************|
|-----******-| |-----******-| |-----*******|
|-----*---*--| |-----*---*--| |-----*---***|
|-----*---*-x| |-----*---*--| |-----*---***|
"""
def on_image(imagine, row, column):
"""
    Check whether the point, given by its coordinates (row and
    column), lies on the image. Returns True if the point is on
    the image, False otherwise.
"""
return not ((row < 0) or (row > len(imagine) - 1) or
(column < 0) or (column > len(imagine[0]) - 1))
def recursive_fill(imagine, row, column):
"""
    The function receives the image and the coordinates of an empty
    point ('-' in the matrix), colors it (the '*' character), then
    checks the 4 neighbouring points. If a neighbour is empty, the
    function calls itself on the same image with that neighbour.
"""
imagine[row][column] = '*'
if on_image(imagine, row, column - 1) and imagine[row][column - 1] == '-':
recursive_fill(imagine, row, column - 1)
if on_image(imagine, row, column + 1) and imagine[row][column + 1] == '-':
recursive_fill(imagine, row, column + 1)
if on_image(imagine, row - 1, column) and imagine[row - 1][column] == '-':
recursive_fill(imagine, row - 1, column)
if on_image(imagine, row + 1, column) and imagine[row + 1][column] == '-':
recursive_fill(imagine, row + 1, column)
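# A stack-based variant (a sketch, not part of the original exercise)
# that avoids Python's recursion limit on large images:
#
#     def iterative_fill(imagine, row, column):
#         stack = [(row, column)]
#         while stack:
#             r, c = stack.pop()
#             if on_image(imagine, r, c) and imagine[r][c] == '-':
#                 imagine[r][c] = '*'
#                 stack.extend([(r, c - 1), (r, c + 1),
#                               (r - 1, c), (r + 1, c)])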
def umple_forma(imagine, punct):
"""Funcția primește reprezentarea imaginii și coordonatele unui
punct.
În cazul în care punctul se află într-o formă închisă trebuie să
umple forma respectivă cu caracterul "*"
"""
if not on_image(imagine, punct[0], punct[1]):
return "Punctul {} nu se afla pe imagine.".format(punct)
if imagine[punct[0]][punct[1]] == '-':
recursive_fill(imagine, punct[0], punct[1])
def main():
""" Main function docstring """
imaginea = [
["-", "-", "-", "-", "-", "*", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "*", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "*", "-", "-", "-", "-", "-", "-"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "-"],
["-", "-", "-", "-", "-", "*", "-", "*", "-", "-", "*", "-"],
["-", "-", "-", "-", "-", "*", "-", "*", "-", "-", "*", "-"],
]
umple_forma(imaginea, (1, 3))
umple_forma(imaginea, (5, 11))
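    # To inspect the filled result one could print each row, e.g.:
    #   for rand in imaginea:
    #       print(''.join(rand))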
if __name__ == "__main__":
main()
| c-square/python-lab | python/solutii/daniel_toncu/paint/fill.py | Python | mit | 3,131 |
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # nopyflakes
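# Python 2/3 compatibility shim: Python 2 ships StringIO.StringIO, while
# Python 3 moved it to io.StringIO; importing it from here keeps call sites
# version-agnostic.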
| SurfasJones/djcmsrc3 | venv/lib/python2.7/site-packages/cms/utils/compat/string_io.py | Python | mit | 101 |
# Created By: Virgil Dupras
# Created On: 2009-11-01
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from PyQt5.QtCore import Qt
from qtlib.column import Column
from ..account_sheet import AccountSheet
class ProfitSheet(AccountSheet):
COLUMNS = [
Column('name', 133),
Column('account_number', 80),
Column('cash_flow', 100, alignment=Qt.AlignRight),
Column('last_cash_flow', 100, alignment=Qt.AlignRight),
Column('delta', 100, alignment=Qt.AlignRight),
Column('delta_perc', 100),
Column('budgeted', 100, alignment=Qt.AlignRight),
]
AMOUNT_ATTRS = {'cash_flow', 'last_cash_flow', 'delta', 'delta_perc', 'budgeted'}
BOLD_ATTRS = {'cash_flow', }
| fokusov/moneyguru | qt/controller/profit/sheet.py | Python | gpl-3.0 | 953 |
#!/usr/bin/env python
# Copyright 2013 YouView TV Ltd.
# License: LGPL v2.1 or (at your option) any later version (see
# https://github.com/drothlis/stb-tester/blob/master/LICENSE for details).
"""Generates reports from logs of stb-tester test runs created by 'run'."""
import collections
import glob
import itertools
import os
import re
import sys
from datetime import datetime
from os.path import abspath, basename, dirname, isdir
import jinja2
escape = jinja2.Markup.escape
templates = jinja2.Environment(loader=jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), "templates")))
def main(argv):
usage = "Usage: report (index.html | <testrun directory>)"
if len(argv[1:]) == 0:
die(usage)
if argv[1] in ("-h", "--help"):
print usage
sys.exit(0)
for target in argv[1:]:
if isdir(target):
match = re.match(
r"(.*/)?\d{4}-\d{2}-\d{2}_\d{2}\.\d{2}\.\d{2}(-[^/]+)?$",
abspath(target))
if match:
testrun(match.group())
elif target.endswith("index.html"):
index(dirname(target))
else:
die("Invalid target '%s'" % target)
def index(parentdir):
rundirs = [
dirname(x) for x in glob.glob(
os.path.join(parentdir, "????-??-??_??.??.??*/test-name"))]
runs = [Run(d) for d in sorted(rundirs, reverse=True)]
if len(runs) == 0:
die("Directory '%s' doesn't contain any testruns" % parentdir)
print templates.get_template("index.html").render(
name=basename(abspath(parentdir)).replace("_", " "),
runs=runs,
extra_columns=set(
itertools.chain(*[x.extra_columns.keys() for x in runs])),
).encode('utf-8')
def testrun(rundir):
print templates.get_template("testrun.html").render(
run=Run(rundir),
).encode('utf-8')
class Run(object):
def __init__(self, rundir):
self.rundir = rundir
try:
self.exit_status = int(self.read("exit-status"))
except ValueError:
self.exit_status = "still running"
self.duration = self.read_seconds("duration")
self.failure_reason = self.read("failure-reason")
self.git_commit = self.read("git-commit")
self.notes = self.read("notes")
self.test_args = self.read("test-args")
self.test_name = self.read("test-name")
if self.exit_status != "still running":
self.files = sorted([
basename(x) for x in glob.glob(rundir + "/*")
if basename(x) not in [
"duration",
"exit-status",
"extra-columns",
"failure-reason",
"git-commit",
"test-args",
"test-name",
]
and not x.endswith(".png")
and not x.endswith(".manual")
and not basename(x).startswith("index.html")
])
self.images = sorted([
basename(x) for x in glob.glob(rundir + "/*.png")])
self.extra_columns = collections.OrderedDict()
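        # "extra-columns" is assumed to hold one "<column>\t<value>" pair per
        # line; repeated column names accumulate their values in a list.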
for line in self.read("extra-columns").splitlines():
column, value = line.split("\t", 1)
self.extra_columns.setdefault(column.strip(), [])
self.extra_columns[column.strip()].append(value.strip())
t = re.match(
r"\d{4}-\d{2}-\d{2}_\d{2}\.\d{2}\.\d{2}", basename(rundir))
assert t, "Invalid rundir '%s'" % rundir
self.timestamp = datetime.strptime(t.group(), "%Y-%m-%d_%H.%M.%S")
def css_class(self):
if self.exit_status == "still running":
return "muted" # White
elif self.exit_status == 0:
return "success"
elif self.exit_status == 1:
return "error" # Red: Possible system-under-test failure
else:
return "warning" # Yellow: Test infrastructure error
def read(self, f):
f = os.path.join(self.rundir, f)
if os.path.exists(f + ".manual"):
return escape(open(f + ".manual").read().decode('utf-8').strip())
elif os.path.exists(f):
return open(f).read().decode('utf-8').strip()
else:
return ""
def read_seconds(self, f):
s = self.read(f)
try:
s = int(s)
except ValueError:
s = 0
return "%02d:%02d:%02d" % (s / 3600, (s % 3600) / 60, s % 60)
def die(message):
sys.stderr.write("report.py: %s\n" % message)
sys.exit(1)
if __name__ == "__main__":
main(sys.argv)
| wmanley/stb-tester | stbt-batch.d/report.py | Python | lgpl-2.1 | 4,660 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
def get_notification_config():
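	# Each DocType maps to the filter defining an "open" document for the
	# desktop notification counts; tuples use Frappe's operator syntax,
	# e.g. ("<", 2) means docstatus < 2.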
return { "for_doctype":
{
"Issue": {"status": "Open"},
"Warranty Claim": {"status": "Open"},
"Task": {"status": "Open"},
"Project": {"status": "Open"},
"Lead": {"status": "Open"},
"Contact": {"status": "Open"},
"Opportunity": {"status": "Open"},
"Quotation": {"docstatus": 0},
"Sales Order": {
"status": ("not in", ("Stopped", "Completed", "Closed")),
"docstatus": ("<", 2)
},
"Journal Entry": {"docstatus": 0},
"Sales Invoice": { "outstanding_amount": (">", 0), "docstatus": ("<", 2) },
"Purchase Invoice": {"docstatus": 0},
"Leave Application": {"status": "Open"},
"Expense Claim": {"approval_status": "Draft"},
"Job Applicant": {"status": "Open"},
"Purchase Receipt": {"docstatus": 0},
"Delivery Note": {"docstatus": 0},
"Stock Entry": {"docstatus": 0},
"Material Request": {"docstatus": 0},
"Purchase Order": {
"status": ("not in", ("Stopped", "Completed", "Closed")),
"docstatus": ("<", 2)
},
"Production Order": { "status": "In Process" },
"BOM": {"docstatus": 0},
"Timesheet": {"docstatus": 0},
"Time Log": {"status": "Draft"},
"Time Log Batch": {"status": "Draft"}
}
}
| shft117/SteckerApp | erpnext/startup/notifications.py | Python | agpl-3.0 | 1,380 |
import json
from django_api_tools.APIModel import APIModel, UserAuthCode
from django_api_tools.APIView import APIUrl, ReservedURL, StatusCode
from django_api_tools.tests.models import Foo, Bar, Baz, Qux, TestProfile
from django_api_tools.tests.views import TestAPIView
from django.test import TestCase
from django.test.client import RequestFactory, Client
from django.contrib.auth.models import AnonymousUser, User
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.core.exceptions import ObjectDoesNotExist
__author__ = 'szpytfire'
class APIToolsTestCase(TestCase):
def assertDictKeysEqual(self, dict, keys):
# For related fields, APIModel cuts off the special related syntax when dictifying
# We should therefore do the same when testing for the correct keys
for index, val in enumerate(keys):
prefix = filter(lambda prefix: val.startswith(prefix), APIModel._reserved_prefixes)
if prefix:
keys[index] = keys[index][len(prefix[0]) + 1:]
self.assertSetEqual(set(dict.keys()), set(keys))
class APIModelTestCase(APIToolsTestCase):
fixtures = ['user_testprofile_foo.json', 'bar_baz_qux.json']
def remove_foreign_key_fields(self, fields):
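        # APIModel strips the special related-field prefixes (fk_short_,
        # rel_long_, ...) when dictifying, so expected key lists must be
        # stripped the same way before comparison.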
return [field for field in fields if not filter(lambda prefix: field.startswith(prefix), APIModel._reserved_prefixes)]
def test_dictify(self):
foo = Foo.objects.get(id=1)
foo._curr_user = AnonymousUser()
# Test no fields to include returns empty dict
self.assertDictEqual(foo.dictify([], False), {})
# Test random fields to include returns empty dict
self.assertDictEqual(foo.dictify(['bar1', 'bar2'], False), {})
# Test defaults to public user
self.assertDictKeysEqual(foo.dictify(Foo.long_description_fields, False), list(Foo.public_fields))
# Test correct registered user fields returned
foo._user_auth = UserAuthCode.REGISTERED_USER
self.assertDictKeysEqual(foo.dictify(Foo.long_description_fields, False), list(Foo.public_fields + Foo.registered_user_fields))
# Test correct owner fields returned
foo._user_auth = UserAuthCode.OWNER
self.assertDictKeysEqual(foo.dictify(Foo.long_description_fields, False), list(Foo.public_fields + Foo.registered_user_fields + Foo.owner_only_fields))
def test_dictify_helper(self):
user = User.objects.get(id=1)
foo = Foo.objects.get(id=1)
foo.set_user_auth(user)
# Test no dictified fields returned for empty fields to return
self.assertDictEqual(foo.dictify_helper(Foo.public_fields, [], False), {})
# Test no dictified fields returned for fields which aren't in the auth level
self.assertDictEqual(foo.dictify_helper(Foo.public_fields, ['bar1', 'bar2'], False), {})
# Test regular field is set in the dictionary
dictified_foo = foo.dictify_helper(Foo.public_fields, Foo.public_fields, False)
self.assertEqual(dictified_foo['id'], foo.id)
# Test invalid regular fields is set as None
non_existent_field = ('test', )
dictified_foo = foo.dictify_helper(non_existent_field, non_existent_field, False)
self.assertIsNone(dictified_foo[non_existent_field[0]])
# Test invalid related field is set as None
non_existent_rel_field = ('fk_short_test', )
dictified_foo = foo.dictify_helper(non_existent_rel_field, non_existent_rel_field, False)
self.assertIsNone(dictified_foo['test'])
# Test fk_short only returns the foreign model's ID
fk_short_field = ('fk_short_baz', )
bar = Bar.objects.get(id=1)
bar.set_user_auth(user)
dictified_bar = bar.dictify_helper(fk_short_field, fk_short_field, False)
self.assertEqual(len(dictified_bar), 1)
self.assertDictKeysEqual(dictified_bar['baz'], self.remove_foreign_key_fields(bar.baz.short_description_fields))
# Test fk_long returns the foreign model's dictify_long()
fk_long_field = ('fk_long_baz', )
dictified_bar = bar.dictify_helper(fk_long_field, fk_long_field, False)
self.assertEqual(len(dictified_bar), 1)
self.assertDictKeysEqual(dictified_bar['baz'], self.remove_foreign_key_fields(bar.baz.short_description_fields + bar.baz.long_description_fields))
# Test onetoone_short only returns the foreign model's ID
onetoone_short_field = ('onetoone_short_owner', )
dictified_foo = foo.dictify_helper(onetoone_short_field, onetoone_short_field, False)
self.assertEqual(len(dictified_foo), 1)
self.assertDictKeysEqual(dictified_foo['owner'], self.remove_foreign_key_fields(foo.owner.short_description_fields))
# Test onetoone_long returns the foreign model's dictify_long()
fk_long_field = ('onetoone_long_owner', )
qux = Qux.objects.get(id=1)
qux.set_user_auth(user)
dictified_qux = qux.dictify_helper(fk_long_field, fk_long_field, False)
self.assertEqual(len(dictified_qux), 1)
self.assertDictKeysEqual(dictified_qux['owner'], self.remove_foreign_key_fields(qux.owner.short_description_fields + qux.owner.long_description_fields))
# Test rel_short only returns the related models' ID's
rel_short_field = ('rel_short_bars', )
baz = Baz.objects.get(id=1)
baz.set_user_auth(user)
dictified_baz = baz.dictify_helper(rel_short_field, rel_short_field, False)
self.assertEqual(len(dictified_baz), 1)
self.assertEqual(len(dictified_baz['bars']), baz.bars.all().count())
self.assertDictKeysEqual(dictified_baz['bars'][0], self.remove_foreign_key_fields(baz.bars.all()[0].short_description_fields))
# Test rel_long returns the related models' dictify_long()
rel_long_field = ('rel_long_bars', )
dictified_baz = baz.dictify_helper(rel_long_field, rel_long_field, False)
self.assertEqual(len(dictified_baz), 1)
self.assertEqual(len(dictified_baz['bars']), baz.bars.all().count())
self.assertDictKeysEqual(dictified_baz['bars'][0], self.remove_foreign_key_fields(baz.bars.all()[0].short_description_fields + baz.bars.all()[0].long_description_fields))
# Test m2m_short only returns the related models' ID's
m2m_short_field = ('m2m_short_foos', )
qux = Qux.objects.get(id=1)
qux.set_user_auth(user)
qux.foos.add(foo)
dictified_qux = qux.dictify_helper(m2m_short_field, m2m_short_field, False)
self.assertEqual(len(dictified_qux), 1)
self.assertEqual(len(dictified_qux['foos']), qux.foos.all().count())
self.assertDictKeysEqual(dictified_qux['foos'][0], self.remove_foreign_key_fields(qux.foos.all()[0].short_description_fields))
# Test m2m_long returns the related models' dictify_long()
m2m_long_field = ('m2m_long_foos', )
dictified_qux = qux.dictify_helper(m2m_long_field, m2m_long_field, False)
self.assertEqual(len(dictified_qux), 1)
self.assertEqual(len(dictified_qux['foos']), qux.foos.all().count())
self.assertDictKeysEqual(dictified_qux['foos'][0], self.remove_foreign_key_fields(qux.foos.all()[0].short_description_fields + qux.foos.all()[0].long_description_fields))
def test_dictify_short(self):
# Test that the method only returns the short description fields
foo = Foo.objects.get(id=1)
self.assertDictKeysEqual(foo.dictify_short(False), Foo.short_description_fields)
def test_dictify_long(self):
# Test that the method returns the long and short description fields
foo = Foo.objects.get(id=1)
owner = TestProfile.objects.get(id=1).user
foo.set_user_auth(owner)
self.assertDictKeysEqual(foo.dictify_long(False), list(Foo.short_description_fields + Foo.long_description_fields))
def test_dictify_with_auth(self):
active_foo = Foo.objects.get(id=1)
deactivated_foo = Foo.objects.filter(active=0)[0]
owner = User.objects.get(id=1)
not_owner = User.objects.get(id=2)
public_user = AnonymousUser()
# Test whether a deactivated instance returns None
self.assertIsNone(deactivated_foo.dictify_with_auth(owner, False))
# Test whether a public user only sees the public fields
self.assertDictKeysEqual(active_foo.dictify_with_auth(public_user, False), list(Foo.public_fields))
# Test whether an owner can view all the fields
self.assertDictKeysEqual(active_foo.dictify_with_auth(owner, False), list(Foo.public_fields + Foo.registered_user_fields + Foo.owner_only_fields))
# Test whether a registered user sees registered user + public fields
self.assertDictKeysEqual(active_foo.dictify_with_auth(not_owner, False), list(Foo.public_fields + Foo.registered_user_fields))
def test_is_owner(self):
# Test ownership of Foo
foo = Foo.objects.get(id=1)
        # Test Foo with its rightful owner
owner = User.objects.get(id=1)
self.assertTrue(foo.is_owner(owner))
# Test Foo with an incorrect owner
not_owner = User.objects.get(id=2)
self.assertFalse(foo.is_owner(not_owner))
# Test Bar with an arbitrary user - Bar's don't have an owner.
bar = Bar.objects.get(id=1)
self.assertTrue(bar.is_owner(owner))
def test_get_all(self):
user = User.objects.get(id=1)
# Test number of Foo's equal to 10
self.assertEqual(len(Foo.get_all(1, user)), Foo.pagination)
# Test number of Bar's equal to number of Bar's (< 10)
self.assertEqual(len(Bar.get_all(1, user)), Bar.objects.all().count())
# Test invalid page number raises expected exception
with self.assertRaises(EmptyPage):
Bar.get_all(2, user)
# Test invalid page value raises expected exception
with self.assertRaises(PageNotAnInteger):
Bar.get_all("foo", user)
def test_get_model_instance(self):
# Test getting a Foo object with a valid ID
valid_foo_id = 1
# Make sure the method returns the right object
foo = Foo.objects.get(id=valid_foo_id)
self.assertEqual(Foo.get_model_instance(valid_foo_id), foo)
# Test invalid lookup raises expected exception
with self.assertRaises(ValueError):
Foo.objects.get(id="foo")
with self.assertRaises(ObjectDoesNotExist):
Foo.objects.get(id=20)
class APIViewTestCase(APIToolsTestCase):
fixtures = ['user_testprofile_foo.json', 'bar_baz_qux.json']
urls = 'django_api_tools.tests.urls'
def setUp(self):
self.factory = RequestFactory()
def test_get(self):
t = TestAPIView()
# Test invalid request gives back 404
request = self.factory.get('/test_api/')
response = t.get(request)
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
# Test reserved URL gives back 200
request = self.factory.get('/test_api/{}'.format(ReservedURL.CSRFTOKEN))
response = t.get(request)
self.assertEqual(response.status_code, StatusCode.OK)
user = User.objects.get(id=1)
# Test model request returns 200
request = self.factory.get('/test_api/foo/')
request.user = user
response = t.get(request)
self.assertEqual(response.status_code, StatusCode.OK)
# Test get instance gives back 200
request = self.factory.get('/test_api/foo/1/')
request.user = user
response = t.get(request)
self.assertEqual(response.status_code, StatusCode.OK)
# Test custom request on model with custom_request implemented gives back 200
request = self.factory.get('/test_api/qux/1/custom/')
request.user = user
response = t.get(request)
self.assertEqual(response.status_code, StatusCode.OK)
# Test custom request on model without implementation gives back 404
request = self.factory.get('/test_api/foo/1/custom/')
request.user = user
response = t.get(request)
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
def test_post(self):
t = TestAPIView()
# Test invalid request gives back 404
request = self.factory.post('/test_api/')
response = t.post(request)
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
# Test reserved URL gives back 200
request = self.factory.post('/test_api/{}/'.format(ReservedURL.CSRFTOKEN))
response = t.post(request)
self.assertEqual(response.status_code, StatusCode.OK)
user = User.objects.get(id=1)
# Test post model request (create) returns 200
APIUrl.ADDITIONAL_FIELDS = list()
request = self.factory.post('/test_api/foo/', data={"f2": "foo"})
request.user = user
response = t.post(request)
self.assertEqual(response.status_code, StatusCode.OK)
# Test post instance (update) gives back 200
APIUrl.ADDITIONAL_FIELDS = list()
foo = Foo.objects.get(id=1)
request = self.factory.post('/test_api/foo/{}/'.format(foo.id), data={"f1": True})
request.user = user
response = t.post(request)
self.assertEqual(response.status_code, StatusCode.OK)
def test_get_all(self):
user = User.objects.get(id=1)
t = TestAPIView()
# Test get first page of Foo's gives back 10 results
request = self.factory.get('/test_api/foo/')
request.user = user
t._endpoint_model = Foo
response = t._get_all(request)
self.assertEqual(len(json.loads(response.content)), 10)
# Test second page of Foo's gives back 1 results
request = self.factory.get('/test_api/foo/', data={"page": 2})
request.user = user
t._endpoint_model = Foo
response = t._get_all(request)
self.assertEqual(len(json.loads(response.content)), 1)
# Test third page of Foo's gives back 404
request = self.factory.get('/test_api/foo/', data={"page": 3})
request.user = user
t._endpoint_model = Foo
response = t._get_all(request)
self.assertIsNone(json.loads(response.content))
def test_get_instance(self):
user = User.objects.get(id=1)
t = TestAPIView()
# Test Foo ID = 1 gives back 200/ correct Foo
foo = Foo.objects.get(id=1)
foo_dict = foo.dictify_with_auth(user, short_dict=False)
request = self.factory.get('/test_api/foo/{}/'.format(foo.id))
request.user = user
t._endpoint_model = Foo
t._url_validator = APIUrl(request)
response = t._get_instance(request)
self.assertDictEqual(json.loads(response.content), foo_dict)
self.assertEqual(response.status_code, StatusCode.OK)
# Test Foo ID = 22 gives back 404/ none
request = self.factory.get('/test_api/foo/22/')
request.user = user
t._endpoint_model = Foo
t._url_validator = APIUrl(request)
response = t._get_instance(request)
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
self.assertIsNone(json.loads(response.content))
# Test Foo ID = "foo" gives back 404
request = self.factory.get('/test_api/foo/foo/')
request.user = user
t._endpoint_model = Foo
t._url_validator = APIUrl(request)
response = t._get_instance(request)
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
self.assertIsNone(json.loads(response.content))
# Test Qux /custom/ gives back 200/ correct value
request = self.factory.get('/test_api/qux/custom/')
request.user = user
t._endpoint_model = Qux
t._url_validator = APIUrl(request)
response = t._get_instance(request)
self.assertEqual(response.status_code, StatusCode.OK)
self.assertEqual(json.loads(response.content), Qux.api_custom_request(request))
def test_post_handler(self):
t = TestAPIView()
# Test non-authenticated user and private endpoint gives back 404
request = self.factory.post('/test_api/qux/')
request.user = AnonymousUser()
public_endpoints = (Foo, )
t._endpoint_model = Qux
response = t._post_handler(request, public_endpoints)
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
# Test create:
f2_val = "hello"
user = User.objects.get(id=1)
request = self.factory.post('/test_api/foo/', data={"f2": f2_val})
request.user = user
public_endpoints = (Qux, )
t._endpoint_model = Foo
response = t._post_handler(request, public_endpoints)
foo_dict = json.loads(response.content)
self.assertEqual(response.status_code, StatusCode.OK)
self.assertEqual(foo_dict['f2'], f2_val)
self.assertEqual(foo_dict, Foo.objects.get(id=foo_dict['id']).dictify_with_auth(user, short_dict=False))
# Test create Foo with bad/missing fields returns 404
f1_val = "hello"
request = self.factory.post('/test_api/foo/', data={"f1": f1_val})
request.user = user
response = t._post_handler(request, public_endpoints)
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
# Test update with owner returns 200 + updated foo object
foo = Foo.objects.get(id=1)
f1_before = foo.f1
foo1_url = '/test_api/foo/{}/'.format(foo.id)
request = self.factory.post(foo1_url, data={"f1": True})
request.user = user
t._url_validator = APIUrl(request)
response = t._post_handler(request, public_endpoints, create=False)
self.assertEqual(response.status_code, StatusCode.OK)
response_content = json.loads(response.content)
self.assertEqual(response_content['f1'], f1_before + 1)
new_foo = Foo.objects.get(id=1)
self.assertDictEqual(new_foo.dictify_with_auth(user, False), response_content)
# Test update with non owner returns 404
request = self.factory.post(foo1_url, data={"f1": True})
request.user = AnonymousUser()
response = t._post_handler(request, public_endpoints, create=False)
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
# Test deactivate gives back 404 + Test that the deactivate date is set
request = self.factory.post(foo1_url, data={"deactivate": True})
request.user = user
response = t._post_handler(request, public_endpoints, create=False)
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
def test_get_json_response_for_instance(self):
foo = Foo.objects.get(id=1)
t = TestAPIView()
# Test Anonymous user gives back public fields
user = AnonymousUser()
response_content = t.get_json_response_for_instance(foo, user).content
self.assertDictKeysEqual(json.loads(response_content), Foo.public_fields)
# Test registered user gives back all fields
user = User.objects.get(id=2)
response_content = t.get_json_response_for_instance(foo, user).content
self.assertDictKeysEqual(json.loads(response_content), list(Foo.public_fields + Foo.registered_user_fields))
# Test owner gives back all fields
user = User.objects.get(id=1)
response_content = t.get_json_response_for_instance(foo, user).content
self.assertDictKeysEqual(json.loads(response_content), list(Foo.public_fields + Foo.registered_user_fields + Foo.owner_only_fields))
def test_validate_request(self):
t = TestAPIView()
# Test invalid request returns False
request = self.factory.get('/test_api/fob/')
self.assertFalse(t._validate_request(request))
request = self.factory.get('/test_api/123/123/123/')
self.assertFalse(t._validate_request(request))
# Test valid request returns True
request = self.factory.get('/test_api/foo/')
self.assertTrue(t._validate_request(request))
# Test reserved URL returns True
request = self.factory.get('/test_api/{}/'.format(ReservedURL.LOGIN))
self.assertTrue(t._validate_request(request))
def test_handle_login_logout_request(self):
# We need to use Django's Client to test the login
# as RequestFactory doesn't offer any middleware by default
c = Client()
login_url = "/test_api/{}/".format(ReservedURL.LOGIN)
# Test valid user login returns the user's profile + sets cookies
valid_user = User.objects.get(id=1)
new_password = "newpassword1"
valid_user.set_password(new_password)
valid_user.save()
response = c.post(login_url, data={"username": valid_user.username, "password": new_password})
self.assertEqual(response.status_code, StatusCode.OK)
self.assertDictEqual(json.loads(response.content), valid_user.test_profile.dictify_with_auth(valid_user, short_dict=False))
# Test that logout deletes the authenticated session
session_val_before = response.cookies['sessionid'].value
response = c.post("/test_api/{}/".format(ReservedURL.LOGOUT))
session_val_after = response.cookies['sessionid'].value
self.assertNotEqual(session_val_before, session_val_after)
# Test an invalid login returns 404
response = c.post(login_url, data={"username": valid_user.username, "password": "badpassword"})
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
# Test inactive user login returns 404
valid_user.is_active = False
valid_user.save()
response = c.post(login_url, data={"username": valid_user.username, "password": new_password})
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
def test_handle_csrf_request(self):
# Test csrf request sets a token
c = Client()
response = c.get("/test_api/{}".format(ReservedURL.CSRFTOKEN))
self.assertIsNotNone(response.cookies['csrftoken'].value)
def test_handle_custom_request(self):
t = TestAPIView()
# Test model which handles custom request returns 200
request = self.factory.get('/test_api/qux/custom/')
t._endpoint_model = Qux
response = t.handle_custom_request(request)
self.assertEqual(response.status_code, StatusCode.OK)
# Test model which doesn't handle custom request returns 404
request = self.factory.get('/test_api/foo/custom/')
t._endpoint_model = Foo
response = t.handle_custom_request(request)
self.assertEqual(response.status_code, StatusCode.NOT_FOUND)
class APIUrlTestCase(APIToolsTestCase):
def setUp(self):
self.factory = RequestFactory()
def test_split_url_components(self):
# Test an invalid request
request = self.factory.get("/api/")
splitter = APIUrl(request)
self.assertFalse(splitter.is_valid_request())
# Test a model request
MODEL_NAME = "foo"
request = self.factory.get("/api/{}/".format(MODEL_NAME))
splitter = APIUrl(request)
self.assertTrue(splitter.is_valid_request())
self.assertTrue(splitter.is_model_request())
self.assertEqual(MODEL_NAME, splitter.REQUESTED_MODEL)
# Test a model instance request
MODEL_INSTANCE = "1"
request = self.factory.get("/api/{}/{}/".format(MODEL_NAME, MODEL_INSTANCE))
splitter = APIUrl(request)
self.assertTrue(splitter.is_valid_request())
self.assertTrue(splitter.is_model_instance_request())
self.assertEqual(MODEL_NAME, splitter.REQUESTED_MODEL)
self.assertEqual(MODEL_INSTANCE, splitter.REQUESTED_MODEL_INSTANCE)
# Test a reserved URL request
reserved_url = ReservedURL.LOGOUT
request = self.factory.get("/api/{}/".format(reserved_url))
splitter = APIUrl(request)
self.assertTrue(splitter.is_valid_request())
self.assertTrue(splitter.is_reserved_url())
self.assertEqual(reserved_url, splitter.RESERVED_URL)
# Test a custom request
reserved_url = ReservedURL.LOGOUT
request = self.factory.get("/api/{}/".format(reserved_url))
splitter = APIUrl(request)
self.assertTrue(splitter.is_valid_request())
self.assertTrue(splitter.is_reserved_url())
        self.assertEqual(reserved_url, splitter.RESERVED_URL)
| szpytfire/django-api-tools | django_api_tools/tests/tests.py | Python | mit | 24690 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import migrations
from django.db.models import Q
def reconvert_social_work(apps, schema_editor):
pass
def convert_social_work(apps, schema_editor):
"""
Migrations 0014 - 0017 take care of a major change in services categorization.
IndividualCounseling is converted to non-proxy IndividualCounselling, with default 'other' sub-service.
Moreover SocialWork sub-services are converted and removed:
- SocialWork.counselling to IndividualCounselling.pre_treatment
- SocialWork.work_with_family to WorkWithFamily
1 Create non-proxy IndividualCounselling
2 Convert old SocialWork and IndividualCounseling sub-services
3 Add new/remove old SocialWork fields
4 Remove old IndividualCounseling
"""
# convert IndividialCounseling to IndividualCounselling.general
# convert old SocialWork.counselling to IndividualCounselling.pre_treatment
# convert old SocialWork.work_with_family to WorkWithFamily
SocialWork = apps.get_model('services', 'SocialWork')
WorkWithFamily = apps.get_model('services', 'WorkWithFamily')
IndividualCounseling = apps.get_model('services', 'IndividualCounseling')
IndividualCounselling = apps.get_model('services', 'IndividualCounselling')
try:
ct = ContentType.objects.get_by_natural_key('services', 'individualcounseling')
for ic in IndividualCounseling.objects.filter(content_type_id=ct.id):
_convert(IndividualCounselling, ic, {'general': True})
ic.delete()
except ContentType.DoesNotExist:
pass # new installations don't have the ct
try:
ct = ContentType.objects.get_by_natural_key('services', 'socialwork')
for service in SocialWork.objects.filter(Q(counselling=True) | Q(work_with_family=True), content_type_id=ct.id):
if service.counselling:
_convert(IndividualCounselling, service, {'pre_treatment': True})
print 'Converted counselling to IC.pre_treatment %s' % service.encounter
service.counselling = False
if service.work_with_family:
_convert(WorkWithFamily, service)
print 'Converted wwf to WorkWithFamily %s' % service.encounter
service.work_with_family = False
service.save()
if not any([getattr(service, attr.attname) for attr in SocialWork._meta.fields
if attr.attname not in ('encounter_id', 'id', 'service_ptr_id', 'content_type_id', 'title', 'created', 'modified')]):
print 'Deleting empty SocialWork %s' % service.encounter
service.delete()
except ContentType.DoesNotExist:
pass # new installations don't have the ct
def _convert(clazz, s, values=None):
if values is None:
values = {}
new = clazz(encounter=s.encounter, title=clazz._meta.verbose_name)
new.created = s.created
new.modified = s.modified
for k, v in values.iteritems():
setattr(new, k, v)
ct = ContentType.objects.get_for_model(new)
new.content_type_id = ct.id
new.save()
class Migration(migrations.Migration):
dependencies = [
('services', '0014_individualcounselling'),
]
operations = [
migrations.RunPython(convert_social_work, reverse_code=reconvert_social_work)
]
| fragaria/BorIS | boris/services/migrations/0015_socialwork.py | Python | mit | 3,466 |
"""
Cobbler settings - ``/etc/cobbler/settings`` file
=================================================
The Cobbler settings file is a **YAML** file and the standard Python ``yaml``
library is used to parse it.
Sample input::
kernel_options:
ksdevice: bootif
lang: ' '
text: ~
Examples:
>>> cobbler = shared[CobblerSettings]
>>> 'kernel_options' in cobbler.data
True
>>> cobbler.data['kernel_options']['ksdevice']
'bootif'
"""
from .. import YAMLParser, parser
from insights.specs import cobbler_settings
@parser(cobbler_settings)
class CobblerSettings(YAMLParser):
"""
Read the ``/etc/cobbler/settings`` YAML file.
"""
pass
| wcmitchell/insights-core | insights/parsers/cobbler_settings.py | Python | apache-2.0 | 698 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Progress/spinner.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify output when a Progress() call is initialized with the list
that represents a canonical "spinner" on the output.
"""
import os
import TestSCons
test = TestSCons.TestSCons(universal_newlines=None)
test.write('SConstruct', r"""
env = Environment()
env['BUILDERS']['C'] = Builder(action=Copy('$TARGET', '$SOURCE'))
Progress(['-\r', '\\\r', '|\r', '/\r'])
env.C('S1.out', 'S1.in')
env.C('S2.out', 'S2.in')
env.C('S3.out', 'S3.in')
env.C('S4.out', 'S4.in')
""")
test.write('S1.in', "S1.in\n")
test.write('S2.in', "S2.in\n")
test.write('S3.in', "S3.in\n")
test.write('S4.in', "S4.in\n")
expect = """\
\\\r|\rCopy("S1.out", "S1.in")
/\r-\rCopy("S2.out", "S2.in")
\\\r|\rCopy("S3.out", "S3.in")
/\r-\rCopy("S4.out", "S4.in")
\\\r|\r"""
if os.linesep != '\n':
expect = expect.replace('\n', os.linesep)
test.run(arguments = '-Q .', stdout=expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| EmanueleCannizzaro/scons | test/Progress/spinner.py | Python | mit | 2,221 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Common types, and routines for manually loading types from file
via GCC.
"""
import glob
import os
import subprocess
import sys
import tempfile
import gdb
import pwndbg.events
import pwndbg.gcc
import pwndbg.memoize
module = sys.modules[__name__]
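# Keep a handle on this module so update() can (re)bind the type objects as
# module-level attributes whenever the inferior starts, stops, or loads a
# new objfile.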
def is_pointer(value):
type = value
if isinstance(value, gdb.Value):
type = value.type
type = type.strip_typedefs()
return type.code == gdb.TYPE_CODE_PTR
def lookup_types(*types):
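    """Return the first of several candidate type names that GDB can resolve."""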
for type_str in types:
try:
return gdb.lookup_type(type_str)
except Exception as e:
exc = e
raise exc
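# Example (sketch): pick up a 4-byte unsigned type under whichever name the
# current debug info provides.
#   u32_t = lookup_types('unsigned int', 'uint', 'u32', 'uint32')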
@pwndbg.events.new_objfile
@pwndbg.events.start
@pwndbg.events.stop
def update():
module.char = gdb.lookup_type('char')
module.ulong = lookup_types('unsigned long', 'uint', 'u32', 'uint32')
module.long = lookup_types('long', 'int', 'i32', 'int32')
module.uchar = lookup_types('unsigned char', 'ubyte', 'u8', 'uint8')
module.ushort = lookup_types('unsigned short', 'ushort', 'u16', 'uint16')
module.uint = lookup_types('unsigned int', 'uint', 'u32', 'uint32')
module.void = lookup_types('void', '()')
module.uint8 = module.uchar
module.uint16 = module.ushort
module.uint32 = module.uint
module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64', 'uint64')
module.unsigned = {
1: module.uint8,
2: module.uint16,
4: module.uint32,
8: module.uint64
}
module.int8 = lookup_types('char', 'i8', 'int8')
module.int16 = lookup_types('short', 'i16', 'int16')
module.int32 = lookup_types('int', 'i32', 'int32')
module.int64 = lookup_types('long long', 'long', 'i64', 'int64')
module.signed = {
1: module.int8,
2: module.int16,
4: module.int32,
8: module.int64
}
module.pvoid = void.pointer()
module.ppvoid = pvoid.pointer()
module.pchar = char.pointer()
module.ptrsize = pvoid.sizeof
if pvoid.sizeof == 4:
module.ptrdiff = module.uint32
module.size_t = module.uint32
module.ssize_t = module.int32
elif pvoid.sizeof == 8:
module.ptrdiff = module.uint64
module.size_t = module.uint64
module.ssize_t = module.int64
else:
raise Exception('Pointer size not supported')
module.null = gdb.Value(0).cast(void)
# Call it once so we load all of the types
update()
tempdir = tempfile.gettempdir() + '/pwndbg'
if not os.path.exists(tempdir):
os.mkdir(tempdir)
# Trial and error until things work
blacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',
'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',
'pcreposix.h', 'sudo_plugin.h', 'tic.h', 'sys/elf.h', 'sys/vm86.h',
'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',
'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',
'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',
'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',
'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',
'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',
'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']
def load(name):
"""Load symbol by name from headers in standard system include directory"""
try:
return gdb.lookup_type(name)
except gdb.error:
pass
# s, _ = gdb.lookup_symbol(name)
# Try to find an architecture-specific include path
arch = pwndbg.arch.current.split(':')[0]
include_dir = glob.glob('/usr/%s*/include' % arch)
if include_dir:
include_dir = include_dir[0]
else:
include_dir = '/usr/include'
source = '#include <fstream>\n'
for subdir in ['', 'sys', 'netinet']:
dirname = os.path.join(include_dir, subdir)
for path in glob.glob(os.path.join(dirname, '*.h')):
if any(b in path for b in blacklist):
continue
print(path)
source += '#include "%s"\n' % path
source += '''
{name} foo;
'''.format(**locals())
filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))
with open(filename, 'w+') as f:
f.write(source)
f.flush()
os.fsync(f.fileno())
compile(filename)
return gdb.lookup_type(name)
def compile(filename=None, address=0):
"""Compile and extract symbols from specified file"""
if filename is None:
print("Specify a filename to compile.")
return
objectname = os.path.splitext(filename)[0] + ".o"
if not os.path.exists(objectname):
gcc = pwndbg.gcc.which()
gcc += ['-w', '-c', '-g', filename, '-o', objectname]
try:
subprocess.check_output(gcc)
except subprocess.CalledProcessError as e:
return
add_symbol_file(objectname, address)
def add_symbol_file(filename=None, address=0):
"""Read additional symbol table information from the object file filename"""
if filename is None:
print("Specify a symbol file to add.")
return
with pwndbg.events.Pause():
gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)
def read_gdbvalue(type_name, addr):
""" Read the memory contents at addr and interpret them as a GDB value with the given type """
gdb_type = pwndbg.typeinfo.load(type_name)
return gdb.Value(addr).cast(gdb_type.pointer()).dereference()
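# Example usage from a GDB session with pwndbg loaded (sketch; the type name
# and address are illustrative):
#   val = pwndbg.typeinfo.read_gdbvalue('size_t', 0x400000)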
| anthraxx/pwndbg | pwndbg/typeinfo.py | Python | mit | 5,640 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import platform
class PyNumpy(PythonPackage):
"""NumPy is the fundamental package for scientific computing with Python.
It contains among other things: a powerful N-dimensional array object,
sophisticated (broadcasting) functions, tools for integrating C/C++ and
Fortran code, and useful linear algebra, Fourier transform, and random
number capabilities"""
homepage = "http://www.numpy.org/"
url = "https://pypi.io/packages/source/n/numpy/numpy-1.13.1.zip"
install_time_test_callbacks = ['install_test', 'import_module_test']
import_modules = [
'numpy', 'numpy.compat', 'numpy.core', 'numpy.distutils', 'numpy.doc',
'numpy.f2py', 'numpy.fft', 'numpy.lib', 'numpy.linalg', 'numpy.ma',
'numpy.matrixlib', 'numpy.polynomial', 'numpy.random', 'numpy.testing',
'numpy.distutils.command', 'numpy.distutils.fcompiler'
]
# FIXME: numpy._build_utils and numpy.core.code_generators failed to import
# FIXME: Is this expected?
version('1.13.1', '2c3c0f4edf720c3a7b525dacc825b9ae')
version('1.13.0', 'fd044f0b8079abeaf5e6d2e93b2c1d03')
version('1.12.1', 'c75b072a984028ac746a6a332c209a91')
version('1.12.0', '33e5a84579f31829bbbba084fe0a4300')
version('1.11.2', '8308cc97be154d2f64a2387ea863c2ac')
version('1.11.1', '5caa3428b24aaa07e72c79d115140e46')
version('1.11.0', '19ce5c4eb16d663a0713daf0018a3021')
version('1.10.4', '510ffc322c635511e7be95d225b6bcbb')
version('1.9.2', 'e80c19d2fb25af576460bb7dac31c59a')
version('1.9.1', '223532d8e1bdaff5d30936439701d6e1')
variant('blas', default=True, description='Build with BLAS support')
variant('lapack', default=True, description='Build with LAPACK support')
depends_on('[email protected]:2.8,3.4:')
depends_on('py-setuptools', type='build')
depends_on('blas', when='+blas')
depends_on('lapack', when='+lapack')
# Tests require:
# TODO: Add a 'test' deptype
# depends_on('[email protected]:', type='test')
def setup_dependent_package(self, module, dependent_spec):
python_version = self.spec['python'].version.up_to(2)
arch = '{0}-{1}'.format(platform.system().lower(), platform.machine())
self.spec.include = join_path(
self.prefix.lib,
'python{0}'.format(python_version),
'site-packages',
'numpy-{0}-py{1}-{2}.egg'.format(
self.spec.version, python_version, arch),
'numpy/core/include')
def patch(self):
spec = self.spec
# for build notes see http://www.scipy.org/scipylib/building/linux.html
lapackblas = LibraryList('')
if '+lapack' in spec:
lapackblas += spec['lapack'].libs
if '+blas' in spec:
lapackblas += spec['blas'].libs
if '+blas' in spec or '+lapack' in spec:
# note that one should not use [blas_opt] and [lapack_opt], see
# https://github.com/numpy/numpy/commit/ffd4332262ee0295cb942c94ed124f043d801eb6
with open('site.cfg', 'w') as f:
# Unfortunately, numpy prefers to provide each BLAS/LAPACK
# differently.
names = ','.join(lapackblas.names)
dirs = ':'.join(lapackblas.directories)
# Special treatment for some (!) BLAS/LAPACK. Note that
# in this case library_dirs can not be specified within [ALL].
if '^openblas' in spec:
f.write('[openblas]\n')
f.write('libraries=%s\n' % names)
elif '^mkl' in spec:
# numpy does not expect system libraries needed for MKL
# here.
# names = [x for x in names if x.startswith('mkl')]
# FIXME: as of @1.11.2, numpy does not work with separately
# specified threading and interface layers. A workaround is
# a terribly bad idea to use mkl_rt. In this case Spack
# will no longer be able to guarantee that one and the
# same variant of Blas/Lapack (32/64bit, threaded/serial)
# is used within the DAG. This may lead to a lot of
# hard-to-debug segmentation faults on user's side. Users
# may also break working installation by (unconsciously)
# setting environment variable to switch between different
# interface and threading layers dynamically. From this
# perspective it is no different from throwing away RPATH's
# and using LD_LIBRARY_PATH throughout Spack.
f.write('[mkl]\n')
f.write('mkl_libs=%s\n' % 'mkl_rt')
elif '^atlas' in spec:
f.write('[atlas]\n')
f.write('atlas_libs=%s\n' % names)
else:
# The section title for the defaults changed in @1.10, see
# https://github.com/numpy/numpy/blob/master/site.cfg.example
if spec.satisfies('@:1.9.2'):
f.write('[DEFAULT]\n')
else:
f.write('[ALL]\n')
f.write('libraries=%s\n' % names)
f.write('library_dirs=%s\n' % dirs)
if not ((platform.system() == "Darwin") and
(platform.mac_ver()[0] == '10.12')):
f.write('rpath=%s\n' % dirs)
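                # Resulting site.cfg (illustrative sketch for an OpenBLAS
                # build; paths depend on the concrete spec):
                #   [openblas]
                #   libraries=openblas
                #   library_dirs=/path/to/openblas/lib
                #   rpath=/path/to/openblas/lib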
def build_args(self, spec, prefix):
args = []
# From NumPy 1.10.0 on it's possible to do a parallel build
if self.version >= Version('1.10.0'):
args = ['-j', str(make_jobs)]
return args
def test(self):
# `setup.py test` is not supported. Use one of the following
# instead:
#
# - `python runtests.py` (to build and test)
# - `python runtests.py --no-build` (to test installed numpy)
# - `>>> numpy.test()` (run tests for installed numpy
# from within an interpreter)
pass
def install_test(self):
# Change directories due to the following error:
#
# ImportError: Error importing numpy: you should not try to import
# numpy from its source directory; please exit the numpy
# source tree, and relaunch your python interpreter from there.
with working_dir('..'):
python('-c', 'import numpy; numpy.test("full", verbose=2)')
| TheTimmy/spack | var/spack/repos/builtin/packages/py-numpy/package.py | Python | lgpl-2.1 | 7,907 |
import sys
from time import time
import inspect
from importlib import import_module
from copy import deepcopy
from collections import defaultdict, OrderedDict
#import warnings
import numpy as np
import scipy as sp
import pymake.io as io
from pymake import logger
from sklearn.pipeline import make_pipeline
class ModelBase(object):
"""" Root Class for all the Models.
* Suited for unserpervised model
* Virtual methods for the desired propertie of models
"""
__abstractmethods__ = 'model'
default_settings = {
'_write' : False,
'_measures' : [],
'_fmt' : [], # unused...
'snapshot_freq': 50,
# @deprecated => use model skl to wrap all model !
'iterations' : 3,
        'burnin' : 5, # inverse burn-in: number of the last samples to keep
'thinning' : 1,
}
log = logger
def __init__(self, expe=None, frontend=None):
""" Model Initialization strategy:
            1. self lookup from child initialization
2. kwargs lookup
3. default value
"""
self.expe = expe
self.frontend = frontend
self._name = self.__class__.__name__.lower()
# change to semantic -> update value (t+1)
self.samples = [] # actual sample
        self._samples = [] # buffered slice saved later, to avoid writing to disk at each iteration (see format.write_current_state)
for k, v in self.default_settings.items():
self._set_default_settings(k, expe, v)
#self._typo_kws = self._extract_typo_kws() # <as used for precomputation, may be reused ...
self._meas_kws = self._extract_meas_kws()
self.measures = {}
self._measure_cpt = 0
        # @debug Frontend integration!
        # TODO: develop a Frontend.get_properties
if hasattr(self.frontend, 'is_symmetric'):
self._is_symmetric = self.frontend.is_symmetric()
if hasattr(self.frontend, 'data_ma'):
self.mask = self.frontend.data_ma.mask
#np.fill_diagonal(self.frontend.data_ma, np.ma.masked)
self._purge_objects = ['frontend', 'data_A', 'data_B']
if hasattr(self, '_purge'):
self._purge_objects.extend(self._purge)
def _set_default_settings(self, key, expe, default):
if key in expe:
value = expe[key]
elif hasattr(self, key):
value = getattr(self, key)
else:
value = default
return setattr(self, key, value)
def _init(self, *args, **kwargs):
''' Init for fit method.
            Should initialize params that depend on the frontend/data.
'''
if hasattr(self, '_check_measures'):
self._check_measures()
if hasattr(self, '_init_params'):
self._init_params(*args, **kwargs)
self.begin_it = time()
# @Dense mmsb
def data_iter(self, data=None, randomize=False):
        ''' Iterate over various types of data:
            * ma.array (masked values are ignored)
            * ndarray (todo ?)
            * What format for temporal/chunk/big data... ?
        '''
return self._data_iter_ma(data, randomize)
# @Dense mmsb
def _data_iter_ma(self, data, randomize):
if data is None:
data_ma = self.frontend.data_ma
else:
data_ma = data
order = np.arange(data_ma.size).reshape(data_ma.shape)
masked = order[data_ma.mask]
if self._is_symmetric:
tril = np.tril_indices_from(data_ma, -1)
tril = order[tril]
masked = np.append(masked, tril)
# Remove masked value to the iteration list
order = np.delete(order, masked)
# Get the indexes of nodes (i,j) for each observed interactions
order = list(zip(*np.unravel_index(order, data_ma.shape)))
if randomize is True:
np.random.shuffle(order)
return order
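    # Example (sketch): visit every observed dyad once, in random order.
    #   for i, j in model.data_iter(randomize=True):
    #       y_ij = model.frontend.data_ma[i, j]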
def getK(self):
theta, _ = self.get_params()
return theta.shape[1]
def getN(self):
theta, _ = self.get_params()
return theta.shape[0]
def _init_params(self, *args, **kwargs):
pass
def _check_measures(self):
if self.expe.get('deactivate_measures'):
for m in self.expe.get('_measures', []):
if not hasattr(self, m):
setattr(self, m, None)
def _extract_typo_kws(self):
if self.expe.get('_measures'):
measures = self.expe._measures
else:
return {}
kws = defaultdict(list)
for param in measures:
_param = ''.join(param.split('@')[1:])
if not _param: continue
for item in _param.split('&'):
k, v = item.split('=')
kws[k].append(v)
return kws
def _extract_meas_kws(self):
meas_kws = OrderedDict()
for _meas in self.expe._measures:
kws = {}
if '@' in _meas:
meas, params = _meas.split('@')
for param in params.split('&'):
k, v = param.split('=')
try:
kws[k] = int(v)
except ValueError as e:
kws[k] = v
else:
meas = _meas
meas_kws[meas] = kws
return meas_kws
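    # Measure specs follow "<name>@key=val&key2=val2"; e.g. "roc@measure_freq=10"
    # makes compute_measures() evaluate compute_roc only every 10th call.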
def compute_measures(self):
''' Compute measure as model attributes.
            begin_it: the time at the beginning of the iteration.
'''
if self.expe.get('deactivate_measures'):
return
if hasattr(self, 'begin_it'):
self.time_it = time() - self.begin_it
params = self._reduce_latent()
for meas, kws in self._meas_kws.items():
if 'measure_freq' in kws:
if self._measure_cpt % kws['measure_freq'] != 0:
continue
            # lstrip('_') is a useful hack for identical measures with different parameters...
if hasattr(self, 'compute_'+meas.lstrip('_')):
_meas = getattr(self, 'compute_'+meas.lstrip('_'))(*params, **kws)
##with np.errstate(all='raise'):
#with warnings.catch_warnings():
# warnings.filterwarnings('error')
# try:
# _meas = getattr(self, 'compute_'+meas.lstrip('_'))(*params, **kws)
# except (Warning, ValueError) as e:
# self.log.warning(e)
# _meas = np.nan
else:
# Assume already computed
_meas = getattr(self, meas) # raise exception if not here.
# set value and last diff
self.measures[meas] = (_meas, _meas-self.measures.get(meas,[0])[0])
self._measure_cpt += 1
return self._measure_cpt
#@mmm #frontend
def likelihood(self, theta=None, phi=None):
if theta is None:
theta = self._theta
if phi is None:
phi = self._phi
likelihood = theta.dot(phi).dot(theta.T)
return likelihood
#@mmm
def similarity_matrix(self, theta=None, phi=None, sim='cos'):
if theta is None:
theta = self._theta
if phi is None:
phi = self._phi
features = theta
if sim in ('dot', 'latent'):
sim = np.dot(features, features.T)
elif sim == 'cos':
            norm = np.linalg.norm(features, axis=1)
            # cosine similarity: normalise by the outer product of row norms
            sim = np.dot(features, features.T) / np.outer(norm, norm)
elif sim in ('model', 'natural'):
sim = features.dot(phi).dot(features.T)
else:
            self.log.error('Similarity metric unknown: %s' % sim)
sim = None
if hasattr(self, 'normalization_fun'):
sim = self.normalization_fun(sim)
return sim
def get_params(self):
if hasattr(self, '_theta') and hasattr(self, '_phi'):
return np.asarray(self._theta), np.asarray(self._phi)
else:
return self._reduce_latent()
def _reduce_latent(self):
''' Estimate global parameters of a model '''
raise NotImplementedError
def update_hyper(self):
self.log.warning('No method to update hyperparams..')
return
def get_hyper(self):
self.log.error('no method to get hyperparams')
return
def save(self, silent=False):
to_remove = []
for k, v in self.__dict__.items():
if hasattr(v, 'func_name') and v.func_name == '<lambda>':
to_remove.append(k)
if str(v).find('<lambda>') >= 0:
# python3 hook, nothing better ?
to_remove.append(k)
#elif type(k) is defaultdict:
# setattr(self.model, k, dict(v))
if to_remove or self._has_purge():
model = deepcopy(self)
model.purge()
for k in to_remove:
try:
delattr(model, k)
except Exception as e:
self.log.debug('Cant delete object during model purging: %s' % e)
else:
model = self
if hasattr(self, 'write_current_state'):
delattr(self, 'write_current_state')
fn = self.expe['_output_path']
if not silent:
self.log.info('Snapshotting Model: %s' % fn)
else:
print('+', end='')
sys.stdout.flush()
io.save(fn, model, silent=True)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
try:
setattr(result, k, deepcopy(v, memo))
except Exception as e:
self.log.debug('can\'t copy %s: %s. Passing on: %s' % (k, v, e))
continue
return result
#@dense mmsb
def get_mask(self):
return self.mask
#@dense mmsb
def mask_probas(self, data):
mask = self.get_mask()
y_test = data[mask]
p_ji = self.likelihood(*self._reduce_latent())
probas = p_ji[mask]
return y_test, probas
#@dense mmsb
def compute_roc(self, *args, **kwargs):
from sklearn.metrics import roc_curve, auc, precision_recall_curve
data = self.frontend.data
y_true, probas = self.mask_probas(data)
fpr, tpr, thresholds = roc_curve(y_true, probas)
roc = auc(fpr, tpr)
return roc
#@dense mmsb
#@mmm
def predictMask(self, data, mask=True):
self.log.info('Reducing latent variables...')
if mask is True:
masked = self.get_mask()
else:
masked = mask
        ### @Debug Ignore the diagonal when predicting.
np.fill_diagonal(masked, False)
ground_truth = data[masked]
p_ji = self.likelihood(*self.get_params())
prediction = p_ji[masked]
prediction = sp.stats.bernoulli.rvs( prediction )
#prediction[prediction >= 0.5 ] = 1
#prediction[prediction < 0.5 ] = 0
### Computing Precision
test_size = float(ground_truth.size)
good_1 = ((prediction + ground_truth) == 2).sum()
precision = good_1 / float(prediction.sum())
rappel = good_1 / float(ground_truth.sum())
g_precision = (prediction == ground_truth).sum() / test_size
mask_density = ground_truth.sum() / test_size
### Finding Communities
if hasattr(self, 'communities_analysis'):
self.log.info('Finding Communities...')
communities = self.communities_analysis(data)
K = self.K
else:
communities = None
K = self.expe.get('K')
res = {'Precision': precision,
'Recall': rappel,
'g_precision': g_precision,
'mask_density': mask_density,
'clustering':communities,
'K': K
}
return res
def fit(self, *args, **kwargs):
''' A core method.
Template
--------
            # _init() takes no fixed arguments; whatever is passed in is
            # forwarded to _init_params, which you can safely overwrite.
#
self._init()
for _it in range(self.expe.iterations):
# core process
if self.expe.get('_write'):
self.write_current_state(self)
            # In addition, the model is automatically saved at the end of
            # a script when it has been configured (i.e. obtained through
            # load_model()).
#
if _it > 0 and _it % self.snapshot_freq == 0:
self.save(silent=True)
'''
raise NotImplementedError
def transform(self, *args, **kwargs):
raise NotImplementedError
def predict(self, *args, **kwargs):
raise NotImplementedError
# Search ?
def generate(self):
raise NotImplementedError
def get_clusters(self):
raise NotImplementedError
def _has_purge(self):
return any([getattr(self, o, None) for o in self._purge_objects])
def purge(self):
for obj in self._purge_objects:
if hasattr(self, obj):
delattr(self, obj)
class ModelSkl(ModelBase):
''' Wrapper around scikit-learn models.
Notes
-----
        Model classes need to be serialisable, and module objects are not.
        Avoid keeping module objects on self.
'''
def __init__(self, expe, frontend=None):
super(ModelSkl, self).__init__(expe, frontend)
# Load Sklearn Model
if not hasattr(self, 'module'):
            self.log.error('ModelSkl base class needs a {module} name attribute. Exiting.')
exit(42)
sk_modules = []
if isinstance(self.module, str):
sk_modules = [self.module]
elif isinstance(self.module, list):
sk_modules = self.module
else:
            raise ValueError('Sklearn model type unknown: %s (%s)' % (type(self.module), self.module))
self._specs = []
self._models = []
model_names = self._name.split('-')
assert(len(model_names) == len(sk_modules))
for model_name, module in zip(model_names, sk_modules):
_module, _model = self._mm_from_str(module)
spec = self._spec_from_expe(_model, model_name)
model = _model(**spec)
self._specs.append(spec)
self._models.append(model)
# Init Sklearn model
self.model = make_pipeline(*self._models)
@staticmethod
def _mm_from_str(module):
_module = module.split('.')
_module, model_name = '.'.join(_module[:-1]), _module[-1]
module = import_module(_module)
_model = getattr(module, model_name)
return module, _model
def _spec_from_expe(self, _model, model_name=None):
''' Set Sklearn parameters. '''
if model_name is None:
model_name = _model.__name__.split('.')[-1].lower()
else:
model_name = model_name.lower()
# @debug model resolve name !
model_name = model_name.split('.')[-1]
model_params = list(inspect.signature(_model).parameters)
spec = dict()
spec_map = getattr(self, 'spec_map', {})
default_spec = getattr(self, '_default_spec', {})
model_spec = {}
for k, v in self.expe.items():
if k.find('__') >= 0:
model, param = k.split('__')
if model.lower() == model_name:
model_spec[param] = v
for k in model_params:
if k in list(model_spec)+list(spec_map):
_k = spec_map.get(k, k)
if _k in model_spec:
spec[k] = model_spec[_k]
elif k in default_spec:
spec[k] = default_spec[k]
return spec
def __getattr__(self, attr):
''' Propagate sklearn attribute.
Notes
-----
        __getattr__ is called only if the attribute doesn't exist...
'''
        if 'model' not in self.__dict__:
raise AttributeError
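        # Inside this class, names like ``self.__hack_me_fit`` are mangled to
        # ``_ModelSkl__hack_me_fit``; stripping everything up to the marker
        # recovers the real sklearn attribute name ('fit').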
# or should it be hook_me ;)
        # rpartition (rather than partition) leaves plain attribute names
        # intact when the '__hack_me_' marker is absent.
        attr = attr.rpartition('__hack_me_')[-1]
return getattr(self.model, attr)
def fit(self, *args, **kwargs):
fun = self.__hack_me_fit
self.log.info("Fitting `%s' model with spec: %s" % (type(self), str(self._specs)))
return fun(*args, **kwargs)
def transform(self, *args, **kwargs):
fun = self.__hack_me_transform
data = fun(*args, **kwargs)
if hasattr(self, 'post_transform'):
for module in self.post_transform:
_m, _model = self._mm_from_str(module)
spec = self._spec_from_expe(_model)
model = _model(**spec)
data = model.fit_transform(data)
return data
def fit_transform(self, *args, **kwargs):
fun = self.__hack_me_fit_transform
return fun(*args, **kwargs)
# @Obsolete ?
def predict(self, *args, **kwargs):
fun = self.__hack_me_predict
return fun(*args, **kwargs)
# get_params()
# set_params()
| dtrckd/pymake | pymake/model.py | Python | gpl-3.0 | 17,406 |
""" Python Character Mapping Codec
For the Jape Konstanz encoding
Bernard Sufrin
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
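# Illustrative registration (an assumption, not part of this module): the
# encodings package normally finds this codec via its search function, e.g.
# codecs.register(lambda name: getregentry() if name == 'jape_konstanz' else None)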
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
#
# led/ted perp has to be different -- tcl/tk assumes
# that none of an encoding works if the first special
# character has no rendition in the given font
# (BAS: Jan 2003)
#
tkBug = 0
if tkBug:
perp = 0x2310 # laterally reversed not
prep = 0x2319 # inverted laterally reversed not
else:
perp = 0x2ADF # superscript top
prep = 0x2AE0 # superscript bottom
jenc = ([
perp, 0x00C5, 0x0393, 0x00C9, 0x00D1, 0x00D6, 0x00DC, 0x00E1, 0x00E0, 0x00E2, 0x2AE2, 0x27E8, 0x00E5, 0x00E7, 0x00E9, 0x00E8,
## perp, Aring, Gamma, Eacute, Ntilde, Odier, Udier, aacute, agrave, acircm, stile3, seqbra, aring, ccedil, eacute, egrave
0x00EA, 0x25C1, 0x00ED, 0x00EC, 0x00EE, 0x21DD, 0x27E9, 0x97, 0x98, 0x99, 0x00F6, 0x21D0, 0x00FA, 0x00F9, 0x00FB, 0x21CC,
## ecircm, ltrian, iacute, igrave, icircm, sttilde,seqket, ?oacute,?ograve,?ocircm,bararr, leftar, uacute, ugrave, ucircm, harp2
0x03a4, 0x00B0, 0x00A2, 0x00A3, 0x00A7, 0x2022, 0x2227, 0x2286, 0x00AE, 0x00A9, 0x2122, 0x00B4, 0x00A8, 0x2260, 0x00C6, 0x00D8,
## Tserif, degree, cent, pound, para, bullet, logand, subset, regist, copyri, tradma, acute, umlaut, noteq, AE, Oslash
0x221E, 0x00B1, 0x2264, 0x2265, 0x22B8, 0x00B5, 0x2202, 0x2211, 0x220F, 0x03C0, 0x222B, 0x2297, 0x2295, 0x2126, 0x00E6, 0x00F8,
## infin, plusmi, lesseq, greaeq, lolli, mu, delta, Sigma, Pi, pi, curlyS, xcircl, plusci, Omega, ae, oslash
0x00BF, 0x00A1, 0x00AC, 0x221A, 0x0192, 0x2248, 0x2206, 0x00AB, 0x00BB, 0x2026, 0x00A0, 0x00C0, 0x00C3, 0x00D5, 0x0152, 0x0153,
## seuq, alxce, not, root, curlyf, curlyeq,Delta, guibra, guiket, ..., nbspace,Agrave, Atilde, Otilde, OE, oe,
0x2013, 0x2014, 0x201C, 0x201D, 0x2018, 0x2019, 0x00F7, 0x25CA, 0x21A6, 0x22A5, 0x2208, 0x21d2, 0x2234, 0x27E6, 0x27E7, 0x2229,
## endash, emdash, quote, etouq, squote, etouqs, divide, lozeng, mapsto, bottom, member, 2arrow, ::, sembra, semket, interse,
0x214B, 0x297D, 0x25AA, 0x201E, 0x2203, 0x27DB, 0x22A2, 0x2192, 0x2200, 0x2261, 0x2194, 0x2228, 0x039B, 0x22A7, 0x22A9, 0x222A,
## srepma, fishta, blksq, lowquot,exists, stiboth,stile, arrow, forall, equiv, lrarrow,logor, Lambda, models, forces, union
0x27DA, 0x223C, 0x2135, 0x00DB, 0x00D7, 0x2292, 0x25A1, 0x225C, prep, 0x25CF, 0x2283, 0x03BB, 0x00B8, 0x02DD, 0x0328, 0x02C7
## bimodel,tildop, aleph, Ucircm, times, sqgee, whsqua, eqdef, prep, dot, hook, lambda, cedilla,2acute, ogonek, caron
])
for i in xrange(128, 256):
decoding_map[i] = jenc[i-128]
jenc = []
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
| RBornat/jape | dev/Unicode/jape_konstanz.py | Python | gpl-2.0 | 3,436 |
# usage: python setup.py pydexe
from pyd.support import setup, Extension, pydexe_sanity_check
import platform
pydexe_sanity_check()
projName = "datetime"
setup(
name=projName,
version='1.0',
ext_modules=[
Extension("datetime", ['datetime.d'],
build_deimos=True,
d_lump=True,
d_unittest=True
),
],
)
| ariovistus/pyd | tests/deimos_unittests/datetime/setup.py | Python | mit | 368 |
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
import sys
import os
import pycopia.inet.XHTMLcgi as CGI
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print "Content-type: text/html"
print
print_head()
try:
print_directory()
print_arguments()
form = CGI.get_form(sys.stdin) # Replace with other classes to test those
print_form(form.get_form_values())
print_environ(environ)
print_environ_usage()
# def f():
# exec "testing print_exception() -- <I>italics?</I>"
# def g(f=f):
# f()
# print "<H3>What follows is a test, not an actual exception:</H3>"
# g()
except:
print_exception()
print_tail()
def print_head():
print """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<HTML>
<HEAD>
<TITLE>cgi test</TITLE>
</HEAD>
<BODY>
"""
def print_tail():
print """
</BODY>
</HTML>
"""
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print
print "<H3>Traceback (most recent call last):</H3>"
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print "<PRE>%s<B>%s</B></PRE>" % (
escape("".join(list[:-1])),
escape(list[-1]),
)
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = environ.keys()
keys.sort()
print
print "<H3>Shell Environment:</H3>"
print "<DL>"
for key in keys:
print "<DT>", escape(key), "</DT>"
print "<DD>", escape(environ[key]), "</DD>"
print "</DL>"
print
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = form.keys()
keys.sort()
print
print "<H3>Form Contents:</H3>"
if not keys:
print "<P>No form fields."
print "<DL>"
for key in keys:
print "<DT>", escape(repr(key)), "</DT>"
value = form[key]
print "<DD>", escape(repr(value.value)), "</DD>"
print "</DL>"
print
def print_directory():
"""Dump the current directory as HTML."""
print
print "<H3>Current Working Directory:</H3>"
try:
pwd = os.getcwd()
except os.error, msg:
print "os.error:", escape(str(msg))
else:
print escape(pwd)
print
def print_arguments():
print
print "<H3>Command Line Arguments:</H3>"
print
print sys.argv
print
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print """
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE</LI>
<LI>CONTENT_LENGTH</LI>
<LI>CONTENT_TYPE</LI>
<LI>DATE_GMT</LI>
<LI>DATE_LOCAL</LI>
<LI>DOCUMENT_NAME</LI>
<LI>DOCUMENT_ROOT</LI>
<LI>DOCUMENT_URI</LI>
<LI>GATEWAY_INTERFACE</LI>
<LI>LAST_MODIFIED</LI>
<LI>PATH</LI>
<LI>PATH_INFO</LI>
<LI>PATH_TRANSLATED</LI>
<LI>QUERY_STRING</LI>
<LI>REMOTE_ADDR</LI>
<LI>REMOTE_HOST</LI>
<LI>REMOTE_IDENT</LI>
<LI>REMOTE_USER</LI>
<LI>REQUEST_METHOD</LI>
<LI>SCRIPT_NAME</LI>
<LI>SERVER_NAME</LI>
<LI>SERVER_PORT</LI>
<LI>SERVER_PROTOCOL</LI>
<LI>SERVER_ROOT</LI>
<LI>SERVER_SOFTWARE</LI>
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT</LI>
<LI>HTTP_CONNECTION</LI>
<LI>HTTP_HOST</LI>
<LI>HTTP_PRAGMA</LI>
<LI>HTTP_REFERER</LI>
<LI>HTTP_USER_AGENT</LI>
</UL>
"""
# Utilities
def escape(s, quote=None):
"""Replace special characters '&', '<' and '>' by SGML entities."""
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
if __name__ == "__main__":
test()
| xiangke/pycopia | core/pycopia/inet/cgi_test.py | Python | lgpl-2.1 | 4,619 |
from couchpotato import get_session
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString, ss, sp
from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \
splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import File, Media
from enzyme.exceptions import NoParserError, ParseError
from guessit import guess_movie_info
from subliminal.videos import Video
import enzyme
import os
import re
import threading
import time
import traceback
log = CPLog(__name__)
class Scanner(Plugin):
ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_',
'_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo',
'thumbs.db', 'ehthumbs.db', 'desktop.ini'] #unpacking, smb-crap, hidden files
ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate']
extensions = {
'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v'],
'movie_extra': ['mds'],
'dvd': ['vts_*', 'vob'],
'nfo': ['nfo', 'txt', 'tag'],
'subtitle': ['sub', 'srt', 'ssa', 'ass'],
'subtitle_extra': ['idx'],
'trailer': ['mov', 'mp4', 'flv']
}
file_types = {
'subtitle': ('subtitle', 'subtitle'),
'subtitle_extra': ('subtitle', 'subtitle_extra'),
'trailer': ('video', 'trailer'),
'nfo': ('nfo', 'nfo'),
'movie': ('video', 'movie'),
'movie_extra': ('movie', 'movie_extra'),
'backdrop': ('image', 'backdrop'),
'poster': ('image', 'poster'),
'thumbnail': ('image', 'thumbnail'),
'leftover': ('leftover', 'leftover'),
}
file_sizes = { # in MB
'movie': {'min': 300},
'trailer': {'min': 2, 'max': 250},
'backdrop': {'min': 0, 'max': 5},
}
codecs = {
'audio': ['dts', 'ac3', 'ac3d', 'mp3'],
'video': ['x264', 'h264', 'divx', 'xvid']
}
audio_codec_map = {
0x2000: 'ac3',
0x2001: 'dts',
0x0055: 'mp3',
0x0050: 'mp2',
0x0001: 'pcm',
0x003: 'pcm',
0x77a1: 'tta1',
0x5756: 'wav',
0x6750: 'vorbis',
0xF1AC: 'flac',
0x00ff: 'aac',
}
source_media = {
'bluray': ['bluray', 'blu-ray', 'brrip', 'br-rip'],
'hddvd': ['hddvd', 'hd-dvd'],
'dvd': ['dvd'],
'hdtv': ['hdtv']
}
clean = '[ _\,\.\(\)\[\]\-](extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
multipart_regex = [
'[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
'[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1
'[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1
'[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1
'cd[ _\.-]*([0-9a-d]+)$', #cd1.ext
'dvd[ _\.-]*([0-9a-d]+)$', #dvd1.ext
'part[ _\.-]*([0-9a-d]+)$', #part1.mkv
'dis[ck][ _\.-]*([0-9a-d]+)$', #disk1.mkv
'()[ _\.-]+([0-9]*[abcd]+)(\.....?)$',
'([a-z])([0-9]+)(\.....?)$',
'()([ab])(\.....?)$' #*a.mkv
]
cp_imdb = '(.cp.(?P<id>tt[0-9{7}]+).)'
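    # Matches the CouchPotato tag embedded in file names,
    # e.g. 'Movie.Title.cp(tt0123456).mkv'; the unescaped dots in the
    # pattern happen to match the literal parentheses around the IMDB id.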
def __init__(self):
addEvent('scanner.create_file_identifier', self.createStringIdentifier)
addEvent('scanner.remove_cptag', self.removeCPTag)
addEvent('scanner.scan', self.scan)
addEvent('scanner.name_year', self.getReleaseNameYear)
addEvent('scanner.partnumber', self.getPartNumber)
def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, on_found = None):
folder = sp(folder)
if not folder or not os.path.isdir(folder):
log.error('Folder doesn\'t exists: %s', folder)
return {}
# Get movie "master" files
movie_files = {}
leftovers = []
# Scan all files of the folder if no files are set
if not files:
check_file_date = True
try:
files = []
for root, dirs, walk_files in os.walk(folder):
files.extend([sp(os.path.join(root, filename)) for filename in walk_files])
# Break if CP wants to shut down
if self.shuttingDown():
break
except:
log.error('Failed getting files from %s: %s', (folder, traceback.format_exc()))
else:
check_file_date = False
files = [sp(x) for x in files]
for file_path in files:
if not os.path.exists(file_path):
continue
# Remove ignored files
if self.isSampleFile(file_path):
leftovers.append(file_path)
continue
elif not self.keepFile(file_path):
continue
is_dvd_file = self.isDVDFile(file_path)
if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file: # Minimal 300MB files or is DVD file
# Normal identifier
identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file)
identifiers = [identifier]
# Identifier with quality
quality = fireEvent('quality.guess', [file_path], single = True) if not is_dvd_file else {'identifier':'dvdr'}
if quality:
identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', ''))
identifiers = [identifier_with_quality, identifier]
if not movie_files.get(identifier):
movie_files[identifier] = {
'unsorted_files': [],
'identifiers': identifiers,
'is_dvd': is_dvd_file,
}
movie_files[identifier]['unsorted_files'].append(file_path)
else:
leftovers.append(file_path)
# Break if CP wants to shut down
if self.shuttingDown():
break
# Cleanup
del files
# Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2"
# files will be grouped first.
leftovers = set(sorted(leftovers, reverse = True))
# Group files minus extension
ignored_identifiers = []
for identifier, group in movie_files.iteritems():
if identifier not in group['identifiers'] and len(identifier) > 0: group['identifiers'].append(identifier)
log.debug('Grouping files: %s', identifier)
has_ignored = 0
for file_path in list(group['unsorted_files']):
ext = getExt(file_path)
wo_ext = file_path[:-(len(ext) + 1)]
found_files = set([i for i in leftovers if wo_ext in i])
group['unsorted_files'].extend(found_files)
leftovers = leftovers - found_files
has_ignored += 1 if ext == 'ignore' else 0
if has_ignored == 0:
for file_path in list(group['unsorted_files']):
ext = getExt(file_path)
has_ignored += 1 if ext == 'ignore' else 0
if has_ignored > 0:
ignored_identifiers.append(identifier)
# Break if CP wants to shut down
if self.shuttingDown():
break
# Create identifiers for all leftover files
path_identifiers = {}
for file_path in leftovers:
identifier = self.createStringIdentifier(file_path, folder)
if not path_identifiers.get(identifier):
path_identifiers[identifier] = []
path_identifiers[identifier].append(file_path)
# Group the files based on the identifier
delete_identifiers = []
for identifier, found_files in path_identifiers.iteritems():
log.debug('Grouping files on identifier: %s', identifier)
group = movie_files.get(identifier)
if group:
group['unsorted_files'].extend(found_files)
delete_identifiers.append(identifier)
# Remove the found files from the leftover stack
leftovers = leftovers - set(found_files)
# Break if CP wants to shut down
if self.shuttingDown():
break
# Cleaning up used
for identifier in delete_identifiers:
if path_identifiers.get(identifier):
del path_identifiers[identifier]
del delete_identifiers
# Group based on folder
delete_identifiers = []
for identifier, found_files in path_identifiers.iteritems():
log.debug('Grouping files on foldername: %s', identifier)
for ff in found_files:
new_identifier = self.createStringIdentifier(os.path.dirname(ff), folder)
group = movie_files.get(new_identifier)
if group:
group['unsorted_files'].extend([ff])
delete_identifiers.append(identifier)
# Remove the found files from the leftover stack
leftovers = leftovers - set([ff])
# Break if CP wants to shut down
if self.shuttingDown():
break
# leftovers should be empty
if leftovers:
log.debug('Some files are still left over: %s', leftovers)
# Cleaning up used
for identifier in delete_identifiers:
if path_identifiers.get(identifier):
del path_identifiers[identifier]
del delete_identifiers
# Make sure we remove older / still extracting files
valid_files = {}
        while not self.shuttingDown():
try:
identifier, group = movie_files.popitem()
except:
break
# Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute
file_too_new = False
for cur_file in group['unsorted_files']:
if not os.path.isfile(cur_file):
file_too_new = time.time()
break
file_time = [os.path.getmtime(cur_file), os.path.getctime(cur_file)]
for t in file_time:
if t > time.time() - 60:
file_too_new = tryInt(time.time() - t)
break
if file_too_new:
break
if check_file_date and file_too_new:
try:
time_string = time.ctime(file_time[0])
except:
try:
time_string = time.ctime(file_time[1])
except:
time_string = 'unknown'
log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier))
# Delete the unsorted list
del group['unsorted_files']
continue
# Only process movies newer than x
if newer_than and newer_than > 0:
has_new_files = False
for cur_file in group['unsorted_files']:
file_time = [os.path.getmtime(cur_file), os.path.getctime(cur_file)]
if file_time[0] > newer_than or file_time[1] > newer_than:
has_new_files = True
break
if not has_new_files:
log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier))
# Delete the unsorted list
del group['unsorted_files']
continue
valid_files[identifier] = group
del movie_files
total_found = len(valid_files)
# Make sure only one movie was found if a download ID is provided
if release_download and total_found == 0:
log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id'))
elif release_download and total_found > 1:
log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files)))
release_download = None
# Determine file types
db = get_session()
processed_movies = {}
        while not self.shuttingDown():
try:
identifier, group = valid_files.popitem()
except:
break
if return_ignored is False and identifier in ignored_identifiers:
log.debug('Ignore file found, ignoring release: %s', identifier)
continue
# Group extra (and easy) files first
group['files'] = {
'movie_extra': self.getMovieExtras(group['unsorted_files']),
'subtitle': self.getSubtitles(group['unsorted_files']),
'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']),
'nfo': self.getNfo(group['unsorted_files']),
'trailer': self.getTrailers(group['unsorted_files']),
'leftover': set(group['unsorted_files']),
}
# Media files
if group['is_dvd']:
group['files']['movie'] = self.getDVDFiles(group['unsorted_files'])
else:
group['files']['movie'] = self.getMediaFiles(group['unsorted_files'])
if len(group['files']['movie']) == 0:
log.error('Couldn\'t find any movie files for %s', identifier)
continue
log.debug('Getting metadata for %s', identifier)
group['meta_data'] = self.getMetaData(group, folder = folder, release_download = release_download)
# Subtitle meta
group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {}
# Get parent dir from movie files
for movie_file in group['files']['movie']:
group['parentdir'] = os.path.dirname(movie_file)
group['dirname'] = None
folder_names = group['parentdir'].replace(folder, '').split(os.path.sep)
folder_names.reverse()
# Try and get a proper dirname, so no "A", "Movie", "Download" etc
for folder_name in folder_names:
if folder_name.lower() not in self.ignore_names and len(folder_name) > 2:
group['dirname'] = folder_name
break
break
# Leftover "sorted" files
for file_type in group['files']:
            if file_type != 'leftover':
group['files']['leftover'] -= set(group['files'][file_type])
group['files'][file_type] = list(group['files'][file_type])
group['files']['leftover'] = list(group['files']['leftover'])
# Delete the unsorted list
del group['unsorted_files']
# Determine movie
group['library'] = self.determineMovie(group, release_download = release_download)
if not group['library']:
log.error('Unable to determine movie: %s', group['identifiers'])
else:
movie = db.query(Media).filter_by(library_id = group['library']['id']).first()
group['movie_id'] = None if not movie else movie.id
processed_movies[identifier] = group
# Notify parent & progress on something found
if on_found:
on_found(group, total_found, total_found - len(processed_movies))
# Wait for all the async events calm down a bit
while threading.activeCount() > 100 and not self.shuttingDown():
log.debug('Too many threads active, waiting a few seconds')
time.sleep(10)
if len(processed_movies) > 0:
log.info('Found %s movies in the folder %s', (len(processed_movies), folder))
else:
log.debug('Found no movies in the folder %s', folder)
return processed_movies
def getMetaData(self, group, folder = '', release_download = None):
data = {}
files = list(group['files']['movie'])
for cur_file in files:
if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue # Ignore smaller files
meta = self.getMeta(cur_file)
try:
data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video']))
data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio']))
data['resolution_width'] = meta.get('resolution_width', 720)
data['resolution_height'] = meta.get('resolution_height', 480)
data['audio_channels'] = meta.get('audio_channels', 2.0)
                data['aspect'] = float(meta.get('resolution_width', 720)) / meta.get('resolution_height', 480)
except:
log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc()))
pass
if data.get('audio'): break
        # Use the quality guess first; if that fails, use the quality we wanted to download
data['quality'] = None
if release_download and release_download.get('quality'):
data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True)
if not data['quality']:
data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True)
if not data['quality']:
data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True)
data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 or data['quality'].get('hd') else 'SD'
filename = re.sub('(.cp\(tt[0-9{7}]+\))', '', files[0])
data['group'] = self.getGroup(filename[len(folder):])
data['source'] = self.getSourceMedia(filename)
return data
def getMeta(self, filename):
try:
p = enzyme.parse(filename)
# Video codec
vc = ('h264' if p.video[0].codec == 'AVC1' else p.video[0].codec).lower()
# Audio codec
ac = p.audio[0].codec
try: ac = self.audio_codec_map.get(p.audio[0].codec)
except: pass
return {
'video': vc,
'audio': ac,
'resolution_width': tryInt(p.video[0].width),
'resolution_height': tryInt(p.video[0].height),
'audio_channels': p.audio[0].channels,
}
except ParseError:
log.debug('Failed to parse meta for %s', filename)
except NoParserError:
log.debug('No parser found for %s', filename)
except:
log.debug('Failed parsing %s', filename)
return {}
def getSubtitleLanguage(self, group):
detected_languages = {}
# Subliminal scanner
paths = None
try:
paths = group['files']['movie']
scan_result = []
for p in paths:
if not group['is_dvd']:
video = Video.from_path(toUnicode(p))
video_result = [(video, video.scan())]
scan_result.extend(video_result)
for video, detected_subtitles in scan_result:
for s in detected_subtitles:
if s.language and s.path not in paths:
detected_languages[s.path] = [s.language]
except:
log.debug('Failed parsing subtitle languages for %s: %s', (paths, traceback.format_exc()))
# IDX
for extra in group['files']['subtitle_extra']:
try:
if os.path.isfile(extra):
output = open(extra, 'r')
txt = output.read()
output.close()
idx_langs = re.findall('\nid: (\w+)', txt)
sub_file = '%s.sub' % os.path.splitext(extra)[0]
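                    # VobSub subtitles ship as an .idx/.sub pair sharing a
                    # basename, so the languages parsed from the .idx apply
                    # to the matching .sub file.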
if len(idx_langs) > 0 and os.path.isfile(sub_file):
detected_languages[sub_file] = idx_langs
except:
log.error('Failed parsing subtitle idx for %s: %s', (extra, traceback.format_exc()))
return detected_languages
def determineMovie(self, group, release_download = None):
# Get imdb id from downloader
imdb_id = release_download and release_download.get('imdb_id')
if imdb_id:
            log.debug('Found movie via imdb id from its download id: %s', release_download.get('imdb_id'))
files = group['files']
# Check for CP(imdb_id) string in the file paths
if not imdb_id:
for cur_file in files['movie']:
imdb_id = self.getCPImdb(cur_file)
if imdb_id:
log.debug('Found movie via CP tag: %s', cur_file)
break
# Check and see if nfo contains the imdb-id
nfo_file = None
if not imdb_id:
try:
for nf in files['nfo']:
imdb_id = getImdb(nf, check_inside = True)
if imdb_id:
log.debug('Found movie via nfo file: %s', nf)
nfo_file = nf
break
except:
pass
# Check and see if filenames contains the imdb-id
if not imdb_id:
try:
for filetype in files:
for filetype_file in files[filetype]:
imdb_id = getImdb(filetype_file)
if imdb_id:
                            log.debug('Found movie via imdb in filename: %s', filetype_file)
break
except:
pass
# Check if path is already in db
if not imdb_id:
db = get_session()
for cf in files['movie']:
f = db.query(File).filter_by(path = toUnicode(cf)).first()
try:
imdb_id = f.library[0].identifier
log.debug('Found movie via database: %s', cf)
cur_file = cf
break
except:
pass
# Search based on identifiers
if not imdb_id:
for identifier in group['identifiers']:
if len(identifier) > 2:
try: filename = list(group['files'].get('movie'))[0]
except: filename = None
name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None)
if name_year.get('name') and name_year.get('year'):
movie = fireEvent('movie.search', q = '%(name)s %(year)s' % name_year, merge = True, limit = 1)
if len(movie) > 0:
imdb_id = movie[0].get('imdb')
log.debug('Found movie via search: %s', cur_file)
if imdb_id: break
else:
                    log.debug('Identifier too short to use for search: %s', identifier)
if imdb_id:
return fireEvent('library.add.movie', attrs = {
'identifier': imdb_id
}, update_after = False, single = True)
        log.error('No imdb_id found for %s. Add an NFO file with the IMDB id or add the year to the filename.', group['identifiers'])
return {}
def getCPImdb(self, string):
try:
m = re.search(self.cp_imdb, string.lower())
id = m.group('id')
if id: return id
except AttributeError:
pass
return False
def removeCPTag(self, name):
try:
return re.sub(self.cp_imdb, '', name)
except:
pass
return name
def getSamples(self, files):
return set(filter(lambda s: self.isSampleFile(s), files))
def getMediaFiles(self, files):
def test(s):
return self.filesizeBetween(s, self.file_sizes['movie']) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s)
return set(filter(test, files))
def getMovieExtras(self, files):
return set(filter(lambda s: getExt(s.lower()) in self.extensions['movie_extra'], files))
def getDVDFiles(self, files):
def test(s):
return self.isDVDFile(s)
return set(filter(test, files))
def getSubtitles(self, files):
return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle'], files))
def getSubtitlesExtras(self, files):
return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle_extra'], files))
def getNfo(self, files):
return set(filter(lambda s: getExt(s.lower()) in self.extensions['nfo'], files))
def getTrailers(self, files):
def test(s):
return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['trailer'])
return set(filter(test, files))
def getImages(self, files):
def test(s):
return getExt(s.lower()) in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tbn']
files = set(filter(test, files))
images = {
'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['backdrop']), files))
}
# Rest
images['rest'] = files - images['backdrop']
return images
def isDVDFile(self, file_name):
if list(set(file_name.lower().split(os.path.sep)) & set(['video_ts', 'audio_ts'])):
return True
for needle in ['vts_', 'video_ts', 'audio_ts', 'bdmv', 'certificate']:
if needle in file_name.lower():
return True
return False
def keepFile(self, filename):
# ignoredpaths
for i in self.ignored_in_path:
if i in filename.lower():
log.debug('Ignored "%s" contains "%s".', (filename, i))
return False
# All is OK
return True
def isSampleFile(self, filename):
is_sample = re.search('(^|[\W_])sample\d*[\W_]', filename.lower())
if is_sample: log.debug('Is sample file: %s', filename)
return is_sample
    def filesizeBetween(self, file, file_size = None):
        if file_size is None: file_size = {}
        try:
return (file_size.get('min', 0) * 1048576) < os.path.getsize(file) < (file_size.get('max', 100000) * 1048576)
except:
log.error('Couldn\'t get filesize of %s.', file)
return False
def createStringIdentifier(self, file_path, folder = '', exclude_filename = False):
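        """ Reduce a file path to a simplified 'name year' string used to
        group related files, e.g. (illustrative example, not from the
        original source) '/movies/Iron.Man.2008.720p/movie.mkv' -> 'iron man 2008'.
        """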
year = self.findYear(file_path)
identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder
identifier = os.path.splitext(identifier)[0] # ext
try:
path_split = splitString(identifier, os.path.sep)
identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename
except: pass
if exclude_filename:
identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])]
# multipart
identifier = self.removeMultipart(identifier)
# remove cptag
identifier = self.removeCPTag(identifier)
# groups, release tags, scenename cleaner, regex isn't correct
identifier = re.sub(self.clean, '::', simplifyString(identifier)).strip(':')
# Year
if year and identifier[:4] != year:
identifier = '%s %s' % (identifier.split(year)[0].strip(), year)
else:
identifier = identifier.split('::')[0]
# Remove duplicates
out = []
for word in identifier.split():
if not word in out:
out.append(word)
identifier = ' '.join(out)
return simplifyString(identifier)
def removeMultipart(self, name):
for regex in self.multipart_regex:
try:
found = re.sub(regex, '', name)
if found != name:
name = found
except:
pass
return name
def getPartNumber(self, name):
for regex in self.multipart_regex:
try:
found = re.search(regex, name)
if found:
return found.group(1)
return 1
except:
pass
return 1
def getCodec(self, filename, codecs):
codecs = map(re.escape, codecs)
try:
codec = re.search('[^A-Z0-9](?P<codec>' + '|'.join(codecs) + ')[^A-Z0-9]', filename, re.I)
return (codec and codec.group('codec')) or ''
except:
return ''
def getGroup(self, file):
try:
match = re.findall('\-([A-Z0-9]+)[\.\/]', file, re.I)
return match[-1] or ''
except:
return ''
def getSourceMedia(self, file):
for media in self.source_media:
for alias in self.source_media[media]:
if alias in file.lower():
return media
return None
def findYear(self, text):
# Search year inside () or [] first
matches = re.findall('(\(|\[)(?P<year>19[0-9]{2}|20[0-9]{2})(\]|\))', text)
if matches:
return matches[-1][1]
# Search normal
matches = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', text)
if matches:
return matches[-1]
return ''
def getReleaseNameYear(self, release_name, file_name = None):
release_name = release_name.strip(' .-_')
# Use guessit first
guess = {}
if file_name:
try:
guessit = guess_movie_info(toUnicode(file_name))
if guessit.get('title') and guessit.get('year'):
guess = {
'name': guessit.get('title'),
'year': guessit.get('year'),
}
except:
log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc()))
# Backup to simple
cleaned = ' '.join(re.split('\W+', simplifyString(release_name)))
cleaned = re.sub(self.clean, ' ', cleaned)
for year_str in [file_name, release_name, cleaned]:
if not year_str: continue
year = self.findYear(year_str)
if year:
break
cp_guess = {}
if year: # Split name on year
try:
movie_name = cleaned.rsplit(year, 1).pop(0).strip()
if movie_name:
cp_guess = {
'name': movie_name,
'year': int(year),
}
except:
pass
if not cp_guess: # Split name on multiple spaces
try:
movie_name = cleaned.split(' ').pop(0).strip()
cp_guess = {
'name': movie_name,
'year': int(year) if movie_name[:4] != year else 0,
}
except:
pass
if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')):
return cp_guess
elif guess == {}:
return cp_guess
return guess
| lebabouin/CouchPotatoServer-develop | couchpotato/core/plugins/scanner/main.py | Python | gpl-3.0 | 32,904 |
def kab(n):
if n in (0, 1):
return [1]
for i in range(n):
b yield i * 2
| TakesxiSximada/TIL | python/python3.6/err2.py | Python | apache-2.0 | 95 |
"""
pyText2Pdf - Python script to convert plain text files into Adobe
Acrobat PDF files.
Version 1.2
Author: Anand B Pillai <abpillai at lycos dot com>
Keywords: python, tools, converter, pdf, text2pdf, adobe, acrobat,
processing.
Copyright (C) 2003-2004 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
Commentary:
Modification History:
Mon Feb 17 12:20:13 2003 Changed option parsing algorithm to use
getopt. Use __main__ calling convention.
Bug in FF character fixed.
Thu Apr 10 11:26:58 2003 Modified to use python style strings
and function objects.
July 1 2003 Fixed help string errors. Added the
Creator property.
Feb 25 2004 Rewrote argument parser to remove
duplicate code.Use string.join() instead
of concatenation. Modified sys.exit()
calls to print messages.
Code:
"""
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189858
import sys, os
import string
import time
import getopt
LF_EXTRA=0
LINE_END='\015'
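# LF_EXTRA compensates for platforms that expand '\n' on write (0 assumes no
# expansion); LINE_END is a carriage return ('\015' == '\r'), used to pad
# each cross-reference entry to the fixed 20-byte PDF record size.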
# form feed character (^L)
FF=chr(12)
ENCODING_STR = """\
/Encoding <<
/Differences [ 0 /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /space /exclam
/quotedbl /numbersign /dollar /percent /ampersand
/quoteright /parenleft /parenright /asterisk /plus /comma
/hyphen /period /slash /zero /one /two /three /four /five
/six /seven /eight /nine /colon /semicolon /less /equal
/greater /question /at /A /B /C /D /E /F /G /H /I /J /K /L
/M /N /O /P /Q /R /S /T /U /V /W /X /Y /Z /bracketleft
/backslash /bracketright /asciicircum /underscore
/quoteleft /a /b /c /d /e /f /g /h /i /j /k /l /m /n /o /p
/q /r /s /t /u /v /w /x /y /z /braceleft /bar /braceright
/asciitilde /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/dotlessi /grave /acute /circumflex /tilde /macron /breve
/dotaccent /dieresis /.notdef /ring /cedilla /.notdef
/hungarumlaut /ogonek /caron /space /exclamdown /cent
/sterling /currency /yen /brokenbar /section /dieresis
/copyright /ordfeminine /guillemotleft /logicalnot /hyphen
/registered /macron /degree /plusminus /twosuperior
/threesuperior /acute /mu /paragraph /periodcentered
/cedilla /onesuperior /ordmasculine /guillemotright
/onequarter /onehalf /threequarters /questiondown /Agrave
/Aacute /Acircumflex /Atilde /Adieresis /Aring /AE
/Ccedilla /Egrave /Eacute /Ecircumflex /Edieresis /Igrave
/Iacute /Icircumflex /Idieresis /Eth /Ntilde /Ograve
/Oacute /Ocircumflex /Otilde /Odieresis /multiply /Oslash
/Ugrave /Uacute /Ucircumflex /Udieresis /Yacute /Thorn
/germandbls /agrave /aacute /acircumflex /atilde /adieresis
/aring /ae /ccedilla /egrave /eacute /ecircumflex
/edieresis /igrave /iacute /icircumflex /idieresis /eth
/ntilde /ograve /oacute /ocircumflex /otilde /odieresis
/divide /oslash /ugrave /uacute /ucircumflex /udieresis
/yacute /thorn /ydieresis ]
>>
"""
PROG_HELP = """\
%(progname)s [options] [filename]
%(progname)s makes a 7-bit clean PDF file from any input file.
It reads from a named file, and writes the PDF file to a file specified by
the user, otherwise to a file with '.pdf' appended to the input file.
Author: Anand B Pillai.
Copyright (C) 2003-2004 Free Software Foundation, http://www.fsf.org
There are various options as follows:
-h\t\tshow this message\n
-o/-O\t\tdirect output to this file
-f<font>\tuse PostScript <font> (must be in standard 14, default: Courier)
-I\t\tuse ISOLatin1Encoding
-s<size>\tuse font at given pointsize (default 10) points\n
-v<dist>\tuse given line spacing (default 12) points
-l<lines>\tlines per page (default 60, determined automatically\n\t\tif unspecified)
-c<chars>\tmaximum characters per line (default 80)
-t<spaces>\tspaces per tab character (default 4)
-F\t\tignore formfeed characters (^L)
\t\t(i.e, accept formfeed characters as pagebreaks)\n
-A4\t\tuse A4 paper (default Letter)
-A3\t\tuse A3 paper (default Letter)
-x<width>\tindependent paper width in points
-y<height>\tindependent paper height in points
-2\t\tformat in 2 columns
-L\t\tlandscape mode
Note that where one variable is implied by two options, the second option
takes precedence for that variable. (e.g. -A4 -y500)
In landscape mode, page width and height are simply swapped over before
formatting, no matter how or when they were defined.
"""
class pyText2Pdf:
def __init__(self,fileName,savePath):
# version number
self._version="1.1.1"
# iso encoding flag
self._IsoEnc=0
# formfeeds flag
self._doFFs=0
self._progname="PyText2Pdf"
self._appname = "".join((self._progname, " Version ", str(self._version)))
# default font
self._font="/Courier"
# default font size
self._ptSize=10
# default vert space
self._vertSpace=12
self._lines=0
# number of characters in a row
self._cols=80
self._columns=1
# page ht
self._pageHt=792
# page wd
self._pageWd=612
# input file
self._ifile = fileName
# output file
self._ofile = savePath
# default tab width
self._tab=4
# input file descriptor
self._ifs=None
# output file descriptor
self._ofs=None
# landscape flag
self._landscape=0
# marker objects
self._curobj = 5
self._pageObs = [0]
self._locations = [0,0,0,0,0,0]
self._pageNo=0
# file position marker
self._fpos=0
def argsCallBack(self, argslist, listoftuples=False):
""" Callback function called by argument parser.
Helps to remove duplicate code """
x = 0
while x<len(argslist):
item = argslist[x]
if listoftuples:
o, a = item
else:
o = item
if o == '-h':
self.ShowHelp()
elif o == '-I':
self._IsoEnc=1
elif o == '-F':
self._doFFs=1
elif o == '-2':
self._columns=2
elif o == '-L':
self._landscape=1
            if o in ('-f', '-A', '-s', '-v', '-l', '-c', '-t', '-x', '-y', '-o', '-O'):
if not listoftuples:
x += 1
try:
a = argslist[x]
except:
msg = "Argument error for option " + o
sys.exit(msg)
if a == "" or a[0] == "-":
msg = "Error: argument error for option " + o
sys.exit(msg)
elif o == '-f':
self._font='/' + a
elif o == '-A':
if a == '3':
self._pageWd=842
self._pageHt=1190
elif a =='4':
self._pageWd=595
self._pageHt=842
else:
psz=o[1]+a
##                    print self._progname, ': ignoring unknown paper size ', psz
elif o == '-s':
self._ptSize=int(a)
if self._ptSize<1:
self._ptSize=1
elif o == '-v':
self._vertSpace=int(a)
if self._vertSpace<1:
self._vertSpace=1
elif o == '-l':
self._lines=int(a)
if self._lines<1:
self._lines=1
elif o == '-c':
self._cols=int(a)
if self._cols<4:
self._cols=4
elif o == '-t':
self._tab=int(a)
if self._tab<1:
self._tab=1
elif o == '-x':
self._pageWd=int(a)
if self._pageWd<72:
self._pageWd=72
elif o == '-y':
self._pageHt=int(a)
if self._pageHt<72:
self._pageHt=72
elif o in ('-o', '-O'):
self._ofile=a
else:
ERROR = True
## print self._progname, ': ignoring invalid switch: ', o
x += 1
def parseArgs(self):
if len(sys.argv) == 1:
self.ShowHelp()
arguments=sys.argv[1:]
optlist, args = getopt.getopt(arguments, 'hIF2Lf:A:s:v:l:c:t:x:y:o:')
# input file is the first element in arg list
# or last element in options list (in case of an error!)
if len(args):
self._ifile=args[0]
else:
l=len(optlist)
tup=optlist[l-1]
# parse options list
if len(optlist):
self.argsCallBack( optlist, listoftuples=True )
else:
self.argsCallBack( args )
## if self._landscape:
## print 'Landscape option on...'
## if self._columns==2:
## print 'Printing in two columns...'
## if self._doFFs:
## print 'Ignoring form feed character...'
## if self._IsoEnc:
## print 'Using ISO Latin Encoding...'
## print 'Using font', self._font[1:], ' size =', self._ptSize
def writestr(self, str):
""" Write string to output file descriptor.
All output operations go through this function.
We keep the current file position also here"""
# update current file position
self._fpos += len(str)
for x in range(0, len(str)):
if str[x] == '\n':
self._fpos += LF_EXTRA
try:
self._ofs.write(str)
except IOError, e:
print e
return -1
return 0
def Convert(self):
""" Perform the actual conversion """
if self._landscape:
# swap page width & height
tmp = self._pageHt
self._pageHt = self._pageWd
self._pageWd = tmp
if self._lines==0:
self._lines = (self._pageHt - 72)/self._vertSpace
if self._lines < 1:
self._lines=1
try:
self._ifs=open(self._ifile)
        except IOError, (errno, strerror):
print 'Error: Could not open file to read --->', self._ifile
sys.exit(3)
if self._ofile=="":
self._ofile=self._ifile + '.pdf'
try:
self._ofs = open(self._ofile, 'wb')
        except IOError, (errno, strerror):
print 'Error: Could not open file to write --->', self._ofile
sys.exit(3)
## print 'Input file =>', self._ifile
## print 'Writing pdf file', self._ofile, '...'
self.WriteHeader(self._ifile)
self.WritePages()
self.WriteRest()
## print 'Wrote file', self._ofile
self._ifs.close()
self._ofs.close()
return 0
def WriteHeader(self, title):
"""Write the PDF header"""
ws = self.writestr
t=time.localtime()
timestr=str(time.strftime("D:%Y%m%d%H%M%S", t))
ws("%PDF-1.4\n")
self._locations[1] = self._fpos
ws("1 0 obj\n")
ws("<<\n")
buf = "".join(("/Creator (", self._appname, " By Anand B Pillai )\n"))
ws(buf)
buf = "".join(("/CreationDate (", timestr, ")\n"))
ws(buf)
buf = "".join(("/Producer (", self._appname, "(\\251 Free Software Foundation, 2004))\n"))
ws(buf)
if title:
buf = "".join(("/Title (", title, ")\n"))
ws(buf)
ws(">>\n")
ws("endobj\n")
self._locations[2] = self._fpos
ws("2 0 obj\n")
ws("<<\n")
ws("/Type /Catalog\n")
ws("/Pages 3 0 R\n")
ws(">>\n")
ws("endobj\n")
self._locations[4] = self._fpos
ws("4 0 obj\n")
ws("<<\n")
buf = "".join(("/BaseFont ", str(self._font), " /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font >>\n"))
ws(buf)
if self._IsoEnc:
ws(ENCODING_STR)
ws(">>\n")
ws("endobj\n")
self._locations[5] = self._fpos
ws("5 0 obj\n")
ws("<<\n")
ws(" /Font << /F1 4 0 R >>\n")
ws(" /ProcSet [ /PDF /Text ]\n")
ws(">>\n")
ws("endobj\n")
def StartPage(self):
""" Start a page of data """
ws = self.writestr
self._pageNo += 1
self._curobj += 1
self._locations.append(self._fpos)
self._locations[self._curobj]=self._fpos
self._pageObs.append(self._curobj)
self._pageObs[self._pageNo] = self._curobj
buf = "".join((str(self._curobj), " 0 obj\n"))
ws(buf)
ws("<<\n")
ws("/Type /Page\n")
ws("/Parent 3 0 R\n")
ws("/Resources 5 0 R\n")
self._curobj += 1
buf = "".join(("/Contents ", str(self._curobj), " 0 R\n"))
ws(buf)
ws(">>\n")
ws("endobj\n")
self._locations.append(self._fpos)
self._locations[self._curobj] = self._fpos
buf = "".join((str(self._curobj), " 0 obj\n"))
ws(buf)
ws("<<\n")
buf = "".join(("/Length ", str(self._curobj + 1), " 0 R\n"))
ws(buf)
ws(">>\n")
ws("stream\n")
strmPos = self._fpos
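        # Standard PDF text operators follow: BT/ET begin and end a text
        # object, Tf selects font and size, Tm sets the text matrix
        # (position), and TL sets the leading used by the ' (move-and-show)
        # operator emitted in WritePages.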
ws("BT\n");
buf = "".join(("/F1 ", str(self._ptSize), " Tf\n"))
ws(buf)
buf = "".join(("1 0 0 1 50 ", str(self._pageHt - 40), " Tm\n"))
ws(buf)
buf = "".join((str(self._vertSpace), " TL\n"))
ws(buf)
return strmPos
def EndPage(self, streamStart):
"""End a page of data """
ws = self.writestr
ws("ET\n")
streamEnd = self._fpos
ws("endstream\n")
ws("endobj\n")
self._curobj += 1
self._locations.append(self._fpos)
self._locations[self._curobj] = self._fpos
buf = "".join((str(self._curobj), " 0 obj\n"))
ws(buf)
buf = "".join((str(streamEnd - streamStart), '\n'))
ws(buf)
ws('endobj\n')
def WritePages(self):
"""Write pages as PDF"""
ws = self.writestr
beginstream=0
lineNo, charNo=0,0
ch, column=0,0
padding,i=0,0
atEOF=0
while not atEOF:
beginstream = self.StartPage()
column=1
while column <= self._columns:
column += 1
atFF=0
atBOP=0
lineNo=0
while lineNo < self._lines and not atFF and not atEOF:
lineNo += 1
ws("(")
charNo=0
while charNo < self._cols:
charNo += 1
ch = self._ifs.read(1)
cond = ((ch != '\n') and not(ch==FF and self._doFFs) and (ch != ''))
if not cond:
break
if ord(ch) >= 32 and ord(ch) <= 127:
if ch == '(' or ch == ')' or ch == '\\':
ws("\\")
ws(ch)
else:
if ord(ch) == 9:
                                padding = self._tab - ((charNo - 1) % self._tab)
for i in range(padding):
ws(" ")
charNo += (padding -1)
else:
if ch != FF:
# write \xxx form for dodgy character
buf = "".join(('\\', ch))
ws(buf)
else:
# dont print anything for a FF
charNo -= 1
ws(")'\n")
if ch == FF:
atFF=1
if lineNo == self._lines:
atBOP=1
if atBOP:
pos=0
ch = self._ifs.read(1)
pos= self._ifs.tell()
if ch == FF:
ch = self._ifs.read(1)
pos=self._ifs.tell()
# python's EOF signature
if ch == '':
atEOF=1
else:
# push position back by one char
self._ifs.seek(pos-1)
elif atFF:
ch = self._ifs.read(1)
pos=self._ifs.tell()
if ch == '':
atEOF=1
else:
self._ifs.seek(pos-1)
if column < self._columns:
buf = "".join(("1 0 0 1 ",
str((self._pageWd/2 + 25)),
" ",
str(self._pageHt - 40),
" Tm\n"))
ws(buf)
self.EndPage(beginstream)
def WriteRest(self):
"""Finish the file"""
ws = self.writestr
self._locations[3] = self._fpos
ws("3 0 obj\n")
ws("<<\n")
ws("/Type /Pages\n")
buf = "".join(("/Count ", str(self._pageNo), "\n"))
ws(buf)
buf = "".join(("/MediaBox [ 0 0 ", str(self._pageWd), " ", str(self._pageHt), " ]\n"))
ws(buf)
ws("/Kids [ ")
for i in range(1, self._pageNo+1):
buf = "".join((str(self._pageObs[i]), " 0 R "))
ws(buf)
ws("]\n")
ws(">>\n")
ws("endobj\n")
xref = self._fpos
ws("xref\n")
buf = "".join(("0 ", str((self._curobj) + 1), "\n"))
ws(buf)
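        # Each cross-reference entry must be exactly 20 bytes: a 10-digit
        # byte offset, a 5-digit generation number and an 'n' (in use) or
        # 'f' (free) flag, padded out with spaces and the line terminator.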
buf = "".join(("0000000000 65535 f ", str(LINE_END)))
ws(buf)
for i in range(1, self._curobj + 1):
val = self._locations[i]
buf = "".join((string.zfill(str(val), 10), " 00000 n ", str(LINE_END)))
ws(buf)
ws("trailer\n")
ws("<<\n")
buf = "".join(("/Size ", str(self._curobj + 1), "\n"))
ws(buf)
ws("/Root 2 0 R\n")
ws("/Info 1 0 R\n")
ws(">>\n")
ws("startxref\n")
buf = "".join((str(xref), "\n"))
ws(buf)
ws("%%EOF\n")
def ShowHelp(self):
"""Show help on this program"""
sys.exit( PROG_HELP % {'progname': self._progname} )
def main():
    # The constructor takes fileName and savePath; start with empty
    # placeholders and let parseArgs() fill them in from the command line.
    pdfclass = pyText2Pdf('', '')
pdfclass.parseArgs()
pdfclass.Convert()
if __name__ == "__main__":
main()
| guptalab/dnacloud | source/pytxt2pdf.py | Python | mit | 20,500 |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name="xlsxi18n",
py_modules=['xlsxi18n'],
version="0.0",
description="Simple command line utility to generate Android language files for your app from xlsx files.",
license="MIT",
author="Andrea Stagi",
author_email="[email protected]",
url="https://github.com/atooma/xlsxi18n",
keywords= "i18n app android test team script",
install_requires=[
"openpyxl",
],
entry_points = {
'console_scripts': [
'xlsxi18n = xlsxi18n:main',
],
},
zip_safe = True) | astagi/xlsxi18n | setup.py | Python | mit | 641 |
"""
Miscellaneous utilities.
"""
import collections.abc
import dis
import sys
import pyte
from pyte.exc import ValidationError
from . import tokens
PY36 = sys.version_info[0:2] >= (3, 6)
def ensure_instruction(instruction: int) -> bytes:
"""
Wraps an instruction to be Python 3.6+ compatible. This does nothing on Python 3.5 and below.
This is most useful for operating on bare, single-width instructions such as
``RETURN_FUNCTION`` in a version portable way.
:param instruction: The instruction integer to use.
:return: A safe bytes object, if applicable.
"""
if PY36:
return instruction.to_bytes(2, byteorder="little")
else:
return instruction.to_bytes(1, byteorder="little")
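# Illustrative example: ensure_instruction(dis.opmap['RETURN_VALUE']) yields
# b'S\x00' on 3.6+ (opcode plus a padding arg byte) and b'S' on 3.5.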
def pack_value(index: int) -> bytes:
"""
Small helper value to pack an index value into bytecode.
This is used for version compat between 3.5- and 3.6+
:param index: The item to pack.
:return: The packed item.
"""
if PY36:
return index.to_bytes(1, byteorder="little")
else:
return index.to_bytes(2, byteorder="little")
def generate_simple_call(opcode: int, index: int):
"""
Generates a simple call, with an index for something.
:param opcode: The opcode to generate.
:param index: The index to use as an argument.
:return:
"""
bs = b""
# add the opcode
bs += opcode.to_bytes(1, byteorder="little")
# Add the index
if isinstance(index, int):
if PY36:
bs += index.to_bytes(1, byteorder="little")
else:
bs += index.to_bytes(2, byteorder="little")
else:
bs += index
return bs
def generate_bytecode_from_obb(obb: object, previous: bytes) -> bytes:
"""
Generates a bytecode from an object.
:param obb: The object to generate.
:param previous: The previous bytecode to use when generating subobjects.
:return: The generated bytecode.
"""
# Generates bytecode from a specified object, be it a validator or an int or bytes even.
if isinstance(obb, pyte.superclasses._PyteOp):
return obb.to_bytes(previous)
elif isinstance(obb, (pyte.superclasses._PyteAugmentedComparator,
pyte.superclasses._PyteAugmentedValidator._FakeMathematicalOP)):
return obb.to_bytes(previous)
elif isinstance(obb, pyte.superclasses._PyteAugmentedValidator):
obb.validate()
return obb.to_load()
elif isinstance(obb, int):
return obb.to_bytes((obb.bit_length() + 7) // 8, byteorder="little") or b''
elif isinstance(obb, bytes):
return obb
else:
raise TypeError("`{}` was not a valid bytecode-encodable item".format(obb))
def generate_load_global(index: int) -> bytes:
"""
Generates a LOAD_GLOBAL instruction.
:param index: The index of the global to load.
:return: The generated bytecode.
"""
return generate_simple_call(tokens.LOAD_GLOBAL, index)
def generate_load_fast(index: int) -> bytes:
"""
Generates a LOAD_FAST operation.
:param index: The index of the varname to load.
:return: The generated bytecode.
"""
return generate_simple_call(tokens.LOAD_FAST, index)
def generate_load_const(index: int) -> bytes:
"""
Generates a LOAD_CONST instruction.
:param index: The index of the const to load.
:return: The generated bytecode.
"""
return generate_simple_call(tokens.LOAD_CONST, index)
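# Illustrative combined use (a sketch, assuming index 0 is valid in the
# target code object's co_consts):
#   bytecode = generate_load_const(0) + ensure_instruction(dis.opmap["RETURN_VALUE"])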
# https://stackoverflow.com/a/2158532
def flatten(l):
for el in l:
        if isinstance(el, collections.abc.Iterable) and not isinstance(el, (str, bytes)):
for sub in flatten(el):
yield sub
else:
yield el
# "fixed" functions
# mostly copied from dis, but with some proper error validation
def _get_const_info(const_index, const_list):
"""
Helper to get optional details about const references
Returns the dereferenced constant and its repr if the constant
list is defined.
Otherwise returns the constant index and its repr().
"""
argval = const_index
if const_list is not None:
try:
argval = const_list[const_index]
except IndexError:
raise ValidationError("Consts value out of range: {}".format(const_index)) from None
return argval, repr(argval)
def _get_name_info(name_index, name_list):
"""Helper to get optional details about named references
Returns the dereferenced name as both value and repr if the name
list is defined.
Otherwise returns the name index and its repr().
"""
argval = name_index
if name_list is not None:
try:
argval = name_list[name_index]
except IndexError:
raise ValidationError("Names value out of range: {}".format(name_index)) from None
argrepr = argval
else:
argrepr = repr(argval)
return argval, argrepr
dis._get_const_info = _get_const_info
dis._get_name_info = _get_name_info
if sys.version_info[0:2] < (3, 4):
from pyte import backports
backports.apply()
| SunDwarf/Pyte | pyte/util.py | Python | mit | 5,105 |
#!/usr/bin/env python
# coding: utf-8
# pylint: disable=global-statement
"""This module runs dev_appserver.py, creates virtualenv, performs requirements check,
creates necessary directories
"""
from distutils import spawn
import argparse
import os
import platform
import shutil
import sys
###############################################################################
# Options
###############################################################################
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'-o', '--host', dest='host', action='store', default='127.0.0.1',
help='the host to start the dev_appserver.py',
)
PARSER.add_argument(
'-p', '--port', dest='port', action='store', default='8080',
help='the port to start the dev_appserver.py',
)
PARSER.add_argument(
'-f', '--flush', dest='flush', action='store_true',
help='clears the datastore, blobstore, etc',
)
PARSER.add_argument(
'--appserver-args', dest='args', nargs=argparse.REMAINDER, default=[],
help='all following args are passed to dev_appserver.py',
)
ARGS = PARSER.parse_args()
###############################################################################
# Globals
###############################################################################
GAE_PATH = ''
IS_WINDOWS = platform.system() == 'Windows'
###############################################################################
# Directories
###############################################################################
DIR_MAIN = 'main'
DIR_TEMP = 'temp'
DIR_VENV = os.path.join(DIR_TEMP, 'venv')
DIR_LIB = os.path.join(DIR_MAIN, 'lib')
DIR_LIBX = os.path.join(DIR_MAIN, 'libx')
FILE_REQUIREMENTS = 'requirements.txt'
FILE_VENV = os.path.join(DIR_VENV, 'Scripts', 'activate.bat') \
if IS_WINDOWS \
else os.path.join(DIR_VENV, 'bin', 'activate')
DIR_STORAGE = os.path.join(DIR_TEMP, 'storage')
###############################################################################
# Helpers
###############################################################################
def make_dirs(directory):
"""Creates directories"""
if not os.path.exists(directory):
os.makedirs(directory)
def os_execute(executable, args, source, target, append=False):
"""Executes OS command"""
operator = '>>' if append else '>'
os.system('%s %s %s %s %s' % (executable, args, source, operator, target))
def listdir(directory, split_ext=False):
"""Lists directory"""
try:
if split_ext:
return [os.path.splitext(dir_)[0] for dir_ in os.listdir(directory)]
else:
return os.listdir(directory)
except OSError:
return []
def site_packages_path():
"""Gets path of site-packages folder with third party libraries on system"""
if IS_WINDOWS:
return os.path.join(DIR_VENV, 'Lib', 'site-packages')
py_version = 'python%s.%s' % sys.version_info[:2]
return os.path.join(DIR_VENV, 'lib', py_version, 'site-packages')
def create_virtualenv():
"""Creates virtialenv into temp folder if it doesn't exists"""
if not os.path.exists(FILE_VENV):
os.system('virtualenv --no-site-packages %s' % DIR_VENV)
os.system('echo %s >> %s' % (
'set PYTHONPATH=' if IS_WINDOWS else 'unset PYTHONPATH', FILE_VENV
))
pth_file = os.path.join(site_packages_path(), 'gae.pth')
echo_to = 'echo %s >> {pth}'.format(pth=pth_file)
os.system(echo_to % find_gae_path())
os.system(echo_to % os.path.abspath(DIR_LIBX))
fix_path_cmd = 'import dev_appserver; dev_appserver.fix_sys_path()'
os.system(echo_to % (
fix_path_cmd if IS_WINDOWS else '"%s"' % fix_path_cmd
))
return True
def exec_pip_commands(command):
"""Executes pip command on system"""
script = []
if create_virtualenv():
activate_cmd = 'call %s' if IS_WINDOWS else 'source %s'
activate_cmd %= FILE_VENV
script.append(activate_cmd)
script.append('echo %s' % command)
script.append(command)
script = '&'.join(script) if IS_WINDOWS else \
'/bin/bash -c "%s"' % ';'.join(script)
os.system(script)
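# Usage sketch: install_py_libs() below drives pip through the virtualenv
# with a call like
#     exec_pip_commands('pip install -q -r requirements.txt')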
def install_py_libs():
"""Installs requirements from requirements file and then copies them
from site-packages folder into main/lib folder
Alse excludes files that don't need to be deployed"""
exec_pip_commands('pip install -q -r %s' % FILE_REQUIREMENTS)
exclude_ext = ['.pth', '.pyc', '.egg-info', '.dist-info']
exclude_prefix = ['setuptools-', 'pip-', 'Pillow-']
exclude = [
'test', 'tests', 'pip', 'setuptools', '_markerlib', 'PIL',
'easy_install.py', 'pkg_resources.py'
]
def _exclude_prefix(pkg): # pylint: disable=missing-docstring
for prefix in exclude_prefix:
if pkg.startswith(prefix):
return True
return False
def _exclude_ext(pkg): # pylint: disable=missing-docstring
for ext in exclude_ext:
if pkg.endswith(ext):
return True
return False
def _get_dest(pkg): # pylint: disable=missing-docstring
make_dirs(DIR_LIB)
return os.path.join(DIR_LIB, pkg)
site_packages = site_packages_path()
dir_libs = listdir(DIR_LIB)
dir_libs.extend(listdir(DIR_LIBX))
for dir_ in listdir(site_packages):
if dir_ in dir_libs or dir_ in exclude:
continue
if _exclude_prefix(dir_) or _exclude_ext(dir_):
continue
src_path = os.path.join(site_packages, dir_)
copy = shutil.copy if os.path.isfile(src_path) else shutil.copytree
copy(src_path, _get_dest(dir_))
def install_dependencies():
"""Installs python dependencies"""
make_dirs(DIR_TEMP)
install_py_libs()
###############################################################################
# Doctor
###############################################################################
def check_requirement(check_func):
"""Executes check function for given requirement
Args:
check_func (function): check function, which should return True if requirement
is satisfied
Returns:
bool: True if requirement is OK
"""
result, name = check_func()
if not result:
print '[ERR] %s was NOT FOUND' % name
return False
return True
def find_gae_path():
"""Tries to find GAE's dev_appserver.py executable
Returns:
string: Absolute path of dev_appserver.py or empty string
"""
global GAE_PATH
if GAE_PATH:
return GAE_PATH
if IS_WINDOWS:
gae_path = None
for path in os.environ['PATH'].split(os.pathsep):
if os.path.isfile(os.path.join(path, 'dev_appserver.py')):
gae_path = path
else:
gae_path = spawn.find_executable('dev_appserver.py')
if gae_path:
gae_path = os.path.dirname(os.path.realpath(gae_path))
if not gae_path:
return ''
gcloud_exec = 'gcloud.cmd' if IS_WINDOWS else 'gcloud'
if not os.path.isfile(os.path.join(gae_path, gcloud_exec)):
GAE_PATH = gae_path
else:
gae_path = os.path.join(gae_path, '..', 'platform', 'google_appengine')
if os.path.exists(gae_path):
GAE_PATH = os.path.realpath(gae_path)
return GAE_PATH
def check_gae():
"""Checks if Google App Engine is present on system"""
return bool(find_gae_path()), 'Google App Engine SDK'
def check_pip():
"""Checks if pip is present on system"""
return bool(spawn.find_executable('pip')), 'pip'
def check_virtualenv():
"""Checks if virtualenv is present on system"""
return bool(spawn.find_executable('virtualenv')), 'virtualenv'
def doctor_says_ok():
"""Executes all check functions
Returns:
bool: True only iif all chcek functions return True
"""
checkers = [check_gae, check_pip, check_virtualenv]
if False in [check_requirement(check) for check in checkers]:
sys.exit(1)
return True
###############################################################################
# Main
###############################################################################
def run_dev_appserver():
"""Runs dev_appserver.py with given arguments"""
make_dirs(DIR_STORAGE)
clear = 'yes' if ARGS.flush else 'no'
port = int(ARGS.port)
args = [
'"%s"' % os.path.join(find_gae_path(), 'dev_appserver.py'),
DIR_MAIN,
'--host %s' % ARGS.host,
'--port %s' % port,
'--admin_port %s' % (port + 1),
'--storage_path=%s' % DIR_STORAGE,
'--clear_datastore=%s' % clear,
'--skip_sdk_update_check',
] + ARGS.args
run_command = ' '.join(args)
os.system(run_command)
def run():
"""Runs this script"""
os.chdir(os.path.dirname(os.path.realpath(__file__)))
if doctor_says_ok():
install_dependencies()
run_dev_appserver()
if __name__ == '__main__':
run()
| madvas/gae-angular-material-starter | run.py | Python | mit | 8,998 |
from pylab import *
def gradient_scalar_wrt_scalar_non_const_dt(x,t):
x = x.astype(float64).squeeze()
t = t.astype(float64).squeeze()
x_grad = zeros_like(x)
x_grad[0] = (x[1] - x[0]) / (t[1] - t[0])
x_grad[-1] = (x[-1] - x[-2]) / (t[-1] - t[-2])
x_grad[1:-1] = (x[2:] - x[:-2]) / (t[2:] - t[:-2])
return x_grad
def gradient_vector_wrt_scalar_non_const_dt(x,t):
x_grad = zeros_like(x)
for d in range(x.shape[1]):
x_grad[:,d] = gradient_scalar_wrt_scalar_non_const_dt(x[:,d],t)
return x_grad
def gradient_vector_wrt_scalar(x,dt):
x_grad = zeros_like(x)
for d in range(x.shape[1]):
x_grad[:,d] = gradient(x[:,d],dt)
return x_grad
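# Minimal self-check (an illustrative addition; the sample values are
# assumptions, not part of the original module):
if __name__ == '__main__':
    t = array([0.0, 0.1, 0.3, 0.6])      # non-uniform sample times
    x = t ** 2
    # Finite-difference estimate of dx/dt = 2*t: one-sided differences at the
    # endpoints, central differences in the interior.
    print(gradient_scalar_wrt_scalar_non_const_dt(x, t))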
| stanford-gfx/Horus | Code/flashlight/gradientutils.py | Python | bsd-3-clause | 745 |
"""
Marginal Utility of Information, as defined here: http://arxiv.org/abs/1409.4708
"""
import warnings
from itertools import product
import numpy as np
from scipy.linalg import LinAlgWarning
from scipy.optimize import OptimizeWarning
from .base_profile import BaseProfile, profile_docstring
from .information_partitions import ShannonPartition
from ..utils import flatten, powerset
__all__ = (
'MUIProfile',
)
def get_lp_form(dist, ents):
"""
    Construct the constraint matrix for computing the maximum utility of
    information in linear programming canonical form.
Parameters
----------
dist : Distribution
The distribution from which to construct the constraints.
Returns
-------
c : ndarray
The utility function to minimize
A : ndarray
The lhs of the constraint equations
b : ndarray
The rhs of the constraint equations
bounds : list of pairs
The bounds on the individual elements of `x`
"""
pa = [frozenset(s) for s in powerset(flatten(dist.rvs))][1:]
sp = sorted(ents.atoms.items())
atoms = [frozenset(flatten(a[0])) for a, v in sp if not np.isclose(v, 0)]
A = []
b = []
for pa_V, pa_W in product(pa, pa):
if pa_V == pa_W:
# constraint (i)
cond = np.zeros(len(atoms))
for j, atom in enumerate(atoms):
if pa_V & atom:
cond[j] = 1
A.append(cond)
b.append(ents[([pa_V], [])])
else:
# constraint (ii)
if pa_W < pa_V:
cond = np.zeros(len(atoms))
for j, atom in enumerate(atoms):
if (pa_V & atom) and not (pa_W & atom):
cond[j] = 1
A.append(cond)
b.append(ents[([pa_V], [])] - ents[([pa_W], [])])
# constraint (iii)
cond = np.zeros(len(atoms))
for j, atom in enumerate(atoms):
if (pa_V & atom):
cond[j] += 1
if (pa_W & atom):
cond[j] += 1
if ((pa_V | pa_W) & atom):
cond[j] -= 1
if ((pa_V & pa_W) & atom):
cond[j] -= 1
A.append(cond)
b.append(ents[([pa_V], [])]
+ ents[([pa_W], [])]
- ents[([pa_V | pa_W], [])]
- ents[([pa_V & pa_W], [])])
A.append([1] * len(atoms))
b.append(0) # placeholder for y
A = np.array(A)
b = np.array(b)
c = np.array([-len(atom) for atom in atoms]) # negative for minimization
bounds = [(min(0, val), max(0, val)) for _, val in sp if not np.isclose(val, 0)]
return c, A, b, bounds
def max_util_of_info(c, A, b, bounds, y):
"""
Compute the maximum utility of information at scale `y`.
Parameters
----------
c : ndarray
A list of atom-weights.
A : ndarray
The lhs of the various constraints.
b : ndarray
The rhs of the various constraints.
bounds : list of pairs
Each part of `x` must be between the atom's value and 0.
y : float
The total mutual information captured.
"""
from scipy.optimize import linprog
b[-1] = y
with warnings.catch_warnings():
warnings.simplefilter("ignore", LinAlgWarning)
warnings.simplefilter("ignore", OptimizeWarning)
solution = linprog(c, A, b, bounds=bounds)
maximum_utility_of_information = -solution.fun
return maximum_utility_of_information
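# How these helpers fit together (an illustrative sketch mirroring
# MUIProfile._compute below; `dist` stands for any dit Distribution):
#
#     sp = ShannonPartition(dist)
#     c, A, b, bounds = get_lp_form(dist, sp)
#     max_util_at_one_bit = max_util_of_info(c, A, b, bounds, y=1.0)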
class MUIProfile(BaseProfile):
__doc__ = profile_docstring.format(name='MUIProfile',
static_attributes='',
attributes='',
methods='')
xlabel = "scale [bits]"
ylabel = "marginal utility of information"
align = 'edge'
_name = "Marginal Utility of Info."
def _compute(self):
"""
Compute the Marginal Utility of Information.
"""
sp = ShannonPartition(self.dist)
c, A, b, bounds = get_lp_form(self.dist, sp)
ent = sum(sp.atoms.values())
atoms = sp.atoms.values()
ps = powerset(atoms)
pnts = np.unique(np.round([sum(ss) for ss in ps], 7))
pnts = [v for v in pnts if 0 <= v <= ent]
if len(c):
maxui = [max_util_of_info(c, A, b, bounds, y) for y in pnts]
mui = np.round(np.diff(maxui) / np.diff(pnts), 7)
vals = np.array(np.unique(mui, return_index=True))
self.profile = {pnts[int(row[1])]: row[0] for row in vals.T}
self.widths = np.diff(sorted(self.profile.keys()) + [ent])
else:
self.profile = {0.0: 0.0}
self.widths = np.asarray([0.0])
def draw(self, ax=None): # pragma: no cover
ax = super().draw(ax=ax)
pnts = np.arange(int(max(self.profile.keys()) + self.widths[-1]) + 1)
ax.set_xticks(pnts)
ax.set_xticklabels(pnts)
return ax
draw.__doc__ = BaseProfile.draw.__doc__
| dit/dit | dit/profiles/marginal_utility_of_information.py | Python | bsd-3-clause | 5,164 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Manage SDK chroots.
This script is used for manipulating local chroot environments; creating,
deleting, downloading, etc. If given --enter (or no args), it defaults
to an interactive bash shell within the chroot.
If given args those are passed to the chroot environment, and executed.
"""
from __future__ import print_function
import argparse
import glob
import os
import pwd
import random
import re
import resource
import sys
from six.moves import urllib
from chromite.lib import constants
from chromite.lib import cgroups
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import cros_sdk_lib
from chromite.lib import locking
from chromite.lib import namespaces
from chromite.lib import osutils
from chromite.lib import path_util
from chromite.lib import process_util
from chromite.lib import retry_util
from chromite.lib import toolchain
from chromite.utils import key_value_store
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
COMPRESSION_PREFERENCE = ('xz', 'bz2')
# TODO(zbehan): Remove the dependency on these, reimplement them in python
MAKE_CHROOT = [
os.path.join(constants.SOURCE_ROOT, 'src/scripts/sdk_lib/make_chroot.sh')
]
ENTER_CHROOT = [
os.path.join(constants.SOURCE_ROOT, 'src/scripts/sdk_lib/enter_chroot.sh')
]
# Proxy simulator configuration.
PROXY_HOST_IP = '192.168.240.1'
PROXY_PORT = 8080
PROXY_GUEST_IP = '192.168.240.2'
PROXY_NETMASK = 30
PROXY_VETH_PREFIX = 'veth'
PROXY_CONNECT_PORTS = (80, 443, 9418)
PROXY_APACHE_FALLBACK_USERS = ('www-data', 'apache', 'nobody')
PROXY_APACHE_MPMS = ('event', 'worker', 'prefork')
PROXY_APACHE_FALLBACK_PATH = ':'.join(
'/usr/lib/apache2/mpm-%s' % mpm for mpm in PROXY_APACHE_MPMS)
PROXY_APACHE_MODULE_GLOBS = ('/usr/lib*/apache2/modules', '/usr/lib*/apache2')
# We need these tools to run. Very common tools (tar,..) are omitted.
NEEDED_TOOLS = ('curl', 'xz')
# Tools needed for --proxy-sim only.
PROXY_NEEDED_TOOLS = ('ip',)
# Tools needed when use_image is true (the default).
IMAGE_NEEDED_TOOLS = ('losetup', 'lvchange', 'lvcreate', 'lvs', 'mke2fs',
'pvscan', 'thin_check', 'vgchange', 'vgcreate', 'vgs')
# As space is used inside the chroot, the empty space in chroot.img is
# allocated. Deleting files inside the chroot doesn't automatically return the
# used space to the OS. Over time, this tends to make the sparse chroot.img
# less sparse even if the chroot contents don't currently need much space. We
# can recover most of this unused space with fstrim, but that takes too much
# time to run it every time. Instead, check the used space against the image
# size after mounting the chroot and only call fstrim if it looks like we could
# recover at least this many GiB.
MAX_UNUSED_IMAGE_GBS = 20
def GetArchStageTarballs(version):
"""Returns the URL for a given arch/version"""
extension = {'bz2': 'tbz2', 'xz': 'tar.xz'}
return [
toolchain.GetSdkURL(
suburl='cros-sdk-%s.%s' % (version, extension[compressor]))
for compressor in COMPRESSION_PREFERENCE
]
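# For a hypothetical version string this yields one URL per compressor in
# COMPRESSION_PREFERENCE (xz first, then bz2), e.g.
#   GetArchStageTarballs('2020.01.01')
#   # -> ['.../cros-sdk-2020.01.01.tar.xz', '.../cros-sdk-2020.01.01.tbz2']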
def FetchRemoteTarballs(storage_dir, urls, desc):
"""Fetches a tarball given by url, and place it in |storage_dir|.
Args:
storage_dir: Path where to save the tarball.
urls: List of URLs to try to download. Download will stop on first success.
desc: A string describing what tarball we're downloading (for logging).
Returns:
Full path to the downloaded file.
Raises:
ValueError: None of the URLs worked.
"""
# Note we track content length ourselves since certain versions of curl
# fail if asked to resume a complete file.
# https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3482927&group_id=976
logging.notice('Downloading %s tarball...', desc)
status_re = re.compile(br'^HTTP/[0-9]+(\.[0-9]+)? 200')
# pylint: disable=undefined-loop-variable
for url in urls:
parsed = urllib.parse.urlparse(url)
tarball_name = os.path.basename(parsed.path)
if parsed.scheme in ('', 'file'):
if os.path.exists(parsed.path):
return parsed.path
continue
content_length = 0
logging.debug('Attempting download from %s', url)
result = retry_util.RunCurl(['-I', url],
print_cmd=False,
debug_level=logging.NOTICE,
capture_output=True)
successful = False
for header in result.output.splitlines():
# We must walk the output to find the 200 code for use cases where
# a proxy is involved and may have pushed down the actual header.
if status_re.match(header):
successful = True
elif header.lower().startswith(b'content-length:'):
content_length = int(header.split(b':', 1)[-1].strip())
if successful:
break
if successful:
break
else:
raise ValueError('No valid URLs found!')
tarball_dest = os.path.join(storage_dir, tarball_name)
current_size = 0
if os.path.exists(tarball_dest):
current_size = os.path.getsize(tarball_dest)
if current_size > content_length:
osutils.SafeUnlink(tarball_dest)
current_size = 0
if current_size < content_length:
retry_util.RunCurl(
['--fail', '-L', '-y', '30', '-C', '-', '--output', tarball_dest, url],
print_cmd=False,
debug_level=logging.NOTICE)
  # Clean up old tarballs now since we've successfully fetched; only clean up
  # the tarballs for our prefix, or unknown ones. This gets a bit tricky
  # because we might have partial overlap between known prefixes.
my_prefix = tarball_name.rsplit('-', 1)[0] + '-'
all_prefixes = ('stage3-amd64-', 'cros-sdk-', 'cros-sdk-overlay-')
ignored_prefixes = [prefix for prefix in all_prefixes if prefix != my_prefix]
for filename in os.listdir(storage_dir):
if (filename == tarball_name or
any([(filename.startswith(p) and
not (len(my_prefix) > len(p) and filename.startswith(my_prefix)))
for p in ignored_prefixes])):
continue
logging.info('Cleaning up old tarball: %s', filename)
osutils.SafeUnlink(os.path.join(storage_dir, filename))
return tarball_dest
def CreateChroot(chroot_path, sdk_tarball, cache_dir, nousepkg=False):
"""Creates a new chroot from a given SDK.
Args:
chroot_path: Path where the new chroot will be created.
sdk_tarball: Path to a downloaded Gentoo Stage3 or Chromium OS SDK tarball.
cache_dir: Path to a directory that will be used for caching portage files,
etc.
nousepkg: If True, pass --nousepkg to cros_setup_toolchains inside the
chroot.
"""
cmd = MAKE_CHROOT + [
'--stage3_path', sdk_tarball, '--chroot', chroot_path, '--cache_dir',
cache_dir
]
if nousepkg:
cmd.append('--nousepkg')
logging.notice('Creating chroot. This may take a few minutes...')
try:
cros_build_lib.dbg_run(cmd)
except cros_build_lib.RunCommandError as e:
cros_build_lib.Die('Creating chroot failed!\n%s', e)
def DeleteChroot(chroot_path):
"""Deletes an existing chroot"""
cmd = MAKE_CHROOT + ['--chroot', chroot_path, '--delete']
try:
logging.notice('Deleting chroot.')
cros_build_lib.dbg_run(cmd)
except cros_build_lib.RunCommandError as e:
cros_build_lib.Die('Deleting chroot failed!\n%s', e)
def CleanupChroot(chroot_path):
"""Unmounts a chroot and cleans up any associated devices."""
cros_sdk_lib.CleanupChrootMount(chroot_path, delete=False)
def EnterChroot(chroot_path, cache_dir, chrome_root, chrome_root_mount,
goma_dir, goma_client_json, working_dir, additional_args):
"""Enters an existing SDK chroot"""
st = os.statvfs(os.path.join(chroot_path, 'usr', 'bin', 'sudo'))
# The os.ST_NOSUID constant wasn't added until python-3.2.
if st.f_flag & 0x2:
cros_build_lib.Die('chroot cannot be in a nosuid mount')
cmd = ENTER_CHROOT + ['--chroot', chroot_path, '--cache_dir', cache_dir]
if chrome_root:
cmd.extend(['--chrome_root', chrome_root])
if chrome_root_mount:
cmd.extend(['--chrome_root_mount', chrome_root_mount])
if goma_dir:
cmd.extend(['--goma_dir', goma_dir])
if goma_client_json:
cmd.extend(['--goma_client_json', goma_client_json])
if working_dir is not None:
cmd.extend(['--working_dir', working_dir])
if additional_args:
cmd.append('--')
cmd.extend(additional_args)
# ThinLTO opens lots of files at the same time.
resource.setrlimit(resource.RLIMIT_NOFILE, (32768, 32768))
ret = cros_build_lib.dbg_run(cmd, check=False)
# If we were in interactive mode, ignore the exit code; it'll be whatever
# they last ran w/in the chroot and won't matter to us one way or another.
# Note this does allow chroot entrance to fail and be ignored during
# interactive; this is however a rare case and the user will immediately
# see it (nor will they be checking the exit code manually).
if ret.returncode != 0 and additional_args:
raise SystemExit(ret.returncode)
def _ImageFileForChroot(chroot):
"""Find the image file that should be associated with |chroot|.
This function does not check if the image exists; it simply returns the
filename that would be used.
Args:
chroot: Path to the chroot.
Returns:
Path to an image file that would be associated with chroot.
"""
return chroot.rstrip('/') + '.img'
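# For example, _ImageFileForChroot('/home/user/chroot/') returns
# '/home/user/chroot.img' (trailing slashes are stripped first).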
def CreateChrootSnapshot(snapshot_name, chroot_vg, chroot_lv):
"""Create a snapshot for the specified chroot VG/LV.
Args:
snapshot_name: The name of the new snapshot.
chroot_vg: The name of the VG containing the origin LV.
chroot_lv: The name of the origin LV.
Returns:
True if the snapshot was created, or False if a snapshot with the same
name already exists.
Raises:
SystemExit: The lvcreate command failed.
"""
if snapshot_name in ListChrootSnapshots(chroot_vg, chroot_lv):
logging.error(
'Cannot create snapshot %s: A volume with that name already '
'exists.', snapshot_name)
return False
cmd = [
'lvcreate', '-s', '--name', snapshot_name,
'%s/%s' % (chroot_vg, chroot_lv)
]
try:
logging.notice('Creating snapshot %s from %s in VG %s.', snapshot_name,
chroot_lv, chroot_vg)
cros_build_lib.dbg_run(cmd, capture_output=True)
return True
except cros_build_lib.RunCommandError as e:
cros_build_lib.Die('Creating snapshot failed!\n%s', e)
def DeleteChrootSnapshot(snapshot_name, chroot_vg, chroot_lv):
"""Delete the named snapshot from the specified chroot VG.
If the requested snapshot is not found, nothing happens. The main chroot LV
and internal thinpool LV cannot be deleted with this function.
Args:
snapshot_name: The name of the snapshot to delete.
chroot_vg: The name of the VG containing the origin LV.
chroot_lv: The name of the origin LV.
Raises:
SystemExit: The lvremove command failed.
"""
if snapshot_name in (cros_sdk_lib.CHROOT_LV_NAME,
cros_sdk_lib.CHROOT_THINPOOL_NAME):
logging.error(
'Cannot remove LV %s as a snapshot. Use cros_sdk --delete '
'if you want to remove the whole chroot.', snapshot_name)
return
if snapshot_name not in ListChrootSnapshots(chroot_vg, chroot_lv):
return
cmd = ['lvremove', '-f', '%s/%s' % (chroot_vg, snapshot_name)]
try:
logging.notice('Deleting snapshot %s in VG %s.', snapshot_name, chroot_vg)
cros_build_lib.dbg_run(cmd, capture_output=True)
except cros_build_lib.RunCommandError as e:
cros_build_lib.Die('Deleting snapshot failed!\n%s', e)
def RestoreChrootSnapshot(snapshot_name, chroot_vg, chroot_lv):
"""Restore the chroot to an existing snapshot.
This is done by renaming the original |chroot_lv| LV to a temporary name,
renaming the snapshot named |snapshot_name| to |chroot_lv|, and deleting the
  now unused LV. If an error occurs, attempts to rename the original LV
  back to |chroot_lv| to leave the chroot unchanged.
The chroot must be unmounted before calling this function, and will be left
unmounted after this function returns.
Args:
snapshot_name: The name of the snapshot to restore. This snapshot will no
longer be accessible at its original name after this function finishes.
chroot_vg: The VG containing the chroot LV and snapshot LV.
chroot_lv: The name of the original chroot LV.
Returns:
True if the chroot was restored to the requested snapshot, or False if
the snapshot wasn't found or isn't valid.
Raises:
SystemExit: Any of the LVM commands failed.
"""
valid_snapshots = ListChrootSnapshots(chroot_vg, chroot_lv)
if (snapshot_name in (cros_sdk_lib.CHROOT_LV_NAME,
cros_sdk_lib.CHROOT_THINPOOL_NAME) or
snapshot_name not in valid_snapshots):
logging.error('Chroot cannot be restored to %s. Valid snapshots: %s',
snapshot_name, ', '.join(valid_snapshots))
return False
backup_chroot_name = 'chroot-bak-%d' % random.randint(0, 1000)
cmd = ['lvrename', chroot_vg, chroot_lv, backup_chroot_name]
try:
cros_build_lib.dbg_run(cmd, capture_output=True)
except cros_build_lib.RunCommandError as e:
cros_build_lib.Die('Restoring snapshot failed!\n%s', e)
cmd = ['lvrename', chroot_vg, snapshot_name, chroot_lv]
try:
cros_build_lib.dbg_run(cmd, capture_output=True)
except cros_build_lib.RunCommandError as e:
cmd = ['lvrename', chroot_vg, backup_chroot_name, chroot_lv]
try:
cros_build_lib.dbg_run(cmd, capture_output=True)
except cros_build_lib.RunCommandError as e:
cros_build_lib.Die(
'Failed to rename %s to chroot and failed to restore %s back to '
'chroot!\n%s', snapshot_name, backup_chroot_name, e)
cros_build_lib.Die(
'Failed to rename %s to chroot! Original chroot LV has '
'been restored.\n%s', snapshot_name, e)
# Some versions of LVM set snapshots to be skipped at auto-activate time.
# Other versions don't have this flag at all. We run lvchange to try
# disabling auto-skip and activating the volume, but ignore errors. Versions
# that don't have the flag should be auto-activated.
chroot_lv_path = '%s/%s' % (chroot_vg, chroot_lv)
cmd = ['lvchange', '-kn', chroot_lv_path]
cros_build_lib.run(
cmd, print_cmd=False, capture_output=True, check=False)
# Activate the LV in case the lvchange above was needed. Activating an LV
# that is already active shouldn't do anything, so this is safe to run even if
# the -kn wasn't needed.
cmd = ['lvchange', '-ay', chroot_lv_path]
cros_build_lib.dbg_run(cmd, capture_output=True)
cmd = ['lvremove', '-f', '%s/%s' % (chroot_vg, backup_chroot_name)]
try:
cros_build_lib.dbg_run(cmd, capture_output=True)
except cros_build_lib.RunCommandError as e:
cros_build_lib.Die('Failed to remove backup LV %s/%s!\n%s',
chroot_vg, backup_chroot_name, e)
return True
def ListChrootSnapshots(chroot_vg, chroot_lv):
"""Return all snapshots in |chroot_vg| regardless of origin volume.
Args:
chroot_vg: The name of the VG containing the chroot.
chroot_lv: The name of the chroot LV.
Returns:
A (possibly-empty) list of snapshot LVs found in |chroot_vg|.
Raises:
SystemExit: The lvs command failed.
"""
if not chroot_vg or not chroot_lv:
return []
cmd = [
'lvs', '-o', 'lv_name,pool_lv,lv_attr', '-O', 'lv_name', '--noheadings',
'--separator', '\t', chroot_vg
]
try:
result = cros_build_lib.run(
cmd, print_cmd=False, stdout=True, encoding='utf-8')
except cros_build_lib.RunCommandError:
raise SystemExit('Running %r failed!' % cmd)
# Once the thin origin volume has been deleted, there's no way to tell a
# snapshot apart from any other volume. Since this VG is created and managed
# by cros_sdk, we'll assume that all volumes that share the same thin pool are
# valid snapshots.
snapshots = []
snapshot_attrs = re.compile(r'^V.....t.{2,}') # Matches a thin volume.
for line in result.output.splitlines():
lv_name, pool_lv, lv_attr = line.lstrip().split('\t')
if (lv_name == chroot_lv or lv_name == cros_sdk_lib.CHROOT_THINPOOL_NAME or
pool_lv != cros_sdk_lib.CHROOT_THINPOOL_NAME or
not snapshot_attrs.match(lv_attr)):
continue
snapshots.append(lv_name)
return snapshots
def _SudoCommand():
"""Get the 'sudo' command, along with all needed environment variables."""
# Pass in the ENVIRONMENT_WHITELIST and ENV_PASSTHRU variables so that
# scripts in the chroot know what variables to pass through. We keep PATH
# not for the chroot but for the re-exec & for programs we might run before
# we chroot into the SDK. The process that enters the SDK itself will take
# care of initializing PATH to the right value then.
cmd = ['sudo']
for key in (constants.CHROOT_ENVIRONMENT_WHITELIST + constants.ENV_PASSTHRU +
('PATH',)):
value = os.environ.get(key)
if value is not None:
cmd += ['%s=%s' % (key, value)]
# Pass in the path to the depot_tools so that users can access them from
# within the chroot.
cmd += ['DEPOT_TOOLS=%s' % constants.DEPOT_TOOLS_DIR]
return cmd
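# Illustrative result (the exact contents depend on the caller's
# environment; the paths below are made up):
#   ['sudo', 'PATH=/usr/bin:/bin', 'DEPOT_TOOLS=/opt/depot_tools']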
def _ReportMissing(missing):
"""Report missing utilities, then exit.
Args:
missing: List of missing utilities, as returned by
osutils.FindMissingBinaries. If non-empty, will not return.
"""
if missing:
raise SystemExit(
'The tool(s) %s were not found.\n'
'Please install the appropriate package in your host.\n'
'Example(ubuntu):\n'
' sudo apt-get install <packagename>' % ', '.join(missing))
def _ProxySimSetup(options):
"""Set up proxy simulator, and return only in the child environment.
TODO: Ideally, this should support multiple concurrent invocations of
cros_sdk --proxy-sim; currently, such invocations will conflict with each
other due to the veth device names and IP addresses. Either this code would
need to generate fresh, unused names for all of these before forking, or it
would need to support multiple concurrent cros_sdk invocations sharing one
proxy and allowing it to exit when unused (without counting on any local
service-management infrastructure on the host).
"""
may_need_mpm = False
apache_bin = osutils.Which('apache2')
if apache_bin is None:
apache_bin = osutils.Which('apache2', PROXY_APACHE_FALLBACK_PATH)
if apache_bin is None:
_ReportMissing(('apache2',))
else:
may_need_mpm = True
# Module names and .so names included for ease of grepping.
apache_modules = [('proxy_module', 'mod_proxy.so'),
('proxy_connect_module', 'mod_proxy_connect.so'),
('proxy_http_module', 'mod_proxy_http.so'),
('proxy_ftp_module', 'mod_proxy_ftp.so')]
# Find the apache module directory, and make sure it has the modules we need.
module_dirs = {}
for g in PROXY_APACHE_MODULE_GLOBS:
for _, so in apache_modules:
for f in glob.glob(os.path.join(g, so)):
module_dirs.setdefault(os.path.dirname(f), []).append(so)
for apache_module_path, modules_found in module_dirs.items():
if len(modules_found) == len(apache_modules):
break
else:
# Appease cros lint, which doesn't understand that this else block will not
# fall through to the subsequent code which relies on apache_module_path.
apache_module_path = None
raise SystemExit(
'Could not find apache module path containing all required modules: %s'
% ', '.join(so for mod, so in apache_modules))
def check_add_module(name):
so = 'mod_%s.so' % name
if os.access(os.path.join(apache_module_path, so), os.F_OK):
mod = '%s_module' % name
apache_modules.append((mod, so))
return True
return False
check_add_module('authz_core')
if may_need_mpm:
for mpm in PROXY_APACHE_MPMS:
if check_add_module('mpm_%s' % mpm):
break
veth_host = '%s-host' % PROXY_VETH_PREFIX
veth_guest = '%s-guest' % PROXY_VETH_PREFIX
# Set up locks to sync the net namespace setup. We need the child to create
# the net ns first, and then have the parent assign the guest end of the veth
# interface to the child's new network namespace & bring up the proxy. Only
# then can the child move forward and rely on the network being up.
ns_create_lock = locking.PipeLock()
ns_setup_lock = locking.PipeLock()
pid = os.fork()
if not pid:
# Create our new isolated net namespace.
namespaces.Unshare(namespaces.CLONE_NEWNET)
# Signal the parent the ns is ready to be configured.
ns_create_lock.Post()
del ns_create_lock
# Wait for the parent to finish setting up the ns/proxy.
ns_setup_lock.Wait()
del ns_setup_lock
# Set up child side of the network.
commands = (
('ip', 'link', 'set', 'up', 'lo'),
('ip', 'address', 'add', '%s/%u' % (PROXY_GUEST_IP, PROXY_NETMASK),
'dev', veth_guest),
('ip', 'link', 'set', veth_guest, 'up'),
)
try:
for cmd in commands:
cros_build_lib.dbg_run(cmd)
except cros_build_lib.RunCommandError as e:
cros_build_lib.Die('Proxy setup failed!\n%s', e)
proxy_url = 'http://%s:%u' % (PROXY_HOST_IP, PROXY_PORT)
for proto in ('http', 'https', 'ftp'):
os.environ[proto + '_proxy'] = proxy_url
for v in ('all_proxy', 'RSYNC_PROXY', 'no_proxy'):
os.environ.pop(v, None)
return
# Set up parent side of the network.
uid = int(os.environ.get('SUDO_UID', '0'))
gid = int(os.environ.get('SUDO_GID', '0'))
if uid == 0 or gid == 0:
for username in PROXY_APACHE_FALLBACK_USERS:
try:
pwnam = pwd.getpwnam(username)
uid, gid = pwnam.pw_uid, pwnam.pw_gid
break
except KeyError:
continue
if uid == 0 or gid == 0:
raise SystemExit('Could not find a non-root user to run Apache as')
chroot_parent, chroot_base = os.path.split(options.chroot)
pid_file = os.path.join(chroot_parent, '.%s-apache-proxy.pid' % chroot_base)
log_file = os.path.join(chroot_parent, '.%s-apache-proxy.log' % chroot_base)
# Wait for the child to create the net ns.
ns_create_lock.Wait()
del ns_create_lock
apache_directives = [
'User #%u' % uid,
'Group #%u' % gid,
'PidFile %s' % pid_file,
'ErrorLog %s' % log_file,
'Listen %s:%u' % (PROXY_HOST_IP, PROXY_PORT),
'ServerName %s' % PROXY_HOST_IP,
'ProxyRequests On',
'AllowCONNECT %s' % ' '.join(str(x) for x in PROXY_CONNECT_PORTS),
] + [
'LoadModule %s %s' % (mod, os.path.join(apache_module_path, so))
for (mod, so) in apache_modules
]
commands = (
('ip', 'link', 'add', 'name', veth_host, 'type', 'veth', 'peer', 'name',
veth_guest),
('ip', 'address', 'add', '%s/%u' % (PROXY_HOST_IP, PROXY_NETMASK), 'dev',
veth_host),
('ip', 'link', 'set', veth_host, 'up'),
([apache_bin, '-f', '/dev/null'] +
[arg for d in apache_directives for arg in ('-C', d)]),
('ip', 'link', 'set', veth_guest, 'netns', str(pid)),
)
cmd = None # Make cros lint happy.
try:
for cmd in commands:
cros_build_lib.dbg_run(cmd)
except cros_build_lib.RunCommandError as e:
# Clean up existing interfaces, if any.
cmd_cleanup = ('ip', 'link', 'del', veth_host)
try:
cros_build_lib.run(cmd_cleanup, print_cmd=False)
except cros_build_lib.RunCommandError:
logging.error('running %r failed', cmd_cleanup)
cros_build_lib.Die('Proxy network setup failed!\n%s', e)
# Signal the child that the net ns/proxy is fully configured now.
ns_setup_lock.Post()
del ns_setup_lock
process_util.ExitAsStatus(os.waitpid(pid, 0)[1])
def _ReExecuteIfNeeded(argv):
"""Re-execute cros_sdk as root.
Also unshare the mount namespace so as to ensure that processes outside
the chroot can't mess with our mounts.
"""
if os.geteuid() != 0:
cmd = _SudoCommand() + ['--'] + argv
logging.debug('Reexecing self via sudo:\n%s', cros_build_lib.CmdToStr(cmd))
os.execvp(cmd[0], cmd)
else:
# We must set up the cgroups mounts before we enter our own namespace.
# This way it is a shared resource in the root mount namespace.
cgroups.Cgroup.InitSystem()
def _CreateParser(sdk_latest_version, bootstrap_latest_version):
"""Generate and return the parser with all the options."""
usage = ('usage: %(prog)s [options] '
'[VAR1=val1 ... VAR2=val2] [--] [command [args]]')
parser = commandline.ArgumentParser(
usage=usage, description=__doc__, caching=True)
# Global options.
default_chroot = os.path.join(constants.SOURCE_ROOT,
constants.DEFAULT_CHROOT_DIR)
parser.add_argument(
'--chroot',
dest='chroot',
default=default_chroot,
type='path',
help=('SDK chroot dir name [%s]' % constants.DEFAULT_CHROOT_DIR))
parser.add_argument(
'--nouse-image',
dest='use_image',
action='store_false',
default=True,
help='Do not mount the chroot on a loopback image; '
'instead, create it directly in a directory.')
parser.add_argument(
'--chrome-root',
'--chrome_root',
type='path',
help='Mount this chrome root into the SDK chroot')
parser.add_argument(
'--chrome_root_mount',
type='path',
help='Mount chrome into this path inside SDK chroot')
parser.add_argument(
'--nousepkg',
action='store_true',
default=False,
help='Do not use binary packages when creating a chroot.')
parser.add_argument(
'-u',
'--url',
dest='sdk_url',
help='Use sdk tarball located at this url. Use file:// '
'for local files.')
parser.add_argument(
'--sdk-version',
help=('Use this sdk version. For prebuilt, current is %r'
', for bootstrapping it is %r.' % (sdk_latest_version,
bootstrap_latest_version)))
parser.add_argument(
'--goma_dir',
type='path',
help='Goma installed directory to mount into the chroot.')
parser.add_argument(
'--goma_client_json',
type='path',
help='Service account json file to use goma on bot. '
'Mounted into the chroot.')
  # Use type=str instead of type='path' to prevent the given path from being
  # transferred to an absolute path automatically.
parser.add_argument(
'--working-dir',
type=str,
      help='Run the command in a specific working directory in '
      'the chroot. If the given directory is a relative '
      'path, this program will translate the path to '
      'the corresponding one inside the chroot.')
parser.add_argument('commands', nargs=argparse.REMAINDER)
# Commands.
group = parser.add_argument_group('Commands')
group.add_argument(
'--enter',
action='store_true',
default=False,
help='Enter the SDK chroot. Implies --create.')
group.add_argument(
'--create',
action='store_true',
default=False,
help='Create the chroot only if it does not already exist. '
'Implies --download.')
group.add_argument(
'--bootstrap',
action='store_true',
default=False,
help='Build everything from scratch, including the sdk. '
'Use this only if you need to validate a change '
'that affects SDK creation itself (toolchain and '
'build are typically the only folk who need this). '
'Note this will quite heavily slow down the build. '
'This option implies --create --nousepkg.')
group.add_argument(
'-r',
'--replace',
action='store_true',
default=False,
help='Replace an existing SDK chroot. Basically an alias '
'for --delete --create.')
group.add_argument(
'--delete',
action='store_true',
default=False,
help='Delete the current SDK chroot if it exists.')
group.add_argument(
'--unmount',
action='store_true',
default=False,
help='Unmount and clean up devices associated with the '
'SDK chroot if it exists. This does not delete the '
'backing image file, so the same chroot can be later '
're-mounted for reuse. To fully delete the chroot, use '
'--delete. This is primarily useful for working on '
'cros_sdk or the chroot setup; you should not need it '
'under normal circumstances.')
group.add_argument(
'--download',
action='store_true',
default=False,
help='Download the sdk.')
group.add_argument(
'--snapshot-create',
metavar='SNAPSHOT_NAME',
help='Create a snapshot of the chroot. Requires that the chroot was '
'created without the --nouse-image option.')
group.add_argument(
'--snapshot-restore',
metavar='SNAPSHOT_NAME',
help='Restore the chroot to a previously created snapshot.')
group.add_argument(
'--snapshot-delete',
metavar='SNAPSHOT_NAME',
help='Delete a previously created snapshot. Deleting a snapshot that '
'does not exist is not an error.')
group.add_argument(
'--snapshot-list',
action='store_true',
default=False,
help='List existing snapshots of the chroot and exit.')
commands = group
# Namespace options.
group = parser.add_argument_group('Namespaces')
group.add_argument(
'--proxy-sim',
action='store_true',
default=False,
help='Simulate a restrictive network requiring an outbound'
' proxy.')
group.add_argument(
'--no-ns-pid',
dest='ns_pid',
default=True,
action='store_false',
help='Do not create a new PID namespace.')
# Internal options.
group = parser.add_argument_group(
'Internal Chromium OS Build Team Options',
      'Caution: these are meant for the Chromium OS build team only')
group.add_argument(
'--buildbot-log-version',
default=False,
action='store_true',
help='Log SDK version for buildbot consumption')
return parser, commands
def main(argv):
# Turn on strict sudo checks.
cros_build_lib.STRICT_SUDO = True
conf = key_value_store.LoadFile(
os.path.join(constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
ignore_missing=True)
sdk_latest_version = conf.get('SDK_LATEST_VERSION', '<unknown>')
bootstrap_frozen_version = conf.get('BOOTSTRAP_FROZEN_VERSION', '<unknown>')
# Use latest SDK for bootstrapping if requested. Use a frozen version of SDK
# for bootstrapping if BOOTSTRAP_FROZEN_VERSION is set.
bootstrap_latest_version = (
sdk_latest_version
if bootstrap_frozen_version == '<unknown>' else bootstrap_frozen_version)
parser, commands = _CreateParser(sdk_latest_version, bootstrap_latest_version)
options = parser.parse_args(argv)
chroot_command = options.commands
# Some sanity checks first, before we ask for sudo credentials.
cros_build_lib.AssertOutsideChroot()
host = os.uname()[4]
if host != 'x86_64':
cros_build_lib.Die(
"cros_sdk is currently only supported on x86_64; you're running"
        ' %s. Please find an x86_64 machine.' % (host,))
_ReportMissing(osutils.FindMissingBinaries(NEEDED_TOOLS))
if options.proxy_sim:
_ReportMissing(osutils.FindMissingBinaries(PROXY_NEEDED_TOOLS))
missing_image_tools = osutils.FindMissingBinaries(IMAGE_NEEDED_TOOLS)
if (sdk_latest_version == '<unknown>' or
bootstrap_latest_version == '<unknown>'):
cros_build_lib.Die(
'No SDK version was found. '
'Are you in a Chromium source tree instead of Chromium OS?\n\n'
'Please change to a directory inside your Chromium OS source tree\n'
'and retry. If you need to setup a Chromium OS source tree, see\n'
' https://dev.chromium.org/chromium-os/developer-guide')
any_snapshot_operation = (
options.snapshot_create or options.snapshot_restore or
options.snapshot_delete or options.snapshot_list)
if any_snapshot_operation and not options.use_image:
cros_build_lib.Die('Snapshot operations are not compatible with '
'--nouse-image.')
if (options.snapshot_delete and
options.snapshot_delete == options.snapshot_restore):
parser.error('Cannot --snapshot_delete the same snapshot you are '
'restoring with --snapshot_restore.')
_ReExecuteIfNeeded([sys.argv[0]] + argv)
lock_path = os.path.dirname(options.chroot)
lock_path = os.path.join(
lock_path, '.%s_lock' % os.path.basename(options.chroot).lstrip('.'))
# Expand out the aliases...
if options.replace:
options.delete = options.create = True
if options.bootstrap:
options.create = True
# If a command is not given, default to enter.
# pylint: disable=protected-access
# This _group_actions access sucks, but upstream decided to not include an
# alternative to optparse's option_list, and this is what they recommend.
options.enter |= not any(
getattr(options, x.dest) for x in commands._group_actions)
# pylint: enable=protected-access
options.enter |= bool(chroot_command)
if (options.delete and not options.create and
(options.enter or any_snapshot_operation)):
parser.error('Trying to enter or snapshot the chroot when --delete '
'was specified makes no sense.')
if (options.unmount and
(options.create or options.enter or any_snapshot_operation)):
parser.error('--unmount cannot be specified with other chroot actions.')
if options.working_dir is not None and not os.path.isabs(options.working_dir):
options.working_dir = path_util.ToChrootPath(options.working_dir)
# Discern if we need to create the chroot.
chroot_exists = cros_sdk_lib.IsChrootReady(options.chroot)
if (options.use_image and not chroot_exists and not options.delete and
not options.unmount and not missing_image_tools and
os.path.exists(_ImageFileForChroot(options.chroot))):
# Try to re-mount an existing image in case the user has rebooted.
with cgroups.SimpleContainChildren('cros_sdk'):
with locking.FileLock(lock_path, 'chroot lock') as lock:
logging.debug('Checking if existing chroot image can be mounted.')
lock.write_lock()
cros_sdk_lib.MountChroot(options.chroot, create=False)
chroot_exists = cros_sdk_lib.IsChrootReady(options.chroot)
if chroot_exists:
logging.notice('Mounted existing image %s on chroot',
_ImageFileForChroot(options.chroot))
# Finally, flip create if necessary.
if options.enter or options.snapshot_create:
options.create |= not chroot_exists
# Make sure we will download if we plan to create.
options.download |= options.create
# Anything that needs to manipulate the main chroot mount or communicate with
# LVM needs to be done here before we enter the new namespaces.
# If deleting, do it regardless of the use_image flag so that a
# previously-created loopback chroot can also be cleaned up.
# TODO(bmgordon): See if the DeleteChroot call below can be removed in
# favor of this block.
chroot_deleted = False
if options.delete:
with cgroups.SimpleContainChildren('cros_sdk'):
with locking.FileLock(lock_path, 'chroot lock') as lock:
lock.write_lock()
if missing_image_tools:
logging.notice('Unmounting chroot.')
osutils.UmountTree(options.chroot)
else:
logging.notice('Deleting chroot.')
cros_sdk_lib.CleanupChrootMount(options.chroot, delete=True)
chroot_deleted = True
# If cleanup was requested, we have to do it while we're still in the original
# namespace. Since cleaning up the mount will interfere with any other
# commands, we exit here. The check above should have made sure that no other
# action was requested, anyway.
if options.unmount:
with locking.FileLock(lock_path, 'chroot lock') as lock:
lock.write_lock()
CleanupChroot(options.chroot)
sys.exit(0)
# Make sure the main chroot mount is visible. Contents will be filled in
# below if needed.
if options.create and options.use_image:
if missing_image_tools:
raise SystemExit("""The tool(s) %s were not found.
Please make sure the lvm2 and thin-provisioning-tools packages
are installed on your host.
Example(ubuntu):
sudo apt-get install lvm2 thin-provisioning-tools
If you want to run without lvm2, pass --nouse-image (chroot
snapshots will be unavailable).""" % ', '.join(missing_image_tools))
logging.debug('Making sure chroot image is mounted.')
with cgroups.SimpleContainChildren('cros_sdk'):
with locking.FileLock(lock_path, 'chroot lock') as lock:
lock.write_lock()
if not cros_sdk_lib.MountChroot(options.chroot, create=True):
cros_build_lib.Die('Unable to mount %s on chroot',
_ImageFileForChroot(options.chroot))
logging.notice('Mounted %s on chroot',
_ImageFileForChroot(options.chroot))
# Snapshot operations will always need the VG/LV, but other actions won't.
if any_snapshot_operation:
with cgroups.SimpleContainChildren('cros_sdk'):
with locking.FileLock(lock_path, 'chroot lock') as lock:
chroot_vg, chroot_lv = cros_sdk_lib.FindChrootMountSource(
options.chroot)
if not chroot_vg or not chroot_lv:
cros_build_lib.Die('Unable to find VG/LV for chroot %s',
options.chroot)
# Delete snapshot before creating a new one. This allows the user to
# throw out old state, create a new snapshot, and enter the chroot in a
# single call to cros_sdk. Since restore involves deleting, also do it
# before creating.
if options.snapshot_restore:
lock.write_lock()
valid_snapshots = ListChrootSnapshots(chroot_vg, chroot_lv)
if options.snapshot_restore not in valid_snapshots:
cros_build_lib.Die(
'%s is not a valid snapshot to restore to. '
'Valid snapshots: %s', options.snapshot_restore,
', '.join(valid_snapshots))
osutils.UmountTree(options.chroot)
if not RestoreChrootSnapshot(options.snapshot_restore, chroot_vg,
chroot_lv):
cros_build_lib.Die('Unable to restore chroot to snapshot.')
if not cros_sdk_lib.MountChroot(options.chroot, create=False):
cros_build_lib.Die('Unable to mount restored snapshot onto chroot.')
# Use a read lock for snapshot delete and create even though they modify
# the filesystem, because they don't modify the mounted chroot itself.
# The underlying LVM commands take their own locks, so conflicting
# concurrent operations here may crash cros_sdk, but won't corrupt the
# chroot image. This tradeoff seems worth it to allow snapshot
# operations on chroots that have a process inside.
if options.snapshot_delete:
lock.read_lock()
DeleteChrootSnapshot(options.snapshot_delete, chroot_vg, chroot_lv)
if options.snapshot_create:
lock.read_lock()
if not CreateChrootSnapshot(options.snapshot_create, chroot_vg,
chroot_lv):
cros_build_lib.Die('Unable to create snapshot.')
img_path = _ImageFileForChroot(options.chroot)
if (options.use_image and os.path.exists(options.chroot) and
os.path.exists(img_path)):
img_stat = os.stat(img_path)
img_used_bytes = img_stat.st_blocks * 512
mount_stat = os.statvfs(options.chroot)
mount_used_bytes = mount_stat.f_frsize * (
mount_stat.f_blocks - mount_stat.f_bfree)
extra_gbs = (img_used_bytes - mount_used_bytes) // 2**30
if extra_gbs > MAX_UNUSED_IMAGE_GBS:
logging.notice('%s is using %s GiB more than needed. Running '
'fstrim.', img_path, extra_gbs)
cmd = ['fstrim', options.chroot]
try:
cros_build_lib.dbg_run(cmd)
except cros_build_lib.RunCommandError as e:
logging.warning(
'Running fstrim failed. Consider running fstrim on '
'your chroot manually.\n%s', e)
# Enter a new set of namespaces. Everything after here cannot directly affect
# the hosts's mounts or alter LVM volumes.
namespaces.SimpleUnshare()
if options.ns_pid:
first_pid = namespaces.CreatePidNs()
else:
first_pid = None
if options.snapshot_list:
for snap in ListChrootSnapshots(chroot_vg, chroot_lv):
print(snap)
sys.exit(0)
if not options.sdk_version:
sdk_version = (
bootstrap_latest_version if options.bootstrap else sdk_latest_version)
else:
sdk_version = options.sdk_version
if options.buildbot_log_version:
logging.PrintBuildbotStepText(sdk_version)
# Based on selections, determine the tarball to fetch.
if options.download:
if options.sdk_url:
urls = [options.sdk_url]
else:
urls = GetArchStageTarballs(sdk_version)
with cgroups.SimpleContainChildren('cros_sdk', pid=first_pid):
with locking.FileLock(lock_path, 'chroot lock') as lock:
if options.proxy_sim:
_ProxySimSetup(options)
if (options.delete and not chroot_deleted and
(os.path.exists(options.chroot) or
os.path.exists(_ImageFileForChroot(options.chroot)))):
lock.write_lock()
DeleteChroot(options.chroot)
sdk_cache = os.path.join(options.cache_dir, 'sdks')
distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
osutils.SafeMakedirsNonRoot(options.cache_dir)
for target in (sdk_cache, distfiles_cache):
src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
if not os.path.exists(src):
osutils.SafeMakedirsNonRoot(target)
continue
lock.write_lock(
'Upgrade to %r needed but chroot is locked; please exit '
'all instances so this upgrade can finish.' % src)
if not os.path.exists(src):
# Note that while waiting for the write lock, src may've vanished;
# it's a rare race during the upgrade process that's a byproduct
# of us avoiding taking a write lock to do the src check. If we
# took a write lock for that check, it would effectively limit
# all cros_sdk for a chroot to a single instance.
osutils.SafeMakedirsNonRoot(target)
elif not os.path.exists(target):
# Upgrade occurred, but a reversion, or something whacky
# occurred writing to the old location. Wipe and continue.
os.rename(src, target)
else:
# Upgrade occurred once already, but either a reversion or
# some before/after separate cros_sdk usage is at play.
# Wipe and continue.
osutils.RmDir(src)
if options.download:
lock.write_lock()
sdk_tarball = FetchRemoteTarballs(
sdk_cache, urls, 'stage3' if options.bootstrap else 'SDK')
if options.create:
lock.write_lock()
# Recheck if the chroot is set up here before creating to make sure we
# account for whatever the various delete/unmount/remount steps above
# have done.
if cros_sdk_lib.IsChrootReady(options.chroot):
logging.debug('Chroot already exists. Skipping creation.')
else:
CreateChroot(
options.chroot,
sdk_tarball,
options.cache_dir,
nousepkg=(options.bootstrap or options.nousepkg))
if options.enter:
lock.read_lock()
EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
options.chrome_root_mount, options.goma_dir,
options.goma_client_json, options.working_dir,
chroot_command)
| endlessm/chromium-browser | third_party/chromite/scripts/cros_sdk.py | Python | bsd-3-clause | 44,269 |
"""Particle system example."""
from galry import *
import pylab as plt
import numpy as np
import numpy.random as rdn
import time
import timeit
import os
class ParticleVisual(Visual):
def get_position_update_code(self):
return """
// update position
position.x += velocities.x * tloc;
position.y += velocities.y * tloc - 0.5 * g * tloc * tloc;
"""
def get_color_update_code(self):
return """
// pass the color and point size to the fragment shader
varying_color = color;
varying_color.w = alpha;
"""
def base_fountain(self, initial_positions=None,
velocities=None, color=None, alpha=None, delays=None):
self.size = initial_positions.shape[0]
self.primitive_type = 'POINTS'
# load texture
path = os.path.dirname(os.path.realpath(__file__))
particle = plt.imread(os.path.join(path, "images/particle.png"))
size = float(max(particle.shape))
# create the dataset
self.add_uniform("point_size", vartype="float", ndim=1, data=size)
self.add_uniform("t", vartype="float", ndim=1, data=0.)
self.add_uniform("color", vartype="float", ndim=4, data=color)
# add the different data buffers
self.add_attribute("initial_positions", vartype="float", ndim=2, data=initial_positions)
self.add_attribute("velocities", vartype="float", ndim=2, data=velocities)
self.add_attribute("delays", vartype="float", ndim=1, data=delays)
self.add_attribute("alpha", vartype="float", ndim=1, data=alpha)
self.add_varying("varying_color", vartype="float", ndim=4)
# add particle texture
self.add_texture("tex_sampler", size=particle.shape[:2],
ncomponents=particle.shape[2], ndim=2, data=particle)
vs = """
// compute local time
const float tmax = 5.;
const float tlocmax = 2.;
const float g = %G_CONSTANT%;
// Local time.
float tloc = mod(t - delays, tmax);
vec2 position = initial_positions;
if ((tloc >= 0) && (tloc <= tlocmax))
{
// position update
%POSITION_UPDATE%
%COLOR_UPDATE%
}
else
{
varying_color = vec4(0., 0., 0., 0.);
}
gl_PointSize = point_size;
"""
vs = vs.replace('%POSITION_UPDATE%', self.get_position_update_code())
vs = vs.replace('%COLOR_UPDATE%', self.get_color_update_code())
vs = vs.replace('%G_CONSTANT%', '3.')
self.add_vertex_main(vs)
self.add_fragment_main(
"""
out_color = texture2D(tex_sampler, gl_PointCoord) * varying_color;
""")
def initialize(self, **kwargs):
self.base_fountain(**kwargs)
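# Note (added for clarity): subclasses can override get_position_update_code()
# and get_color_update_code() to splice different GLSL snippets into the
# %POSITION_UPDATE% and %COLOR_UPDATE% placeholders; the defaults above give
# ballistic motion under a constant gravity g.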
def update(figure, parameter):
t = parameter[0]
figure.set_data(t=t)
if __name__ == '__main__':
figure()
# number of particles
n = 50000
# initial positions
positions = .02 * rdn.randn(n, 2)
# initial velocities
velocities = np.zeros((n, 2))
v = 1.5 + .5 * rdn.rand(n)
angles = .1 * rdn.randn(n) + np.pi / 2
velocities[:,0] = v * np.cos(angles)
velocities[:,1] = v * np.sin(angles)
# transparency
alpha = .2 * rdn.rand(n)
# color
color = (0.70,0.75,.98,1.)
# random delays
delays = 10 * rdn.rand(n)
figure(constrain_navigation=True)
# create the visual
visual(ParticleVisual,
initial_positions=positions,
velocities=velocities,
alpha=alpha,
color=color,
delays=delays
)
animate(update, dt=.02)
show()
| rossant/galry | examples/fountain.py | Python | bsd-3-clause | 3,841 |
with open("day-10.txt") as f:
nav_sys = f.read().rstrip().splitlines()
pairs = {
")": "(",
"]": "[",
"}": "{",
">": "<",
}
points = {
")": 3,
"]": 57,
"}": 1197,
">": 25137,
}
score = 0
for line in nav_sys:
brackets = []
for char in line:
if char in pairs:
if brackets and pairs[char] == brackets[-1]:
brackets.pop()
else:
score += points[char]
break
else:
brackets.append(char)
print(score)
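# Worked example from the puzzle text: in the corrupted line
#     {([(<{}[<>[]}>{[]{[(<()>
# the first illegal character is '}' where ']' was expected, which adds
# points['}'] == 1197 to the score.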
| scorphus/sparring | advent-of-code/2021/day-10-part-1.py | Python | mit | 540 |
#
# Epour - A bittorrent client using EFL and libtorrent
#
# Copyright 2012-2013 Kai Huuhko <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import os
import cgi
import logging
log = logging.getLogger("epour")
import libtorrent as lt
from efl.elementary.icon import Icon
from efl.elementary.box import Box
from efl.elementary.label import Label
from efl.elementary.button import Button
from efl.elementary.frame import Frame
from efl.elementary.entry import Entry
from efl.elementary.check import Check
from efl.elementary.spinner import Spinner
from efl.elementary.hoversel import Hoversel
from efl.elementary.popup import Popup
from efl.elementary.fileselector_button import FileselectorButton
from efl.elementary.scroller import Scroller, ELM_SCROLLER_POLICY_OFF, \
ELM_SCROLLER_POLICY_AUTO
from efl.elementary.separator import Separator
from efl.elementary.slider import Slider
from efl.elementary.actionslider import Actionslider, \
ELM_ACTIONSLIDER_LEFT, ELM_ACTIONSLIDER_CENTER, \
ELM_ACTIONSLIDER_RIGHT, ELM_ACTIONSLIDER_ALL
from efl.elementary.naviframe import Naviframe
from efl.elementary.table import Table
from efl.elementary.configuration import Configuration
from efl.evas import Rectangle
from efl.ecore import Timer
from efl.elementary.window import Window, ELM_WIN_BASIC
from efl.elementary.background import Background
import Notify
EXPAND_BOTH = 1.0, 1.0
EXPAND_HORIZ = 1.0, 0.0
FILL_BOTH = -1.0, -1.0
FILL_HORIZ = -1.0, 0.5
SCROLL_BOTH = ELM_SCROLLER_POLICY_AUTO, ELM_SCROLLER_POLICY_AUTO
class PreferencesDialog(Window):
""" Base class for all preferences dialogs """
def __init__(self, title):
elm_conf = Configuration()
scale = elm_conf.scale
Window.__init__(self, title, ELM_WIN_BASIC, title=title, autodel=True)
self.size = scale * 480, scale * 320
bg = Background(self, size_hint_weight=EXPAND_BOTH)
self.resize_object_add(bg)
bg.show()
# bt = Button(self, text="Close")
# bt.callback_clicked_add(lambda b: self.delete())
self.scroller = Scroller(self, policy=SCROLL_BOTH,
size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
self.resize_object_add(self.scroller)
self.scroller.show()
self.box = Box(self)
self.box.size_hint_weight = EXPAND_BOTH
self.scroller.content = self.box
self.show()
# def parent_resize_cb(self, parent):
# (pw, ph) = parent.size
# self.table.size_hint_min = pw * 0.7, ph * 0.7
class PreferencesGeneral(PreferencesDialog):
""" General preference dialog """
def __init__(self, parent, session):
self.session = session
conf = session.conf
PreferencesDialog.__init__(self, "General")
limits = Limits(self, session)
ports = ListenPorts(self, session)
pe = EncryptionSettings(self, session)
dlsel = DataStorageSelector(self, conf)
pad = Rectangle(self.evas)
pad.color = 0, 0, 0, 0
pad.size_hint_min = 0, 10
sep1 = Separator(self)
sep1.horizontal = True
chk1 = Check(self)
chk1.size_hint_align = 0.0, 0.0
chk1.text = "Delete original .torrent file when added"
chk1.state = conf.getboolean("Settings", "delete_original")
chk1.callback_changed_add(lambda x: conf.set("Settings",
"delete_original", str(bool(chk1.state))))
chk2 = Check(self)
chk2.size_hint_align = 0.0, 0.0
chk2.text = "Ask for confirmation on exit"
chk2.state = conf.getboolean("Settings", "confirmations")
chk2.callback_changed_add(lambda x: conf.set("Settings",
"confirmations", str(bool(chk2.state))))
sep2 = Separator(self)
sep2.horizontal = True
for w in ports, limits, dlsel, pe, pad, sep1, chk1, chk2, sep2:
w.show()
self.box.pack_end(w)
class DataStorageSelector(Frame):
def __init__(self, parent, conf):
Frame.__init__(self, parent)
self.size_hint_align = -1.0, 0.0
self.size_hint_weight = 1.0, 0.0
self.text = "Data storage"
self.conf = conf
b = Box(parent)
lbl = self.path_lbl = Label(parent)
lbl.text = conf.get("Settings", "storage_path")
self.dlsel = dlsel = FileselectorButton(self)
dlsel.size_hint_align = -1.0, 0.0
dlsel.inwin_mode = False
dlsel.folder_only = True
dlsel.expandable = False
dlsel.text = "Change path"
dlsel.path = conf.get("Settings", "storage_path")
dlsel.callback_file_chosen_add(self.save_dlpath)
for w in lbl, dlsel:
w.show()
b.pack_end(w)
b.show()
self.content = b
def save_dlpath(self, fs, path):
if not path:
return
if not os.path.exists(self.dlsel.path):
p = Notify.Error(self, "Invalid storage path",
"You have selected an invalid data storage path for torrents.")
return
self.path_lbl.text = path
self.conf.set("Settings", "storage_path", self.dlsel.path)
class ListenPorts(Frame):
def __init__(self, parent, session):
Frame.__init__(self, parent)
self.session = session
self.size_hint_align = FILL_HORIZ
self.text = "Listen port (range)"
port = session.listen_port()
b = Box(parent)
b.size_hint_weight = EXPAND_HORIZ
lp = self.lp = RangeSpinners(
parent,
low = session.conf.getint("Settings", "listen_low"),
high = session.conf.getint("Settings", "listen_high"),
minim = 0, maxim = 65535)
lp.show()
b.pack_end(lp)
save = Button(parent)
save.text = "Apply"
save.callback_clicked_add(self.save_cb)
save.show()
b.pack_end(save)
b.show()
self.content = b
def save_cb(self, btn):
low = int(self.lp.listenlow.value)
high = int(self.lp.listenhigh.value)
self.session.listen_on(low, high)
self.session.conf.set("Settings", "listen_low", str(low))
self.session.conf.set("Settings", "listen_high", str(high))
class PreferencesProxy(PreferencesDialog):
""" Proxy preference dialog """
def __init__(self, parent, session):
PreferencesDialog.__init__(self, "Proxy")
proxies = [
["Proxy for torrent peer connections",
session.peer_proxy, session.set_peer_proxy],
["Proxy for torrent web seed connections",
session.web_seed_proxy, session.set_web_seed_proxy],
["Proxy for tracker connections",
session.tracker_proxy, session.set_tracker_proxy],
["Proxy for DHT connections",
session.dht_proxy, session.set_dht_proxy],
]
for title, rfunc, wfunc in proxies:
pg = ProxyGroup(self, title, rfunc, wfunc)
pg.show()
self.box.pack_end(pg)
class ProxyGroup(Frame):
proxy_types = {
lt.proxy_type.none.name: lt.proxy_type.none,
lt.proxy_type.socks4.name: lt.proxy_type.socks4,
lt.proxy_type.socks5.name: lt.proxy_type.socks5,
lt.proxy_type.socks5_pw.name: lt.proxy_type.socks5_pw,
lt.proxy_type.http.name: lt.proxy_type.http,
lt.proxy_type.http_pw.name: lt.proxy_type.http_pw,
}
def __init__(self, parent, title, rfunc, wfunc):
Frame.__init__(self, parent)
self.size_hint_weight = EXPAND_HORIZ
self.size_hint_align = FILL_HORIZ
self.text = title
t = Table(self, homogeneous=True, padding=(3,3))
t.size_hint_weight = EXPAND_HORIZ
t.size_hint_align = FILL_HORIZ
t.show()
l = Label(self, text="Proxy type")
l.size_hint_align = 0.0, 0.5
l.show()
ptype = Hoversel(parent)
ptype.size_hint_align = -1.0, 0.5
ptype.text = rfunc().type.name
for n in self.proxy_types.iterkeys():
ptype.item_add(n, callback=lambda x, y, z=n: ptype.text_set(z))
ptype.show()
t.pack(l, 0, 0, 1, 1)
t.pack(ptype, 1, 0, 1, 1)
l = Label(self, text="Hostname")
l.size_hint_align = 0.0, 0.5
l.show()
phost = Entry(parent)
phost.size_hint_weight = EXPAND_HORIZ
phost.size_hint_align = FILL_HORIZ
phost.single_line = True
phost.scrollable = True
phost.entry = rfunc().hostname
phost.show()
t.pack(l, 0, 1, 1, 1)
t.pack(phost, 1, 1, 1, 1)
l = Label(self, text="Port")
l.size_hint_align = 0.0, 0.5
l.show()
pport = Spinner(parent)
pport.size_hint_align = -1.0, 0.5
pport.min_max = 0, 65535
pport.value = rfunc().port
pport.show()
t.pack(l, 0, 2, 1, 1)
t.pack(pport, 1, 2, 1, 1)
l = Label(self, text="Username")
l.size_hint_align = 0.0, 0.5
l.show()
puser = Entry(parent)
puser.size_hint_weight = EXPAND_HORIZ
puser.size_hint_align = FILL_HORIZ
puser.single_line = True
puser.scrollable = True
puser.entry = rfunc().username
puser.show()
t.pack(l, 0, 3, 1, 1)
t.pack(puser, 1, 3, 1, 1)
l = Label(self, text="Password")
l.size_hint_align = 0.0, 0.5
l.show()
ppass = Entry(parent)
ppass.size_hint_weight = EXPAND_HORIZ
ppass.size_hint_align = FILL_HORIZ
ppass.single_line = True
ppass.scrollable = True
ppass.password = True
ppass.entry = rfunc().password
ppass.show()
t.pack(l, 0, 4, 1, 1)
t.pack(ppass, 1, 4, 1, 1)
entries = [ptype, phost, pport, puser, ppass]
save = Button(parent, text="Apply")
save.callback_clicked_add(self.save_conf, wfunc, entries)
save.show()
t.pack(save, 0, 5, 2, 1)
self.content = t
def save_conf(self, btn, wfunc, entries):
ptype, phost, pport, puser, ppass = entries
p = lt.proxy_settings()
p.hostname = phost.entry.encode("utf-8")
p.port = int(pport.value)
p.username = puser.entry.encode("utf-8")
p.password = ppass.entry.encode("utf-8")
p.type = self.proxy_types[ptype.text]
wfunc(p)
class EncryptionSettings(Frame):
def __init__(self, parent, session):
self.session = session
Frame.__init__(self, parent)
self.size_hint_align = -1.0, 0.0
self.text = "Encryption settings"
pes = self.pes = session.get_pe_settings()
b = Box(parent)
enc_values = lt.enc_policy.disabled, lt.enc_policy.enabled, lt.enc_policy.forced
enc_levels = lt.enc_level.plaintext, lt.enc_level.rc4, lt.enc_level.both
inc = self.inc = ActSWithLabel(parent,
"Incoming encryption", enc_values, pes.in_enc_policy)
b.pack_end(inc)
inc.show()
out = self.out = ActSWithLabel(parent,
"Outgoing encryption", enc_values, pes.out_enc_policy)
b.pack_end(out)
out.show()
lvl = self.lvl = ActSWithLabel(parent,
"Allowed encryption level", enc_levels, pes.allowed_enc_level)
b.pack_end(lvl)
lvl.show()
prf = self.prf = Check(parent)
prf.style = "toggle"
        prf.text = "Prefer RC4 encryption"
prf.state = pes.prefer_rc4
b.pack_end(prf)
prf.show()
a_btn = Button(parent)
a_btn.text = "Apply"
a_btn.callback_clicked_add(self.apply)
b.pack_end(a_btn)
a_btn.show()
b.show()
self.content = b
def apply(self, btn):
#TODO: Use callbacks to set these?
self.pes.in_enc_policy = self.inc.get_value()
self.pes.out_enc_policy = self.out.get_value()
#FIXME: Find out why this isn't saved to the session.
self.pes.allowed_enc_level = self.lvl.get_value()
self.pes.prefer_rc4 = self.prf.state
self.session.set_pe_settings(self.pes)
class ActSWithLabel(Box):
def __init__(self, parent, label_text, values, initial_value):
Box.__init__(self, parent)
self.vd = {
ELM_ACTIONSLIDER_LEFT: values[0],
ELM_ACTIONSLIDER_CENTER: values[1],
ELM_ACTIONSLIDER_RIGHT: values[2],
}
self.horizontal = True
self.size_hint_align = -1.0, 0.0
self.size_hint_weight = 1.0, 0.0
l = Label(parent)
l.text = label_text
l.show()
w = self.w = Actionslider(parent)
w.magnet_pos = ELM_ACTIONSLIDER_ALL
w.size_hint_align = -1.0, 0.0
w.size_hint_weight = 1.0, 0.0
w.show()
parts = "left", "center", "right"
for i, v in enumerate(values):
w.part_text_set(parts[i], str(v))
        # Map the value's index to the matching slider position enum; the
        # enum is a bitmask (LEFT=1, CENTER=2, RIGHT=4), so the original
        # "index + 1" arithmetic would mis-set the third position.
        w.indicator_pos = (ELM_ACTIONSLIDER_LEFT, ELM_ACTIONSLIDER_CENTER,
                           ELM_ACTIONSLIDER_RIGHT)[values.index(initial_value)]
self.pack_end(l)
self.pack_end(w)
def get_value(self):
return self.vd[self.w.indicator_pos]
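    # Illustrative example: with values = (enc_policy.disabled, enabled,
    # forced) and the knob resting on the center magnet, get_value()
    # returns the "enabled" policy via the position-to-value map above.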
class PreferencesSession(PreferencesDialog):
""" Session preference dialog """
def __init__(self, parent, session):
PreferencesDialog.__init__(self, "Session")
# TODO: Construct and populate this with an Idler
self.session = session
widgets = {}
elm_conf = Configuration()
s = session.settings()
t = Table(self, padding=(5,5), homogeneous=True,
size_hint_align=FILL_BOTH)
self.box.pack_end(t)
t.show()
i = 0
INT_MIN = -2147483648
INT_MAX = 2147483647
scale = elm_conf.scale
for k in dir(s):
if k.startswith("__"): continue
try:
a = getattr(s, k)
if isinstance(a, lt.disk_cache_algo_t):
w = Spinner(t)
w.size_hint_align = FILL_HORIZ
# XXX: lt-rb python bindings don't have all values.
w.min_max = 0, 2 #len(lt.disk_cache_algo_t.values.keys())
for name, val in lt.disk_cache_algo_t.names.items():
w.special_value_add(val, name)
w.value = a
elif isinstance(a, bool):
w = Check(t)
w.size_hint_align = 1.0, 0.0
w.style = "toggle"
w.state = a
elif isinstance(a, int):
w = Spinner(t)
w.size_hint_align = FILL_HORIZ
w.min_max = INT_MIN, INT_MAX
w.value = a
elif isinstance(a, float):
w = Slider(t)
w.size_hint_align = FILL_HORIZ
w.size_hint_weight = EXPAND_HORIZ
w.unit_format = "%1.2f"
if k.startswith("peer_turnover"):
w.min_max = 0.0, 1.0
else:
w.min_max = 0.0, 20.0
w.value = a
elif k == "peer_tos":
# XXX: This is an int pair in libtorrent,
# which doesn't have a python equivalent.
continue
elif k == "user_agent":
w = Entry(t)
w.size_hint_align = 1.0, 0.0
w.size_hint_weight = EXPAND_HORIZ
w.single_line = True
w.editable = False
w.entry = cgi.escape(a)
else:
w = Entry(t)
w.part_text_set("guide", "Enter here")
w.size_hint_align = FILL_HORIZ
w.size_hint_weight = EXPAND_HORIZ
w.single_line = True
w.entry = cgi.escape(a)
l = Label(t)
l.text = k.replace("_", " ").capitalize()
l.size_hint_align = 0.0, 0.0
l.size_hint_weight = EXPAND_HORIZ
l.show()
t.pack(l, 0, i, 1, 1)
#w.size_hint_min = scale * 150, scale * 25
t.pack(w, 1, i, 1, 1)
w.show()
widgets[k] = w
i += 1
except TypeError:
pass #print("Error {}".format(k))
save_btn = Button(self)
save_btn.text = "Apply session settings"
save_btn.callback_clicked_add(self.apply_settings, widgets, session)
save_btn.show()
self.box.pack_end(save_btn)
def apply_settings(self, btn, widgets, session):
s = lt.session_settings()
for k, w in widgets.iteritems():
if k == "disk_cache_algorithm":
v = lt.disk_cache_algo_t(w.value)
elif isinstance(w, Spinner):
v = int(w.value)
elif isinstance(w, Slider):
v = w.value
elif isinstance(w, Entry):
v = w.entry.encode("utf-8")
elif isinstance(w, Check):
v = bool(w.state)
else:
v = None
setattr(s, k, v)
session.set_settings(s)
Notify.Information(self, "Session settings saved.")
class UnitSpinner(Box):
def __init__(self, parent, base, units):
self.base = base # the divisor/multiplier for units
self.units = units # a list of strings with the base unit description at index 0
super(UnitSpinner, self).__init__(parent)
self.horizontal = True
self.save_timer = None
s = self.spinner = Spinner(parent)
s.size_hint_weight = EXPAND_HORIZ
s.size_hint_align = FILL_HORIZ
s.min_max = 0, base
s.show()
self.pack_end(s)
hs = self.hoversel = Hoversel(parent)
for u in units:
hs.item_add(u, None, 0, lambda x=hs, y=None, u=u: x.text_set(u))
hs.show()
self.pack_end(hs)
def callback_changed_add(self, func, delay=None):
self.spinner.callback_changed_add(self.changed_cb, func, delay)
self.hoversel.callback_selected_add(self.changed_cb, func, delay)
def changed_cb(self, widget, *args):
func, delay = args[-2:]
if delay:
if self.save_timer is not None:
self.save_timer.delete()
self.save_timer = Timer(2.0, self.save_cb, func)
else:
self.save_cb(func)
def save_cb(self, func):
v = int(self.get_value())
log.debug("Saving value {}.".format(v))
func(v)
return False
def get_value(self):
return self.spinner.value * ( self.base ** self.units.index(self.hoversel.text) )
def set_value(self, v):
i = 0
while v // self.base > 0:
i += 1
v = float(v) / float(self.base)
        if i >= len(self.units):
            i = len(self.units) - 1
self.spinner.value = v
self.hoversel.text = self.units[i]
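    # Worked example (illustrative): with base=1024 and units starting
    # ("bytes/s", "KiB/s", "MiB/s", ...), set_value(3145728) divides twice
    # (3145728 -> 3072.0 -> 3.0), leaving spinner.value == 3.0 and
    # hoversel.text == "MiB/s"; get_value() then returns 3.0 * 1024 ** 2.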
class RangeSpinners(Box):
def __init__(self, parent, low, high, minim, maxim):
Box.__init__(self, parent)
self.size_hint_weight = EXPAND_BOTH
self.size_hint_align = FILL_BOTH
self.horizontal = True
l = self.listenlow = Spinner(parent)
l.size_hint_weight = EXPAND_BOTH
l.size_hint_align = FILL_BOTH
l.min_max = minim, maxim
l.value = low
self.pack_end(l)
l.show()
h = self.listenhigh = Spinner(parent)
h.size_hint_weight = EXPAND_BOTH
h.size_hint_align = FILL_BOTH
h.min_max = minim, maxim
h.value = high
self.pack_end(h)
h.show()
class Limits(Frame):
def __init__(self, parent, session):
Frame.__init__(self, parent)
self.text = "Limits"
self.size_hint_align = FILL_HORIZ
base = 1024
units = ( "bytes/s", "KiB/s", "MiB/s", "GiB/s", "TiB/s" )
t = Table(parent)
for r, values in enumerate((
("Upload limit", session.upload_rate_limit, session.set_upload_rate_limit),
("Download limit", session.download_rate_limit, session.set_download_rate_limit),
("Upload limit for local connections", session.local_upload_rate_limit, session.set_local_upload_rate_limit),
("Download limit for local connections", session.local_download_rate_limit, session.set_local_download_rate_limit),
)):
title, rfunc, wfunc = values
l = Label(parent)
l.text = title
l.size_hint_align = FILL_HORIZ
t.pack(l, 0, r, 1, 1)
l.show()
usw = UnitSpinner(parent, base, units)
usw.size_hint_weight = EXPAND_HORIZ
usw.size_hint_align = FILL_HORIZ
usw.set_value(rfunc())
usw.callback_changed_add(wfunc, delay=2.0)
t.pack(usw, 1, r, 1, 1)
usw.show()
self.content = t
# TODO:
# max uploads?, max conns?, max half open conns?
# ip filter
| maikodaraine/EnlightenmentUbuntu | apps/epour/epour/gui/Preferences.py | Python | unlicense | 21,829 |
# coding=utf-8
# Reverse a linked list from position m to n. Do it in-place and in one-pass.
#
# For example:
# Given 1->2->3->4->5->NULL, m = 2 and n = 4,
#
# return 1->4->3->2->5->NULL.
#
# Note:
# Given m, n satisfy the following condition:
# 1 ≤ m ≤ n ≤ length of list.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def reverseBetween(self, head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
"""
if m == n:
return head
new_head = ListNode(0)
new_head.next = head
p = new_head
for i in range(m - 1):
p = p.next
r = None
pm = p.next
for i in range(n - m + 1):
pn = pm.next
pm.next = r
r = pm
pm = pn
p.next.next = pm
p.next = r
return new_head.next
# Note:
# We first walk to the node just before position m, then reverse the
# m..n sublist in place and splice it back, all in one pass.
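#
# Hedged usage sketch (not part of the submitted solution; ListNode is
# assumed from the commented stub above):
#
# class ListNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.next = None
#
# head = None
# for v in (5, 4, 3, 2, 1):
#     node = ListNode(v)
#     node.next = head
#     head = node
# result = Solution().reverseBetween(head, 2, 4)
# out = []
# while result:
#     out.append(result.val)
#     result = result.next
# assert out == [1, 4, 3, 2, 5]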
| jigarkb/CTCI | LeetCode/092-M-ReverseLinkedListII.py | Python | mit | 1,109 |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from mopy.mojo_python_tests_runner import MojoPythonTestRunner
def main():
test_dir_list = [
# Tests of pylib bindings.
os.path.join('mojo', 'public', 'tools', 'bindings', 'pylib'),
# Tests of "mopy" python tools code.
os.path.join('mojo', 'tools', 'mopy'),
# Tests of python code in devtools.
os.path.join('mojo', 'devtools', 'common', 'devtoolslib')
]
for test_dir in test_dir_list:
runner = MojoPythonTestRunner(test_dir)
exit_code = runner.run()
if exit_code:
return exit_code
if __name__ == '__main__':
sys.exit(main())
| jianglu/mojo | mojo/tools/run_mojo_python_tests.py | Python | bsd-3-clause | 799 |
def split_range_lower(rng):
parts = str(rng).split()
if len(parts) != 3:
return None
if parts[2].strip() == 'less':
return 0
return int(parts[0].replace(',',''))
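# Illustrative inputs (assumed census-style range labels, inferred from the
# parsing logic; not taken from this repo): split_range_lower("10,000 to
# 14,999") returns 10000 and split_range_lower("2,500 or less") returns 0,
# while the companion below gives split_range_upper("2,500 or less") == 2500
# and split_range_upper("50,000 or more") is None.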
def split_range_upper(rng):
parts = str(rng).split()
if len(parts) != 3:
return None
if parts[2] == 'less':
return int(parts[0].replace(',',''))
if parts[2] == 'more':
return None
return int(parts[2].replace(',','')) | CivicKnowledge/metatab-packages | census.gov/variance_replicates/census.gov-varrep_tables_support-2011e2015/lib/split.py | Python | mit | 524 |
from django.template import Template, Context
def test_get_setting_as_variable(settings):
settings.GREETING = "Hola"
template = Template(
r'{% load config %}{% get_setting "GREETING" as greeting %}{{ greeting }}'
)
c = Context({})
assert template.render(c) == "Hola"
| cmheisel/ebdocker-py | sample/hello/tests.py | Python | mit | 297 |
import argparse
__all__ = ['Args']
Args = None
def parse():
global Args
parser = argparse.ArgumentParser(description='Voctogui')
parser.add_argument('-v', '--verbose', action='count', default=0,
help="Set verbosity level by using -v, -vv or -vvv.")
parser.add_argument('-c', '--color',
action='store',
choices=['auto', 'always', 'never'],
default='auto',
help="Control the use of colors in the Log-Output")
parser.add_argument('-t', '--timestamp', action='store_true',
help="Enable timestamps in the Log-Output")
parser.add_argument('-i', '--ini-file', action='store',
help="Load a custom configuration file")
parser.add_argument('-H', '--host', action='store',
help="Connect to this host "
"instead of the configured one.")
parser.add_argument('-d', '--dot', action='store_true',
help="Generate DOT files of pipelines into directory given in environment variable GST_DEBUG_DUMP_DOT_DIR")
parser.add_argument('-D', '--gst-debug-details', action='store', default=15,
help="Set details in dot graph. GST_DEBUG_DETAILS must be a combination the following values: 1 = show caps-name on edges, 2 = show caps-details on edges, 4 = show modified parameters on elements, 8 = show element states, 16 = show full element parameter values even if they are very long. Default: 15 = show all the typical details that one might want (15=1+2+4+8)")
parser.add_argument('-g', '--gstreamer-log', action='count', default=0,
help="Log gstreamer messages into voctocore log (Set log level by using -g, -gg or -ggg).")
Args = parser.parse_args()
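# Hedged usage sketch (assumed caller, not part of this module): the
# application would call parse() once at startup and then read the
# module-level Args from anywhere, e.g.
#
#     from lib import args
#     args.parse()
#     if args.Args.verbose >= 2:
#         ...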
| voc/voctomix | voctogui/lib/args.py | Python | mit | 1,875 |
__author__ = 'Lai Tash'
from .library import AND, NOT, XOR, OR
from .library import Button
from .library import PersistentSwitch
from .library import Informer
from .library import Switch
from .library import Timer
from .library import Latch | LaiTash/starschematic | stargate/library/__init__.py | Python | gpl-3.0 | 241 |
# -*- coding: utf-8 -*-
import requests
from framework.auth import Auth
from website import util
from website import settings
from osf.models import MailRecord
def record_message(message, nodes_created, users_created):
record = MailRecord.objects.create(
data=message.raw,
)
    record.users_created.add(*users_created)
record.nodes_created.add(*nodes_created)
record.save()
def provision_node(conference, message, node, user):
"""
:param Conference conference:
:param ConferenceMessage message:
:param Node node:
:param User user:
"""
auth = Auth(user=user)
node.update_node_wiki('home', message.text, auth)
if conference.admins.exists():
node.add_contributors(prepare_contributors(conference.admins.all()), log=False)
if not message.is_spam and conference.public_projects:
node.set_privacy('public', meeting_creation=True, auth=auth)
node.add_tag(message.conference_name, auth=auth)
node.add_tag(message.conference_category, auth=auth)
for systag in ['emailed', message.conference_name, message.conference_category]:
node.add_system_tag(systag, save=False)
if message.is_spam:
node.add_system_tag('spam', save=False)
node.save()
def prepare_contributors(admins):
return [
{
'user': admin,
'permissions': ['read', 'write', 'admin'],
'visible': False,
}
for admin in admins
]
def upload_attachment(user, node, attachment):
attachment.seek(0)
name = (attachment.filename or settings.MISSING_FILE_NAME)
content = attachment.read()
upload_url = util.waterbutler_api_url_for(node._id, 'osfstorage', name=name, cookie=user.get_or_create_cookie(), _internal=True)
requests.put(
upload_url,
data=content,
)
def upload_attachments(user, node, attachments):
for attachment in attachments:
upload_attachment(user, node, attachment)
| laurenrevere/osf.io | website/conferences/utils.py | Python | apache-2.0 | 1,976 |
from django import template
register = template.Library()
@register.filter
def rating_score(obj, user):
if not user.is_authenticated() or not hasattr(obj, '_ratings_field'):
return False
ratings_descriptor = getattr(obj, obj._ratings_field)
try:
rating = ratings_descriptor.get(user=user).score
except ratings_descriptor.model.DoesNotExist:
rating = None
return rating
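# Hedged usage sketch (assumed template, not from this file): after
# {% load ratings_tags %}, a template can look up the current user's score
# with something like
#     {{ entry|rating_score:request.user }}
# which renders the score, or False/None for anonymous users and missing
# ratings, as handled above.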
| Tmr/django-simple-ratings | ratings/templatetags/ratings_tags.py | Python | mit | 415 |
# -*- twisted.conch.test.test_mixin -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import time
from twisted.internet import reactor, protocol
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
from twisted.conch import mixin
class TestBufferingProto(mixin.BufferingMixin):
scheduled = False
rescheduled = 0
def schedule(self):
self.scheduled = True
return object()
def reschedule(self, token):
self.rescheduled += 1
class BufferingTest(unittest.TestCase):
def testBuffering(self):
p = TestBufferingProto()
t = p.transport = StringTransport()
self.failIf(p.scheduled)
L = ['foo', 'bar', 'baz', 'quux']
p.write('foo')
self.failUnless(p.scheduled)
self.failIf(p.rescheduled)
for s in L:
n = p.rescheduled
p.write(s)
self.assertEquals(p.rescheduled, n + 1)
self.assertEquals(t.value(), '')
p.flush()
self.assertEquals(t.value(), 'foo' + ''.join(L))
| sorenh/cc | vendor/Twisted-10.0.0/twisted/conch/test/test_mixin.py | Python | apache-2.0 | 1,110 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\qad_dimstyle_diff.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DimStyle_Diff_Dialog(object):
def setupUi(self, DimStyle_Diff_Dialog):
DimStyle_Diff_Dialog.setObjectName("DimStyle_Diff_Dialog")
DimStyle_Diff_Dialog.resize(443, 526)
DimStyle_Diff_Dialog.setMinimumSize(QtCore.QSize(443, 526))
DimStyle_Diff_Dialog.setMaximumSize(QtCore.QSize(443, 526))
self.label = QtWidgets.QLabel(DimStyle_Diff_Dialog)
self.label.setGeometry(QtCore.QRect(10, 10, 81, 21))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(DimStyle_Diff_Dialog)
self.label_2.setGeometry(QtCore.QRect(10, 40, 81, 21))
self.label_2.setObjectName("label_2")
self.dimStyle1 = QtWidgets.QComboBox(DimStyle_Diff_Dialog)
self.dimStyle1.setGeometry(QtCore.QRect(100, 10, 211, 22))
self.dimStyle1.setObjectName("dimStyle1")
self.dimStyle2 = QtWidgets.QComboBox(DimStyle_Diff_Dialog)
self.dimStyle2.setGeometry(QtCore.QRect(100, 40, 211, 22))
self.dimStyle2.setObjectName("dimStyle2")
self.line = QtWidgets.QFrame(DimStyle_Diff_Dialog)
self.line.setGeometry(QtCore.QRect(10, 70, 421, 16))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.msg = QtWidgets.QLabel(DimStyle_Diff_Dialog)
self.msg.setGeometry(QtCore.QRect(10, 80, 381, 21))
self.msg.setObjectName("msg")
self.layoutWidget = QtWidgets.QWidget(DimStyle_Diff_Dialog)
self.layoutWidget.setGeometry(QtCore.QRect(277, 490, 158, 25))
self.layoutWidget.setObjectName("layoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.closeButton = QtWidgets.QPushButton(self.layoutWidget)
self.closeButton.setObjectName("closeButton")
self.horizontalLayout.addWidget(self.closeButton)
self.helpButton = QtWidgets.QPushButton(self.layoutWidget)
self.helpButton.setObjectName("helpButton")
self.horizontalLayout.addWidget(self.helpButton)
self.tableWidget = QtWidgets.QTableWidget(DimStyle_Diff_Dialog)
self.tableWidget.setGeometry(QtCore.QRect(10, 110, 421, 371))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.copyButton = QtWidgets.QPushButton(DimStyle_Diff_Dialog)
self.copyButton.setGeometry(QtCore.QRect(404, 80, 31, 23))
self.copyButton.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/plugins/qad/icons/copy.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.copyButton.setIcon(icon)
self.copyButton.setObjectName("copyButton")
self.retranslateUi(DimStyle_Diff_Dialog)
self.helpButton.clicked.connect(DimStyle_Diff_Dialog.ButtonHELP_Pressed)
self.dimStyle1.currentIndexChanged['int'].connect(DimStyle_Diff_Dialog.DimStyleName1Changed)
self.dimStyle2.currentIndexChanged['int'].connect(DimStyle_Diff_Dialog.DimStyleName2Changed)
self.copyButton.clicked.connect(DimStyle_Diff_Dialog.copyToClipboard)
self.closeButton.clicked.connect(DimStyle_Diff_Dialog.accept)
QtCore.QMetaObject.connectSlotsByName(DimStyle_Diff_Dialog)
def retranslateUi(self, DimStyle_Diff_Dialog):
_translate = QtCore.QCoreApplication.translate
DimStyle_Diff_Dialog.setWindowTitle(_translate("DimStyle_Diff_Dialog", "QAD - Compare dimension styles"))
self.label.setText(_translate("DimStyle_Diff_Dialog", "Compare:"))
self.label_2.setText(_translate("DimStyle_Diff_Dialog", "With:"))
self.dimStyle1.setToolTip(_translate("DimStyle_Diff_Dialog", "Specify the first dimension style."))
        self.dimStyle2.setToolTip(_translate("DimStyle_Diff_Dialog", "Specify the second dimension style. If you set the second style as the first, all dimension style properties will be displayed."))
self.msg.setText(_translate("DimStyle_Diff_Dialog", "TextLabel"))
self.closeButton.setText(_translate("DimStyle_Diff_Dialog", "Close"))
self.helpButton.setText(_translate("DimStyle_Diff_Dialog", "?"))
        self.tableWidget.setToolTip(_translate("DimStyle_Diff_Dialog", "<html><head/><body><p>Display the result of comparing dimension styles. If you compare two different styles, the settings that are different between the two dimension styles, their current settings, and brief descriptions are listed. If you set the second style as the first, all dimension style properties will be displayed.</p></body></html>"))
self.copyButton.setToolTip(_translate("DimStyle_Diff_Dialog", "Copy the result of comparing into the clipboard."))
| gam17/QAD | qad_dimstyle_diff_ui.py | Python | gpl-3.0 | 5,215 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_image
description:
- Represents an Image resource.
- Google Compute Engine uses operating system images to create the root persistent
disks for your instances. You specify an image when you create an instance. Images
contain a boot loader, an operating system, and a root file system. Linux operating
system images are also capable of running containers on Compute Engine.
- Images can be either public or custom.
- Public images are provided and maintained by Google, open-source communities, and
third-party vendors. By default, all projects have access to these images and can
use them to create instances. Custom images are available only to your project.
You can create a custom image from root persistent disks and other images. Then,
use the custom image to create an instance.
short_description: Creates a GCP Image
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices: ['present', 'absent']
default: 'present'
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
disk_size_gb:
description:
- Size of the image when restored onto a persistent disk (in GB).
required: false
family:
description:
- The name of the image family to which this image belongs. You can create disks by
specifying an image family instead of a specific image name. The image family always
returns its latest image that is not deprecated. The name of the image family must
comply with RFC1035.
required: false
guest_os_features:
description:
- A list of features to enable on the guest OS. Applicable for bootable images only.
Currently, only one feature can be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows
each virtual CPU to have its own queue. For Windows images, you can only enable
VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher. Linux
images with kernel versions 3.17 and higher will support VIRTIO_SCSI_MULTIQUEUE.
- For new Windows images, the server might also populate this field with the value
WINDOWS, to indicate that this is a Windows image.
- This value is purely informational and does not enable or disable any features.
required: false
suboptions:
type:
description:
                    - The type of supported feature. Currently only VIRTIO_SCSI_MULTIQUEUE is supported.
For newer Windows images, the server might also populate this property with the
value WINDOWS to indicate that this is a Windows image. This value is purely informational
and does not enable or disable any features.
required: false
choices: ['VIRTIO_SCSI_MULTIQUEUE']
image_encryption_key:
description:
- Encrypts the image using a customer-supplied encryption key.
- After you encrypt an image with a customer-supplied key, you must provide the same
key if you use the image later (e.g. to create a disk from the image) .
required: false
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64
to either encrypt or decrypt this resource.
required: false
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key
that protects this resource.
required: false
licenses:
description:
- Any applicable license URI.
required: false
name:
description:
- Name of the resource; provided by the client when the resource is created. The name
must be 1-63 characters long, and comply with RFC1035. Specifically, the name must
be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot
be a dash.
required: true
raw_disk:
description:
- The parameters of the raw disk image.
required: false
suboptions:
container_type:
description:
- The format used to encode and transmit the block device, which should be TAR. This
is just a container and transmission format and not a runtime format. Provided by
the client when the disk image is created.
required: false
choices: ['TAR']
sha1_checksum:
description:
- An optional SHA1 checksum of the disk image before unpackaging.
- This is provided by the client when the disk image is created.
required: false
source:
description:
- The full Google Cloud Storage URL where disk storage is stored You must provide
either this property or the sourceDisk property but not both.
required: false
source_disk:
description:
- A reference to Disk resource.
required: false
source_disk_encryption_key:
description:
- The customer-supplied encryption key of the source disk. Required if the source
disk is protected by a customer-supplied encryption key.
required: false
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64
to either encrypt or decrypt this resource.
required: false
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key
that protects this resource.
required: false
source_disk_id:
description:
- The ID value of the disk used to create this image. This value may be used to determine
whether the image was taken from the current or a previous instance of a given disk
name.
required: false
source_type:
description:
- The type of the image used to create this disk. The default and only value is RAW
.
required: false
choices: ['RAW']
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a disk
gcp_compute_disk:
name: 'disk-image'
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
scopes:
- https://www.googleapis.com/auth/compute
state: present
register: disk
- name: create a image
gcp_compute_image:
name: testObject
source_disk: "{{ disk }}"
project: testProject
auth_kind: service_account
service_account_file: /tmp/auth.pem
scopes:
- https://www.googleapis.com/auth/compute
state: present
'''
RETURN = '''
archive_size_bytes:
description:
- Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).
returned: success
type: int
creation_timestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
deprecated:
description:
- The deprecation status associated with this image.
returned: success
type: complex
contains:
deleted:
description:
- An optional RFC3339 timestamp on or after which the state of this resource is intended
to change to DELETED. This is only informational and the status will not change
unless the client explicitly changes it.
returned: success
type: str
deprecated:
description:
- An optional RFC3339 timestamp on or after which the state of this resource is intended
to change to DEPRECATED. This is only informational and the status will not change
unless the client explicitly changes it.
returned: success
type: str
obsolete:
description:
- An optional RFC3339 timestamp on or after which the state of this resource is intended
to change to OBSOLETE. This is only informational and the status will not change
unless the client explicitly changes it.
returned: success
type: str
replacement:
description:
- The URL of the suggested replacement for a deprecated resource.
- The suggested replacement resource must be the same kind of resource as the deprecated
resource.
returned: success
type: str
state:
description:
- The deprecation state of this resource. This can be DEPRECATED, OBSOLETE, or DELETED.
Operations which create a new resource using a DEPRECATED resource will return successfully,
but with a warning indicating the deprecated resource and recommending its replacement.
Operations which use OBSOLETE or DELETED resources will be rejected and result in
an error.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
disk_size_gb:
description:
- Size of the image when restored onto a persistent disk (in GB).
returned: success
type: int
family:
description:
- The name of the image family to which this image belongs. You can create disks by
specifying an image family instead of a specific image name. The image family always
returns its latest image that is not deprecated. The name of the image family must
comply with RFC1035.
returned: success
type: str
guest_os_features:
description:
- A list of features to enable on the guest OS. Applicable for bootable images only.
Currently, only one feature can be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows
each virtual CPU to have its own queue. For Windows images, you can only enable
VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher. Linux
images with kernel versions 3.17 and higher will support VIRTIO_SCSI_MULTIQUEUE.
- For new Windows images, the server might also populate this field with the value
WINDOWS, to indicate that this is a Windows image.
- This value is purely informational and does not enable or disable any features.
returned: success
type: complex
contains:
type:
description:
                - The type of supported feature. Currently only VIRTIO_SCSI_MULTIQUEUE is supported.
For newer Windows images, the server might also populate this property with the
value WINDOWS to indicate that this is a Windows image. This value is purely informational
and does not enable or disable any features.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
image_encryption_key:
description:
- Encrypts the image using a customer-supplied encryption key.
- After you encrypt an image with a customer-supplied key, you must provide the same
key if you use the image later (e.g. to create a disk from the image) .
returned: success
type: complex
contains:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64
to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key
that protects this resource.
returned: success
type: str
licenses:
description:
- Any applicable license URI.
returned: success
type: list
name:
description:
- Name of the resource; provided by the client when the resource is created. The name
must be 1-63 characters long, and comply with RFC1035. Specifically, the name must
be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot
be a dash.
returned: success
type: str
raw_disk:
description:
- The parameters of the raw disk image.
returned: success
type: complex
contains:
container_type:
description:
- The format used to encode and transmit the block device, which should be TAR. This
is just a container and transmission format and not a runtime format. Provided by
the client when the disk image is created.
returned: success
type: str
sha1_checksum:
description:
- An optional SHA1 checksum of the disk image before unpackaging.
- This is provided by the client when the disk image is created.
returned: success
type: str
source:
description:
- The full Google Cloud Storage URL where disk storage is stored You must provide
either this property or the sourceDisk property but not both.
returned: success
type: str
source_disk:
description:
- A reference to Disk resource.
returned: success
type: dict
source_disk_encryption_key:
description:
- The customer-supplied encryption key of the source disk. Required if the source
disk is protected by a customer-supplied encryption key.
returned: success
type: complex
contains:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64
to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key
that protects this resource.
returned: success
type: str
source_disk_id:
description:
- The ID value of the disk used to create this image. This value may be used to determine
whether the image was taken from the current or a previous instance of a given disk
name.
returned: success
type: str
source_type:
description:
- The type of the image used to create this disk. The default and only value is RAW
.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
disk_size_gb=dict(type='int'),
family=dict(type='str'),
guest_os_features=dict(type='list', elements='dict', options=dict(
type=dict(type='str', choices=['VIRTIO_SCSI_MULTIQUEUE'])
)),
image_encryption_key=dict(type='dict', options=dict(
raw_key=dict(type='str'),
sha256=dict(type='str')
)),
licenses=dict(type='list', elements='str'),
name=dict(required=True, type='str'),
raw_disk=dict(type='dict', options=dict(
container_type=dict(type='str', choices=['TAR']),
sha1_checksum=dict(type='str'),
source=dict(type='str')
)),
source_disk=dict(type='dict'),
source_disk_encryption_key=dict(type='dict', options=dict(
raw_key=dict(type='str'),
sha256=dict(type='str')
)),
source_disk_id=dict(type='str'),
source_type=dict(type='str', choices=['RAW'])
)
)
state = module.params['state']
kind = 'compute#image'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
fetch = update(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#image',
u'description': module.params.get('description'),
u'diskSizeGb': module.params.get('disk_size_gb'),
u'family': module.params.get('family'),
u'guestOsFeatures': ImageGuestOsFeatuArray(module.params.get('guest_os_features', []), module).to_request(),
u'imageEncryptionKey': ImageImageEncryKey(module.params.get('image_encryption_key', {}), module).to_request(),
u'licenses': module.params.get('licenses'),
u'name': module.params.get('name'),
u'rawDisk': ImageRawDisk(module.params.get('raw_disk', {}), module).to_request(),
u'sourceDisk': replace_resource_dict(module.params.get(u'source_disk', {}), 'selfLink'),
u'sourceDiskEncryptionKey': ImagSourDiskEncrKey(module.params.get('source_disk_encryption_key', {}), module).to_request(),
u'sourceDiskId': module.params.get('source_disk_id'),
u'sourceType': module.params.get('source_type')
}
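    # Keep only the fields the user actually supplied; empty values are
    # dropped so they are not sent to the API.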
return_vals = {}
for k, v in request.items():
if v:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/images/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/images".format(**module.params)
def return_if_object(module, response, kind):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
if result['kind'] != kind:
module.fail_json(msg="Incorrect result: {kind}".format(**result))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'archiveSizeBytes': response.get(u'archiveSizeBytes'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'deprecated': ImageDeprecated(response.get(u'deprecated', {}), module).from_response(),
u'description': response.get(u'description'),
u'diskSizeGb': response.get(u'diskSizeGb'),
u'family': response.get(u'family'),
u'guestOsFeatures': ImageGuestOsFeatuArray(response.get(u'guestOsFeatures', []), module).from_response(),
u'id': response.get(u'id'),
u'imageEncryptionKey': ImageImageEncryKey(response.get(u'imageEncryptionKey', {}), module).from_response(),
u'licenses': response.get(u'licenses'),
u'name': response.get(u'name'),
u'rawDisk': ImageRawDisk(response.get(u'rawDisk', {}), module).from_response(),
u'sourceDisk': response.get(u'sourceDisk'),
u'sourceDiskEncryptionKey': ImagSourDiskEncrKey(response.get(u'sourceDiskEncryptionKey', {}), module).from_response(),
u'sourceDiskId': response.get(u'sourceDiskId'),
u'sourceType': response.get(u'sourceType')
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return None
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#image')
def wait_for_completion(status, op_result, module):
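    # Poll the operation status, sleeping between requests, until the
    # operation reports DONE; any errors reported along the way are fatal.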
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
if status not in ['PENDING', 'RUNNING', 'DONE']:
module.fail_json(msg="Invalid result %s" % status)
op_result = fetch_resource(module, op_uri, 'compute#operation')
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class ImageDeprecated(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'deleted': self.request.get('deleted'),
u'deprecated': self.request.get('deprecated'),
u'obsolete': self.request.get('obsolete'),
u'replacement': self.request.get('replacement'),
u'state': self.request.get('state')
})
def from_response(self):
return remove_nones_from_dict({
u'deleted': self.request.get(u'deleted'),
u'deprecated': self.request.get(u'deprecated'),
u'obsolete': self.request.get(u'obsolete'),
u'replacement': self.request.get(u'replacement'),
u'state': self.request.get(u'state')
})
class ImageGuestOsFeatuArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({
u'type': item.get('type')
})
def _response_from_item(self, item):
return remove_nones_from_dict({
u'type': item.get(u'type')
})
class ImageImageEncryKey(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'rawKey': self.request.get('raw_key'),
u'sha256': self.request.get('sha256')
})
def from_response(self):
return remove_nones_from_dict({
u'rawKey': self.request.get(u'rawKey'),
u'sha256': self.request.get(u'sha256')
})
class ImageRawDisk(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'containerType': self.request.get('container_type'),
u'sha1Checksum': self.request.get('sha1_checksum'),
u'source': self.request.get('source')
})
def from_response(self):
return remove_nones_from_dict({
u'containerType': self.request.get(u'containerType'),
u'sha1Checksum': self.request.get(u'sha1Checksum'),
u'source': self.request.get(u'source')
})
class ImagSourDiskEncrKey(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'rawKey': self.request.get('raw_key'),
u'sha256': self.request.get('sha256')
})
def from_response(self):
return remove_nones_from_dict({
u'rawKey': self.request.get(u'rawKey'),
u'sha256': self.request.get(u'sha256')
})
if __name__ == '__main__':
main()
| hryamzik/ansible | lib/ansible/modules/cloud/google/gcp_compute_image.py | Python | gpl-3.0 | 29,989 |
# coding=utf-8
"""
Program that scans all the resources of each type and builds
the corresponding __init__
Usage
    $ __scan__.py -arg1 -arg2 -arg3
        -folder
Game template
Author: PABLO PIZARRO @ ppizarro ~
Date: APRIL 2015
"""
# Import system libraries
from datetime import date
import os
import sys
from bin import Configloader
reload(sys)
# noinspection PyUnresolvedReferences
sys.setdefaultencoding('UTF8')
sys.dont_write_bytecode = True
# Get the current directory
__actualpath = str(os.path.abspath(os.path.dirname(__file__))) + "/"
sys.path.append(__actualpath.replace("\\resources", "\\bin"))
sys.path.append(__actualpath.replace("\\resources", ""))
# Import internal libraries
# Load the configuration files
config = Configloader(".config/filetype.ini")
folderformats = {}
for folder in config.getParameters():
if folder != "FORMAT":
_fileformats = config.getValue(folder).strip(config.getValue("FORMAT"))
folderformats[folder] = _fileformats
scanconfig = Configloader(".config/scan.ini")
# Program constants
HASH_LEN = int(scanconfig.getValue("SIZE"))
HEADER = """#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Directory {0}
# File generated automatically using __scan__.py
#
# Game template
# Author: PABLO PIZARRO @ ppizarro ~
# Total files: {3}
# Generated on: {2}
# Import libraries
import os
# Variable definitions
__actualpath = str(os.path.abspath(os.path.dirname(__file__))).replace("\\\\","/") + "/"
# Library for /{0}
{1} = {{"""
HEADER_LAST = """}}
# Function that returns element <index> from the {0} library
def get{1}(index):
try:
return {0}[index]
except:
return -1
# Function that returns the files in {0}
def get{1}Keys():
return {0}.keys()
# Function that prints the files in {0}
def print{1}Values():
print "<KEY> {0}[<KEY>]"
for file in {0}.keys():
print file +" "+str({0}[file])
"""
LOG_FOLDER = __actualpath.replace("\\resources", "\\data\\log")
def get_filename(fullpath):
"""
    Function that returns the file name given the full string of its
    static path
:param fullpath:
:return:
"""
fullpath = fullpath.split("\\")
filename = str(fullpath[len(fullpath) - 1]).split(".")
return filename[0]
def get_date():
"""
    Gets today's date
:return:
"""
fecha = date.today()
return str(fecha.day) + "/" + str(fecha.month) + "/" + str(fecha.year)
def get_folder(filename):
"""
    Function that returns the containing folder of a given file.
:param filename:
:return:
"""
fullpath = filename.split("\\")
return fullpath[len(fullpath) - 2]
def get_hash(filename):
"""
    Function that returns an identifier for each element.
:param filename:
:return:
"""
filename = filename.split("\\")
filename = filename[len(filename) - 1]
return abs(hash(filename)) % (10 ** HASH_LEN)
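# Illustrative example (actual digits depend on Python's string hash): with
# HASH_LEN = 4, get_hash(r"images\player.png") hashes just "player.png" and
# keeps the last four digits, yielding an id in the range 0-9999.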
# noinspection PyShadowingNames
def hash_valid(listhash):
"""
    Function that returns whether the hash list is free of collisions.
:param listhash:
:return:
"""
size = len(listhash)
colisions = 0
for i in range(0, size - 1):
if listhash[i][0] == listhash[i + 1][0]:
colisions += 1
if colisions:
return False, colisions
else:
return True, 0
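# Example: for a sorted list like [[12, "a.png"], [12, "b.png"], [30, "c.png"]]
# the first two ids collide, so hash_valid returns (False, 1); with all ids
# distinct it returns (True, 0).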
def isfolder(filename):
"""
    Function that checks whether a name is a folder.
:param filename:
:return:
"""
for char in config.getValue("NOTFILE"):
if char in filename:
return False
return True
# noinspection PyShadowingNames,PyShadowingBuiltins
def look(parentfolder, folder, folderlist):
"""
    Recursive function that appends every file found under
    parentfolder.
:param parentfolder:
:param folder:
:param folderlist:
:return:
"""
for file in os.listdir(folder): # @ReservedAssignment
if isfolder(file):
look(parentfolder, folder + "\\" + file, folderlist)
else:
if validfile(parentfolder, file):
folderlist.append(folder + "\\" + file)
# noinspection PyShadowingNames
def validfile(folder, filename):
"""
    Function that checks whether a file has a valid format.
:param folder:
:param filename:
:return:
"""
filename = filename.split(".")
return filename[1] in folderformats[folder.upper()]
# If the program is executed directly
if __name__ == '__main__':
    # Get the current date
SCAN_DATE = get_date()
def _run(_folder):
print scanconfig.getValue("ARGUMENT_LOADING").format(_folder),
if _folder in os.listdir(__actualpath):
            # Load the files
filelist = []
look(_folder, __actualpath + _folder, filelist)
if scanconfig.isTrue("HASH"):
hashedlist = []
for _i in filelist:
hashedlist.append([get_hash(_i), _i])
hashedlist.sort()
initfile = open(__actualpath + _folder + "/__init__.py", "w")
c = 0
totalelements = len(hashedlist)
for line in HEADER.split("\n"):
initfile.write(line.format(_folder, _folder.upper(), SCAN_DATE, totalelements) + "\n")
for line in hashedlist:
if c < totalelements - 1:
initfile.write(
"\t" + str(line[0]).rjust(HASH_LEN) + ": " + line[
1].replace(__actualpath,
'__actualpath + "').replace("\\",
"/").replace(
_folder + "/", "") + '", \\\n')
else:
initfile.write(
"\t" + str(line[0]).rjust(HASH_LEN) + ": " + line[
1].replace(__actualpath,
'__actualpath + "').replace("\\",
"/").replace(
_folder + "/", "") + '"\n')
c += 1
for line in HEADER_LAST.split("\n"):
initfile.write(line.format(_folder.upper(), _folder.title()) + "\n")
initfile.close()
                # Check the hash list for collisions
validate_hash = hash_valid(hashedlist)
if validate_hash[0]:
print scanconfig.getValue("FOLDER_LOADED").format(
len(filelist))
else:
print scanconfig.getValue(
"FOLDER_LOADED_HASHNOTVALID").format(validate_hash[1],
len(filelist),
scanconfig.getValue(
"SIZE"))
del hashedlist
else:
filelist.sort()
initfile = open(__actualpath + _folder + "/__init__.py", "w")
c = 0
totalelements = len(filelist)
prev_folder = ""
for line in HEADER.split("\n"):
initfile.write(line.format(_folder, _folder.upper(), SCAN_DATE, totalelements) + "\n")
for line in filelist:
linefolder = get_folder(line)
if prev_folder != linefolder:
prev_folder = linefolder
initfile.write("\n\t#" + linefolder.title() + "\n")
if c < totalelements - 1:
initfile.write(
'\t"' + get_filename(line) + '": ' + line.replace(
__actualpath, '__actualpath + "').replace("\\",
"/").replace(
_folder + "/", "") + '", \\\n')
else:
initfile.write(
'\t"' + get_filename(line) + '": ' + line.replace(
__actualpath, '__actualpath + "').replace("\\",
"/").replace(
_folder + "/", "") + '"\n')
c += 1
for line in HEADER_LAST.split("\n"):
initfile.write(line.format(_folder.upper(), _folder.title()) + "\n")
initfile.close()
print scanconfig.getValue("FOLDER_LOADED").format(
len(filelist))
del filelist
else:
print scanconfig.getValue("FOLDER_NOT_EXIST").format(_folder)
for i in range(1, len(sys.argv)):
if "-" not in sys.argv[i]:
print scanconfig.getValue("ARGUMENT_NOTVALID").format(sys.argv[i])
else:
folder = str(sys.argv[i]).replace("-", "").lower()
_run(folder)
if len(sys.argv) == 1:
print str(scanconfig.getValue("DEFAULT_RUNNING")).format(
scanconfig.getValue("DEFAULT_FOLDERS"))
for folder in scanconfig.getValue("DEFAULT_FOLDERS").split(","):
_run(folder.strip())
| ppizarror/Ned-For-Spod | resources/__scan__.py | Python | gpl-2.0 | 9,806 |
"""
Description:
This provides a VTK widget for pyGtk. This embeds a vtkRenderWindow
inside a GTK widget. This is based on GtkVTKRenderWindow.py.
The extensions here allow the use of gtkglext rather than gtkgl and
pygtk-2 rather than pygtk-0. It requires pygtk-2.0.0 or later.
There is a working example at the bottom.
Credits:
John Hunter <[email protected]> developed and tested
this code based on VTK's GtkVTKRenderWindow.py and extended it to
work with pygtk-2.0.0.
License:
VTK license.
"""
import math, sys
import pygtk
pygtk.require('2.0')
import gtk
import gtk.gtkgl
from gtk import gdk
import vtk
class GtkGLExtVTKRenderWindowBase(gtk.gtkgl.DrawingArea):
""" A base class that enables one to embed a vtkRenderWindow into
a pyGTK widget. This class embeds the RenderWindow correctly.
Provided are some empty methods that can be overloaded to provide
a user defined interaction behaviour. The event handling
functions have names that are somewhat similar to the ones in the
vtkInteractorStyle class included with VTK. """
def __init__(self, *args):
gtk.gtkgl.DrawingArea.__init__(self)
self.set_double_buffered(gtk.FALSE)
self._RenderWindow = vtk.vtkRenderWindow()
# private attributes
self.__Created = 0
# used by the LOD actors
self._DesiredUpdateRate = 15
self._StillUpdateRate = 0.0001
self.ConnectSignals()
# need this to be able to handle key_press events.
self.set_flags(gtk.CAN_FOCUS)
# default size
self.set_size_request(300, 300)
def ConnectSignals(self):
self.connect("realize", self.OnRealize)
self.connect("expose_event", self.OnExpose)
self.connect("configure_event", self.OnConfigure)
self.connect("button_press_event", self.OnButtonDown)
self.connect("button_release_event", self.OnButtonUp)
self.connect("motion_notify_event", self.OnMouseMove)
self.connect("enter_notify_event", self.OnEnter)
self.connect("leave_notify_event", self.OnLeave)
self.connect("key_press_event", self.OnKeyPress)
self.connect("delete_event", self.OnDestroy)
self.add_events(gdk.EXPOSURE_MASK|
gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.KEY_PRESS_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK)
def GetRenderWindow(self):
return self._RenderWindow
def GetRenderer(self):
self._RenderWindow.GetRenderers().InitTraversal()
return self._RenderWindow.GetRenderers().GetNextItem()
def SetDesiredUpdateRate(self, rate):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
self._DesiredUpdateRate = rate
def GetDesiredUpdateRate(self):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
return self._DesiredUpdateRate
def SetStillUpdateRate(self, rate):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
self._StillUpdateRate = rate
def GetStillUpdateRate(self):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
return self._StillUpdateRate
def Render(self):
if self.__Created:
self._RenderWindow.Render()
def OnRealize(self, *args):
if self.__Created == 0:
# you can't get the xid without the window being realized.
self.realize()
if sys.platform=='win32':
win_id = str(self.widget.window.handle)
else:
win_id = str(self.widget.window.xid)
self._RenderWindow.SetWindowInfo(win_id)
self.__Created = 1
return gtk.TRUE
def Created(self):
return self.__Created
def OnConfigure(self, widget, event):
self.widget=widget
self._RenderWindow.SetSize(event.width, event.height)
self.Render()
return gtk.TRUE
def OnExpose(self, *args):
self.Render()
return gtk.TRUE
def OnDestroy(self, *args):
self.hide()
del self._RenderWindow
self.destroy()
return gtk.TRUE
def OnButtonDown(self, wid, event):
"""Mouse button pressed."""
self._RenderWindow.SetDesiredUpdateRate(self._DesiredUpdateRate)
return gtk.TRUE
def OnButtonUp(self, wid, event):
"""Mouse button released."""
self._RenderWindow.SetDesiredUpdateRate(self._StillUpdateRate)
return gtk.TRUE
def OnMouseMove(self, wid, event):
"""Mouse has moved."""
return gtk.TRUE
def OnEnter(self, wid, event):
"""Entering the vtkRenderWindow."""
return gtk.TRUE
def OnLeave(self, wid, event):
"""Leaving the vtkRenderWindow."""
return gtk.TRUE
def OnKeyPress(self, wid, event):
"""Key pressed."""
return gtk.TRUE
def OnKeyRelease(self, wid, event):
"Key released."
return gtk.TRUE
class GtkGLExtVTKRenderWindow(GtkGLExtVTKRenderWindowBase):
""" An example of a fully functional GtkGLExtVTKRenderWindow that
is based on the vtkRenderWidget.py provided with the VTK
sources."""
def __init__(self, *args):
GtkGLExtVTKRenderWindowBase.__init__(self)
self._CurrentRenderer = None
self._CurrentCamera = None
self._CurrentZoom = 1.0
self._CurrentLight = None
self._ViewportCenterX = 0
self._ViewportCenterY = 0
self._Picker = vtk.vtkCellPicker()
self._PickedAssembly = None
self._PickedProperty = vtk.vtkProperty()
self._PickedProperty.SetColor(1, 0, 0)
self._PrePickedProperty = None
self._OldFocus = None
# these record the previous mouse position
self._LastX = 0
self._LastY = 0
    def OnButtonDown(self, wid, event):
        self._RenderWindow.SetDesiredUpdateRate(self._DesiredUpdateRate)
        return self.StartMotion(wid, event)
    def OnButtonUp(self, wid, event):
        self._RenderWindow.SetDesiredUpdateRate(self._StillUpdateRate)
        return self.EndMotion(wid, event)
def OnMouseMove(self, wid, event=None):
if ((event.state & gdk.BUTTON1_MASK) == gdk.BUTTON1_MASK):
if ((event.state & gdk.SHIFT_MASK) == gdk.SHIFT_MASK):
m = self.get_pointer()
self.Pan(m[0], m[1])
else:
m = self.get_pointer()
self.Rotate(m[0], m[1])
elif ((event.state & gdk.BUTTON2_MASK) == gdk.BUTTON2_MASK):
m = self.get_pointer()
self.Pan(m[0], m[1])
elif ((event.state & gdk.BUTTON3_MASK) == gdk.BUTTON3_MASK):
m = self.get_pointer()
self.Zoom(m[0], m[1])
else:
return gtk.FALSE
return gtk.TRUE
def OnEnter(self, wid, event=None):
# a render hack because grab_focus blanks the renderwin
self.grab_focus()
w = self.get_pointer()
self.UpdateRenderer(w[0], w[1])
return gtk.TRUE
def OnKeyPress(self, wid, event=None):
#if (event.keyval == gdk.keyval_from_name("q") or
# event.keyval == gdk.keyval_from_name("Q")):
# gtk.mainquit()
if (event.keyval == gdk.keyval_from_name('r') or
event.keyval == gdk.keyval_from_name('R')):
self.Reset()
return gtk.TRUE
elif (event.keyval == gdk.keyval_from_name('w') or
event.keyval == gdk.keyval_from_name('W')):
self.Wireframe()
return gtk.TRUE
elif (event.keyval == gdk.keyval_from_name('s') or
event.keyval == gdk.keyval_from_name('S')):
self.Surface()
return gtk.TRUE
elif (event.keyval == gdk.keyval_from_name('p') or
event.keyval == gdk.keyval_from_name('P')):
m = self.get_pointer()
self.PickActor(m[0], m[1])
return gtk.TRUE
else:
return gtk.FALSE
def GetZoomFactor(self):
return self._CurrentZoom
def SetZoomFactor(self, zf):
self._CurrentZoom = zf
def GetPicker(self):
return self._Picker
def Render(self):
if (self._CurrentLight):
light = self._CurrentLight
light.SetPosition(self._CurrentCamera.GetPosition())
light.SetFocalPoint(self._CurrentCamera.GetFocalPoint())
GtkGLExtVTKRenderWindowBase.Render(self)
def UpdateRenderer(self,x,y):
"""
UpdateRenderer will identify the renderer under the mouse and set
up _CurrentRenderer, _CurrentCamera, and _CurrentLight.
"""
windowX,windowY = self.widget.window.get_size()
renderers = self._RenderWindow.GetRenderers()
numRenderers = renderers.GetNumberOfItems()
self._CurrentRenderer = None
renderers.InitTraversal()
for i in range(0,numRenderers):
renderer = renderers.GetNextItem()
vx,vy = (0,0)
if (windowX > 1):
vx = float(x)/(windowX-1)
if (windowY > 1):
vy = (windowY-float(y)-1)/(windowY-1)
(vpxmin,vpymin,vpxmax,vpymax) = renderer.GetViewport()
if (vx >= vpxmin and vx <= vpxmax and
vy >= vpymin and vy <= vpymax):
self._CurrentRenderer = renderer
self._ViewportCenterX = float(windowX)*(vpxmax-vpxmin)/2.0\
+vpxmin
self._ViewportCenterY = float(windowY)*(vpymax-vpymin)/2.0\
+vpymin
self._CurrentCamera = self._CurrentRenderer.GetActiveCamera()
lights = self._CurrentRenderer.GetLights()
lights.InitTraversal()
self._CurrentLight = lights.GetNextItem()
break
self._LastX = x
self._LastY = y
def GetCurrentRenderer(self):
if self._CurrentRenderer is None:
renderers = self._RenderWindow.GetRenderers()
numRenderers = renderers.GetNumberOfItems()
renderers.InitTraversal()
for i in range(0,numRenderers):
renderer = renderers.GetNextItem()
break
self._CurrentRenderer = renderer
return self._CurrentRenderer
def GetCurrentCamera(self):
if self._CurrentCamera is None:
renderer = self.GetCurrentRenderer()
self._CurrentCamera = renderer.GetActiveCamera()
return self._CurrentCamera
def StartMotion(self, wid, event=None):
x = event.x
y = event.y
self.UpdateRenderer(x,y)
return gtk.TRUE
def EndMotion(self, wid, event=None):
if self._CurrentRenderer:
self.Render()
return gtk.TRUE
def Rotate(self,x,y):
if self._CurrentRenderer:
self._CurrentCamera.Azimuth(self._LastX - x)
self._CurrentCamera.Elevation(y - self._LastY)
self._CurrentCamera.OrthogonalizeViewUp()
self._LastX = x
self._LastY = y
self._CurrentRenderer.ResetCameraClippingRange()
self.Render()
def Pan(self,x,y):
if self._CurrentRenderer:
renderer = self._CurrentRenderer
camera = self._CurrentCamera
(pPoint0,pPoint1,pPoint2) = camera.GetPosition()
(fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
if (camera.GetParallelProjection()):
renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
renderer.WorldToDisplay()
fx,fy,fz = renderer.GetDisplayPoint()
renderer.SetDisplayPoint(fx-x+self._LastX,
fy+y-self._LastY,
fz)
renderer.DisplayToWorld()
fx,fy,fz,fw = renderer.GetWorldPoint()
camera.SetFocalPoint(fx,fy,fz)
renderer.SetWorldPoint(pPoint0,pPoint1,pPoint2,1.0)
renderer.WorldToDisplay()
fx,fy,fz = renderer.GetDisplayPoint()
renderer.SetDisplayPoint(fx-x+self._LastX,
fy+y-self._LastY,
fz)
renderer.DisplayToWorld()
fx,fy,fz,fw = renderer.GetWorldPoint()
camera.SetPosition(fx,fy,fz)
else:
(fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
# Specify a point location in world coordinates
renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
renderer.WorldToDisplay()
# Convert world point coordinates to display coordinates
dPoint = renderer.GetDisplayPoint()
focalDepth = dPoint[2]
aPoint0 = self._ViewportCenterX + (x - self._LastX)
aPoint1 = self._ViewportCenterY - (y - self._LastY)
renderer.SetDisplayPoint(aPoint0,aPoint1,focalDepth)
renderer.DisplayToWorld()
(rPoint0,rPoint1,rPoint2,rPoint3) = renderer.GetWorldPoint()
if (rPoint3 != 0.0):
rPoint0 = rPoint0/rPoint3
rPoint1 = rPoint1/rPoint3
rPoint2 = rPoint2/rPoint3
camera.SetFocalPoint((fPoint0 - rPoint0) + fPoint0,
(fPoint1 - rPoint1) + fPoint1,
(fPoint2 - rPoint2) + fPoint2)
camera.SetPosition((fPoint0 - rPoint0) + pPoint0,
(fPoint1 - rPoint1) + pPoint1,
(fPoint2 - rPoint2) + pPoint2)
self._LastX = x
self._LastY = y
self.Render()
def Zoom(self,x,y):
if self._CurrentRenderer:
renderer = self._CurrentRenderer
camera = self._CurrentCamera
zoomFactor = math.pow(1.02,(0.5*(self._LastY - y)))
self._CurrentZoom = self._CurrentZoom * zoomFactor
if camera.GetParallelProjection():
parallelScale = camera.GetParallelScale()/zoomFactor
camera.SetParallelScale(parallelScale)
else:
camera.Dolly(zoomFactor)
renderer.ResetCameraClippingRange()
self._LastX = x
self._LastY = y
self.Render()
def Reset(self):
if self._CurrentRenderer:
self._CurrentRenderer.ResetCamera()
self.Render()
def Wireframe(self):
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToWireframe()
self.Render()
def Surface(self):
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToSurface()
self.Render()
def PickActor(self,x,y):
if self._CurrentRenderer:
renderer = self._CurrentRenderer
picker = self._Picker
windowX,windowY = self.widget.window.get_size()
picker.Pick(x,(windowY - y - 1),0.0,renderer)
assembly = picker.GetAssembly()
if (self._PickedAssembly != None and
self._PrePickedProperty != None):
self._PickedAssembly.SetProperty(self._PrePickedProperty)
# release hold of the property
self._PrePickedProperty.UnRegister(self._PrePickedProperty)
self._PrePickedProperty = None
if (assembly != None):
self._PickedAssembly = assembly
self._PrePickedProperty = self._PickedAssembly.GetProperty()
# hold onto the property
self._PrePickedProperty.Register(self._PrePickedProperty)
self._PickedAssembly.SetProperty(self._PickedProperty)
self.Render()
def main():
# The main window
window = gtk.Window()
window.set_title("A GtkGLExtVTKRenderWindow Demo!")
window.connect("destroy", gtk.mainquit)
window.connect("delete_event", gtk.mainquit)
window.set_border_width(10)
vtkgtk = GtkGLExtVTKRenderWindow()
vtkgtk.show()
vbox = gtk.VBox(spacing=3)
vbox.show()
vbox.pack_start(vtkgtk)
button = gtk.Button('My Button')
button.show()
vbox.pack_start(button)
window.add(vbox)
window.set_size_request(400, 400)
# The VTK stuff.
cone = vtk.vtkConeSource()
cone.SetResolution(80)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
#coneActor = vtk.vtkLODActor()
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.GetProperty().SetColor(0.5, 0.5, 1.0)
ren = vtk.vtkRenderer()
vtkgtk.GetRenderWindow().AddRenderer(ren)
ren.AddActor(coneActor)
# show the main window and start event processing.
window.show()
gtk.mainloop()
if __name__ == "__main__":
main()
| b3c/VTK-5.8 | Wrapping/Python/vtk/gtk/GtkGLExtVTKRenderWindow.py | Python | bsd-3-clause | 17,778 |
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField
from wtforms.validators import Required
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
def create_app(configfile=None):
app = Flask(__name__)
    AppConfig(app, configfile)  # Flask-Appconfig is not necessary, but
                                # highly recommended =)
# https://github.com/mbr/flask-appconfig
Bootstrap(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/')
def index():
form = ExampleForm()
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
create_app().run(debug=True)
| miguelgrinberg/flask-bootstrap | sample_application/sample_app.py | Python | apache-2.0 | 1,821 |
"""
Merge an arbitrary number of settings files ordered by priority.
This script will generate a new settings yaml file and write it to standard
output. The files are ordered on the command line from lowest priority to
highest priority, where higher priority settings override lower priority ones.
Usage:
pip install -e . # from the root of edx-load-tests
merge_settings settings_1.yml settings_2.yml ... settings_N.yml > merged.yml
Keys present in settings_N.yml are guaranteed to appear in the output file
because it has the highest priority.
"""
import sys
import click
from helpers.settings import Settings
@click.command()
@click.argument('settings_files', type=click.File(), nargs=-1)
def main(settings_files):
"""
The only command, and main entry point for this script.
Arguments:
settings_files (tuple of file objects):
The file objects refer to settings files. The files (tuple
elements) are ordered from lowest to highest priority.
"""
if len(settings_files) == 1:
# Only one filename was given on the command line, so we might as well
# preserve any comments by copying directly to stdout
sys.stdout.write(settings_files[0].read())
else:
merged_settings = Settings()
for settings_file in settings_files:
merged_settings.update(Settings.from_file(settings_file))
merged_settings.dump(sys.stdout)
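# Illustrative merge semantics (assuming Settings.update behaves like a
# per-key dict merge):
#   settings_1.yml: {a: 1, b: 2}
#   settings_2.yml: {b: 3}
#   merged output:  {a: 1, b: 3}   # the highest-priority file wins per key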
| edx/edx-load-tests | util/merge_settings.py | Python | apache-2.0 | 1,441 |
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2006 Donald N. Allingham
# Copyright (C) 2008 Gary Burton
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Base view for Place Views
"""
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import Place
from gramps.gui.views.listview import ListView, TEXT, ICON
from gramps.gui.widgets.menuitem import add_menuitem
from gramps.gen.errors import WindowActiveError
from gramps.gui.views.bookmarks import PlaceBookmarks
from gramps.gen.config import config
from gramps.gui.dialog import ErrorDialog
from gramps.gui.pluginmanager import GuiPluginManager
from gramps.gui.ddtargets import DdTargets
from gramps.gui.editors import EditPlace, DeletePlaceQuery
from gramps.gui.filters.sidebar import PlaceSidebarFilter
from gramps.gui.merge import MergePlace
from gramps.gen.plug import CATEGORY_QR_PLACE
from gramps.gen.utils.location import located_in
#-------------------------------------------------------------------------
#
# internationalization
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# PlaceBaseView
#
#-------------------------------------------------------------------------
class PlaceBaseView(ListView):
""" base view class for place views, be they flat list or tree
"""
COL_NAME = 0
COL_ID = 1
COL_TITLE = 2
COL_TYPE = 3
COL_CODE = 4
COL_LAT = 5
COL_LON = 6
COL_PRIV = 7
COL_TAGS = 8
COL_CHAN = 9
# column definitions
COLUMNS = [
(_('Name'), TEXT, None),
(_('ID'), TEXT, None),
(_('Title'), TEXT, None),
(_('Type'), TEXT, None),
(_('Code'), TEXT, None),
(_('Latitude'), TEXT, None),
(_('Longitude'), TEXT, None),
(_('Private'), ICON, 'gramps-lock'),
(_('Tags'), TEXT, None),
(_('Last Changed'), TEXT, None),
]
# default setting with visible columns, order of the col, and their size
CONFIGSETTINGS = (
('columns.visible', [COL_NAME, COL_ID, COL_TYPE, COL_CODE]),
('columns.rank', [COL_NAME, COL_TITLE, COL_ID, COL_TYPE, COL_CODE,
COL_LAT, COL_LON, COL_PRIV, COL_TAGS, COL_CHAN]),
('columns.size', [250, 250, 75, 100, 100, 150, 150, 40, 100, 100])
)
ADD_MSG = _("Add a new place")
EDIT_MSG = _("Edit the selected place")
DEL_MSG = _("Delete the selected place")
MERGE_MSG = _("Merge the selected places")
FILTER_TYPE = "Place"
QR_CATEGORY = CATEGORY_QR_PLACE
def __init__(self, pdata, dbstate, uistate, title, model, nav_group):
signal_map = {
'place-add' : self.row_add,
'place-update' : self.row_update,
'place-delete' : self.row_delete,
'place-rebuild' : self.object_build,
}
self.mapservice = config.get('interface.mapservice')
self.mapservicedata = {}
ListView.__init__(
self, title, pdata, dbstate, uistate,
model, signal_map,
PlaceBookmarks, nav_group,
multiple=True,
filter_class=PlaceSidebarFilter)
self.func_list.update({
'<PRIMARY>J' : self.jump,
'<PRIMARY>BackSpace' : self.key_delete,
})
self.maptoolbtn = None
self.additional_uis.append(self.additional_ui())
def navigation_type(self):
return 'Place'
def define_actions(self):
ListView.define_actions(self)
self._add_toolmenu_action('MapsList', _('Loading...'),
_("Attempt to see selected locations with a Map "
"Service (OpenstreetMap, Google Maps, ...)"),
self.gotomap,
_('Select a Map Service'))
self._add_action('GotoMap', 'go-jump',
_('_Look up with Map Service'),
callback=self.gotomap,
tip=_("Attempt to see this location with a Map "
"Service (OpenstreetMap, Google Maps, ...)"))
self._add_action('FilterEdit', None, _('Place Filter Editor'),
callback=self.filter_editor)
self._add_action('QuickReport', None, _("Quick View"), None, None, None)
def set_inactive(self):
"""called by viewmanager when moving away from the page
Here we need to remove the menutoolbutton from the menu
"""
tb = self.uistate.viewmanager.uimanager.get_widget('/ToolBar')
tb.remove(self.maptoolbtn)
ListView.set_inactive(self)
def change_page(self):
"""
Called by viewmanager at end of realization when arriving on the page
At this point the Toolbar is created. We need to:
1. get the menutoolbutton
2. add all possible map services in the drop down menu
3. add the actions that correspond to clicking in this drop down menu
4. set icon and label of the menutoolbutton now that it is realized
5. store label so it can be changed when selection changes
"""
ListView.change_page(self)
#menutoolbutton has to be made and added in correct place on toolbar
if not self.maptoolbtn:
self.maptoolbtn = Gtk.MenuToolButton()
self.maptoolbtn.set_icon_name('go-jump')
self.maptoolbtn.connect('clicked', self.gotomap)
self.mmenu = self.__create_maps_menu_actions()
self.maptoolbtn.set_menu(self.mmenu)
self.maptoolbtn.show()
tb = self.uistate.viewmanager.uimanager.get_widget('/ToolBar')
ind = tb.get_item_index(self.uistate.viewmanager.uimanager.get_widget(
'/ToolBar/CommonEdit/Merge'))
tb.insert(self.maptoolbtn, ind+1)
widget = self.maptoolbtn
if not self.mapservicedata:
return
self.mapslistlabel = []
        if self.mapservice not in self.mapservicedata:
#stored val no longer exists, use the first key instead
self.set_mapservice(list(self.mapservicedata.keys())[0])
        #store all gtk labels to be able to update label on selection change
widget.set_menu(self.mmenu)
widget.set_arrow_tooltip_text(_('Select a Map Service'))
widget.set_tooltip_text(
_("Attempt to see selected locations with a Map "
"Service (OpenstreetMap, Google Maps, ...)"))
lbl = Gtk.Label(label=self.mapservice_label())
lbl.show()
self.mapslistlabel.append(lbl)
widget.set_label_widget(self.mapslistlabel[-1])
widget.set_icon_name('go-jump')
if self.drag_info():
self.list.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK,
[],
Gdk.DragAction.COPY)
tglist = Gtk.TargetList.new([])
tglist.add(self.drag_info().atom_drag_type,
self.drag_info().target_flags,
self.drag_info().app_id)
tglist.add_text_targets (0)
self.list.drag_source_set_target_list(tglist)
def __create_maps_menu_actions(self):
"""
Function creating a menu and actions that are used as dropdown menu
from the menutoolbutton
"""
menu = Gtk.Menu()
#select the map services to show
self.mapservicedata = {}
servlist = GuiPluginManager.get_instance().get_reg_mapservices()
for i, pdata in enumerate(servlist):
key = pdata.id.replace(' ', '-')
add_menuitem(menu, pdata.name, None,
make_callback(self.set_mapservice, key))
self.mapservicedata[key] = pdata
return menu
def set_mapservice(self, mapkey):
"""
change the service that runs on click of the menutoolbutton
used as callback menu on menu clicks
"""
self.mapservice = mapkey
for label in self.mapslistlabel:
label.set_label(self.mapservice_label())
label.show()
config.set('interface.mapservice', mapkey)
config.save()
def mapservice_label(self):
"""
return the current label for the menutoolbutton
"""
return self.mapservicedata[self.mapservice].name
def gotomap(self, obj):
"""
Run the map service
"""
#First test if any map service is available
if not len(self.mapservicedata):
msg = _("No map service is available.")
msg2 = _("Check your installation.")
ErrorDialog(msg, msg2, parent=self.uistate.window)
return
place_handles = self.selected_handles()
try:
place_handle = self.selected_handles()[0]
except IndexError:
msg = _("No place selected.")
msg2 = _("You need to select a place to be able to view it"
" on a map. Some Map Services might support multiple"
" selections.")
ErrorDialog(msg, msg2, parent=self.uistate.window)
return
#TODO: support for descriptions in some cases. For now, pass None
#TODO: Later this might be 'Birth of William' ....
places = [(x, None) for x in place_handles]
#run the mapservice:
pmgr = GuiPluginManager.get_instance()
serv = self.mapservicedata[self.mapservice]
mod = pmgr.load_plugin(serv)
if mod:
servfunc = eval('mod.' + serv.mapservice)
servfunc()(self.dbstate.db, places, self.uistate)
else:
print('Failed to load map plugin, see Plugin Manager')
def drag_info(self):
return DdTargets.PLACE_LINK
def get_stock(self):
return 'gramps-place'
def additional_ui(self):
return '''<ui>
<menubar name="MenuBar">
<menu action="FileMenu">
<placeholder name="LocalExport">
<menuitem action="ExportTab"/>
</placeholder>
</menu>
<menu action="BookMenu">
<placeholder name="AddEditBook">
<menuitem action="AddBook"/>
<menuitem action="EditBook"/>
</placeholder>
</menu>
<menu action="GoMenu">
<placeholder name="CommonGo">
<menuitem action="Back"/>
<menuitem action="Forward"/>
<separator/>
</placeholder>
</menu>
<menu action="EditMenu">
<placeholder name="CommonEdit">
<menuitem action="Add"/>
<menuitem action="Edit"/>
<menuitem action="Remove"/>
<menuitem action="Merge"/>
</placeholder>
<menuitem action="FilterEdit"/>
</menu>
</menubar>
<toolbar name="ToolBar">
<placeholder name="CommonNavigation">
<toolitem action="Back"/>
<toolitem action="Forward"/>
</placeholder>
<placeholder name="CommonEdit">
<toolitem action="Add"/>
<toolitem action="Edit"/>
<toolitem action="Remove"/>
<toolitem action="Merge"/>
<separator/>
</placeholder>
</toolbar>
<popup name="Popup">
<menuitem action="Back"/>
<menuitem action="Forward"/>
<separator/>
<menuitem action="Add"/>
<menuitem action="Edit"/>
<menuitem action="Remove"/>
<menuitem action="Merge"/>
<separator/>
<menu name="QuickReport" action="QuickReport"/>
<separator/>
<menuitem action="GotoMap"/>
</popup>
</ui>'''
def add(self, obj):
try:
EditPlace(self.dbstate, self.uistate, [], Place())
except WindowActiveError:
pass
def remove(self, obj):
for handle in self.selected_handles():
for link in self.dbstate.db.find_backlink_handles(handle,['Place']):
msg = _("Cannot delete place.")
msg2 = _("This place is currently referenced by another place. "
"First remove the places it contains.")
ErrorDialog(msg, msg2, parent=self.uistate.window)
return
self.remove_selected_objects()
def remove_object_from_handle(self, handle):
person_list = [
item[1] for item in
self.dbstate.db.find_backlink_handles(handle,['Person'])]
family_list = [
item[1] for item in
self.dbstate.db.find_backlink_handles(handle,['Family'])]
event_list = [
item[1] for item in
self.dbstate.db.find_backlink_handles(handle,['Event'])]
object = self.dbstate.db.get_place_from_handle(handle)
query = DeletePlaceQuery(self.dbstate, self.uistate, object,
person_list, family_list, event_list)
is_used = len(person_list) + len(family_list) + len(event_list) > 0
return (query, is_used, object)
def edit(self, obj):
for handle in self.selected_handles():
place = self.dbstate.db.get_place_from_handle(handle)
try:
EditPlace(self.dbstate, self.uistate, [], place)
except WindowActiveError:
pass
def merge(self, obj):
"""
Merge the selected places.
"""
mlist = self.selected_handles()
if len(mlist) != 2:
msg = _("Cannot merge places.")
msg2 = _("Exactly two places must be selected to perform a merge. "
"A second place can be selected by holding down the "
"control key while clicking on the desired place.")
ErrorDialog(msg, msg2, parent=self.uistate.window)
else:
if (located_in(self.dbstate.db, mlist[0], mlist[1]) or
located_in(self.dbstate.db, mlist[1], mlist[0])):
msg = _("Cannot merge places.")
msg2 = _("Merging these places would create a cycle in the "
"place hierarchy.")
ErrorDialog(msg, msg2, parent=self.uistate.window)
else:
MergePlace(self.dbstate, self.uistate, [], mlist[0], mlist[1],
self.merged)
def merged(self):
"""
Rebuild the model after a merge to reflect changes in the hierarchy.
"""
if not (self.model.get_flags() & Gtk.TreeModelFlags.LIST_ONLY):
self.build_tree()
def get_handle_from_gramps_id(self, gid):
obj = self.dbstate.db.get_place_from_gramps_id(gid)
if obj:
return obj.get_handle()
else:
return None
def tag_updated(self, handle_list):
"""
Update tagged rows when a tag color changes.
"""
all_links = set([])
for tag_handle in handle_list:
links = set([link[1] for link in
self.dbstate.db.find_backlink_handles(tag_handle,
include_classes='Place')])
all_links = all_links.union(links)
self.row_update(list(all_links))
def add_tag(self, transaction, place_handle, tag_handle):
"""
Add the given tag to the given place.
"""
place = self.dbstate.db.get_place_from_handle(place_handle)
place.add_tag(tag_handle)
self.dbstate.db.commit_place(place, transaction)
def get_default_gramplets(self):
"""
Define the default gramplets for the sidebar and bottombar.
"""
return (("Place Filter",),
("Place Details",
"Place Enclosed By",
"Place Encloses",
"Place Gallery",
"Place Citations",
"Place Notes",
"Place Backlinks"))
def make_callback(func, val):
return lambda x: func(val)
| beernarrd/gramps | gramps/plugins/lib/libplaceview.py | Python | gpl-2.0 | 17,547 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from cx_Freeze import setup, Executable
from command import cmds
import glob
includes = []
excludes = []
packages = []
setup(
name='lit',
version='0.1.4',
install_requires=[
'wmi',
'pywin32'
],
cmdclass=cmds,
options={
'build_exe': {
'packages': packages,
'includes': includes,
'excludes': excludes,
'include_files': ['style.css'] + glob.glob('ele/*')
}
},
executables=[
Executable(
'lit.py',
base='Win32GUI',
targetName='lit.exe',
compress=True,
icon='icons/48.ico'
),
Executable(
'server.py',
base='Win32GUI',
targetName='litserver.exe',
compress=True
)
]
)
| Answeror/lit | setup.py | Python | mit | 866 |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^', include('maps.urls')),
url(r'^venue/', include('venues.urls')),
url(r'^sightings/', include('sightings.urls')),
# Examples:
# url(r'^$', 'hoponit.views.home', name='home'),
# url(r'^hoponit/', include('hoponit.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| shaunokeefe/hoponit | hoponit/hoponit/urls.py | Python | mit | 734 |
# -*- coding: utf-8 -*-
import codecs
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx.util.nodes import set_source_info
class FileInputDirective(Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
    option_spec = {
        # declared so the :encoding: option read below is actually accepted
        'encoding': directives.encoding,
        'removelinebreaks': directives.flag,
        'prepend': directives.unchanged,
        'indent': directives.flag,
        'split': int,
        'splitend': directives.unchanged,
    }
def run(self):
document = self.state.document
if not document.settings.file_insertion_enabled:
return [document.reporter.warning('File insertion disabled',
line=self.lineno)]
env = document.settings.env
rel_filename, filename = env.relfn2path(self.arguments[0])
encoding = self.options.get('encoding', env.config.source_encoding)
codec_info = codecs.lookup(encoding)
f = None
try:
f = codecs.StreamReaderWriter(open(filename, 'rb'),
codec_info[2], codec_info[3], 'strict')
lines = f.readlines()
except (IOError, OSError):
return [document.reporter.warning(
'Include file %r not found or reading it failed' % filename,
line=self.lineno)]
except UnicodeError:
return [document.reporter.warning(
'Encoding %r used for reading included file %r seems to '
'be wrong, try giving an :encoding: option' %
(encoding, filename))]
finally:
if f is not None:
f.close()
text = ''.join(lines)
if 'removelinebreaks' in self.options:
text = text.replace('\n', ' ').replace('\r', ' ')
        prepend = self.options.get('prepend')
        if prepend is not None:
            prepend = prepend + ' '
            text = prepend + text
        else:
            # keep len(prepend) valid for the split logic below
            prepend = ''
split = self.options.get('split')
if split is not None:
splitend = self.options.get('splitend', '')
linelength = split - len(splitend)
# Add a line break if the text will be split into multiple lines
if len(text) > split:
splitend += '\n'
output, start, end = '', 0, linelength
while True:
# Find last space in range and extract text before it
end = min(text.rfind(' ', start, end) + 1, len(text))
line = text[start:end]
# Prepend spaces in the length of 'prepend' for all but the
# first line.
if start > 0:
line = ' ' * len(prepend) + line
output += line
start = end
end += linelength - len(prepend)
if start == len(text):
text = output.rstrip()
break
else:
# Don't insert split end on last line
output += splitend
retnode = nodes.literal_block(text, text, source=filename)
set_source_info(self, retnode)
env.note_dependency(rel_filename)
return [retnode]
def setup(app):
app.add_directive('fileinclude', FileInputDirective)
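# Illustrative use from a reST source file (path and option values are
# assumptions, not taken from any particular project):
#
#   .. fileinclude:: ../../VERSION
#      :removelinebreaks:
#      :prepend: version =
#      :split: 72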
| tomka/CATMAID | sphinx-doc/source/exts/catmaid_fileinclude.py | Python | gpl-3.0 | 3,363 |
#
# grc.py -- Ginga Remote Control module
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import threading
import ginga.util.six as six
if six.PY2:
import xmlrpclib
import SimpleXMLRPCServer
import cPickle as pickle
else:
import xmlrpc.client as xmlrpclib
import xmlrpc.server as SimpleXMLRPCServer
import pickle
from ginga.util.six.moves import map
from ginga.misc import Task, log
# undefined passed value--for a data type that cannot be converted
undefined = '#UNDEFINED'
class RemoteClient(object):
def __init__(self, host, port):
self.host = host
self.port = port
self._proxy = None
def __connect(self):
# Get proxy to server
url = "http://%s:%d" % (self.host, self.port)
self._proxy = xmlrpclib.ServerProxy(url, allow_none=True)
return self._proxy
def __getattr__(self, method_name):
def call(*args, **kwdargs):
if self._proxy is None:
self.__connect()
# marshall args and kwdargs
p_args = marshall(args)
p_kwdargs = marshall(kwdargs)
res = self._proxy.dispatch_call(method_name, p_args, p_kwdargs)
return unmarshall(res)
return call
class RemoteServer(object):
def __init__(self, obj, host='localhost', port=9000, ev_quit=None,
logger=None):
super(RemoteServer, self).__init__()
self.robj = obj
# What port to listen for requests
self.port = port
# If blank, listens on all interfaces
self.host = host
if logger is None:
logger = log.get_logger(null=True)
self.logger = logger
if ev_quit is None:
ev_quit = threading.Event()
self.ev_quit = ev_quit
def start(self, thread_pool=None):
self.server = SimpleXMLRPCServer.SimpleXMLRPCServer((self.host,
self.port),
allow_none=True)
self.server.register_function(self.dispatch_call)
if thread_pool is not None:
t1 = Task.FuncTask2(self.monitor_shutdown)
thread_pool.addTask(t1)
t2 = Task.FuncTask2(self.server.serve_forever, poll_interval=0.1)
thread_pool.addTask(t2)
else:
self.server.serve_forever(poll_interval=0.1)
def stop(self):
self.server.shutdown()
def restart(self):
# restart server
self.server.shutdown()
self.start()
def monitor_shutdown(self):
# the thread running this method waits until the entire viewer
# is exiting and then shuts down the XML-RPC server which is
# running in a different thread
self.ev_quit.wait()
self.server.shutdown()
def dispatch_call(self, method_name, p_args, p_kwdargs):
if hasattr(self.robj, method_name):
method = getattr(self.robj, method_name)
# unmarshall args, kwdargs
self.logger.debug("unmarshalling params")
args = unmarshall(p_args)
kwdargs = unmarshall(p_kwdargs)
self.logger.debug("calling method '%s'" % (method_name))
            res = method(*args, **kwdargs)
            self.logger.debug("marshalling return val")
            return marshall(res)
raise AttributeError("No such method: '%s'" % (method_name))
# List of XML-RPC acceptable return types
ok_types = [str, int, float, bool, list, tuple, dict]
## def marshall(res):
## """Transform results into XML-RPC friendy ones.
## """
## ptype = type(res)
## if ptype in ok_types:
## return (0, res)
## raise ValueError("Don't know how to marshall this type of argument (%s)" % (
## ptype))
## ## pkl = pickle.dumps(res)
## ## return ('pickle', pkl)
## def unmarshall(rtnval):
## (kind, res) = rtnval
## if kind == 0:
## # this is a type passable by the transport
## return res
## raise ValueError("Don't know how to marshall this kind of argument (%s)" % (
## kind))
## ## if kind == 'pickle':
## ## return pickle.loads(res)
## def marshall(res):
## pkl = pickle.dumps(res)
## return ('pickle', pkl)
## def unmarshall(rtnval):
## (kind, res) = rtnval
## return pickle.loads(res)
def marshall(res):
if not type(res) in ok_types:
res = undefined
return res
def unmarshall(rtnval):
return rtnval
def prep_arg(arg):
try:
return float(arg)
except ValueError:
try:
return int(arg)
except ValueError:
return arg
def prep_args(args):
a, k = [], {}
for arg in args:
if '=' in arg:
key, arg = arg.split('=')
k[key] = prep_arg(arg)
else:
a.append(prep_arg(arg))
return a, k
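# Illustrative pairing of the two classes above (a sketch, not part of the
# module API; the Target class, host and port are assumptions):
#
#   class Target(object):
#       def add(self, a, b):
#           return a + b
#
#   server = RemoteServer(Target(), host='localhost', port=9000)
#   server.start()   # blocks; pass thread_pool= to serve in the background
#
#   # elsewhere:
#   client = RemoteClient('localhost', 9000)
#   client.add(1, 2)  # -> 3; args marshalled out, result unmarshalled back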
| rajul/ginga | ginga/util/grc.py | Python | bsd-3-clause | 5,101 |
#!/usr/bin/env python
#
# Crypto.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
from .JSClass import JSClass
class Crypto(JSClass):
def __init__(self):
pass
@property
def enableSmartCardEvents(self):
return False
@property
def version(self):
return "2.4"
def disableRightClick(self):
pass
def importUserCertificates(self, nickname, cmmfResponse, forceToBackUp):
return ""
def logout(self):
pass
| pdelsante/thug | thug/DOM/Crypto.py | Python | gpl-2.0 | 1,079 |
import _plotly_utils.basevalidators
class TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="tickwidth", parent_name="layout.ternary.caxis", **kwargs
):
super(TickwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 0),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/layout/ternary/caxis/_tickwidth.py | Python | mit | 467 |
# Downloading dependancies
import nltk
nltk.download("stopwords")
nltk.download("punkt")
nltk.download("wordnet")
# Setup of ambiruptor package
from setuptools import setup
setup(name='ambiruptor',
version='0.1',
description='Disambiguation tool',
author='Ambiruptor',
license='GNU GENERAL PUBLIC LICENSE',
packages=['ambiruptor',
'ambiruptor.base',
'ambiruptor.library',
'ambiruptor.library.learners',
'ambiruptor.library.miners',
'ambiruptor.library.preprocessors',
],
zip_safe=False)
| Ambiruptor/Ambiruptor | setup.py | Python | gpl-3.0 | 620 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class FunctionVersionContentTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.serverless.v1.services("ZSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.functions("ZHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.function_versions("ZNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.function_version_content().fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://serverless.twilio.com/v1/Services/ZSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Functions/ZHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Versions/ZNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Content',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "ZN00000000000000000000000000000000",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ZS00000000000000000000000000000000",
"function_sid": "ZH00000000000000000000000000000000",
"content": "exports.handler = function (context, event, callback) {\\n const request = require(\\"request\\");\\n return request(\\"http://www.google.com\\", function (error, response, body) {\\n callback(null, response.statusCode);\\n });\\n};",
"url": "https://serverless.twilio.com/v1/Services/ZS00000000000000000000000000000000/Functions/ZH00000000000000000000000000000000/Versions/ZN00000000000000000000000000000000/Content"
}
'''
))
actual = self.client.serverless.v1.services("ZSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.functions("ZHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.function_versions("ZNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.function_version_content().fetch()
self.assertIsNotNone(actual)
| twilio/twilio-python | tests/integration/serverless/v1/service/function/function_version/test_function_version_content.py | Python | mit | 2,404 |
# Allows the creation of infix operators
# Thanks to Ferdinand Jamitzky
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/384122
class Infix(object):
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
# To create a binary operator, just write a function that takes 2 arguments, e.g.
# def my_add(a, b):
#     return a + b
#
# Then import the class...
# from Infix import Infix  # lets us make binary infix-style operators
#
# Then make the operator; let's call it p...
# p = Infix(my_add)
#
# Now to use it, just write
# arg1 |p| arg2
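# A minimal runnable sketch of the pattern above; `my_add` and the operator
# name `p` are illustrative, not part of this module.
if __name__ == '__main__':
    def my_add(a, b):
        return a + b
    p = Infix(my_add)
    print(2 |p| 3)      # 5 -- pipe-style spelling
    print(2 << p >> 3)  # 5 -- shift-style spelling
    print(p(2, 3))      # 5 -- plain call still works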
| Kevin-Ray-Johnson/DM_Desk_Calc | Infix.py | Python | gpl-2.0 | 976 |
"""
Command to retrieve aggregated student forums data in a .csv
"""
import csv
import optparse
import os
from django.core.management.base import BaseCommand, CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from instructor.utils import collect_student_forums_data
class Command(BaseCommand):
"""
Retrieve aggregated student forums data, write to .csv
"""
    help = ('Usage: collect_student_forums_data <course_id> --output-dir=<output_dir>')
args = '<course_id>'
option_list = BaseCommand.option_list + (
optparse.make_option(
'-o',
'--output-dir',
action='store',
dest='output_dir',
default=None,
help='Write output to a directory rather than stdout',
),
)
def handle(self, *args, **options):
if not args:
raise CommandError('Course ID must be specified to fetch data')
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(args[0])
except InvalidKeyError:
raise CommandError('The course ID given was invalid')
file_name = "{course_id}-student-forums.csv".format(course_id=args[0].replace("/", "-"))
if options['output_dir']:
csv_file = open(os.path.join(options['output_dir'], file_name), 'wb')
else:
csv_file = self.stdout
writer = csv.writer(csv_file, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL)
header, rows = collect_student_forums_data(course_id)
writer.writerow(header)
for row in rows:
writer.writerow(_utf8_encoded_row(row))
def _utf8_encoded_row(row):
"""Encodes a row to utf-8"""
return [unicode(item).encode('utf-8') for item in row]
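# Example invocation (illustrative; the exact manage.py wrapper and flags
# depend on the deployment):
#   python manage.py lms collect_student_forums_data Org/Course/Run -o /tmp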
| jbassen/edx-platform | lms/djangoapps/instructor/management/commands/collect_student_forums_data.py | Python | agpl-3.0 | 1,826 |
{
'name': 'Control access to Apps',
'version': '1.0.0',
'author': 'IT-Projects LLC, Ivan Yelizariev',
'category': 'Tools',
'website': 'https://twitter.com/yelizariev',
'price': 10.00,
'currency': 'EUR',
'depends': [
'web_settings_dashboard',
'access_restricted'
],
'data': [
'security/access_apps_security.xml',
'security/ir.model.access.csv',
],
'qweb': [
'static/src/xml/dashboard.xml',
],
'installable': True
}
| berpweb/berp_custom | access_apps/__openerp__.py | Python | agpl-3.0 | 510 |
# -*- coding: utf-8 -*-
# Copyright (C) 2013 by
# Fred Morstatter <[email protected]>
# Jordi Torrents <[email protected]>
# All rights reserved.
# BSD license.
import random
from networkx.utils import not_implemented_for
from networkx.utils import py_random_state
__all__ = ['average_clustering']
__author__ = """\n""".join(['Fred Morstatter <[email protected]>',
'Jordi Torrents <[email protected]>'])
@py_random_state(2)
@not_implemented_for('directed')
def average_clustering(G, trials=1000, seed=None):
r"""Estimates the average clustering coefficient of G.
The local clustering of each node in `G` is the fraction of triangles
that actually exist over all possible triangles in its neighborhood.
The average clustering coefficient of a graph `G` is the mean of
local clusterings.
This function finds an approximate average clustering coefficient
for G by repeating `n` times (defined in `trials`) the following
experiment: choose a node at random, choose two of its neighbors
at random, and check if they are connected. The approximate
coefficient is the fraction of triangles found over the number
of trials [1]_.
Parameters
----------
G : NetworkX graph
trials : integer
Number of trials to perform (default 1000).
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
c : float
Approximated average clustering coefficient.
References
----------
.. [1] Schank, Thomas, and Dorothea Wagner. Approximating clustering
coefficient and transitivity. Universität Karlsruhe, Fakultät für
Informatik, 2004.
http://www.emis.ams.org/journals/JGAA/accepted/2005/SchankWagner2005.9.2.pdf
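    Examples
    --------
    A quick sanity check: every neighborhood of a complete graph is fully
    connected, so the estimate is exactly 1.0 for any seed.
    >>> import networkx as nx
    >>> G = nx.complete_graph(5)
    >>> average_clustering(G, trials=100, seed=10)
    1.0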
"""
n = len(G)
triangles = 0
nodes = list(G)
for i in [int(seed.random() * n) for i in range(trials)]:
nbrs = list(G[nodes[i]])
if len(nbrs) < 2:
continue
u, v = seed.sample(nbrs, 2)
if u in G[v]:
triangles += 1
return triangles / float(trials)
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/approximation/clustering_coefficient.py | Python | gpl-3.0 | 2,210 |